root/include/linux/memcontrol.h


DEFINITIONS

This source file includes the following definitions.
  1. mem_cgroup_is_root
  2. mem_cgroup_disabled
  3. mem_cgroup_protection
  4. mem_cgroup_nodeinfo
  5. mem_cgroup_lruvec
  6. mem_cgroup_from_css
  7. mem_cgroup_put
  8. mem_cgroup_id
  9. mem_cgroup_from_seq
  10. lruvec_memcg
  11. parent_mem_cgroup
  12. mem_cgroup_is_descendant
  13. mm_match_cgroup
  14. mem_cgroup_online
  15. mem_cgroup_get_zone_lru_size
  16. mem_cgroup_enter_user_fault
  17. mem_cgroup_exit_user_fault
  18. task_in_memcg_oom
  19. memcg_page_state
  20. memcg_page_state_local
  21. mod_memcg_state
  22. __mod_memcg_page_state
  23. mod_memcg_page_state
  24. lruvec_page_state
  25. lruvec_page_state_local
  26. mod_lruvec_state
  27. __mod_lruvec_page_state
  28. mod_lruvec_page_state
  29. count_memcg_events
  30. count_memcg_page_event
  31. count_memcg_event_mm
  32. memcg_memory_event
  33. memcg_memory_event_mm
  34. mem_cgroup_is_root
  35. mem_cgroup_disabled
  36. memcg_memory_event
  37. memcg_memory_event_mm
  38. mem_cgroup_protection
  39. mem_cgroup_protected
  40. mem_cgroup_try_charge
  41. mem_cgroup_try_charge_delay
  42. mem_cgroup_commit_charge
  43. mem_cgroup_cancel_charge
  44. mem_cgroup_uncharge
  45. mem_cgroup_uncharge_list
  46. mem_cgroup_migrate
  47. mem_cgroup_lruvec
  48. mem_cgroup_page_lruvec
  49. mm_match_cgroup
  50. get_mem_cgroup_from_mm
  51. get_mem_cgroup_from_page
  52. mem_cgroup_put
  53. mem_cgroup_iter
  54. mem_cgroup_iter_break
  55. mem_cgroup_scan_tasks
  56. mem_cgroup_id
  57. mem_cgroup_from_id
  58. mem_cgroup_from_seq
  59. lruvec_memcg
  60. mem_cgroup_online
  61. mem_cgroup_get_zone_lru_size
  62. mem_cgroup_get_max
  63. mem_cgroup_size
  64. mem_cgroup_print_oom_context
  65. mem_cgroup_print_oom_meminfo
  66. lock_page_memcg
  67. __unlock_page_memcg
  68. unlock_page_memcg
  69. mem_cgroup_handle_over_high
  70. mem_cgroup_enter_user_fault
  71. mem_cgroup_exit_user_fault
  72. task_in_memcg_oom
  73. mem_cgroup_oom_synchronize
  74. mem_cgroup_get_oom_group
  75. mem_cgroup_print_oom_group
  76. memcg_page_state
  77. memcg_page_state_local
  78. __mod_memcg_state
  79. mod_memcg_state
  80. __mod_memcg_page_state
  81. mod_memcg_page_state
  82. lruvec_page_state
  83. lruvec_page_state_local
  84. __mod_lruvec_state
  85. mod_lruvec_state
  86. __mod_lruvec_page_state
  87. mod_lruvec_page_state
  88. __mod_lruvec_slab_state
  89. mod_memcg_obj_state
  90. mem_cgroup_soft_limit_reclaim
  91. mem_cgroup_split_huge_fixup
  92. count_memcg_events
  93. __count_memcg_events
  94. count_memcg_page_event
  95. count_memcg_event_mm
  96. __inc_memcg_state
  97. __dec_memcg_state
  98. __inc_memcg_page_state
  99. __dec_memcg_page_state
  100. __inc_lruvec_state
  101. __dec_lruvec_state
  102. __inc_lruvec_page_state
  103. __dec_lruvec_page_state
  104. __inc_lruvec_slab_state
  105. __dec_lruvec_slab_state
  106. inc_memcg_state
  107. dec_memcg_state
  108. inc_memcg_page_state
  109. dec_memcg_page_state
  110. inc_lruvec_state
  111. dec_lruvec_state
  112. inc_lruvec_page_state
  113. dec_lruvec_page_state
  114. mem_cgroup_track_foreign_dirty
  115. mem_cgroup_wb_domain
  116. mem_cgroup_wb_stats
  117. mem_cgroup_track_foreign_dirty
  118. mem_cgroup_flush_foreign
  119. mem_cgroup_under_socket_pressure
  120. mem_cgroup_sk_alloc
  121. mem_cgroup_sk_free
  122. mem_cgroup_under_socket_pressure
  123. memcg_set_shrinker_bit
  124. memcg_kmem_enabled
  125. memcg_kmem_charge
  126. memcg_kmem_uncharge
  127. memcg_kmem_charge_memcg
  128. memcg_kmem_uncharge_memcg
  129. memcg_cache_id
  130. memcg_kmem_charge
  131. memcg_kmem_uncharge
  132. __memcg_kmem_charge
  133. __memcg_kmem_uncharge
  134. memcg_kmem_enabled
  135. memcg_cache_id
  136. memcg_get_cache_ids
  137. memcg_put_cache_ids
  138. mem_cgroup_from_obj

   1 /* SPDX-License-Identifier: GPL-2.0-or-later */
   2 /* memcontrol.h - Memory Controller
   3  *
   4  * Copyright IBM Corporation, 2007
   5  * Author Balbir Singh <balbir@linux.vnet.ibm.com>
   6  *
   7  * Copyright 2007 OpenVZ SWsoft Inc
   8  * Author: Pavel Emelianov <xemul@openvz.org>
   9  */
  10 
  11 #ifndef _LINUX_MEMCONTROL_H
  12 #define _LINUX_MEMCONTROL_H
  13 #include <linux/cgroup.h>
  14 #include <linux/vm_event_item.h>
  15 #include <linux/hardirq.h>
  16 #include <linux/jump_label.h>
  17 #include <linux/page_counter.h>
  18 #include <linux/vmpressure.h>
  19 #include <linux/eventfd.h>
  20 #include <linux/mm.h>
  21 #include <linux/vmstat.h>
  22 #include <linux/writeback.h>
  23 #include <linux/page-flags.h>
  24 
  25 struct mem_cgroup;
  26 struct page;
  27 struct mm_struct;
  28 struct kmem_cache;
  29 
  30 /* Cgroup-specific page state, on top of universal node page state */
  31 enum memcg_stat_item {
  32         MEMCG_CACHE = NR_VM_NODE_STAT_ITEMS,
  33         MEMCG_RSS,
  34         MEMCG_RSS_HUGE,
  35         MEMCG_SWAP,
  36         MEMCG_SOCK,
  37         /* XXX: why are these zone and not node counters? */
  38         MEMCG_KERNEL_STACK_KB,
  39         MEMCG_NR_STAT,
  40 };
  41 
  42 enum memcg_memory_event {
  43         MEMCG_LOW,
  44         MEMCG_HIGH,
  45         MEMCG_MAX,
  46         MEMCG_OOM,
  47         MEMCG_OOM_KILL,
  48         MEMCG_SWAP_MAX,
  49         MEMCG_SWAP_FAIL,
  50         MEMCG_NR_MEMORY_EVENTS,
  51 };
  52 
  53 enum mem_cgroup_protection {
  54         MEMCG_PROT_NONE,
  55         MEMCG_PROT_LOW,
  56         MEMCG_PROT_MIN,
  57 };
  58 
  59 struct mem_cgroup_reclaim_cookie {
  60         pg_data_t *pgdat;
  61         int priority;
  62         unsigned int generation;
  63 };
  64 
  65 #ifdef CONFIG_MEMCG
  66 
  67 #define MEM_CGROUP_ID_SHIFT     16
  68 #define MEM_CGROUP_ID_MAX       USHRT_MAX
  69 
  70 struct mem_cgroup_id {
  71         int id;
  72         refcount_t ref;
  73 };
  74 
  75 /*
  76  * Per memcg event counter is incremented at every pagein/pageout. With THP,
   77  * it will be incremented by the number of pages. This counter is used to
   78  * trigger some periodic events. This is straightforward and better than
   79  * using jiffies etc. to handle periodic memcg events.
  80  */
  81 enum mem_cgroup_events_target {
  82         MEM_CGROUP_TARGET_THRESH,
  83         MEM_CGROUP_TARGET_SOFTLIMIT,
  84         MEM_CGROUP_TARGET_NUMAINFO,
  85         MEM_CGROUP_NTARGETS,
  86 };
  87 
  88 struct memcg_vmstats_percpu {
  89         long stat[MEMCG_NR_STAT];
  90         unsigned long events[NR_VM_EVENT_ITEMS];
  91         unsigned long nr_page_events;
  92         unsigned long targets[MEM_CGROUP_NTARGETS];
  93 };
  94 
  95 struct mem_cgroup_reclaim_iter {
  96         struct mem_cgroup *position;
  97         /* scan generation, increased every round-trip */
  98         unsigned int generation;
  99 };
 100 
 101 struct lruvec_stat {
 102         long count[NR_VM_NODE_STAT_ITEMS];
 103 };
 104 
 105 /*
 106  * Bitmap of shrinker::id corresponding to memcg-aware shrinkers,
 107  * which have elements charged to this memcg.
 108  */
 109 struct memcg_shrinker_map {
 110         struct rcu_head rcu;
 111         unsigned long map[0];
 112 };
 113 
 114 /*
  115  * per-node information in the memory controller.
 116  */
 117 struct mem_cgroup_per_node {
 118         struct lruvec           lruvec;
 119 
 120         /* Legacy local VM stats */
 121         struct lruvec_stat __percpu *lruvec_stat_local;
 122 
 123         /* Subtree VM stats (batched updates) */
 124         struct lruvec_stat __percpu *lruvec_stat_cpu;
 125         atomic_long_t           lruvec_stat[NR_VM_NODE_STAT_ITEMS];
 126 
 127         unsigned long           lru_zone_size[MAX_NR_ZONES][NR_LRU_LISTS];
 128 
 129         struct mem_cgroup_reclaim_iter  iter[DEF_PRIORITY + 1];
 130 
 131         struct memcg_shrinker_map __rcu *shrinker_map;
 132 
 133         struct rb_node          tree_node;      /* RB tree node */
 134         unsigned long           usage_in_excess;/* Set to the value by which */
  135                                                 /* the soft limit is exceeded */
 136         bool                    on_tree;
 137         bool                    congested;      /* memcg has many dirty pages */
 138                                                 /* backed by a congested BDI */
 139 
 140         struct mem_cgroup       *memcg;         /* Back pointer, we cannot */
 141                                                 /* use container_of        */
 142 };
 143 
 144 struct mem_cgroup_threshold {
 145         struct eventfd_ctx *eventfd;
 146         unsigned long threshold;
 147 };
 148 
 149 /* For threshold */
 150 struct mem_cgroup_threshold_ary {
 151         /* An array index points to threshold just below or equal to usage. */
 152         int current_threshold;
 153         /* Size of entries[] */
 154         unsigned int size;
 155         /* Array of thresholds */
 156         struct mem_cgroup_threshold entries[0];
 157 };
 158 
 159 struct mem_cgroup_thresholds {
 160         /* Primary thresholds array */
 161         struct mem_cgroup_threshold_ary *primary;
 162         /*
 163          * Spare threshold array.
 164          * This is needed to make mem_cgroup_unregister_event() "never fail".
 165          * It must be able to store at least primary->size - 1 entries.
 166          */
 167         struct mem_cgroup_threshold_ary *spare;
 168 };
 169 
 170 enum memcg_kmem_state {
 171         KMEM_NONE,
 172         KMEM_ALLOCATED,
 173         KMEM_ONLINE,
 174 };
 175 
 176 #if defined(CONFIG_SMP)
 177 struct memcg_padding {
 178         char x[0];
 179 } ____cacheline_internodealigned_in_smp;
 180 #define MEMCG_PADDING(name)      struct memcg_padding name;
 181 #else
 182 #define MEMCG_PADDING(name)
 183 #endif
 184 
 185 /*
 186  * Remember four most recent foreign writebacks with dirty pages in this
 187  * cgroup.  Inode sharing is expected to be uncommon and, even if we miss
 188  * one in a given round, we're likely to catch it later if it keeps
 189  * foreign-dirtying, so a fairly low count should be enough.
 190  *
 191  * See mem_cgroup_track_foreign_dirty_slowpath() for details.
 192  */
 193 #define MEMCG_CGWB_FRN_CNT      4
 194 
 195 struct memcg_cgwb_frn {
 196         u64 bdi_id;                     /* bdi->id of the foreign inode */
 197         int memcg_id;                   /* memcg->css.id of foreign inode */
 198         u64 at;                         /* jiffies_64 at the time of dirtying */
 199         struct wb_completion done;      /* tracks in-flight foreign writebacks */
 200 };
 201 
 202 /*
 203  * The memory controller data structure. The memory controller controls both
 204  * page cache and RSS per cgroup. We would eventually like to provide
 205  * statistics based on the statistics developed by Rik Van Riel for clock-pro,
 206  * to help the administrator determine what knobs to tune.
 207  */
 208 struct mem_cgroup {
 209         struct cgroup_subsys_state css;
 210 
 211         /* Private memcg ID. Used to ID objects that outlive the cgroup */
 212         struct mem_cgroup_id id;
 213 
 214         /* Accounted resources */
 215         struct page_counter memory;
 216         struct page_counter swap;
 217 
 218         /* Legacy consumer-oriented counters */
 219         struct page_counter memsw;
 220         struct page_counter kmem;
 221         struct page_counter tcpmem;
 222 
 223         /* Upper bound of normal memory consumption range */
 224         unsigned long high;
 225 
 226         /* Range enforcement for interrupt charges */
 227         struct work_struct high_work;
 228 
 229         unsigned long soft_limit;
 230 
 231         /* vmpressure notifications */
 232         struct vmpressure vmpressure;
 233 
 234         /*
 235          * Should the accounting and control be hierarchical, per subtree?
 236          */
 237         bool use_hierarchy;
 238 
 239         /*
  240          * Should the OOM killer kill all tasks in this cgroup if it kills one?
 241          */
 242         bool oom_group;
 243 
 244         /* protected by memcg_oom_lock */
 245         bool            oom_lock;
 246         int             under_oom;
 247 
 248         int     swappiness;
 249         /* OOM-Killer disable */
 250         int             oom_kill_disable;
 251 
 252         /* memory.events and memory.events.local */
 253         struct cgroup_file events_file;
 254         struct cgroup_file events_local_file;
 255 
 256         /* handle for "memory.swap.events" */
 257         struct cgroup_file swap_events_file;
 258 
 259         /* protect arrays of thresholds */
 260         struct mutex thresholds_lock;
 261 
 262         /* thresholds for memory usage. RCU-protected */
 263         struct mem_cgroup_thresholds thresholds;
 264 
 265         /* thresholds for mem+swap usage. RCU-protected */
 266         struct mem_cgroup_thresholds memsw_thresholds;
 267 
 268         /* For oom notifier event fd */
 269         struct list_head oom_notify;
 270 
 271         /*
 272          * Should we move charges of a task when a task is moved into this
  273          * mem_cgroup? And what type of charges should we move?
 274          */
 275         unsigned long move_charge_at_immigrate;
 276         /* taken only while moving_account > 0 */
 277         spinlock_t              move_lock;
 278         unsigned long           move_lock_flags;
 279 
 280         MEMCG_PADDING(_pad1_);
 281 
 282         /*
 283          * set > 0 if pages under this cgroup are moving to other cgroup.
 284          */
 285         atomic_t                moving_account;
 286         struct task_struct      *move_lock_task;
 287 
 288         /* Legacy local VM stats and events */
 289         struct memcg_vmstats_percpu __percpu *vmstats_local;
 290 
 291         /* Subtree VM stats and events (batched updates) */
 292         struct memcg_vmstats_percpu __percpu *vmstats_percpu;
 293 
 294         MEMCG_PADDING(_pad2_);
 295 
 296         atomic_long_t           vmstats[MEMCG_NR_STAT];
 297         atomic_long_t           vmevents[NR_VM_EVENT_ITEMS];
 298 
 299         /* memory.events */
 300         atomic_long_t           memory_events[MEMCG_NR_MEMORY_EVENTS];
 301         atomic_long_t           memory_events_local[MEMCG_NR_MEMORY_EVENTS];
 302 
 303         unsigned long           socket_pressure;
 304 
 305         /* Legacy tcp memory accounting */
 306         bool                    tcpmem_active;
 307         int                     tcpmem_pressure;
 308 
 309 #ifdef CONFIG_MEMCG_KMEM
 310         /* Index in the kmem_cache->memcg_params.memcg_caches array */
 311         int kmemcg_id;
 312         enum memcg_kmem_state kmem_state;
 313         struct list_head kmem_caches;
 314 #endif
 315 
 316         int last_scanned_node;
 317 #if MAX_NUMNODES > 1
 318         nodemask_t      scan_nodes;
 319         atomic_t        numainfo_events;
 320         atomic_t        numainfo_updating;
 321 #endif
 322 
 323 #ifdef CONFIG_CGROUP_WRITEBACK
 324         struct list_head cgwb_list;
 325         struct wb_domain cgwb_domain;
 326         struct memcg_cgwb_frn cgwb_frn[MEMCG_CGWB_FRN_CNT];
 327 #endif
 328 
 329         /* List of events which userspace want to receive */
 330         struct list_head event_list;
 331         spinlock_t event_list_lock;
 332 
 333 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 334         struct deferred_split deferred_split_queue;
 335 #endif
 336 
 337         struct mem_cgroup_per_node *nodeinfo[0];
 338         /* WARNING: nodeinfo must be the last member here */
 339 };
 340 
 341 /*
  342  * Size of the first charge trial. "32" comes from vmscan.c's magic value.
  343  * TODO: larger batches may be necessary on very large machines.
 344  */
 345 #define MEMCG_CHARGE_BATCH 32U
 346 
 347 extern struct mem_cgroup *root_mem_cgroup;
 348 
 349 static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
 350 {
 351         return (memcg == root_mem_cgroup);
 352 }
 353 
 354 static inline bool mem_cgroup_disabled(void)
 355 {
 356         return !cgroup_subsys_enabled(memory_cgrp_subsys);
 357 }
 358 
 359 static inline unsigned long mem_cgroup_protection(struct mem_cgroup *memcg,
 360                                                   bool in_low_reclaim)
 361 {
 362         if (mem_cgroup_disabled())
 363                 return 0;
 364 
 365         if (in_low_reclaim)
 366                 return READ_ONCE(memcg->memory.emin);
 367 
 368         return max(READ_ONCE(memcg->memory.emin),
 369                    READ_ONCE(memcg->memory.elow));
 370 }
 371 
 372 enum mem_cgroup_protection mem_cgroup_protected(struct mem_cgroup *root,
 373                                                 struct mem_cgroup *memcg);
 374 
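Usage sketch (editorial, not part of the header): how a reclaim path might combine mem_cgroup_protected() and mem_cgroup_protection(). The function name, the 'target' reclaim root and the usage argument are illustrative, and the real scan-target calculation in vmscan is more involved.

static unsigned long example_scan_target(struct mem_cgroup *target,
					 struct mem_cgroup *memcg,
					 unsigned long usage,
					 bool in_low_reclaim)
{
	unsigned long protection;

	switch (mem_cgroup_protected(target, memcg)) {
	case MEMCG_PROT_MIN:
		/* memory.min: never reclaim from this group. */
		return 0;
	case MEMCG_PROT_LOW:
		/* memory.low: skip unless we are already in low reclaim. */
		if (!in_low_reclaim)
			return 0;
		break;
	case MEMCG_PROT_NONE:
		break;
	}

	/* Only usage above the effective protection is considered scannable. */
	protection = mem_cgroup_protection(memcg, in_low_reclaim);
	return usage > protection ? usage - protection : 0;
}
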
 375 int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
 376                           gfp_t gfp_mask, struct mem_cgroup **memcgp,
 377                           bool compound);
 378 int mem_cgroup_try_charge_delay(struct page *page, struct mm_struct *mm,
 379                           gfp_t gfp_mask, struct mem_cgroup **memcgp,
 380                           bool compound);
 381 void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg,
 382                               bool lrucare, bool compound);
 383 void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg,
 384                 bool compound);
 385 void mem_cgroup_uncharge(struct page *page);
 386 void mem_cgroup_uncharge_list(struct list_head *page_list);
 387 
 388 void mem_cgroup_migrate(struct page *oldpage, struct page *newpage);
 389 
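Usage sketch (editorial): the try/commit/cancel sequence for charging a newly allocated page. The install_page() callback is a stand-in for whatever step may still fail after the charge has been reserved; it is not part of this header.

static int example_charge_new_page(struct page *page, struct mm_struct *mm,
				   gfp_t gfp_mask,
				   int (*install_page)(struct page *))
{
	struct mem_cgroup *memcg;
	int ret;

	ret = mem_cgroup_try_charge(page, mm, gfp_mask, &memcg, false);
	if (ret)
		return ret;

	ret = install_page(page);
	if (ret) {
		/* The page never became visible: back out the charge. */
		mem_cgroup_cancel_charge(page, memcg, false);
		return ret;
	}

	/* lrucare == false: the page is new and not yet on an LRU list. */
	mem_cgroup_commit_charge(page, memcg, false, false);
	return 0;
}
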
 390 static struct mem_cgroup_per_node *
 391 mem_cgroup_nodeinfo(struct mem_cgroup *memcg, int nid)
 392 {
 393         return memcg->nodeinfo[nid];
 394 }
 395 
 396 /**
  397  * mem_cgroup_lruvec - get the lru list vector for a memcg & node
  398  * @pgdat: pglist_data of the node whose lruvec is wanted
  399  * @memcg: memcg of the wanted lruvec
  400  *
  401  * Returns the lru list vector holding pages for the given @memcg and
  402  * @pgdat combination. This can be the node lruvec, if the memory controller
 403  * is disabled.
 404  */
 405 static inline struct lruvec *mem_cgroup_lruvec(struct pglist_data *pgdat,
 406                                 struct mem_cgroup *memcg)
 407 {
 408         struct mem_cgroup_per_node *mz;
 409         struct lruvec *lruvec;
 410 
 411         if (mem_cgroup_disabled()) {
 412                 lruvec = node_lruvec(pgdat);
 413                 goto out;
 414         }
 415 
 416         mz = mem_cgroup_nodeinfo(memcg, pgdat->node_id);
 417         lruvec = &mz->lruvec;
 418 out:
 419         /*
 420          * Since a node can be onlined after the mem_cgroup was created,
 421          * we have to be prepared to initialize lruvec->pgdat here;
 422          * and if offlined then reonlined, we need to reinitialize it.
 423          */
 424         if (unlikely(lruvec->pgdat != pgdat))
 425                 lruvec->pgdat = pgdat;
 426         return lruvec;
 427 }
 428 
 429 struct lruvec *mem_cgroup_page_lruvec(struct page *, struct pglist_data *);
 430 
 431 struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
 432 
 433 struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm);
 434 
 435 struct mem_cgroup *get_mem_cgroup_from_page(struct page *page);
 436 
 437 static inline
 438 struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css){
 439         return css ? container_of(css, struct mem_cgroup, css) : NULL;
 440 }
 441 
 442 static inline void mem_cgroup_put(struct mem_cgroup *memcg)
 443 {
 444         if (memcg)
 445                 css_put(&memcg->css);
 446 }
 447 
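Usage sketch (editorial): get_mem_cgroup_from_mm() returns its result with a css reference held, so pair it with mem_cgroup_put(); page_counter_read() (from linux/page_counter.h, already included) reads the current usage in pages. The helper name is illustrative.

static unsigned long example_mm_usage(struct mm_struct *mm)
{
	struct mem_cgroup *memcg = get_mem_cgroup_from_mm(mm);
	unsigned long nr_pages = page_counter_read(&memcg->memory);

	mem_cgroup_put(memcg);
	return nr_pages;
}
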
 448 #define mem_cgroup_from_counter(counter, member)        \
 449         container_of(counter, struct mem_cgroup, member)
 450 
 451 struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
 452                                    struct mem_cgroup *,
 453                                    struct mem_cgroup_reclaim_cookie *);
 454 void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
 455 int mem_cgroup_scan_tasks(struct mem_cgroup *,
 456                           int (*)(struct task_struct *, void *), void *);
 457 
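Usage sketch (editorial): the standard pre-order hierarchy walk with mem_cgroup_iter(). Passing a NULL cookie walks the whole subtree; an early exit must go through mem_cgroup_iter_break() so the css reference held on the current position is dropped. The visit() callback is illustrative.

static void example_walk_subtree(struct mem_cgroup *root,
				 bool (*visit)(struct mem_cgroup *memcg))
{
	struct mem_cgroup *memcg;

	for (memcg = mem_cgroup_iter(root, NULL, NULL); memcg;
	     memcg = mem_cgroup_iter(root, memcg, NULL)) {
		if (!visit(memcg)) {
			mem_cgroup_iter_break(root, memcg);
			break;
		}
	}
}
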
 458 static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
 459 {
 460         if (mem_cgroup_disabled())
 461                 return 0;
 462 
 463         return memcg->id.id;
 464 }
 465 struct mem_cgroup *mem_cgroup_from_id(unsigned short id);
 466 
 467 static inline struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m)
 468 {
 469         return mem_cgroup_from_css(seq_css(m));
 470 }
 471 
 472 static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
 473 {
 474         struct mem_cgroup_per_node *mz;
 475 
 476         if (mem_cgroup_disabled())
 477                 return NULL;
 478 
 479         mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
 480         return mz->memcg;
 481 }
 482 
 483 /**
 484  * parent_mem_cgroup - find the accounting parent of a memcg
 485  * @memcg: memcg whose parent to find
 486  *
 487  * Returns the parent memcg, or NULL if this is the root or the memory
 488  * controller is in legacy no-hierarchy mode.
 489  */
 490 static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
 491 {
 492         if (!memcg->memory.parent)
 493                 return NULL;
 494         return mem_cgroup_from_counter(memcg->memory.parent, memory);
 495 }
 496 
 497 static inline bool mem_cgroup_is_descendant(struct mem_cgroup *memcg,
 498                               struct mem_cgroup *root)
 499 {
 500         if (root == memcg)
 501                 return true;
 502         if (!root->use_hierarchy)
 503                 return false;
 504         return cgroup_is_descendant(memcg->css.cgroup, root->css.cgroup);
 505 }
 506 
 507 static inline bool mm_match_cgroup(struct mm_struct *mm,
 508                                    struct mem_cgroup *memcg)
 509 {
 510         struct mem_cgroup *task_memcg;
 511         bool match = false;
 512 
 513         rcu_read_lock();
 514         task_memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
 515         if (task_memcg)
 516                 match = mem_cgroup_is_descendant(task_memcg, memcg);
 517         rcu_read_unlock();
 518         return match;
 519 }
 520 
 521 struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page);
 522 ino_t page_cgroup_ino(struct page *page);
 523 
 524 static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
 525 {
 526         if (mem_cgroup_disabled())
 527                 return true;
 528         return !!(memcg->css.flags & CSS_ONLINE);
 529 }
 530 
 531 /*
 532  * For memory reclaim.
 533  */
 534 int mem_cgroup_select_victim_node(struct mem_cgroup *memcg);
 535 
 536 void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
 537                 int zid, int nr_pages);
 538 
 539 static inline
 540 unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
 541                 enum lru_list lru, int zone_idx)
 542 {
 543         struct mem_cgroup_per_node *mz;
 544 
 545         mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
 546         return mz->lru_zone_size[zone_idx][lru];
 547 }
 548 
 549 void mem_cgroup_handle_over_high(void);
 550 
 551 unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg);
 552 
 553 unsigned long mem_cgroup_size(struct mem_cgroup *memcg);
 554 
 555 void mem_cgroup_print_oom_context(struct mem_cgroup *memcg,
 556                                 struct task_struct *p);
 557 
 558 void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg);
 559 
 560 static inline void mem_cgroup_enter_user_fault(void)
 561 {
 562         WARN_ON(current->in_user_fault);
 563         current->in_user_fault = 1;
 564 }
 565 
 566 static inline void mem_cgroup_exit_user_fault(void)
 567 {
 568         WARN_ON(!current->in_user_fault);
 569         current->in_user_fault = 0;
 570 }
 571 
 572 static inline bool task_in_memcg_oom(struct task_struct *p)
 573 {
 574         return p->memcg_in_oom;
 575 }
 576 
 577 bool mem_cgroup_oom_synchronize(bool wait);
 578 struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim,
 579                                             struct mem_cgroup *oom_domain);
 580 void mem_cgroup_print_oom_group(struct mem_cgroup *memcg);
 581 
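Usage sketch (editorial), loosely modeled on the page fault path: user faults are bracketed with mem_cgroup_enter_user_fault()/mem_cgroup_exit_user_fault(), and a pending memcg OOM is resolved afterwards unless the fault already failed with VM_FAULT_OOM. The do_fault() callback is illustrative; see handle_mm_fault() for the real logic.

static vm_fault_t example_handle_user_fault(vm_fault_t (*do_fault)(void))
{
	vm_fault_t ret;

	mem_cgroup_enter_user_fault();
	ret = do_fault();
	mem_cgroup_exit_user_fault();

	/*
	 * The task may have entered a memcg OOM situation, but if the
	 * allocation error was handled gracefully (no VM_FAULT_OOM)
	 * there is nothing to kill; otherwise let the memcg OOM
	 * handling run before returning to userspace.
	 */
	if (task_in_memcg_oom(current) && !(ret & VM_FAULT_OOM))
		mem_cgroup_oom_synchronize(false);

	return ret;
}
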
 582 #ifdef CONFIG_MEMCG_SWAP
 583 extern int do_swap_account;
 584 #endif
 585 
 586 struct mem_cgroup *lock_page_memcg(struct page *page);
 587 void __unlock_page_memcg(struct mem_cgroup *memcg);
 588 void unlock_page_memcg(struct page *page);
 589 
 590 /*
 591  * idx can be of type enum memcg_stat_item or node_stat_item.
 592  * Keep in sync with memcg_exact_page_state().
 593  */
 594 static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
 595 {
 596         long x = atomic_long_read(&memcg->vmstats[idx]);
 597 #ifdef CONFIG_SMP
 598         if (x < 0)
 599                 x = 0;
 600 #endif
 601         return x;
 602 }
 603 
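Usage sketch (editorial): memcg_page_state() returns hierarchical counter values in pages (except MEMCG_KERNEL_STACK_KB, which is in KB); memory.stat-style reporting multiplies by PAGE_SIZE to get bytes. The helper name is illustrative.

static u64 example_anon_bytes(struct mem_cgroup *memcg)
{
	/* MEMCG_RSS counts anonymous pages charged to the subtree. */
	return (u64)memcg_page_state(memcg, MEMCG_RSS) * PAGE_SIZE;
}
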
 604 /*
 605  * idx can be of type enum memcg_stat_item or node_stat_item.
 606  * Keep in sync with memcg_exact_page_state().
 607  */
 608 static inline unsigned long memcg_page_state_local(struct mem_cgroup *memcg,
 609                                                    int idx)
 610 {
 611         long x = 0;
 612         int cpu;
 613 
 614         for_each_possible_cpu(cpu)
 615                 x += per_cpu(memcg->vmstats_local->stat[idx], cpu);
 616 #ifdef CONFIG_SMP
 617         if (x < 0)
 618                 x = 0;
 619 #endif
 620         return x;
 621 }
 622 
 623 void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val);
 624 
 625 /* idx can be of type enum memcg_stat_item or node_stat_item */
 626 static inline void mod_memcg_state(struct mem_cgroup *memcg,
 627                                    int idx, int val)
 628 {
 629         unsigned long flags;
 630 
 631         local_irq_save(flags);
 632         __mod_memcg_state(memcg, idx, val);
 633         local_irq_restore(flags);
 634 }
 635 
 636 /**
 637  * mod_memcg_page_state - update page state statistics
 638  * @page: the page
 639  * @idx: page state item to account
 640  * @val: number of pages (positive or negative)
 641  *
 642  * The @page must be locked or the caller must use lock_page_memcg()
 643  * to prevent double accounting when the page is concurrently being
 644  * moved to another memcg:
 645  *
 646  *   lock_page(page) or lock_page_memcg(page)
 647  *   if (TestClearPageState(page))
 648  *     mod_memcg_page_state(page, state, -1);
 649  *   unlock_page(page) or unlock_page_memcg(page)
 650  *
 651  * Kernel pages are an exception to this, since they'll never move.
 652  */
 653 static inline void __mod_memcg_page_state(struct page *page,
 654                                           int idx, int val)
 655 {
 656         if (page->mem_cgroup)
 657                 __mod_memcg_state(page->mem_cgroup, idx, val);
 658 }
 659 
 660 static inline void mod_memcg_page_state(struct page *page,
 661                                         int idx, int val)
 662 {
 663         if (page->mem_cgroup)
 664                 mod_memcg_state(page->mem_cgroup, idx, val);
 665 }
 666 
 667 static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
 668                                               enum node_stat_item idx)
 669 {
 670         struct mem_cgroup_per_node *pn;
 671         long x;
 672 
 673         if (mem_cgroup_disabled())
 674                 return node_page_state(lruvec_pgdat(lruvec), idx);
 675 
 676         pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
 677         x = atomic_long_read(&pn->lruvec_stat[idx]);
 678 #ifdef CONFIG_SMP
 679         if (x < 0)
 680                 x = 0;
 681 #endif
 682         return x;
 683 }
 684 
 685 static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
 686                                                     enum node_stat_item idx)
 687 {
 688         struct mem_cgroup_per_node *pn;
 689         long x = 0;
 690         int cpu;
 691 
 692         if (mem_cgroup_disabled())
 693                 return node_page_state(lruvec_pgdat(lruvec), idx);
 694 
 695         pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
 696         for_each_possible_cpu(cpu)
 697                 x += per_cpu(pn->lruvec_stat_local->count[idx], cpu);
 698 #ifdef CONFIG_SMP
 699         if (x < 0)
 700                 x = 0;
 701 #endif
 702         return x;
 703 }
 704 
 705 void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
 706                         int val);
 707 void __mod_lruvec_slab_state(void *p, enum node_stat_item idx, int val);
 708 void mod_memcg_obj_state(void *p, int idx, int val);
 709 
 710 static inline void mod_lruvec_state(struct lruvec *lruvec,
 711                                     enum node_stat_item idx, int val)
 712 {
 713         unsigned long flags;
 714 
 715         local_irq_save(flags);
 716         __mod_lruvec_state(lruvec, idx, val);
 717         local_irq_restore(flags);
 718 }
 719 
 720 static inline void __mod_lruvec_page_state(struct page *page,
 721                                            enum node_stat_item idx, int val)
 722 {
 723         pg_data_t *pgdat = page_pgdat(page);
 724         struct lruvec *lruvec;
 725 
 726         /* Untracked pages have no memcg, no lruvec. Update only the node */
 727         if (!page->mem_cgroup) {
 728                 __mod_node_page_state(pgdat, idx, val);
 729                 return;
 730         }
 731 
 732         lruvec = mem_cgroup_lruvec(pgdat, page->mem_cgroup);
 733         __mod_lruvec_state(lruvec, idx, val);
 734 }
 735 
 736 static inline void mod_lruvec_page_state(struct page *page,
 737                                          enum node_stat_item idx, int val)
 738 {
 739         unsigned long flags;
 740 
 741         local_irq_save(flags);
 742         __mod_lruvec_page_state(page, idx, val);
 743         local_irq_restore(flags);
 744 }
 745 
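Usage sketch (editorial), loosely modeled on dirty-page accounting: lock_page_memcg() keeps page->mem_cgroup stable while a page-keyed stat is updated. Real dirty accounting also updates writeback-domain and zone counters; this only shows the lruvec side.

static void example_account_dirtied(struct page *page)
{
	lock_page_memcg(page);
	if (!TestSetPageDirty(page))
		/* Charged to both the node and page->mem_cgroup's lruvec. */
		mod_lruvec_page_state(page, NR_FILE_DIRTY, 1);
	unlock_page_memcg(page);
}
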
 746 unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
 747                                                 gfp_t gfp_mask,
 748                                                 unsigned long *total_scanned);
 749 
 750 void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
 751                           unsigned long count);
 752 
 753 static inline void count_memcg_events(struct mem_cgroup *memcg,
 754                                       enum vm_event_item idx,
 755                                       unsigned long count)
 756 {
 757         unsigned long flags;
 758 
 759         local_irq_save(flags);
 760         __count_memcg_events(memcg, idx, count);
 761         local_irq_restore(flags);
 762 }
 763 
 764 static inline void count_memcg_page_event(struct page *page,
 765                                           enum vm_event_item idx)
 766 {
 767         if (page->mem_cgroup)
 768                 count_memcg_events(page->mem_cgroup, idx, 1);
 769 }
 770 
 771 static inline void count_memcg_event_mm(struct mm_struct *mm,
 772                                         enum vm_event_item idx)
 773 {
 774         struct mem_cgroup *memcg;
 775 
 776         if (mem_cgroup_disabled())
 777                 return;
 778 
 779         rcu_read_lock();
 780         memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
 781         if (likely(memcg))
 782                 count_memcg_events(memcg, idx, 1);
 783         rcu_read_unlock();
 784 }
 785 
 786 static inline void memcg_memory_event(struct mem_cgroup *memcg,
 787                                       enum memcg_memory_event event)
 788 {
 789         atomic_long_inc(&memcg->memory_events_local[event]);
 790         cgroup_file_notify(&memcg->events_local_file);
 791 
 792         do {
 793                 atomic_long_inc(&memcg->memory_events[event]);
 794                 cgroup_file_notify(&memcg->events_file);
 795 
 796                 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
 797                         break;
 798                 if (cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_LOCAL_EVENTS)
 799                         break;
 800         } while ((memcg = parent_mem_cgroup(memcg)) &&
 801                  !mem_cgroup_is_root(memcg));
 802 }
 803 
 804 static inline void memcg_memory_event_mm(struct mm_struct *mm,
 805                                          enum memcg_memory_event event)
 806 {
 807         struct mem_cgroup *memcg;
 808 
 809         if (mem_cgroup_disabled())
 810                 return;
 811 
 812         rcu_read_lock();
 813         memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
 814         if (likely(memcg))
 815                 memcg_memory_event(memcg, event);
 816         rcu_read_unlock();
 817 }
 818 
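Usage sketch (editorial): the _mm variants resolve the memcg from mm->owner under RCU and suit contexts that only have an mm at hand. The call sites shown here are illustrative.

static void example_note_major_fault(struct mm_struct *mm)
{
	/* Accumulates into the cgroup's memory.stat (pgmajfault). */
	count_memcg_event_mm(mm, PGMAJFAULT);
}

static void example_note_oom_kill(struct mm_struct *mm)
{
	/* Bumps memory.events:oom_kill up the hierarchy and notifies. */
	memcg_memory_event_mm(mm, MEMCG_OOM_KILL);
}
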
 819 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 820 void mem_cgroup_split_huge_fixup(struct page *head);
 821 #endif
 822 
 823 #else /* CONFIG_MEMCG */
 824 
 825 #define MEM_CGROUP_ID_SHIFT     0
 826 #define MEM_CGROUP_ID_MAX       0
 827 
 828 struct mem_cgroup;
 829 
 830 static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
 831 {
 832         return true;
 833 }
 834 
 835 static inline bool mem_cgroup_disabled(void)
 836 {
 837         return true;
 838 }
 839 
 840 static inline void memcg_memory_event(struct mem_cgroup *memcg,
 841                                       enum memcg_memory_event event)
 842 {
 843 }
 844 
 845 static inline void memcg_memory_event_mm(struct mm_struct *mm,
 846                                          enum memcg_memory_event event)
 847 {
 848 }
 849 
 850 static inline unsigned long mem_cgroup_protection(struct mem_cgroup *memcg,
 851                                                   bool in_low_reclaim)
 852 {
 853         return 0;
 854 }
 855 
 856 static inline enum mem_cgroup_protection mem_cgroup_protected(
 857         struct mem_cgroup *root, struct mem_cgroup *memcg)
 858 {
 859         return MEMCG_PROT_NONE;
 860 }
 861 
 862 static inline int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
 863                                         gfp_t gfp_mask,
 864                                         struct mem_cgroup **memcgp,
 865                                         bool compound)
 866 {
 867         *memcgp = NULL;
 868         return 0;
 869 }
 870 
 871 static inline int mem_cgroup_try_charge_delay(struct page *page,
 872                                               struct mm_struct *mm,
 873                                               gfp_t gfp_mask,
 874                                               struct mem_cgroup **memcgp,
 875                                               bool compound)
 876 {
 877         *memcgp = NULL;
 878         return 0;
 879 }
 880 
 881 static inline void mem_cgroup_commit_charge(struct page *page,
 882                                             struct mem_cgroup *memcg,
 883                                             bool lrucare, bool compound)
 884 {
 885 }
 886 
 887 static inline void mem_cgroup_cancel_charge(struct page *page,
 888                                             struct mem_cgroup *memcg,
 889                                             bool compound)
 890 {
 891 }
 892 
 893 static inline void mem_cgroup_uncharge(struct page *page)
 894 {
 895 }
 896 
 897 static inline void mem_cgroup_uncharge_list(struct list_head *page_list)
 898 {
 899 }
 900 
 901 static inline void mem_cgroup_migrate(struct page *old, struct page *new)
 902 {
 903 }
 904 
 905 static inline struct lruvec *mem_cgroup_lruvec(struct pglist_data *pgdat,
 906                                 struct mem_cgroup *memcg)
 907 {
 908         return node_lruvec(pgdat);
 909 }
 910 
 911 static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page,
 912                                                     struct pglist_data *pgdat)
 913 {
 914         return &pgdat->lruvec;
 915 }
 916 
 917 static inline bool mm_match_cgroup(struct mm_struct *mm,
 918                 struct mem_cgroup *memcg)
 919 {
 920         return true;
 921 }
 922 
 923 static inline struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
 924 {
 925         return NULL;
 926 }
 927 
 928 static inline struct mem_cgroup *get_mem_cgroup_from_page(struct page *page)
 929 {
 930         return NULL;
 931 }
 932 
 933 static inline void mem_cgroup_put(struct mem_cgroup *memcg)
 934 {
 935 }
 936 
 937 static inline struct mem_cgroup *
 938 mem_cgroup_iter(struct mem_cgroup *root,
 939                 struct mem_cgroup *prev,
 940                 struct mem_cgroup_reclaim_cookie *reclaim)
 941 {
 942         return NULL;
 943 }
 944 
 945 static inline void mem_cgroup_iter_break(struct mem_cgroup *root,
 946                                          struct mem_cgroup *prev)
 947 {
 948 }
 949 
 950 static inline int mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
 951                 int (*fn)(struct task_struct *, void *), void *arg)
 952 {
 953         return 0;
 954 }
 955 
 956 static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
 957 {
 958         return 0;
 959 }
 960 
 961 static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
 962 {
 963         WARN_ON_ONCE(id);
 964         /* XXX: This should always return root_mem_cgroup */
 965         return NULL;
 966 }
 967 
 968 static inline struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m)
 969 {
 970         return NULL;
 971 }
 972 
 973 static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
 974 {
 975         return NULL;
 976 }
 977 
 978 static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
 979 {
 980         return true;
 981 }
 982 
 983 static inline
 984 unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
 985                 enum lru_list lru, int zone_idx)
 986 {
 987         return 0;
 988 }
 989 
 990 static inline unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
 991 {
 992         return 0;
 993 }
 994 
 995 static inline unsigned long mem_cgroup_size(struct mem_cgroup *memcg)
 996 {
 997         return 0;
 998 }
 999 
1000 static inline void
1001 mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
1002 {
1003 }
1004 
1005 static inline void
1006 mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
1007 {
1008 }
1009 
1010 static inline struct mem_cgroup *lock_page_memcg(struct page *page)
1011 {
1012         return NULL;
1013 }
1014 
1015 static inline void __unlock_page_memcg(struct mem_cgroup *memcg)
1016 {
1017 }
1018 
1019 static inline void unlock_page_memcg(struct page *page)
1020 {
1021 }
1022 
1023 static inline void mem_cgroup_handle_over_high(void)
1024 {
1025 }
1026 
1027 static inline void mem_cgroup_enter_user_fault(void)
1028 {
1029 }
1030 
1031 static inline void mem_cgroup_exit_user_fault(void)
1032 {
1033 }
1034 
1035 static inline bool task_in_memcg_oom(struct task_struct *p)
1036 {
1037         return false;
1038 }
1039 
1040 static inline bool mem_cgroup_oom_synchronize(bool wait)
1041 {
1042         return false;
1043 }
1044 
1045 static inline struct mem_cgroup *mem_cgroup_get_oom_group(
1046         struct task_struct *victim, struct mem_cgroup *oom_domain)
1047 {
1048         return NULL;
1049 }
1050 
1051 static inline void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
1052 {
1053 }
1054 
1055 static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
1056 {
1057         return 0;
1058 }
1059 
1060 static inline unsigned long memcg_page_state_local(struct mem_cgroup *memcg,
1061                                                    int idx)
1062 {
1063         return 0;
1064 }
1065 
1066 static inline void __mod_memcg_state(struct mem_cgroup *memcg,
1067                                      int idx,
1068                                      int nr)
1069 {
1070 }
1071 
1072 static inline void mod_memcg_state(struct mem_cgroup *memcg,
1073                                    int idx,
1074                                    int nr)
1075 {
1076 }
1077 
1078 static inline void __mod_memcg_page_state(struct page *page,
1079                                           int idx,
1080                                           int nr)
1081 {
1082 }
1083 
1084 static inline void mod_memcg_page_state(struct page *page,
1085                                         int idx,
1086                                         int nr)
1087 {
1088 }
1089 
1090 static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
1091                                               enum node_stat_item idx)
1092 {
1093         return node_page_state(lruvec_pgdat(lruvec), idx);
1094 }
1095 
1096 static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
1097                                                     enum node_stat_item idx)
1098 {
1099         return node_page_state(lruvec_pgdat(lruvec), idx);
1100 }
1101 
1102 static inline void __mod_lruvec_state(struct lruvec *lruvec,
1103                                       enum node_stat_item idx, int val)
1104 {
1105         __mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
1106 }
1107 
1108 static inline void mod_lruvec_state(struct lruvec *lruvec,
1109                                     enum node_stat_item idx, int val)
1110 {
1111         mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
1112 }
1113 
1114 static inline void __mod_lruvec_page_state(struct page *page,
1115                                            enum node_stat_item idx, int val)
1116 {
1117         __mod_node_page_state(page_pgdat(page), idx, val);
1118 }
1119 
1120 static inline void mod_lruvec_page_state(struct page *page,
1121                                          enum node_stat_item idx, int val)
1122 {
1123         mod_node_page_state(page_pgdat(page), idx, val);
1124 }
1125 
1126 static inline void __mod_lruvec_slab_state(void *p, enum node_stat_item idx,
1127                                            int val)
1128 {
1129         struct page *page = virt_to_head_page(p);
1130 
1131         __mod_node_page_state(page_pgdat(page), idx, val);
1132 }
1133 
1134 static inline void mod_memcg_obj_state(void *p, int idx, int val)
1135 {
1136 }
1137 
1138 static inline
1139 unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
1140                                             gfp_t gfp_mask,
1141                                             unsigned long *total_scanned)
1142 {
1143         return 0;
1144 }
1145 
1146 static inline void mem_cgroup_split_huge_fixup(struct page *head)
1147 {
1148 }
1149 
1150 static inline void count_memcg_events(struct mem_cgroup *memcg,
1151                                       enum vm_event_item idx,
1152                                       unsigned long count)
1153 {
1154 }
1155 
1156 static inline void __count_memcg_events(struct mem_cgroup *memcg,
1157                                         enum vm_event_item idx,
1158                                         unsigned long count)
1159 {
1160 }
1161 
1162 static inline void count_memcg_page_event(struct page *page,
1163                                           int idx)
1164 {
1165 }
1166 
1167 static inline
1168 void count_memcg_event_mm(struct mm_struct *mm, enum vm_event_item idx)
1169 {
1170 }
1171 #endif /* CONFIG_MEMCG */
1172 
1173 /* idx can be of type enum memcg_stat_item or node_stat_item */
1174 static inline void __inc_memcg_state(struct mem_cgroup *memcg,
1175                                      int idx)
1176 {
1177         __mod_memcg_state(memcg, idx, 1);
1178 }
1179 
1180 /* idx can be of type enum memcg_stat_item or node_stat_item */
1181 static inline void __dec_memcg_state(struct mem_cgroup *memcg,
1182                                      int idx)
1183 {
1184         __mod_memcg_state(memcg, idx, -1);
1185 }
1186 
1187 /* idx can be of type enum memcg_stat_item or node_stat_item */
1188 static inline void __inc_memcg_page_state(struct page *page,
1189                                           int idx)
1190 {
1191         __mod_memcg_page_state(page, idx, 1);
1192 }
1193 
1194 /* idx can be of type enum memcg_stat_item or node_stat_item */
1195 static inline void __dec_memcg_page_state(struct page *page,
1196                                           int idx)
1197 {
1198         __mod_memcg_page_state(page, idx, -1);
1199 }
1200 
1201 static inline void __inc_lruvec_state(struct lruvec *lruvec,
1202                                       enum node_stat_item idx)
1203 {
1204         __mod_lruvec_state(lruvec, idx, 1);
1205 }
1206 
1207 static inline void __dec_lruvec_state(struct lruvec *lruvec,
1208                                       enum node_stat_item idx)
1209 {
1210         __mod_lruvec_state(lruvec, idx, -1);
1211 }
1212 
1213 static inline void __inc_lruvec_page_state(struct page *page,
1214                                            enum node_stat_item idx)
1215 {
1216         __mod_lruvec_page_state(page, idx, 1);
1217 }
1218 
1219 static inline void __dec_lruvec_page_state(struct page *page,
1220                                            enum node_stat_item idx)
1221 {
1222         __mod_lruvec_page_state(page, idx, -1);
1223 }
1224 
1225 static inline void __inc_lruvec_slab_state(void *p, enum node_stat_item idx)
1226 {
1227         __mod_lruvec_slab_state(p, idx, 1);
1228 }
1229 
1230 static inline void __dec_lruvec_slab_state(void *p, enum node_stat_item idx)
1231 {
1232         __mod_lruvec_slab_state(p, idx, -1);
1233 }
1234 
1235 /* idx can be of type enum memcg_stat_item or node_stat_item */
1236 static inline void inc_memcg_state(struct mem_cgroup *memcg,
1237                                    int idx)
1238 {
1239         mod_memcg_state(memcg, idx, 1);
1240 }
1241 
1242 /* idx can be of type enum memcg_stat_item or node_stat_item */
1243 static inline void dec_memcg_state(struct mem_cgroup *memcg,
1244                                    int idx)
1245 {
1246         mod_memcg_state(memcg, idx, -1);
1247 }
1248 
1249 /* idx can be of type enum memcg_stat_item or node_stat_item */
1250 static inline void inc_memcg_page_state(struct page *page,
1251                                         int idx)
1252 {
1253         mod_memcg_page_state(page, idx, 1);
1254 }
1255 
1256 /* idx can be of type enum memcg_stat_item or node_stat_item */
1257 static inline void dec_memcg_page_state(struct page *page,
1258                                         int idx)
1259 {
1260         mod_memcg_page_state(page, idx, -1);
1261 }
1262 
1263 static inline void inc_lruvec_state(struct lruvec *lruvec,
1264                                     enum node_stat_item idx)
1265 {
1266         mod_lruvec_state(lruvec, idx, 1);
1267 }
1268 
1269 static inline void dec_lruvec_state(struct lruvec *lruvec,
1270                                     enum node_stat_item idx)
1271 {
1272         mod_lruvec_state(lruvec, idx, -1);
1273 }
1274 
1275 static inline void inc_lruvec_page_state(struct page *page,
1276                                          enum node_stat_item idx)
1277 {
1278         mod_lruvec_page_state(page, idx, 1);
1279 }
1280 
1281 static inline void dec_lruvec_page_state(struct page *page,
1282                                          enum node_stat_item idx)
1283 {
1284         mod_lruvec_page_state(page, idx, -1);
1285 }
1286 
1287 #ifdef CONFIG_CGROUP_WRITEBACK
1288 
1289 struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb);
1290 void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
1291                          unsigned long *pheadroom, unsigned long *pdirty,
1292                          unsigned long *pwriteback);
1293 
1294 void mem_cgroup_track_foreign_dirty_slowpath(struct page *page,
1295                                              struct bdi_writeback *wb);
1296 
1297 static inline void mem_cgroup_track_foreign_dirty(struct page *page,
1298                                                   struct bdi_writeback *wb)
1299 {
1300         if (mem_cgroup_disabled())
1301                 return;
1302 
1303         if (unlikely(&page->mem_cgroup->css != wb->memcg_css))
1304                 mem_cgroup_track_foreign_dirty_slowpath(page, wb);
1305 }
1306 
1307 void mem_cgroup_flush_foreign(struct bdi_writeback *wb);
1308 
1309 #else   /* CONFIG_CGROUP_WRITEBACK */
1310 
1311 static inline struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
1312 {
1313         return NULL;
1314 }
1315 
1316 static inline void mem_cgroup_wb_stats(struct bdi_writeback *wb,
1317                                        unsigned long *pfilepages,
1318                                        unsigned long *pheadroom,
1319                                        unsigned long *pdirty,
1320                                        unsigned long *pwriteback)
1321 {
1322 }
1323 
1324 static inline void mem_cgroup_track_foreign_dirty(struct page *page,
1325                                                   struct bdi_writeback *wb)
1326 {
1327 }
1328 
1329 static inline void mem_cgroup_flush_foreign(struct bdi_writeback *wb)
1330 {
1331 }
1332 
1333 #endif  /* CONFIG_CGROUP_WRITEBACK */
1334 
1335 struct sock;
1336 bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
1337 void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
1338 #ifdef CONFIG_MEMCG
1339 extern struct static_key_false memcg_sockets_enabled_key;
1340 #define mem_cgroup_sockets_enabled static_branch_unlikely(&memcg_sockets_enabled_key)
1341 void mem_cgroup_sk_alloc(struct sock *sk);
1342 void mem_cgroup_sk_free(struct sock *sk);
1343 static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
1344 {
1345         if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_pressure)
1346                 return true;
1347         do {
1348                 if (time_before(jiffies, memcg->socket_pressure))
1349                         return true;
1350         } while ((memcg = parent_mem_cgroup(memcg)));
1351         return false;
1352 }
1353 
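Usage sketch (editorial), assuming <net/sock.h> is included for struct sock and its sk_memcg member: the networking side typically consults the memcg only when socket accounting is enabled and the socket is tagged with one.

static bool example_sk_memcg_pressure(const struct sock *sk)
{
	return mem_cgroup_sockets_enabled && sk->sk_memcg &&
	       mem_cgroup_under_socket_pressure(sk->sk_memcg);
}
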
1354 extern int memcg_expand_shrinker_maps(int new_id);
1355 
1356 extern void memcg_set_shrinker_bit(struct mem_cgroup *memcg,
1357                                    int nid, int shrinker_id);
1358 #else
1359 #define mem_cgroup_sockets_enabled 0
 1360 static inline void mem_cgroup_sk_alloc(struct sock *sk) { }
 1361 static inline void mem_cgroup_sk_free(struct sock *sk) { }
1362 static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
1363 {
1364         return false;
1365 }
1366 
1367 static inline void memcg_set_shrinker_bit(struct mem_cgroup *memcg,
1368                                           int nid, int shrinker_id)
1369 {
1370 }
1371 #endif
1372 
1373 struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep);
1374 void memcg_kmem_put_cache(struct kmem_cache *cachep);
1375 
1376 #ifdef CONFIG_MEMCG_KMEM
1377 int __memcg_kmem_charge(struct page *page, gfp_t gfp, int order);
1378 void __memcg_kmem_uncharge(struct page *page, int order);
1379 int __memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order,
1380                               struct mem_cgroup *memcg);
1381 void __memcg_kmem_uncharge_memcg(struct mem_cgroup *memcg,
1382                                  unsigned int nr_pages);
1383 
1384 extern struct static_key_false memcg_kmem_enabled_key;
1385 extern struct workqueue_struct *memcg_kmem_cache_wq;
1386 
1387 extern int memcg_nr_cache_ids;
1388 void memcg_get_cache_ids(void);
1389 void memcg_put_cache_ids(void);
1390 
1391 /*
1392  * Helper macro to loop through all memcg-specific caches. Callers must still
1393  * check if the cache is valid (it is either valid or NULL).
 1394  * The slab_mutex must be held when looping through those caches.
1395  */
1396 #define for_each_memcg_cache_index(_idx)        \
1397         for ((_idx) = 0; (_idx) < memcg_nr_cache_ids; (_idx)++)
1398 
1399 static inline bool memcg_kmem_enabled(void)
1400 {
1401         return static_branch_unlikely(&memcg_kmem_enabled_key);
1402 }
1403 
1404 static inline int memcg_kmem_charge(struct page *page, gfp_t gfp, int order)
1405 {
1406         if (memcg_kmem_enabled())
1407                 return __memcg_kmem_charge(page, gfp, order);
1408         return 0;
1409 }
1410 
1411 static inline void memcg_kmem_uncharge(struct page *page, int order)
1412 {
1413         if (memcg_kmem_enabled())
1414                 __memcg_kmem_uncharge(page, order);
1415 }
1416 
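Usage sketch (editorial), loosely modeled on the page allocator's __GFP_ACCOUNT handling (which additionally checks the gfp flag): charge a fresh allocation to the current memcg and back it out if the charge fails; the free side uncharges before freeing. The function names are illustrative.

static struct page *example_alloc_accounted(gfp_t gfp_mask, int order)
{
	struct page *page = alloc_pages(gfp_mask, order);

	if (page && memcg_kmem_charge(page, gfp_mask, order)) {
		/* Charge failed: give the pages back and report failure. */
		__free_pages(page, order);
		page = NULL;
	}
	return page;
}

static void example_free_accounted(struct page *page, int order)
{
	memcg_kmem_uncharge(page, order);
	__free_pages(page, order);
}
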
1417 static inline int memcg_kmem_charge_memcg(struct page *page, gfp_t gfp,
1418                                           int order, struct mem_cgroup *memcg)
1419 {
1420         if (memcg_kmem_enabled())
1421                 return __memcg_kmem_charge_memcg(page, gfp, order, memcg);
1422         return 0;
1423 }
1424 
1425 static inline void memcg_kmem_uncharge_memcg(struct page *page, int order,
1426                                              struct mem_cgroup *memcg)
1427 {
1428         if (memcg_kmem_enabled())
1429                 __memcg_kmem_uncharge_memcg(memcg, 1 << order);
1430 }
1431 
1432 /*
 1433  * Helper for accessing a memcg's index. It is used as an index in the
1434  * child cache array in kmem_cache, and also to derive its name. This function
1435  * will return -1 when this is not a kmem-limited memcg.
1436  */
1437 static inline int memcg_cache_id(struct mem_cgroup *memcg)
1438 {
1439         return memcg ? memcg->kmemcg_id : -1;
1440 }
1441 
1442 struct mem_cgroup *mem_cgroup_from_obj(void *p);
1443 
1444 #else
1445 
1446 static inline int memcg_kmem_charge(struct page *page, gfp_t gfp, int order)
1447 {
1448         return 0;
1449 }
1450 
1451 static inline void memcg_kmem_uncharge(struct page *page, int order)
1452 {
1453 }
1454 
1455 static inline int __memcg_kmem_charge(struct page *page, gfp_t gfp, int order)
1456 {
1457         return 0;
1458 }
1459 
1460 static inline void __memcg_kmem_uncharge(struct page *page, int order)
1461 {
1462 }
1463 
1464 #define for_each_memcg_cache_index(_idx)        \
1465         for (; NULL; )
1466 
1467 static inline bool memcg_kmem_enabled(void)
1468 {
1469         return false;
1470 }
1471 
1472 static inline int memcg_cache_id(struct mem_cgroup *memcg)
1473 {
1474         return -1;
1475 }
1476 
1477 static inline void memcg_get_cache_ids(void)
1478 {
1479 }
1480 
1481 static inline void memcg_put_cache_ids(void)
1482 {
1483 }
1484 
1485 static inline struct mem_cgroup *mem_cgroup_from_obj(void *p)
1486 {
1487        return NULL;
1488 }
1489 
1490 #endif /* CONFIG_MEMCG_KMEM */
1491 
1492 #endif /* _LINUX_MEMCONTROL_H */
