memcg 367 drivers/tty/sysrq.c .memcg = NULL,
memcg 820 fs/buffer.c struct mem_cgroup *memcg;
memcg 825 fs/buffer.c memcg = get_mem_cgroup_from_page(page);
memcg 826 fs/buffer.c memalloc_use_memcg(memcg);
memcg 846 fs/buffer.c mem_cgroup_put(memcg);
memcg 306 fs/notify/fanotify/fanotify.c memalloc_use_memcg(group->memcg);
memcg 824 fs/notify/fanotify/fanotify_user.c group->memcg = get_mem_cgroup_from_mm(current->mm);
memcg 27 fs/notify/group.c mem_cgroup_put(group->memcg);
memcg 98 fs/notify/inotify/inotify_fsnotify.c memalloc_use_memcg(group->memcg);
memcg 645 fs/notify/inotify/inotify_user.c group->memcg = get_mem_cgroup_from_mm(current->mm);
memcg 239 include/linux/backing-dev.h void wb_memcg_offline(struct mem_cgroup *memcg);
memcg 456 include/linux/backing-dev.h static inline void wb_memcg_offline(struct mem_cgroup *memcg)
memcg 189 include/linux/fsnotify_backend.h struct mem_cgroup *memcg; /* memcg to charge allocations */
memcg 117 include/linux/list_lru.h int nid, struct mem_cgroup *memcg);
memcg 123 include/linux/list_lru.h return list_lru_count_one(lru, sc->nid, sc->memcg);
memcg 140 include/linux/memcontrol.h struct mem_cgroup *memcg; /* Back pointer, we cannot */
memcg 349 include/linux/memcontrol.h static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
memcg 351 include/linux/memcontrol.h return (memcg == root_mem_cgroup);
memcg 359 include/linux/memcontrol.h static inline unsigned long mem_cgroup_protection(struct mem_cgroup *memcg,
memcg 366 include/linux/memcontrol.h return READ_ONCE(memcg->memory.emin);
memcg 368 include/linux/memcontrol.h return max(READ_ONCE(memcg->memory.emin),
memcg 369 include/linux/memcontrol.h READ_ONCE(memcg->memory.elow));
memcg 373 include/linux/memcontrol.h struct mem_cgroup *memcg);
memcg 381 include/linux/memcontrol.h void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg,
memcg 383 include/linux/memcontrol.h void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg,
memcg 391 include/linux/memcontrol.h mem_cgroup_nodeinfo(struct mem_cgroup *memcg, int nid)
memcg 393 include/linux/memcontrol.h return memcg->nodeinfo[nid];
memcg 406 include/linux/memcontrol.h struct mem_cgroup *memcg)
memcg 416 include/linux/memcontrol.h mz = mem_cgroup_nodeinfo(memcg, pgdat->node_id);
memcg 442 include/linux/memcontrol.h static inline void mem_cgroup_put(struct mem_cgroup *memcg)
memcg 444 include/linux/memcontrol.h if (memcg)
memcg 445 include/linux/memcontrol.h css_put(&memcg->css);
memcg 458 include/linux/memcontrol.h static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
memcg 463 include/linux/memcontrol.h return memcg->id.id;
memcg 480 include/linux/memcontrol.h return mz->memcg;
memcg 490 include/linux/memcontrol.h static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
memcg 492 include/linux/memcontrol.h if (!memcg->memory.parent)
memcg 494 include/linux/memcontrol.h return mem_cgroup_from_counter(memcg->memory.parent, memory);
memcg 497 include/linux/memcontrol.h static inline bool mem_cgroup_is_descendant(struct mem_cgroup *memcg,
memcg 500 include/linux/memcontrol.h if (root == memcg)
memcg 504 include/linux/memcontrol.h return cgroup_is_descendant(memcg->css.cgroup, root->css.cgroup);
memcg 508 include/linux/memcontrol.h struct mem_cgroup *memcg)
memcg 516 include/linux/memcontrol.h match = mem_cgroup_is_descendant(task_memcg, memcg);
memcg 524 include/linux/memcontrol.h static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
memcg 528 include/linux/memcontrol.h return !!(memcg->css.flags & CSS_ONLINE);
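The fs/buffer.c and fsnotify occurrences above all follow the same remote-charging pattern: take a reference on a memcg, install it as current's active memcg so accounted allocations are charged to it, then drop the reference. A minimal sketch of that pattern, assuming memalloc_unuse_memcg() from include/linux/sched/mm.h as the counterpart to the memalloc_use_memcg() shown in these entries:

    struct mem_cgroup *memcg;

    memcg = get_mem_cgroup_from_page(page);  /* takes a css reference */
    memalloc_use_memcg(memcg);               /* current->active_memcg = memcg */
    /* __GFP_ACCOUNT allocations made here are charged to memcg */
    memalloc_unuse_memcg();
    mem_cgroup_put(memcg);                   /* css_put(&memcg->css), NULL-safe */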
memcg 534 include/linux/memcontrol.h int mem_cgroup_select_victim_node(struct mem_cgroup *memcg);
memcg 551 include/linux/memcontrol.h unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg);
memcg 553 include/linux/memcontrol.h unsigned long mem_cgroup_size(struct mem_cgroup *memcg);
memcg 555 include/linux/memcontrol.h void mem_cgroup_print_oom_context(struct mem_cgroup *memcg,
memcg 558 include/linux/memcontrol.h void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg);
memcg 580 include/linux/memcontrol.h void mem_cgroup_print_oom_group(struct mem_cgroup *memcg);
memcg 587 include/linux/memcontrol.h void __unlock_page_memcg(struct mem_cgroup *memcg);
memcg 594 include/linux/memcontrol.h static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
memcg 596 include/linux/memcontrol.h long x = atomic_long_read(&memcg->vmstats[idx]);
memcg 608 include/linux/memcontrol.h static inline unsigned long memcg_page_state_local(struct mem_cgroup *memcg,
memcg 615 include/linux/memcontrol.h x += per_cpu(memcg->vmstats_local->stat[idx], cpu);
memcg 623 include/linux/memcontrol.h void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val);
memcg 626 include/linux/memcontrol.h static inline void mod_memcg_state(struct mem_cgroup *memcg,
memcg 632 include/linux/memcontrol.h __mod_memcg_state(memcg, idx, val);
memcg 750 include/linux/memcontrol.h void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
memcg 753 include/linux/memcontrol.h static inline void count_memcg_events(struct mem_cgroup *memcg,
memcg 760 include/linux/memcontrol.h __count_memcg_events(memcg, idx, count);
memcg 774 include/linux/memcontrol.h struct mem_cgroup *memcg;
memcg 780 include/linux/memcontrol.h memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
memcg 781 include/linux/memcontrol.h if (likely(memcg))
memcg 782 include/linux/memcontrol.h count_memcg_events(memcg, idx, 1);
memcg 786 include/linux/memcontrol.h static inline void memcg_memory_event(struct mem_cgroup *memcg,
memcg 789 include/linux/memcontrol.h atomic_long_inc(&memcg->memory_events_local[event]);
memcg 790 include/linux/memcontrol.h cgroup_file_notify(&memcg->events_local_file);
memcg 793 include/linux/memcontrol.h atomic_long_inc(&memcg->memory_events[event]);
memcg 794 include/linux/memcontrol.h cgroup_file_notify(&memcg->events_file);
memcg 800 include/linux/memcontrol.h } while ((memcg = parent_mem_cgroup(memcg)) &&
memcg 801 include/linux/memcontrol.h !mem_cgroup_is_root(memcg));
memcg 807 include/linux/memcontrol.h struct mem_cgroup *memcg;
memcg 813 include/linux/memcontrol.h memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
memcg 814 include/linux/memcontrol.h if (likely(memcg))
memcg 815 include/linux/memcontrol.h memcg_memory_event(memcg, event);
memcg 830 include/linux/memcontrol.h static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
memcg 840 include/linux/memcontrol.h static inline void memcg_memory_event(struct mem_cgroup *memcg,
memcg 850 include/linux/memcontrol.h static inline unsigned long mem_cgroup_protection(struct mem_cgroup *memcg,
memcg 857 include/linux/memcontrol.h struct mem_cgroup *root, struct mem_cgroup *memcg)
memcg 882 include/linux/memcontrol.h struct mem_cgroup *memcg,
memcg 888 include/linux/memcontrol.h struct mem_cgroup *memcg,
memcg 906 include/linux/memcontrol.h struct mem_cgroup *memcg)
memcg 918 include/linux/memcontrol.h struct mem_cgroup *memcg)
memcg 933 include/linux/memcontrol.h static inline void mem_cgroup_put(struct mem_cgroup *memcg)
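The memcg_memory_event() entries above (lines 786-801 of include/linux/memcontrol.h) illustrate the upward hierarchy walk used throughout this API; a simplified reconstruction from those snippets (the real function also handles the cgroup v1 and local-events cases):

    do {
            atomic_long_inc(&memcg->memory_events[event]);
            cgroup_file_notify(&memcg->events_file);
    } while ((memcg = parent_mem_cgroup(memcg)) &&
             !mem_cgroup_is_root(memcg));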
memcg 950 include/linux/memcontrol.h static inline int mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
memcg 956 include/linux/memcontrol.h static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
memcg 978 include/linux/memcontrol.h static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
memcg 990 include/linux/memcontrol.h static inline unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
memcg 995 include/linux/memcontrol.h static inline unsigned long mem_cgroup_size(struct mem_cgroup *memcg)
memcg 1001 include/linux/memcontrol.h mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
memcg 1006 include/linux/memcontrol.h mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
memcg 1015 include/linux/memcontrol.h static inline void __unlock_page_memcg(struct mem_cgroup *memcg)
memcg 1051 include/linux/memcontrol.h static inline void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
memcg 1055 include/linux/memcontrol.h static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
memcg 1060 include/linux/memcontrol.h static inline unsigned long memcg_page_state_local(struct mem_cgroup *memcg,
memcg 1066 include/linux/memcontrol.h static inline void __mod_memcg_state(struct mem_cgroup *memcg,
memcg 1072 include/linux/memcontrol.h static inline void mod_memcg_state(struct mem_cgroup *memcg,
memcg 1150 include/linux/memcontrol.h static inline void count_memcg_events(struct mem_cgroup *memcg,
memcg 1156 include/linux/memcontrol.h static inline void __count_memcg_events(struct mem_cgroup *memcg,
memcg 1174 include/linux/memcontrol.h static inline void __inc_memcg_state(struct mem_cgroup *memcg,
memcg 1177 include/linux/memcontrol.h __mod_memcg_state(memcg, idx, 1);
memcg 1181 include/linux/memcontrol.h static inline void __dec_memcg_state(struct mem_cgroup *memcg,
memcg 1184 include/linux/memcontrol.h __mod_memcg_state(memcg, idx, -1);
memcg 1236 include/linux/memcontrol.h static inline void inc_memcg_state(struct mem_cgroup *memcg,
memcg 1239 include/linux/memcontrol.h mod_memcg_state(memcg, idx, 1);
memcg 1243 include/linux/memcontrol.h static inline void dec_memcg_state(struct mem_cgroup *memcg,
memcg 1246 include/linux/memcontrol.h mod_memcg_state(memcg, idx, -1);
memcg 1336 include/linux/memcontrol.h bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
memcg 1337 include/linux/memcontrol.h void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
memcg 1343 include/linux/memcontrol.h static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
memcg 1345 include/linux/memcontrol.h if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_pressure)
memcg 1348 include/linux/memcontrol.h if (time_before(jiffies, memcg->socket_pressure))
memcg 1350 include/linux/memcontrol.h } while ((memcg = parent_mem_cgroup(memcg)));
memcg 1356 include/linux/memcontrol.h extern void memcg_set_shrinker_bit(struct mem_cgroup *memcg,
memcg 1362 include/linux/memcontrol.h static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
memcg 1367 include/linux/memcontrol.h static inline void memcg_set_shrinker_bit(struct mem_cgroup *memcg,
memcg 1380 include/linux/memcontrol.h struct mem_cgroup *memcg);
memcg 1381 include/linux/memcontrol.h void __memcg_kmem_uncharge_memcg(struct mem_cgroup *memcg,
memcg 1418 include/linux/memcontrol.h int order, struct mem_cgroup *memcg)
memcg 1421 include/linux/memcontrol.h return __memcg_kmem_charge_memcg(page, gfp, order, memcg);
memcg 1426 include/linux/memcontrol.h struct mem_cgroup *memcg)
memcg 1429 include/linux/memcontrol.h __memcg_kmem_uncharge_memcg(memcg, 1 << order);
memcg 1437 include/linux/memcontrol.h static inline int memcg_cache_id(struct mem_cgroup *memcg)
memcg 1439 include/linux/memcontrol.h return memcg ? memcg->kmemcg_id : -1;
memcg 1472 include/linux/memcontrol.h static inline int memcg_cache_id(struct mem_cgroup *memcg)
memcg 431 include/linux/mm.h struct mem_cgroup *memcg; /* Cgroup cow_page belongs to */
memcg 849 include/linux/mm.h vm_fault_t alloc_set_pte(struct vm_fault *vmf, struct mem_cgroup *memcg,
memcg 37 include/linux/oom.h struct mem_cgroup *memcg;
memcg 196 include/linux/rmap.h struct mem_cgroup *memcg, unsigned long *vm_flags);
memcg 285 include/linux/rmap.h struct mem_cgroup *memcg,
memcg 321 include/linux/sched/mm.h static inline void memalloc_use_memcg(struct mem_cgroup *memcg)
memcg 324 include/linux/sched/mm.h current->active_memcg = memcg;
memcg 338 include/linux/sched/mm.h static inline void memalloc_use_memcg(struct mem_cgroup *memcg)
memcg 33 include/linux/shrinker.h struct mem_cgroup *memcg;
memcg 355 include/linux/swap.h extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
memcg 629 include/linux/swap.h static inline int mem_cgroup_swappiness(struct mem_cgroup *memcg)
memcg 636 include/linux/swap.h if (mem_cgroup_disabled() || mem_cgroup_is_root(memcg))
memcg 639 include/linux/swap.h return memcg->swappiness;
memcg 649 include/linux/swap.h extern void mem_cgroup_throttle_swaprate(struct mem_cgroup *memcg, int node,
memcg 652 include/linux/swap.h static inline void mem_cgroup_throttle_swaprate(struct mem_cgroup *memcg,
memcg 662 include/linux/swap.h extern long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg);
memcg 680 include/linux/swap.h static inline long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
memcg 33 include/linux/vmpressure.h extern void vmpressure(gfp_t gfp, struct mem_cgroup *memcg, bool tree,
memcg 35 include/linux/vmpressure.h extern void vmpressure_prio(gfp_t gfp, struct mem_cgroup *memcg, int prio);
memcg 39 include/linux/vmpressure.h extern struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg);
memcg 41 include/linux/vmpressure.h extern int vmpressure_register_event(struct mem_cgroup *memcg,
memcg 44 include/linux/vmpressure.h extern void vmpressure_unregister_event(struct mem_cgroup *memcg,
memcg 47 include/linux/vmpressure.h static inline void vmpressure(gfp_t gfp, struct mem_cgroup *memcg, bool tree,
memcg 49 include/linux/vmpressure.h static inline void vmpressure_prio(gfp_t gfp, struct mem_cgroup *memcg,
memcg 165 kernel/events/uprobes.c struct mem_cgroup *memcg;
memcg 172 kernel/events/uprobes.c &memcg, false);
memcg 184 kernel/events/uprobes.c mem_cgroup_cancel_charge(new_page, memcg, false);
memcg 192 kernel/events/uprobes.c mem_cgroup_commit_charge(new_page, memcg, false, false);
memcg 530 mm/backing-dev.c struct mem_cgroup *memcg;
memcg 538 mm/backing-dev.c memcg = mem_cgroup_from_css(memcg_css);
memcg 541 mm/backing-dev.c memcg_cgwb_list = &memcg->cgwb_list;
memcg 744 mm/backing-dev.c void wb_memcg_offline(struct mem_cgroup *memcg)
memcg 746 mm/backing-dev.c struct list_head *memcg_cgwb_list = &memcg->cgwb_list;
memcg 857 mm/filemap.c struct mem_cgroup *memcg;
memcg 867 mm/filemap.c gfp_mask, &memcg, false);
memcg 903 mm/filemap.c mem_cgroup_commit_charge(page, memcg, false, false);
memcg 910 mm/filemap.c mem_cgroup_cancel_charge(page, memcg, false);
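The kernel/events/uprobes.c and mm/filemap.c call sites above (and the mm/huge_memory.c and mm/khugepaged.c ones that follow) use the same two-phase charging protocol: mem_cgroup_try_charge() reserves the charge against a memcg, and the caller then either commits or cancels it depending on whether the page was actually installed. A sketch, with the hypothetical install_page() standing in for whatever step may still fail between the two phases:

    struct mem_cgroup *memcg;

    if (mem_cgroup_try_charge(page, mm, gfp_mask, &memcg, false))
            return -ENOMEM;                 /* reservation failed */

    if (install_page(page) == 0)            /* hypothetical intermediate step */
            mem_cgroup_commit_charge(page, memcg, false, false);
    else
            mem_cgroup_cancel_charge(page, memcg, false);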
memcg 494 mm/huge_memory.c struct mem_cgroup *memcg = compound_head(page)->mem_cgroup;
memcg 497 mm/huge_memory.c if (memcg)
memcg 498 mm/huge_memory.c return &memcg->deferred_split_queue;
memcg 579 mm/huge_memory.c struct mem_cgroup *memcg;
memcg 586 mm/huge_memory.c if (mem_cgroup_try_charge_delay(page, vma->vm_mm, gfp, &memcg, true)) {
memcg 621 mm/huge_memory.c mem_cgroup_cancel_charge(page, memcg, true);
memcg 632 mm/huge_memory.c mem_cgroup_commit_charge(page, memcg, false, true);
memcg 640 mm/huge_memory.c count_memcg_events(memcg, THP_FAULT_ALLOC, 1);
memcg 649 mm/huge_memory.c mem_cgroup_cancel_charge(page, memcg, true);
memcg 1203 mm/huge_memory.c struct mem_cgroup *memcg;
memcg 1223 mm/huge_memory.c GFP_KERNEL, &memcg, false))) {
memcg 1227 mm/huge_memory.c memcg = (void *)page_private(pages[i]);
memcg 1229 mm/huge_memory.c mem_cgroup_cancel_charge(pages[i], memcg,
memcg 1237 mm/huge_memory.c set_page_private(pages[i], (unsigned long)memcg);
memcg 1273 mm/huge_memory.c memcg = (void *)page_private(pages[i]);
memcg 1276 mm/huge_memory.c mem_cgroup_commit_charge(pages[i], memcg, false, false);
memcg 1306 mm/huge_memory.c memcg = (void *)page_private(pages[i]);
memcg 1308 mm/huge_memory.c mem_cgroup_cancel_charge(pages[i], memcg, false);
memcg 1319 mm/huge_memory.c struct mem_cgroup *memcg;
memcg 1391 mm/huge_memory.c huge_gfp, &memcg, true))) {
memcg 1402 mm/huge_memory.c count_memcg_events(memcg, THP_FAULT_ALLOC, 1);
memcg 1420 mm/huge_memory.c mem_cgroup_cancel_charge(new_page, memcg, true);
memcg 1429 mm/huge_memory.c mem_cgroup_commit_charge(new_page, memcg, false, true);
memcg 2855 mm/huge_memory.c struct mem_cgroup *memcg = compound_head(page)->mem_cgroup;
memcg 2880 mm/huge_memory.c if (memcg)
memcg 2881 mm/huge_memory.c memcg_set_shrinker_bit(memcg, page_to_nid(page),
memcg 2895 mm/huge_memory.c if (sc->memcg)
memcg 2896 mm/huge_memory.c ds_queue = &sc->memcg->deferred_split_queue;
memcg 2912 mm/huge_memory.c if (sc->memcg)
memcg 2913 mm/huge_memory.c ds_queue = &sc->memcg->deferred_split_queue;
memcg 957 mm/khugepaged.c struct mem_cgroup *memcg;
memcg 980 mm/khugepaged.c if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp, &memcg, true))) {
memcg 988 mm/khugepaged.c mem_cgroup_cancel_charge(new_page, memcg, true);
memcg 996 mm/khugepaged.c mem_cgroup_cancel_charge(new_page, memcg, true);
memcg 1007 mm/khugepaged.c mem_cgroup_cancel_charge(new_page, memcg, true);
memcg 1093 mm/khugepaged.c mem_cgroup_commit_charge(new_page, memcg, false, true);
memcg 1094 mm/khugepaged.c count_memcg_events(memcg, THP_COLLAPSE_ALLOC, 1);
memcg 1111 mm/khugepaged.c mem_cgroup_cancel_charge(new_page, memcg, true);
memcg 1499 mm/khugepaged.c struct mem_cgroup *memcg;
memcg 1518 mm/khugepaged.c if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp, &memcg, true))) {
memcg 1531 mm/khugepaged.c mem_cgroup_cancel_charge(new_page, memcg, true);
memcg 1750 mm/khugepaged.c mem_cgroup_commit_charge(new_page, memcg, false, true);
memcg 1758 mm/khugepaged.c count_memcg_events(memcg, THP_COLLAPSE_ALLOC, 1);
memcg 1805 mm/khugepaged.c mem_cgroup_cancel_charge(new_page, memcg, true);
memcg 75 mm/list_lru.c struct mem_cgroup *memcg = NULL;
memcg 80 mm/list_lru.c memcg = mem_cgroup_from_kmem(ptr);
memcg 81 mm/list_lru.c if (!memcg)
memcg 84 mm/list_lru.c l = list_lru_from_memcg_idx(nlru, memcg_cache_id(memcg));
memcg 87 mm/list_lru.c *memcg_ptr = memcg;
memcg 129 mm/list_lru.c struct mem_cgroup *memcg;
memcg 134 mm/list_lru.c l = list_lru_from_kmem(nlru, item, &memcg);
memcg 138 mm/list_lru.c memcg_set_shrinker_bit(memcg, nid,
memcg 185 mm/list_lru.c int nid, struct mem_cgroup *memcg)
memcg 192 mm/list_lru.c l = list_lru_from_memcg_idx(nlru, memcg_cache_id(memcg));
memcg 268 mm/list_lru.c list_lru_walk_one(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
memcg 276 mm/list_lru.c ret = __list_lru_walk_one(nlru, memcg_cache_id(memcg), isolate, cb_arg,
memcg 284 mm/list_lru.c list_lru_walk_one_irq(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
memcg 292 mm/list_lru.c ret = __list_lru_walk_one(nlru, memcg_cache_id(memcg), isolate, cb_arg,
memcg 143 mm/memcontrol.c struct mem_cgroup *memcg;
memcg 157 mm/memcontrol.c int (*register_event)(struct mem_cgroup *memcg,
memcg 164 mm/memcontrol.c void (*unregister_event)(struct mem_cgroup *memcg,
memcg 176 mm/memcontrol.c static void mem_cgroup_threshold(struct mem_cgroup *memcg);
memcg 177 mm/memcontrol.c static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);
memcg 256 mm/memcontrol.c struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)
memcg 258 mm/memcontrol.c if (!memcg)
memcg 259 mm/memcontrol.c memcg = root_mem_cgroup;
memcg 260 mm/memcontrol.c return &memcg->vmpressure;
memcg 331 mm/memcontrol.c static int memcg_expand_one_shrinker_map(struct mem_cgroup *memcg,
memcg 341 mm/memcontrol.c mem_cgroup_nodeinfo(memcg, nid)->shrinker_map, true);
memcg 354 mm/memcontrol.c rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_map, new);
memcg 361 mm/memcontrol.c static void memcg_free_shrinker_maps(struct mem_cgroup *memcg)
memcg 367 mm/memcontrol.c if (mem_cgroup_is_root(memcg))
memcg 371 mm/memcontrol.c pn = mem_cgroup_nodeinfo(memcg, nid);
memcg 379 mm/memcontrol.c static int memcg_alloc_shrinker_maps(struct mem_cgroup *memcg)
memcg 384 mm/memcontrol.c if (mem_cgroup_is_root(memcg))
memcg 392 mm/memcontrol.c memcg_free_shrinker_maps(memcg);
memcg 396 mm/memcontrol.c rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_map, map);
memcg 406 mm/memcontrol.c struct mem_cgroup *memcg;
memcg 417 mm/memcontrol.c for_each_mem_cgroup(memcg) {
memcg 418 mm/memcontrol.c if (mem_cgroup_is_root(memcg))
memcg 420 mm/memcontrol.c ret = memcg_expand_one_shrinker_map(memcg, size, old_size);
memcg 422 mm/memcontrol.c mem_cgroup_iter_break(NULL, memcg);
memcg 433 mm/memcontrol.c void memcg_set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id)
memcg 435 mm/memcontrol.c if (shrinker_id >= 0 && memcg && !mem_cgroup_is_root(memcg)) {
memcg 439 mm/memcontrol.c map = rcu_dereference(memcg->nodeinfo[nid]->shrinker_map);
memcg 460 mm/memcontrol.c struct mem_cgroup *memcg;
memcg 462 mm/memcontrol.c memcg = page->mem_cgroup;
memcg 464 mm/memcontrol.c if (!memcg || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
memcg 465 mm/memcontrol.c memcg = root_mem_cgroup;
memcg 467 mm/memcontrol.c return &memcg->css;
memcg 485 mm/memcontrol.c struct mem_cgroup *memcg;
memcg 490 mm/memcontrol.c memcg = memcg_from_slab_page(page);
memcg 492 mm/memcontrol.c memcg = READ_ONCE(page->mem_cgroup);
memcg 493 mm/memcontrol.c while (memcg && !(memcg->css.flags & CSS_ONLINE))
memcg 494 mm/memcontrol.c memcg = parent_mem_cgroup(memcg);
memcg 495 mm/memcontrol.c if (memcg)
memcg 496 mm/memcontrol.c ino = cgroup_ino(memcg->css.cgroup);
memcg 502 mm/memcontrol.c mem_cgroup_page_nodeinfo(struct mem_cgroup *memcg, struct page *page)
memcg 506 mm/memcontrol.c return memcg->nodeinfo[nid];
memcg 586 mm/memcontrol.c static unsigned long soft_limit_excess(struct mem_cgroup *memcg)
memcg 588 mm/memcontrol.c unsigned long nr_pages = page_counter_read(&memcg->memory);
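soft_limit_excess(), reconstructed from the entries above, is the comparison that drives the soft-limit tree updates which follow: it returns how many pages the group is over its soft limit, or 0 if it is under:

    static unsigned long soft_limit_excess(struct mem_cgroup *memcg)
    {
            unsigned long nr_pages = page_counter_read(&memcg->memory);
            unsigned long soft_limit = READ_ONCE(memcg->soft_limit);
            unsigned long excess = 0;

            if (nr_pages > soft_limit)
                    excess = nr_pages - soft_limit;
            return excess;
    }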
memcg 589 mm/memcontrol.c unsigned long soft_limit = READ_ONCE(memcg->soft_limit);
memcg 598 mm/memcontrol.c static void mem_cgroup_update_tree(struct mem_cgroup *memcg, struct page *page)
memcg 611 mm/memcontrol.c for (; memcg; memcg = parent_mem_cgroup(memcg)) {
memcg 612 mm/memcontrol.c mz = mem_cgroup_page_nodeinfo(memcg, page);
memcg 613 mm/memcontrol.c excess = soft_limit_excess(memcg);
memcg 635 mm/memcontrol.c static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg)
memcg 642 mm/memcontrol.c mz = mem_cgroup_nodeinfo(memcg, nid);
memcg 667 mm/memcontrol.c if (!soft_limit_excess(mz->memcg) ||
memcg 668 mm/memcontrol.c !css_tryget_online(&mz->memcg->css))
memcg 691 mm/memcontrol.c void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val)
memcg 698 mm/memcontrol.c x = val + __this_cpu_read(memcg->vmstats_percpu->stat[idx]);
memcg 706 mm/memcontrol.c __this_cpu_add(memcg->vmstats_local->stat[idx], x);
memcg 707 mm/memcontrol.c for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
memcg 711 mm/memcontrol.c __this_cpu_write(memcg->vmstats_percpu->stat[idx], x);
memcg 719 mm/memcontrol.c parent = parent_mem_cgroup(pn->memcg);
memcg 740 mm/memcontrol.c struct mem_cgroup *memcg;
memcg 750 mm/memcontrol.c memcg = pn->memcg;
memcg 753 mm/memcontrol.c __mod_memcg_state(memcg, idx, val);
memcg 773 mm/memcontrol.c struct mem_cgroup *memcg;
memcg 777 mm/memcontrol.c memcg = memcg_from_slab_page(page);
memcg 780 mm/memcontrol.c if (!memcg || memcg == root_mem_cgroup) {
memcg 783 mm/memcontrol.c lruvec = mem_cgroup_lruvec(pgdat, memcg);
memcg 791 mm/memcontrol.c struct mem_cgroup *memcg;
memcg 794 mm/memcontrol.c memcg = mem_cgroup_from_obj(p);
memcg 795 mm/memcontrol.c if (memcg)
memcg 796 mm/memcontrol.c mod_memcg_state(memcg, idx, val);
memcg 806 mm/memcontrol.c void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
memcg 814 mm/memcontrol.c x = count + __this_cpu_read(memcg->vmstats_percpu->events[idx]);
memcg 822 mm/memcontrol.c __this_cpu_add(memcg->vmstats_local->events[idx], x);
memcg 823 mm/memcontrol.c for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
memcg 827 mm/memcontrol.c __this_cpu_write(memcg->vmstats_percpu->events[idx], x);
memcg 830 mm/memcontrol.c static unsigned long memcg_events(struct mem_cgroup *memcg, int event)
memcg 832 mm/memcontrol.c return atomic_long_read(&memcg->vmevents[event]);
memcg 835 mm/memcontrol.c static unsigned long memcg_events_local(struct mem_cgroup *memcg, int event)
memcg 841 mm/memcontrol.c x += per_cpu(memcg->vmstats_local->events[event], cpu);
memcg 845 mm/memcontrol.c static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
memcg 854 mm/memcontrol.c __mod_memcg_state(memcg, MEMCG_RSS, nr_pages);
memcg 856 mm/memcontrol.c __mod_memcg_state(memcg, MEMCG_CACHE, nr_pages);
memcg 858 mm/memcontrol.c __mod_memcg_state(memcg, NR_SHMEM, nr_pages);
memcg 863 mm/memcontrol.c __mod_memcg_state(memcg, MEMCG_RSS_HUGE, nr_pages);
memcg 868 mm/memcontrol.c __count_memcg_events(memcg, PGPGIN, 1);
memcg 870 mm/memcontrol.c __count_memcg_events(memcg, PGPGOUT, 1);
memcg 874 mm/memcontrol.c __this_cpu_add(memcg->vmstats_percpu->nr_page_events, nr_pages);
memcg 877 mm/memcontrol.c static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
memcg 882 mm/memcontrol.c val = __this_cpu_read(memcg->vmstats_percpu->nr_page_events);
memcg 883 mm/memcontrol.c next = __this_cpu_read(memcg->vmstats_percpu->targets[target]);
memcg 899 mm/memcontrol.c __this_cpu_write(memcg->vmstats_percpu->targets[target], next);
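The __mod_memcg_state() entries above show the batched statistics scheme; reconstructed from those snippets, an update accumulates in a per-cpu counter and is folded into the atomic counters of the memcg and all its ancestors only once it exceeds MEMCG_CHARGE_BATCH:

    x = val + __this_cpu_read(memcg->vmstats_percpu->stat[idx]);
    if (unlikely(abs(x) > MEMCG_CHARGE_BATCH)) {
            struct mem_cgroup *mi;

            __this_cpu_add(memcg->vmstats_local->stat[idx], x);
            for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
                    atomic_long_add(x, &mi->vmstats[idx]);
            x = 0;
    }
    __this_cpu_write(memcg->vmstats_percpu->stat[idx], x);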
memcg 909 mm/memcontrol.c static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
memcg 912 mm/memcontrol.c if (unlikely(mem_cgroup_event_ratelimit(memcg,
memcg 917 mm/memcontrol.c do_softlimit = mem_cgroup_event_ratelimit(memcg,
memcg 920 mm/memcontrol.c do_numainfo = mem_cgroup_event_ratelimit(memcg,
memcg 923 mm/memcontrol.c mem_cgroup_threshold(memcg);
memcg 925 mm/memcontrol.c mem_cgroup_update_tree(memcg, page);
memcg 928 mm/memcontrol.c atomic_inc(&memcg->numainfo_events);
memcg 957 mm/memcontrol.c struct mem_cgroup *memcg;
memcg 970 mm/memcontrol.c memcg = root_mem_cgroup;
memcg 972 mm/memcontrol.c memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
memcg 973 mm/memcontrol.c if (unlikely(!memcg))
memcg 974 mm/memcontrol.c memcg = root_mem_cgroup;
memcg 976 mm/memcontrol.c } while (!css_tryget(&memcg->css));
memcg 978 mm/memcontrol.c return memcg;
memcg 991 mm/memcontrol.c struct mem_cgroup *memcg = page->mem_cgroup;
memcg 997 mm/memcontrol.c if (!memcg || !css_tryget_online(&memcg->css))
memcg 998 mm/memcontrol.c memcg = root_mem_cgroup;
memcg 1000 mm/memcontrol.c return memcg;
memcg 1010 mm/memcontrol.c struct mem_cgroup *memcg = root_mem_cgroup;
memcg 1014 mm/memcontrol.c memcg = current->active_memcg;
memcg 1016 mm/memcontrol.c return memcg;
memcg 1044 mm/memcontrol.c struct mem_cgroup *memcg = NULL;
memcg 1111 mm/memcontrol.c memcg = mem_cgroup_from_css(css);
memcg 1119 mm/memcontrol.c memcg = NULL;
memcg 1128 mm/memcontrol.c (void)cmpxchg(&iter->position, pos, memcg);
memcg 1133 mm/memcontrol.c if (!memcg)
memcg 1145 mm/memcontrol.c return memcg;
memcg 1182 mm/memcontrol.c struct mem_cgroup *memcg = dead_memcg;
memcg 1186 mm/memcontrol.c __invalidate_reclaim_iterators(memcg, dead_memcg);
memcg 1187 mm/memcontrol.c last = memcg;
memcg 1188 mm/memcontrol.c } while ((memcg = parent_mem_cgroup(memcg)));
memcg 1214 mm/memcontrol.c int mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
memcg 1220 mm/memcontrol.c BUG_ON(memcg == root_mem_cgroup);
memcg 1222 mm/memcontrol.c for_each_mem_cgroup_tree(iter, memcg) {
memcg 1231 mm/memcontrol.c mem_cgroup_iter_break(memcg, iter);
memcg 1250 mm/memcontrol.c struct mem_cgroup *memcg;
memcg 1258 mm/memcontrol.c memcg = page->mem_cgroup;
memcg 1263 mm/memcontrol.c if (!memcg)
memcg 1264 mm/memcontrol.c memcg = root_mem_cgroup;
memcg 1266 mm/memcontrol.c mz = mem_cgroup_page_nodeinfo(memcg, page);
memcg 1325 mm/memcontrol.c static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
memcg 1331 mm/memcontrol.c count = page_counter_read(&memcg->memory);
memcg 1332 mm/memcontrol.c limit = READ_ONCE(memcg->memory.max);
memcg 1337 mm/memcontrol.c count = page_counter_read(&memcg->memsw);
memcg 1338 mm/memcontrol.c limit = READ_ONCE(memcg->memsw.max);
memcg 1355 mm/memcontrol.c static bool mem_cgroup_under_move(struct mem_cgroup *memcg)
memcg 1370 mm/memcontrol.c ret = mem_cgroup_is_descendant(from, memcg) ||
memcg 1371 mm/memcontrol.c mem_cgroup_is_descendant(to, memcg);
memcg 1377 mm/memcontrol.c static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg)
memcg 1380 mm/memcontrol.c if (mem_cgroup_under_move(memcg)) {
memcg 1393 mm/memcontrol.c static char *memory_stat_format(struct mem_cgroup *memcg)
memcg 1414 mm/memcontrol.c (u64)memcg_page_state(memcg, MEMCG_RSS) *
memcg 1417 mm/memcontrol.c (u64)memcg_page_state(memcg, MEMCG_CACHE) *
memcg 1420 mm/memcontrol.c (u64)memcg_page_state(memcg, MEMCG_KERNEL_STACK_KB) *
memcg 1423 mm/memcontrol.c (u64)(memcg_page_state(memcg, NR_SLAB_RECLAIMABLE) +
memcg 1424 mm/memcontrol.c memcg_page_state(memcg, NR_SLAB_UNRECLAIMABLE)) *
memcg 1427 mm/memcontrol.c (u64)memcg_page_state(memcg, MEMCG_SOCK) *
memcg 1431 mm/memcontrol.c (u64)memcg_page_state(memcg, NR_SHMEM) *
memcg 1434 mm/memcontrol.c (u64)memcg_page_state(memcg, NR_FILE_MAPPED) *
memcg 1437 mm/memcontrol.c (u64)memcg_page_state(memcg, NR_FILE_DIRTY) *
memcg 1440 mm/memcontrol.c (u64)memcg_page_state(memcg, NR_WRITEBACK) *
memcg 1450 mm/memcontrol.c (u64)memcg_page_state(memcg, MEMCG_RSS_HUGE) *
memcg 1455 mm/memcontrol.c (u64)memcg_page_state(memcg, NR_LRU_BASE + i) *
memcg 1459 mm/memcontrol.c (u64)memcg_page_state(memcg, NR_SLAB_RECLAIMABLE) *
memcg 1462 mm/memcontrol.c (u64)memcg_page_state(memcg, NR_SLAB_UNRECLAIMABLE) *
memcg 1467 mm/memcontrol.c seq_buf_printf(&s, "pgfault %lu\n", memcg_events(memcg, PGFAULT));
memcg 1468 mm/memcontrol.c seq_buf_printf(&s, "pgmajfault %lu\n", memcg_events(memcg, PGMAJFAULT));
memcg 1471 mm/memcontrol.c memcg_page_state(memcg, WORKINGSET_REFAULT));
memcg 1473 mm/memcontrol.c memcg_page_state(memcg, WORKINGSET_ACTIVATE));
memcg 1475 mm/memcontrol.c memcg_page_state(memcg, WORKINGSET_NODERECLAIM));
memcg 1477 mm/memcontrol.c seq_buf_printf(&s, "pgrefill %lu\n", memcg_events(memcg, PGREFILL));
memcg 1479 mm/memcontrol.c memcg_events(memcg, PGSCAN_KSWAPD) +
memcg 1480 mm/memcontrol.c memcg_events(memcg, PGSCAN_DIRECT));
memcg 1482 mm/memcontrol.c memcg_events(memcg, PGSTEAL_KSWAPD) +
memcg 1483 mm/memcontrol.c memcg_events(memcg, PGSTEAL_DIRECT));
memcg 1484 mm/memcontrol.c seq_buf_printf(&s, "pgactivate %lu\n", memcg_events(memcg, PGACTIVATE));
memcg 1485 mm/memcontrol.c seq_buf_printf(&s, "pgdeactivate %lu\n", memcg_events(memcg, PGDEACTIVATE));
memcg 1486 mm/memcontrol.c seq_buf_printf(&s, "pglazyfree %lu\n", memcg_events(memcg, PGLAZYFREE));
memcg 1487 mm/memcontrol.c seq_buf_printf(&s, "pglazyfreed %lu\n", memcg_events(memcg, PGLAZYFREED));
memcg 1491 mm/memcontrol.c memcg_events(memcg, THP_FAULT_ALLOC));
memcg 1493 mm/memcontrol.c memcg_events(memcg, THP_COLLAPSE_ALLOC));
memcg 1512 mm/memcontrol.c void mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
memcg 1516 mm/memcontrol.c if (memcg) {
memcg 1518 mm/memcontrol.c pr_cont_cgroup_path(memcg->css.cgroup);
memcg 1533 mm/memcontrol.c void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
memcg 1538 mm/memcontrol.c K((u64)page_counter_read(&memcg->memory)),
memcg 1539 mm/memcontrol.c K((u64)memcg->memory.max), memcg->memory.failcnt);
memcg 1542 mm/memcontrol.c K((u64)page_counter_read(&memcg->swap)),
memcg 1543 mm/memcontrol.c K((u64)memcg->swap.max), memcg->swap.failcnt);
memcg 1546 mm/memcontrol.c K((u64)page_counter_read(&memcg->memsw)),
memcg 1547 mm/memcontrol.c K((u64)memcg->memsw.max), memcg->memsw.failcnt);
memcg 1549 mm/memcontrol.c K((u64)page_counter_read(&memcg->kmem)),
memcg 1550 mm/memcontrol.c K((u64)memcg->kmem.max), memcg->kmem.failcnt);
memcg 1554 mm/memcontrol.c pr_cont_cgroup_path(memcg->css.cgroup);
memcg 1556 mm/memcontrol.c buf = memory_stat_format(memcg);
memcg 1566 mm/memcontrol.c unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
memcg 1570 mm/memcontrol.c max = memcg->memory.max;
memcg 1571 mm/memcontrol.c if (mem_cgroup_swappiness(memcg)) {
memcg 1575 mm/memcontrol.c memsw_max = memcg->memsw.max;
memcg 1576 mm/memcontrol.c swap_max = memcg->swap.max;
memcg 1583 mm/memcontrol.c unsigned long mem_cgroup_size(struct mem_cgroup *memcg)
memcg 1585 mm/memcontrol.c return page_counter_read(&memcg->memory);
memcg 1588 mm/memcontrol.c static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
memcg 1594 mm/memcontrol.c .memcg = memcg,
memcg 1623 mm/memcontrol.c static bool test_mem_cgroup_node_reclaimable(struct mem_cgroup *memcg,
memcg 1626 mm/memcontrol.c struct lruvec *lruvec = mem_cgroup_lruvec(NODE_DATA(nid), memcg);
memcg 1646 mm/memcontrol.c static void mem_cgroup_may_update_nodemask(struct mem_cgroup *memcg)
memcg 1653 mm/memcontrol.c if (!atomic_read(&memcg->numainfo_events))
memcg 1655 mm/memcontrol.c if (atomic_inc_return(&memcg->numainfo_updating) > 1)
memcg 1659 mm/memcontrol.c memcg->scan_nodes = node_states[N_MEMORY];
memcg 1663 mm/memcontrol.c if (!test_mem_cgroup_node_reclaimable(memcg, nid, false))
memcg 1664 mm/memcontrol.c node_clear(nid, memcg->scan_nodes);
memcg 1667 mm/memcontrol.c atomic_set(&memcg->numainfo_events, 0);
memcg 1668 mm/memcontrol.c atomic_set(&memcg->numainfo_updating, 0);
memcg 1683 mm/memcontrol.c int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
memcg 1687 mm/memcontrol.c mem_cgroup_may_update_nodemask(memcg);
memcg 1688 mm/memcontrol.c node = memcg->last_scanned_node;
memcg 1690 mm/memcontrol.c node = next_node_in(node, memcg->scan_nodes);
memcg 1699 mm/memcontrol.c memcg->last_scanned_node = node;
memcg 1703 mm/memcontrol.c int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
memcg 1772 mm/memcontrol.c static bool mem_cgroup_oom_trylock(struct mem_cgroup *memcg)
memcg 1778 mm/memcontrol.c for_each_mem_cgroup_tree(iter, memcg) {
memcg 1785 mm/memcontrol.c mem_cgroup_iter_break(memcg, iter);
memcg 1796 mm/memcontrol.c for_each_mem_cgroup_tree(iter, memcg) {
memcg 1798 mm/memcontrol.c mem_cgroup_iter_break(memcg, iter);
memcg 1811 mm/memcontrol.c static void mem_cgroup_oom_unlock(struct mem_cgroup *memcg)
memcg 1817 mm/memcontrol.c for_each_mem_cgroup_tree(iter, memcg)
memcg 1822 mm/memcontrol.c static void mem_cgroup_mark_under_oom(struct mem_cgroup *memcg)
memcg 1827 mm/memcontrol.c for_each_mem_cgroup_tree(iter, memcg)
memcg 1832 mm/memcontrol.c static void mem_cgroup_unmark_under_oom(struct mem_cgroup *memcg)
memcg 1841 mm/memcontrol.c for_each_mem_cgroup_tree(iter, memcg)
memcg 1850 mm/memcontrol.c struct mem_cgroup *memcg;
memcg 1862 mm/memcontrol.c oom_wait_memcg = oom_wait_info->memcg;
memcg 1870 mm/memcontrol.c static void memcg_oom_recover(struct mem_cgroup *memcg)
memcg 1880 mm/memcontrol.c if (memcg && memcg->under_oom)
memcg 1881 mm/memcontrol.c __wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, memcg);
memcg 1891 mm/memcontrol.c static enum oom_status mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
memcg 1899 mm/memcontrol.c memcg_memory_event(memcg, MEMCG_OOM);
memcg 1919 mm/memcontrol.c if (memcg->oom_kill_disable) {
memcg 1922 mm/memcontrol.c css_get(&memcg->css);
memcg 1923 mm/memcontrol.c current->memcg_in_oom = memcg;
memcg 1930 mm/memcontrol.c mem_cgroup_mark_under_oom(memcg);
memcg 1932 mm/memcontrol.c locked = mem_cgroup_oom_trylock(memcg);
memcg 1935 mm/memcontrol.c mem_cgroup_oom_notify(memcg);
memcg 1937 mm/memcontrol.c mem_cgroup_unmark_under_oom(memcg);
memcg 1938 mm/memcontrol.c if (mem_cgroup_out_of_memory(memcg, mask, order))
memcg 1944 mm/memcontrol.c mem_cgroup_oom_unlock(memcg);
memcg 1968 mm/memcontrol.c struct mem_cgroup *memcg = current->memcg_in_oom;
memcg 1973 mm/memcontrol.c if (!memcg)
memcg 1979 mm/memcontrol.c owait.memcg = memcg;
memcg 1986 mm/memcontrol.c mem_cgroup_mark_under_oom(memcg);
memcg 1988 mm/memcontrol.c locked = mem_cgroup_oom_trylock(memcg);
memcg 1991 mm/memcontrol.c mem_cgroup_oom_notify(memcg);
memcg 1993 mm/memcontrol.c if (locked && !memcg->oom_kill_disable) {
memcg 1994 mm/memcontrol.c mem_cgroup_unmark_under_oom(memcg);
memcg 1996 mm/memcontrol.c mem_cgroup_out_of_memory(memcg, current->memcg_oom_gfp_mask,
memcg 2000 mm/memcontrol.c mem_cgroup_unmark_under_oom(memcg);
memcg 2005 mm/memcontrol.c mem_cgroup_oom_unlock(memcg);
memcg 2011 mm/memcontrol.c memcg_oom_recover(memcg);
memcg 2015 mm/memcontrol.c css_put(&memcg->css);
memcg 2033 mm/memcontrol.c struct mem_cgroup *memcg;
memcg 2043 mm/memcontrol.c memcg = mem_cgroup_from_task(victim);
memcg 2044 mm/memcontrol.c if (memcg == root_mem_cgroup)
memcg 2052 mm/memcontrol.c for (; memcg; memcg = parent_mem_cgroup(memcg)) {
memcg 2053 mm/memcontrol.c if (memcg->oom_group)
memcg 2054 mm/memcontrol.c oom_group = memcg;
memcg 2056 mm/memcontrol.c if (memcg == oom_domain)
memcg 2068 mm/memcontrol.c void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
memcg 2071 mm/memcontrol.c pr_cont_cgroup_path(memcg->css.cgroup);
memcg 2088 mm/memcontrol.c struct mem_cgroup *memcg;
memcg 2107 mm/memcontrol.c memcg = page->mem_cgroup;
memcg 2108 mm/memcontrol.c if (unlikely(!memcg))
memcg 2111 mm/memcontrol.c if (atomic_read(&memcg->moving_account) <= 0)
memcg 2112 mm/memcontrol.c return memcg;
memcg 2114 mm/memcontrol.c spin_lock_irqsave(&memcg->move_lock, flags);
memcg 2115 mm/memcontrol.c if (memcg != page->mem_cgroup) {
memcg 2116 mm/memcontrol.c spin_unlock_irqrestore(&memcg->move_lock, flags);
memcg 2125 mm/memcontrol.c memcg->move_lock_task = current;
memcg 2126 mm/memcontrol.c memcg->move_lock_flags = flags;
memcg 2128 mm/memcontrol.c return memcg;
memcg 2138 mm/memcontrol.c void __unlock_page_memcg(struct mem_cgroup *memcg)
memcg 2140 mm/memcontrol.c if (memcg && memcg->move_lock_task == current) {
memcg 2141 mm/memcontrol.c unsigned long flags = memcg->move_lock_flags;
memcg 2143 mm/memcontrol.c memcg->move_lock_task = NULL;
memcg 2144 mm/memcontrol.c memcg->move_lock_flags = 0;
memcg 2146 mm/memcontrol.c spin_unlock_irqrestore(&memcg->move_lock, flags);
memcg 2183 mm/memcontrol.c static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
memcg 2195 mm/memcontrol.c if (memcg == stock->cached && stock->nr_pages >= nr_pages) {
memcg 2244 mm/memcontrol.c static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
memcg 2252 mm/memcontrol.c if (stock->cached != memcg) { /* reset if necessary */
memcg 2254 mm/memcontrol.c stock->cached = memcg;
memcg 2284 mm/memcontrol.c struct mem_cgroup *memcg;
memcg 2288 mm/memcontrol.c memcg = stock->cached;
memcg 2289 mm/memcontrol.c if (memcg && stock->nr_pages &&
memcg 2290 mm/memcontrol.c mem_cgroup_is_descendant(memcg, root_memcg))
memcg 2309 mm/memcontrol.c struct mem_cgroup *memcg, *mi;
memcg 2314 mm/memcontrol.c for_each_mem_cgroup(memcg) {
memcg 2321 mm/memcontrol.c x = this_cpu_xchg(memcg->vmstats_percpu->stat[i], 0);
memcg 2323 mm/memcontrol.c for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
memcg 2324 mm/memcontrol.c atomic_long_add(x, &memcg->vmstats[i]);
memcg 2332 mm/memcontrol.c pn = mem_cgroup_nodeinfo(memcg, nid);
memcg 2344 mm/memcontrol.c x = this_cpu_xchg(memcg->vmstats_percpu->events[i], 0);
memcg 2346 mm/memcontrol.c for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
memcg 2347 mm/memcontrol.c atomic_long_add(x, &memcg->vmevents[i]);
memcg 2354 mm/memcontrol.c static void reclaim_high(struct mem_cgroup *memcg,
memcg 2359 mm/memcontrol.c if (page_counter_read(&memcg->memory) <= memcg->high)
memcg 2361 mm/memcontrol.c memcg_memory_event(memcg, MEMCG_HIGH);
memcg 2362 mm/memcontrol.c try_to_free_mem_cgroup_pages(memcg, nr_pages, gfp_mask, true);
memcg 2363 mm/memcontrol.c } while ((memcg = parent_mem_cgroup(memcg)));
memcg 2368 mm/memcontrol.c struct mem_cgroup *memcg;
memcg 2370 mm/memcontrol.c memcg = container_of(work, struct mem_cgroup, high_work);
memcg 2371 mm/memcontrol.c reclaim_high(memcg, MEMCG_CHARGE_BATCH, GFP_KERNEL);
memcg 2431 mm/memcontrol.c static unsigned long calculate_high_delay(struct mem_cgroup *memcg,
memcg 2441 mm/memcontrol.c usage = page_counter_read(&memcg->memory);
memcg 2442 mm/memcontrol.c high = READ_ONCE(memcg->high);
memcg 2459 mm/memcontrol.c } while ((memcg = parent_mem_cgroup(memcg)) &&
memcg 2460 mm/memcontrol.c !mem_cgroup_is_root(memcg));
memcg 2504 mm/memcontrol.c struct mem_cgroup *memcg;
memcg 2509 mm/memcontrol.c memcg = get_mem_cgroup_from_mm(current->mm);
memcg 2510 mm/memcontrol.c reclaim_high(memcg, nr_pages, GFP_KERNEL);
memcg 2517 mm/memcontrol.c penalty_jiffies = calculate_high_delay(memcg, nr_pages);
memcg 2538 mm/memcontrol.c css_put(&memcg->css);
memcg 2541 mm/memcontrol.c static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
memcg 2553 mm/memcontrol.c if (mem_cgroup_is_root(memcg))
memcg 2556 mm/memcontrol.c if (consume_stock(memcg, nr_pages))
memcg 2560 mm/memcontrol.c page_counter_try_charge(&memcg->memsw, batch, &counter)) {
memcg 2561 mm/memcontrol.c if (page_counter_try_charge(&memcg->memory, batch, &counter))
memcg 2564 mm/memcontrol.c page_counter_uncharge(&memcg->memsw, batch);
memcg 2680 mm/memcontrol.c page_counter_charge(&memcg->memory, nr_pages);
memcg 2682 mm/memcontrol.c page_counter_charge(&memcg->memsw, nr_pages);
memcg 2683 mm/memcontrol.c css_get_many(&memcg->css, nr_pages);
memcg 2688 mm/memcontrol.c css_get_many(&memcg->css, batch);
memcg 2690 mm/memcontrol.c refill_stock(memcg, batch - nr_pages);
memcg 2702 mm/memcontrol.c if (page_counter_read(&memcg->memory) > memcg->high) {
memcg 2705 mm/memcontrol.c schedule_work(&memcg->high_work);
memcg 2712 mm/memcontrol.c } while ((memcg = parent_mem_cgroup(memcg)));
memcg 2717 mm/memcontrol.c static void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages)
memcg 2719 mm/memcontrol.c if (mem_cgroup_is_root(memcg))
memcg 2722 mm/memcontrol.c page_counter_uncharge(&memcg->memory, nr_pages);
memcg 2724 mm/memcontrol.c page_counter_uncharge(&memcg->memsw, nr_pages);
memcg 2726 mm/memcontrol.c css_put_many(&memcg->css, nr_pages);
memcg 2760 mm/memcontrol.c static void commit_charge(struct page *page, struct mem_cgroup *memcg,
memcg 2788 mm/memcontrol.c page->mem_cgroup = memcg;
memcg 2868 mm/memcontrol.c struct mem_cgroup *memcg;
memcg 2877 mm/memcontrol.c struct mem_cgroup *memcg = cw->memcg;
memcg 2880 mm/memcontrol.c memcg_create_kmem_cache(memcg, cachep);
memcg 2882 mm/memcontrol.c css_put(&memcg->css);
memcg 2889 mm/memcontrol.c static void memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg,
memcg 2894 mm/memcontrol.c if (!css_tryget_online(&memcg->css))
memcg 2901 mm/memcontrol.c cw->memcg = memcg;
memcg 2933 mm/memcontrol.c struct mem_cgroup *memcg;
memcg 2946 mm/memcontrol.c memcg = current->active_memcg;
memcg 2948 mm/memcontrol.c memcg = mem_cgroup_from_task(current);
memcg 2950 mm/memcontrol.c if (!memcg || memcg == root_mem_cgroup)
memcg 2953 mm/memcontrol.c kmemcg_id = READ_ONCE(memcg->kmemcg_id);
memcg 2986 mm/memcontrol.c memcg_schedule_kmem_cache_create(memcg, cachep);
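reclaim_high(), reconstructed from the entries above: when a charge pushes usage over the high watermark, try_charge() defers reclaim to high_work or to the return-to-userspace path, and both end up walking the hierarchy like this:

    static void reclaim_high(struct mem_cgroup *memcg,
                             unsigned int nr_pages, gfp_t gfp_mask)
    {
            do {
                    if (page_counter_read(&memcg->memory) <= memcg->high)
                            continue;
                    memcg_memory_event(memcg, MEMCG_HIGH);
                    try_to_free_mem_cgroup_pages(memcg, nr_pages, gfp_mask, true);
            } while ((memcg = parent_mem_cgroup(memcg)));
    }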
memcg 3014 mm/memcontrol.c struct mem_cgroup *memcg)
memcg 3020 mm/memcontrol.c ret = try_charge(memcg, gfp, nr_pages);
memcg 3025 mm/memcontrol.c !page_counter_try_charge(&memcg->kmem, nr_pages, &counter)) {
memcg 3033 mm/memcontrol.c page_counter_charge(&memcg->kmem, nr_pages);
memcg 3036 mm/memcontrol.c cancel_charge(memcg, nr_pages);
memcg 3052 mm/memcontrol.c struct mem_cgroup *memcg;
memcg 3058 mm/memcontrol.c memcg = get_mem_cgroup_from_current();
memcg 3059 mm/memcontrol.c if (!mem_cgroup_is_root(memcg)) {
memcg 3060 mm/memcontrol.c ret = __memcg_kmem_charge_memcg(page, gfp, order, memcg);
memcg 3062 mm/memcontrol.c page->mem_cgroup = memcg;
memcg 3066 mm/memcontrol.c css_put(&memcg->css);
memcg 3075 mm/memcontrol.c void __memcg_kmem_uncharge_memcg(struct mem_cgroup *memcg,
memcg 3079 mm/memcontrol.c page_counter_uncharge(&memcg->kmem, nr_pages);
memcg 3081 mm/memcontrol.c page_counter_uncharge(&memcg->memory, nr_pages);
memcg 3083 mm/memcontrol.c page_counter_uncharge(&memcg->memsw, nr_pages);
memcg 3092 mm/memcontrol.c struct mem_cgroup *memcg = page->mem_cgroup;
memcg 3095 mm/memcontrol.c if (!memcg)
memcg 3098 mm/memcontrol.c VM_BUG_ON_PAGE(mem_cgroup_is_root(memcg), page);
memcg 3099 mm/memcontrol.c __memcg_kmem_uncharge_memcg(memcg, nr_pages);
memcg 3106 mm/memcontrol.c css_put_many(&memcg->css, nr_pages);
memcg 3170 mm/memcontrol.c static int mem_cgroup_resize_max(struct mem_cgroup *memcg,
memcg 3177 mm/memcontrol.c struct page_counter *counter = memsw ? &memcg->memsw : &memcg->memory;
memcg 3190 mm/memcontrol.c limits_invariant = memsw ? max >= memcg->memory.max :
memcg 3191 mm/memcontrol.c max <= memcg->memsw.max;
memcg 3206 mm/memcontrol.c drain_all_stock(memcg);
memcg 3211 mm/memcontrol.c if (!try_to_free_mem_cgroup_pages(memcg, 1,
memcg 3219 mm/memcontrol.c memcg_oom_recover(memcg);
memcg 3263 mm/memcontrol.c reclaimed = mem_cgroup_soft_reclaim(mz->memcg, pgdat,
memcg 3278 mm/memcontrol.c excess = soft_limit_excess(mz->memcg);
memcg 3290 mm/memcontrol.c css_put(&mz->memcg->css);
memcg 3303 mm/memcontrol.c css_put(&next_mz->memcg->css);
memcg 3313 mm/memcontrol.c static inline bool memcg_has_children(struct mem_cgroup *memcg)
memcg 3318 mm/memcontrol.c ret = css_next_child(NULL, &memcg->css);
memcg 3328 mm/memcontrol.c static int mem_cgroup_force_empty(struct mem_cgroup *memcg)
memcg 3335 mm/memcontrol.c drain_all_stock(memcg);
memcg 3338 mm/memcontrol.c while (nr_retries && page_counter_read(&memcg->memory)) {
memcg 3344 mm/memcontrol.c progress = try_to_free_mem_cgroup_pages(memcg, 1,
memcg 3361 mm/memcontrol.c struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
memcg 3363 mm/memcontrol.c if (mem_cgroup_is_root(memcg))
memcg 3365 mm/memcontrol.c return mem_cgroup_force_empty(memcg) ?: nbytes;
memcg 3378 mm/memcontrol.c struct mem_cgroup *memcg = mem_cgroup_from_css(css);
memcg 3379 mm/memcontrol.c struct mem_cgroup *parent_memcg = mem_cgroup_from_css(memcg->css.parent);
memcg 3381 mm/memcontrol.c if (memcg->use_hierarchy == val)
memcg 3394 mm/memcontrol.c if (!memcg_has_children(memcg))
memcg 3395 mm/memcontrol.c memcg->use_hierarchy = val;
memcg 3404 mm/memcontrol.c static unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
memcg 3408 mm/memcontrol.c if (mem_cgroup_is_root(memcg)) {
memcg 3409 mm/memcontrol.c val = memcg_page_state(memcg, MEMCG_CACHE) +
memcg 3410 mm/memcontrol.c memcg_page_state(memcg, MEMCG_RSS);
memcg 3412 mm/memcontrol.c val += memcg_page_state(memcg, MEMCG_SWAP);
memcg 3415 mm/memcontrol.c val = page_counter_read(&memcg->memory);
memcg 3417 mm/memcontrol.c val = page_counter_read(&memcg->memsw);
memcg 3433 mm/memcontrol.c struct mem_cgroup *memcg = mem_cgroup_from_css(css);
memcg 3438 mm/memcontrol.c counter = &memcg->memory;
memcg 3441 mm/memcontrol.c counter = &memcg->memsw;
memcg 3444 mm/memcontrol.c counter = &memcg->kmem;
memcg 3447 mm/memcontrol.c counter = &memcg->tcpmem;
memcg 3455 mm/memcontrol.c if (counter == &memcg->memory)
memcg 3456 mm/memcontrol.c return (u64)mem_cgroup_usage(memcg, false) * PAGE_SIZE;
memcg 3457 mm/memcontrol.c if (counter == &memcg->memsw)
memcg 3458 mm/memcontrol.c return (u64)mem_cgroup_usage(memcg, true) * PAGE_SIZE;
memcg 3467 mm/memcontrol.c return (u64)memcg->soft_limit * PAGE_SIZE;
memcg 3473 mm/memcontrol.c static void memcg_flush_percpu_vmstats(struct mem_cgroup *memcg)
memcg 3481 mm/memcontrol.c stat[i] += per_cpu(memcg->vmstats_percpu->stat[i], cpu);
memcg 3483 mm/memcontrol.c for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
memcg 3488 mm/memcontrol.c struct mem_cgroup_per_node *pn = memcg->nodeinfo[node];
memcg 3505 mm/memcontrol.c static void memcg_flush_percpu_vmevents(struct mem_cgroup *memcg)
memcg 3516 mm/memcontrol.c events[i] += per_cpu(memcg->vmstats_percpu->events[i],
memcg 3519 mm/memcontrol.c for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
memcg 3525 mm/memcontrol.c static int memcg_online_kmem(struct mem_cgroup *memcg)
memcg 3532 mm/memcontrol.c BUG_ON(memcg->kmemcg_id >= 0);
memcg 3533 mm/memcontrol.c BUG_ON(memcg->kmem_state);
memcg 3546 mm/memcontrol.c memcg->kmemcg_id = memcg_id;
memcg 3547 mm/memcontrol.c memcg->kmem_state = KMEM_ONLINE;
memcg 3548 mm/memcontrol.c INIT_LIST_HEAD(&memcg->kmem_caches);
memcg 3553 mm/memcontrol.c static void memcg_offline_kmem(struct mem_cgroup *memcg)
memcg 3559 mm/memcontrol.c if (memcg->kmem_state != KMEM_ONLINE)
memcg 3567 mm/memcontrol.c memcg->kmem_state = KMEM_ALLOCATED;
memcg 3569 mm/memcontrol.c parent = parent_mem_cgroup(memcg);
memcg 3576 mm/memcontrol.c memcg_deactivate_kmem_caches(memcg, parent);
memcg 3578 mm/memcontrol.c kmemcg_id = memcg->kmemcg_id;
memcg 3590 mm/memcontrol.c css_for_each_descendant_pre(css, &memcg->css) {
memcg 3594 mm/memcontrol.c if (!memcg->use_hierarchy)
memcg 3604 mm/memcontrol.c static void memcg_free_kmem(struct mem_cgroup *memcg)
memcg 3607 mm/memcontrol.c if (unlikely(memcg->kmem_state == KMEM_ONLINE))
memcg 3608 mm/memcontrol.c memcg_offline_kmem(memcg);
memcg 3610 mm/memcontrol.c if (memcg->kmem_state == KMEM_ALLOCATED) {
memcg 3611 mm/memcontrol.c WARN_ON(!list_empty(&memcg->kmem_caches));
memcg 3616 mm/memcontrol.c static int memcg_online_kmem(struct mem_cgroup *memcg)
memcg 3620 mm/memcontrol.c static void memcg_offline_kmem(struct mem_cgroup *memcg)
memcg 3623 mm/memcontrol.c static void memcg_free_kmem(struct mem_cgroup *memcg)
memcg 3628 mm/memcontrol.c static int memcg_update_kmem_max(struct mem_cgroup *memcg,
memcg 3634 mm/memcontrol.c ret = page_counter_set_max(&memcg->kmem, max);
memcg 3639 mm/memcontrol.c static int memcg_update_tcp_max(struct mem_cgroup *memcg, unsigned long max)
memcg 3645 mm/memcontrol.c ret = page_counter_set_max(&memcg->tcpmem, max);
memcg 3649 mm/memcontrol.c if (!memcg->tcpmem_active) {
memcg 3667 mm/memcontrol.c memcg->tcpmem_active = true;
memcg 3681 mm/memcontrol.c struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
memcg 3692 mm/memcontrol.c if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */
memcg 3698 mm/memcontrol.c ret = mem_cgroup_resize_max(memcg, nr_pages, false);
memcg 3701 mm/memcontrol.c ret = mem_cgroup_resize_max(memcg, nr_pages, true);
memcg 3707 mm/memcontrol.c ret = memcg_update_kmem_max(memcg, nr_pages);
memcg 3710 mm/memcontrol.c ret = memcg_update_tcp_max(memcg, nr_pages);
memcg 3715 mm/memcontrol.c memcg->soft_limit = nr_pages;
memcg 3725 mm/memcontrol.c struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
memcg 3730 mm/memcontrol.c counter = &memcg->memory;
memcg 3733 mm/memcontrol.c counter = &memcg->memsw;
memcg 3736 mm/memcontrol.c counter = &memcg->kmem;
memcg 3739 mm/memcontrol.c counter = &memcg->tcpmem;
memcg 3769 mm/memcontrol.c struct mem_cgroup *memcg = mem_cgroup_from_css(css);
memcg 3780 mm/memcontrol.c memcg->move_charge_at_immigrate = val;
memcg 3797 mm/memcontrol.c static unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
memcg 3800 mm/memcontrol.c struct lruvec *lruvec = mem_cgroup_lruvec(NODE_DATA(nid), memcg);
memcg 3814 mm/memcontrol.c static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg,
memcg 3823 mm/memcontrol.c nr += memcg_page_state_local(memcg, NR_LRU_BASE + lru);
memcg 3844 mm/memcontrol.c struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
memcg 3847 mm/memcontrol.c nr = mem_cgroup_nr_lru_pages(memcg, stat->lru_mask);
memcg 3850 mm/memcontrol.c nr = mem_cgroup_node_nr_lru_pages(memcg, nid,
memcg 3861 mm/memcontrol.c for_each_mem_cgroup_tree(iter, memcg)
memcg 3866 mm/memcontrol.c for_each_mem_cgroup_tree(iter, memcg)
memcg 3917 mm/memcontrol.c struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
memcg 3929 mm/memcontrol.c memcg_page_state_local(memcg, memcg1_stats[i]) *
memcg 3935 mm/memcontrol.c memcg_events_local(memcg, memcg1_events[i]));
memcg 3939 mm/memcontrol.c memcg_page_state_local(memcg, NR_LRU_BASE + i) *
memcg 3944 mm/memcontrol.c for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) {
memcg 3958 mm/memcontrol.c (u64)memcg_page_state(memcg, memcg1_stats[i]) *
memcg 3964 mm/memcontrol.c (u64)memcg_events(memcg, memcg1_events[i]));
memcg 3968 mm/memcontrol.c (u64)memcg_page_state(memcg, NR_LRU_BASE + i) *
memcg 3980 mm/memcontrol.c mz = mem_cgroup_nodeinfo(memcg, pgdat->node_id);
memcg 4001 mm/memcontrol.c struct mem_cgroup *memcg = mem_cgroup_from_css(css);
memcg 4003 mm/memcontrol.c return mem_cgroup_swappiness(memcg);
memcg 4009 mm/memcontrol.c struct mem_cgroup *memcg = mem_cgroup_from_css(css);
memcg 4015 mm/memcontrol.c memcg->swappiness = val;
memcg 4022 mm/memcontrol.c static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
memcg 4030 mm/memcontrol.c t = rcu_dereference(memcg->thresholds.primary);
memcg 4032 mm/memcontrol.c t = rcu_dereference(memcg->memsw_thresholds.primary);
memcg 4037 mm/memcontrol.c usage = mem_cgroup_usage(memcg, swap);
memcg 4073 mm/memcontrol.c static void mem_cgroup_threshold(struct mem_cgroup *memcg)
memcg 4075 mm/memcontrol.c while (memcg) {
memcg 4076 mm/memcontrol.c __mem_cgroup_threshold(memcg, false);
memcg 4078 mm/memcontrol.c __mem_cgroup_threshold(memcg, true);
memcg 4080 mm/memcontrol.c memcg = parent_mem_cgroup(memcg);
memcg 4098 mm/memcontrol.c static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg)
memcg 4104 mm/memcontrol.c list_for_each_entry(ev, &memcg->oom_notify, list)
memcg 4111 mm/memcontrol.c static void mem_cgroup_oom_notify(struct mem_cgroup *memcg)
memcg 4115 mm/memcontrol.c for_each_mem_cgroup_tree(iter, memcg)
memcg 4119 mm/memcontrol.c static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
memcg 4132 mm/memcontrol.c mutex_lock(&memcg->thresholds_lock);
memcg 4135 mm/memcontrol.c thresholds = &memcg->thresholds;
memcg 4136 mm/memcontrol.c usage = mem_cgroup_usage(memcg, false);
memcg 4138 mm/memcontrol.c thresholds = &memcg->memsw_thresholds;
memcg 4139 mm/memcontrol.c usage = mem_cgroup_usage(memcg, true);
memcg 4145 mm/memcontrol.c __mem_cgroup_threshold(memcg, type == _MEMSWAP);
memcg 4195 mm/memcontrol.c mutex_unlock(&memcg->thresholds_lock);
memcg 4200 mm/memcontrol.c static int mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
memcg 4203 mm/memcontrol.c return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEM);
memcg 4206 mm/memcontrol.c static int memsw_cgroup_usage_register_event(struct mem_cgroup *memcg,
memcg 4209 mm/memcontrol.c return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEMSWAP);
memcg 4212 mm/memcontrol.c static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
memcg 4220 mm/memcontrol.c mutex_lock(&memcg->thresholds_lock);
memcg 4223 mm/memcontrol.c thresholds = &memcg->thresholds;
memcg 4224 mm/memcontrol.c usage = mem_cgroup_usage(memcg, false);
memcg 4226 mm/memcontrol.c thresholds = &memcg->memsw_thresholds;
memcg 4227 mm/memcontrol.c usage = mem_cgroup_usage(memcg, true);
memcg 4235 mm/memcontrol.c __mem_cgroup_threshold(memcg, type == _MEMSWAP);
memcg 4294 mm/memcontrol.c mutex_unlock(&memcg->thresholds_lock);
memcg 4297 mm/memcontrol.c static void mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
memcg 4300 mm/memcontrol.c return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEM);
memcg 4303 mm/memcontrol.c static void memsw_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
memcg 4306 mm/memcontrol.c return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEMSWAP);
memcg 4309 mm/memcontrol.c static int mem_cgroup_oom_register_event(struct mem_cgroup *memcg,
memcg 4321 mm/memcontrol.c list_add(&event->list, &memcg->oom_notify);
memcg 4324 mm/memcontrol.c if (memcg->under_oom)
memcg 4331 mm/memcontrol.c static void mem_cgroup_oom_unregister_event(struct mem_cgroup *memcg,
memcg 4338 mm/memcontrol.c list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) {
memcg 4350 mm/memcontrol.c struct mem_cgroup *memcg = mem_cgroup_from_seq(sf);
memcg 4352 mm/memcontrol.c seq_printf(sf, "oom_kill_disable %d\n", memcg->oom_kill_disable);
memcg 4353 mm/memcontrol.c seq_printf(sf, "under_oom %d\n", (bool)memcg->under_oom);
memcg 4355 mm/memcontrol.c atomic_long_read(&memcg->memory_events[MEMCG_OOM_KILL]));
memcg 4362 mm/memcontrol.c struct mem_cgroup *memcg = mem_cgroup_from_css(css);
memcg 4368 mm/memcontrol.c memcg->oom_kill_disable = val;
memcg 4370 mm/memcontrol.c memcg_oom_recover(memcg);
memcg 4379 mm/memcontrol.c static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
memcg 4381 mm/memcontrol.c return wb_domain_init(&memcg->cgwb_domain, gfp);
memcg 4384 mm/memcontrol.c static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
memcg 4386 mm/memcontrol.c wb_domain_exit(&memcg->cgwb_domain);
memcg 4389 mm/memcontrol.c static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
memcg 4391 mm/memcontrol.c wb_domain_size_changed(&memcg->cgwb_domain);
memcg 4396 mm/memcontrol.c struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
memcg 4398 mm/memcontrol.c if (!memcg->css.parent)
memcg 4401 mm/memcontrol.c return &memcg->cgwb_domain;
memcg 4408 mm/memcontrol.c static unsigned long memcg_exact_page_state(struct mem_cgroup *memcg, int idx)
memcg 4410 mm/memcontrol.c long x = atomic_long_read(&memcg->vmstats[idx]);
memcg 4414 mm/memcontrol.c x += per_cpu_ptr(memcg->vmstats_percpu, cpu)->stat[idx];
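memcg_exact_page_state(), reconstructed from the entries above: the cgroup writeback code cannot tolerate the batching error of memcg_page_state(), so it adds the unflushed per-cpu deltas back onto the atomic total and clamps at zero:

    static unsigned long memcg_exact_page_state(struct mem_cgroup *memcg, int idx)
    {
            long x = atomic_long_read(&memcg->vmstats[idx]);
            int cpu;

            for_each_online_cpu(cpu)
                    x += per_cpu_ptr(memcg->vmstats_percpu, cpu)->stat[idx];
            if (x < 0)
                    x = 0;
            return x;
    }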
memcg 4442 mm/memcontrol.c struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
memcg 4445 mm/memcontrol.c *pdirty = memcg_exact_page_state(memcg, NR_FILE_DIRTY);
memcg 4448 mm/memcontrol.c *pwriteback = memcg_exact_page_state(memcg, NR_WRITEBACK);
memcg 4449 mm/memcontrol.c *pfilepages = memcg_exact_page_state(memcg, NR_INACTIVE_FILE) +
memcg 4450 mm/memcontrol.c memcg_exact_page_state(memcg, NR_ACTIVE_FILE);
memcg 4453 mm/memcontrol.c while ((parent = parent_mem_cgroup(memcg))) {
memcg 4454 mm/memcontrol.c unsigned long ceiling = min(memcg->memory.max, memcg->high);
memcg 4455 mm/memcontrol.c unsigned long used = page_counter_read(&memcg->memory);
memcg 4458 mm/memcontrol.c memcg = parent;
memcg 4509 mm/memcontrol.c struct mem_cgroup *memcg = page->mem_cgroup;
memcg 4524 mm/memcontrol.c frn = &memcg->cgwb_frn[i];
memcg 4551 mm/memcontrol.c frn = &memcg->cgwb_frn[oldest];
memcg 4561 mm/memcontrol.c struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
memcg 4567 mm/memcontrol.c struct memcg_cgwb_frn *frn = &memcg->cgwb_frn[i];
memcg 4588 mm/memcontrol.c static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
memcg 4593 mm/memcontrol.c static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
memcg 4597 mm/memcontrol.c static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
memcg 4625 mm/memcontrol.c struct mem_cgroup *memcg = event->memcg;
memcg 4629 mm/memcontrol.c event->unregister_event(memcg, event->eventfd);
memcg 4636 mm/memcontrol.c css_put(&memcg->css);
memcg 4649 mm/memcontrol.c struct mem_cgroup *memcg = event->memcg;
memcg 4662 mm/memcontrol.c spin_lock(&memcg->event_list_lock);
memcg 4671 mm/memcontrol.c spin_unlock(&memcg->event_list_lock);
memcg 4699 mm/memcontrol.c struct mem_cgroup *memcg = mem_cgroup_from_css(css);
memcg 4725 mm/memcontrol.c event->memcg = memcg;
memcg 4797 mm/memcontrol.c ret = event->register_event(memcg, event->eventfd, buf);
memcg 4803 mm/memcontrol.c spin_lock(&memcg->event_list_lock);
memcg 4804 mm/memcontrol.c list_add(&event->list, &memcg->event_list);
memcg 4805 mm/memcontrol.c spin_unlock(&memcg->event_list_lock);
memcg 4983 mm/memcontrol.c static void mem_cgroup_id_remove(struct mem_cgroup *memcg)
memcg 4985 mm/memcontrol.c if (memcg->id.id > 0) {
memcg 4986 mm/memcontrol.c idr_remove(&mem_cgroup_idr, memcg->id.id);
memcg 4987 mm/memcontrol.c memcg->id.id = 0;
memcg 4991 mm/memcontrol.c static void mem_cgroup_id_get_many(struct mem_cgroup *memcg, unsigned int n)
memcg 4993 mm/memcontrol.c refcount_add(n, &memcg->id.ref);
memcg 4996 mm/memcontrol.c static void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n)
memcg 4998 mm/memcontrol.c if (refcount_sub_and_test(n, &memcg->id.ref)) {
memcg 4999 mm/memcontrol.c mem_cgroup_id_remove(memcg);
memcg 5002 mm/memcontrol.c css_put(&memcg->css);
memcg 5006 mm/memcontrol.c static inline void mem_cgroup_id_put(struct mem_cgroup *memcg)
memcg 5008 mm/memcontrol.c mem_cgroup_id_put_many(memcg, 1);
memcg 5023 mm/memcontrol.c static int alloc_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
memcg 5057 mm/memcontrol.c pn->memcg = memcg;
memcg 5059 mm/memcontrol.c memcg->nodeinfo[node] = pn;
memcg 5063 mm/memcontrol.c static void free_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
memcg 5065 mm/memcontrol.c struct mem_cgroup_per_node *pn = memcg->nodeinfo[node];
memcg 5075 mm/memcontrol.c static void __mem_cgroup_free(struct mem_cgroup *memcg)
memcg 5080 mm/memcontrol.c free_mem_cgroup_per_node_info(memcg, node);
memcg 5081 mm/memcontrol.c free_percpu(memcg->vmstats_percpu);
memcg 5082 mm/memcontrol.c free_percpu(memcg->vmstats_local);
memcg 5083 mm/memcontrol.c kfree(memcg);
memcg 5086 mm/memcontrol.c static void mem_cgroup_free(struct mem_cgroup *memcg)
memcg 5088 mm/memcontrol.c memcg_wb_domain_exit(memcg);
memcg 5093 mm/memcontrol.c memcg_flush_percpu_vmstats(memcg);
memcg 5094 mm/memcontrol.c memcg_flush_percpu_vmevents(memcg);
memcg 5095 mm/memcontrol.c __mem_cgroup_free(memcg);
memcg 5100 mm/memcontrol.c struct mem_cgroup *memcg;
memcg 5109 mm/memcontrol.c memcg = kzalloc(size, GFP_KERNEL);
memcg 5110 mm/memcontrol.c if (!memcg)
memcg 5113 mm/memcontrol.c memcg->id.id = idr_alloc(&mem_cgroup_idr, NULL,
memcg 5116 mm/memcontrol.c if (memcg->id.id < 0) {
memcg 5117 mm/memcontrol.c error = memcg->id.id;
memcg 5121 mm/memcontrol.c memcg->vmstats_local = alloc_percpu(struct memcg_vmstats_percpu);
memcg 5122 mm/memcontrol.c if (!memcg->vmstats_local)
memcg 5125 mm/memcontrol.c memcg->vmstats_percpu = alloc_percpu(struct memcg_vmstats_percpu);
memcg 5126 mm/memcontrol.c if (!memcg->vmstats_percpu)
memcg 5130 mm/memcontrol.c if (alloc_mem_cgroup_per_node_info(memcg, node))
memcg 5133 mm/memcontrol.c if (memcg_wb_domain_init(memcg, GFP_KERNEL))
memcg 5136 mm/memcontrol.c INIT_WORK(&memcg->high_work, high_work_func);
memcg 5137 mm/memcontrol.c memcg->last_scanned_node = MAX_NUMNODES;
memcg 5138 mm/memcontrol.c INIT_LIST_HEAD(&memcg->oom_notify);
memcg 5139 mm/memcontrol.c mutex_init(&memcg->thresholds_lock);
memcg 5140 mm/memcontrol.c spin_lock_init(&memcg->move_lock);
memcg 5141 mm/memcontrol.c vmpressure_init(&memcg->vmpressure);
memcg 5142 mm/memcontrol.c INIT_LIST_HEAD(&memcg->event_list);
memcg 5143 mm/memcontrol.c spin_lock_init(&memcg->event_list_lock);
memcg 5144 mm/memcontrol.c memcg->socket_pressure = jiffies;
memcg 5146 mm/memcontrol.c memcg->kmemcg_id = -1;
memcg 5149 mm/memcontrol.c INIT_LIST_HEAD(&memcg->cgwb_list);
memcg 5151 mm/memcontrol.c memcg->cgwb_frn[i].done =
memcg 5155 mm/memcontrol.c spin_lock_init(&memcg->deferred_split_queue.split_queue_lock);
memcg 5156 mm/memcontrol.c INIT_LIST_HEAD(&memcg->deferred_split_queue.split_queue);
memcg 5157 mm/memcontrol.c memcg->deferred_split_queue.split_queue_len = 0;
memcg 5159 mm/memcontrol.c idr_replace(&mem_cgroup_idr, memcg, memcg->id.id);
memcg 5160 mm/memcontrol.c return memcg;
memcg 5162 mm/memcontrol.c mem_cgroup_id_remove(memcg);
memcg 5163 mm/memcontrol.c __mem_cgroup_free(memcg);
memcg 5171 mm/memcontrol.c struct mem_cgroup *memcg;
memcg 5174 mm/memcontrol.c memcg = mem_cgroup_alloc();
memcg 5175 mm/memcontrol.c if (IS_ERR(memcg))
memcg 5176 mm/memcontrol.c return ERR_CAST(memcg);
memcg 5178 mm/memcontrol.c memcg->high = PAGE_COUNTER_MAX;
memcg 5179 mm/memcontrol.c memcg->soft_limit = PAGE_COUNTER_MAX;
memcg 5181 mm/memcontrol.c memcg->swappiness = mem_cgroup_swappiness(parent);
memcg 5182 mm/memcontrol.c memcg->oom_kill_disable = parent->oom_kill_disable;
memcg 5185 mm/memcontrol.c memcg->use_hierarchy = true;
memcg 5186 mm/memcontrol.c page_counter_init(&memcg->memory, &parent->memory);
memcg 5187 mm/memcontrol.c page_counter_init(&memcg->swap, &parent->swap);
memcg 5188 mm/memcontrol.c page_counter_init(&memcg->memsw, &parent->memsw);
memcg 5189 mm/memcontrol.c page_counter_init(&memcg->kmem, &parent->kmem);
memcg 5190 mm/memcontrol.c page_counter_init(&memcg->tcpmem, &parent->tcpmem);
memcg 5192 mm/memcontrol.c page_counter_init(&memcg->memory, NULL);
memcg 5193 mm/memcontrol.c page_counter_init(&memcg->swap, NULL);
memcg 5194 mm/memcontrol.c page_counter_init(&memcg->memsw, NULL);
memcg 5195 mm/memcontrol.c page_counter_init(&memcg->kmem, NULL);
memcg 5196 mm/memcontrol.c page_counter_init(&memcg->tcpmem, NULL);
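
Aside: the paired page_counter_init(&memcg->memory, &parent->memory) versus page_counter_init(..., NULL) entries above are what make charging hierarchical: a counter initialized with a parent propagates every charge upward, and any ancestor may refuse. A stand-alone toy model of that behavior (deliberately simplified; the kernel's page_counter is lockless and more careful):

/* Toy model of a hierarchical page counter: charging a child
 * also charges every ancestor, and any level can refuse. */
#include <stdbool.h>
#include <stdio.h>

struct page_counter {
	unsigned long usage, max;
	struct page_counter *parent;	/* NULL for the root, as in css_alloc */
};

static bool try_charge(struct page_counter *pc, unsigned long n)
{
	struct page_counter *c, *fail = NULL;

	for (c = pc; c; c = c->parent) {
		if (c->usage + n > c->max) { fail = c; break; }
		c->usage += n;
	}
	if (!fail)
		return true;
	for (c = pc; c != fail; c = c->parent)	/* roll back partial charges */
		c->usage -= n;
	return false;
}

int main(void)
{
	struct page_counter root = { 0, 1024, NULL };
	struct page_counter child = { 0, 2048, &root };

	printf("%d\n", try_charge(&child, 512));   /* 1: fits everywhere */
	printf("%d\n", try_charge(&child, 1024));  /* 0: root limit blocks it */
	printf("root usage %lu\n", root.usage);    /* 512: rollback worked */
	return 0;
}
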
memcg 5209 mm/memcontrol.c INIT_LIST_HEAD(&memcg->kmem_caches);
memcg 5211 mm/memcontrol.c root_mem_cgroup = memcg;
memcg 5212 mm/memcontrol.c return &memcg->css;
memcg 5215 mm/memcontrol.c error = memcg_online_kmem(memcg);
memcg 5222 mm/memcontrol.c return &memcg->css;
memcg 5224 mm/memcontrol.c mem_cgroup_id_remove(memcg);
memcg 5225 mm/memcontrol.c mem_cgroup_free(memcg);
memcg 5231 mm/memcontrol.c struct mem_cgroup *memcg = mem_cgroup_from_css(css);
memcg 5238 mm/memcontrol.c if (memcg_alloc_shrinker_maps(memcg)) {
memcg 5239 mm/memcontrol.c mem_cgroup_id_remove(memcg);
memcg 5244 mm/memcontrol.c refcount_set(&memcg->id.ref, 1);
memcg 5251 mm/memcontrol.c struct mem_cgroup *memcg = mem_cgroup_from_css(css);
memcg 5259 mm/memcontrol.c spin_lock(&memcg->event_list_lock);
memcg 5260 mm/memcontrol.c list_for_each_entry_safe(event, tmp, &memcg->event_list, list) {
memcg 5264 mm/memcontrol.c spin_unlock(&memcg->event_list_lock);
memcg 5266 mm/memcontrol.c page_counter_set_min(&memcg->memory, 0);
memcg 5267 mm/memcontrol.c page_counter_set_low(&memcg->memory, 0);
memcg 5269 mm/memcontrol.c memcg_offline_kmem(memcg);
memcg 5270 mm/memcontrol.c wb_memcg_offline(memcg);
memcg 5272 mm/memcontrol.c drain_all_stock(memcg);
memcg 5274 mm/memcontrol.c mem_cgroup_id_put(memcg);
memcg 5279 mm/memcontrol.c struct mem_cgroup *memcg = mem_cgroup_from_css(css);
memcg 5281 mm/memcontrol.c invalidate_reclaim_iterators(memcg);
memcg 5286 mm/memcontrol.c struct mem_cgroup *memcg = mem_cgroup_from_css(css);
memcg 5291 mm/memcontrol.c wb_wait_for_completion(&memcg->cgwb_frn[i].done);
memcg 5296 mm/memcontrol.c if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_active)
memcg 5299 mm/memcontrol.c vmpressure_cleanup(&memcg->vmpressure);
memcg 5300 mm/memcontrol.c cancel_work_sync(&memcg->high_work);
memcg 5301 mm/memcontrol.c mem_cgroup_remove_from_trees(memcg);
memcg 5302 mm/memcontrol.c memcg_free_shrinker_maps(memcg);
memcg 5303 mm/memcontrol.c memcg_free_kmem(memcg);
memcg 5304 mm/memcontrol.c mem_cgroup_free(memcg);
memcg 5322 mm/memcontrol.c struct mem_cgroup *memcg = mem_cgroup_from_css(css);
memcg 5324 mm/memcontrol.c page_counter_set_max(&memcg->memory, PAGE_COUNTER_MAX);
memcg 5325 mm/memcontrol.c page_counter_set_max(&memcg->swap, PAGE_COUNTER_MAX);
memcg 5326 mm/memcontrol.c page_counter_set_max(&memcg->memsw, PAGE_COUNTER_MAX);
memcg 5327 mm/memcontrol.c page_counter_set_max(&memcg->kmem, PAGE_COUNTER_MAX);
memcg 5328 mm/memcontrol.c page_counter_set_max(&memcg->tcpmem, PAGE_COUNTER_MAX);
memcg 5329 mm/memcontrol.c page_counter_set_min(&memcg->memory, 0);
memcg 5330 mm/memcontrol.c page_counter_set_low(&memcg->memory, 0);
memcg 5331 mm/memcontrol.c memcg->high = PAGE_COUNTER_MAX;
memcg 5332 mm/memcontrol.c memcg->soft_limit = PAGE_COUNTER_MAX;
memcg 5333 mm/memcontrol.c memcg_wb_domain_size_changed(memcg);
memcg 5803 mm/memcontrol.c struct mem_cgroup *memcg = NULL; /* unneeded init to make gcc happy */
memcg 5824 mm/memcontrol.c memcg = mem_cgroup_from_css(css);
memcg 5834 mm/memcontrol.c move_flags = READ_ONCE(memcg->move_charge_at_immigrate);
memcg 5840 mm/memcontrol.c VM_BUG_ON(from == memcg);
memcg 5856 mm/memcontrol.c mc.to = memcg;
memcg 6077 mm/memcontrol.c struct mem_cgroup *memcg = mem_cgroup_from_css(css);
memcg 6079 mm/memcontrol.c return (u64)page_counter_read(&memcg->memory) * PAGE_SIZE;
memcg 6091 mm/memcontrol.c struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
memcg 6100 mm/memcontrol.c page_counter_set_min(&memcg->memory, min);
memcg 6114 mm/memcontrol.c struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
memcg 6123 mm/memcontrol.c page_counter_set_low(&memcg->memory, low);
memcg 6136 mm/memcontrol.c struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
memcg 6146 mm/memcontrol.c memcg->high = high;
memcg 6148 mm/memcontrol.c nr_pages = page_counter_read(&memcg->memory);
memcg 6150 mm/memcontrol.c try_to_free_mem_cgroup_pages(memcg, nr_pages - high,
memcg 6153 mm/memcontrol.c memcg_wb_domain_size_changed(memcg);
memcg 6166 mm/memcontrol.c struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
memcg 6177 mm/memcontrol.c xchg(&memcg->memory.max, max);
memcg 6180 mm/memcontrol.c unsigned long nr_pages = page_counter_read(&memcg->memory);
memcg 6191 mm/memcontrol.c drain_all_stock(memcg);
memcg 6197 mm/memcontrol.c if (!try_to_free_mem_cgroup_pages(memcg, nr_pages - max,
memcg 6203 mm/memcontrol.c memcg_memory_event(memcg, MEMCG_OOM);
memcg 6204 mm/memcontrol.c if (!mem_cgroup_out_of_memory(memcg, GFP_KERNEL, 0))
memcg 6208 mm/memcontrol.c memcg_wb_domain_size_changed(memcg);
memcg 6224 mm/memcontrol.c struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
memcg 6226 mm/memcontrol.c __memory_events_show(m, memcg->memory_events);
memcg 6232 mm/memcontrol.c struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
memcg 6234 mm/memcontrol.c __memory_events_show(m, memcg->memory_events_local);
memcg 6240 mm/memcontrol.c struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
memcg 6243 mm/memcontrol.c buf = memory_stat_format(memcg);
memcg 6253 mm/memcontrol.c struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
memcg 6255 mm/memcontrol.c seq_printf(m, "%d\n", memcg->oom_group);
memcg 6263 mm/memcontrol.c struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
memcg 6277 mm/memcontrol.c memcg->oom_group = oom_group;
memcg 6425 mm/memcontrol.c struct mem_cgroup *memcg)
memcg 6437 mm/memcontrol.c if (memcg == root)
memcg 6440 mm/memcontrol.c usage = page_counter_read(&memcg->memory);
memcg 6444 mm/memcontrol.c emin = memcg->memory.min;
memcg 6445 mm/memcontrol.c elow = memcg->memory.low;
memcg 6447 mm/memcontrol.c parent = parent_mem_cgroup(memcg);
memcg 6460 mm/memcontrol.c min_usage = min(usage, memcg->memory.min);
memcg 6474 mm/memcontrol.c low_usage = min(usage, memcg->memory.low);
memcg 6484 mm/memcontrol.c memcg->memory.emin = emin;
memcg 6485 mm/memcontrol.c memcg->memory.elow = elow;
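
Aside: the emin/elow entries above come from mem_cgroup_protected(), which clamps a child's memory.min/memory.low by what its parent can actually pass down; when siblings collectively claim more than the parent has, protection is distributed roughly in proportion to usage. A simplified model of that clamp (effective_protection() is a hypothetical name, and the kernel's exact rounding and leftover distribution are more involved):

/* Simplified model of effective memory protection (memory.min/low):
 * a child cannot be protected beyond what its parent passes down, and
 * an over-committed parent distributes protection by usage share. */
#include <stdio.h>

static unsigned long min2(unsigned long a, unsigned long b)
{
	return a < b ? a : b;
}

static unsigned long effective_protection(unsigned long usage,
					  unsigned long setting,
					  unsigned long parent_effective,
					  unsigned long siblings_protected)
{
	unsigned long protected = min2(usage, setting);

	if (siblings_protected > parent_effective)	/* overcommit: scale */
		return protected * parent_effective / siblings_protected;
	return min2(protected, parent_effective);
}

int main(void)
{
	/* parent passes down 100 pages; children claim 80 and 60 */
	printf("%lu\n", effective_protection(200, 80, 100, 140));  /* 57 */
	printf("%lu\n", effective_protection(200, 60, 100, 140));  /* 42 */
	return 0;
}
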
memcg 6517 mm/memcontrol.c struct mem_cgroup *memcg = NULL;
memcg 6541 mm/memcontrol.c memcg = mem_cgroup_from_id(id);
memcg 6542 mm/memcontrol.c if (memcg && !css_tryget_online(&memcg->css))
memcg 6543 mm/memcontrol.c memcg = NULL;
memcg 6548 mm/memcontrol.c if (!memcg)
memcg 6549 mm/memcontrol.c memcg = get_mem_cgroup_from_mm(mm);
memcg 6551 mm/memcontrol.c ret = try_charge(memcg, gfp_mask, nr_pages);
memcg 6553 mm/memcontrol.c css_put(&memcg->css);
memcg 6555 mm/memcontrol.c *memcgp = memcg;
memcg 6563 mm/memcontrol.c struct mem_cgroup *memcg;
memcg 6567 mm/memcontrol.c memcg = *memcgp;
memcg 6568 mm/memcontrol.c mem_cgroup_throttle_swaprate(memcg, page_to_nid(page), gfp_mask);
memcg 6589 mm/memcontrol.c void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg,
memcg 6604 mm/memcontrol.c if (!memcg)
memcg 6607 mm/memcontrol.c commit_charge(page, memcg, lrucare);
memcg 6610 mm/memcontrol.c mem_cgroup_charge_statistics(memcg, page, compound, nr_pages);
memcg 6611 mm/memcontrol.c memcg_check_events(memcg, page);
memcg 6633 mm/memcontrol.c void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg,
memcg 6645 mm/memcontrol.c if (!memcg)
memcg 6648 mm/memcontrol.c cancel_charge(memcg, nr_pages);
memcg 6652 mm/memcontrol.c struct mem_cgroup *memcg;
memcg 6672 mm/memcontrol.c if (!mem_cgroup_is_root(ug->memcg)) {
memcg 6673 mm/memcontrol.c page_counter_uncharge(&ug->memcg->memory, nr_pages);
memcg 6675 mm/memcontrol.c page_counter_uncharge(&ug->memcg->memsw, nr_pages);
memcg 6677 mm/memcontrol.c page_counter_uncharge(&ug->memcg->kmem, ug->nr_kmem);
memcg 6678 mm/memcontrol.c memcg_oom_recover(ug->memcg);
memcg 6682 mm/memcontrol.c __mod_memcg_state(ug->memcg, MEMCG_RSS, -ug->nr_anon);
memcg 6683 mm/memcontrol.c __mod_memcg_state(ug->memcg, MEMCG_CACHE, -ug->nr_file);
memcg 6684 mm/memcontrol.c __mod_memcg_state(ug->memcg, MEMCG_RSS_HUGE, -ug->nr_huge);
memcg 6685 mm/memcontrol.c __mod_memcg_state(ug->memcg, NR_SHMEM, -ug->nr_shmem);
memcg 6686 mm/memcontrol.c __count_memcg_events(ug->memcg, PGPGOUT, ug->pgpgout);
memcg 6687 mm/memcontrol.c __this_cpu_add(ug->memcg->vmstats_percpu->nr_page_events, nr_pages);
memcg 6688 mm/memcontrol.c memcg_check_events(ug->memcg, ug->dummy_page);
memcg 6691 mm/memcontrol.c if (!mem_cgroup_is_root(ug->memcg))
memcg 6692 mm/memcontrol.c css_put_many(&ug->memcg->css, nr_pages);
memcg 6710 mm/memcontrol.c if (ug->memcg != page->mem_cgroup) {
memcg 6711 mm/memcontrol.c if (ug->memcg) {
memcg 6715 mm/memcontrol.c ug->memcg = page->mem_cgroup;
memcg 6763 mm/memcontrol.c if (ug.memcg)
memcg 6818 mm/memcontrol.c struct mem_cgroup *memcg;
memcg 6837 mm/memcontrol.c memcg = oldpage->mem_cgroup;
memcg 6838 mm/memcontrol.c if (!memcg)
memcg 6845 mm/memcontrol.c page_counter_charge(&memcg->memory, nr_pages);
memcg 6847 mm/memcontrol.c page_counter_charge(&memcg->memsw, nr_pages);
memcg 6848 mm/memcontrol.c css_get_many(&memcg->css, nr_pages);
memcg 6850 mm/memcontrol.c commit_charge(newpage, memcg, false);
memcg 6853 mm/memcontrol.c mem_cgroup_charge_statistics(memcg, newpage, compound, nr_pages);
memcg 6854 mm/memcontrol.c memcg_check_events(memcg, newpage);
memcg 6863 mm/memcontrol.c struct mem_cgroup *memcg;
memcg 6873 mm/memcontrol.c memcg = mem_cgroup_from_task(current);
memcg 6874 mm/memcontrol.c if (memcg == root_mem_cgroup)
memcg 6876 mm/memcontrol.c if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !memcg->tcpmem_active)
memcg 6878 mm/memcontrol.c if (css_tryget_online(&memcg->css))
memcg 6879 mm/memcontrol.c sk->sk_memcg = memcg;
memcg 6898 mm/memcontrol.c bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
memcg 6905 mm/memcontrol.c if (page_counter_try_charge(&memcg->tcpmem, nr_pages, &fail)) {
memcg 6906 mm/memcontrol.c memcg->tcpmem_pressure = 0;
memcg 6909 mm/memcontrol.c page_counter_charge(&memcg->tcpmem, nr_pages);
memcg 6910 mm/memcontrol.c memcg->tcpmem_pressure = 1;
memcg 6918 mm/memcontrol.c mod_memcg_state(memcg, MEMCG_SOCK, nr_pages);
memcg 6920 mm/memcontrol.c if (try_charge(memcg, gfp_mask, nr_pages) == 0)
memcg 6923 mm/memcontrol.c try_charge(memcg, gfp_mask|__GFP_NOFAIL, nr_pages);
memcg 6932 mm/memcontrol.c void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
memcg 6935 mm/memcontrol.c page_counter_uncharge(&memcg->tcpmem, nr_pages);
memcg 6939 mm/memcontrol.c mod_memcg_state(memcg, MEMCG_SOCK, -nr_pages);
memcg 6941 mm/memcontrol.c refill_stock(memcg, nr_pages);
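
Aside: the ug-> entries above come from the uncharge_gather pattern in mm/memcontrol.c: release paths feed pages one at a time, but counter updates are accumulated per memcg and flushed only when the owning memcg changes (plus one final flush). The same idiom stripped down to plain C (names here are stand-ins, not the kernel's):

/* The uncharge_gather idiom: accumulate per-owner totals and flush
 * once per owner change instead of once per item. */
#include <stdio.h>

struct gather {
	int owner;		/* stands in for ug->memcg */
	unsigned long nr;	/* stands in for ug->nr_anon etc. */
};

static void flush(struct gather *ug)
{
	if (ug->nr)
		printf("owner %d: uncharge %lu pages in one batch\n",
		       ug->owner, ug->nr);
	ug->nr = 0;
}

static void gather_one(struct gather *ug, int owner)
{
	if (ug->owner != owner) {	/* cf. "if (ug->memcg != page->mem_cgroup)" */
		flush(ug);
		ug->owner = owner;
	}
	ug->nr++;
}

int main(void)
{
	struct gather ug = { -1, 0 };
	int pages[] = { 1, 1, 1, 2, 2, 1 };

	for (int i = 0; i < 6; i++)
		gather_one(&ug, pages[i]);
	flush(&ug);		/* cf. the final "if (ug.memcg)" flush */
	return 0;
}
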
memcg 7007 mm/memcontrol.c static struct mem_cgroup *mem_cgroup_id_get_online(struct mem_cgroup *memcg)
memcg 7009 mm/memcontrol.c while (!refcount_inc_not_zero(&memcg->id.ref)) {
memcg 7014 mm/memcontrol.c if (WARN_ON_ONCE(memcg == root_mem_cgroup)) {
memcg 7018 mm/memcontrol.c memcg = parent_mem_cgroup(memcg);
memcg 7019 mm/memcontrol.c if (!memcg)
memcg 7020 mm/memcontrol.c memcg = root_mem_cgroup;
memcg 7022 mm/memcontrol.c return memcg;
memcg 7034 mm/memcontrol.c struct mem_cgroup *memcg, *swap_memcg;
memcg 7044 mm/memcontrol.c memcg = page->mem_cgroup;
memcg 7047 mm/memcontrol.c if (!memcg)
memcg 7055 mm/memcontrol.c swap_memcg = mem_cgroup_id_get_online(memcg);
memcg 7067 mm/memcontrol.c if (!mem_cgroup_is_root(memcg))
memcg 7068 mm/memcontrol.c page_counter_uncharge(&memcg->memory, nr_entries);
memcg 7070 mm/memcontrol.c if (memcg != swap_memcg) {
memcg 7073 mm/memcontrol.c page_counter_uncharge(&memcg->memsw, nr_entries);
memcg 7083 mm/memcontrol.c mem_cgroup_charge_statistics(memcg, page, PageTransHuge(page),
memcg 7085 mm/memcontrol.c memcg_check_events(memcg, page);
memcg 7087 mm/memcontrol.c if (!mem_cgroup_is_root(memcg))
memcg 7088 mm/memcontrol.c css_put_many(&memcg->css, nr_entries);
memcg 7104 mm/memcontrol.c struct mem_cgroup *memcg;
memcg 7110 mm/memcontrol.c memcg = page->mem_cgroup;
memcg 7113 mm/memcontrol.c if (!memcg)
memcg 7117 mm/memcontrol.c memcg_memory_event(memcg, MEMCG_SWAP_FAIL);
memcg 7121 mm/memcontrol.c memcg = mem_cgroup_id_get_online(memcg);
memcg 7123 mm/memcontrol.c if (!mem_cgroup_is_root(memcg) &&
memcg 7124 mm/memcontrol.c !page_counter_try_charge(&memcg->swap, nr_pages, &counter)) {
memcg 7125 mm/memcontrol.c memcg_memory_event(memcg, MEMCG_SWAP_MAX);
memcg 7126 mm/memcontrol.c memcg_memory_event(memcg, MEMCG_SWAP_FAIL);
memcg 7127 mm/memcontrol.c mem_cgroup_id_put(memcg);
memcg 7133 mm/memcontrol.c mem_cgroup_id_get_many(memcg, nr_pages - 1);
memcg 7134 mm/memcontrol.c oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg), nr_pages);
memcg 7136 mm/memcontrol.c mod_memcg_state(memcg, MEMCG_SWAP, nr_pages);
memcg 7148 mm/memcontrol.c struct mem_cgroup *memcg;
memcg 7156 mm/memcontrol.c memcg = mem_cgroup_from_id(id);
memcg 7157 mm/memcontrol.c if (memcg) {
memcg 7158 mm/memcontrol.c if (!mem_cgroup_is_root(memcg)) {
memcg 7160 mm/memcontrol.c page_counter_uncharge(&memcg->swap, nr_pages);
memcg 7162 mm/memcontrol.c page_counter_uncharge(&memcg->memsw, nr_pages);
memcg 7164 mm/memcontrol.c mod_memcg_state(memcg, MEMCG_SWAP, -nr_pages);
memcg 7165 mm/memcontrol.c mem_cgroup_id_put_many(memcg, nr_pages);
memcg 7170 mm/memcontrol.c long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
memcg 7176 mm/memcontrol.c for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg))
memcg 7178 mm/memcontrol.c READ_ONCE(memcg->swap.max) -
memcg 7179 mm/memcontrol.c page_counter_read(&memcg->swap));
memcg 7185 mm/memcontrol.c struct mem_cgroup *memcg;
memcg 7194 mm/memcontrol.c memcg = page->mem_cgroup;
memcg 7195 mm/memcontrol.c if (!memcg)
memcg 7198 mm/memcontrol.c for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg))
memcg 7199 mm/memcontrol.c if (page_counter_read(&memcg->swap) * 2 >= memcg->swap.max)
memcg 7225 mm/memcontrol.c struct mem_cgroup *memcg = mem_cgroup_from_css(css);
memcg 7227 mm/memcontrol.c return (u64)page_counter_read(&memcg->swap) * PAGE_SIZE;
memcg 7239 mm/memcontrol.c struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
memcg 7248 mm/memcontrol.c xchg(&memcg->swap.max, max);
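
Aside: mem_cgroup_get_nr_swap_pages() above walks from the memcg up to the root, and the usable swap headroom is the tightest max-minus-usage along that path, further capped by globally free swap. The walk in isolation (toy types, not the kernel's):

/* Swap headroom as the minimum of (max - usage) over all ancestors,
 * mirroring the mem_cgroup_get_nr_swap_pages() loop. */
#include <stdio.h>

struct cg {
	long swap_usage, swap_max;
	struct cg *parent;
};

static long nr_swap_pages(struct cg *cg, long global_free)
{
	long nr = global_free;

	for (; cg; cg = cg->parent) {
		long room = cg->swap_max - cg->swap_usage;
		if (room < nr)
			nr = room;
	}
	return nr;
}

int main(void)
{
	struct cg root = { 100, 1000, NULL };
	struct cg leaf = { 10, 50, &root };

	/* leaf allows 40 more, root allows 900, 500 free globally -> 40 */
	printf("%ld\n", nr_swap_pages(&leaf, 500));
	return 0;
}
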
memcg 7255 mm/memcontrol.c struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
memcg 7258 mm/memcontrol.c atomic_long_read(&memcg->memory_events[MEMCG_SWAP_MAX]));
memcg 7260 mm/memcontrol.c atomic_long_read(&memcg->memory_events[MEMCG_SWAP_FAIL]));
memcg 2329 mm/memory.c struct mem_cgroup *memcg;
memcg 2348 mm/memory.c if (mem_cgroup_try_charge_delay(new_page, mm, GFP_KERNEL, &memcg, false))
memcg 2383 mm/memory.c mem_cgroup_commit_charge(new_page, memcg, false, false);
memcg 2422 mm/memory.c mem_cgroup_cancel_charge(new_page, memcg, false);
memcg 2755 mm/memory.c struct mem_cgroup *memcg;
memcg 2861 mm/memory.c &memcg, false)) {
memcg 2908 mm/memory.c mem_cgroup_commit_charge(page, memcg, false, false);
memcg 2912 mm/memory.c mem_cgroup_commit_charge(page, memcg, true, false);
memcg 2948 mm/memory.c mem_cgroup_cancel_charge(page, memcg, false);
memcg 2969 mm/memory.c struct mem_cgroup *memcg;
memcg 3022 mm/memory.c if (mem_cgroup_try_charge_delay(page, vma->vm_mm, GFP_KERNEL, &memcg,
memcg 3049 mm/memory.c mem_cgroup_cancel_charge(page, memcg, false);
memcg 3056 mm/memory.c mem_cgroup_commit_charge(page, memcg, false, false);
memcg 3067 mm/memory.c mem_cgroup_cancel_charge(page, memcg, false);
memcg 3283 mm/memory.c vm_fault_t alloc_set_pte(struct vm_fault *vmf, struct mem_cgroup *memcg,
memcg 3294 mm/memory.c VM_BUG_ON_PAGE(memcg, page);
memcg 3319 mm/memory.c mem_cgroup_commit_charge(page, memcg, false, false);
memcg 3368 mm/memory.c ret = alloc_set_pte(vmf, vmf->memcg, page);
memcg 3529 mm/memory.c &vmf->memcg, false)) {
memcg 3550 mm/memory.c mem_cgroup_cancel_charge(vmf->cow_page, vmf->memcg, false);
memcg 2711 mm/migrate.c struct mem_cgroup *memcg;
memcg 2758 mm/migrate.c if (mem_cgroup_try_charge(page, vma->vm_mm, GFP_KERNEL, &memcg, false))
memcg 2788 mm/migrate.c mem_cgroup_cancel_charge(page, memcg, false);
memcg 2794 mm/migrate.c mem_cgroup_cancel_charge(page, memcg, false);
memcg 2804 mm/migrate.c mem_cgroup_cancel_charge(page, memcg, false);
memcg 2810 mm/migrate.c mem_cgroup_commit_charge(page, memcg, false, false);
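
Aside: the mm/memory.c and mm/migrate.c call sites above all follow the same three-phase protocol: try_charge reserves against a memcg, commit binds the page once it is safely mapped, and cancel unwinds the reservation on any failure path. A stub harness showing just the call order (every type and callee here is a fake stand-in, not the kernel API):

/* Call-order sketch of the try/commit/cancel charge protocol used by
 * the fault handlers; all types and callees are stubs. */
#include <stdbool.h>
#include <stdio.h>

struct page { int dummy; };
struct mem_cgroup { int dummy; };

static struct mem_cgroup the_memcg;

static bool mem_cgroup_try_charge(struct page *p, struct mem_cgroup **out)
{
	*out = &the_memcg;		/* reserve pages against the counter */
	puts("try_charge");
	return true;
}

static void mem_cgroup_commit_charge(struct page *p, struct mem_cgroup *m)
{
	puts("commit_charge");		/* bind page to memcg, update stats */
}

static void mem_cgroup_cancel_charge(struct page *p, struct mem_cgroup *m)
{
	puts("cancel_charge");		/* give the reservation back */
}

static bool map_into_page_tables(struct page *p)
{
	return false;			/* pretend the mapping step failed */
}

int main(void)
{
	struct page page;
	struct mem_cgroup *memcg;

	if (!mem_cgroup_try_charge(&page, &memcg))
		return 1;
	if (map_into_page_tables(&page))
		mem_cgroup_commit_charge(&page, memcg);
	else
		mem_cgroup_cancel_charge(&page, memcg);
	return 0;
}
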
memcg 69 mm/oom_kill.c return oc->memcg != NULL;
memcg 261 mm/oom_kill.c oc->totalpages = mem_cgroup_get_max(oc->memcg) ?: 1;
memcg 368 mm/oom_kill.c mem_cgroup_scan_tasks(oc->memcg, oom_evaluate_task, oc);
memcg 429 mm/oom_kill.c mem_cgroup_scan_tasks(oc->memcg, dump_task, oc);
memcg 447 mm/oom_kill.c mem_cgroup_print_oom_context(oc->memcg, victim);
memcg 462 mm/oom_kill.c mem_cgroup_print_oom_meminfo(oc->memcg);
memcg 981 mm/oom_kill.c oom_group = mem_cgroup_get_oom_group(victim, oc->memcg);
memcg 1126 mm/oom_kill.c .memcg = NULL,
memcg 2717 mm/page-writeback.c struct mem_cgroup *memcg;
memcg 2721 mm/page-writeback.c memcg = lock_page_memcg(page);
memcg 2760 mm/page-writeback.c __unlock_page_memcg(memcg);
memcg 3795 mm/page_alloc.c .memcg = NULL,
memcg 749 mm/rmap.c struct mem_cgroup *memcg;
memcg 819 mm/rmap.c struct mem_cgroup *memcg = pra->memcg;
memcg 821 mm/rmap.c if (!mm_match_cgroup(vma->vm_mm, memcg))
memcg 839 mm/rmap.c struct mem_cgroup *memcg,
memcg 845 mm/rmap.c .memcg = memcg,
memcg 871 mm/rmap.c if (memcg) {
memcg 1634 mm/shmem.c struct mem_cgroup *memcg;
memcg 1679 mm/shmem.c error = mem_cgroup_try_charge_delay(page, charge_mm, gfp, &memcg,
memcg 1695 mm/shmem.c mem_cgroup_cancel_charge(page, memcg, false);
memcg 1702 mm/shmem.c mem_cgroup_commit_charge(page, memcg, true, false);
memcg 1749 mm/shmem.c struct mem_cgroup *memcg;
memcg 1871 mm/shmem.c error = mem_cgroup_try_charge_delay(page, charge_mm, gfp, &memcg,
memcg 1878 mm/shmem.c mem_cgroup_cancel_charge(page, memcg,
memcg 1882 mm/shmem.c mem_cgroup_commit_charge(page, memcg, false,
memcg 2321 mm/shmem.c struct mem_cgroup *memcg;
memcg 2371 mm/shmem.c ret = mem_cgroup_try_charge_delay(page, dst_mm, gfp, &memcg, false);
memcg 2380 mm/shmem.c mem_cgroup_commit_charge(page, memcg, false, false);
memcg 2431 mm/shmem.c mem_cgroup_cancel_charge(page, memcg, false);
memcg 83 mm/slab.h struct mem_cgroup *memcg;
memcg 338 mm/slab.h return READ_ONCE(s->memcg_params.memcg);
memcg 351 mm/slab.h struct mem_cgroup *memcg;
memcg 356 mm/slab.h memcg = READ_ONCE(s->memcg_params.memcg);
memcg 357 mm/slab.h while (memcg && !css_tryget_online(&memcg->css))
memcg 358 mm/slab.h memcg = parent_mem_cgroup(memcg);
memcg 361 mm/slab.h if (unlikely(!memcg || mem_cgroup_is_root(memcg))) {
memcg 368 mm/slab.h ret = memcg_kmem_charge_memcg(page, gfp, order, memcg);
memcg 372 mm/slab.h lruvec = mem_cgroup_lruvec(page_pgdat(page), memcg);
memcg 377 mm/slab.h css_put_many(&memcg->css, 1 << order);
memcg 379 mm/slab.h css_put(&memcg->css);
memcg 390 mm/slab.h struct mem_cgroup *memcg;
memcg 394 mm/slab.h memcg = READ_ONCE(s->memcg_params.memcg);
memcg 395 mm/slab.h if (likely(!mem_cgroup_is_root(memcg))) {
memcg 396 mm/slab.h lruvec = mem_cgroup_lruvec(page_pgdat(page), memcg);
memcg 398 mm/slab.h memcg_kmem_uncharge_memcg(page, order, memcg);
memcg 409 mm/slab.h extern void memcg_link_cache(struct kmem_cache *s, struct mem_cgroup *memcg);
memcg 462 mm/slab.h struct mem_cgroup *memcg)
memcg 184 mm/slab_common.c mem_cgroup_put(s->memcg_params.memcg);
memcg 185 mm/slab_common.c WRITE_ONCE(s->memcg_params.memcg, NULL);
memcg 238 mm/slab_common.c void memcg_link_cache(struct kmem_cache *s, struct mem_cgroup *memcg)
memcg 243 mm/slab_common.c css_get(&memcg->css);
memcg 244 mm/slab_common.c s->memcg_params.memcg = memcg;
memcg 248 mm/slab_common.c &s->memcg_params.memcg->kmem_caches);
memcg 383 mm/slab_common.c struct mem_cgroup *memcg, struct kmem_cache *root_cache)
memcg 413 mm/slab_common.c memcg_link_cache(s, memcg);
memcg 640 mm/slab_common.c void memcg_create_kmem_cache(struct mem_cgroup *memcg,
memcg 644 mm/slab_common.c struct cgroup_subsys_state *css = &memcg->css;
memcg 659 mm/slab_common.c if (memcg->kmem_state != KMEM_ONLINE)
memcg 662 mm/slab_common.c idx = memcg_cache_id(memcg);
memcg 684 mm/slab_common.c root_cache->ctor, memcg, root_cache);
memcg 792 mm/slab_common.c void memcg_deactivate_kmem_caches(struct mem_cgroup *memcg,
memcg 800 mm/slab_common.c idx = memcg_cache_id(memcg);
memcg 817 mm/slab_common.c list_for_each_entry(s, &memcg->kmem_caches,
memcg 819 mm/slab_common.c WRITE_ONCE(s->memcg_params.memcg, parent);
memcg 820 mm/slab_common.c css_put(&memcg->css);
memcg 824 mm/slab_common.c list_splice_init(&memcg->kmem_caches,
memcg 1520 mm/slab_common.c struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
memcg 1523 mm/slab_common.c return seq_list_start(&memcg->kmem_caches, *pos);
memcg 1528 mm/slab_common.c struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
memcg 1530 mm/slab_common.c return seq_list_next(p, &memcg->kmem_caches, pos);
memcg 1542 mm/slab_common.c struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
memcg 1544 mm/slab_common.c if (p == memcg->kmem_caches.next)
memcg 1621 mm/slab_common.c css = &c->memcg_params.memcg->css;
memcg 1857 mm/swapfile.c struct mem_cgroup *memcg;
memcg 1868 mm/swapfile.c &memcg, false)) {
memcg 1875 mm/swapfile.c mem_cgroup_cancel_charge(page, memcg, false);
memcg 1887 mm/swapfile.c mem_cgroup_commit_charge(page, memcg, true, false);
memcg 1890 mm/swapfile.c mem_cgroup_commit_charge(page, memcg, false, false);
memcg 3740 mm/swapfile.c void mem_cgroup_throttle_swaprate(struct mem_cgroup *memcg, int node,
memcg 3744 mm/swapfile.c if (!(gfp_mask & __GFP_IO) || !memcg)
memcg 28 mm/userfaultfd.c struct mem_cgroup *memcg;
memcg 69 mm/userfaultfd.c if (mem_cgroup_try_charge(page, dst_mm, GFP_KERNEL, &memcg, false))
memcg 92 mm/userfaultfd.c mem_cgroup_commit_charge(page, memcg, false, false);
memcg 106 mm/userfaultfd.c mem_cgroup_cancel_charge(page, memcg, false);
memcg 78 mm/vmpressure.c struct mem_cgroup *memcg = mem_cgroup_from_css(css);
memcg 80 mm/vmpressure.c memcg = parent_mem_cgroup(memcg);
memcg 81 mm/vmpressure.c if (!memcg)
memcg 83 mm/vmpressure.c return memcg_to_vmpressure(memcg);
memcg 240 mm/vmpressure.c void vmpressure(gfp_t gfp, struct mem_cgroup *memcg, bool tree,
memcg 243 mm/vmpressure.c struct vmpressure *vmpr = memcg_to_vmpressure(memcg);
memcg 283 mm/vmpressure.c if (!memcg || memcg == root_mem_cgroup)
memcg 307 mm/vmpressure.c memcg->socket_pressure = jiffies + HZ;
memcg 323 mm/vmpressure.c void vmpressure_prio(gfp_t gfp, struct mem_cgroup *memcg, int prio)
memcg 339 mm/vmpressure.c vmpressure(gfp, memcg, true, vmpressure_win, 0);
memcg 362 mm/vmpressure.c int vmpressure_register_event(struct mem_cgroup *memcg,
memcg 365 mm/vmpressure.c struct vmpressure *vmpr = memcg_to_vmpressure(memcg);
memcg 425 mm/vmpressure.c void vmpressure_unregister_event(struct mem_cgroup *memcg,
memcg 428 mm/vmpressure.c struct vmpressure *vmpr = memcg_to_vmpressure(memcg);
memcg 262 mm/vmscan.c struct mem_cgroup *memcg = sc->target_mem_cgroup;
memcg 264 mm/vmscan.c if (!memcg)
memcg 274 mm/vmscan.c struct mem_cgroup *memcg,
memcg 279 mm/vmscan.c if (!memcg)
memcg 282 mm/vmscan.c mn = mem_cgroup_nodeinfo(memcg, pgdat->node_id);
memcg 287 mm/vmscan.c struct mem_cgroup *memcg)
memcg 291 mm/vmscan.c mn = mem_cgroup_nodeinfo(memcg, pgdat->node_id);
memcg 316 mm/vmscan.c struct mem_cgroup *memcg, bool congested)
memcg 321 mm/vmscan.c struct mem_cgroup *memcg)
memcg 594 mm/vmscan.c struct mem_cgroup *memcg, int priority)
memcg 600 mm/vmscan.c if (!mem_cgroup_online(memcg))
memcg 606 mm/vmscan.c map = rcu_dereference_protected(memcg->nodeinfo[nid]->shrinker_map,
memcg 615 mm/vmscan.c .memcg = memcg,
memcg 654 mm/vmscan.c memcg_set_shrinker_bit(memcg, nid, i);
memcg 669 mm/vmscan.c struct mem_cgroup *memcg, int priority)
memcg 696 mm/vmscan.c struct mem_cgroup *memcg,
memcg 709 mm/vmscan.c if (!mem_cgroup_disabled() && !mem_cgroup_is_root(memcg))
memcg 710 mm/vmscan.c return shrink_slab_memcg(gfp_mask, nid, memcg, priority);
memcg 719 mm/vmscan.c .memcg = memcg,
memcg 748 mm/vmscan.c struct mem_cgroup *memcg = NULL;
memcg 751 mm/vmscan.c memcg = mem_cgroup_iter(NULL, NULL, NULL);
memcg 753 mm/vmscan.c freed += shrink_slab(GFP_KERNEL, nid, memcg, 0);
memcg 754 mm/vmscan.c } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)) != NULL);
memcg 2304 mm/vmscan.c static void get_scan_count(struct lruvec *lruvec, struct mem_cgroup *memcg,
memcg 2308 mm/vmscan.c int swappiness = mem_cgroup_swappiness(memcg);
memcg 2320 mm/vmscan.c if (!sc->may_swap || mem_cgroup_get_nr_swap_pages(memcg) <= 0) {
memcg 2465 mm/vmscan.c protection = mem_cgroup_protection(memcg,
memcg 2498 mm/vmscan.c unsigned long cgroup_size = mem_cgroup_size(memcg);
memcg 2522 mm/vmscan.c if (!scan && !mem_cgroup_online(memcg))
memcg 2537 mm/vmscan.c scan = mem_cgroup_online(memcg) ?
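
Aside: the get_scan_count() entries above turn mem_cgroup_protection() into scan pressure: the closer a cgroup's usage is to its protected amount, the smaller the share of each LRU list that gets scanned. A simplified model of that proportionality (scan_target() is a hypothetical name and this is not the kernel's exact arithmetic):

/* Simplified model of protection-aware scan targets: scale the LRU
 * scan goal down by the protected fraction of the cgroup's usage. */
#include <stdio.h>

static unsigned long scan_target(unsigned long lruvec_size,
				 unsigned long cgroup_size,
				 unsigned long protection)
{
	if (!protection)
		return lruvec_size;
	if (protection >= cgroup_size)
		return 0;		/* fully protected: skip this list */
	/* scan only the unprotected share of this LRU list */
	return lruvec_size - lruvec_size * protection / cgroup_size;
}

int main(void)
{
	printf("%lu\n", scan_target(1000, 4000, 0));	/* 1000: unprotected */
	printf("%lu\n", scan_target(1000, 4000, 1000)); /* 750: 1/4 protected */
	printf("%lu\n", scan_target(1000, 4000, 4000)); /* 0: fully protected */
	return 0;
}
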
memcg 2563 mm/vmscan.c static void shrink_node_memcg(struct pglist_data *pgdat, struct mem_cgroup *memcg,
memcg 2566 mm/vmscan.c struct lruvec *lruvec = mem_cgroup_lruvec(pgdat, memcg);
memcg 2576 mm/vmscan.c get_scan_count(lruvec, memcg, sc, nr, lru_pages);
memcg 2750 mm/vmscan.c static bool pgdat_memcg_congested(pg_data_t *pgdat, struct mem_cgroup *memcg)
memcg 2753 mm/vmscan.c (memcg && memcg_congested(pgdat, memcg));
memcg 2765 mm/vmscan.c struct mem_cgroup *memcg;
memcg 2772 mm/vmscan.c memcg = mem_cgroup_iter(root, NULL, NULL);
memcg 2778 mm/vmscan.c switch (mem_cgroup_protected(root, memcg)) {
memcg 2796 mm/vmscan.c memcg_memory_event(memcg, MEMCG_LOW);
memcg 2811 mm/vmscan.c shrink_node_memcg(pgdat, memcg, sc, &lru_pages);
memcg 2814 mm/vmscan.c shrink_slab(sc->gfp_mask, pgdat->node_id, memcg,
memcg 2818 mm/vmscan.c vmpressure(sc->gfp_mask, memcg, false,
memcg 2822 mm/vmscan.c } while ((memcg = mem_cgroup_iter(root, memcg, NULL)));
memcg 3040 mm/vmscan.c struct mem_cgroup *memcg;
memcg 3042 mm/vmscan.c memcg = mem_cgroup_iter(root_memcg, NULL, NULL);
memcg 3047 mm/vmscan.c lruvec = mem_cgroup_lruvec(pgdat, memcg);
memcg 3050 mm/vmscan.c } while ((memcg = mem_cgroup_iter(root_memcg, memcg, NULL)));
memcg 3310 mm/vmscan.c unsigned long mem_cgroup_shrink_node(struct mem_cgroup *memcg,
memcg 3317 mm/vmscan.c .target_mem_cgroup = memcg,
memcg 3340 mm/vmscan.c shrink_node_memcg(pgdat, memcg, &sc, &lru_pages);
memcg 3349 mm/vmscan.c unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
memcg 3364 mm/vmscan.c .target_mem_cgroup = memcg,
memcg 3377 mm/vmscan.c nid = mem_cgroup_select_victim_node(memcg);
memcg 3401 mm/vmscan.c struct mem_cgroup *memcg;
memcg 3406 mm/vmscan.c memcg = mem_cgroup_iter(NULL, NULL, NULL);
memcg 3408 mm/vmscan.c struct lruvec *lruvec = mem_cgroup_lruvec(pgdat, memcg);
memcg 3414 mm/vmscan.c memcg = mem_cgroup_iter(NULL, memcg, NULL);
memcg 3415 mm/vmscan.c } while (memcg);
memcg 226 mm/workingset.c struct mem_cgroup *memcg = page_memcg(page);
memcg 227 mm/workingset.c int memcgid = mem_cgroup_id(memcg);
memcg 236 mm/workingset.c lruvec = mem_cgroup_lruvec(pgdat, memcg);
memcg 254 mm/workingset.c struct mem_cgroup *memcg;
memcg 280 mm/workingset.c memcg = mem_cgroup_from_id(memcgid);
memcg 281 mm/workingset.c if (!mem_cgroup_disabled() && !memcg)
memcg 283 mm/workingset.c lruvec = mem_cgroup_lruvec(pgdat, memcg);
memcg 334 mm/workingset.c struct mem_cgroup *memcg;
memcg 345 mm/workingset.c memcg = page_memcg_rcu(page);
memcg 346 mm/workingset.c if (!mem_cgroup_disabled() && !memcg)
memcg 348 mm/workingset.c lruvec = mem_cgroup_lruvec(page_pgdat(page), memcg);
memcg 425 mm/workingset.c if (sc->memcg) {
memcg 429 mm/workingset.c lruvec = mem_cgroup_lruvec(NODE_DATA(sc->nid), sc->memcg);
memcg 161 tools/testing/selftests/cgroup/test_memcontrol.c char *memcg;
memcg 163 tools/testing/selftests/cgroup/test_memcontrol.c memcg = cg_name(root, "memcg_test");
memcg 164 tools/testing/selftests/cgroup/test_memcontrol.c if (!memcg)
memcg 167 tools/testing/selftests/cgroup/test_memcontrol.c if (cg_create(memcg))
memcg 170 tools/testing/selftests/cgroup/test_memcontrol.c current = cg_read_long(memcg, "memory.current");
memcg 174 tools/testing/selftests/cgroup/test_memcontrol.c if (cg_run(memcg, alloc_anon_50M_check, NULL))
memcg 177 tools/testing/selftests/cgroup/test_memcontrol.c if (cg_run(memcg, alloc_pagecache_50M_check, NULL))
memcg 183 tools/testing/selftests/cgroup/test_memcontrol.c cg_destroy(memcg);
memcg 184 tools/testing/selftests/cgroup/test_memcontrol.c free(memcg);
memcg 590 tools/testing/selftests/cgroup/test_memcontrol.c char *memcg;
memcg 593 tools/testing/selftests/cgroup/test_memcontrol.c memcg = cg_name(root, "memcg_test");
memcg 594 tools/testing/selftests/cgroup/test_memcontrol.c if (!memcg)
memcg 597 tools/testing/selftests/cgroup/test_memcontrol.c if (cg_create(memcg))
memcg 600 tools/testing/selftests/cgroup/test_memcontrol.c if (cg_read_strcmp(memcg, "memory.high", "max\n"))
memcg 603 tools/testing/selftests/cgroup/test_memcontrol.c if (cg_write(memcg, "memory.swap.max", "0"))
memcg 606 tools/testing/selftests/cgroup/test_memcontrol.c if (cg_write(memcg, "memory.high", "30M"))
memcg 609 tools/testing/selftests/cgroup/test_memcontrol.c if (cg_run(memcg, alloc_anon, (void *)MB(100)))
memcg 612 tools/testing/selftests/cgroup/test_memcontrol.c if (!cg_run(memcg, alloc_pagecache_50M_check, NULL))
memcg 615 tools/testing/selftests/cgroup/test_memcontrol.c if (cg_run(memcg, alloc_pagecache_max_30M, NULL))
memcg 618 tools/testing/selftests/cgroup/test_memcontrol.c high = cg_read_key_long(memcg, "memory.events", "high ");
memcg 625 tools/testing/selftests/cgroup/test_memcontrol.c cg_destroy(memcg);
memcg 626 tools/testing/selftests/cgroup/test_memcontrol.c free(memcg);
memcg 639 tools/testing/selftests/cgroup/test_memcontrol.c char *memcg;
memcg 642 tools/testing/selftests/cgroup/test_memcontrol.c memcg = cg_name(root, "memcg_test");
memcg 643 tools/testing/selftests/cgroup/test_memcontrol.c if (!memcg)
memcg 646 tools/testing/selftests/cgroup/test_memcontrol.c if (cg_create(memcg))
memcg 649 tools/testing/selftests/cgroup/test_memcontrol.c if (cg_read_strcmp(memcg, "memory.max", "max\n"))
memcg 652 tools/testing/selftests/cgroup/test_memcontrol.c if (cg_write(memcg, "memory.swap.max", "0"))
memcg 655 tools/testing/selftests/cgroup/test_memcontrol.c if (cg_write(memcg, "memory.max", "30M"))
memcg 659 tools/testing/selftests/cgroup/test_memcontrol.c if (!cg_run(memcg, alloc_anon, (void *)MB(100)))
memcg 662 tools/testing/selftests/cgroup/test_memcontrol.c if (cg_run(memcg, alloc_pagecache_max_30M, NULL))
memcg 665 tools/testing/selftests/cgroup/test_memcontrol.c current = cg_read_long(memcg, "memory.current");
memcg 669 tools/testing/selftests/cgroup/test_memcontrol.c max = cg_read_key_long(memcg, "memory.events", "max ");
memcg 676 tools/testing/selftests/cgroup/test_memcontrol.c cg_destroy(memcg);
memcg 677 tools/testing/selftests/cgroup/test_memcontrol.c free(memcg);
memcg 716 tools/testing/selftests/cgroup/test_memcontrol.c char *memcg;
memcg 722 tools/testing/selftests/cgroup/test_memcontrol.c memcg = cg_name(root, "memcg_test");
memcg 723 tools/testing/selftests/cgroup/test_memcontrol.c if (!memcg)
memcg 726 tools/testing/selftests/cgroup/test_memcontrol.c if (cg_create(memcg))
memcg 729 tools/testing/selftests/cgroup/test_memcontrol.c if (cg_read_long(memcg, "memory.swap.current")) {
memcg 734 tools/testing/selftests/cgroup/test_memcontrol.c if (cg_read_strcmp(memcg, "memory.max", "max\n"))
memcg 737 tools/testing/selftests/cgroup/test_memcontrol.c if (cg_read_strcmp(memcg, "memory.swap.max", "max\n"))
memcg 740 tools/testing/selftests/cgroup/test_memcontrol.c if (cg_write(memcg, "memory.swap.max", "30M"))
memcg 743 tools/testing/selftests/cgroup/test_memcontrol.c if (cg_write(memcg, "memory.max", "30M"))
memcg 747 tools/testing/selftests/cgroup/test_memcontrol.c if (!cg_run(memcg, alloc_anon, (void *)MB(100)))
memcg 750 tools/testing/selftests/cgroup/test_memcontrol.c if (cg_read_key_long(memcg, "memory.events", "oom ") != 1)
memcg 753 tools/testing/selftests/cgroup/test_memcontrol.c if (cg_read_key_long(memcg, "memory.events", "oom_kill ") != 1)
memcg 756 tools/testing/selftests/cgroup/test_memcontrol.c if (cg_run(memcg, alloc_anon_50M_check_swap, (void *)MB(30)))
memcg 759 tools/testing/selftests/cgroup/test_memcontrol.c max = cg_read_key_long(memcg, "memory.events", "max ");
memcg 766 tools/testing/selftests/cgroup/test_memcontrol.c cg_destroy(memcg);
memcg 767 tools/testing/selftests/cgroup/test_memcontrol.c free(memcg);
memcg 780 tools/testing/selftests/cgroup/test_memcontrol.c char *memcg;
memcg 782 tools/testing/selftests/cgroup/test_memcontrol.c memcg = cg_name(root, "memcg_test");
memcg 783 tools/testing/selftests/cgroup/test_memcontrol.c if (!memcg)
memcg 786 tools/testing/selftests/cgroup/test_memcontrol.c if (cg_create(memcg))
memcg 789 tools/testing/selftests/cgroup/test_memcontrol.c if (cg_write(memcg, "memory.max", "30M"))
memcg 792 tools/testing/selftests/cgroup/test_memcontrol.c if (cg_write(memcg, "memory.swap.max", "0"))
memcg 795 tools/testing/selftests/cgroup/test_memcontrol.c if (!cg_run(memcg, alloc_anon, (void *)MB(100)))
memcg 798 tools/testing/selftests/cgroup/test_memcontrol.c if (cg_read_strcmp(memcg, "cgroup.procs", ""))
memcg 801 tools/testing/selftests/cgroup/test_memcontrol.c if (cg_read_key_long(memcg, "memory.events", "oom ") != 1)
memcg 804 tools/testing/selftests/cgroup/test_memcontrol.c if (cg_read_key_long(memcg, "memory.events", "oom_kill ") != 1)
memcg 810 tools/testing/selftests/cgroup/test_memcontrol.c cg_destroy(memcg);
memcg 811 tools/testing/selftests/cgroup/test_memcontrol.c free(memcg);
memcg 941 tools/testing/selftests/cgroup/test_memcontrol.c char *memcg;
memcg 943 tools/testing/selftests/cgroup/test_memcontrol.c memcg = cg_name(root, "memcg_test");
memcg 944 tools/testing/selftests/cgroup/test_memcontrol.c if (!memcg)
memcg 947 tools/testing/selftests/cgroup/test_memcontrol.c if (cg_create(memcg))
memcg 958 tools/testing/selftests/cgroup/test_memcontrol.c pid = cg_run_nowait(memcg, tcp_server, &args);
memcg 980 tools/testing/selftests/cgroup/test_memcontrol.c if (tcp_client(memcg, port) != KSFT_PASS)
memcg 987 tools/testing/selftests/cgroup/test_memcontrol.c if (cg_read_long(memcg, "memory.current") < 0)
memcg 990 tools/testing/selftests/cgroup/test_memcontrol.c if (cg_read_key_long(memcg, "memory.stat", "sock "))
memcg 996 tools/testing/selftests/cgroup/test_memcontrol.c cg_destroy(memcg);
memcg 997 tools/testing/selftests/cgroup/test_memcontrol.c free(memcg);
memcg 1128 tools/testing/selftests/cgroup/test_memcontrol.c char *memcg;
memcg 1131 tools/testing/selftests/cgroup/test_memcontrol.c memcg = cg_name(root, "memcg_test_0");
memcg 1133 tools/testing/selftests/cgroup/test_memcontrol.c if (!memcg)
memcg 1136 tools/testing/selftests/cgroup/test_memcontrol.c if (cg_create(memcg))
memcg 1139 tools/testing/selftests/cgroup/test_memcontrol.c if (cg_write(memcg, "memory.max", "50M"))
memcg 1142 tools/testing/selftests/cgroup/test_memcontrol.c if (cg_write(memcg, "memory.swap.max", "0"))
memcg 1145 tools/testing/selftests/cgroup/test_memcontrol.c if (cg_write(memcg, "memory.oom.group", "1"))
memcg 1148 tools/testing/selftests/cgroup/test_memcontrol.c safe_pid = cg_run_nowait(memcg, alloc_anon_noexit, (void *) MB(1));
memcg 1152 tools/testing/selftests/cgroup/test_memcontrol.c cg_run_nowait(memcg, alloc_anon_noexit, (void *) MB(1));
memcg 1153 tools/testing/selftests/cgroup/test_memcontrol.c if (!cg_run(memcg, alloc_anon, (void *)MB(100)))
memcg 1156 tools/testing/selftests/cgroup/test_memcontrol.c if (cg_read_key_long(memcg, "memory.events", "oom_kill ") != 3)
memcg 1165 tools/testing/selftests/cgroup/test_memcontrol.c if (memcg)
memcg 1166 tools/testing/selftests/cgroup/test_memcontrol.c cg_destroy(memcg);
memcg 1167 tools/testing/selftests/cgroup/test_memcontrol.c free(memcg);