/linux-4.4.14/mm/ |
D | memcontrol.c |
    152 struct mem_cgroup *memcg; member
    166 int (*register_event)(struct mem_cgroup *memcg,
    173 void (*unregister_event)(struct mem_cgroup *memcg,
    185 static void mem_cgroup_threshold(struct mem_cgroup *memcg);
    186 static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);
    250 struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg) in memcg_to_vmpressure() argument
    252 if (!memcg) in memcg_to_vmpressure()
    253 memcg = root_mem_cgroup; in memcg_to_vmpressure()
    254 return &memcg->vmpressure; in memcg_to_vmpressure()
    262 static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg) in mem_cgroup_is_root() argument
    [all …]
|
D | vmpressure.c |
    81 struct mem_cgroup *memcg = mem_cgroup_from_css(css); in vmpressure_parent() local
    83 memcg = parent_mem_cgroup(memcg); in vmpressure_parent()
    84 if (!memcg) in vmpressure_parent()
    86 return memcg_to_vmpressure(memcg); in vmpressure_parent()
    211 void vmpressure(gfp_t gfp, struct mem_cgroup *memcg, in vmpressure() argument
    214 struct vmpressure *vmpr = memcg_to_vmpressure(memcg); in vmpressure()
    263 void vmpressure_prio(gfp_t gfp, struct mem_cgroup *memcg, int prio) in vmpressure_prio() argument
    279 vmpressure(gfp, memcg, vmpressure_win, 0); in vmpressure_prio()
    296 int vmpressure_register_event(struct mem_cgroup *memcg, in vmpressure_register_event() argument
    299 struct vmpressure *vmpr = memcg_to_vmpressure(memcg); in vmpressure_register_event()
    [all …]
|
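A minimal sketch (not from the tree) of how a reclaim path would feed the vmpressure() interface listed above: scanned/reclaimed page counts for a memcg are passed in, and pressure events are delivered to listeners registered with vmpressure_register_event(). The wrapper name my_report_reclaim() and the counts are hypothetical; the call assumes the four-argument 4.4 prototype shown in vmpressure.h below.

    #include <linux/gfp.h>
    #include <linux/vmpressure.h>

    /* Hypothetical caller; only the vmpressure() call itself comes from the tree. */
    static void my_report_reclaim(struct mem_cgroup *memcg,
                                  unsigned long scanned, unsigned long reclaimed)
    {
            /*
             * Pressure is derived from the scanned:reclaimed ratio and
             * reported against this memcg's vmpressure structure.
             */
            vmpressure(GFP_KERNEL, memcg, scanned, reclaimed);
    }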
D | oom_kill.c |
    132 struct mem_cgroup *memcg, const nodemask_t *nodemask) in oom_unkillable_task() argument
    140 if (memcg && !task_in_mem_cgroup(p, memcg)) in oom_unkillable_task()
    159 unsigned long oom_badness(struct task_struct *p, struct mem_cgroup *memcg, in oom_badness() argument
    165 if (oom_unkillable_task(p, memcg, nodemask)) in oom_badness()
    353 static void dump_tasks(struct mem_cgroup *memcg, const nodemask_t *nodemask) in dump_tasks() argument
    361 if (oom_unkillable_task(p, memcg, nodemask)) in dump_tasks()
    387 struct mem_cgroup *memcg) in dump_header() argument
    395 if (memcg) in dump_header()
    396 mem_cgroup_print_oom_info(memcg, p); in dump_header()
    400 dump_tasks(memcg, oc->nodemask); in dump_header()
    [all …]
|
D | slab_common.c |
    139 struct mem_cgroup *memcg, struct kmem_cache *root_cache) in init_memcg_params() argument
    143 if (memcg) { in init_memcg_params()
    145 s->memcg_params.memcg = memcg; in init_memcg_params()
    215 struct mem_cgroup *memcg, struct kmem_cache *root_cache) in init_memcg_params() argument
    322 struct mem_cgroup *memcg, struct kmem_cache *root_cache) in create_cache() argument
    338 err = init_memcg_params(s, memcg, root_cache); in create_cache()
    489 void memcg_create_kmem_cache(struct mem_cgroup *memcg, in memcg_create_kmem_cache() argument
    493 struct cgroup_subsys_state *css = &memcg->css; in memcg_create_kmem_cache()
    508 if (!memcg_kmem_is_active(memcg)) in memcg_create_kmem_cache()
    511 idx = memcg_cache_id(memcg); in memcg_create_kmem_cache()
    [all …]
|
D | vmscan.c |
    173 struct mem_cgroup *memcg = sc->target_mem_cgroup; in sane_reclaim() local
    175 if (!memcg) in sane_reclaim()
    407 struct mem_cgroup *memcg, in shrink_slab() argument
    414 if (memcg && !memcg_kmem_is_active(memcg)) in shrink_slab()
    435 .memcg = memcg, in shrink_slab()
    438 if (memcg && !(shrinker->flags & SHRINKER_MEMCG_AWARE)) in shrink_slab()
    458 struct mem_cgroup *memcg = NULL; in drop_slab_node() local
    462 freed += shrink_slab(GFP_KERNEL, nid, memcg, in drop_slab_node()
    464 } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)) != NULL); in drop_slab_node()
    613 struct mem_cgroup *memcg; in __remove_mapping() local
    [all …]
|
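The drop_slab_node() hits above show how every cgroup in the hierarchy is visited when slab caches are dropped. The loop shape, sketched below under the 4.4 signatures visible in those hits, is only meaningful inside mm/vmscan.c where shrink_slab() is defined; my_drop_slab_node() and the 1000/1000 scanned/eligible counts are illustrative, not the tree's actual values.

    static unsigned long my_drop_slab_node(int nid)
    {
            struct mem_cgroup *memcg = NULL;   /* NULL means start at the root cgroup */
            unsigned long freed = 0;

            do {
                    /* Shrink memcg-aware caches on behalf of this cgroup. */
                    freed += shrink_slab(GFP_KERNEL, nid, memcg, 1000, 1000);
                    /* mem_cgroup_iter() walks the whole hierarchy and ends with NULL. */
            } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)) != NULL);

            return freed;
    }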
D | page-writeback.c |
    2409 struct mem_cgroup *memcg) in account_page_dirtied() argument
    2421 mem_cgroup_inc_page_stat(memcg, MEM_CGROUP_STAT_DIRTY); in account_page_dirtied()
    2439 struct mem_cgroup *memcg, struct bdi_writeback *wb) in account_page_cleaned() argument
    2442 mem_cgroup_dec_page_stat(memcg, MEM_CGROUP_STAT_DIRTY); in account_page_cleaned()
    2463 struct mem_cgroup *memcg; in __set_page_dirty_nobuffers() local
    2465 memcg = mem_cgroup_begin_page_stat(page); in __set_page_dirty_nobuffers()
    2471 mem_cgroup_end_page_stat(memcg); in __set_page_dirty_nobuffers()
    2478 account_page_dirtied(page, mapping, memcg); in __set_page_dirty_nobuffers()
    2482 mem_cgroup_end_page_stat(memcg); in __set_page_dirty_nobuffers()
    2490 mem_cgroup_end_page_stat(memcg); in __set_page_dirty_nobuffers()
    [all …]
|
D | list_lru.c |
    79 struct mem_cgroup *memcg; in list_lru_from_kmem() local
    84 memcg = mem_cgroup_from_kmem(ptr); in list_lru_from_kmem()
    85 if (!memcg) in list_lru_from_kmem()
    88 return list_lru_from_memcg_idx(nlru, memcg_cache_id(memcg)); in list_lru_from_kmem()
    178 int nid, struct mem_cgroup *memcg) in list_lru_count_one() argument
    180 return __list_lru_count_one(lru, nid, memcg_cache_id(memcg)); in list_lru_count_one()
    259 list_lru_walk_one(struct list_lru *lru, int nid, struct mem_cgroup *memcg, in list_lru_walk_one() argument
    263 return __list_lru_walk_one(lru, nid, memcg_cache_id(memcg), in list_lru_walk_one()
|
D | rmap.c |
    824 struct mem_cgroup *memcg; member
    910 struct mem_cgroup *memcg = pra->memcg; in invalid_page_referenced_vma() local
    912 if (!mm_match_cgroup(vma->vm_mm, memcg)) in invalid_page_referenced_vma()
    930 struct mem_cgroup *memcg, in page_referenced() argument
    937 .memcg = memcg, in page_referenced()
    963 if (memcg) { in page_referenced()
    1210 struct mem_cgroup *memcg; in page_add_file_rmap() local
    1212 memcg = mem_cgroup_begin_page_stat(page); in page_add_file_rmap()
    1215 mem_cgroup_inc_page_stat(memcg, MEM_CGROUP_STAT_FILE_MAPPED); in page_add_file_rmap()
    1217 mem_cgroup_end_page_stat(memcg); in page_add_file_rmap()
    [all …]
|
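The page_add_file_rmap() hits above show the page-stat bracket that also appears in page-writeback.c, truncate.c, filemap.c and fs/buffer.c: statistics are only touched between mem_cgroup_begin_page_stat() and mem_cgroup_end_page_stat(), which keep the page's memcg from changing underneath the update. A hedged sketch of just that bracket; my_account_file_mapped() is a made-up name, and the real function also bumps the mapcount and NR_FILE_MAPPED.

    #include <linux/memcontrol.h>

    static void my_account_file_mapped(struct page *page)
    {
            struct mem_cgroup *memcg;

            /* Pin page->mem_cgroup so the page cannot switch cgroups mid-update. */
            memcg = mem_cgroup_begin_page_stat(page);
            mem_cgroup_inc_page_stat(memcg, MEM_CGROUP_STAT_FILE_MAPPED);
            mem_cgroup_end_page_stat(memcg);
    }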
D | userfaultfd.c |
    27 struct mem_cgroup *memcg; in mcopy_atomic_pte() local
    66 if (mem_cgroup_try_charge(page, dst_mm, GFP_KERNEL, &memcg)) in mcopy_atomic_pte()
    80 mem_cgroup_commit_charge(page, memcg, false); in mcopy_atomic_pte()
    94 mem_cgroup_cancel_charge(page, memcg); in mcopy_atomic_pte()
|
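mcopy_atomic_pte() above is the shortest instance of the charge protocol repeated in huge_memory.c, memory.c, shmem.c and swapfile.c below: mem_cgroup_try_charge() reserves the charge, mem_cgroup_cancel_charge() rolls it back on failure, and mem_cgroup_commit_charge() finalizes it once the page is installed. A hedged sketch of that shape only; my_charge_and_map() and my_install_page() are hypothetical, and the final false is the lrucare flag (paths such as do_swap_page() and shmem_unuse() pass true when the page may already sit on an LRU).

    #include <linux/memcontrol.h>

    /* Hypothetical helper standing in for the actual page-table installation step. */
    static int my_install_page(struct mm_struct *mm, struct page *page);

    static int my_charge_and_map(struct mm_struct *mm, struct page *page)
    {
            struct mem_cgroup *memcg;

            /* Reserve the charge before the page becomes visible to anyone. */
            if (mem_cgroup_try_charge(page, mm, GFP_KERNEL, &memcg))
                    return -ENOMEM;

            if (my_install_page(mm, page)) {
                    /* Installation failed: return the reserved charge. */
                    mem_cgroup_cancel_charge(page, memcg);
                    return -EFAULT;
            }

            /* Bind the page to the cgroup; false == no lrucare needed here. */
            mem_cgroup_commit_charge(page, memcg, false);
            return 0;
    }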
D | huge_memory.c |
    720 struct mem_cgroup *memcg; in __do_huge_pmd_anonymous_page() local
    727 if (mem_cgroup_try_charge(page, mm, gfp, &memcg)) { in __do_huge_pmd_anonymous_page()
    735 mem_cgroup_cancel_charge(page, memcg); in __do_huge_pmd_anonymous_page()
    751 mem_cgroup_cancel_charge(page, memcg); in __do_huge_pmd_anonymous_page()
    762 mem_cgroup_cancel_charge(page, memcg); in __do_huge_pmd_anonymous_page()
    774 mem_cgroup_commit_charge(page, memcg, false); in __do_huge_pmd_anonymous_page()
    1049 struct mem_cgroup *memcg; in do_huge_pmd_wp_page_fallback() local
    1071 &memcg))) { in do_huge_pmd_wp_page_fallback()
    1075 memcg = (void *)page_private(pages[i]); in do_huge_pmd_wp_page_fallback()
    1077 mem_cgroup_cancel_charge(pages[i], memcg); in do_huge_pmd_wp_page_fallback()
    [all …]
|
D | truncate.c |
    513 struct mem_cgroup *memcg; in invalidate_complete_page2() local
    522 memcg = mem_cgroup_begin_page_stat(page); in invalidate_complete_page2()
    528 __delete_from_page_cache(page, NULL, memcg); in invalidate_complete_page2()
    530 mem_cgroup_end_page_stat(memcg); in invalidate_complete_page2()
    539 mem_cgroup_end_page_stat(memcg); in invalidate_complete_page2()
|
D | filemap.c |
    182 struct mem_cgroup *memcg) in __delete_from_page_cache() argument
    218 account_page_cleaned(page, mapping, memcg, in __delete_from_page_cache()
    233 struct mem_cgroup *memcg; in delete_from_page_cache() local
    242 memcg = mem_cgroup_begin_page_stat(page); in delete_from_page_cache()
    244 __delete_from_page_cache(page, NULL, memcg); in delete_from_page_cache()
    246 mem_cgroup_end_page_stat(memcg); in delete_from_page_cache()
    528 struct mem_cgroup *memcg; in replace_page_cache_page() local
    538 memcg = mem_cgroup_begin_page_stat(old); in replace_page_cache_page()
    540 __delete_from_page_cache(old, NULL, memcg); in replace_page_cache_page()
    553 mem_cgroup_end_page_stat(memcg); in replace_page_cache_page()
    [all …]
|
D | memory.c |
    2110 struct mem_cgroup *memcg; in wp_page_copy() local
    2126 if (mem_cgroup_try_charge(new_page, mm, GFP_KERNEL, &memcg)) in wp_page_copy()
    2157 mem_cgroup_commit_charge(new_page, memcg, false); in wp_page_copy()
    2196 mem_cgroup_cancel_charge(new_page, memcg); in wp_page_copy()
    2492 struct mem_cgroup *memcg; in do_swap_page() local
    2571 if (mem_cgroup_try_charge(page, mm, GFP_KERNEL, &memcg)) { in do_swap_page()
    2613 mem_cgroup_commit_charge(page, memcg, true); in do_swap_page()
    2616 mem_cgroup_commit_charge(page, memcg, false); in do_swap_page()
    2651 mem_cgroup_cancel_charge(page, memcg); in do_swap_page()
    2707 struct mem_cgroup *memcg; in do_anonymous_page() local
    [all …]
|
D | backing-dev.c |
    521 struct mem_cgroup *memcg; in cgwb_create() local
    529 memcg = mem_cgroup_from_css(memcg_css); in cgwb_create()
    532 memcg_cgwb_list = mem_cgroup_cgwb_list(memcg); in cgwb_create()
    719 void wb_memcg_offline(struct mem_cgroup *memcg) in wb_memcg_offline() argument
    722 struct list_head *memcg_cgwb_list = mem_cgroup_cgwb_list(memcg); in wb_memcg_offline()
|
D | shmem.c |
    716 struct mem_cgroup *memcg; in shmem_unuse() local
    731 error = mem_cgroup_try_charge(page, current->mm, GFP_KERNEL, &memcg); in shmem_unuse()
    754 mem_cgroup_cancel_charge(page, memcg); in shmem_unuse()
    756 mem_cgroup_commit_charge(page, memcg, true); in shmem_unuse()
    1060 struct mem_cgroup *memcg; in shmem_getpage_gfp() local
    1139 error = mem_cgroup_try_charge(page, current->mm, gfp, &memcg); in shmem_getpage_gfp()
    1156 mem_cgroup_cancel_charge(page, memcg); in shmem_getpage_gfp()
    1163 mem_cgroup_commit_charge(page, memcg, true); in shmem_getpage_gfp()
    1202 error = mem_cgroup_try_charge(page, current->mm, gfp, &memcg); in shmem_getpage_gfp()
    1212 mem_cgroup_cancel_charge(page, memcg); in shmem_getpage_gfp()
    [all …]
|
D | slab.h | 248 s->memcg_params.memcg); in memcg_charge_slab()
|
D | swapfile.c |
    1138 struct mem_cgroup *memcg; in unuse_pte() local
    1148 if (mem_cgroup_try_charge(page, vma->vm_mm, GFP_KERNEL, &memcg)) { in unuse_pte()
    1155 mem_cgroup_cancel_charge(page, memcg); in unuse_pte()
    1167 mem_cgroup_commit_charge(page, memcg, true); in unuse_pte()
    1170 mem_cgroup_commit_charge(page, memcg, false); in unuse_pte()
|
/linux-4.4.14/include/linux/ |
D | memcontrol.h |
    113 struct mem_cgroup *memcg; member
    143 struct mem_cgroup *memcg; /* Back pointer, we cannot */ member
    286 static inline void mem_cgroup_events(struct mem_cgroup *memcg, in mem_cgroup_events() argument
    290 this_cpu_add(memcg->stat->events[idx], nr); in mem_cgroup_events()
    291 cgroup_file_notify(&memcg->events_file); in mem_cgroup_events()
    294 bool mem_cgroup_low(struct mem_cgroup *root, struct mem_cgroup *memcg);
    298 void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg,
    300 void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg);
    309 bool task_in_mem_cgroup(struct task_struct *task, struct mem_cgroup *memcg);
    311 struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg);
    [all …]
|
D | vmpressure.h |
    29 extern void vmpressure(gfp_t gfp, struct mem_cgroup *memcg,
    31 extern void vmpressure_prio(gfp_t gfp, struct mem_cgroup *memcg, int prio);
    35 extern struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg);
    37 extern int vmpressure_register_event(struct mem_cgroup *memcg,
    40 extern void vmpressure_unregister_event(struct mem_cgroup *memcg,
    43 static inline void vmpressure(gfp_t gfp, struct mem_cgroup *memcg, in vmpressure() argument
    45 static inline void vmpressure_prio(gfp_t gfp, struct mem_cgroup *memcg, in vmpressure_prio() argument
|
D | list_lru.h |
    109 int nid, struct mem_cgroup *memcg);
    115 return list_lru_count_one(lru, sc->nid, sc->memcg); in list_lru_shrink_count()
    159 int nid, struct mem_cgroup *memcg,
    170 return list_lru_walk_one(lru, sc->nid, sc->memcg, isolate, cb_arg, in list_lru_shrink_walk()
|
D | oom.h |
    76 struct mem_cgroup *memcg, const nodemask_t *nodemask,
    83 struct mem_cgroup *memcg, const char *message);
    87 struct mem_cgroup *memcg);
|
D | swap.h |
    322 extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
    354 static inline int mem_cgroup_swappiness(struct mem_cgroup *memcg) in mem_cgroup_swappiness() argument
    357 if (mem_cgroup_disabled() || !memcg->css.parent) in mem_cgroup_swappiness()
    360 return memcg->swappiness; in mem_cgroup_swappiness()
|
D | rmap.h |
    175 struct mem_cgroup *memcg, unsigned long *vm_flags);
    251 struct mem_cgroup *memcg, in page_referenced() argument
|
D | shrinker.h | 25 struct mem_cgroup *memcg; member
|
D | ksm.h | 92 struct mem_cgroup *memcg, unsigned long *vm_flags) in page_referenced_ksm() argument
|
D | backing-dev.h |
    250 void wb_memcg_offline(struct mem_cgroup *memcg);
    465 static inline void wb_memcg_offline(struct mem_cgroup *memcg) in wb_memcg_offline() argument
|
D | mm.h |
    886 static inline void set_page_memcg(struct page *page, struct mem_cgroup *memcg) in set_page_memcg() argument
    888 page->mem_cgroup = memcg; in set_page_memcg()
    896 static inline void set_page_memcg(struct page *page, struct mem_cgroup *memcg) in set_page_memcg() argument
    1271 struct mem_cgroup *memcg);
    1273 struct mem_cgroup *memcg, struct bdi_writeback *wb);
|
D | slab.h | 530 struct mem_cgroup *memcg; member
|
D | pagemap.h | 662 struct mem_cgroup *memcg);
|
/linux-4.4.14/net/ipv4/ |
D | tcp_memcontrol.c |
    9 int tcp_init_cgroup(struct mem_cgroup *memcg, struct cgroup_subsys *ss) in tcp_init_cgroup() argument
    16 struct mem_cgroup *parent = parent_mem_cgroup(memcg); in tcp_init_cgroup()
    20 cg_proto = tcp_prot.proto_cgroup(memcg); in tcp_init_cgroup()
    28 cg_proto->memcg = memcg; in tcp_init_cgroup()
    41 void tcp_destroy_cgroup(struct mem_cgroup *memcg) in tcp_destroy_cgroup() argument
    45 cg_proto = tcp_prot.proto_cgroup(memcg); in tcp_destroy_cgroup()
    57 static int tcp_update_limit(struct mem_cgroup *memcg, unsigned long nr_pages) in tcp_update_limit() argument
    63 cg_proto = tcp_prot.proto_cgroup(memcg); in tcp_update_limit()
    118 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); in tcp_cgroup_write() local
    131 ret = tcp_update_limit(memcg, nr_pages); in tcp_cgroup_write()
    [all …]
|
/linux-4.4.14/include/net/ |
D | tcp_memcontrol.h |
    4 struct cg_proto *tcp_proto_cgroup(struct mem_cgroup *memcg);
    5 int tcp_init_cgroup(struct mem_cgroup *memcg, struct cgroup_subsys *ss);
    6 void tcp_destroy_cgroup(struct mem_cgroup *memcg);
|
D | sock.h |
    75 int mem_cgroup_sockets_init(struct mem_cgroup *memcg, struct cgroup_subsys *ss);
    76 void mem_cgroup_sockets_destroy(struct mem_cgroup *memcg);
    79 int mem_cgroup_sockets_init(struct mem_cgroup *memcg, struct cgroup_subsys *ss) in mem_cgroup_sockets_init() argument
    84 void mem_cgroup_sockets_destroy(struct mem_cgroup *memcg) in mem_cgroup_sockets_destroy() argument
    1065 int (*init_cgroup)(struct mem_cgroup *memcg,
    1067 void (*destroy_cgroup)(struct mem_cgroup *memcg);
    1068 struct cg_proto *(*proto_cgroup)(struct mem_cgroup *memcg);
    1105 return proto->proto_cgroup(parent_mem_cgroup(cg_proto->memcg)); in parent_cg_proto()
|
/linux-4.4.14/Documentation/cgroups/ |
D | memcg_test.txt |
    5 Because VM is getting complex (one of reasons is memcg...), memcg's behavior
    6 is complex. This is a document for memcg's internal behavior.
    47 At commit(), the page is associated with the memcg.
    91 But brief explanation of the behavior of memcg around shmem will be
    109 Each memcg has its own private LRU. Now, its handling is under global
    111 Almost all routines around memcg's LRU is called by global LRU's
    115 memcg's private LRU and call __isolate_lru_page() to extract a page
    125 9.1 Small limit to memcg.
    126 When you do test to do racy case, it's good test to set memcg's limit
    133 Historically, memcg's shmem handling was poor and we saw some amount
    [all …]
|
D | memory.txt |
    16 see patch's title and function names tend to use "memcg".
    42 - pages are linked to per-memcg LRU exclusively, and there is no global LRU.
    313 from inside the memcg. The creation is done lazily, so some objects can still be
    315 belong to the same memcg. This only fails to hold when a task is migrated to a
    316 different memcg during the page allocation by the cache.
    332 This is the standard memcg limitation mechanism already present before kmem
    547 there is a swap storage available. This might lead to memcg OOM killer
    571 This is similar to numa_maps but operates on a per-memcg basis. This is
    573 an memcg since the pages are allowed to be allocated from any physical
    577 Each memcg's numa_stat file includes "total", "file", "anon" and "unevictable"
    [all …]
|
/linux-4.4.14/Documentation/vm/ |
D | hwpoison.txt |
    131 corrupt-filter-memcg
    134 of the memcg.
    143 echo $memcg_ino > /debug/hwpoison/corrupt-filter-memcg
|
/linux-4.4.14/fs/ |
D | buffer.c |
    634 struct mem_cgroup *memcg, int warn) in __set_page_dirty() argument
    641 account_page_dirtied(page, mapping, memcg); in __set_page_dirty()
    676 struct mem_cgroup *memcg; in __set_page_dirty_buffers() local
    696 memcg = mem_cgroup_begin_page_stat(page); in __set_page_dirty_buffers()
    701 __set_page_dirty(page, mapping, memcg, 1); in __set_page_dirty_buffers()
    703 mem_cgroup_end_page_stat(memcg); in __set_page_dirty_buffers()
    1179 struct mem_cgroup *memcg; in mark_buffer_dirty() local
    1181 memcg = mem_cgroup_begin_page_stat(page); in mark_buffer_dirty()
    1185 __set_page_dirty(page, mapping, memcg, 0); in mark_buffer_dirty()
    1187 mem_cgroup_end_page_stat(memcg); in mark_buffer_dirty()
|
/linux-4.4.14/fs/xfs/ |
D | xfs_aops.c |
    1954 struct mem_cgroup *memcg; in xfs_vm_set_page_dirty() local
    1978 memcg = mem_cgroup_begin_page_stat(page); in xfs_vm_set_page_dirty()
    1989 account_page_dirtied(page, mapping, memcg); in xfs_vm_set_page_dirty()
    1995 mem_cgroup_end_page_stat(memcg); in xfs_vm_set_page_dirty()
|
/linux-4.4.14/kernel/events/ |
D | uprobes.c |
    162 struct mem_cgroup *memcg; in __replace_page() local
    164 err = mem_cgroup_try_charge(kpage, vma->vm_mm, GFP_KERNEL, &memcg); in __replace_page()
    179 mem_cgroup_commit_charge(kpage, memcg, false); in __replace_page()
    202 mem_cgroup_cancel_charge(kpage, memcg); in __replace_page()
|
/linux-4.4.14/net/core/ |
D | sock.c |
    199 int mem_cgroup_sockets_init(struct mem_cgroup *memcg, struct cgroup_subsys *ss) in mem_cgroup_sockets_init() argument
    207 ret = proto->init_cgroup(memcg, ss); in mem_cgroup_sockets_init()
    218 proto->destroy_cgroup(memcg); in mem_cgroup_sockets_init()
    223 void mem_cgroup_sockets_destroy(struct mem_cgroup *memcg) in mem_cgroup_sockets_destroy() argument
    230 proto->destroy_cgroup(memcg); in mem_cgroup_sockets_destroy()
|