/linux-4.1.27/mm/
D | memcontrol.c |
     154  struct mem_cgroup *memcg; /* Back pointer, we cannot */  [member]
     221  struct mem_cgroup *memcg;  [member]
     235  int (*register_event)(struct mem_cgroup *memcg,
     242  void (*unregister_event)(struct mem_cgroup *memcg,
     254  static void mem_cgroup_threshold(struct mem_cgroup *memcg);
     255  static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);
     358  bool memcg_kmem_is_active(struct mem_cgroup *memcg)  [in memcg_kmem_is_active(), argument]
     360  return memcg->kmem_acct_active;  [in memcg_kmem_is_active()]
     430  struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)  [in memcg_to_vmpressure(), argument]
     432  if (!memcg)  [in memcg_to_vmpressure()]
    [all …]

D | vmpressure.c |
      81  struct mem_cgroup *memcg = mem_cgroup_from_css(css);  [in vmpressure_parent(), local]
      83  memcg = parent_mem_cgroup(memcg);  [in vmpressure_parent()]
      84  if (!memcg)  [in vmpressure_parent()]
      86  return memcg_to_vmpressure(memcg);  [in vmpressure_parent()]
     211  void vmpressure(gfp_t gfp, struct mem_cgroup *memcg,  [in vmpressure(), argument]
     214  struct vmpressure *vmpr = memcg_to_vmpressure(memcg);  [in vmpressure()]
     263  void vmpressure_prio(gfp_t gfp, struct mem_cgroup *memcg, int prio)  [in vmpressure_prio(), argument]
     279  vmpressure(gfp, memcg, vmpressure_win, 0);  [in vmpressure_prio()]
     296  int vmpressure_register_event(struct mem_cgroup *memcg,  [in vmpressure_register_event(), argument]
     299  struct vmpressure *vmpr = memcg_to_vmpressure(memcg);  [in vmpressure_register_event()]
    [all …]

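The vmpressure_parent() hits above (lines 81-86) show how pressure events propagate upward: resolve the memcg behind a css, step to its parent, and stop at the root. A minimal sketch of that walk; vmpressure_to_css() is an assumed helper from the same file, and the work-queue context around it is omitted:

    /* Sketch: find the vmpressure struct one level up the hierarchy.
     * Returns NULL at the root memcg, where propagation stops. */
    static struct vmpressure *vmpressure_parent(struct vmpressure *vmpr)
    {
            struct cgroup_subsys_state *css = vmpressure_to_css(vmpr); /* assumed */
            struct mem_cgroup *memcg = mem_cgroup_from_css(css);

            memcg = parent_mem_cgroup(memcg);
            if (!memcg)
                    return NULL;
            return memcg_to_vmpressure(memcg);
    }
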
D | oom_kill.c |
     122  struct mem_cgroup *memcg, const nodemask_t *nodemask)  [in oom_unkillable_task(), argument]
     130  if (memcg && !task_in_mem_cgroup(p, memcg))  [in oom_unkillable_task()]
     149  unsigned long oom_badness(struct task_struct *p, struct mem_cgroup *memcg,  [in oom_badness(), argument]
     155  if (oom_unkillable_task(p, memcg, nodemask))  [in oom_badness()]
     349  static void dump_tasks(struct mem_cgroup *memcg, const nodemask_t *nodemask)  [in dump_tasks(), argument]
     357  if (oom_unkillable_task(p, memcg, nodemask))  [in dump_tasks()]
     383  struct mem_cgroup *memcg, const nodemask_t *nodemask)  [in dump_header(), argument]
     393  if (memcg)  [in dump_header()]
     394  mem_cgroup_print_oom_info(memcg, p);  [in dump_header()]
     398  dump_tasks(memcg, nodemask);  [in dump_header()]
    [all …]

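The oom_unkillable_task() hits (lines 122-130) carry the memcg-aware part of OOM victim selection: when the OOM is scoped to a cgroup, any task outside that cgroup is exempt. A hedged sketch of the filter; the exemptions on the elided lines are summarized as comments:

    /* Sketch: a task cannot be an OOM victim if a memcg scope is given
     * and the task is not in that cgroup (oom_kill.c:130). */
    static bool oom_unkillable_task(struct task_struct *p,
                                    struct mem_cgroup *memcg,
                                    const nodemask_t *nodemask)
    {
            /* ... init/kthread exemptions elided in the listing ... */
            if (memcg && !task_in_mem_cgroup(p, memcg))
                    return true;
            /* ... nodemask intersection check also elided ... */
            return false;
    }
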
D | slab_common.c |
     117  struct mem_cgroup *memcg, struct kmem_cache *root_cache)  [in init_memcg_params(), argument]
     121  if (memcg) {  [in init_memcg_params()]
     123  s->memcg_params.memcg = memcg;  [in init_memcg_params()]
     193  struct mem_cgroup *memcg, struct kmem_cache *root_cache)  [in init_memcg_params(), argument]
     300  struct mem_cgroup *memcg, struct kmem_cache *root_cache)  [in do_kmem_cache_create(), argument]
     316  err = init_memcg_params(s, memcg, root_cache);  [in do_kmem_cache_create()]
     477  void memcg_create_kmem_cache(struct mem_cgroup *memcg,  [in memcg_create_kmem_cache(), argument]
     481  struct cgroup_subsys_state *css = mem_cgroup_css(memcg);  [in memcg_create_kmem_cache()]
     496  if (!memcg_kmem_is_active(memcg))  [in memcg_create_kmem_cache()]
     499  idx = memcg_cache_id(memcg);  [in memcg_create_kmem_cache()]
    [all …]

D | vmscan.c |
     376  struct mem_cgroup *memcg,  [in shrink_slab(), argument]
     383  if (memcg && !memcg_kmem_is_active(memcg))  [in shrink_slab()]
     404  .memcg = memcg,  [in shrink_slab()]
     407  if (memcg && !(shrinker->flags & SHRINKER_MEMCG_AWARE))  [in shrink_slab()]
     427  struct mem_cgroup *memcg = NULL;  [in drop_slab_node(), local]
     431  freed += shrink_slab(GFP_KERNEL, nid, memcg,  [in drop_slab_node()]
     433  } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)) != NULL);  [in drop_slab_node()]
    2324  struct mem_cgroup *memcg;  [in shrink_zone(), local]
    2329  memcg = mem_cgroup_iter(root, NULL, &reclaim);  [in shrink_zone()]
    2336  if (mem_cgroup_low(root, memcg)) {  [in shrink_zone()]
    [all …]

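The drop_slab_node() hits (lines 427-433) show the canonical hierarchy walk: mem_cgroup_iter() with a NULL root visits every memcg in the system and terminates with NULL, so each per-memcg set of slab objects gets shrunk in turn. A sketch of the loop; the trailing shrink_slab() arguments are truncated in the listing, so they are left as parameters here:

    /* Sketch: shrink slab for every memcg on one node. nr_scanned and
     * nr_eligible stand in for the arguments truncated at line 431. */
    static unsigned long drop_slab_node_sketch(int nid,
                                               unsigned long nr_scanned,
                                               unsigned long nr_eligible)
    {
            struct mem_cgroup *memcg = NULL;
            unsigned long freed = 0;

            do {
                    freed += shrink_slab(GFP_KERNEL, nid, memcg,
                                         nr_scanned, nr_eligible);
            } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)) != NULL);
            return freed;
    }
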
D | list_lru.c |
      65  struct mem_cgroup *memcg;  [in list_lru_from_kmem(), local]
      70  memcg = mem_cgroup_from_kmem(ptr);  [in list_lru_from_kmem()]
      71  if (!memcg)  [in list_lru_from_kmem()]
      74  return list_lru_from_memcg_idx(nlru, memcg_cache_id(memcg));  [in list_lru_from_kmem()]
     164  int nid, struct mem_cgroup *memcg)  [in list_lru_count_one(), argument]
     166  return __list_lru_count_one(lru, nid, memcg_cache_id(memcg));  [in list_lru_count_one()]
     245  list_lru_walk_one(struct list_lru *lru, int nid, struct mem_cgroup *memcg,  [in list_lru_walk_one(), argument]
     249  return __list_lru_walk_one(lru, nid, memcg_cache_id(memcg),  [in list_lru_walk_one()]

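list_lru_from_kmem() (lines 65-74) is the reverse mapping: from a kmem object pointer back to the LRU list of the memcg it was charged to, with memcg_cache_id() serving as the array index. A sketch; the fallback to a global list (&nlru->lru) is an assumption about the elided lines:

    /* Sketch: pick the per-memcg LRU list an object belongs on. */
    static struct list_lru_one *list_lru_from_kmem(struct list_lru_node *nlru,
                                                   void *ptr)
    {
            struct mem_cgroup *memcg = mem_cgroup_from_kmem(ptr);

            if (!memcg)
                    return &nlru->lru;  /* assumed: global list fallback */
            return list_lru_from_memcg_idx(nlru, memcg_cache_id(memcg));
    }
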
D | rmap.c |
     718  struct mem_cgroup *memcg;  [member]
     799  struct mem_cgroup *memcg = pra->memcg;  [in invalid_page_referenced_vma(), local]
     801  if (!mm_match_cgroup(vma->vm_mm, memcg))  [in invalid_page_referenced_vma()]
     819  struct mem_cgroup *memcg,  [in page_referenced(), argument]
     826  .memcg = memcg,  [in page_referenced()]
     852  if (memcg) {  [in page_referenced()]
    1094  struct mem_cgroup *memcg;  [in page_add_file_rmap(), local]
    1096  memcg = mem_cgroup_begin_page_stat(page);  [in page_add_file_rmap()]
    1099  mem_cgroup_inc_page_stat(memcg, MEM_CGROUP_STAT_FILE_MAPPED);  [in page_add_file_rmap()]
    1101  mem_cgroup_end_page_stat(memcg);  [in page_add_file_rmap()]
    [all …]

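page_add_file_rmap() (lines 1094-1101) demonstrates the begin/end page-stat bracket: mem_cgroup_begin_page_stat() pins the page-to-memcg binding so the FILE_MAPPED counter lands in the right cgroup even while charges are being moved; page-writeback.c below wraps the WRITEBACK counter the same way. A sketch, where the mapcount test and the per-zone counter update are reconstructions of lines elided in the listing:

    /* Sketch: account a new file mapping against the page's memcg. */
    void page_add_file_rmap_sketch(struct page *page)
    {
            struct mem_cgroup *memcg;

            memcg = mem_cgroup_begin_page_stat(page);
            if (atomic_inc_and_test(&page->_mapcount)) {   /* assumed condition */
                    /* ... per-zone NR_FILE_MAPPED update elided ... */
                    mem_cgroup_inc_page_stat(memcg, MEM_CGROUP_STAT_FILE_MAPPED);
            }
            mem_cgroup_end_page_stat(memcg);
    }
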
D | huge_memory.c |
     722  struct mem_cgroup *memcg;  [in __do_huge_pmd_anonymous_page(), local]
     728  if (mem_cgroup_try_charge(page, mm, gfp, &memcg))  [in __do_huge_pmd_anonymous_page()]
     733  mem_cgroup_cancel_charge(page, memcg);  [in __do_huge_pmd_anonymous_page()]
     748  mem_cgroup_cancel_charge(page, memcg);  [in __do_huge_pmd_anonymous_page()]
     756  mem_cgroup_commit_charge(page, memcg, false);  [in __do_huge_pmd_anonymous_page()]
     980  struct mem_cgroup *memcg;  [in do_huge_pmd_wp_page_fallback(), local]
    1002  &memcg))) {  [in do_huge_pmd_wp_page_fallback()]
    1006  memcg = (void *)page_private(pages[i]);  [in do_huge_pmd_wp_page_fallback()]
    1008  mem_cgroup_cancel_charge(pages[i], memcg);  [in do_huge_pmd_wp_page_fallback()]
    1015  set_page_private(pages[i], (unsigned long)memcg);  [in do_huge_pmd_wp_page_fallback()]
    [all …]

D | page-writeback.c |
    2339  struct mem_cgroup *memcg;  [in test_clear_page_writeback(), local]
    2342  memcg = mem_cgroup_begin_page_stat(page);  [in test_clear_page_writeback()]
    2363  mem_cgroup_dec_page_stat(memcg, MEM_CGROUP_STAT_WRITEBACK);  [in test_clear_page_writeback()]
    2367  mem_cgroup_end_page_stat(memcg);  [in test_clear_page_writeback()]
    2374  struct mem_cgroup *memcg;  [in __test_set_page_writeback(), local]
    2377  memcg = mem_cgroup_begin_page_stat(page);  [in __test_set_page_writeback()]
    2404  mem_cgroup_inc_page_stat(memcg, MEM_CGROUP_STAT_WRITEBACK);  [in __test_set_page_writeback()]
    2407  mem_cgroup_end_page_stat(memcg);  [in __test_set_page_writeback()]

D | memory.c |
    2069  struct mem_cgroup *memcg;  [in wp_page_copy(), local]
    2086  if (mem_cgroup_try_charge(new_page, mm, GFP_KERNEL, &memcg))  [in wp_page_copy()]
    2115  mem_cgroup_commit_charge(new_page, memcg, false);  [in wp_page_copy()]
    2154  mem_cgroup_cancel_charge(new_page, memcg);  [in wp_page_copy()]
    2450  struct mem_cgroup *memcg;  [in do_swap_page(), local]
    2529  if (mem_cgroup_try_charge(page, mm, GFP_KERNEL, &memcg)) {  [in do_swap_page()]
    2571  mem_cgroup_commit_charge(page, memcg, true);  [in do_swap_page()]
    2574  mem_cgroup_commit_charge(page, memcg, false);  [in do_swap_page()]
    2609  mem_cgroup_cancel_charge(page, memcg);  [in do_swap_page()]
    2665  struct mem_cgroup *memcg;  [in do_anonymous_page(), local]
    [all …]

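wp_page_copy() and do_swap_page() here (and huge_memory.c above, shmem.c, filemap.c, swapfile.c and uprobes.c below) all follow the same two-phase charge protocol: mem_cgroup_try_charge() reserves the charge before the page is mapped, mem_cgroup_commit_charge() binds the page to the memcg once the mapping is in place, and mem_cgroup_cancel_charge() returns the reservation on every failure path. The third commit argument appears to be an LRU-care flag, true when the page may already sit on an LRU list (the swap-cache path at line 2571). A schematic skeleton with invented error handling, not the kernel's exact control flow:

    /* Sketch of the try/commit/cancel protocol around a page fault. */
    static int fault_charge_sketch(struct page *page, struct mm_struct *mm)
    {
            struct mem_cgroup *memcg;

            if (mem_cgroup_try_charge(page, mm, GFP_KERNEL, &memcg))
                    return -ENOMEM;         /* phase 1 failed: over limit */

            /* ... install the PTE under the page-table lock ... */
            if (0 /* hypothetical: mapping raced or failed */) {
                    mem_cgroup_cancel_charge(page, memcg); /* undo phase 1 */
                    return -EAGAIN;
            }

            /* Phase 2: page is visible; bind it to the memcg. The flag is
             * false for a fresh page, true for a swap-cache page. */
            mem_cgroup_commit_charge(page, memcg, false);
            return 0;
    }
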
D | slab.h |
     240  return memcg_charge_kmem(s->memcg_params.memcg, gfp, 1 << order);  [in memcg_charge_slab()]
     249  memcg_uncharge_kmem(s->memcg_params.memcg, 1 << order);  [in memcg_uncharge_slab()]

D | shmem.c |
     694  struct mem_cgroup *memcg;  [in shmem_unuse(), local]
     709  error = mem_cgroup_try_charge(page, current->mm, GFP_KERNEL, &memcg);  [in shmem_unuse()]
     732  mem_cgroup_cancel_charge(page, memcg);  [in shmem_unuse()]
     734  mem_cgroup_commit_charge(page, memcg, true);  [in shmem_unuse()]
    1038  struct mem_cgroup *memcg;  [in shmem_getpage_gfp(), local]
    1117  error = mem_cgroup_try_charge(page, current->mm, gfp, &memcg);  [in shmem_getpage_gfp()]
    1134  mem_cgroup_cancel_charge(page, memcg);  [in shmem_getpage_gfp()]
    1141  mem_cgroup_commit_charge(page, memcg, true);  [in shmem_getpage_gfp()]
    1180  error = mem_cgroup_try_charge(page, current->mm, gfp, &memcg);  [in shmem_getpage_gfp()]
    1190  mem_cgroup_cancel_charge(page, memcg);  [in shmem_getpage_gfp()]
    [all …]

D | filemap.c |
     549  struct mem_cgroup *memcg;  [in __add_to_page_cache_locked(), local]
     557  gfp_mask, &memcg);  [in __add_to_page_cache_locked()]
     565  mem_cgroup_cancel_charge(page, memcg);  [in __add_to_page_cache_locked()]
     581  mem_cgroup_commit_charge(page, memcg, false);  [in __add_to_page_cache_locked()]
     589  mem_cgroup_cancel_charge(page, memcg);  [in __add_to_page_cache_locked()]

D | swapfile.c |
    1096  struct mem_cgroup *memcg;  [in unuse_pte(), local]
    1106  if (mem_cgroup_try_charge(page, vma->vm_mm, GFP_KERNEL, &memcg)) {  [in unuse_pte()]
    1113  mem_cgroup_cancel_charge(page, memcg);  [in unuse_pte()]
    1125  mem_cgroup_commit_charge(page, memcg, true);  [in unuse_pte()]
    1128  mem_cgroup_commit_charge(page, memcg, false);  [in unuse_pte()]

D | page_alloc.c |
    3007  struct mem_cgroup *memcg = NULL;  [in alloc_kmem_pages(), local]
    3009  if (!memcg_kmem_newpage_charge(gfp_mask, &memcg, order))  [in alloc_kmem_pages()]
    3012  memcg_kmem_commit_charge(page, memcg, order);  [in alloc_kmem_pages()]
    3019  struct mem_cgroup *memcg = NULL;  [in alloc_kmem_pages_node(), local]
    3021  if (!memcg_kmem_newpage_charge(gfp_mask, &memcg, order))  [in alloc_kmem_pages_node()]
    3024  memcg_kmem_commit_charge(page, memcg, order);  [in alloc_kmem_pages_node()]

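alloc_kmem_pages() (lines 3007-3012) shows the charge-first ordering for kmem pages: reserve against the current task's memcg, then allocate, then commit; the commit step is presumably also the cleanup path when the allocation itself fails. A sketch assembled from the listed lines; the alloc_pages() call in the middle is an assumption about an elided line:

    /* Sketch: memcg-accounted page allocation for kernel memory. */
    struct page *alloc_kmem_pages_sketch(gfp_t gfp_mask, unsigned int order)
    {
            struct page *page;
            struct mem_cgroup *memcg = NULL;

            if (!memcg_kmem_newpage_charge(gfp_mask, &memcg, order))
                    return NULL;            /* kmem limit hit: fail early */
            page = alloc_pages(gfp_mask, order);    /* assumed */
            memcg_kmem_commit_charge(page, memcg, order);
            return page;
    }
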
/linux-4.1.27/include/linux/
D | memcontrol.h |
      70  void mem_cgroup_events(struct mem_cgroup *memcg,
      74  bool mem_cgroup_low(struct mem_cgroup *root, struct mem_cgroup *memcg);
      78  void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg,
      80  void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg);
      90  bool mem_cgroup_is_descendant(struct mem_cgroup *memcg,
      92  bool task_in_mem_cgroup(struct task_struct *task, struct mem_cgroup *memcg);
      97  extern struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg);
     101  struct mem_cgroup *memcg)  [in mm_match_cgroup(), argument]
     109  match = mem_cgroup_is_descendant(task_memcg, memcg);  [in mm_match_cgroup()]
     114  extern struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *memcg);
    [all …]

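mm_match_cgroup() (lines 101-109) is the hierarchical membership test used by rmap.c above: an mm matches if the memcg of its owning task is the given memcg or a descendant of it. A reconstruction of the inline; the RCU bracket and the mem_cgroup_from_task()/mm->owner steps are assumptions about the elided lines:

    /* Sketch: does this mm belong to memcg's subtree? */
    static inline bool mm_match_cgroup_sketch(struct mm_struct *mm,
                                              struct mem_cgroup *memcg)
    {
            struct mem_cgroup *task_memcg;
            bool match = false;

            rcu_read_lock();
            task_memcg = mem_cgroup_from_task(rcu_dereference(mm->owner)); /* assumed */
            if (task_memcg)
                    match = mem_cgroup_is_descendant(task_memcg, memcg);
            rcu_read_unlock();
            return match;
    }
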
D | vmpressure.h |
      29  extern void vmpressure(gfp_t gfp, struct mem_cgroup *memcg,
      31  extern void vmpressure_prio(gfp_t gfp, struct mem_cgroup *memcg, int prio);
      35  extern struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg);
      37  extern int vmpressure_register_event(struct mem_cgroup *memcg,
      40  extern void vmpressure_unregister_event(struct mem_cgroup *memcg,
      43  static inline void vmpressure(gfp_t gfp, struct mem_cgroup *memcg,  [in vmpressure(), argument]
      45  static inline void vmpressure_prio(gfp_t gfp, struct mem_cgroup *memcg,  [in vmpressure_prio(), argument]

D | list_lru.h |
     109  int nid, struct mem_cgroup *memcg);
     115  return list_lru_count_one(lru, sc->nid, sc->memcg);  [in list_lru_shrink_count()]
     159  int nid, struct mem_cgroup *memcg,
     170  return list_lru_walk_one(lru, sc->nid, sc->memcg, isolate, cb_arg,  [in list_lru_shrink_walk()]

D | oom.h |
      55  struct mem_cgroup *memcg, const nodemask_t *nodemask,
      62  struct mem_cgroup *memcg, nodemask_t *nodemask,
      70  struct mem_cgroup *memcg);

D | rmap.h |
     172  struct mem_cgroup *memcg, unsigned long *vm_flags);
     248  struct mem_cgroup *memcg,  [in page_referenced(), argument]

D | shrinker.h |
      25  struct mem_cgroup *memcg;  [member]

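This memcg member of struct shrink_control is what shrink_slab() fills in at vmscan.c:404, and it only reaches shrinkers that declare SHRINKER_MEMCG_AWARE (see vmscan.c:407); list_lru.h above turns sc->memcg straight into a per-memcg list lookup. A hypothetical shrinker wired this way; example_lru and both names are invented for illustration, and scan_objects/registration are omitted:

    static struct list_lru example_lru;     /* hypothetical object cache */

    /* Count only objects charged to sc->memcg on node sc->nid. */
    static unsigned long example_count(struct shrinker *s,
                                       struct shrink_control *sc)
    {
            return list_lru_shrink_count(&example_lru, sc);
    }

    static struct shrinker example_shrinker = {
            .count_objects  = example_count,
            /* Without this flag, shrink_slab() skips the shrinker
             * whenever a memcg is being targeted (vmscan.c:407). */
            .flags          = SHRINKER_MEMCG_AWARE,
    };
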
D | ksm.h |
      92  struct mem_cgroup *memcg, unsigned long *vm_flags)  [in page_referenced_ksm(), argument]

D | slab.h |
     511  struct mem_cgroup *memcg;  [member]

D | swap.h |
     322  extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,

D | sched.h |
    1712  struct mem_cgroup *memcg;  [member]

/linux-4.1.27/net/ipv4/
D | tcp_memcontrol.c |
       9  int tcp_init_cgroup(struct mem_cgroup *memcg, struct cgroup_subsys *ss)  [in tcp_init_cgroup(), argument]
      16  struct mem_cgroup *parent = parent_mem_cgroup(memcg);  [in tcp_init_cgroup()]
      20  cg_proto = tcp_prot.proto_cgroup(memcg);  [in tcp_init_cgroup()]
      28  cg_proto->memcg = memcg;  [in tcp_init_cgroup()]
      41  void tcp_destroy_cgroup(struct mem_cgroup *memcg)  [in tcp_destroy_cgroup(), argument]
      45  cg_proto = tcp_prot.proto_cgroup(memcg);  [in tcp_destroy_cgroup()]
      57  static int tcp_update_limit(struct mem_cgroup *memcg, unsigned long nr_pages)  [in tcp_update_limit(), argument]
      63  cg_proto = tcp_prot.proto_cgroup(memcg);  [in tcp_update_limit()]
     118  struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));  [in tcp_cgroup_write(), local]
     131  ret = tcp_update_limit(memcg, nr_pages);  [in tcp_cgroup_write()]
    [all …]

/linux-4.1.27/include/net/
D | tcp_memcontrol.h |
       4  struct cg_proto *tcp_proto_cgroup(struct mem_cgroup *memcg);
       5  int tcp_init_cgroup(struct mem_cgroup *memcg, struct cgroup_subsys *ss);
       6  void tcp_destroy_cgroup(struct mem_cgroup *memcg);

D | sock.h |
      75  int mem_cgroup_sockets_init(struct mem_cgroup *memcg, struct cgroup_subsys *ss);
      76  void mem_cgroup_sockets_destroy(struct mem_cgroup *memcg);
      79  int mem_cgroup_sockets_init(struct mem_cgroup *memcg, struct cgroup_subsys *ss)  [in mem_cgroup_sockets_init(), argument]
      84  void mem_cgroup_sockets_destroy(struct mem_cgroup *memcg)  [in mem_cgroup_sockets_destroy(), argument]
    1048  int (*init_cgroup)(struct mem_cgroup *memcg,
    1050  void (*destroy_cgroup)(struct mem_cgroup *memcg);
    1051  struct cg_proto *(*proto_cgroup)(struct mem_cgroup *memcg);
    1080  struct mem_cgroup *memcg;  [member]
    1121  return proto->proto_cgroup(parent_mem_cgroup(cg_proto->memcg));  [in parent_cg_proto()]

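parent_cg_proto() (line 1121) ties the two structures together: a protocol's per-cgroup state keeps a back pointer to its memcg (the member at line 1080, set by tcp_init_cgroup() above), and the parent's state is recovered by walking that pointer up through parent_mem_cgroup(). Nearly verbatim from the listed line, with the signature assumed:

    static inline struct cg_proto *parent_cg_proto(struct proto *proto,
                                                   struct cg_proto *cg_proto)
    {
            return proto->proto_cgroup(parent_mem_cgroup(cg_proto->memcg));
    }
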
/linux-4.1.27/Documentation/cgroups/
D | memcg_test.txt |
       5  Because VM is getting complex (one of reasons is memcg...), memcg's behavior
       6  is complex. This is a document for memcg's internal behavior.
      47  At commit(), the page is associated with the memcg.
      91  But brief explanation of the behavior of memcg around shmem will be
     109  Each memcg has its own private LRU. Now, its handling is under global
     111  Almost all routines around memcg's LRU is called by global LRU's
     115  memcg's private LRU and call __isolate_lru_page() to extract a page
     125  9.1 Small limit to memcg.
     126  When you do test to do racy case, it's good test to set memcg's limit
     133  Historically, memcg's shmem handling was poor and we saw some amount
    [all …]

D | memory.txt |
      16  see patch's title and function names tend to use "memcg".
      42  - pages are linked to per-memcg LRU exclusively, and there is no global LRU.
     313  from inside the memcg. The creation is done lazily, so some objects can still be
     315  belong to the same memcg. This only fails to hold when a task is migrated to a
     316  different memcg during the page allocation by the cache.
     332  This is the standard memcg limitation mechanism already present before kmem
     546  there is a swap storage available. This might lead to memcg OOM killer
     570  This is similar to numa_maps but operates on a per-memcg basis. This is
     572  an memcg since the pages are allowed to be allocated from any physical
     576  Each memcg's numa_stat file includes "total", "file", "anon" and "unevictable"
    [all …]

/linux-4.1.27/Documentation/vm/
D | hwpoison.txt |
     131  corrupt-filter-memcg
     134  of the memcg.
     143  echo $memcg_ino > /debug/hwpoison/corrupt-filter-memcg

/linux-4.1.27/kernel/events/
D | uprobes.c |
     170  struct mem_cgroup *memcg;  [in __replace_page(), local]
     172  err = mem_cgroup_try_charge(kpage, vma->vm_mm, GFP_KERNEL, &memcg);  [in __replace_page()]
     187  mem_cgroup_commit_charge(kpage, memcg, false);  [in __replace_page()]
     210  mem_cgroup_cancel_charge(kpage, memcg);  [in __replace_page()]

/linux-4.1.27/net/core/
D | sock.c |
     198  int mem_cgroup_sockets_init(struct mem_cgroup *memcg, struct cgroup_subsys *ss)  [in mem_cgroup_sockets_init(), argument]
     206  ret = proto->init_cgroup(memcg, ss);  [in mem_cgroup_sockets_init()]
     217  proto->destroy_cgroup(memcg);  [in mem_cgroup_sockets_init()]
     222  void mem_cgroup_sockets_destroy(struct mem_cgroup *memcg)  [in mem_cgroup_sockets_destroy(), argument]
     229  proto->destroy_cgroup(memcg);  [in mem_cgroup_sockets_destroy()]

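mem_cgroup_sockets_init() (lines 198-217) fans a new memcg out to every registered protocol and unwinds on failure: the destroy_cgroup() call at line 217 sits on the error path, rolling back the protocols initialized before the one that failed. A sketch; the proto_list iteration, its locking, and the reverse-walk unwind are assumptions about the elided lines:

    /* Sketch: initialize per-protocol cgroup state, undoing on error. */
    int mem_cgroup_sockets_init_sketch(struct mem_cgroup *memcg,
                                       struct cgroup_subsys *ss)
    {
            struct proto *proto;
            int ret = 0;

            /* ... protocol-list locking elided ... */
            list_for_each_entry(proto, &proto_list, node) {    /* assumed list */
                    if (proto->init_cgroup) {
                            ret = proto->init_cgroup(memcg, ss);
                            if (ret)
                                    goto out;
                    }
            }
            return 0;
    out:
            /* Roll back everything initialized before the failure. */
            list_for_each_entry_continue_reverse(proto, &proto_list, node)
                    if (proto->destroy_cgroup)
                            proto->destroy_cgroup(memcg);
            return ret;
    }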