/linux-4.1.27/include/net/

tcp_memcontrol.h
    4  struct cg_proto *tcp_proto_cgroup(struct mem_cgroup *memcg);
    5  int tcp_init_cgroup(struct mem_cgroup *memcg, struct cgroup_subsys *ss);
    6  void tcp_destroy_cgroup(struct mem_cgroup *memcg);

sock.h
      75  int mem_cgroup_sockets_init(struct mem_cgroup *memcg, struct cgroup_subsys *ss);
      76  void mem_cgroup_sockets_destroy(struct mem_cgroup *memcg);
      79  int mem_cgroup_sockets_init(struct mem_cgroup *memcg, struct cgroup_subsys *ss)   [mem_cgroup_sockets_init() argument]
      84  void mem_cgroup_sockets_destroy(struct mem_cgroup *memcg)                         [mem_cgroup_sockets_destroy() argument]
    1048  int (*init_cgroup)(struct mem_cgroup *memcg,
    1050  void (*destroy_cgroup)(struct mem_cgroup *memcg);
    1051  struct cg_proto *(*proto_cgroup)(struct mem_cgroup *memcg);
    1072  * memcg field is used to find which memcg we belong directly
    1073  * Each memcg struct can hold more than one cg_proto, so container_of
    1078  * for everybody, instead of just for memcg users.
    1080  struct mem_cgroup *memcg;                                          [member in struct cg_proto]
    1121  return proto->proto_cgroup(parent_mem_cgroup(cg_proto->memcg));    [parent_cg_proto()]
          (the cg_proto back pointer and proto_cgroup() lookup are sketched just below)
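
The cg_proto relationship above (sock.h:1072-1121) can be pictured with a small userspace model.
This is only a rough sketch under simplifying assumptions, not the kernel code: the structures are
reduced to the fields used here, tcp_proto_cgroup()'s "is root" test is approximated by "has no
parent", and parent_cg_proto() calls the toy tcp_proto_cgroup() directly instead of going through
sk->sk_prot->proto_cgroup().

#include <stddef.h>
#include <stdio.h>

/* Toy stand-ins for the kernel types; only the fields used below exist. */
struct mem_cgroup;

struct cg_proto {
    struct mem_cgroup *memcg;           /* back pointer, as in sock.h:1080 */
};

struct mem_cgroup {
    struct mem_cgroup *parent;
    struct cg_proto tcp_mem;            /* one embedded cg_proto per protocol */
};

/* Models tcp_proto_cgroup(): map a memcg to its protocol-specific state,
 * returning NULL for a missing or root memcg (cf. memcontrol.c:520-525). */
static struct cg_proto *tcp_proto_cgroup(struct mem_cgroup *memcg)
{
    if (!memcg || !memcg->parent)       /* treat a parent-less memcg as root */
        return NULL;
    return &memcg->tcp_mem;
}

/* Models parent_cg_proto(): resolve the parent's cg_proto through the memcg
 * back pointer, mirroring sock.h:1121. */
static struct cg_proto *parent_cg_proto(struct cg_proto *cg_proto)
{
    return tcp_proto_cgroup(cg_proto->memcg->parent);
}

int main(void)
{
    struct mem_cgroup root = { .parent = NULL };
    struct mem_cgroup child = { .parent = &root };

    root.tcp_mem.memcg = &root;
    child.tcp_mem.memcg = &child;

    /* The child's parent is the root, which has no cg_proto of its own here. */
    printf("parent cg_proto: %p\n", (void *)parent_cg_proto(&child.tcp_mem));
    return 0;
}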

/linux-4.1.27/mm/

memcontrol.c
     113-116  comment: "Per memcg event counter is incremented at every pagein/pageout. With THP,
              [...] than using jiffies etc. to handle periodic memcg event."
     154      struct mem_cgroup_per_zone: struct mem_cgroup *memcg;   /* back pointer */
     219-242  struct mem_cgroup_event: the memcg the event belongs to, plus its
              register_event()/unregister_event() callbacks, which take the memcg
     254-255  forward declarations of mem_cgroup_threshold(memcg) and mem_cgroup_oom_notify(memcg)
     358-360  memcg_kmem_is_active(): returns memcg->kmem_acct_active
     430-434  memcg_to_vmpressure(): falls back to root_mem_cgroup and returns &memcg->vmpressure
     442-444  mem_cgroup_is_root(): memcg == root_mem_cgroup
     453-455  mem_cgroup_id(): returns memcg->css.id
     462-472  comments: swap refcount release can run against an already removed memcg; the socket
              helpers live here to avoid exposing memcg's inner layout
     478-516  sock_update_memcg() / sock_release_memcg(): attach the current task's memcg to a
              socket via sk->sk_prot->proto_cgroup(memcg), pinning it with
              css_tryget_online(&memcg->css) and dropping the reference with css_put()
     520-525  tcp_proto_cgroup(): NULL for a missing or root memcg, otherwise &memcg->tcp_mem
     533      comment: "This will be the memcg's index in each cache's ->memcg_params.memcg_caches."
     586-605  mem_cgroup_zone_zoneinfo(), mem_cgroup_css(), mem_cgroup_page_zoneinfo(): map a memcg
              plus a zone or page to &memcg->nodeinfo[nid]->zoneinfo[zid], or return &memcg->css
     674-699  soft_limit_excess(): usage read from &memcg->memory minus READ_ONCE(memcg->soft_limit);
              mem_cgroup_update_tree() recomputes the excess for the memcg and every ancestor
     721-756  mem_cgroup_remove_from_trees() and __mem_cgroup_largest_soft_limit_node(): soft-limit
              tree maintenance, pinning candidates with css_tryget_online(&mz->memcg->css)
     774-822  implementation note on reading per-cpu statistics for a memcg, plus
              mem_cgroup_read_stat()/mem_cgroup_read_events(): sum the per-cpu values and the
              memcg->nocpu_base totals under memcg->pcp_counter_lock
     828-855  mem_cgroup_charge_statistics(): adjusts the RSS/CACHE/RSS_HUGE counters, the
              PGPGIN/PGPGOUT event counters and memcg->stat->nr_page_events
     866-896  mem_cgroup_node_nr_lru_pages() and mem_cgroup_nr_lru_pages(): per-node and total LRU
              page counts for a memcg
     900-951  mem_cgroup_event_ratelimit() and memcg_check_events(): compare nr_page_events against
              per-target thresholds to trigger mem_cgroup_threshold(), the soft-limit tree update
              and memcg->numainfo_events
     971-989  get_mem_cgroup_from_mm(): resolve mm->owner's memcg (falling back to root_mem_cgroup)
              and pin it with css_tryget_online()
    995-1123  mem_cgroup_iter(): hierarchy iterator; waits for smp_load_acquire(&memcg->initialized),
              publishes its position with cmpxchg(&iter->position, pos, memcg) and takes a css ref
   1157-1169  __mem_cgroup_count_vm_event(): bumps the PGFAULT/PGMAJFAULT event counters
   1180-1240  mem_cgroup_zone_lruvec() and mem_cgroup_page_lruvec(): lruvec lookup for a zone or a
              page, defaulting to root_mem_cgroup for pages with no memcg
   1277-1307  mem_cgroup_is_descendant() and task_in_mem_cgroup()
   1334-1342  mem_cgroup_lruvec_online(): tests mz->memcg->css.flags & CSS_ONLINE
   1350-1382  mem_cgroup_margin(): headroom below the memory (and, with swap accounting, memsw)
              limit; mem_cgroup_swappiness() (the margin and soft-limit excess arithmetic are
              modeled in the sketch after this listing)
   1392-1417  mem_cgroup_under_move() and mem_cgroup_wait_acct_move(): charge-moving checks
   1433-1472  mem_cgroup_print_oom_info(): prints the memory/memsw/kmem usage, limits and failcnt
              for the memcg and every cgroup in its subtree
   1494-1518  mem_cgroup_count_children() and mem_cgroup_get_limit()
   1524-1591  mem_cgroup_out_of_memory(): walks the subtree with for_each_mem_cgroup_tree(), scores
              tasks with oom_badness(task, memcg, NULL, totalpages) and calls oom_kill_process()
   1599-1686  test_mem_cgroup_node_reclaimable(), mem_cgroup_may_update_nodemask(),
              mem_cgroup_select_victim_node(): NUMA victim-node selection via memcg->scan_nodes
   1755-1822  mem_cgroup_oom_trylock()/_unlock() and mem_cgroup_mark/unmark_under_oom()
   1829-1853  struct oom_wait_info.memcg, memcg_oom_wake_function(), memcg_wakeup_oom(): OOM wait
              queue handling; the memcg is passed to __wake_up() "for filtering"
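
The two arithmetic helpers listed above, soft_limit_excess() (memcontrol.c:674) and
mem_cgroup_margin() (memcontrol.c:1355), boil down to a few lines. The sketch below is a
simplified userspace model that assumes a page counter is nothing more than a usage count and a
limit; it is not the kernel implementation, and locking, READ_ONCE() and the real page_counter
API are deliberately left out.

#include <stdbool.h>
#include <stdio.h>

/* Toy page counter: just a usage and a limit, both in pages. */
struct page_counter {
    unsigned long count;
    unsigned long limit;
};

struct mem_cgroup {
    struct page_counter memory;
    struct page_counter memsw;          /* memory + swap */
    unsigned long soft_limit;
};

/* How many pages the memcg is over its soft limit (cf. soft_limit_excess()). */
static unsigned long soft_limit_excess(const struct mem_cgroup *memcg)
{
    unsigned long nr_pages = memcg->memory.count;

    return nr_pages > memcg->soft_limit ? nr_pages - memcg->soft_limit : 0;
}

/* Room left before hitting the hard limit (cf. mem_cgroup_margin()): the
 * minimum of the memory and memsw headroom when swap accounting is enabled. */
static unsigned long mem_cgroup_margin(const struct mem_cgroup *memcg,
                                       bool do_swap_account)
{
    unsigned long margin = 0;

    if (memcg->memory.count < memcg->memory.limit)
        margin = memcg->memory.limit - memcg->memory.count;

    if (do_swap_account) {
        unsigned long memsw_margin = 0;

        if (memcg->memsw.count < memcg->memsw.limit)
            memsw_margin = memcg->memsw.limit - memcg->memsw.count;
        if (memsw_margin < margin)
            margin = memsw_margin;
    }
    return margin;
}

int main(void)
{
    struct mem_cgroup m = {
        .memory = { .count = 900, .limit = 1024 },
        .memsw  = { .count = 950, .limit = 1024 },
        .soft_limit = 512,
    };

    printf("excess=%lu margin=%lu\n",
           soft_limit_excess(&m), mem_cgroup_margin(&m, true));
    /* excess=388, margin=74: the memsw headroom is the tighter bound */
    return 0;
}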

memcontrol.c (continued)
   1856-1881  memcg_oom_recover() and mem_cgroup_oom(): wake OOM waiters while memcg->under_oom is
              set; record the OOM context in current->memcg_oom.memcg with a css reference held
   1887-1952  mem_cgroup_oom_synchronize(): completes memcg OOM handling at the end of a page fault;
              marks the memcg under OOM, takes the OOM lock, notifies listeners and may call
              mem_cgroup_out_of_memory() before cleaning up current->memcg_oom.memcg
   1962-2047  page-state locking; the documented usage is
                  memcg = mem_cgroup_begin_page_stat(page);
                  [...]
                  mem_cgroup_update_page_stat(memcg, state, -1);
                  mem_cgroup_end_page_stat(memcg);
              begin only takes memcg->move_lock while pages are being moved to another memcg,
              end releases it, and update adds to memcg->stat->count[idx]
   2067-2193  per-cpu charge stock: consume_stock() succeeds only if @memcg matches the current
              CPU's cached memcg and enough pages are stocked; refill_stock() re-targets the cache;
              drain helpers flush the stocks and fold per-cpu counters into memcg->nocpu_base
              (the cache-hit logic is modeled in the sketch after this listing)
   2218-2337  try_charge(): the main charge path; tries the stock, then page_counter_try_charge()
              on memsw and memory, refills the stock with the unused part of the batch, and walks
              up the ancestors reclaiming from those above memcg->high (raising MEMCG_HIGH events)
   2342-2351  cancel_charge(): uncharges memory (and memsw) and drops the css references
   2355-2385  try_get_mem_cgroup_from_page(): look up page->mem_cgroup (or the swap cgroup id) and
              pin it with css_tryget_online()
   2419-2447  commit_charge(): sets page->mem_cgroup
   2454-2510  memcg_charge_kmem()/memcg_uncharge_kmem() and memcg_cache_id(): kmem accounting
              against memcg->kmem; memcg_cache_id() returns memcg->kmemcg_id, or -1 when the memcg
              is not kmem-limited
   2559-2664  per-memcg kmem cache creation: memcg_kmem_cache_create_work carries the memcg,
              memcg_schedule_kmem_cache_create() enqueues it, and __memcg_kmem_get_cache() selects
              the current memcg's copy of a cache via READ_ONCE(memcg->kmemcg_id)
   2675-2752  __memcg_kmem_newpage_charge()/_commit_charge()/_uncharge_pages() and
              __mem_cgroup_from_kmem(): page-level kmem charging against current->mm->owner's memcg
   2780-2784  mem_cgroup_swap_statistics(): adjusts the SWAP stat counter
   2826-2927  mem_cgroup_resize_limit() and mem_cgroup_resize_memsw_limit(): reclaim with
              try_to_free_mem_cgroup_pages() until page_counter_limit() succeeds, then
              memcg_oom_recover()
   2962-3025  mem_cgroup_soft_limit_reclaim() and memcg_has_children(): soft-limit reclaim over the
              per-zone tree (mz->memcg), and a child-existence test that ignores use_hierarchy
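
The per-CPU charge stock listed above (consume_stock()/refill_stock(), memcontrol.c:2067-2131)
can be modeled without any per-CPU machinery. The sketch assumes a single stock, leaves out irq
disabling and the draining of a re-targeted stock, and keeps only the cache-hit check; it is an
illustration of the idea, not the kernel code.

#include <stdbool.h>
#include <stdio.h>

struct mem_cgroup { const char *name; };

/* One pre-charged stock, standing in for the kernel's per-CPU memcg_stock. */
struct memcg_stock {
    struct mem_cgroup *cached;          /* memcg the stock was charged to   */
    unsigned int nr_pages;              /* pages already charged but unused */
};

/* Consume from the stock only if it belongs to @memcg and holds enough pages,
 * mirroring the check at memcontrol.c:2085. */
static bool consume_stock(struct memcg_stock *stock, struct mem_cgroup *memcg,
                          unsigned int nr_pages)
{
    if (memcg == stock->cached && stock->nr_pages >= nr_pages) {
        stock->nr_pages -= nr_pages;
        return true;
    }
    return false;                       /* caller falls back to the slow path */
}

/* Cache leftover charge for later use, re-targeting the stock if necessary
 * (cf. memcontrol.c:2125-2131). A real implementation would first return the
 * old stock's pages to the page counter they were charged against. */
static void refill_stock(struct memcg_stock *stock, struct mem_cgroup *memcg,
                         unsigned int nr_pages)
{
    if (stock->cached != memcg) {
        stock->cached = memcg;
        stock->nr_pages = 0;
    }
    stock->nr_pages += nr_pages;
}

int main(void)
{
    struct mem_cgroup a = { "A" }, b = { "B" };
    struct memcg_stock stock = { 0 };

    refill_stock(&stock, &a, 32);
    printf("A takes 8:  %d\n", consume_stock(&stock, &a, 8));    /* 1: hit  */
    printf("B takes 8:  %d\n", consume_stock(&stock, &b, 8));    /* 0: miss */
    printf("A takes 64: %d\n", consume_stock(&stock, &a, 64));   /* 0: miss */
    return 0;
}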

memcontrol.c (continued)
   3031-3070  mem_cgroup_force_empty() and its write handler: repeatedly reclaim with
              try_to_free_mem_cgroup_pages() until the memcg's usage drops to zero; rejected for
              the root memcg
   3083-3102  mem_cgroup_hierarchy_write(): use_hierarchy may only change while the memcg has no
              children
   3114-3142  tree_stat() and mem_cgroup_usage(): for the root memcg the usage is the CACHE + RSS
              (+ SWAP) statistics summed over the whole tree, otherwise it is read from
              memcg->memory or memcg->memsw (see the sketch after this listing)
   3158-3189  mem_cgroup_read_u64(): dispatches between the memory, memsw and kmem counters and the
              soft limit
   3196-3286  memcg_activate_kmem(), memcg_update_kmem_limit(), memcg_propagate_kmem(): enable kmem
              accounting (assigning memcg->kmemcg_id), refuse if the cgroup already has tasks or
              hierarchical children, and propagate activation from the parent
   3300-3349  mem_cgroup_write() and mem_cgroup_reset(): limit / soft-limit writes and counter resets
   3379-3390  mem_cgroup_move_charge_write(): sets memcg->move_charge_at_immigrate
   3418-3529  memcg_numa_stat_show() and memcg_stat_show(): per-node LRU counts, per-memcg
              statistics and hierarchical totals gathered with for_each_mem_cgroup_tree()
   3550-3564  swappiness read/write handlers
   3571-3664  __mem_cgroup_threshold(), mem_cgroup_threshold(), mem_cgroup_oom_notify(): usage
              threshold crossing and OOM notification, walking up with parent_mem_cgroup()
   3668-3883  usage and OOM eventfd registration/unregistration against memcg->thresholds,
              memcg->memsw_thresholds and memcg->oom_notify
   3895-3913  oom_control show/write handlers: memcg->oom_kill_disable and under_oom
   3919-3995  memcg_init_kmem(), memcg_deactivate_kmem(), memcg_destroy_kmem() (plus !CONFIG
              stubs): set up and tear down kmem and socket accounting for a memcg
   4022-4202  cgroup event plumbing (memcg_event_remove(), memcg_event_wake(),
              memcg_write_event_control()): each event carries the memcg it belongs to
   4334-4423  alloc/free_mem_cgroup_per_zone_info(), mem_cgroup_alloc(), __mem_cgroup_free(),
              parent_mem_cgroup(): allocation and teardown of the memcg and its per-node info
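
tree_stat() and mem_cgroup_usage() above (memcontrol.c:3114-3142) can be illustrated as follows.
The css hierarchy walk is replaced by an explicit child array and the statistics are plain
integers, so this is a model of the arithmetic only, with assumed field names, not of the real
iteration or locking.

#include <stdbool.h>
#include <stdio.h>

enum { STAT_CACHE, STAT_RSS, STAT_SWAP, NR_STATS };

struct mem_cgroup {
    bool is_root;
    unsigned long stat[NR_STATS];           /* own statistics, in pages          */
    unsigned long memory_count;             /* page_counter_read(&memcg->memory) */
    unsigned long memsw_count;              /* page_counter_read(&memcg->memsw)  */
    struct mem_cgroup *children[4];         /* flattened stand-in for the css tree */
    int nr_children;
};

/* Sum one statistic over the memcg and its whole subtree (cf. tree_stat()). */
static unsigned long tree_stat(const struct mem_cgroup *memcg, int idx)
{
    unsigned long val = memcg->stat[idx];

    for (int i = 0; i < memcg->nr_children; i++)
        val += tree_stat(memcg->children[i], idx);
    return val;
}

/* cf. mem_cgroup_usage(): for the root memcg the usage is reconstructed from
 * the hierarchical CACHE + RSS (+ SWAP) statistics; for any other memcg it is
 * read from the memory (or memsw) counter. */
static unsigned long mem_cgroup_usage(const struct mem_cgroup *memcg, bool swap)
{
    if (memcg->is_root) {
        unsigned long val = tree_stat(memcg, STAT_CACHE) +
                            tree_stat(memcg, STAT_RSS);
        if (swap)
            val += tree_stat(memcg, STAT_SWAP);
        return val;
    }
    return swap ? memcg->memsw_count : memcg->memory_count;
}

int main(void)
{
    struct mem_cgroup child = { .stat = { 10, 40, 5 },
                                .memory_count = 50, .memsw_count = 55 };
    struct mem_cgroup root  = { .is_root = true, .stat = { 100, 200, 0 },
                                .children = { &child }, .nr_children = 1 };

    printf("root usage:  %lu\n", mem_cgroup_usage(&root, false));   /* 350 */
    printf("child usage: %lu\n", mem_cgroup_usage(&child, true));   /* 55  */
    return 0;
}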

memcontrol.c (continued)
   4430-4467  mem_cgroup_css_alloc(): allocates the memcg and its per-zone info, initializes the
              memory/memsw/kmem page counters, sets high and soft_limit to PAGE_COUNTER_MAX,
              initializes the OOM-notify and event lists, locks and vmpressure state, and sets
              kmemcg_id to -1
   4474-4526  mem_cgroup_css_online(): inherits use_hierarchy, oom_kill_disable and swappiness
              from the parent, parents the page counters when use_hierarchy is set, runs
              memcg_init_kmem() and finally publishes the memcg with
              smp_store_release(&memcg->initialized, 1), paired with the check in mem_cgroup_iter()
   4533-4558  mem_cgroup_css_offline() and _css_free(): flush memcg->event_list, clean up
              vmpressure, deactivate and destroy kmem accounting, free the memcg
   4567-4583  mem_cgroup_css_reset(): resets the memory, memsw and kmem limits and the
              low/high/soft limits of a memcg that is offline but still pinned through dependencies
   5006-5034  mem_cgroup_can_attach(): reads memcg->move_charge_at_immigrate and sets up charge
              moving with mc.to = memcg
   5163-5231  comments in mem_cgroup_move_charge() and mem_cgroup_bind(): take the memcg's
              move_lock while its pages are moved to another memcg; binding only needs the root memcg
   5247-5350  default-hierarchy interface files: memory_low/high/max show and write handlers (the
              high handler reclaims usage above the new value) and memory_events_show() reporting
              the MEMCG_LOW/HIGH/MAX/OOM event counts
   5403-5411  mem_cgroup_events(): adds to memcg->stat->events[idx]
   5417-5445  mem_cgroup_low(): true only if @memcg and every ancestor up to (but excluding) the
              reclaim root stay below their low limits (sketched after this listing)
   5456-5509  mem_cgroup_try_charge(): resolve the memcg from the page or from @mm, try_charge()
              it, and return it in *@memcgp
   5516-5596  mem_cgroup_commit_charge() and mem_cgroup_cancel_charge(): finalize or back out a
              charge, updating statistics and events
   5599-5681  uncharge_batch() and uncharge_list(): batched uncharging; counters, statistics and
              css references are released per distinct page->mem_cgroup
   5734-5770  mem_cgroup_migrate(): carries oldpage->mem_cgroup's charge over to the new page
   5822-5872  mem_cgroup_swapout() and mem_cgroup_uncharge_swap(): record mem_cgroup_id(memcg) in
              the swap cgroup map on swap-out, and release the memsw charge when the swap entry
              is freed
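
The mem_cgroup_low() walk excerpted above (memcontrol.c:5422-5445) is short enough to restate as
a self-contained model. Parent pointers stand in for the css hierarchy and "has no parent" stands
in for the global root_mem_cgroup check; everything else follows the excerpted logic.

#include <stdbool.h>
#include <stdio.h>

struct mem_cgroup {
    struct mem_cgroup *parent;          /* NULL for the hierarchy root        */
    unsigned long usage;                /* page_counter_read(&memcg->memory)  */
    unsigned long low;                  /* memcg->low                         */
};

/* cf. mem_cgroup_low(): @memcg is considered protected under @root only if it
 * and every ancestor up to (but not including) @root stay below their low
 * limits. The hierarchy root itself is never protected. */
static bool mem_cgroup_low(const struct mem_cgroup *root,
                           const struct mem_cgroup *memcg)
{
    if (!memcg->parent)                 /* the hierarchy root */
        return false;

    if (memcg->usage >= memcg->low)
        return false;

    while (memcg != root) {
        memcg = memcg->parent;
        if (!memcg->parent)             /* reached the hierarchy root */
            break;
        if (memcg->usage >= memcg->low)
            return false;
    }
    return true;
}

int main(void)
{
    struct mem_cgroup root   = { .parent = NULL };
    struct mem_cgroup parent = { .parent = &root,   .usage = 80, .low = 100 };
    struct mem_cgroup child  = { .parent = &parent, .usage = 10, .low = 50 };

    printf("child protected: %d\n", mem_cgroup_low(&root, &child));   /* 1 */
    parent.usage = 200;                 /* parent is now over its low limit */
    printf("child protected: %d\n", mem_cgroup_low(&root, &child));   /* 0 */
    return 0;
}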

vmpressure.c
      81-86  vmpressure_parent(): steps to parent_mem_cgroup(memcg), bails out if there is none,
             and otherwise returns the parent's vmpressure via memcg_to_vmpressure()
    201-214  vmpressure(gfp, memcg, ...): looks up the memcg's struct vmpressure with
             memcg_to_vmpressure() and accounts the reclaim activity against it
    255-279  vmpressure_prio(gfp, memcg, prio): forwards to vmpressure(gfp, memcg, vmpressure_win, 0)
    284-299  vmpressure_register_event(memcg, ...): memcg event method attaching an eventfd
             listener to memcg_to_vmpressure(memcg)
    327-339  vmpressure_unregister_event(memcg, ...): the matching memcg event teardown method

list_lru.c
     65-74  list_lru_from_kmem(): resolves the object's memcg with mem_cgroup_from_kmem(ptr) and,
            when one exists, indexes the per-memcg lists with memcg_cache_id(memcg)
            (modeled in the sketch below)
    163-166 list_lru_count_one(lru, nid, memcg): returns __list_lru_count_one(lru, nid,
            memcg_cache_id(memcg))
    245-249 list_lru_walk_one(lru, nid, memcg, ...): returns __list_lru_walk_one(lru, nid,
            memcg_cache_id(memcg), ...)
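
The indexing pattern in list_lru_from_kmem() and list_lru_count_one() above can be pictured as an
array of per-memcg list heads selected by memcg_cache_id(). In the sketch below the fallback to a
per-node "global" list for objects without a kmem-accounted memcg is an assumption of this model
(that branch is elided in the listing), and the structures are simplified stand-ins rather than
the kernel types.

#include <stdio.h>

#define NR_MEMCG_IDS 4

struct list_head_stub { int nr_items; };        /* stand-in for a real list */

struct list_lru_node {
    struct list_head_stub global;                   /* used when there is no memcg */
    struct list_head_stub per_memcg[NR_MEMCG_IDS];  /* indexed by the kmemcg id    */
};

struct mem_cgroup { int kmemcg_id; };           /* -1 while kmem accounting is off */

/* cf. memcg_cache_id(): -1 when there is no kmem-limited memcg. */
static int memcg_cache_id(const struct mem_cgroup *memcg)
{
    return memcg ? memcg->kmemcg_id : -1;
}

/* cf. list_lru_from_kmem(): pick the list an object should live on. */
static struct list_head_stub *list_lru_from_memcg(struct list_lru_node *nlru,
                                                  const struct mem_cgroup *memcg)
{
    int idx = memcg_cache_id(memcg);

    return idx < 0 ? &nlru->global : &nlru->per_memcg[idx];
}

int main(void)
{
    struct list_lru_node node = { .global = { 7 } };
    struct mem_cgroup m = { .kmemcg_id = 2 };

    node.per_memcg[2].nr_items = 3;
    printf("memcg list items:  %d\n", list_lru_from_memcg(&node, &m)->nr_items);
    printf("global list items: %d\n", list_lru_from_memcg(&node, NULL)->nr_items);
    return 0;
}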

slab_common.c
    116-193  init_memcg_params(s, memcg, root_cache): records s->memcg_params.memcg = memcg for a
             per-memcg cache (with a no-op variant when memcg kmem accounting is compiled out)
    298-316  do_kmem_cache_create(..., memcg, root_cache): calls init_memcg_params() so the new
             cache knows which memcg it belongs to
    470-522  memcg_create_kmem_cache(memcg, root_cache): creates the per-memcg copy of a root cache
             for allocations going from @memcg to @root_cache; bails out unless
             memcg_kmem_is_active(memcg), uses memcg_cache_id(memcg) as the slot index, and does
             not complain on failure since per-memcg caches are created asynchronously on first use
             (the slot lookup is modeled in the sketch below)
    548-554  memcg_deactivate_kmem_caches(memcg): deactivates the caches at the memcg's index
    579-590  memcg_destroy_kmem_caches(memcg): destroys the caches whose memcg_params.memcg ==
             memcg, skipping root caches
   1020-1024 memcg_slab_show(): lists only the caches belonging to the seq file's memcg
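
The slot lookup that memcg_create_kmem_cache() above and __memcg_kmem_get_cache()
(memcontrol.c:2619-2664) cooperate on can be pictured as an array of cache pointers hanging off
the root cache, indexed by memcg_cache_id(). This is a hedged sketch with made-up structure
layouts: a missing slot simply falls back to the root cache here, whereas the kernel would also
schedule asynchronous creation of the per-memcg copy.

#include <stddef.h>
#include <stdio.h>

#define MAX_KMEMCG_IDS 8

struct mem_cgroup { int kmemcg_id; };           /* -1: kmem accounting inactive */

struct kmem_cache {
    const char *name;
    struct kmem_cache *memcg_caches[MAX_KMEMCG_IDS];    /* per-memcg copies */
};

/* cf. memcg_cache_id() */
static int memcg_cache_id(const struct mem_cgroup *memcg)
{
    return memcg ? memcg->kmemcg_id : -1;
}

/* cf. __memcg_kmem_get_cache(): return the memcg's copy if it already exists,
 * otherwise fall back to the root cache. */
static struct kmem_cache *memcg_get_cache(struct kmem_cache *root,
                                          const struct mem_cgroup *memcg)
{
    int idx = memcg_cache_id(memcg);

    if (idx < 0 || !root->memcg_caches[idx])
        return root;
    return root->memcg_caches[idx];
}

int main(void)
{
    struct kmem_cache root = { .name = "dentry" };
    struct kmem_cache copy = { .name = "dentry(2:grp)" };   /* suffixed name, cf. slab.h:190 */
    struct mem_cgroup grp  = { .kmemcg_id = 2 };

    printf("before: %s\n", memcg_get_cache(&root, &grp)->name);     /* dentry        */
    root.memcg_caches[2] = &copy;           /* "asynchronous creation" has completed */
    printf("after:  %s\n", memcg_get_cache(&root, &grp)->name);     /* dentry(2:grp) */
    return 0;
}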
|
H A D | oom_kill.c | 122 struct mem_cgroup *memcg, const nodemask_t *nodemask) oom_unkillable_task() 130 if (memcg && !task_in_mem_cgroup(p, memcg)) oom_unkillable_task() 149 unsigned long oom_badness(struct task_struct *p, struct mem_cgroup *memcg, oom_badness() argument 155 if (oom_unkillable_task(p, memcg, nodemask)) oom_badness() 340 * @memcg: current's memory controller, if constrained 344 * memcg, not in the same cpuset, or bound to a disjoint set of mempolicy nodes 349 static void dump_tasks(struct mem_cgroup *memcg, const nodemask_t *nodemask) dump_tasks() argument 357 if (oom_unkillable_task(p, memcg, nodemask)) for_each_process() 383 struct mem_cgroup *memcg, const nodemask_t *nodemask) dump_header() 393 if (memcg) dump_header() 394 mem_cgroup_print_oom_info(memcg, p); dump_header() 398 dump_tasks(memcg, nodemask); dump_header() 420 /* OOM killer might race with memcg OOM */ mark_tsk_oom_victim() 503 struct mem_cgroup *memcg, nodemask_t *nodemask, oom_kill_process() 528 dump_header(p, gfp_mask, order, memcg, nodemask); oom_kill_process() 551 child_points = oom_badness(child, memcg, nodemask, for_each_thread() 616 struct mem_cgroup *memcg) check_panic_on_oom() 623 * does not panic for cpuset, mempolicy, or memcg allocation check_panic_on_oom() 629 dump_header(NULL, gfp_mask, order, memcg, nodemask); check_panic_on_oom() 121 oom_unkillable_task(struct task_struct *p, struct mem_cgroup *memcg, const nodemask_t *nodemask) oom_unkillable_task() argument 382 dump_header(struct task_struct *p, gfp_t gfp_mask, int order, struct mem_cgroup *memcg, const nodemask_t *nodemask) dump_header() argument 501 oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order, unsigned int points, unsigned long totalpages, struct mem_cgroup *memcg, nodemask_t *nodemask, const char *message) oom_kill_process() argument 614 check_panic_on_oom(enum oom_constraint constraint, gfp_t gfp_mask, int order, const nodemask_t *nodemask, struct mem_cgroup *memcg) check_panic_on_oom() argument
|
H A D | slab.h | 167 * Iterate over all memcg caches of the given root cache. The caller must hold 190 * We use suffixes to the name in memcg because we can't have caches 202 * Note, we protect with RCU only the memcg_caches array, not per-memcg caches. 203 * That said the caller must assure the memcg's cache won't go away by either 240 return memcg_charge_kmem(s->memcg_params.memcg, gfp, 1 << order); memcg_charge_slab() 249 memcg_uncharge_kmem(s->memcg_params.memcg, 1 << order); memcg_uncharge_slab()
|
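The mm/slab.h hits above show the thin wrappers that bill slab pages to the cache's owning memcg. The sketch below reconstructs that accounting step from the two return statements in the hits; the memcg_kmem_enabled()/is_root_cache() guards are assumptions about the elided surrounding lines.

/* Sketch of memcg_charge_slab()/memcg_uncharge_slab() as suggested by
 * the hits above: a slab page of 2^order pages is charged to the memcg
 * stored in the cache's memcg_params; root caches are not accounted. */
static int charge_slab_page(struct kmem_cache *s, gfp_t gfp, int order)
{
        if (!memcg_kmem_enabled() || is_root_cache(s))
                return 0;       /* assumed guards; only the return below
                                   appears verbatim in the listing */
        return memcg_charge_kmem(s->memcg_params.memcg, gfp, 1 << order);
}

static void uncharge_slab_page(struct kmem_cache *s, int order)
{
        if (!memcg_kmem_enabled() || is_root_cache(s))
                return;
        memcg_uncharge_kmem(s->memcg_params.memcg, 1 << order);
}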
H A D | rmap.c | 718 struct mem_cgroup *memcg; member in struct:page_referenced_arg 799 struct mem_cgroup *memcg = pra->memcg; invalid_page_referenced_vma() local 801 if (!mm_match_cgroup(vma->vm_mm, memcg)) invalid_page_referenced_vma() 811 * @memcg: target memory cgroup 819 struct mem_cgroup *memcg, page_referenced() 826 .memcg = memcg, page_referenced() 852 if (memcg) { page_referenced() 1094 struct mem_cgroup *memcg; page_add_file_rmap() local 1096 memcg = mem_cgroup_begin_page_stat(page); page_add_file_rmap() 1099 mem_cgroup_inc_page_stat(memcg, MEM_CGROUP_STAT_FILE_MAPPED); page_add_file_rmap() 1101 mem_cgroup_end_page_stat(memcg); page_add_file_rmap() 1106 struct mem_cgroup *memcg; page_remove_file_rmap() local 1108 memcg = mem_cgroup_begin_page_stat(page); page_remove_file_rmap() 1124 mem_cgroup_dec_page_stat(memcg, MEM_CGROUP_STAT_FILE_MAPPED); page_remove_file_rmap() 1129 mem_cgroup_end_page_stat(memcg); page_remove_file_rmap() 817 page_referenced(struct page *page, int is_locked, struct mem_cgroup *memcg, unsigned long *vm_flags) page_referenced() argument
|
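The rmap.c hits show the locking pattern around per-memcg page statistics: mem_cgroup_begin_page_stat() pins the page's memcg, the counter is updated, and mem_cgroup_end_page_stat() releases it. Here is a condensed sketch of that pattern; account_file_mapped() is a hypothetical helper, while the functions and the MEM_CGROUP_STAT_FILE_MAPPED index are taken from the hits.

#include <linux/memcontrol.h>

/* Hypothetical helper condensing page_add_file_rmap() /
 * page_remove_file_rmap() from the hits above: the begin/end pair keeps
 * the page's memcg association stable while the stat is updated. */
static void account_file_mapped(struct page *page, bool mapped)
{
        struct mem_cgroup *memcg;

        memcg = mem_cgroup_begin_page_stat(page);
        if (mapped)
                mem_cgroup_inc_page_stat(memcg, MEM_CGROUP_STAT_FILE_MAPPED);
        else
                mem_cgroup_dec_page_stat(memcg, MEM_CGROUP_STAT_FILE_MAPPED);
        mem_cgroup_end_page_stat(memcg);
}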
H A D | vmscan.c | 350 * @memcg: memory cgroup whose slab caches to target 359 * @memcg specifies the memory cgroup to target. If it is not NULL, 362 * are called, and memcg aware shrinkers are supposed to scan the 376 struct mem_cgroup *memcg, shrink_slab() 383 if (memcg && !memcg_kmem_is_active(memcg)) shrink_slab() 404 .memcg = memcg, shrink_slab() 407 if (memcg && !(shrinker->flags & SHRINKER_MEMCG_AWARE)) shrink_slab() 427 struct mem_cgroup *memcg = NULL; drop_slab_node() local 431 freed += shrink_slab(GFP_KERNEL, nid, memcg, drop_slab_node() 433 } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)) != NULL); drop_slab_node() 938 * 2) Global reclaim encounters a page, memcg encounters a shrink_page_list() 951 * 3) memcg encounters a page that is not already marked shrink_page_list() 952 * PageReclaim. memcg does not have any dirty pages shrink_page_list() 974 * page to have PageReclaim set next time memcg shrink_page_list() 1607 * memcg will stall in page writeback so only consider forcibly shrink_inactive_list() 1934 * If the zone or memcg is small, nr[l] can be 0. This get_scan_count() 1939 * reclaiming for a memcg, a priority drop can cause high get_scan_count() 1960 * swappiness, but memcg users want to use this knob to get_scan_count() 2144 * do a batch of work at once. For memcg reclaim one check is made to shrink_lruvec() 2171 * For kswapd and memcg, reclaim at least the number of pages 2324 struct mem_cgroup *memcg; shrink_zone() local 2329 memcg = mem_cgroup_iter(root, NULL, &reclaim); shrink_zone() 2336 if (mem_cgroup_low(root, memcg)) { shrink_zone() 2339 mem_cgroup_events(memcg, MEMCG_LOW, 1); shrink_zone() 2342 lruvec = mem_cgroup_zone_lruvec(zone, memcg); shrink_zone() 2343 swappiness = mem_cgroup_swappiness(memcg); shrink_zone() 2349 if (memcg && is_classzone) shrink_zone() 2351 memcg, sc->nr_scanned - scanned, shrink_zone() 2366 mem_cgroup_iter_break(root, memcg); shrink_zone() 2369 } while ((memcg = mem_cgroup_iter(root, memcg, &reclaim))); shrink_zone() 2518 * and balancing, not for a memcg's limit. for_each_zone_zonelist_nodemask() 2797 unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *memcg, mem_cgroup_shrink_node_zone() argument 2804 .target_mem_cgroup = memcg, mem_cgroup_shrink_node_zone() 2809 struct lruvec *lruvec = mem_cgroup_zone_lruvec(zone, memcg); mem_cgroup_shrink_node_zone() 2810 int swappiness = mem_cgroup_swappiness(memcg); mem_cgroup_shrink_node_zone() 2835 unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg, try_to_free_mem_cgroup_pages() argument 2847 .target_mem_cgroup = memcg, try_to_free_mem_cgroup_pages() 2855 * Unlike direct reclaim via alloc_pages(), memcg's reclaim doesn't try_to_free_mem_cgroup_pages() 2859 nid = mem_cgroup_select_victim_node(memcg); try_to_free_mem_cgroup_pages() 2877 struct mem_cgroup *memcg; age_active_anon() local 2882 memcg = mem_cgroup_iter(NULL, NULL, NULL); age_active_anon() 2884 struct lruvec *lruvec = mem_cgroup_zone_lruvec(zone, memcg); age_active_anon() 2890 memcg = mem_cgroup_iter(NULL, memcg, NULL); age_active_anon() 2891 } while (memcg); age_active_anon() 375 shrink_slab(gfp_t gfp_mask, int nid, struct mem_cgroup *memcg, unsigned long nr_scanned, unsigned long nr_eligible) shrink_slab() argument
|
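drop_slab_node() in the vmscan.c hits walks every memcg with mem_cgroup_iter() and calls shrink_slab() once per cgroup, so memcg-aware shrinkers see each cgroup's objects separately. The sketch below reconstructs that loop with the scan-pressure arguments left as parameters (the values actually passed by drop_slab_node() are truncated in the listing); shrink_slab() is static to mm/vmscan.c, so this helper is imagined living there.

/* Illustrative reconstruction of the drop_slab_node() inner loop. */
static unsigned long shrink_slab_all_memcgs(gfp_t gfp_mask, int nid,
                                            unsigned long nr_scanned,
                                            unsigned long nr_eligible)
{
        struct mem_cgroup *memcg = NULL;
        unsigned long freed = 0;

        /* The first pass runs with memcg == NULL (global state), then
         * mem_cgroup_iter(NULL, prev, NULL) continues the hierarchy walk
         * from the previous position until it returns NULL. */
        do {
                freed += shrink_slab(gfp_mask, nid, memcg,
                                     nr_scanned, nr_eligible);
        } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)) != NULL);

        return freed;
}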
H A D | hwpoison-inject.c | 130 dentry = debugfs_create_u64("corrupt-filter-memcg", 0600, pfn_inject_init()
|
H A D | huge_memory.c | 722 struct mem_cgroup *memcg; __do_huge_pmd_anonymous_page() local 728 if (mem_cgroup_try_charge(page, mm, gfp, &memcg)) __do_huge_pmd_anonymous_page() 733 mem_cgroup_cancel_charge(page, memcg); __do_huge_pmd_anonymous_page() 748 mem_cgroup_cancel_charge(page, memcg); __do_huge_pmd_anonymous_page() 756 mem_cgroup_commit_charge(page, memcg, false); __do_huge_pmd_anonymous_page() 980 struct mem_cgroup *memcg; do_huge_pmd_wp_page_fallback() local 1002 &memcg))) { do_huge_pmd_wp_page_fallback() 1006 memcg = (void *)page_private(pages[i]); do_huge_pmd_wp_page_fallback() 1008 mem_cgroup_cancel_charge(pages[i], memcg); do_huge_pmd_wp_page_fallback() 1015 set_page_private(pages[i], (unsigned long)memcg); do_huge_pmd_wp_page_fallback() 1044 memcg = (void *)page_private(pages[i]); do_huge_pmd_wp_page_fallback() 1047 mem_cgroup_commit_charge(pages[i], memcg, false); do_huge_pmd_wp_page_fallback() 1073 memcg = (void *)page_private(pages[i]); do_huge_pmd_wp_page_fallback() 1075 mem_cgroup_cancel_charge(pages[i], memcg); do_huge_pmd_wp_page_fallback() 1088 struct mem_cgroup *memcg; do_huge_pmd_wp_page() local 1141 if (unlikely(mem_cgroup_try_charge(new_page, mm, huge_gfp, &memcg))) { do_huge_pmd_wp_page() 1170 mem_cgroup_cancel_charge(new_page, memcg); do_huge_pmd_wp_page() 1179 mem_cgroup_commit_charge(new_page, memcg, false); do_huge_pmd_wp_page() 1648 /* complete memcg works before add pages to LRU */ __split_huge_page_refcount() 2439 struct mem_cgroup *memcg; collapse_huge_page() local 2456 gfp, &memcg))) collapse_huge_page() 2543 mem_cgroup_commit_charge(new_page, memcg, false); collapse_huge_page() 2558 mem_cgroup_cancel_charge(new_page, memcg); collapse_huge_page()
|
H A D | swap_cgroup.c | 23 * In typical usage, this swap_cgroup is accessed via memcg's charge/uncharge
|
H A D | memory.c | 2069 struct mem_cgroup *memcg; wp_page_copy() local 2086 if (mem_cgroup_try_charge(new_page, mm, GFP_KERNEL, &memcg)) wp_page_copy() 2115 mem_cgroup_commit_charge(new_page, memcg, false); wp_page_copy() 2154 mem_cgroup_cancel_charge(new_page, memcg); wp_page_copy() 2450 struct mem_cgroup *memcg; do_swap_page() local 2529 if (mem_cgroup_try_charge(page, mm, GFP_KERNEL, &memcg)) { do_swap_page() 2571 mem_cgroup_commit_charge(page, memcg, true); do_swap_page() 2574 mem_cgroup_commit_charge(page, memcg, false); do_swap_page() 2609 mem_cgroup_cancel_charge(page, memcg); do_swap_page() 2665 struct mem_cgroup *memcg; do_anonymous_page() local 2703 if (mem_cgroup_try_charge(page, mm, GFP_KERNEL, &memcg)) do_anonymous_page() 2716 mem_cgroup_commit_charge(page, memcg, false); do_anonymous_page() 2727 mem_cgroup_cancel_charge(page, memcg); do_anonymous_page() 2966 struct mem_cgroup *memcg; do_cow_fault() local 2978 if (mem_cgroup_try_charge(new_page, mm, GFP_KERNEL, &memcg)) { do_cow_fault() 3007 mem_cgroup_commit_charge(new_page, memcg, false); do_cow_fault() 3022 mem_cgroup_cancel_charge(new_page, memcg); do_cow_fault() 3410 * Enable the memcg OOM handling for faults triggered in user handle_mm_fault() 3421 * The task may have entered a memcg OOM situation but handle_mm_fault()
|
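The memory.c hits (and the shmem.c, filemap.c and swapfile.c hits that follow) all use the same three-step charge protocol: mem_cgroup_try_charge() reserves the charge before the page is wired up, mem_cgroup_commit_charge() finalizes it once the mapping or page-cache insertion succeeds, and mem_cgroup_cancel_charge() backs out on failure. A condensed sketch of the pattern follows; install_page() and the error codes are placeholders, not kernel code.

#include <linux/memcontrol.h>

/* Hypothetical fault-path helper showing the try/commit/cancel protocol
 * from the hits above. install_page() stands in for the real work
 * (setting the PTE, inserting into the page cache, ...). */
static int charge_and_install(struct page *page, struct mm_struct *mm)
{
        struct mem_cgroup *memcg;

        if (mem_cgroup_try_charge(page, mm, GFP_KERNEL, &memcg))
                return -ENOMEM;         /* charge refused, nothing to undo */

        if (install_page(page)) {       /* placeholder for the real step */
                mem_cgroup_cancel_charge(page, memcg);
                return -EFAULT;
        }

        /* lrucare == false: the page is new and not yet on the LRU.
         * do_swap_page() above passes true for pages that may already
         * be on the LRU via the swap cache. */
        mem_cgroup_commit_charge(page, memcg, false);
        return 0;
}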
H A D | shmem.c | 694 struct mem_cgroup *memcg; shmem_unuse() local 709 error = mem_cgroup_try_charge(page, current->mm, GFP_KERNEL, &memcg); shmem_unuse() 732 mem_cgroup_cancel_charge(page, memcg); shmem_unuse() 734 mem_cgroup_commit_charge(page, memcg, true); shmem_unuse() 1038 struct mem_cgroup *memcg; shmem_getpage_gfp() local 1117 error = mem_cgroup_try_charge(page, current->mm, gfp, &memcg); shmem_getpage_gfp() 1134 mem_cgroup_cancel_charge(page, memcg); shmem_getpage_gfp() 1141 mem_cgroup_commit_charge(page, memcg, true); shmem_getpage_gfp() 1180 error = mem_cgroup_try_charge(page, current->mm, gfp, &memcg); shmem_getpage_gfp() 1190 mem_cgroup_cancel_charge(page, memcg); shmem_getpage_gfp() 1193 mem_cgroup_commit_charge(page, memcg, false); shmem_getpage_gfp()
|
H A D | page-writeback.c | 2339 struct mem_cgroup *memcg; test_clear_page_writeback() local 2342 memcg = mem_cgroup_begin_page_stat(page); test_clear_page_writeback() 2363 mem_cgroup_dec_page_stat(memcg, MEM_CGROUP_STAT_WRITEBACK); test_clear_page_writeback() 2367 mem_cgroup_end_page_stat(memcg); test_clear_page_writeback() 2374 struct mem_cgroup *memcg; __test_set_page_writeback() local 2377 memcg = mem_cgroup_begin_page_stat(page); __test_set_page_writeback() 2404 mem_cgroup_inc_page_stat(memcg, MEM_CGROUP_STAT_WRITEBACK); __test_set_page_writeback() 2407 mem_cgroup_end_page_stat(memcg); __test_set_page_writeback()
|
H A D | filemap.c | 549 struct mem_cgroup *memcg; __add_to_page_cache_locked() local 557 gfp_mask, &memcg); __add_to_page_cache_locked() 565 mem_cgroup_cancel_charge(page, memcg); __add_to_page_cache_locked() 581 mem_cgroup_commit_charge(page, memcg, false); __add_to_page_cache_locked() 589 mem_cgroup_cancel_charge(page, memcg); __add_to_page_cache_locked()
|
H A D | swapfile.c | 1096 struct mem_cgroup *memcg; unuse_pte() local 1106 if (mem_cgroup_try_charge(page, vma->vm_mm, GFP_KERNEL, &memcg)) { unuse_pte() 1113 mem_cgroup_cancel_charge(page, memcg); unuse_pte() 1125 mem_cgroup_commit_charge(page, memcg, true); unuse_pte() 1128 mem_cgroup_commit_charge(page, memcg, false); unuse_pte()
|
H A D | memory-failure.c | 123 * by putting them under some memcg. This prevents killing unrelated/important 127 * is also included in the memcg. At last, due to race conditions this filter 128 * can only guarantee that the page either belongs to the memcg tasks, or is
|
H A D | page_alloc.c | 3007 struct mem_cgroup *memcg = NULL; alloc_kmem_pages() local 3009 if (!memcg_kmem_newpage_charge(gfp_mask, &memcg, order)) alloc_kmem_pages() 3012 memcg_kmem_commit_charge(page, memcg, order); alloc_kmem_pages() 3019 struct mem_cgroup *memcg = NULL; alloc_kmem_pages_node() local 3021 if (!memcg_kmem_newpage_charge(gfp_mask, &memcg, order)) alloc_kmem_pages_node() 3024 memcg_kmem_commit_charge(page, memcg, order); alloc_kmem_pages_node()
|
H A D | swap.c | 82 * (it's never listed to any LRU lists) and no memcg routines should __put_compound_page()
|
/linux-4.1.27/include/linux/ |
H A D | memcontrol.h | 70 void mem_cgroup_events(struct mem_cgroup *memcg, 74 bool mem_cgroup_low(struct mem_cgroup *root, struct mem_cgroup *memcg); 78 void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg, 80 void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg); 90 bool mem_cgroup_is_descendant(struct mem_cgroup *memcg, 92 bool task_in_mem_cgroup(struct task_struct *task, struct mem_cgroup *memcg); 97 extern struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg); 101 struct mem_cgroup *memcg) mm_match_cgroup() 109 match = mem_cgroup_is_descendant(task_memcg, memcg); mm_match_cgroup() 114 extern struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *memcg); 126 int mem_cgroup_select_victim_node(struct mem_cgroup *memcg); 129 extern void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, 146 return p->memcg_oom.memcg; task_in_memcg_oom() 163 void mem_cgroup_update_page_stat(struct mem_cgroup *memcg, 165 void mem_cgroup_end_page_stat(struct mem_cgroup *memcg); 167 static inline void mem_cgroup_inc_page_stat(struct mem_cgroup *memcg, mem_cgroup_inc_page_stat() argument 170 mem_cgroup_update_page_stat(memcg, idx, 1); mem_cgroup_inc_page_stat() 173 static inline void mem_cgroup_dec_page_stat(struct mem_cgroup *memcg, mem_cgroup_dec_page_stat() argument 176 mem_cgroup_update_page_stat(memcg, idx, -1); mem_cgroup_dec_page_stat() 198 static inline void mem_cgroup_events(struct mem_cgroup *memcg, mem_cgroup_events() argument 205 struct mem_cgroup *memcg) mem_cgroup_low() 219 struct mem_cgroup *memcg, mem_cgroup_commit_charge() 225 struct mem_cgroup *memcg) mem_cgroup_cancel_charge() 244 struct mem_cgroup *memcg) mem_cgroup_zone_lruvec() 261 struct mem_cgroup *memcg) mm_match_cgroup() 267 const struct mem_cgroup *memcg) task_in_mem_cgroup() 273 *mem_cgroup_css(struct mem_cgroup *memcg) mem_cgroup_css() argument 320 mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p) mem_cgroup_print_oom_info() argument 329 static inline void mem_cgroup_end_page_stat(struct mem_cgroup *memcg) mem_cgroup_end_page_stat() argument 351 static inline void mem_cgroup_inc_page_stat(struct mem_cgroup *memcg, mem_cgroup_inc_page_stat() argument 356 static inline void mem_cgroup_dec_page_stat(struct mem_cgroup *memcg, mem_cgroup_dec_page_stat() argument 406 * Helper macro to loop through all memcg-specific caches. Callers must still 418 bool memcg_kmem_is_active(struct mem_cgroup *memcg); 422 * for non-memcg users for the kmem functions. Not even a function call, if we 431 bool __memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg, 434 struct mem_cgroup *memcg, int order); 437 int memcg_cache_id(struct mem_cgroup *memcg); 444 int memcg_charge_kmem(struct mem_cgroup *memcg, gfp_t gfp, 446 void memcg_uncharge_kmem(struct mem_cgroup *memcg, unsigned long nr_pages); 451 * @memcg: a pointer to the memcg this was charged against. 454 * returns true if the memcg where the current task belongs can hold this 458 * any memcg. 461 memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg, int order) memcg_kmem_newpage_charge() argument 483 return __memcg_kmem_newpage_charge(gfp, memcg, order); memcg_kmem_newpage_charge() 487 * memcg_kmem_uncharge_pages: uncharge pages from memcg 499 * memcg_kmem_commit_charge: embeds correct memcg in a page 501 * @memcg: the memcg structure we charged against 506 * charges. Otherwise, it will commit @page to @memcg. 
509 memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg, int order) memcg_kmem_commit_charge() argument 511 if (memcg_kmem_enabled() && memcg) memcg_kmem_commit_charge() 512 __memcg_kmem_commit_charge(page, memcg, order); memcg_kmem_commit_charge() 516 * memcg_kmem_get_cache: selects the correct per-memcg cache for allocation 520 * All memory allocated from a per-memcg cache is charged to the owner memcg. 560 static inline bool memcg_kmem_is_active(struct mem_cgroup *memcg) memcg_kmem_is_active() argument 566 memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg, int order) memcg_kmem_newpage_charge() argument 576 memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg, int order) memcg_kmem_commit_charge() argument 580 static inline int memcg_cache_id(struct mem_cgroup *memcg) memcg_cache_id() argument 100 mm_match_cgroup(struct mm_struct *mm, struct mem_cgroup *memcg) mm_match_cgroup() argument 204 mem_cgroup_low(struct mem_cgroup *root, struct mem_cgroup *memcg) mem_cgroup_low() argument 218 mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg, bool lrucare) mem_cgroup_commit_charge() argument 224 mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg) mem_cgroup_cancel_charge() argument 243 mem_cgroup_zone_lruvec(struct zone *zone, struct mem_cgroup *memcg) mem_cgroup_zone_lruvec() argument 260 mm_match_cgroup(struct mm_struct *mm, struct mem_cgroup *memcg) mm_match_cgroup() argument 266 task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *memcg) task_in_mem_cgroup() argument
|
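The memcontrol.h comments above describe the kmem page-charging handshake, and the page_alloc.c hits earlier (alloc_kmem_pages()) show it in use. The sketch below is modelled on those hits; alloc_charged_pages() is a hypothetical wrapper, and the inline memcg_kmem_commit_charge() shown above is a no-op when memcg is NULL or kmem accounting is disabled.

/* Hypothetical wrapper mirroring the alloc_kmem_pages() hits from
 * page_alloc.c: ask whether the current task's memcg accepts the
 * charge, allocate, then bind the pages to that memcg. */
static struct page *alloc_charged_pages(gfp_t gfp_mask, unsigned int order)
{
        struct page *page;
        struct mem_cgroup *memcg = NULL;

        if (!memcg_kmem_newpage_charge(gfp_mask, &memcg, order))
                return NULL;            /* the memcg is over its kmem limit */

        page = alloc_pages(gfp_mask, order);
        /* Committed unconditionally, as in the page_alloc.c hit; the
         * inline wrapper above skips the call when memcg is NULL. */
        memcg_kmem_commit_charge(page, memcg, order);
        return page;
}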
H A D | vmpressure.h | 29 extern void vmpressure(gfp_t gfp, struct mem_cgroup *memcg, 31 extern void vmpressure_prio(gfp_t gfp, struct mem_cgroup *memcg, int prio); 35 extern struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg); 37 extern int vmpressure_register_event(struct mem_cgroup *memcg, 40 extern void vmpressure_unregister_event(struct mem_cgroup *memcg, 43 static inline void vmpressure(gfp_t gfp, struct mem_cgroup *memcg, vmpressure() argument 45 static inline void vmpressure_prio(gfp_t gfp, struct mem_cgroup *memcg, vmpressure_prio() argument
|
H A D | oom.h | 55 struct mem_cgroup *memcg, const nodemask_t *nodemask, 62 struct mem_cgroup *memcg, nodemask_t *nodemask, 70 struct mem_cgroup *memcg);
|
H A D | shrinker.h | 24 /* current memcg being shrunk (for memcg aware shrinkers) */ 25 struct mem_cgroup *memcg; member in struct:shrink_control
|
H A D | list_lru.h | 29 /* may become negative during memcg reparenting */ 102 * @memcg: the cgroup to count from. 109 int nid, struct mem_cgroup *memcg); 115 return list_lru_count_one(lru, sc->nid, sc->memcg); list_lru_shrink_count() 140 * @memcg: the cgroup to scan from. 159 int nid, struct mem_cgroup *memcg, 170 return list_lru_walk_one(lru, sc->nid, sc->memcg, isolate, cb_arg, list_lru_shrink_walk()
|
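The shrinker.h and list_lru.h hits show how the memcg plumbing reaches shrinkers: a SHRINKER_MEMCG_AWARE shrinker receives sc->memcg and sc->nid, and the list_lru_shrink_count()/list_lru_shrink_walk() wrappers select the matching per-memcg list (fs/super.c's super_cache_scan(), noted later, is the in-tree user). Below is a sketch of such a shrinker; my_lru, my_isolate() and the other my_* names are hypothetical, the isolate callback prototype is assumed to match this tree's list_lru_walk_cb, and registration via register_shrinker() is omitted.

#include <linux/list_lru.h>
#include <linux/shrinker.h>

static struct list_lru my_lru;                  /* hypothetical LRU */

/* Assumed list_lru_walk_cb prototype for this tree. */
static enum lru_status my_isolate(struct list_head *item,
                                  struct list_lru_one *list,
                                  spinlock_t *lock, void *cb_arg)
{
        /* A real callback would detach the object here and return
         * LRU_REMOVED; this placeholder just skips the item. */
        return LRU_SKIP;
}

static unsigned long my_count(struct shrinker *shrink,
                              struct shrink_control *sc)
{
        /* Counts only the list selected by sc->nid and sc->memcg. */
        return list_lru_shrink_count(&my_lru, sc);
}

static unsigned long my_scan(struct shrinker *shrink,
                             struct shrink_control *sc)
{
        return list_lru_shrink_walk(&my_lru, sc, my_isolate, NULL);
}

static struct shrinker my_shrinker = {
        .count_objects  = my_count,
        .scan_objects   = my_scan,
        .seeks          = DEFAULT_SEEKS,
        .flags          = SHRINKER_MEMCG_AWARE, /* receive sc->memcg */
};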
H A D | ksm.h | 92 struct mem_cgroup *memcg, unsigned long *vm_flags) page_referenced_ksm() 91 page_referenced_ksm(struct page *page, struct mem_cgroup *memcg, unsigned long *vm_flags) page_referenced_ksm() argument
|
H A D | rmap.h | 172 struct mem_cgroup *memcg, unsigned long *vm_flags); 248 struct mem_cgroup *memcg, page_referenced() 247 page_referenced(struct page *page, int is_locked, struct mem_cgroup *memcg, unsigned long *vm_flags) page_referenced() argument
|
H A D | slab.h | 490 * This is the main placeholder for memcg-related information in kmem caches. 499 * @memcg: pointer to the memcg this cache belongs to 511 struct mem_cgroup *memcg; member in struct:memcg_cache_params::__anon12444::__anon12445
|
H A D | swap.h | 322 extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
|
H A D | cgroup.h | 170 * - memcg: use_hierarchy is on by default and the cgroup file for the flag
|
H A D | sched.h | 1712 struct mem_cgroup *memcg; member in struct:task_struct::memcg_oom_info
|
/linux-4.1.27/net/ipv4/ |
H A D | tcp_memcontrol.c | 9 int tcp_init_cgroup(struct mem_cgroup *memcg, struct cgroup_subsys *ss) tcp_init_cgroup() argument 16 struct mem_cgroup *parent = parent_mem_cgroup(memcg); tcp_init_cgroup() 20 cg_proto = tcp_prot.proto_cgroup(memcg); tcp_init_cgroup() 28 cg_proto->memcg = memcg; tcp_init_cgroup() 41 void tcp_destroy_cgroup(struct mem_cgroup *memcg) tcp_destroy_cgroup() argument 45 cg_proto = tcp_prot.proto_cgroup(memcg); tcp_destroy_cgroup() 57 static int tcp_update_limit(struct mem_cgroup *memcg, unsigned long nr_pages) tcp_update_limit() argument 63 cg_proto = tcp_prot.proto_cgroup(memcg); tcp_update_limit() 83 * to this memcg until that flag is up. tcp_update_limit() 95 * will do the update in the same memcg. Without that, we can't tcp_update_limit() 118 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); tcp_cgroup_write() local 131 ret = tcp_update_limit(memcg, nr_pages); tcp_cgroup_write() 143 struct mem_cgroup *memcg = mem_cgroup_from_css(css); tcp_cgroup_read() local 144 struct cg_proto *cg_proto = tcp_prot.proto_cgroup(memcg); tcp_cgroup_read() 181 struct mem_cgroup *memcg; tcp_cgroup_reset() local 184 memcg = mem_cgroup_from_css(of_css(of)); tcp_cgroup_reset() 185 cg_proto = tcp_prot.proto_cgroup(memcg); tcp_cgroup_reset()
|
/linux-4.1.27/kernel/events/ |
H A D | uprobes.c | 170 struct mem_cgroup *memcg; __replace_page() local 172 err = mem_cgroup_try_charge(kpage, vma->vm_mm, GFP_KERNEL, &memcg); __replace_page() 187 mem_cgroup_commit_charge(kpage, memcg, false); __replace_page() 210 mem_cgroup_cancel_charge(kpage, memcg); __replace_page()
|
/linux-4.1.27/net/core/ |
H A D | sock.c | 198 int mem_cgroup_sockets_init(struct mem_cgroup *memcg, struct cgroup_subsys *ss) mem_cgroup_sockets_init() argument 206 ret = proto->init_cgroup(memcg, ss); mem_cgroup_sockets_init() 217 proto->destroy_cgroup(memcg); mem_cgroup_sockets_init() 222 void mem_cgroup_sockets_destroy(struct mem_cgroup *memcg) mem_cgroup_sockets_destroy() argument 229 proto->destroy_cgroup(memcg); mem_cgroup_sockets_destroy()
|
/linux-4.1.27/fs/kernfs/ |
H A D | dir.c | 523 * allocated from an ida layer, which is accounted to the memcg that __kernfs_new_node() 524 * owns the cache, the memcg will get pinned forever. So do not account __kernfs_new_node()
|
/linux-4.1.27/block/ |
H A D | blk-cgroup.c | 936 * This ensures that, if available, memcg is automatically enabled
|
/linux-4.1.27/fs/ |
H A D | super.c | 95 * Ensure that we always scan at least one object - memcg kmem super_cache_scan()
|
/linux-4.1.27/fs/xfs/ |
H A D | xfs_aops.c | 963 * random callers for direct reclaim or memcg reclaim. We explicitly xfs_vm_writepage()
|