Lines matching refs:memcg. Each entry gives the source line number, the matching code line, and the enclosing context (a function name, or a member/local/argument kind).

152 	struct mem_cgroup *memcg;  member
166 int (*register_event)(struct mem_cgroup *memcg,
173 void (*unregister_event)(struct mem_cgroup *memcg,
185 static void mem_cgroup_threshold(struct mem_cgroup *memcg);
186 static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);
250 struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg) in memcg_to_vmpressure() argument
252 if (!memcg) in memcg_to_vmpressure()
253 memcg = root_mem_cgroup; in memcg_to_vmpressure()
254 return &memcg->vmpressure; in memcg_to_vmpressure()
262 static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg) in mem_cgroup_is_root() argument
264 return (memcg == root_mem_cgroup); in mem_cgroup_is_root()
273 static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg) in mem_cgroup_id() argument
275 return memcg->css.id; in mem_cgroup_id()
298 struct mem_cgroup *memcg; in sock_update_memcg() local
312 BUG_ON(mem_cgroup_is_root(sk->sk_cgrp->memcg)); in sock_update_memcg()
313 css_get(&sk->sk_cgrp->memcg->css); in sock_update_memcg()
318 memcg = mem_cgroup_from_task(current); in sock_update_memcg()
319 cg_proto = sk->sk_prot->proto_cgroup(memcg); in sock_update_memcg()
321 css_tryget_online(&memcg->css)) { in sock_update_memcg()
332 struct mem_cgroup *memcg; in sock_release_memcg() local
333 WARN_ON(!sk->sk_cgrp->memcg); in sock_release_memcg()
334 memcg = sk->sk_cgrp->memcg; in sock_release_memcg()
335 css_put(&sk->sk_cgrp->memcg->css); in sock_release_memcg()
339 struct cg_proto *tcp_proto_cgroup(struct mem_cgroup *memcg) in tcp_proto_cgroup() argument
341 if (!memcg || mem_cgroup_is_root(memcg)) in tcp_proto_cgroup()
344 return &memcg->tcp_mem; in tcp_proto_cgroup()
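The sock_update_memcg() fragments above show only the lookup half. A minimal sketch of the whole helper, reconstructed from these lines and the kernel code of this era (the listing appears to be a cross-reference of mm/memcontrol.c, circa Linux 4.4; the mem_cgroup_sockets_enabled guard and the MEMCG_SOCK_ACTIVE test are assumptions from that source, not visible in the fragments):

	void sock_update_memcg(struct sock *sk)
	{
		struct mem_cgroup *memcg;
		struct cg_proto *cg_proto;

		if (!mem_cgroup_sockets_enabled)
			return;

		/* A cloned socket inherits its parent's memcg; just take a ref. */
		if (sk->sk_cgrp) {
			BUG_ON(mem_cgroup_is_root(sk->sk_cgrp->memcg));
			css_get(&sk->sk_cgrp->memcg->css);
			return;
		}

		rcu_read_lock();
		memcg = mem_cgroup_from_task(current);
		cg_proto = sk->sk_prot->proto_cgroup(memcg);
		if (cg_proto && test_bit(MEMCG_SOCK_ACTIVE, &cg_proto->flags) &&
		    css_tryget_online(&memcg->css))	/* pin the css for the socket's lifetime */
			sk->sk_cgrp = cg_proto;
		rcu_read_unlock();
	}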
405 mem_cgroup_zone_zoneinfo(struct mem_cgroup *memcg, struct zone *zone) in mem_cgroup_zone_zoneinfo() argument
410 return &memcg->nodeinfo[nid]->zoneinfo[zid]; in mem_cgroup_zone_zoneinfo()
433 struct mem_cgroup *memcg; in mem_cgroup_css_from_page() local
437 memcg = page->mem_cgroup; in mem_cgroup_css_from_page()
439 if (!memcg || !cgroup_subsys_on_dfl(memory_cgrp_subsys)) in mem_cgroup_css_from_page()
440 memcg = root_mem_cgroup; in mem_cgroup_css_from_page()
443 return &memcg->css; in mem_cgroup_css_from_page()
461 struct mem_cgroup *memcg; in page_cgroup_ino() local
465 memcg = READ_ONCE(page->mem_cgroup); in page_cgroup_ino()
466 while (memcg && !(memcg->css.flags & CSS_ONLINE)) in page_cgroup_ino()
467 memcg = parent_mem_cgroup(memcg); in page_cgroup_ino()
468 if (memcg) in page_cgroup_ino()
469 ino = cgroup_ino(memcg->css.cgroup); in page_cgroup_ino()
475 mem_cgroup_page_zoneinfo(struct mem_cgroup *memcg, struct page *page) in mem_cgroup_page_zoneinfo() argument
480 return &memcg->nodeinfo[nid]->zoneinfo[zid]; in mem_cgroup_page_zoneinfo()
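Both zoneinfo lookups reduce to the same nodeinfo[nid]->zoneinfo[zid] indexing; only the derivation of nid and zid is elided. A sketch of the page-based variant, assuming the standard page_to_nid()/page_zonenum() helpers:

	static struct mem_cgroup_per_zone *
	mem_cgroup_page_zoneinfo(struct mem_cgroup *memcg, struct page *page)
	{
		int nid = page_to_nid(page);	/* node the page resides on */
		int zid = page_zonenum(page);	/* zone index within that node */

		return &memcg->nodeinfo[nid]->zoneinfo[zid];
	}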
549 static unsigned long soft_limit_excess(struct mem_cgroup *memcg) in soft_limit_excess() argument
551 unsigned long nr_pages = page_counter_read(&memcg->memory); in soft_limit_excess()
552 unsigned long soft_limit = READ_ONCE(memcg->soft_limit); in soft_limit_excess()
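Only the two reads of soft_limit_excess() survive in the listing; the elided tail just clamps the difference at zero. A sketch:

	static unsigned long soft_limit_excess(struct mem_cgroup *memcg)
	{
		unsigned long nr_pages = page_counter_read(&memcg->memory);
		unsigned long soft_limit = READ_ONCE(memcg->soft_limit);
		unsigned long excess = 0;

		if (nr_pages > soft_limit)
			excess = nr_pages - soft_limit;	/* pages over the soft limit */

		return excess;
	}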
561 static void mem_cgroup_update_tree(struct mem_cgroup *memcg, struct page *page) in mem_cgroup_update_tree() argument
572 for (; memcg; memcg = parent_mem_cgroup(memcg)) { in mem_cgroup_update_tree()
573 mz = mem_cgroup_page_zoneinfo(memcg, page); in mem_cgroup_update_tree()
574 excess = soft_limit_excess(memcg); in mem_cgroup_update_tree()
596 static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg) in mem_cgroup_remove_from_trees() argument
604 mz = &memcg->nodeinfo[nid]->zoneinfo[zid]; in mem_cgroup_remove_from_trees()
630 if (!soft_limit_excess(mz->memcg) || in __mem_cgroup_largest_soft_limit_node()
631 !css_tryget_online(&mz->memcg->css)) in __mem_cgroup_largest_soft_limit_node()
670 mem_cgroup_read_stat(struct mem_cgroup *memcg, enum mem_cgroup_stat_index idx) in mem_cgroup_read_stat() argument
677 val += per_cpu(memcg->stat->count[idx], cpu); in mem_cgroup_read_stat()
687 static unsigned long mem_cgroup_read_events(struct mem_cgroup *memcg, in mem_cgroup_read_events() argument
694 val += per_cpu(memcg->stat->events[idx], cpu); in mem_cgroup_read_events()
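Both readers fold a per-CPU array into a single value. A sketch of the stat variant, assuming the usual for_each_possible_cpu() walk and the negative-value clamp the kernel applies because summing races with concurrent updates:

	static unsigned long
	mem_cgroup_read_stat(struct mem_cgroup *memcg, enum mem_cgroup_stat_index idx)
	{
		long val = 0;
		int cpu;

		for_each_possible_cpu(cpu)
			val += per_cpu(memcg->stat->count[idx], cpu);
		/* Racy sum: transient negatives are possible, don't expose them. */
		if (val < 0)
			val = 0;
		return val;
	}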
698 static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg, in mem_cgroup_charge_statistics() argument
707 __this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_RSS], in mem_cgroup_charge_statistics()
710 __this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_CACHE], in mem_cgroup_charge_statistics()
714 __this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_RSS_HUGE], in mem_cgroup_charge_statistics()
719 __this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGIN]); in mem_cgroup_charge_statistics()
721 __this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGOUT]); in mem_cgroup_charge_statistics()
725 __this_cpu_add(memcg->stat->nr_page_events, nr_pages); in mem_cgroup_charge_statistics()
728 static unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg, in mem_cgroup_node_nr_lru_pages() argument
744 mz = &memcg->nodeinfo[nid]->zoneinfo[zid]; in mem_cgroup_node_nr_lru_pages()
751 static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg, in mem_cgroup_nr_lru_pages() argument
758 nr += mem_cgroup_node_nr_lru_pages(memcg, nid, lru_mask); in mem_cgroup_nr_lru_pages()
762 static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg, in mem_cgroup_event_ratelimit() argument
767 val = __this_cpu_read(memcg->stat->nr_page_events); in mem_cgroup_event_ratelimit()
768 next = __this_cpu_read(memcg->stat->targets[target]); in mem_cgroup_event_ratelimit()
784 __this_cpu_write(memcg->stat->targets[target], next); in mem_cgroup_event_ratelimit()
794 static void memcg_check_events(struct mem_cgroup *memcg, struct page *page) in memcg_check_events() argument
797 if (unlikely(mem_cgroup_event_ratelimit(memcg, in memcg_check_events()
802 do_softlimit = mem_cgroup_event_ratelimit(memcg, in memcg_check_events()
805 do_numainfo = mem_cgroup_event_ratelimit(memcg, in memcg_check_events()
808 mem_cgroup_threshold(memcg); in memcg_check_events()
810 mem_cgroup_update_tree(memcg, page); in memcg_check_events()
813 atomic_inc(&memcg->numainfo_events); in memcg_check_events()
834 struct mem_cgroup *memcg = NULL; in get_mem_cgroup_from_mm() local
844 memcg = root_mem_cgroup; in get_mem_cgroup_from_mm()
846 memcg = mem_cgroup_from_task(rcu_dereference(mm->owner)); in get_mem_cgroup_from_mm()
847 if (unlikely(!memcg)) in get_mem_cgroup_from_mm()
848 memcg = root_mem_cgroup; in get_mem_cgroup_from_mm()
850 } while (!css_tryget_online(&memcg->css)); in get_mem_cgroup_from_mm()
852 return memcg; in get_mem_cgroup_from_mm()
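get_mem_cgroup_from_mm() retries under RCU until it pins an online css. A sketch reconstructed from the fragments above (the !mm fallback covers cases such as charging after the owning task has exited):

	static struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
	{
		struct mem_cgroup *memcg = NULL;

		rcu_read_lock();
		do {
			if (unlikely(!mm))
				memcg = root_mem_cgroup;
			else {
				memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
				if (unlikely(!memcg))
					memcg = root_mem_cgroup;
			}
		} while (!css_tryget_online(&memcg->css));	/* lost a race, look up again */
		rcu_read_unlock();
		return memcg;
	}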
878 struct mem_cgroup *memcg = NULL; in mem_cgroup_iter() local
945 memcg = mem_cgroup_from_css(css); in mem_cgroup_iter()
956 if (smp_load_acquire(&memcg->initialized)) in mem_cgroup_iter()
962 memcg = NULL; in mem_cgroup_iter()
971 (void)cmpxchg(&iter->position, pos, memcg); in mem_cgroup_iter()
976 if (!memcg) in mem_cgroup_iter()
988 return memcg; in mem_cgroup_iter()
1007 struct mem_cgroup *memcg = dead_memcg; in invalidate_reclaim_iterators() local
1013 while ((memcg = parent_mem_cgroup(memcg))) { in invalidate_reclaim_iterators()
1016 mz = &memcg->nodeinfo[nid]->zoneinfo[zid]; in invalidate_reclaim_iterators()
1052 struct mem_cgroup *memcg) in mem_cgroup_zone_lruvec() argument
1062 mz = mem_cgroup_zone_zoneinfo(memcg, zone); in mem_cgroup_zone_lruvec()
1087 struct mem_cgroup *memcg; in mem_cgroup_page_lruvec() local
1095 memcg = page->mem_cgroup; in mem_cgroup_page_lruvec()
1100 if (!memcg) in mem_cgroup_page_lruvec()
1101 memcg = root_mem_cgroup; in mem_cgroup_page_lruvec()
1103 mz = mem_cgroup_page_zoneinfo(memcg, page); in mem_cgroup_page_lruvec()
1140 bool task_in_mem_cgroup(struct task_struct *task, struct mem_cgroup *memcg) in task_in_mem_cgroup() argument
1161 ret = mem_cgroup_is_descendant(task_memcg, memcg); in task_in_mem_cgroup()
1176 static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg) in mem_cgroup_margin() argument
1182 count = page_counter_read(&memcg->memory); in mem_cgroup_margin()
1183 limit = READ_ONCE(memcg->memory.limit); in mem_cgroup_margin()
1188 count = page_counter_read(&memcg->memsw); in mem_cgroup_margin()
1189 limit = READ_ONCE(memcg->memsw.limit); in mem_cgroup_margin()
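mem_cgroup_margin() returns how many pages can still be charged before hitting a hard limit, taking the tighter of the memory and memsw counters. A sketch with the elided arithmetic filled in (the do_swap_account guard is an assumption from this kernel era):

	static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
	{
		unsigned long margin = 0;
		unsigned long count, limit;

		count = page_counter_read(&memcg->memory);
		limit = READ_ONCE(memcg->memory.limit);
		if (count < limit)
			margin = limit - count;

		if (do_swap_account) {
			count = page_counter_read(&memcg->memsw);
			limit = READ_ONCE(memcg->memsw.limit);
			if (count <= limit)
				margin = min(margin, limit - count);
			else
				margin = 0;	/* already over the memsw limit */
		}

		return margin;
	}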
1204 static bool mem_cgroup_under_move(struct mem_cgroup *memcg) in mem_cgroup_under_move() argument
1219 ret = mem_cgroup_is_descendant(from, memcg) || in mem_cgroup_under_move()
1220 mem_cgroup_is_descendant(to, memcg); in mem_cgroup_under_move()
1226 static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg) in mem_cgroup_wait_acct_move() argument
1229 if (mem_cgroup_under_move(memcg)) { in mem_cgroup_wait_acct_move()
1251 void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p) in mem_cgroup_print_oom_info() argument
1269 pr_cont_cgroup_path(memcg->css.cgroup); in mem_cgroup_print_oom_info()
1275 K((u64)page_counter_read(&memcg->memory)), in mem_cgroup_print_oom_info()
1276 K((u64)memcg->memory.limit), memcg->memory.failcnt); in mem_cgroup_print_oom_info()
1278 K((u64)page_counter_read(&memcg->memsw)), in mem_cgroup_print_oom_info()
1279 K((u64)memcg->memsw.limit), memcg->memsw.failcnt); in mem_cgroup_print_oom_info()
1281 K((u64)page_counter_read(&memcg->kmem)), in mem_cgroup_print_oom_info()
1282 K((u64)memcg->kmem.limit), memcg->kmem.failcnt); in mem_cgroup_print_oom_info()
1284 for_each_mem_cgroup_tree(iter, memcg) { in mem_cgroup_print_oom_info()
1309 static int mem_cgroup_count_children(struct mem_cgroup *memcg) in mem_cgroup_count_children() argument
1314 for_each_mem_cgroup_tree(iter, memcg) in mem_cgroup_count_children()
1322 static unsigned long mem_cgroup_get_limit(struct mem_cgroup *memcg) in mem_cgroup_get_limit() argument
1326 limit = memcg->memory.limit; in mem_cgroup_get_limit()
1327 if (mem_cgroup_swappiness(memcg)) { in mem_cgroup_get_limit()
1330 memsw_limit = memcg->memsw.limit; in mem_cgroup_get_limit()
1336 static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask, in mem_cgroup_out_of_memory() argument
1363 check_panic_on_oom(&oc, CONSTRAINT_MEMCG, memcg); in mem_cgroup_out_of_memory()
1364 totalpages = mem_cgroup_get_limit(memcg) ? : 1; in mem_cgroup_out_of_memory()
1365 for_each_mem_cgroup_tree(iter, memcg) { in mem_cgroup_out_of_memory()
1383 mem_cgroup_iter_break(memcg, iter); in mem_cgroup_out_of_memory()
1390 points = oom_badness(task, memcg, NULL, totalpages); in mem_cgroup_out_of_memory()
1409 oom_kill_process(&oc, chosen, points, totalpages, memcg, in mem_cgroup_out_of_memory()
1429 static bool test_mem_cgroup_node_reclaimable(struct mem_cgroup *memcg, in test_mem_cgroup_node_reclaimable() argument
1432 if (mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_FILE)) in test_mem_cgroup_node_reclaimable()
1436 if (mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_ANON)) in test_mem_cgroup_node_reclaimable()
1448 static void mem_cgroup_may_update_nodemask(struct mem_cgroup *memcg) in mem_cgroup_may_update_nodemask() argument
1455 if (!atomic_read(&memcg->numainfo_events)) in mem_cgroup_may_update_nodemask()
1457 if (atomic_inc_return(&memcg->numainfo_updating) > 1) in mem_cgroup_may_update_nodemask()
1461 memcg->scan_nodes = node_states[N_MEMORY]; in mem_cgroup_may_update_nodemask()
1465 if (!test_mem_cgroup_node_reclaimable(memcg, nid, false)) in mem_cgroup_may_update_nodemask()
1466 node_clear(nid, memcg->scan_nodes); in mem_cgroup_may_update_nodemask()
1469 atomic_set(&memcg->numainfo_events, 0); in mem_cgroup_may_update_nodemask()
1470 atomic_set(&memcg->numainfo_updating, 0); in mem_cgroup_may_update_nodemask()
1485 int mem_cgroup_select_victim_node(struct mem_cgroup *memcg) in mem_cgroup_select_victim_node() argument
1489 mem_cgroup_may_update_nodemask(memcg); in mem_cgroup_select_victim_node()
1490 node = memcg->last_scanned_node; in mem_cgroup_select_victim_node()
1492 node = next_node(node, memcg->scan_nodes); in mem_cgroup_select_victim_node()
1494 node = first_node(memcg->scan_nodes); in mem_cgroup_select_victim_node()
1504 memcg->last_scanned_node = node; in mem_cgroup_select_victim_node()
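Victim-node selection is a round-robin over memcg->scan_nodes, continuing after the last node scanned and wrapping via first_node(). A sketch of the CONFIG_NUMA variant with the elided wrap and fallback filled in:

	int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
	{
		int node;

		mem_cgroup_may_update_nodemask(memcg);
		node = memcg->last_scanned_node;

		node = next_node(node, memcg->scan_nodes);
		if (node == MAX_NUMNODES)
			node = first_node(memcg->scan_nodes);
		/* Empty mask (nothing reclaimable anywhere): fall back to this node. */
		if (unlikely(node == MAX_NUMNODES))
			node = numa_node_id();

		memcg->last_scanned_node = node;
		return node;
	}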
1508 int mem_cgroup_select_victim_node(struct mem_cgroup *memcg) in mem_cgroup_select_victim_node() argument
1577 static bool mem_cgroup_oom_trylock(struct mem_cgroup *memcg) in mem_cgroup_oom_trylock() argument
1583 for_each_mem_cgroup_tree(iter, memcg) { in mem_cgroup_oom_trylock()
1590 mem_cgroup_iter_break(memcg, iter); in mem_cgroup_oom_trylock()
1601 for_each_mem_cgroup_tree(iter, memcg) { in mem_cgroup_oom_trylock()
1603 mem_cgroup_iter_break(memcg, iter); in mem_cgroup_oom_trylock()
1616 static void mem_cgroup_oom_unlock(struct mem_cgroup *memcg) in mem_cgroup_oom_unlock() argument
1622 for_each_mem_cgroup_tree(iter, memcg) in mem_cgroup_oom_unlock()
1627 static void mem_cgroup_mark_under_oom(struct mem_cgroup *memcg) in mem_cgroup_mark_under_oom() argument
1632 for_each_mem_cgroup_tree(iter, memcg) in mem_cgroup_mark_under_oom()
1637 static void mem_cgroup_unmark_under_oom(struct mem_cgroup *memcg) in mem_cgroup_unmark_under_oom() argument
1646 for_each_mem_cgroup_tree(iter, memcg) in mem_cgroup_unmark_under_oom()
1655 struct mem_cgroup *memcg; member
1667 oom_wait_memcg = oom_wait_info->memcg; in memcg_oom_wake_function()
1675 static void memcg_oom_recover(struct mem_cgroup *memcg) in memcg_oom_recover() argument
1685 if (memcg && memcg->under_oom) in memcg_oom_recover()
1686 __wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, memcg); in memcg_oom_recover()
1689 static void mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order) in mem_cgroup_oom() argument
1707 css_get(&memcg->css); in mem_cgroup_oom()
1708 current->memcg_in_oom = memcg; in mem_cgroup_oom()
1732 struct mem_cgroup *memcg = current->memcg_in_oom; in mem_cgroup_oom_synchronize() local
1737 if (!memcg) in mem_cgroup_oom_synchronize()
1743 owait.memcg = memcg; in mem_cgroup_oom_synchronize()
1750 mem_cgroup_mark_under_oom(memcg); in mem_cgroup_oom_synchronize()
1752 locked = mem_cgroup_oom_trylock(memcg); in mem_cgroup_oom_synchronize()
1755 mem_cgroup_oom_notify(memcg); in mem_cgroup_oom_synchronize()
1757 if (locked && !memcg->oom_kill_disable) { in mem_cgroup_oom_synchronize()
1758 mem_cgroup_unmark_under_oom(memcg); in mem_cgroup_oom_synchronize()
1760 mem_cgroup_out_of_memory(memcg, current->memcg_oom_gfp_mask, in mem_cgroup_oom_synchronize()
1764 mem_cgroup_unmark_under_oom(memcg); in mem_cgroup_oom_synchronize()
1769 mem_cgroup_oom_unlock(memcg); in mem_cgroup_oom_synchronize()
1775 memcg_oom_recover(memcg); in mem_cgroup_oom_synchronize()
1779 css_put(&memcg->css); in mem_cgroup_oom_synchronize()
1798 struct mem_cgroup *memcg; in mem_cgroup_begin_page_stat() local
1818 memcg = page->mem_cgroup; in mem_cgroup_begin_page_stat()
1819 if (unlikely(!memcg)) in mem_cgroup_begin_page_stat()
1822 if (atomic_read(&memcg->moving_account) <= 0) in mem_cgroup_begin_page_stat()
1823 return memcg; in mem_cgroup_begin_page_stat()
1825 spin_lock_irqsave(&memcg->move_lock, flags); in mem_cgroup_begin_page_stat()
1826 if (memcg != page->mem_cgroup) { in mem_cgroup_begin_page_stat()
1827 spin_unlock_irqrestore(&memcg->move_lock, flags); in mem_cgroup_begin_page_stat()
1836 memcg->move_lock_task = current; in mem_cgroup_begin_page_stat()
1837 memcg->move_lock_flags = flags; in mem_cgroup_begin_page_stat()
1839 return memcg; in mem_cgroup_begin_page_stat()
1847 void mem_cgroup_end_page_stat(struct mem_cgroup *memcg) in mem_cgroup_end_page_stat() argument
1849 if (memcg && memcg->move_lock_task == current) { in mem_cgroup_end_page_stat()
1850 unsigned long flags = memcg->move_lock_flags; in mem_cgroup_end_page_stat()
1852 memcg->move_lock_task = NULL; in mem_cgroup_end_page_stat()
1853 memcg->move_lock_flags = 0; in mem_cgroup_end_page_stat()
1855 spin_unlock_irqrestore(&memcg->move_lock, flags); in mem_cgroup_end_page_stat()
1888 static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages) in consume_stock() argument
1897 if (memcg == stock->cached && stock->nr_pages >= nr_pages) { in consume_stock()
1937 static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages) in refill_stock() argument
1941 if (stock->cached != memcg) { /* reset if necessary */ in refill_stock()
1943 stock->cached = memcg; in refill_stock()
1965 struct mem_cgroup *memcg; in drain_all_stock() local
1967 memcg = stock->cached; in drain_all_stock()
1968 if (!memcg || !stock->nr_pages) in drain_all_stock()
1970 if (!mem_cgroup_is_descendant(memcg, root_memcg)) in drain_all_stock()
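consume_stock() tries to satisfy a charge from the per-CPU stock before touching the shared page counters; refill_stock() puts leftover batch pages back. A sketch of the consume side (CHARGE_BATCH and the get_cpu_var/put_cpu_var pairing are assumptions from this kernel era):

	static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
	{
		struct memcg_stock_pcp *stock;
		bool ret = false;

		if (nr_pages > CHARGE_BATCH)
			return ret;

		stock = &get_cpu_var(memcg_stock);	/* disables preemption */
		if (memcg == stock->cached && stock->nr_pages >= nr_pages) {
			stock->nr_pages -= nr_pages;	/* charge served locally */
			ret = true;
		}
		put_cpu_var(memcg_stock);
		return ret;
	}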
2009 struct mem_cgroup *memcg, *pos; in mem_cgroup_handle_over_high() local
2014 pos = memcg = get_mem_cgroup_from_mm(current->mm); in mem_cgroup_handle_over_high()
2023 css_put(&memcg->css); in mem_cgroup_handle_over_high()
2027 static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask, in try_charge() argument
2038 if (mem_cgroup_is_root(memcg)) in try_charge()
2041 if (consume_stock(memcg, nr_pages)) in try_charge()
2045 page_counter_try_charge(&memcg->memsw, batch, &counter)) { in try_charge()
2046 if (page_counter_try_charge(&memcg->memory, batch, &counter)) in try_charge()
2049 page_counter_uncharge(&memcg->memsw, batch); in try_charge()
2134 page_counter_charge(&memcg->memory, nr_pages); in try_charge()
2136 page_counter_charge(&memcg->memsw, nr_pages); in try_charge()
2137 css_get_many(&memcg->css, nr_pages); in try_charge()
2142 css_get_many(&memcg->css, batch); in try_charge()
2144 refill_stock(memcg, batch - nr_pages); in try_charge()
2156 if (page_counter_read(&memcg->memory) > memcg->high) { in try_charge()
2161 } while ((memcg = parent_mem_cgroup(memcg))); in try_charge()
2166 static void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages) in cancel_charge() argument
2168 if (mem_cgroup_is_root(memcg)) in cancel_charge()
2171 page_counter_uncharge(&memcg->memory, nr_pages); in cancel_charge()
2173 page_counter_uncharge(&memcg->memsw, nr_pages); in cancel_charge()
2175 css_put_many(&memcg->css, nr_pages); in cancel_charge()
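cancel_charge() is the undo path for try_charge(): it returns the pages to the counters and drops the css references taken per page. A sketch:

	static void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages)
	{
		if (mem_cgroup_is_root(memcg))
			return;	/* the root group is not accounted */

		page_counter_uncharge(&memcg->memory, nr_pages);
		if (do_swap_account)
			page_counter_uncharge(&memcg->memsw, nr_pages);

		css_put_many(&memcg->css, nr_pages);
	}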
2209 static void commit_charge(struct page *page, struct mem_cgroup *memcg, in commit_charge() argument
2237 page->mem_cgroup = memcg; in commit_charge()
2290 struct mem_cgroup *memcg; member
2299 struct mem_cgroup *memcg = cw->memcg; in memcg_kmem_cache_create_func() local
2302 memcg_create_kmem_cache(memcg, cachep); in memcg_kmem_cache_create_func()
2304 css_put(&memcg->css); in memcg_kmem_cache_create_func()
2311 static void __memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg, in __memcg_schedule_kmem_cache_create() argument
2320 css_get(&memcg->css); in __memcg_schedule_kmem_cache_create()
2322 cw->memcg = memcg; in __memcg_schedule_kmem_cache_create()
2329 static void memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg, in memcg_schedule_kmem_cache_create() argument
2344 __memcg_schedule_kmem_cache_create(memcg, cachep); in memcg_schedule_kmem_cache_create()
2363 struct mem_cgroup *memcg; in __memcg_kmem_get_cache() local
2372 memcg = get_mem_cgroup_from_mm(current->mm); in __memcg_kmem_get_cache()
2373 kmemcg_id = READ_ONCE(memcg->kmemcg_id); in __memcg_kmem_get_cache()
2393 memcg_schedule_kmem_cache_create(memcg, cachep); in __memcg_kmem_get_cache()
2395 css_put(&memcg->css); in __memcg_kmem_get_cache()
2402 css_put(&cachep->memcg_params.memcg->css); in __memcg_kmem_put_cache()
2406 struct mem_cgroup *memcg) in __memcg_kmem_charge_memcg() argument
2412 if (!memcg_kmem_is_active(memcg)) in __memcg_kmem_charge_memcg()
2415 if (!page_counter_try_charge(&memcg->kmem, nr_pages, &counter)) in __memcg_kmem_charge_memcg()
2418 ret = try_charge(memcg, gfp, nr_pages); in __memcg_kmem_charge_memcg()
2420 page_counter_uncharge(&memcg->kmem, nr_pages); in __memcg_kmem_charge_memcg()
2424 page->mem_cgroup = memcg; in __memcg_kmem_charge_memcg()
2431 struct mem_cgroup *memcg; in __memcg_kmem_charge() local
2434 memcg = get_mem_cgroup_from_mm(current->mm); in __memcg_kmem_charge()
2435 ret = __memcg_kmem_charge_memcg(page, gfp, order, memcg); in __memcg_kmem_charge()
2436 css_put(&memcg->css); in __memcg_kmem_charge()
2442 struct mem_cgroup *memcg = page->mem_cgroup; in __memcg_kmem_uncharge() local
2445 if (!memcg) in __memcg_kmem_uncharge()
2448 VM_BUG_ON_PAGE(mem_cgroup_is_root(memcg), page); in __memcg_kmem_uncharge()
2450 page_counter_uncharge(&memcg->kmem, nr_pages); in __memcg_kmem_uncharge()
2451 page_counter_uncharge(&memcg->memory, nr_pages); in __memcg_kmem_uncharge()
2453 page_counter_uncharge(&memcg->memsw, nr_pages); in __memcg_kmem_uncharge()
2456 css_put_many(&memcg->css, nr_pages); in __memcg_kmem_uncharge()
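__memcg_kmem_uncharge() reverses a kmem charge: all three counters are uncharged, the page's memcg pointer is cleared, and the per-page css references are dropped. A sketch from the fragments (the order parameter and nr_pages derivation are assumptions):

	void __memcg_kmem_uncharge(struct page *page, int order)
	{
		struct mem_cgroup *memcg = page->mem_cgroup;
		unsigned int nr_pages = 1 << order;

		if (!memcg)
			return;

		VM_BUG_ON_PAGE(mem_cgroup_is_root(memcg), page);

		page_counter_uncharge(&memcg->kmem, nr_pages);
		page_counter_uncharge(&memcg->memory, nr_pages);
		if (do_swap_account)
			page_counter_uncharge(&memcg->memsw, nr_pages);

		page->mem_cgroup = NULL;	/* disconnect the page from the group */
		css_put_many(&memcg->css, nr_pages);
	}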
2484 static void mem_cgroup_swap_statistics(struct mem_cgroup *memcg, in mem_cgroup_swap_statistics() argument
2488 this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_SWAP], val); in mem_cgroup_swap_statistics()
2530 static int mem_cgroup_resize_limit(struct mem_cgroup *memcg, in mem_cgroup_resize_limit() argument
2545 mem_cgroup_count_children(memcg); in mem_cgroup_resize_limit()
2547 oldusage = page_counter_read(&memcg->memory); in mem_cgroup_resize_limit()
2556 if (limit > memcg->memsw.limit) { in mem_cgroup_resize_limit()
2561 if (limit > memcg->memory.limit) in mem_cgroup_resize_limit()
2563 ret = page_counter_limit(&memcg->memory, limit); in mem_cgroup_resize_limit()
2569 try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL, true); in mem_cgroup_resize_limit()
2571 curusage = page_counter_read(&memcg->memory); in mem_cgroup_resize_limit()
2580 memcg_oom_recover(memcg); in mem_cgroup_resize_limit()
2585 static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg, in mem_cgroup_resize_memsw_limit() argument
2596 mem_cgroup_count_children(memcg); in mem_cgroup_resize_memsw_limit()
2598 oldusage = page_counter_read(&memcg->memsw); in mem_cgroup_resize_memsw_limit()
2607 if (limit < memcg->memory.limit) { in mem_cgroup_resize_memsw_limit()
2612 if (limit > memcg->memsw.limit) in mem_cgroup_resize_memsw_limit()
2614 ret = page_counter_limit(&memcg->memsw, limit); in mem_cgroup_resize_memsw_limit()
2620 try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL, false); in mem_cgroup_resize_memsw_limit()
2622 curusage = page_counter_read(&memcg->memsw); in mem_cgroup_resize_memsw_limit()
2631 memcg_oom_recover(memcg); in mem_cgroup_resize_memsw_limit()
2666 reclaimed = mem_cgroup_soft_reclaim(mz->memcg, zone, in mem_cgroup_soft_limit_reclaim()
2681 excess = soft_limit_excess(mz->memcg); in mem_cgroup_soft_limit_reclaim()
2693 css_put(&mz->memcg->css); in mem_cgroup_soft_limit_reclaim()
2706 css_put(&next_mz->memcg->css); in mem_cgroup_soft_limit_reclaim()
2716 static inline bool memcg_has_children(struct mem_cgroup *memcg) in memcg_has_children() argument
2729 ret = css_next_child(NULL, &memcg->css); in memcg_has_children()
2740 static int mem_cgroup_force_empty(struct mem_cgroup *memcg) in mem_cgroup_force_empty() argument
2747 while (nr_retries && page_counter_read(&memcg->memory)) { in mem_cgroup_force_empty()
2753 progress = try_to_free_mem_cgroup_pages(memcg, 1, in mem_cgroup_force_empty()
2770 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); in mem_cgroup_force_empty_write() local
2772 if (mem_cgroup_is_root(memcg)) in mem_cgroup_force_empty_write()
2774 return mem_cgroup_force_empty(memcg) ?: nbytes; in mem_cgroup_force_empty_write()
2787 struct mem_cgroup *memcg = mem_cgroup_from_css(css); in mem_cgroup_hierarchy_write() local
2788 struct mem_cgroup *parent_memcg = mem_cgroup_from_css(memcg->css.parent); in mem_cgroup_hierarchy_write()
2792 if (memcg->use_hierarchy == val) in mem_cgroup_hierarchy_write()
2805 if (!memcg_has_children(memcg)) in mem_cgroup_hierarchy_write()
2806 memcg->use_hierarchy = val; in mem_cgroup_hierarchy_write()
2818 static unsigned long tree_stat(struct mem_cgroup *memcg, in tree_stat() argument
2824 for_each_mem_cgroup_tree(iter, memcg) in tree_stat()
2830 static unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap) in mem_cgroup_usage() argument
2834 if (mem_cgroup_is_root(memcg)) { in mem_cgroup_usage()
2835 val = tree_stat(memcg, MEM_CGROUP_STAT_CACHE); in mem_cgroup_usage()
2836 val += tree_stat(memcg, MEM_CGROUP_STAT_RSS); in mem_cgroup_usage()
2838 val += tree_stat(memcg, MEM_CGROUP_STAT_SWAP); in mem_cgroup_usage()
2841 val = page_counter_read(&memcg->memory); in mem_cgroup_usage()
2843 val = page_counter_read(&memcg->memsw); in mem_cgroup_usage()
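mem_cgroup_usage() has two cases: the root group has no page counter of its own, so its usage is summed from the hierarchy stats; every other group just reads the counter. A sketch:

	static unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
	{
		unsigned long val;

		if (mem_cgroup_is_root(memcg)) {
			/* Root is unaccounted: approximate usage from hierarchy stats. */
			val = tree_stat(memcg, MEM_CGROUP_STAT_CACHE);
			val += tree_stat(memcg, MEM_CGROUP_STAT_RSS);
			if (swap)
				val += tree_stat(memcg, MEM_CGROUP_STAT_SWAP);
		} else {
			if (!swap)
				val = page_counter_read(&memcg->memory);
			else
				val = page_counter_read(&memcg->memsw);
		}
		return val;
	}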
2859 struct mem_cgroup *memcg = mem_cgroup_from_css(css); in mem_cgroup_read_u64() local
2864 counter = &memcg->memory; in mem_cgroup_read_u64()
2867 counter = &memcg->memsw; in mem_cgroup_read_u64()
2870 counter = &memcg->kmem; in mem_cgroup_read_u64()
2878 if (counter == &memcg->memory) in mem_cgroup_read_u64()
2879 return (u64)mem_cgroup_usage(memcg, false) * PAGE_SIZE; in mem_cgroup_read_u64()
2880 if (counter == &memcg->memsw) in mem_cgroup_read_u64()
2881 return (u64)mem_cgroup_usage(memcg, true) * PAGE_SIZE; in mem_cgroup_read_u64()
2890 return (u64)memcg->soft_limit * PAGE_SIZE; in mem_cgroup_read_u64()
2897 static int memcg_activate_kmem(struct mem_cgroup *memcg, in memcg_activate_kmem() argument
2903 BUG_ON(memcg->kmemcg_id >= 0); in memcg_activate_kmem()
2904 BUG_ON(memcg->kmem_acct_activated); in memcg_activate_kmem()
2905 BUG_ON(memcg->kmem_acct_active); in memcg_activate_kmem()
2920 if (cgroup_is_populated(memcg->css.cgroup) || in memcg_activate_kmem()
2921 (memcg->use_hierarchy && memcg_has_children(memcg))) in memcg_activate_kmem()
2937 err = page_counter_limit(&memcg->kmem, nr_pages); in memcg_activate_kmem()
2947 memcg->kmemcg_id = memcg_id; in memcg_activate_kmem()
2948 memcg->kmem_acct_activated = true; in memcg_activate_kmem()
2949 memcg->kmem_acct_active = true; in memcg_activate_kmem()
2954 static int memcg_update_kmem_limit(struct mem_cgroup *memcg, in memcg_update_kmem_limit() argument
2960 if (!memcg_kmem_is_active(memcg)) in memcg_update_kmem_limit()
2961 ret = memcg_activate_kmem(memcg, limit); in memcg_update_kmem_limit()
2963 ret = page_counter_limit(&memcg->kmem, limit); in memcg_update_kmem_limit()
2968 static int memcg_propagate_kmem(struct mem_cgroup *memcg) in memcg_propagate_kmem() argument
2971 struct mem_cgroup *parent = parent_mem_cgroup(memcg); in memcg_propagate_kmem()
2982 ret = memcg_activate_kmem(memcg, PAGE_COUNTER_MAX); in memcg_propagate_kmem()
2987 static int memcg_update_kmem_limit(struct mem_cgroup *memcg, in memcg_update_kmem_limit() argument
3001 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); in mem_cgroup_write() local
3012 if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */ in mem_cgroup_write()
3018 ret = mem_cgroup_resize_limit(memcg, nr_pages); in mem_cgroup_write()
3021 ret = mem_cgroup_resize_memsw_limit(memcg, nr_pages); in mem_cgroup_write()
3024 ret = memcg_update_kmem_limit(memcg, nr_pages); in mem_cgroup_write()
3029 memcg->soft_limit = nr_pages; in mem_cgroup_write()
3039 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); in mem_cgroup_reset() local
3044 counter = &memcg->memory; in mem_cgroup_reset()
3047 counter = &memcg->memsw; in mem_cgroup_reset()
3050 counter = &memcg->kmem; in mem_cgroup_reset()
3080 struct mem_cgroup *memcg = mem_cgroup_from_css(css); in mem_cgroup_move_charge_write() local
3091 memcg->move_charge_at_immigrate = val; in mem_cgroup_move_charge_write()
3119 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m)); in memcg_numa_stat_show() local
3122 nr = mem_cgroup_nr_lru_pages(memcg, stat->lru_mask); in memcg_numa_stat_show()
3125 nr = mem_cgroup_node_nr_lru_pages(memcg, nid, in memcg_numa_stat_show()
3136 for_each_mem_cgroup_tree(iter, memcg) in memcg_numa_stat_show()
3141 for_each_mem_cgroup_tree(iter, memcg) in memcg_numa_stat_show()
3155 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m)); in memcg_stat_show() local
3170 mem_cgroup_read_stat(memcg, i) * PAGE_SIZE); in memcg_stat_show()
3175 mem_cgroup_read_events(memcg, i)); in memcg_stat_show()
3179 mem_cgroup_nr_lru_pages(memcg, BIT(i)) * PAGE_SIZE); in memcg_stat_show()
3183 for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) { in memcg_stat_show()
3198 for_each_mem_cgroup_tree(mi, memcg) in memcg_stat_show()
3206 for_each_mem_cgroup_tree(mi, memcg) in memcg_stat_show()
3215 for_each_mem_cgroup_tree(mi, memcg) in memcg_stat_show()
3230 mz = &memcg->nodeinfo[nid]->zoneinfo[zid]; in memcg_stat_show()
3251 struct mem_cgroup *memcg = mem_cgroup_from_css(css); in mem_cgroup_swappiness_read() local
3253 return mem_cgroup_swappiness(memcg); in mem_cgroup_swappiness_read()
3259 struct mem_cgroup *memcg = mem_cgroup_from_css(css); in mem_cgroup_swappiness_write() local
3265 memcg->swappiness = val; in mem_cgroup_swappiness_write()
3272 static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap) in __mem_cgroup_threshold() argument
3280 t = rcu_dereference(memcg->thresholds.primary); in __mem_cgroup_threshold()
3282 t = rcu_dereference(memcg->memsw_thresholds.primary); in __mem_cgroup_threshold()
3287 usage = mem_cgroup_usage(memcg, swap); in __mem_cgroup_threshold()
3323 static void mem_cgroup_threshold(struct mem_cgroup *memcg) in mem_cgroup_threshold() argument
3325 while (memcg) { in mem_cgroup_threshold()
3326 __mem_cgroup_threshold(memcg, false); in mem_cgroup_threshold()
3328 __mem_cgroup_threshold(memcg, true); in mem_cgroup_threshold()
3330 memcg = parent_mem_cgroup(memcg); in mem_cgroup_threshold()
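mem_cgroup_threshold() walks up the hierarchy, checking memory thresholds at every level and memsw thresholds where swap accounting is on (the do_swap_account guard is assumed for the elided line):

	static void mem_cgroup_threshold(struct mem_cgroup *memcg)
	{
		while (memcg) {
			__mem_cgroup_threshold(memcg, false);	/* memory usage */
			if (do_swap_account)
				__mem_cgroup_threshold(memcg, true);	/* memory+swap usage */
			memcg = parent_mem_cgroup(memcg);
		}
	}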
3348 static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg) in mem_cgroup_oom_notify_cb() argument
3354 list_for_each_entry(ev, &memcg->oom_notify, list) in mem_cgroup_oom_notify_cb()
3361 static void mem_cgroup_oom_notify(struct mem_cgroup *memcg) in mem_cgroup_oom_notify() argument
3365 for_each_mem_cgroup_tree(iter, memcg) in mem_cgroup_oom_notify()
3369 static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg, in __mem_cgroup_usage_register_event() argument
3382 mutex_lock(&memcg->thresholds_lock); in __mem_cgroup_usage_register_event()
3385 thresholds = &memcg->thresholds; in __mem_cgroup_usage_register_event()
3386 usage = mem_cgroup_usage(memcg, false); in __mem_cgroup_usage_register_event()
3388 thresholds = &memcg->memsw_thresholds; in __mem_cgroup_usage_register_event()
3389 usage = mem_cgroup_usage(memcg, true); in __mem_cgroup_usage_register_event()
3395 __mem_cgroup_threshold(memcg, type == _MEMSWAP); in __mem_cgroup_usage_register_event()
3446 mutex_unlock(&memcg->thresholds_lock); in __mem_cgroup_usage_register_event()
3451 static int mem_cgroup_usage_register_event(struct mem_cgroup *memcg, in mem_cgroup_usage_register_event() argument
3454 return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEM); in mem_cgroup_usage_register_event()
3457 static int memsw_cgroup_usage_register_event(struct mem_cgroup *memcg, in memsw_cgroup_usage_register_event() argument
3460 return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEMSWAP); in memsw_cgroup_usage_register_event()
3463 static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg, in __mem_cgroup_usage_unregister_event() argument
3471 mutex_lock(&memcg->thresholds_lock); in __mem_cgroup_usage_unregister_event()
3474 thresholds = &memcg->thresholds; in __mem_cgroup_usage_unregister_event()
3475 usage = mem_cgroup_usage(memcg, false); in __mem_cgroup_usage_unregister_event()
3477 thresholds = &memcg->memsw_thresholds; in __mem_cgroup_usage_unregister_event()
3478 usage = mem_cgroup_usage(memcg, true); in __mem_cgroup_usage_unregister_event()
3486 __mem_cgroup_threshold(memcg, type == _MEMSWAP); in __mem_cgroup_usage_unregister_event()
3539 mutex_unlock(&memcg->thresholds_lock); in __mem_cgroup_usage_unregister_event()
3542 static void mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg, in mem_cgroup_usage_unregister_event() argument
3545 return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEM); in mem_cgroup_usage_unregister_event()
3548 static void memsw_cgroup_usage_unregister_event(struct mem_cgroup *memcg, in memsw_cgroup_usage_unregister_event() argument
3551 return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEMSWAP); in memsw_cgroup_usage_unregister_event()
3554 static int mem_cgroup_oom_register_event(struct mem_cgroup *memcg, in mem_cgroup_oom_register_event() argument
3566 list_add(&event->list, &memcg->oom_notify); in mem_cgroup_oom_register_event()
3569 if (memcg->under_oom) in mem_cgroup_oom_register_event()
3576 static void mem_cgroup_oom_unregister_event(struct mem_cgroup *memcg, in mem_cgroup_oom_unregister_event() argument
3583 list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) { in mem_cgroup_oom_unregister_event()
3595 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(sf)); in mem_cgroup_oom_control_read() local
3597 seq_printf(sf, "oom_kill_disable %d\n", memcg->oom_kill_disable); in mem_cgroup_oom_control_read()
3598 seq_printf(sf, "under_oom %d\n", (bool)memcg->under_oom); in mem_cgroup_oom_control_read()
3605 struct mem_cgroup *memcg = mem_cgroup_from_css(css); in mem_cgroup_oom_control_write() local
3611 memcg->oom_kill_disable = val; in mem_cgroup_oom_control_write()
3613 memcg_oom_recover(memcg); in mem_cgroup_oom_control_write()
3619 static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss) in memcg_init_kmem() argument
3623 ret = memcg_propagate_kmem(memcg); in memcg_init_kmem()
3627 return mem_cgroup_sockets_init(memcg, ss); in memcg_init_kmem()
3630 static void memcg_deactivate_kmem(struct mem_cgroup *memcg) in memcg_deactivate_kmem() argument
3636 if (!memcg->kmem_acct_active) in memcg_deactivate_kmem()
3645 memcg->kmem_acct_active = false; in memcg_deactivate_kmem()
3647 memcg_deactivate_kmem_caches(memcg); in memcg_deactivate_kmem()
3649 kmemcg_id = memcg->kmemcg_id; in memcg_deactivate_kmem()
3652 parent = parent_mem_cgroup(memcg); in memcg_deactivate_kmem()
3665 css_for_each_descendant_pre(css, &memcg->css) { in memcg_deactivate_kmem()
3669 if (!memcg->use_hierarchy) in memcg_deactivate_kmem()
3679 static void memcg_destroy_kmem(struct mem_cgroup *memcg) in memcg_destroy_kmem() argument
3681 if (memcg->kmem_acct_activated) { in memcg_destroy_kmem()
3682 memcg_destroy_kmem_caches(memcg); in memcg_destroy_kmem()
3684 WARN_ON(page_counter_read(&memcg->kmem)); in memcg_destroy_kmem()
3686 mem_cgroup_sockets_destroy(memcg); in memcg_destroy_kmem()
3689 static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss) in memcg_init_kmem() argument
3694 static void memcg_deactivate_kmem(struct mem_cgroup *memcg) in memcg_deactivate_kmem() argument
3698 static void memcg_destroy_kmem(struct mem_cgroup *memcg) in memcg_destroy_kmem() argument
3705 struct list_head *mem_cgroup_cgwb_list(struct mem_cgroup *memcg) in mem_cgroup_cgwb_list() argument
3707 return &memcg->cgwb_list; in mem_cgroup_cgwb_list()
3710 static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp) in memcg_wb_domain_init() argument
3712 return wb_domain_init(&memcg->cgwb_domain, gfp); in memcg_wb_domain_init()
3715 static void memcg_wb_domain_exit(struct mem_cgroup *memcg) in memcg_wb_domain_exit() argument
3717 wb_domain_exit(&memcg->cgwb_domain); in memcg_wb_domain_exit()
3720 static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg) in memcg_wb_domain_size_changed() argument
3722 wb_domain_size_changed(&memcg->cgwb_domain); in memcg_wb_domain_size_changed()
3727 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css); in mem_cgroup_wb_domain() local
3729 if (!memcg->css.parent) in mem_cgroup_wb_domain()
3732 return &memcg->cgwb_domain; in mem_cgroup_wb_domain()
3757 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css); in mem_cgroup_wb_stats() local
3760 *pdirty = mem_cgroup_read_stat(memcg, MEM_CGROUP_STAT_DIRTY); in mem_cgroup_wb_stats()
3763 *pwriteback = mem_cgroup_read_stat(memcg, MEM_CGROUP_STAT_WRITEBACK); in mem_cgroup_wb_stats()
3764 *pfilepages = mem_cgroup_nr_lru_pages(memcg, (1 << LRU_INACTIVE_FILE) | in mem_cgroup_wb_stats()
3768 while ((parent = parent_mem_cgroup(memcg))) { in mem_cgroup_wb_stats()
3769 unsigned long ceiling = min(memcg->memory.limit, memcg->high); in mem_cgroup_wb_stats()
3770 unsigned long used = page_counter_read(&memcg->memory); in mem_cgroup_wb_stats()
3773 memcg = parent; in mem_cgroup_wb_stats()
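Only the head of the headroom loop in mem_cgroup_wb_stats() survives above. The elided body takes the most restrictive ceiling anywhere up the hierarchy; a sketch of what it computes (pheadroom initialized to PAGE_COUNTER_MAX before the loop is an assumption):

	*pheadroom = PAGE_COUNTER_MAX;

	while ((parent = parent_mem_cgroup(memcg))) {
		unsigned long ceiling = min(memcg->memory.limit, memcg->high);
		unsigned long used = page_counter_read(&memcg->memory);

		/* min(ceiling, used) guards against used > ceiling underflow */
		*pheadroom = min(*pheadroom, ceiling - min(ceiling, used));
		memcg = parent;
	}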
3779 static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp) in memcg_wb_domain_init() argument
3784 static void memcg_wb_domain_exit(struct mem_cgroup *memcg) in memcg_wb_domain_exit() argument
3788 static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg) in memcg_wb_domain_size_changed() argument
3816 struct mem_cgroup *memcg = event->memcg; in memcg_event_remove() local
3820 event->unregister_event(memcg, event->eventfd); in memcg_event_remove()
3827 css_put(&memcg->css); in memcg_event_remove()
3840 struct mem_cgroup *memcg = event->memcg; in memcg_event_wake() local
3853 spin_lock(&memcg->event_list_lock); in memcg_event_wake()
3862 spin_unlock(&memcg->event_list_lock); in memcg_event_wake()
3890 struct mem_cgroup *memcg = mem_cgroup_from_css(css); in memcg_write_event_control() local
3916 event->memcg = memcg; in memcg_write_event_control()
3988 ret = event->register_event(memcg, event->eventfd, buf); in memcg_write_event_control()
3994 spin_lock(&memcg->event_list_lock); in memcg_write_event_control()
3995 list_add(&event->list, &memcg->event_list); in memcg_write_event_control()
3996 spin_unlock(&memcg->event_list_lock); in memcg_write_event_control()
4127 static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node) in alloc_mem_cgroup_per_zone_info() argument
4151 mz->memcg = memcg; in alloc_mem_cgroup_per_zone_info()
4153 memcg->nodeinfo[node] = pn; in alloc_mem_cgroup_per_zone_info()
4157 static void free_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node) in free_mem_cgroup_per_zone_info() argument
4159 kfree(memcg->nodeinfo[node]); in free_mem_cgroup_per_zone_info()
4164 struct mem_cgroup *memcg; in mem_cgroup_alloc() local
4170 memcg = kzalloc(size, GFP_KERNEL); in mem_cgroup_alloc()
4171 if (!memcg) in mem_cgroup_alloc()
4174 memcg->stat = alloc_percpu(struct mem_cgroup_stat_cpu); in mem_cgroup_alloc()
4175 if (!memcg->stat) in mem_cgroup_alloc()
4178 if (memcg_wb_domain_init(memcg, GFP_KERNEL)) in mem_cgroup_alloc()
4181 return memcg; in mem_cgroup_alloc()
4184 free_percpu(memcg->stat); in mem_cgroup_alloc()
4186 kfree(memcg); in mem_cgroup_alloc()
4201 static void __mem_cgroup_free(struct mem_cgroup *memcg) in __mem_cgroup_free() argument
4205 mem_cgroup_remove_from_trees(memcg); in __mem_cgroup_free()
4208 free_mem_cgroup_per_zone_info(memcg, node); in __mem_cgroup_free()
4210 free_percpu(memcg->stat); in __mem_cgroup_free()
4211 memcg_wb_domain_exit(memcg); in __mem_cgroup_free()
4212 kfree(memcg); in __mem_cgroup_free()
4218 struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg) in parent_mem_cgroup() argument
4220 if (!memcg->memory.parent) in parent_mem_cgroup()
4222 return mem_cgroup_from_counter(memcg->memory.parent, memory); in parent_mem_cgroup()
4229 struct mem_cgroup *memcg; in mem_cgroup_css_alloc() local
4233 memcg = mem_cgroup_alloc(); in mem_cgroup_css_alloc()
4234 if (!memcg) in mem_cgroup_css_alloc()
4238 if (alloc_mem_cgroup_per_zone_info(memcg, node)) in mem_cgroup_css_alloc()
4243 root_mem_cgroup = memcg; in mem_cgroup_css_alloc()
4244 mem_cgroup_root_css = &memcg->css; in mem_cgroup_css_alloc()
4245 page_counter_init(&memcg->memory, NULL); in mem_cgroup_css_alloc()
4246 memcg->high = PAGE_COUNTER_MAX; in mem_cgroup_css_alloc()
4247 memcg->soft_limit = PAGE_COUNTER_MAX; in mem_cgroup_css_alloc()
4248 page_counter_init(&memcg->memsw, NULL); in mem_cgroup_css_alloc()
4249 page_counter_init(&memcg->kmem, NULL); in mem_cgroup_css_alloc()
4252 memcg->last_scanned_node = MAX_NUMNODES; in mem_cgroup_css_alloc()
4253 INIT_LIST_HEAD(&memcg->oom_notify); in mem_cgroup_css_alloc()
4254 memcg->move_charge_at_immigrate = 0; in mem_cgroup_css_alloc()
4255 mutex_init(&memcg->thresholds_lock); in mem_cgroup_css_alloc()
4256 spin_lock_init(&memcg->move_lock); in mem_cgroup_css_alloc()
4257 vmpressure_init(&memcg->vmpressure); in mem_cgroup_css_alloc()
4258 INIT_LIST_HEAD(&memcg->event_list); in mem_cgroup_css_alloc()
4259 spin_lock_init(&memcg->event_list_lock); in mem_cgroup_css_alloc()
4261 memcg->kmemcg_id = -1; in mem_cgroup_css_alloc()
4264 INIT_LIST_HEAD(&memcg->cgwb_list); in mem_cgroup_css_alloc()
4266 return &memcg->css; in mem_cgroup_css_alloc()
4269 __mem_cgroup_free(memcg); in mem_cgroup_css_alloc()
4276 struct mem_cgroup *memcg = mem_cgroup_from_css(css); in mem_cgroup_css_online() local
4288 memcg->use_hierarchy = parent->use_hierarchy; in mem_cgroup_css_online()
4289 memcg->oom_kill_disable = parent->oom_kill_disable; in mem_cgroup_css_online()
4290 memcg->swappiness = mem_cgroup_swappiness(parent); in mem_cgroup_css_online()
4293 page_counter_init(&memcg->memory, &parent->memory); in mem_cgroup_css_online()
4294 memcg->high = PAGE_COUNTER_MAX; in mem_cgroup_css_online()
4295 memcg->soft_limit = PAGE_COUNTER_MAX; in mem_cgroup_css_online()
4296 page_counter_init(&memcg->memsw, &parent->memsw); in mem_cgroup_css_online()
4297 page_counter_init(&memcg->kmem, &parent->kmem); in mem_cgroup_css_online()
4304 page_counter_init(&memcg->memory, NULL); in mem_cgroup_css_online()
4305 memcg->high = PAGE_COUNTER_MAX; in mem_cgroup_css_online()
4306 memcg->soft_limit = PAGE_COUNTER_MAX; in mem_cgroup_css_online()
4307 page_counter_init(&memcg->memsw, NULL); in mem_cgroup_css_online()
4308 page_counter_init(&memcg->kmem, NULL); in mem_cgroup_css_online()
4319 ret = memcg_init_kmem(memcg, &memory_cgrp_subsys); in mem_cgroup_css_online()
4328 smp_store_release(&memcg->initialized, 1); in mem_cgroup_css_online()
4335 struct mem_cgroup *memcg = mem_cgroup_from_css(css); in mem_cgroup_css_offline() local
4343 spin_lock(&memcg->event_list_lock); in mem_cgroup_css_offline()
4344 list_for_each_entry_safe(event, tmp, &memcg->event_list, list) { in mem_cgroup_css_offline()
4348 spin_unlock(&memcg->event_list_lock); in mem_cgroup_css_offline()
4350 vmpressure_cleanup(&memcg->vmpressure); in mem_cgroup_css_offline()
4352 memcg_deactivate_kmem(memcg); in mem_cgroup_css_offline()
4354 wb_memcg_offline(memcg); in mem_cgroup_css_offline()
4359 struct mem_cgroup *memcg = mem_cgroup_from_css(css); in mem_cgroup_css_released() local
4361 invalidate_reclaim_iterators(memcg); in mem_cgroup_css_released()
4366 struct mem_cgroup *memcg = mem_cgroup_from_css(css); in mem_cgroup_css_free() local
4368 memcg_destroy_kmem(memcg); in mem_cgroup_css_free()
4369 __mem_cgroup_free(memcg); in mem_cgroup_css_free()
4387 struct mem_cgroup *memcg = mem_cgroup_from_css(css); in mem_cgroup_css_reset() local
4389 mem_cgroup_resize_limit(memcg, PAGE_COUNTER_MAX); in mem_cgroup_css_reset()
4390 mem_cgroup_resize_memsw_limit(memcg, PAGE_COUNTER_MAX); in mem_cgroup_css_reset()
4391 memcg_update_kmem_limit(memcg, PAGE_COUNTER_MAX); in mem_cgroup_css_reset()
4392 memcg->low = 0; in mem_cgroup_css_reset()
4393 memcg->high = PAGE_COUNTER_MAX; in mem_cgroup_css_reset()
4394 memcg->soft_limit = PAGE_COUNTER_MAX; in mem_cgroup_css_reset()
4395 memcg_wb_domain_size_changed(memcg); in mem_cgroup_css_reset()
4827 struct mem_cgroup *memcg; in mem_cgroup_can_attach() local
4848 memcg = mem_cgroup_from_css(css); in mem_cgroup_can_attach()
4858 move_flags = READ_ONCE(memcg->move_charge_at_immigrate); in mem_cgroup_can_attach()
4864 VM_BUG_ON(from == memcg); in mem_cgroup_can_attach()
4880 mc.to = memcg; in mem_cgroup_can_attach()
5077 struct mem_cgroup *memcg = mem_cgroup_from_css(css); in memory_current_read() local
5079 return (u64)page_counter_read(&memcg->memory) * PAGE_SIZE; in memory_current_read()
5084 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m)); in memory_low_show() local
5085 unsigned long low = READ_ONCE(memcg->low); in memory_low_show()
5098 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); in memory_low_write() local
5107 memcg->low = low; in memory_low_write()
5114 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m)); in memory_high_show() local
5115 unsigned long high = READ_ONCE(memcg->high); in memory_high_show()
5128 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); in memory_high_write() local
5138 memcg->high = high; in memory_high_write()
5140 nr_pages = page_counter_read(&memcg->memory); in memory_high_write()
5142 try_to_free_mem_cgroup_pages(memcg, nr_pages - high, in memory_high_write()
5145 memcg_wb_domain_size_changed(memcg); in memory_high_write()
5151 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m)); in memory_max_show() local
5152 unsigned long max = READ_ONCE(memcg->memory.limit); in memory_max_show()
5165 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); in memory_max_write() local
5176 xchg(&memcg->memory.limit, max); in memory_max_write()
5179 unsigned long nr_pages = page_counter_read(&memcg->memory); in memory_max_write()
5190 drain_all_stock(memcg); in memory_max_write()
5196 if (!try_to_free_mem_cgroup_pages(memcg, nr_pages - max, in memory_max_write()
5202 mem_cgroup_events(memcg, MEMCG_OOM, 1); in memory_max_write()
5203 if (!mem_cgroup_out_of_memory(memcg, GFP_KERNEL, 0)) in memory_max_write()
5207 memcg_wb_domain_size_changed(memcg); in memory_max_write()
5213 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m)); in memory_events_show() local
5215 seq_printf(m, "low %lu\n", mem_cgroup_read_events(memcg, MEMCG_LOW)); in memory_events_show()
5216 seq_printf(m, "high %lu\n", mem_cgroup_read_events(memcg, MEMCG_HIGH)); in memory_events_show()
5217 seq_printf(m, "max %lu\n", mem_cgroup_read_events(memcg, MEMCG_MAX)); in memory_events_show()
5218 seq_printf(m, "oom %lu\n", mem_cgroup_read_events(memcg, MEMCG_OOM)); in memory_events_show()
5280 bool mem_cgroup_low(struct mem_cgroup *root, struct mem_cgroup *memcg) in mem_cgroup_low() argument
5291 if (memcg == root_mem_cgroup) in mem_cgroup_low()
5294 if (page_counter_read(&memcg->memory) >= memcg->low) in mem_cgroup_low()
5297 while (memcg != root) { in mem_cgroup_low()
5298 memcg = parent_mem_cgroup(memcg); in mem_cgroup_low()
5300 if (memcg == root_mem_cgroup) in mem_cgroup_low()
5303 if (page_counter_read(&memcg->memory) >= memcg->low) in mem_cgroup_low()
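mem_cgroup_low() answers whether memcg and every ancestor up to (but excluding) root are below their low boundaries; the top-level group has no configurable range, so it is skipped. A sketch reconstructed from the fragments:

	bool mem_cgroup_low(struct mem_cgroup *root, struct mem_cgroup *memcg)
	{
		if (memcg == root_mem_cgroup)
			return false;

		if (page_counter_read(&memcg->memory) >= memcg->low)
			return false;

		while (memcg != root) {
			memcg = parent_mem_cgroup(memcg);

			if (memcg == root_mem_cgroup)
				break;	/* top level is not considered an ancestor here */

			if (page_counter_read(&memcg->memory) >= memcg->low)
				return false;
		}
		return true;
	}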
5329 struct mem_cgroup *memcg = NULL; in mem_cgroup_try_charge() local
5353 memcg = mem_cgroup_from_id(id); in mem_cgroup_try_charge()
5354 if (memcg && !css_tryget_online(&memcg->css)) in mem_cgroup_try_charge()
5355 memcg = NULL; in mem_cgroup_try_charge()
5365 if (!memcg) in mem_cgroup_try_charge()
5366 memcg = get_mem_cgroup_from_mm(mm); in mem_cgroup_try_charge()
5368 ret = try_charge(memcg, gfp_mask, nr_pages); in mem_cgroup_try_charge()
5370 css_put(&memcg->css); in mem_cgroup_try_charge()
5372 *memcgp = memcg; in mem_cgroup_try_charge()
5392 void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg, in mem_cgroup_commit_charge() argument
5407 if (!memcg) in mem_cgroup_commit_charge()
5410 commit_charge(page, memcg, lrucare); in mem_cgroup_commit_charge()
5418 mem_cgroup_charge_statistics(memcg, page, nr_pages); in mem_cgroup_commit_charge()
5419 memcg_check_events(memcg, page); in mem_cgroup_commit_charge()
5440 void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg) in mem_cgroup_cancel_charge() argument
5451 if (!memcg) in mem_cgroup_cancel_charge()
5459 cancel_charge(memcg, nr_pages); in mem_cgroup_cancel_charge()
5462 static void uncharge_batch(struct mem_cgroup *memcg, unsigned long pgpgout, in uncharge_batch() argument
5469 if (!mem_cgroup_is_root(memcg)) { in uncharge_batch()
5470 page_counter_uncharge(&memcg->memory, nr_pages); in uncharge_batch()
5472 page_counter_uncharge(&memcg->memsw, nr_pages); in uncharge_batch()
5473 memcg_oom_recover(memcg); in uncharge_batch()
5477 __this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS], nr_anon); in uncharge_batch()
5478 __this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_CACHE], nr_file); in uncharge_batch()
5479 __this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS_HUGE], nr_huge); in uncharge_batch()
5480 __this_cpu_add(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGOUT], pgpgout); in uncharge_batch()
5481 __this_cpu_add(memcg->stat->nr_page_events, nr_pages); in uncharge_batch()
5482 memcg_check_events(memcg, dummy_page); in uncharge_batch()
5485 if (!mem_cgroup_is_root(memcg)) in uncharge_batch()
5486 css_put_many(&memcg->css, nr_pages); in uncharge_batch()
5491 struct mem_cgroup *memcg = NULL; in uncharge_list() local
5518 if (memcg != page->mem_cgroup) { in uncharge_list()
5519 if (memcg) { in uncharge_list()
5520 uncharge_batch(memcg, pgpgout, nr_anon, nr_file, in uncharge_list()
5524 memcg = page->mem_cgroup; in uncharge_list()
5543 if (memcg) in uncharge_list()
5544 uncharge_batch(memcg, pgpgout, nr_anon, nr_file, in uncharge_list()
5596 struct mem_cgroup *memcg; in mem_cgroup_replace_page() local
5613 memcg = oldpage->mem_cgroup; in mem_cgroup_replace_page()
5614 if (!memcg) in mem_cgroup_replace_page()
5621 commit_charge(newpage, memcg, true); in mem_cgroup_replace_page()
5673 struct mem_cgroup *memcg; in mem_cgroup_swapout() local
5682 memcg = page->mem_cgroup; in mem_cgroup_swapout()
5685 if (!memcg) in mem_cgroup_swapout()
5688 oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg)); in mem_cgroup_swapout()
5690 mem_cgroup_swap_statistics(memcg, true); in mem_cgroup_swapout()
5694 if (!mem_cgroup_is_root(memcg)) in mem_cgroup_swapout()
5695 page_counter_uncharge(&memcg->memory, 1); in mem_cgroup_swapout()
5704 mem_cgroup_charge_statistics(memcg, page, -1); in mem_cgroup_swapout()
5705 memcg_check_events(memcg, page); in mem_cgroup_swapout()
5716 struct mem_cgroup *memcg; in mem_cgroup_uncharge_swap() local
5724 memcg = mem_cgroup_from_id(id); in mem_cgroup_uncharge_swap()
5725 if (memcg) { in mem_cgroup_uncharge_swap()
5726 if (!mem_cgroup_is_root(memcg)) in mem_cgroup_uncharge_swap()
5727 page_counter_uncharge(&memcg->memsw, 1); in mem_cgroup_uncharge_swap()
5728 mem_cgroup_swap_statistics(memcg, false); in mem_cgroup_uncharge_swap()
5729 css_put(&memcg->css); in mem_cgroup_uncharge_swap()
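mem_cgroup_uncharge_swap() looks the group up by the id recorded at swapout time, drops one memsw page, and releases the css reference that mem_cgroup_swapout() left behind. A sketch (the swap_cgroup_record(entry, 0) lookup-and-clear is an assumption from this kernel era):

	void mem_cgroup_uncharge_swap(swp_entry_t entry)
	{
		struct mem_cgroup *memcg;
		unsigned short id;

		if (!do_swap_account)
			return;

		id = swap_cgroup_record(entry, 0);	/* read and clear the recorded id */
		rcu_read_lock();
		memcg = mem_cgroup_from_id(id);
		if (memcg) {
			if (!mem_cgroup_is_root(memcg))
				page_counter_uncharge(&memcg->memsw, 1);
			mem_cgroup_swap_statistics(memcg, false);
			css_put(&memcg->css);
		}
		rcu_read_unlock();
	}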