/* memcontrol.c - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * Memory thresholds
 * Copyright (C) 2009 Nokia Corporation
 * Author: Kirill A. Shutemov
 *
 * Kernel Memory Controller
 * Copyright (C) 2012 Parallels Inc. and Google Inc.
 * Authors: Glauber Costa and Suleiman Souhlal
 *
 * Native page reclaim
 * Charge lifetime sanitation
 * Lockless page tracking & accounting
 * Unified hierarchy configuration model
 * Copyright (C) 2015 Red Hat, Inc., Johannes Weiner
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/page_counter.h>
#include <linux/memcontrol.h>
#include <linux/cgroup.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/smp.h>
#include <linux/page-flags.h>
#include <linux/backing-dev.h>
#include <linux/bit_spinlock.h>
#include <linux/rcupdate.h>
#include <linux/limits.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/spinlock.h>
#include <linux/eventfd.h>
#include <linux/poll.h>
#include <linux/sort.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/vmpressure.h>
#include <linux/mm_inline.h>
#include <linux/swap_cgroup.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/lockdep.h>
#include <linux/file.h>
#include <linux/tracehook.h>
#include "internal.h"
#include <net/sock.h>
#include <net/ip.h>
#include <net/tcp_memcontrol.h>
#include "slab.h"

#include <asm/uaccess.h>

#include <trace/events/vmscan.h>

struct cgroup_subsys memory_cgrp_subsys __read_mostly;
EXPORT_SYMBOL(memory_cgrp_subsys);

#define MEM_CGROUP_RECLAIM_RETRIES	5
static struct mem_cgroup *root_mem_cgroup __read_mostly;
struct cgroup_subsys_state *mem_cgroup_root_css __read_mostly;

/* Whether the swap controller is active */
#ifdef CONFIG_MEMCG_SWAP
int do_swap_account __read_mostly;
#else
#define do_swap_account		0
#endif

static const char * const mem_cgroup_stat_names[] = {
	"cache",
	"rss",
	"rss_huge",
	"mapped_file",
	"dirty",
	"writeback",
	"swap",
};

static const char * const mem_cgroup_events_names[] = {
	"pgpgin",
	"pgpgout",
	"pgfault",
	"pgmajfault",
};

static const char * const mem_cgroup_lru_names[] = {
	"inactive_anon",
	"active_anon",
	"inactive_file",
	"active_file",
	"unevictable",
};

#define THRESHOLDS_EVENTS_TARGET 128
#define SOFTLIMIT_EVENTS_TARGET 1024
#define NUMAINFO_EVENTS_TARGET	1024

/*
 * Cgroups above their limits are maintained in an RB-tree, independent of
 * their hierarchy representation.
 */

struct mem_cgroup_tree_per_zone {
	struct rb_root rb_root;
	spinlock_t lock;
};

struct mem_cgroup_tree_per_node {
	struct mem_cgroup_tree_per_zone rb_tree_per_zone[MAX_NR_ZONES];
};

struct mem_cgroup_tree {
	struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
};

static struct mem_cgroup_tree soft_limit_tree __read_mostly;

/* for OOM */
struct mem_cgroup_eventfd_list {
	struct list_head list;
	struct eventfd_ctx *eventfd;
};

/*
 * cgroup_event represents events which userspace wants to receive.
 */
struct mem_cgroup_event {
	/*
	 * memcg which the event belongs to.
	 */
	struct mem_cgroup *memcg;
	/*
	 * eventfd to signal userspace about the event.
	 */
	struct eventfd_ctx *eventfd;
	/*
	 * Each of these is stored in a list by the cgroup.
	 */
	struct list_head list;
	/*
	 * register_event() callback will be used to add a new userspace
	 * waiter for changes related to this event.  Use eventfd_signal()
	 * on eventfd to send notification to userspace.
	 */
	int (*register_event)(struct mem_cgroup *memcg,
			      struct eventfd_ctx *eventfd, const char *args);
	/*
	 * unregister_event() callback will be called when userspace closes
	 * the eventfd or when the cgroup is removed.  This callback must be
	 * set if you want to provide notification functionality.
	 */
	void (*unregister_event)(struct mem_cgroup *memcg,
				 struct eventfd_ctx *eventfd);
	/*
	 * All fields below are needed to unregister the event when
	 * userspace closes the eventfd.
	 */
	poll_table pt;
	wait_queue_head_t *wqh;
	wait_queue_t wait;
	struct work_struct remove;
};

static void mem_cgroup_threshold(struct mem_cgroup *memcg);
static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);

/* Stuff for moving charges at task migration. */
/*
 * Types of charges to be moved.
 */
#define MOVE_ANON	0x1U
#define MOVE_FILE	0x2U
#define MOVE_MASK	(MOVE_ANON | MOVE_FILE)

/* "mc" and its members are protected by cgroup_mutex */
static struct move_charge_struct {
	spinlock_t	  lock; /* for from, to */
	struct mm_struct  *mm;
	struct mem_cgroup *from;
	struct mem_cgroup *to;
	unsigned long flags;
	unsigned long precharge;
	unsigned long moved_charge;
	unsigned long moved_swap;
	struct task_struct *moving_task;	/* a task moving charges */
	wait_queue_head_t waitq;		/* a waitq for other context */
} mc = {
	.lock = __SPIN_LOCK_UNLOCKED(mc.lock),
	.waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
};

/*
 * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft
 * limit reclaim to prevent infinite loops, if they ever occur.
 */
#define	MEM_CGROUP_MAX_RECLAIM_LOOPS		100
#define	MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS	2

enum charge_type {
	MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
	MEM_CGROUP_CHARGE_TYPE_ANON,
	MEM_CGROUP_CHARGE_TYPE_SWAPOUT,	/* for accounting swapcache */
	MEM_CGROUP_CHARGE_TYPE_DROP,	/* a page was unused swap cache */
	NR_CHARGE_TYPE,
};

/* for encoding cft->private value on file */
enum res_type {
	_MEM,
	_MEMSWAP,
	_OOM_TYPE,
	_KMEM,
};

#define MEMFILE_PRIVATE(x, val)	((x) << 16 | (val))
#define MEMFILE_TYPE(val)	((val) >> 16 & 0xffff)
#define MEMFILE_ATTR(val)	((val) & 0xffff)
/* Used for OOM notifier */
#define OOM_CONTROL		(0)
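/*
 * Illustrative round trip (editor's sketch, using only identifiers defined
 * above): the type and the attribute each fit in 16 bits of cft->private,
 * so encoding and decoding are lossless:
 *
 *	unsigned long priv = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL);
 *
 *	MEMFILE_TYPE(priv) == _OOM_TYPE;
 *	MEMFILE_ATTR(priv) == OOM_CONTROL;
 */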
/*
 * The memcg_create_mutex will be held whenever a new cgroup is created.
 * As a consequence, any change that needs to protect against new child
 * cgroups appearing has to hold it as well.
 */
static DEFINE_MUTEX(memcg_create_mutex);

/* Some nice accessors for the vmpressure. */
struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)
{
	if (!memcg)
		memcg = root_mem_cgroup;
	return &memcg->vmpressure;
}

struct cgroup_subsys_state *vmpressure_to_css(struct vmpressure *vmpr)
{
	return &container_of(vmpr, struct mem_cgroup, vmpressure)->css;
}

static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
{
	return (memcg == root_mem_cgroup);
}

/*
 * We restrict the id in the range of [1, 65535], so it can fit into
 * an unsigned short.
 */
#define MEM_CGROUP_ID_MAX	USHRT_MAX

static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
{
	return memcg->css.id;
}

/*
 * A helper function to get mem_cgroup from ID.  Must be called under
 * rcu_read_lock().  The caller is responsible for calling
 * css_tryget_online() if the mem_cgroup is used for charging.  (dropping
 * refcnt from swap can be called against removed memcg.)
 */
static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
{
	struct cgroup_subsys_state *css;

	css = css_from_id(id, &memory_cgrp_subsys);
	return mem_cgroup_from_css(css);
}

/* Writing them here to avoid exposing memcg's inner layout */
#if defined(CONFIG_INET) && defined(CONFIG_MEMCG_KMEM)

void sock_update_memcg(struct sock *sk)
{
	if (mem_cgroup_sockets_enabled) {
		struct mem_cgroup *memcg;
		struct cg_proto *cg_proto;

		BUG_ON(!sk->sk_prot->proto_cgroup);

		/* Socket cloning can throw us here with sk_cgrp already
		 * filled.  It won't, however, necessarily happen from
		 * process context.  So the test for root memcg given
		 * the current task's memcg won't help us in this case.
		 *
		 * Respecting the original socket's memcg is a better
		 * decision in this case.
		 */
		if (sk->sk_cgrp) {
			BUG_ON(mem_cgroup_is_root(sk->sk_cgrp->memcg));
			css_get(&sk->sk_cgrp->memcg->css);
			return;
		}

		rcu_read_lock();
		memcg = mem_cgroup_from_task(current);
		cg_proto = sk->sk_prot->proto_cgroup(memcg);
		if (cg_proto && test_bit(MEMCG_SOCK_ACTIVE, &cg_proto->flags) &&
		    css_tryget_online(&memcg->css)) {
			sk->sk_cgrp = cg_proto;
		}
		rcu_read_unlock();
	}
}
EXPORT_SYMBOL(sock_update_memcg);

void sock_release_memcg(struct sock *sk)
{
	if (mem_cgroup_sockets_enabled && sk->sk_cgrp) {
		struct mem_cgroup *memcg;
		WARN_ON(!sk->sk_cgrp->memcg);
		memcg = sk->sk_cgrp->memcg;
		css_put(&sk->sk_cgrp->memcg->css);
	}
}

struct cg_proto *tcp_proto_cgroup(struct mem_cgroup *memcg)
{
	if (!memcg || mem_cgroup_is_root(memcg))
		return NULL;

	return &memcg->tcp_mem;
}
EXPORT_SYMBOL(tcp_proto_cgroup);

#endif

#ifdef CONFIG_MEMCG_KMEM
/*
 * This will be the memcg's index in each cache's ->memcg_params.memcg_caches.
 * The main reason for not using the cgroup id for this:
 * it works better in sparse environments, where we have a lot of memcgs,
 * but only a few are kmem-limited.  Also, if we have, for instance, 200
 * memcgs, and none but the 200th is kmem-limited, we'd have to have a
 * 200-entry array for that.
 *
 * The current size of the caches array is stored in memcg_nr_cache_ids.  It
 * will double each time we have to increase it.
 */
static DEFINE_IDA(memcg_cache_ida);
int memcg_nr_cache_ids;

/* Protects memcg_nr_cache_ids */
static DECLARE_RWSEM(memcg_cache_ids_sem);

void memcg_get_cache_ids(void)
{
	down_read(&memcg_cache_ids_sem);
}

void memcg_put_cache_ids(void)
{
	up_read(&memcg_cache_ids_sem);
}

/*
 * MIN_SIZE is different from 1, because we would like to avoid going through
 * the alloc/free process all the time.  On a small machine, 4 kmem-limited
 * cgroups is a reasonable guess.  In the future, it could be a parameter or
 * tunable, but that is strictly not necessary.
 *
 * MAX_SIZE should be as large as the number of cgrp_ids.  Ideally, we could
 * get this constant directly from cgroup, but it is understandable that this
 * is better kept as an internal representation in cgroup.c.  In any case, the
 * cgrp_id space is not getting any smaller, and we don't have to necessarily
 * increase ours as well if it increases.
 */
#define MEMCG_CACHES_MIN_SIZE 4
#define MEMCG_CACHES_MAX_SIZE MEM_CGROUP_ID_MAX

/*
 * A lot of the calls to the cache allocation functions are expected to be
 * inlined by the compiler.  Since the calls to memcg_kmem_get_cache are
 * conditional to this static branch, we'll have to allow modules that do
 * kmem_cache_alloc and the like to see this symbol as well.
 */
struct static_key memcg_kmem_enabled_key;
EXPORT_SYMBOL(memcg_kmem_enabled_key);

#endif /* CONFIG_MEMCG_KMEM */

static struct mem_cgroup_per_zone *
mem_cgroup_zone_zoneinfo(struct mem_cgroup *memcg, struct zone *zone)
{
	int nid = zone_to_nid(zone);
	int zid = zone_idx(zone);

	return &memcg->nodeinfo[nid]->zoneinfo[zid];
}

/**
 * mem_cgroup_css_from_page - css of the memcg associated with a page
 * @page: page of interest
 *
 * If memcg is bound to the default hierarchy, css of the memcg associated
 * with @page is returned.  The returned css remains associated with @page
 * until it is released.
 *
 * If memcg is bound to a traditional hierarchy, the css of root_mem_cgroup
 * is returned.
 *
 * XXX: The above description of behavior on the default hierarchy isn't
 * strictly true yet as replace_page_cache_page() can modify the
 * association before @page is released even on the default hierarchy;
 * however, the current and planned usages don't mix the two functions
 * and replace_page_cache_page() will soon be updated to make the invariant
 * actually true.
 */
struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page)
{
	struct mem_cgroup *memcg;

	rcu_read_lock();

	memcg = page->mem_cgroup;

	if (!memcg || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
		memcg = root_mem_cgroup;

	rcu_read_unlock();
	return &memcg->css;
}
/**
 * page_cgroup_ino - return inode number of the memcg a page is charged to
 * @page: the page
 *
 * Look up the closest online ancestor of the memory cgroup @page is charged to
 * and return its inode number or 0 if @page is not charged to any cgroup.  It
 * is safe to call this function without holding a reference to @page.
 *
 * Note, this function is inherently racy, because there is nothing to prevent
 * the cgroup inode from getting torn down and potentially reallocated a moment
 * after page_cgroup_ino() returns, so it should only be used by callers that
 * do not care (such as procfs interfaces).
 */
ino_t page_cgroup_ino(struct page *page)
{
	struct mem_cgroup *memcg;
	unsigned long ino = 0;

	rcu_read_lock();
	memcg = READ_ONCE(page->mem_cgroup);
	while (memcg && !(memcg->css.flags & CSS_ONLINE))
		memcg = parent_mem_cgroup(memcg);
	if (memcg)
		ino = cgroup_ino(memcg->css.cgroup);
	rcu_read_unlock();
	return ino;
}

static struct mem_cgroup_per_zone *
mem_cgroup_page_zoneinfo(struct mem_cgroup *memcg, struct page *page)
{
	int nid = page_to_nid(page);
	int zid = page_zonenum(page);

	return &memcg->nodeinfo[nid]->zoneinfo[zid];
}

static struct mem_cgroup_tree_per_zone *
soft_limit_tree_node_zone(int nid, int zid)
{
	return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
}

static struct mem_cgroup_tree_per_zone *
soft_limit_tree_from_page(struct page *page)
{
	int nid = page_to_nid(page);
	int zid = page_zonenum(page);

	return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
}

static void __mem_cgroup_insert_exceeded(struct mem_cgroup_per_zone *mz,
					 struct mem_cgroup_tree_per_zone *mctz,
					 unsigned long new_usage_in_excess)
{
	struct rb_node **p = &mctz->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct mem_cgroup_per_zone *mz_node;

	if (mz->on_tree)
		return;

	mz->usage_in_excess = new_usage_in_excess;
	if (!mz->usage_in_excess)
		return;
	while (*p) {
		parent = *p;
		mz_node = rb_entry(parent, struct mem_cgroup_per_zone,
					tree_node);
		if (mz->usage_in_excess < mz_node->usage_in_excess)
			p = &(*p)->rb_left;
		/*
		 * We can't avoid mem cgroups that are over their soft
		 * limit by the same amount
		 */
		else if (mz->usage_in_excess >= mz_node->usage_in_excess)
			p = &(*p)->rb_right;
	}
	rb_link_node(&mz->tree_node, parent, p);
	rb_insert_color(&mz->tree_node, &mctz->rb_root);
	mz->on_tree = true;
}

static void __mem_cgroup_remove_exceeded(struct mem_cgroup_per_zone *mz,
					 struct mem_cgroup_tree_per_zone *mctz)
{
	if (!mz->on_tree)
		return;
	rb_erase(&mz->tree_node, &mctz->rb_root);
	mz->on_tree = false;
}

static void mem_cgroup_remove_exceeded(struct mem_cgroup_per_zone *mz,
				       struct mem_cgroup_tree_per_zone *mctz)
{
	unsigned long flags;

	spin_lock_irqsave(&mctz->lock, flags);
	__mem_cgroup_remove_exceeded(mz, mctz);
	spin_unlock_irqrestore(&mctz->lock, flags);
}

static unsigned long soft_limit_excess(struct mem_cgroup *memcg)
{
	unsigned long nr_pages = page_counter_read(&memcg->memory);
	unsigned long soft_limit = READ_ONCE(memcg->soft_limit);
	unsigned long excess = 0;

	if (nr_pages > soft_limit)
		excess = nr_pages - soft_limit;

	return excess;
}
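/*
 * Worked example (illustrative only): with usage at 1536 pages and
 * memcg->soft_limit at 1024, soft_limit_excess() returns 512.  At or
 * below the soft limit it returns 0, and a zero excess also keeps the
 * group off the soft limit tree: __mem_cgroup_insert_exceeded() above
 * returns early when usage_in_excess is 0.
 */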
static void mem_cgroup_update_tree(struct mem_cgroup *memcg, struct page *page)
{
	unsigned long excess;
	struct mem_cgroup_per_zone *mz;
	struct mem_cgroup_tree_per_zone *mctz;

	mctz = soft_limit_tree_from_page(page);
	/*
	 * Necessary to update all ancestors when hierarchy is used,
	 * because their event counter is not touched.
	 */
	for (; memcg; memcg = parent_mem_cgroup(memcg)) {
		mz = mem_cgroup_page_zoneinfo(memcg, page);
		excess = soft_limit_excess(memcg);
		/*
		 * We have to update the tree if mz is on the RB-tree or
		 * mem is over its soft limit.
		 */
		if (excess || mz->on_tree) {
			unsigned long flags;

			spin_lock_irqsave(&mctz->lock, flags);
			/* if on-tree, remove it */
			if (mz->on_tree)
				__mem_cgroup_remove_exceeded(mz, mctz);
			/*
			 * Insert again.  mz->usage_in_excess will be updated.
			 * If excess is 0, no tree ops.
			 */
			__mem_cgroup_insert_exceeded(mz, mctz, excess);
			spin_unlock_irqrestore(&mctz->lock, flags);
		}
	}
}

static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg)
{
	struct mem_cgroup_tree_per_zone *mctz;
	struct mem_cgroup_per_zone *mz;
	int nid, zid;

	for_each_node(nid) {
		for (zid = 0; zid < MAX_NR_ZONES; zid++) {
			mz = &memcg->nodeinfo[nid]->zoneinfo[zid];
			mctz = soft_limit_tree_node_zone(nid, zid);
			mem_cgroup_remove_exceeded(mz, mctz);
		}
	}
}

static struct mem_cgroup_per_zone *
__mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
{
	struct rb_node *rightmost = NULL;
	struct mem_cgroup_per_zone *mz;

retry:
	mz = NULL;
	rightmost = rb_last(&mctz->rb_root);
	if (!rightmost)
		goto done;		/* Nothing to reclaim from */

	mz = rb_entry(rightmost, struct mem_cgroup_per_zone, tree_node);
	/*
	 * Remove the node now but someone else can add it back;
	 * we will add it back at the end of reclaim to its correct
	 * position in the tree.
	 */
	__mem_cgroup_remove_exceeded(mz, mctz);
	if (!soft_limit_excess(mz->memcg) ||
	    !css_tryget_online(&mz->memcg->css))
		goto retry;
done:
	return mz;
}

static struct mem_cgroup_per_zone *
mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
{
	struct mem_cgroup_per_zone *mz;

	spin_lock_irq(&mctz->lock);
	mz = __mem_cgroup_largest_soft_limit_node(mctz);
	spin_unlock_irq(&mctz->lock);
	return mz;
}

/*
 * Return page count for a single (non-recursive) @memcg.
 *
 * Implementation Note: reading percpu statistics for memcg.
 *
 * Both vmstat[] and percpu_counter have thresholds and do periodic
 * synchronization to implement "quick" reads.  There is a trade-off between
 * reading cost and precision of the value.  We may have a chance to implement
 * a periodic synchronization of the counter in memcg's counter.
 *
 * But this _read() function is used for the user interface now.  The user
 * accounts memory usage by memory cgroup and _always_ requires an exact value
 * because they account memory.  Even if we provided a quick-and-fuzzy read,
 * we would always have to visit all online cpus and make the sum.  So, for
 * now, unnecessary synchronization is not implemented.  (just implemented
 * for cpu hotplug)
 *
 * If there are kernel internal actions which can make use of a not-exact
 * value, and reading all cpu values can be a performance bottleneck in some
 * common workload, a threshold and synchronization as in vmstat[] should be
 * implemented.
 */
static unsigned long
mem_cgroup_read_stat(struct mem_cgroup *memcg, enum mem_cgroup_stat_index idx)
{
	long val = 0;
	int cpu;

	/* Per-cpu values can be negative, use a signed accumulator */
	for_each_possible_cpu(cpu)
		val += per_cpu(memcg->stat->count[idx], cpu);
	/*
	 * Summing races with updates, so val may be negative.  Avoid exposing
	 * transient negative values.
	 */
	if (val < 0)
		val = 0;
	return val;
}

static unsigned long mem_cgroup_read_events(struct mem_cgroup *memcg,
					    enum mem_cgroup_events_index idx)
{
	unsigned long val = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		val += per_cpu(memcg->stat->events[idx], cpu);
	return val;
}

static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
					 struct page *page,
					 int nr_pages)
{
	/*
	 * Here, RSS means 'mapped anon' and anon's SwapCache.  Shmem/tmpfs is
	 * counted as CACHE even if it's on ANON LRU.
	 */
	if (PageAnon(page))
		__this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_RSS],
				nr_pages);
	else
		__this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_CACHE],
				nr_pages);

	if (PageTransHuge(page))
		__this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_RSS_HUGE],
				nr_pages);

	/* pagein of a big page is an event.  So, ignore page size */
	if (nr_pages > 0)
		__this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGIN]);
	else {
		__this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGOUT]);
		nr_pages = -nr_pages; /* for event */
	}

	__this_cpu_add(memcg->stat->nr_page_events, nr_pages);
}

static unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
						  int nid,
						  unsigned int lru_mask)
{
	unsigned long nr = 0;
	int zid;

	VM_BUG_ON((unsigned)nid >= nr_node_ids);

	for (zid = 0; zid < MAX_NR_ZONES; zid++) {
		struct mem_cgroup_per_zone *mz;
		enum lru_list lru;

		for_each_lru(lru) {
			if (!(BIT(lru) & lru_mask))
				continue;
			mz = &memcg->nodeinfo[nid]->zoneinfo[zid];
			nr += mz->lru_size[lru];
		}
	}
	return nr;
}

static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg,
					     unsigned int lru_mask)
{
	unsigned long nr = 0;
	int nid;

	for_each_node_state(nid, N_MEMORY)
		nr += mem_cgroup_node_nr_lru_pages(memcg, nid, lru_mask);
	return nr;
}

static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
				       enum mem_cgroup_events_target target)
{
	unsigned long val, next;

	val = __this_cpu_read(memcg->stat->nr_page_events);
	next = __this_cpu_read(memcg->stat->targets[target]);
	/* from time_after() in jiffies.h */
	if ((long)next - (long)val < 0) {
		switch (target) {
		case MEM_CGROUP_TARGET_THRESH:
			next = val + THRESHOLDS_EVENTS_TARGET;
			break;
		case MEM_CGROUP_TARGET_SOFTLIMIT:
			next = val + SOFTLIMIT_EVENTS_TARGET;
			break;
		case MEM_CGROUP_TARGET_NUMAINFO:
			next = val + NUMAINFO_EVENTS_TARGET;
			break;
		default:
			break;
		}
		__this_cpu_write(memcg->stat->targets[target], next);
		return true;
	}
	return false;
}
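/*
 * Illustrative example: mem_cgroup_event_ratelimit() implements per-cpu
 * event windows.  With THRESHOLDS_EVENTS_TARGET == 128, once
 * nr_page_events on this cpu passes the stored target, the function
 * returns true and arms the next target at val + 128, so the thresholds
 * check fires roughly once per 128 page events per cpu; the SOFTLIMIT
 * and NUMAINFO targets fire correspondingly less often (every 1024).
 */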
/*
 * Check events in order.
 */
static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
{
	/* threshold event is triggered in finer grain than soft limit */
	if (unlikely(mem_cgroup_event_ratelimit(memcg,
						MEM_CGROUP_TARGET_THRESH))) {
		bool do_softlimit;
		bool do_numainfo __maybe_unused;

		do_softlimit = mem_cgroup_event_ratelimit(memcg,
						MEM_CGROUP_TARGET_SOFTLIMIT);
#if MAX_NUMNODES > 1
		do_numainfo = mem_cgroup_event_ratelimit(memcg,
						MEM_CGROUP_TARGET_NUMAINFO);
#endif
		mem_cgroup_threshold(memcg);
		if (unlikely(do_softlimit))
			mem_cgroup_update_tree(memcg, page);
#if MAX_NUMNODES > 1
		if (unlikely(do_numainfo))
			atomic_inc(&memcg->numainfo_events);
#endif
	}
}

struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
{
	/*
	 * mm_update_next_owner() may clear mm->owner to NULL
	 * if it races with swapoff, page migration, etc.
	 * So this can be called with p == NULL.
	 */
	if (unlikely(!p))
		return NULL;

	return mem_cgroup_from_css(task_css(p, memory_cgrp_id));
}
EXPORT_SYMBOL(mem_cgroup_from_task);

static struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
{
	struct mem_cgroup *memcg = NULL;

	rcu_read_lock();
	do {
		/*
		 * Page cache insertions can happen without an
		 * actual mm context, e.g. during disk probing
		 * on boot, loopback IO, acct() writes etc.
		 */
		if (unlikely(!mm))
			memcg = root_mem_cgroup;
		else {
			memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
			if (unlikely(!memcg))
				memcg = root_mem_cgroup;
		}
	} while (!css_tryget_online(&memcg->css));
	rcu_read_unlock();
	return memcg;
}

/**
 * mem_cgroup_iter - iterate over memory cgroup hierarchy
 * @root: hierarchy root
 * @prev: previously returned memcg, NULL on first invocation
 * @reclaim: cookie for shared reclaim walks, NULL for full walks
 *
 * Returns references to children of the hierarchy below @root, or
 * @root itself, or %NULL after a full round-trip.
 *
 * Caller must pass the return value in @prev on subsequent
 * invocations for reference counting, or use mem_cgroup_iter_break()
 * to cancel a hierarchy walk before the round-trip is complete.
 *
 * Reclaimers can specify a zone and a priority level in @reclaim to
 * divide up the memcgs in the hierarchy among all concurrent
 * reclaimers operating on the same zone and priority.
 */
struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
				   struct mem_cgroup *prev,
				   struct mem_cgroup_reclaim_cookie *reclaim)
{
	struct mem_cgroup_reclaim_iter *uninitialized_var(iter);
	struct cgroup_subsys_state *css = NULL;
	struct mem_cgroup *memcg = NULL;
	struct mem_cgroup *pos = NULL;

	if (mem_cgroup_disabled())
		return NULL;

	if (!root)
		root = root_mem_cgroup;

	if (prev && !reclaim)
		pos = prev;

	if (!root->use_hierarchy && root != root_mem_cgroup) {
		if (prev)
			goto out;
		return root;
	}

	rcu_read_lock();

	if (reclaim) {
		struct mem_cgroup_per_zone *mz;

		mz = mem_cgroup_zone_zoneinfo(root, reclaim->zone);
		iter = &mz->iter[reclaim->priority];

		if (prev && reclaim->generation != iter->generation)
			goto out_unlock;

		while (1) {
			pos = READ_ONCE(iter->position);
			if (!pos || css_tryget(&pos->css))
				break;
			/*
			 * css reference reached zero, so iter->position will
			 * be cleared by ->css_released.  However, we should
			 * not rely on this happening soon, because
			 * ->css_released is called from a work queue, and by
			 * busy-waiting we might block it.  So we clear
			 * iter->position right away.
			 */
			(void)cmpxchg(&iter->position, pos, NULL);
		}
	}

	if (pos)
		css = &pos->css;

	for (;;) {
		css = css_next_descendant_pre(css, &root->css);
		if (!css) {
			/*
			 * Reclaimers share the hierarchy walk, and a
			 * new one might jump in right at the end of
			 * the hierarchy - make sure they see at least
			 * one group and restart from the beginning.
			 */
			if (!prev)
				continue;
			break;
		}

		/*
		 * Verify the css and acquire a reference.  The root
		 * is provided by the caller, so we know it's alive
		 * and kicking, and don't take an extra reference.
		 */
		memcg = mem_cgroup_from_css(css);

		if (css == &root->css)
			break;

		if (css_tryget(css)) {
			/*
			 * Make sure the memcg is initialized:
			 * mem_cgroup_css_online() orders the
			 * initialization against setting the flag.
			 */
			if (smp_load_acquire(&memcg->initialized))
				break;

			css_put(css);
		}

		memcg = NULL;
	}

	if (reclaim) {
		/*
		 * The position could have already been updated by a competing
		 * thread, so check that the value hasn't changed since we read
		 * it to avoid reclaiming from the same cgroup twice.
		 */
		(void)cmpxchg(&iter->position, pos, memcg);

		if (pos)
			css_put(&pos->css);

		if (!memcg)
			iter->generation++;
		else if (!prev)
			reclaim->generation = iter->generation;
	}

out_unlock:
	rcu_read_unlock();
out:
	if (prev && prev != root)
		css_put(&prev->css);

	return memcg;
}
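/*
 * Illustrative usage sketch (the same pattern as the
 * for_each_mem_cgroup_tree() macro defined below); some_condition() is
 * a hypothetical predicate:
 *
 *	struct mem_cgroup *iter = NULL;
 *
 *	while ((iter = mem_cgroup_iter(root, iter, NULL))) {
 *		if (some_condition(iter)) {
 *			mem_cgroup_iter_break(root, iter);
 *			break;
 *		}
 *	}
 *
 * Breaking out without mem_cgroup_iter_break() would leak the css
 * reference held on the last memcg returned.
 */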
/**
 * mem_cgroup_iter_break - abort a hierarchy walk prematurely
 * @root: hierarchy root
 * @prev: last visited hierarchy member as returned by mem_cgroup_iter()
 */
void mem_cgroup_iter_break(struct mem_cgroup *root,
			   struct mem_cgroup *prev)
{
	if (!root)
		root = root_mem_cgroup;
	if (prev && prev != root)
		css_put(&prev->css);
}

static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg)
{
	struct mem_cgroup *memcg = dead_memcg;
	struct mem_cgroup_reclaim_iter *iter;
	struct mem_cgroup_per_zone *mz;
	int nid, zid;
	int i;

	while ((memcg = parent_mem_cgroup(memcg))) {
		for_each_node(nid) {
			for (zid = 0; zid < MAX_NR_ZONES; zid++) {
				mz = &memcg->nodeinfo[nid]->zoneinfo[zid];
				for (i = 0; i <= DEF_PRIORITY; i++) {
					iter = &mz->iter[i];
					cmpxchg(&iter->position,
						dead_memcg, NULL);
				}
			}
		}
	}
}

/*
 * Iteration constructs for visiting all cgroups (under a tree).  If
 * loops are exited prematurely (break), mem_cgroup_iter_break() must
 * be used for reference counting.
 */
#define for_each_mem_cgroup_tree(iter, root)		\
	for (iter = mem_cgroup_iter(root, NULL, NULL);	\
	     iter != NULL;				\
	     iter = mem_cgroup_iter(root, iter, NULL))

#define for_each_mem_cgroup(iter)			\
	for (iter = mem_cgroup_iter(NULL, NULL, NULL);	\
	     iter != NULL;				\
	     iter = mem_cgroup_iter(NULL, iter, NULL))

/**
 * mem_cgroup_zone_lruvec - get the lru list vector for a zone and memcg
 * @zone: zone of the wanted lruvec
 * @memcg: memcg of the wanted lruvec
 *
 * Returns the lru list vector holding pages for the given @zone and
 * @memcg.  This can be the global zone lruvec, if the memory controller
 * is disabled.
 */
struct lruvec *mem_cgroup_zone_lruvec(struct zone *zone,
				      struct mem_cgroup *memcg)
{
	struct mem_cgroup_per_zone *mz;
	struct lruvec *lruvec;

	if (mem_cgroup_disabled()) {
		lruvec = &zone->lruvec;
		goto out;
	}

	mz = mem_cgroup_zone_zoneinfo(memcg, zone);
	lruvec = &mz->lruvec;
out:
	/*
	 * Since a node can be onlined after the mem_cgroup was created,
	 * we have to be prepared to initialize lruvec->zone here;
	 * and if offlined then reonlined, we need to reinitialize it.
	 */
	if (unlikely(lruvec->zone != zone))
		lruvec->zone = zone;
	return lruvec;
}
/**
 * mem_cgroup_page_lruvec - return lruvec for isolating/putting an LRU page
 * @page: the page
 * @zone: zone of the page
 *
 * This function is only safe when following the LRU page isolation
 * and putback protocol: the LRU lock must be held, and the page must
 * either be PageLRU() or the caller must have isolated/allocated it.
 */
struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct zone *zone)
{
	struct mem_cgroup_per_zone *mz;
	struct mem_cgroup *memcg;
	struct lruvec *lruvec;

	if (mem_cgroup_disabled()) {
		lruvec = &zone->lruvec;
		goto out;
	}

	memcg = page->mem_cgroup;
	/*
	 * Swapcache readahead pages are added to the LRU - and
	 * possibly migrated - before they are charged.
	 */
	if (!memcg)
		memcg = root_mem_cgroup;

	mz = mem_cgroup_page_zoneinfo(memcg, page);
	lruvec = &mz->lruvec;
out:
	/*
	 * Since a node can be onlined after the mem_cgroup was created,
	 * we have to be prepared to initialize lruvec->zone here;
	 * and if offlined then reonlined, we need to reinitialize it.
	 */
	if (unlikely(lruvec->zone != zone))
		lruvec->zone = zone;
	return lruvec;
}

/**
 * mem_cgroup_update_lru_size - account for adding or removing an lru page
 * @lruvec: mem_cgroup per zone lru vector
 * @lru: index of lru list the page is sitting on
 * @nr_pages: positive when adding or negative when removing
 *
 * This function must be called when a page is added to or removed from an
 * lru list.
 */
void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
				int nr_pages)
{
	struct mem_cgroup_per_zone *mz;
	unsigned long *lru_size;

	if (mem_cgroup_disabled())
		return;

	mz = container_of(lruvec, struct mem_cgroup_per_zone, lruvec);
	lru_size = mz->lru_size + lru;
	*lru_size += nr_pages;
	VM_BUG_ON((long)(*lru_size) < 0);
}

bool task_in_mem_cgroup(struct task_struct *task, struct mem_cgroup *memcg)
{
	struct mem_cgroup *task_memcg;
	struct task_struct *p;
	bool ret;

	p = find_lock_task_mm(task);
	if (p) {
		task_memcg = get_mem_cgroup_from_mm(p->mm);
		task_unlock(p);
	} else {
		/*
		 * All threads may have already detached their mm's, but the
		 * oom killer still needs to detect if they have already been
		 * oom killed to prevent needlessly killing additional tasks.
		 */
		rcu_read_lock();
		task_memcg = mem_cgroup_from_task(task);
		css_get(&task_memcg->css);
		rcu_read_unlock();
	}
	ret = mem_cgroup_is_descendant(task_memcg, memcg);
	css_put(&task_memcg->css);
	return ret;
}

#define mem_cgroup_from_counter(counter, member)	\
	container_of(counter, struct mem_cgroup, member)

/**
 * mem_cgroup_margin - calculate chargeable space of a memory cgroup
 * @memcg: the memory cgroup
 *
 * Returns the maximum amount of memory @memcg can be charged with, in
 * pages.
 */
static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
{
	unsigned long margin = 0;
	unsigned long count;
	unsigned long limit;

	count = page_counter_read(&memcg->memory);
	limit = READ_ONCE(memcg->memory.limit);
	if (count < limit)
		margin = limit - count;

	if (do_swap_account) {
		count = page_counter_read(&memcg->memsw);
		limit = READ_ONCE(memcg->memsw.limit);
		if (count <= limit)
			margin = min(margin, limit - count);
	}

	return margin;
}
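/*
 * Worked example (illustrative only): with memory.limit == 2048 pages
 * and usage == 2000, the memory margin is 48.  If swap accounting is
 * enabled with memsw.limit == 2560 and memsw usage == 2540, the result
 * is min(48, 20) == 20 pages, the most that can be charged without
 * hitting either counter.
 */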
/*
 * A routine for checking whether "memcg" is under move_account() or not.
 *
 * Checks whether a cgroup is mc.from, mc.to, or under the hierarchy of
 * a moving cgroup.  This is for waiting at high memory pressure
 * caused by "move".
 */
static bool mem_cgroup_under_move(struct mem_cgroup *memcg)
{
	struct mem_cgroup *from;
	struct mem_cgroup *to;
	bool ret = false;
	/*
	 * Unlike task_move routines, we access mc.to, mc.from not under
	 * mutual exclusion by cgroup_mutex.  Here, we take spinlock instead.
	 */
	spin_lock(&mc.lock);
	from = mc.from;
	to = mc.to;
	if (!from)
		goto unlock;

	ret = mem_cgroup_is_descendant(from, memcg) ||
		mem_cgroup_is_descendant(to, memcg);
unlock:
	spin_unlock(&mc.lock);
	return ret;
}

static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg)
{
	if (mc.moving_task && current != mc.moving_task) {
		if (mem_cgroup_under_move(memcg)) {
			DEFINE_WAIT(wait);
			prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE);
			/* moving charge context might have finished. */
			if (mc.moving_task)
				schedule();
			finish_wait(&mc.waitq, &wait);
			return true;
		}
	}
	return false;
}

#define K(x) ((x) << (PAGE_SHIFT-10))
/**
 * mem_cgroup_print_oom_info: Print OOM information relevant to memory controller.
 * @memcg: The memory cgroup that went over limit
 * @p: Task that is going to be killed
 *
 * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
 * enabled
 */
void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
{
	/* oom_info_lock ensures that parallel ooms do not interleave */
	static DEFINE_MUTEX(oom_info_lock);
	struct mem_cgroup *iter;
	unsigned int i;

	mutex_lock(&oom_info_lock);
	rcu_read_lock();

	if (p) {
		pr_info("Task in ");
		pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id));
		pr_cont(" killed as a result of limit of ");
	} else {
		pr_info("Memory limit reached of cgroup ");
	}

	pr_cont_cgroup_path(memcg->css.cgroup);
	pr_cont("\n");

	rcu_read_unlock();

	pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n",
		K((u64)page_counter_read(&memcg->memory)),
		K((u64)memcg->memory.limit), memcg->memory.failcnt);
	pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %lu\n",
		K((u64)page_counter_read(&memcg->memsw)),
		K((u64)memcg->memsw.limit), memcg->memsw.failcnt);
	pr_info("kmem: usage %llukB, limit %llukB, failcnt %lu\n",
		K((u64)page_counter_read(&memcg->kmem)),
		K((u64)memcg->kmem.limit), memcg->kmem.failcnt);

	for_each_mem_cgroup_tree(iter, memcg) {
		pr_info("Memory cgroup stats for ");
		pr_cont_cgroup_path(iter->css.cgroup);
		pr_cont(":");

		for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
			if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account)
				continue;
			pr_cont(" %s:%luKB", mem_cgroup_stat_names[i],
				K(mem_cgroup_read_stat(iter, i)));
		}

		for (i = 0; i < NR_LRU_LISTS; i++)
			pr_cont(" %s:%luKB", mem_cgroup_lru_names[i],
				K(mem_cgroup_nr_lru_pages(iter, BIT(i))));

		pr_cont("\n");
	}
	mutex_unlock(&oom_info_lock);
}

/*
 * This function returns the number of memcgs under the hierarchy tree.
 * Returns 1 (self count) if no children.
 */
static int mem_cgroup_count_children(struct mem_cgroup *memcg)
{
	int num = 0;
	struct mem_cgroup *iter;

	for_each_mem_cgroup_tree(iter, memcg)
		num++;
	return num;
}

/*
 * Return the memory (and swap, if configured) limit for a memcg.
 */
static unsigned long mem_cgroup_get_limit(struct mem_cgroup *memcg)
{
	unsigned long limit;

	limit = memcg->memory.limit;
	if (mem_cgroup_swappiness(memcg)) {
		unsigned long memsw_limit;

		memsw_limit = memcg->memsw.limit;
		limit = min(limit + total_swap_pages, memsw_limit);
	}
	return limit;
}

static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
				     int order)
{
	struct oom_control oc = {
		.zonelist = NULL,
		.nodemask = NULL,
		.gfp_mask = gfp_mask,
		.order = order,
	};
	struct mem_cgroup *iter;
	unsigned long chosen_points = 0;
	unsigned long totalpages;
	unsigned int points = 0;
	struct task_struct *chosen = NULL;

	mutex_lock(&oom_lock);

	/*
	 * If current has a pending SIGKILL or is exiting, then automatically
	 * select it.  The goal is to allow it to allocate so that it may
	 * quickly exit and free its memory.
	 */
	if (fatal_signal_pending(current) || task_will_free_mem(current)) {
		mark_oom_victim(current);
		goto unlock;
	}

	check_panic_on_oom(&oc, CONSTRAINT_MEMCG, memcg);
	totalpages = mem_cgroup_get_limit(memcg) ? : 1;
	for_each_mem_cgroup_tree(iter, memcg) {
		struct css_task_iter it;
		struct task_struct *task;

		css_task_iter_start(&iter->css, &it);
		while ((task = css_task_iter_next(&it))) {
			switch (oom_scan_process_thread(&oc, task, totalpages)) {
			case OOM_SCAN_SELECT:
				if (chosen)
					put_task_struct(chosen);
				chosen = task;
				chosen_points = ULONG_MAX;
				get_task_struct(chosen);
				/* fall through */
			case OOM_SCAN_CONTINUE:
				continue;
			case OOM_SCAN_ABORT:
				css_task_iter_end(&it);
				mem_cgroup_iter_break(memcg, iter);
				if (chosen)
					put_task_struct(chosen);
				goto unlock;
			case OOM_SCAN_OK:
				break;
			};
			points = oom_badness(task, memcg, NULL, totalpages);
			if (!points || points < chosen_points)
				continue;
			/* Prefer thread group leaders for display purposes */
			if (points == chosen_points &&
			    thread_group_leader(chosen))
				continue;

			if (chosen)
				put_task_struct(chosen);
			chosen = task;
			chosen_points = points;
			get_task_struct(chosen);
		}
		css_task_iter_end(&it);
	}

	if (chosen) {
		points = chosen_points * 1000 / totalpages;
		oom_kill_process(&oc, chosen, points, totalpages, memcg,
				 "Memory cgroup out of memory");
	}
unlock:
	mutex_unlock(&oom_lock);
	return chosen;
}

#if MAX_NUMNODES > 1

/**
 * test_mem_cgroup_node_reclaimable
 * @memcg: the target memcg
 * @nid: the node ID to be checked.
 * @noswap: specify true here if the user wants file-only information.
 *
 * This function returns whether the specified memcg contains any
 * reclaimable pages on a node.  Returns true if there are any reclaimable
 * pages in the node.
 */
static bool test_mem_cgroup_node_reclaimable(struct mem_cgroup *memcg,
		int nid, bool noswap)
{
	if (mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_FILE))
		return true;
	if (noswap || !total_swap_pages)
		return false;
	if (mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_ANON))
		return true;
	return false;
}

/*
 * Always updating the nodemask is not very good - even if we have an empty
 * list or the wrong list here, we can start from some node and traverse all
 * nodes based on the zonelist.  So update the list loosely once per 10 secs.
 */
static void mem_cgroup_may_update_nodemask(struct mem_cgroup *memcg)
{
	int nid;
	/*
	 * numainfo_events > 0 means there was at least NUMAINFO_EVENTS_TARGET
	 * pagein/pageout changes since the last update.
	 */
	if (!atomic_read(&memcg->numainfo_events))
		return;
	if (atomic_inc_return(&memcg->numainfo_updating) > 1)
		return;

	/* make a nodemask where this memcg uses memory from */
	memcg->scan_nodes = node_states[N_MEMORY];

	for_each_node_mask(nid, node_states[N_MEMORY]) {
		if (!test_mem_cgroup_node_reclaimable(memcg, nid, false))
			node_clear(nid, memcg->scan_nodes);
	}

	atomic_set(&memcg->numainfo_events, 0);
	atomic_set(&memcg->numainfo_updating, 0);
}
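/*
 * Illustrative example of the round-robin walk below: with
 * scan_nodes == {0, 2, 3} and last_scanned_node == 2, next_node()
 * yields 3; starting from 3 it yields MAX_NUMNODES, so the walk wraps
 * around via first_node() back to node 0.
 */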
/*
 * Selecting a node where we start reclaim from.  Because what we need is just
 * reducing the usage counter, starting from anywhere is OK.  Considering
 * memory reclaim from the current node, there are pros and cons:
 *
 * Freeing memory from the current node means freeing memory from a node which
 * we'll use or we've used.  So, it may make the LRU bad.  And if several
 * threads hit their limits, they will see contention on a node.  But freeing
 * from a remote node means more costs for memory reclaim because of memory
 * latency.
 *
 * Now, we use round-robin.  A better algorithm is welcome.
 */
int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
{
	int node;

	mem_cgroup_may_update_nodemask(memcg);
	node = memcg->last_scanned_node;

	node = next_node(node, memcg->scan_nodes);
	if (node == MAX_NUMNODES)
		node = first_node(memcg->scan_nodes);
	/*
	 * We call this when we hit the limit, not when pages are added to the
	 * LRU.  No LRU may hold pages because all pages are UNEVICTABLE, or
	 * the memcg is too small and all pages are not on the LRU.  In that
	 * case, we use the current node.
	 */
	if (unlikely(node == MAX_NUMNODES))
		node = numa_node_id();

	memcg->last_scanned_node = node;
	return node;
}
#else
int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
{
	return 0;
}
#endif

static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg,
				   struct zone *zone,
				   gfp_t gfp_mask,
				   unsigned long *total_scanned)
{
	struct mem_cgroup *victim = NULL;
	int total = 0;
	int loop = 0;
	unsigned long excess;
	unsigned long nr_scanned;
	struct mem_cgroup_reclaim_cookie reclaim = {
		.zone = zone,
		.priority = 0,
	};

	excess = soft_limit_excess(root_memcg);

	while (1) {
		victim = mem_cgroup_iter(root_memcg, victim, &reclaim);
		if (!victim) {
			loop++;
			if (loop >= 2) {
				/*
				 * If we have not been able to reclaim
				 * anything, it might be because there are
				 * no reclaimable pages under this hierarchy.
				 */
				if (!total)
					break;
				/*
				 * We want to do more targeted reclaim.
				 * excess >> 2 is not too excessive, so we
				 * don't reclaim too much, nor so little that
				 * we keep coming back to reclaim from this
				 * cgroup.
				 */
				if (total >= (excess >> 2) ||
					(loop > MEM_CGROUP_MAX_RECLAIM_LOOPS))
					break;
			}
			continue;
		}
		total += mem_cgroup_shrink_node_zone(victim, gfp_mask, false,
						     zone, &nr_scanned);
		*total_scanned += nr_scanned;
		if (!soft_limit_excess(root_memcg))
			break;
	}
	mem_cgroup_iter_break(root_memcg, victim);
	return total;
}

#ifdef CONFIG_LOCKDEP
static struct lockdep_map memcg_oom_lock_dep_map = {
	.name = "memcg_oom_lock",
};
#endif

static DEFINE_SPINLOCK(memcg_oom_lock);
/*
 * Check whether the OOM killer is already running under our hierarchy.
 * If someone is running, return false.
 */
static bool mem_cgroup_oom_trylock(struct mem_cgroup *memcg)
{
	struct mem_cgroup *iter, *failed = NULL;

	spin_lock(&memcg_oom_lock);

	for_each_mem_cgroup_tree(iter, memcg) {
		if (iter->oom_lock) {
			/*
			 * this subtree of our hierarchy is already locked,
			 * so we cannot give a lock.
			 */
			failed = iter;
			mem_cgroup_iter_break(memcg, iter);
			break;
		} else
			iter->oom_lock = true;
	}

	if (failed) {
		/*
		 * OK, we failed to lock the whole subtree so we have
		 * to clean up what we set up to the failing subtree
		 */
		for_each_mem_cgroup_tree(iter, memcg) {
			if (iter == failed) {
				mem_cgroup_iter_break(memcg, iter);
				break;
			}
			iter->oom_lock = false;
		}
	} else
		mutex_acquire(&memcg_oom_lock_dep_map, 0, 1, _RET_IP_);

	spin_unlock(&memcg_oom_lock);

	return !failed;
}

static void mem_cgroup_oom_unlock(struct mem_cgroup *memcg)
{
	struct mem_cgroup *iter;

	spin_lock(&memcg_oom_lock);
	mutex_release(&memcg_oom_lock_dep_map, 1, _RET_IP_);
	for_each_mem_cgroup_tree(iter, memcg)
		iter->oom_lock = false;
	spin_unlock(&memcg_oom_lock);
}

static void mem_cgroup_mark_under_oom(struct mem_cgroup *memcg)
{
	struct mem_cgroup *iter;

	spin_lock(&memcg_oom_lock);
	for_each_mem_cgroup_tree(iter, memcg)
		iter->under_oom++;
	spin_unlock(&memcg_oom_lock);
}

static void mem_cgroup_unmark_under_oom(struct mem_cgroup *memcg)
{
	struct mem_cgroup *iter;

	/*
	 * When a new child is created while the hierarchy is under oom,
	 * mem_cgroup_oom_lock() may not be called.  Watch for underflow.
	 */
	spin_lock(&memcg_oom_lock);
	for_each_mem_cgroup_tree(iter, memcg)
		if (iter->under_oom > 0)
			iter->under_oom--;
	spin_unlock(&memcg_oom_lock);
}

static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);

struct oom_wait_info {
	struct mem_cgroup *memcg;
	wait_queue_t	wait;
};

static int memcg_oom_wake_function(wait_queue_t *wait,
				   unsigned mode, int sync, void *arg)
{
	struct mem_cgroup *wake_memcg = (struct mem_cgroup *)arg;
	struct mem_cgroup *oom_wait_memcg;
	struct oom_wait_info *oom_wait_info;

	oom_wait_info = container_of(wait, struct oom_wait_info, wait);
	oom_wait_memcg = oom_wait_info->memcg;

	if (!mem_cgroup_is_descendant(wake_memcg, oom_wait_memcg) &&
	    !mem_cgroup_is_descendant(oom_wait_memcg, wake_memcg))
		return 0;
	return autoremove_wake_function(wait, mode, sync, arg);
}

static void memcg_oom_recover(struct mem_cgroup *memcg)
{
	/*
	 * For the following lockless ->under_oom test, the only required
	 * guarantee is that it must see the state asserted by an OOM when
	 * this function is called as a result of userland actions
	 * triggered by the notification of the OOM.  This is trivially
	 * achieved by invoking mem_cgroup_mark_under_oom() before
	 * triggering notification.
	 */
	if (memcg && memcg->under_oom)
		__wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, memcg);
}
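/*
 * Sketch of the memcg OOM protocol implemented by the two functions
 * below (editor's illustration): the charge path merely records the
 * OOM state on the task, and the page fault exit path completes or
 * cancels it once all locks have been dropped, roughly:
 *
 *	// charge path, on failure:
 *	mem_cgroup_oom(memcg, gfp_mask, order);
 *
 *	// end of the page fault:
 *	if (task_in_memcg_oom(current))
 *		mem_cgroup_oom_synchronize(fault_failed);
 *
 * where fault_failed says whether to actually kill/wait (true) or just
 * clean up the recorded state (false).
 */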
static void mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
{
	if (!current->memcg_may_oom)
		return;
	/*
	 * We are in the middle of the charge context here, so we
	 * don't want to block when potentially sitting on a callstack
	 * that holds all kinds of filesystem and mm locks.
	 *
	 * Also, the caller may handle a failed allocation gracefully
	 * (like optional page cache readahead) and so an OOM killer
	 * invocation might not even be necessary.
	 *
	 * That's why we don't do anything here except remember the
	 * OOM context and then deal with it at the end of the page
	 * fault when the stack is unwound, the locks are released,
	 * and when we know whether the fault was overall successful.
	 */
	css_get(&memcg->css);
	current->memcg_in_oom = memcg;
	current->memcg_oom_gfp_mask = mask;
	current->memcg_oom_order = order;
}

/**
 * mem_cgroup_oom_synchronize - complete memcg OOM handling
 * @handle: actually kill/wait or just clean up the OOM state
 *
 * This has to be called at the end of a page fault if the memcg OOM
 * handler was enabled.
 *
 * Memcg supports userspace OOM handling where failed allocations must
 * sleep on a waitqueue until the userspace task resolves the
 * situation.  Sleeping directly in the charge context with all kinds
 * of locks held is not a good idea, instead we remember an OOM state
 * in the task and mem_cgroup_oom_synchronize() has to be called at
 * the end of the page fault to complete the OOM handling.
 *
 * Returns %true if an ongoing memcg OOM situation was detected and
 * completed, %false otherwise.
 */
bool mem_cgroup_oom_synchronize(bool handle)
{
	struct mem_cgroup *memcg = current->memcg_in_oom;
	struct oom_wait_info owait;
	bool locked;

	/* OOM is global, do not handle */
	if (!memcg)
		return false;

	if (!handle || oom_killer_disabled)
		goto cleanup;

	owait.memcg = memcg;
	owait.wait.flags = 0;
	owait.wait.func = memcg_oom_wake_function;
	owait.wait.private = current;
	INIT_LIST_HEAD(&owait.wait.task_list);

	prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE);
	mem_cgroup_mark_under_oom(memcg);

	locked = mem_cgroup_oom_trylock(memcg);

	if (locked)
		mem_cgroup_oom_notify(memcg);

	if (locked && !memcg->oom_kill_disable) {
		mem_cgroup_unmark_under_oom(memcg);
		finish_wait(&memcg_oom_waitq, &owait.wait);
		mem_cgroup_out_of_memory(memcg, current->memcg_oom_gfp_mask,
					 current->memcg_oom_order);
	} else {
		schedule();
		mem_cgroup_unmark_under_oom(memcg);
		finish_wait(&memcg_oom_waitq, &owait.wait);
	}

	if (locked) {
		mem_cgroup_oom_unlock(memcg);
		/*
		 * There is no guarantee that an OOM-lock contender
		 * sees the wakeups triggered by the OOM kill
		 * uncharges.  Wake any sleepers explicitly.
		 */
		memcg_oom_recover(memcg);
	}
cleanup:
	current->memcg_in_oom = NULL;
	css_put(&memcg->css);
	return true;
}
/**
 * mem_cgroup_begin_page_stat - begin a page state statistics transaction
 * @page: page that is going to change accounted state
 *
 * This function must mark the beginning of an accounted page state
 * change to prevent double accounting when the page is concurrently
 * being moved to another memcg:
 *
 *   memcg = mem_cgroup_begin_page_stat(page);
 *   if (TestClearPageState(page))
 *     mem_cgroup_update_page_stat(memcg, state, -1);
 *   mem_cgroup_end_page_stat(memcg);
 */
struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page)
{
	struct mem_cgroup *memcg;
	unsigned long flags;

	/*
	 * The RCU lock is held throughout the transaction.  The fast
	 * path can get away without acquiring the memcg->move_lock
	 * because page moving starts with an RCU grace period.
	 *
	 * The RCU lock also protects the memcg from being freed when
	 * the page state that is going to change is the only thing
	 * preventing the page from being uncharged.
	 * E.g. end-writeback clearing PageWriteback(), which allows
	 * migration to go ahead and uncharge the page before the
	 * account transaction might be complete.
	 */
	rcu_read_lock();

	if (mem_cgroup_disabled())
		return NULL;
again:
	memcg = page->mem_cgroup;
	if (unlikely(!memcg))
		return NULL;

	if (atomic_read(&memcg->moving_account) <= 0)
		return memcg;

	spin_lock_irqsave(&memcg->move_lock, flags);
	if (memcg != page->mem_cgroup) {
		spin_unlock_irqrestore(&memcg->move_lock, flags);
		goto again;
	}

	/*
	 * When charge migration first begins, we can have locked and
	 * unlocked page stat updates happening concurrently.  Track
	 * the task who has the lock for mem_cgroup_end_page_stat().
	 */
	memcg->move_lock_task = current;
	memcg->move_lock_flags = flags;

	return memcg;
}
EXPORT_SYMBOL(mem_cgroup_begin_page_stat);

/**
 * mem_cgroup_end_page_stat - finish a page state statistics transaction
 * @memcg: the memcg that was accounted against
 */
void mem_cgroup_end_page_stat(struct mem_cgroup *memcg)
{
	if (memcg && memcg->move_lock_task == current) {
		unsigned long flags = memcg->move_lock_flags;

		memcg->move_lock_task = NULL;
		memcg->move_lock_flags = 0;

		spin_unlock_irqrestore(&memcg->move_lock, flags);
	}

	rcu_read_unlock();
}
EXPORT_SYMBOL(mem_cgroup_end_page_stat);

/*
 * size of first charge trial.  "32" comes from vmscan.c's magic value.
 * TODO: maybe necessary to use big numbers on big iron.
 */
#define CHARGE_BATCH	32U
struct memcg_stock_pcp {
	struct mem_cgroup *cached; /* must never be the root cgroup */
	unsigned int nr_pages;
	struct work_struct work;
	unsigned long flags;
#define FLUSHING_CACHED_CHARGE	0
};
static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
static DEFINE_MUTEX(percpu_charge_mutex);

/**
 * consume_stock: Try to consume stocked charge on this cpu.
 * @memcg: memcg to consume from.
 * @nr_pages: how many pages to charge.
 *
 * The charges will only happen if @memcg matches the current cpu's memcg
 * stock, and at least @nr_pages are available in that stock.  Failure to
 * service an allocation will refill the stock.
 *
 * returns true if successful, false otherwise.
 */
static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
{
	struct memcg_stock_pcp *stock;
	bool ret = false;

	if (nr_pages > CHARGE_BATCH)
		return ret;

	stock = &get_cpu_var(memcg_stock);
	if (memcg == stock->cached && stock->nr_pages >= nr_pages) {
		stock->nr_pages -= nr_pages;
		ret = true;
	}
	put_cpu_var(memcg_stock);
	return ret;
}
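/*
 * Illustrative flow of the per-cpu stock (editor's sketch; the actual
 * refill happens on try_charge()'s done_restock path further below):
 * a charge of batch pages parks the surplus on this cpu, and the next
 * charge for the same memcg can then skip the page counters entirely:
 *
 *	refill_stock(memcg, batch - nr_pages);
 *	...
 *	if (consume_stock(memcg, nr_pages))
 *		return 0;	// served from the local stock
 */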

/*
 * Drain stocked charges back to the page counters and reset the
 * cached information.
 */
static void drain_stock(struct memcg_stock_pcp *stock)
{
	struct mem_cgroup *old = stock->cached;

	if (stock->nr_pages) {
		page_counter_uncharge(&old->memory, stock->nr_pages);
		if (do_swap_account)
			page_counter_uncharge(&old->memsw, stock->nr_pages);
		css_put_many(&old->css, stock->nr_pages);
		stock->nr_pages = 0;
	}
	stock->cached = NULL;
}

/*
 * This must be called with preemption disabled or by a thread which
 * is pinned to the local cpu.
 */
static void drain_local_stock(struct work_struct *dummy)
{
	struct memcg_stock_pcp *stock = this_cpu_ptr(&memcg_stock);
	drain_stock(stock);
	clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
}

/*
 * Cache @nr_pages worth of charges in the local per-cpu area.
 * They will be consumed by consume_stock() later.
 */
static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
{
	struct memcg_stock_pcp *stock = &get_cpu_var(memcg_stock);

	if (stock->cached != memcg) { /* reset if necessary */
		drain_stock(stock);
		stock->cached = memcg;
	}
	stock->nr_pages += nr_pages;
	put_cpu_var(memcg_stock);
}

/*
 * Drain all per-cpu charge caches for the given root_memcg and the
 * subtree of the hierarchy under it.
 */
static void drain_all_stock(struct mem_cgroup *root_memcg)
{
	int cpu, curcpu;

	/* If someone's already draining, avoid starting more workers. */
	if (!mutex_trylock(&percpu_charge_mutex))
		return;
	/* Notify other cpus that system-wide "drain" is running */
	get_online_cpus();
	curcpu = get_cpu();
	for_each_online_cpu(cpu) {
		struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
		struct mem_cgroup *memcg;

		memcg = stock->cached;
		if (!memcg || !stock->nr_pages)
			continue;
		if (!mem_cgroup_is_descendant(memcg, root_memcg))
			continue;
		if (!test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) {
			if (cpu == curcpu)
				drain_local_stock(&stock->work);
			else
				schedule_work_on(cpu, &stock->work);
		}
	}
	put_cpu();
	put_online_cpus();
	mutex_unlock(&percpu_charge_mutex);
}

static int memcg_cpu_hotplug_callback(struct notifier_block *nb,
				      unsigned long action,
				      void *hcpu)
{
	int cpu = (unsigned long)hcpu;
	struct memcg_stock_pcp *stock;

	if (action == CPU_ONLINE)
		return NOTIFY_OK;

	if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
		return NOTIFY_OK;

	stock = &per_cpu(memcg_stock, cpu);
	drain_stock(stock);
	return NOTIFY_OK;
}
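
/*
 * Sketch (an assumption: the registration lives elsewhere in this
 * file's init code and is not part of this excerpt): the callback
 * above is expected to be wired up with something like
 *
 *	hotcpu_notifier(memcg_cpu_hotplug_callback, 0);
 *
 * so that a dead cpu's stock is returned to the page counters instead
 * of being stranded.
 */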

/*
 * Scheduled by try_charge() to be executed from the userland return
 * path, where it reclaims memory over the high limit.
 */
void mem_cgroup_handle_over_high(void)
{
	unsigned int nr_pages = current->memcg_nr_pages_over_high;
	struct mem_cgroup *memcg, *pos;

	if (likely(!nr_pages))
		return;

	pos = memcg = get_mem_cgroup_from_mm(current->mm);

	do {
		if (page_counter_read(&pos->memory) <= pos->high)
			continue;
		mem_cgroup_events(pos, MEMCG_HIGH, 1);
		try_to_free_mem_cgroup_pages(pos, nr_pages, GFP_KERNEL, true);
	} while ((pos = parent_mem_cgroup(pos)));

	css_put(&memcg->css);
	current->memcg_nr_pages_over_high = 0;
}
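
/*
 * Sketch (an assumption about code outside this file): the userland
 * return path is expected to invoke the function above from the
 * resume hook in <linux/tracehook.h>, roughly:
 *
 *	static inline void tracehook_notify_resume(struct pt_regs *regs)
 *	{
 *		...
 *		mem_cgroup_handle_over_high();
 *	}
 */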

static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
		      unsigned int nr_pages)
{
	unsigned int batch = max(CHARGE_BATCH, nr_pages);
	int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
	struct mem_cgroup *mem_over_limit;
	struct page_counter *counter;
	unsigned long nr_reclaimed;
	bool may_swap = true;
	bool drained = false;

	if (mem_cgroup_is_root(memcg))
		return 0;
retry:
	if (consume_stock(memcg, nr_pages))
		return 0;

	if (!do_swap_account ||
	    page_counter_try_charge(&memcg->memsw, batch, &counter)) {
		if (page_counter_try_charge(&memcg->memory, batch, &counter))
			goto done_restock;
		if (do_swap_account)
			page_counter_uncharge(&memcg->memsw, batch);
		mem_over_limit = mem_cgroup_from_counter(counter, memory);
	} else {
		mem_over_limit = mem_cgroup_from_counter(counter, memsw);
		may_swap = false;
	}

	if (batch > nr_pages) {
		batch = nr_pages;
		goto retry;
	}

	/*
	 * Unlike in global OOM situations, memcg is not in a physical
	 * memory shortage.  Allow dying and OOM-killed tasks to
	 * bypass the last charges so that they can exit quickly and
	 * free their memory.
	 */
	if (unlikely(test_thread_flag(TIF_MEMDIE) ||
		     fatal_signal_pending(current) ||
		     current->flags & PF_EXITING))
		goto force;

	if (unlikely(task_in_memcg_oom(current)))
		goto nomem;

	if (!gfpflags_allow_blocking(gfp_mask))
		goto nomem;

	mem_cgroup_events(mem_over_limit, MEMCG_MAX, 1);

	nr_reclaimed = try_to_free_mem_cgroup_pages(mem_over_limit, nr_pages,
						    gfp_mask, may_swap);

	if (mem_cgroup_margin(mem_over_limit) >= nr_pages)
		goto retry;

	if (!drained) {
		drain_all_stock(mem_over_limit);
		drained = true;
		goto retry;
	}

	if (gfp_mask & __GFP_NORETRY)
		goto nomem;
	/*
	 * Even though the limit is exceeded at this point, reclaim
	 * may have been able to free some pages.  Retry the charge
	 * before killing the task.
	 *
	 * Only for regular pages, though: huge pages are rather
	 * unlikely to succeed so close to the limit, and we fall back
	 * to regular pages anyway in case of failure.
	 */
	if (nr_reclaimed && nr_pages <= (1 << PAGE_ALLOC_COSTLY_ORDER))
		goto retry;
	/*
	 * During task move, charges can be doubly counted, so it's
	 * better to wait until the move is finished if one is going on.
	 */
	if (mem_cgroup_wait_acct_move(mem_over_limit))
		goto retry;

	if (nr_retries--)
		goto retry;

	if (gfp_mask & __GFP_NOFAIL)
		goto force;

	if (fatal_signal_pending(current))
		goto force;

	mem_cgroup_events(mem_over_limit, MEMCG_OOM, 1);

	mem_cgroup_oom(mem_over_limit, gfp_mask,
		       get_order(nr_pages * PAGE_SIZE));
nomem:
	if (!(gfp_mask & __GFP_NOFAIL))
		return -ENOMEM;
force:
	/*
	 * The allocation either can't fail or will lead to more memory
	 * being freed very soon.  Allow memory usage to go over the
	 * limit temporarily by force charging it.
	 */
	page_counter_charge(&memcg->memory, nr_pages);
	if (do_swap_account)
		page_counter_charge(&memcg->memsw, nr_pages);
	css_get_many(&memcg->css, nr_pages);

	return 0;

done_restock:
	css_get_many(&memcg->css, batch);
	if (batch > nr_pages)
		refill_stock(memcg, batch - nr_pages);

	/*
	 * If the hierarchy is above the normal consumption range, schedule
	 * reclaim on returning to userland.  We can perform reclaim here
	 * if __GFP_RECLAIM is set, but let's always punt for simplicity
	 * and so that GFP_KERNEL can consistently be used during reclaim.
	 * @memcg is not recorded as it most likely matches current's and
	 * won't change in the meantime.  As the high limit is checked
	 * again before reclaim, the cost of a mismatch is negligible.
	 */
	do {
		if (page_counter_read(&memcg->memory) > memcg->high) {
			current->memcg_nr_pages_over_high += batch;
			set_notify_resume(current);
			break;
		}
	} while ((memcg = parent_mem_cgroup(memcg)));

	return 0;
}

static void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages)
{
	if (mem_cgroup_is_root(memcg))
		return;

	page_counter_uncharge(&memcg->memory, nr_pages);
	if (do_swap_account)
		page_counter_uncharge(&memcg->memsw, nr_pages);

	css_put_many(&memcg->css, nr_pages);
}
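
/*
 * Sketch of the intended charge lifecycle, based on the charge API
 * later in this file (page_was_instantiated is a hypothetical
 * stand-in condition): a successful try_charge() is either committed
 * to a page or backed out again:
 *
 *	if (try_charge(memcg, gfp_mask, nr_pages))
 *		return -ENOMEM;
 *	...
 *	if (page_was_instantiated)
 *		commit_charge(page, memcg, lrucare);
 *	else
 *		cancel_charge(memcg, nr_pages);
 */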

static void lock_page_lru(struct page *page, int *isolated)
{
	struct zone *zone = page_zone(page);

	spin_lock_irq(&zone->lru_lock);
	if (PageLRU(page)) {
		struct lruvec *lruvec;

		lruvec = mem_cgroup_page_lruvec(page, zone);
		ClearPageLRU(page);
		del_page_from_lru_list(page, lruvec, page_lru(page));
		*isolated = 1;
	} else
		*isolated = 0;
}

static void unlock_page_lru(struct page *page, int isolated)
{
	struct zone *zone = page_zone(page);

	if (isolated) {
		struct lruvec *lruvec;

		lruvec = mem_cgroup_page_lruvec(page, zone);
		VM_BUG_ON_PAGE(PageLRU(page), page);
		SetPageLRU(page);
		add_page_to_lru_list(page, lruvec, page_lru(page));
	}
	spin_unlock_irq(&zone->lru_lock);
}

static void commit_charge(struct page *page, struct mem_cgroup *memcg,
			  bool lrucare)
{
	int isolated;

	VM_BUG_ON_PAGE(page->mem_cgroup, page);

	/*
	 * In some cases (swap cache, FUSE's splice_buf->radixtree), the
	 * page may already be on some other mem_cgroup's LRU.  Take care
	 * of it.
	 */
	if (lrucare)
		lock_page_lru(page, &isolated);

	/*
	 * Nobody should be changing or seriously looking at
	 * page->mem_cgroup at this point:
	 *
	 * - the page is uncharged
	 *
	 * - the page is off-LRU
	 *
	 * - an anonymous fault has exclusive page access, except for
	 *   a locked page table
	 *
	 * - a page cache insertion, a swapin fault, or a migration
	 *   have the page locked
	 */
	page->mem_cgroup = memcg;

	if (lrucare)
		unlock_page_lru(page, isolated);
}

#ifdef CONFIG_MEMCG_KMEM
static int memcg_alloc_cache_id(void)
{
	int id, size;
	int err;

	id = ida_simple_get(&memcg_cache_ida,
			    0, MEMCG_CACHES_MAX_SIZE, GFP_KERNEL);
	if (id < 0)
		return id;

	if (id < memcg_nr_cache_ids)
		return id;

	/*
	 * There's no space for the new id in memcg_caches arrays,
	 * so we have to grow them.
	 */
	down_write(&memcg_cache_ids_sem);

	size = 2 * (id + 1);
	if (size < MEMCG_CACHES_MIN_SIZE)
		size = MEMCG_CACHES_MIN_SIZE;
	else if (size > MEMCG_CACHES_MAX_SIZE)
		size = MEMCG_CACHES_MAX_SIZE;

	err = memcg_update_all_caches(size);
	if (!err)
		err = memcg_update_all_list_lrus(size);
	if (!err)
		memcg_nr_cache_ids = size;

	up_write(&memcg_cache_ids_sem);

	if (err) {
		ida_simple_remove(&memcg_cache_ida, id);
		return err;
	}
	return id;
}

static void memcg_free_cache_id(int id)
{
	ida_simple_remove(&memcg_cache_ida, id);
}
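
/*
 * Worked example for the growth policy above: if memcg_nr_cache_ids
 * is 16 and the IDA hands out id 16, the arrays are grown to
 * 2 * (16 + 1) = 34 entries (then clamped to the MIN/MAX bounds), so
 * roughly half of the slots remain free for future cgroups before
 * the next resize is needed.
 */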

struct memcg_kmem_cache_create_work {
	struct mem_cgroup *memcg;
	struct kmem_cache *cachep;
	struct work_struct work;
};

static void memcg_kmem_cache_create_func(struct work_struct *w)
{
	struct memcg_kmem_cache_create_work *cw =
		container_of(w, struct memcg_kmem_cache_create_work, work);
	struct mem_cgroup *memcg = cw->memcg;
	struct kmem_cache *cachep = cw->cachep;

	memcg_create_kmem_cache(memcg, cachep);

	css_put(&memcg->css);
	kfree(cw);
}

/*
 * Enqueue the creation of a per-memcg kmem_cache.
 */
static void __memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg,
					       struct kmem_cache *cachep)
{
	struct memcg_kmem_cache_create_work *cw;

	cw = kmalloc(sizeof(*cw), GFP_NOWAIT);
	if (!cw)
		return;

	css_get(&memcg->css);

	cw->memcg = memcg;
	cw->cachep = cachep;
	INIT_WORK(&cw->work, memcg_kmem_cache_create_func);

	schedule_work(&cw->work);
}

static void memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg,
					     struct kmem_cache *cachep)
{
	/*
	 * We need to stop accounting when we kmalloc, because if the
	 * corresponding kmalloc cache is not yet created, the first
	 * allocation in __memcg_schedule_kmem_cache_create will recurse.
	 *
	 * However, it is better to enclose the whole function.  Depending
	 * on the debugging options enabled, INIT_WORK(), for instance, can
	 * trigger an allocation.  This, too, will make us recurse.  Because
	 * at this point we can't allow ourselves back into
	 * memcg_kmem_get_cache, the safest choice is to do it like this,
	 * wrapping the whole function.
	 */
	current->memcg_kmem_skip_account = 1;
	__memcg_schedule_kmem_cache_create(memcg, cachep);
	current->memcg_kmem_skip_account = 0;
}
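
/*
 * For orientation (an assumption about the slab side, which is not
 * part of this excerpt): the allocator's pre-allocation hook is
 * expected to route through the wrapper in memcontrol.h, roughly as
 *
 *	if (memcg_kmem_enabled())
 *		s = memcg_kmem_get_cache(s, gfpflags);
 *
 * which in turn dispatches to __memcg_kmem_get_cache() below.
 */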

/*
 * Return the kmem_cache we're supposed to use for a slab allocation.
 * We try to use the current memcg's version of the cache.
 *
 * If the cache does not exist yet and we are the first user of it, we
 * either create it immediately, if possible, or create it asynchronously
 * in a workqueue.
 * In the latter case, we will let the current allocation go through with
 * the original cache.
 *
 * Can't be called in interrupt context or from kernel threads.
 * This function needs to be called with rcu_read_lock() held.
 */
struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep)
{
	struct mem_cgroup *memcg;
	struct kmem_cache *memcg_cachep;
	int kmemcg_id;

	VM_BUG_ON(!is_root_cache(cachep));

	if (current->memcg_kmem_skip_account)
		return cachep;

	memcg = get_mem_cgroup_from_mm(current->mm);
	kmemcg_id = READ_ONCE(memcg->kmemcg_id);
	if (kmemcg_id < 0)
		goto out;

	memcg_cachep = cache_from_memcg_idx(cachep, kmemcg_id);
	if (likely(memcg_cachep))
		return memcg_cachep;

	/*
	 * If we are in a safe context (can wait, and not in interrupt
	 * context), we could be predictable and return right away.
	 * This would guarantee that the allocation being performed
	 * already belongs in the new cache.
	 *
	 * However, there are some clashes that can arise from locking.
	 * For instance, because we acquire the slab_mutex while doing
	 * memcg_create_kmem_cache, this means no further allocation
	 * could happen with the slab_mutex held.  So it's better to
	 * defer everything.
	 */
	memcg_schedule_kmem_cache_create(memcg, cachep);
out:
	css_put(&memcg->css);
	return cachep;
}

void __memcg_kmem_put_cache(struct kmem_cache *cachep)
{
	if (!is_root_cache(cachep))
		css_put(&cachep->memcg_params.memcg->css);
}

int __memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order,
			      struct mem_cgroup *memcg)
{
	unsigned int nr_pages = 1 << order;
	struct page_counter *counter;
	int ret;

	if (!memcg_kmem_is_active(memcg))
		return 0;

	if (!page_counter_try_charge(&memcg->kmem, nr_pages, &counter))
		return -ENOMEM;

	ret = try_charge(memcg, gfp, nr_pages);
	if (ret) {
		page_counter_uncharge(&memcg->kmem, nr_pages);
		return ret;
	}

	page->mem_cgroup = memcg;

	return 0;
}

int __memcg_kmem_charge(struct page *page, gfp_t gfp, int order)
{
	struct mem_cgroup *memcg;
	int ret;

	memcg = get_mem_cgroup_from_mm(current->mm);
	ret = __memcg_kmem_charge_memcg(page, gfp, order, memcg);
	css_put(&memcg->css);
	return ret;
}

void __memcg_kmem_uncharge(struct page *page, int order)
{
	struct mem_cgroup *memcg = page->mem_cgroup;
	unsigned int nr_pages = 1 << order;

	if (!memcg)
		return;

	VM_BUG_ON_PAGE(mem_cgroup_is_root(memcg), page);

	page_counter_uncharge(&memcg->kmem, nr_pages);
	page_counter_uncharge(&memcg->memory, nr_pages);
	if (do_swap_account)
		page_counter_uncharge(&memcg->memsw, nr_pages);

	page->mem_cgroup = NULL;
	css_put_many(&memcg->css, nr_pages);
}
#endif /* CONFIG_MEMCG_KMEM */
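
/*
 * Sketch of the expected pairing (an assumption: the non-underscored
 * wrappers live in memcontrol.h and the call sites are outside this
 * excerpt): every successful kmem charge of an order-N page is
 * matched by an uncharge when the page is freed:
 *
 *	if (memcg_kmem_charge(page, gfp, order))   - __memcg_kmem_charge()
 *		goto out_free;
 *	...
 *	memcg_kmem_uncharge(page, order);          - __memcg_kmem_uncharge()
 */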

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

/*
 * Because tail pages are not marked as "used", set them.  We're under
 * zone->lru_lock, 'splitting on pmd' and compound_lock.
 * charge/uncharge will never happen, and move_account() is done under
 * compound_lock(), so we don't have to take care of races.
 */
void mem_cgroup_split_huge_fixup(struct page *head)
{
	int i;

	if (mem_cgroup_disabled())
		return;

	for (i = 1; i < HPAGE_PMD_NR; i++)
		head[i].mem_cgroup = head->mem_cgroup;

	__this_cpu_sub(head->mem_cgroup->stat->count[MEM_CGROUP_STAT_RSS_HUGE],
		       HPAGE_PMD_NR);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#ifdef CONFIG_MEMCG_SWAP
static void mem_cgroup_swap_statistics(struct mem_cgroup *memcg,
				       bool charge)
{
	int val = (charge) ? 1 : -1;
	this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_SWAP], val);
}

/**
 * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record.
 * @entry: swap entry to be moved
 * @from:  mem_cgroup which the entry is moved from
 * @to:  mem_cgroup which the entry is moved to
 *
 * It succeeds only when the swap_cgroup's record for this entry is the same
 * as the mem_cgroup's id of @from.
 *
 * Returns 0 on success, -EINVAL on failure.
 *
 * The caller must have charged to @to, IOW, called page_counter_charge()
 * for both memory and memsw, and called css_get().
 */
static int mem_cgroup_move_swap_account(swp_entry_t entry,
					struct mem_cgroup *from,
					struct mem_cgroup *to)
{
	unsigned short old_id, new_id;

	old_id = mem_cgroup_id(from);
	new_id = mem_cgroup_id(to);

	if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) {
		mem_cgroup_swap_statistics(from, false);
		mem_cgroup_swap_statistics(to, true);
		return 0;
	}
	return -EINVAL;
}
#else
static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
					       struct mem_cgroup *from,
					       struct mem_cgroup *to)
{
	return -EINVAL;
}
#endif

static DEFINE_MUTEX(memcg_limit_mutex);

static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
				   unsigned long limit)
{
	unsigned long curusage;
	unsigned long oldusage;
	bool enlarge = false;
	int retry_count;
	int ret;

	/*
	 * To keep hierarchical reclaim simple, how long we should retry
	 * depends on the caller.  We set the retry count to be a function
	 * of the number of children we should visit in this loop.
	 */
	retry_count = MEM_CGROUP_RECLAIM_RETRIES *
		      mem_cgroup_count_children(memcg);

	oldusage = page_counter_read(&memcg->memory);

	do {
		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		mutex_lock(&memcg_limit_mutex);
		if (limit > memcg->memsw.limit) {
			mutex_unlock(&memcg_limit_mutex);
			ret = -EINVAL;
			break;
		}
		if (limit > memcg->memory.limit)
			enlarge = true;
		ret = page_counter_limit(&memcg->memory, limit);
		mutex_unlock(&memcg_limit_mutex);

		if (!ret)
			break;

		try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL, true);

		curusage = page_counter_read(&memcg->memory);
		/* Usage is reduced? */
		if (curusage >= oldusage)
			retry_count--;
		else
			oldusage = curusage;
	} while (retry_count);

	if (!ret && enlarge)
		memcg_oom_recover(memcg);

	return ret;
}
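
/*
 * Worked example for the retry policy above (illustrative): with
 * MEM_CGROUP_RECLAIM_RETRIES == 5 and a memcg with three children,
 * retry_count starts at 5 * 4 = 20 (the tree walk includes the memcg
 * itself), so up to 20 reclaim passes that fail to reduce usage are
 * tolerated before the loop gives up and returns the last
 * page_counter_limit() error.
 */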

static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
					 unsigned long limit)
{
	unsigned long curusage;
	unsigned long oldusage;
	bool enlarge = false;
	int retry_count;
	int ret;

	/* see mem_cgroup_resize_limit() */
	retry_count = MEM_CGROUP_RECLAIM_RETRIES *
		      mem_cgroup_count_children(memcg);

	oldusage = page_counter_read(&memcg->memsw);

	do {
		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		mutex_lock(&memcg_limit_mutex);
		if (limit < memcg->memory.limit) {
			mutex_unlock(&memcg_limit_mutex);
			ret = -EINVAL;
			break;
		}
		if (limit > memcg->memsw.limit)
			enlarge = true;
		ret = page_counter_limit(&memcg->memsw, limit);
		mutex_unlock(&memcg_limit_mutex);

		if (!ret)
			break;

		try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL, false);

		curusage = page_counter_read(&memcg->memsw);
		/* Usage is reduced? */
		if (curusage >= oldusage)
			retry_count--;
		else
			oldusage = curusage;
	} while (retry_count);

	if (!ret && enlarge)
		memcg_oom_recover(memcg);

	return ret;
}

unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
					    gfp_t gfp_mask,
					    unsigned long *total_scanned)
{
	unsigned long nr_reclaimed = 0;
	struct mem_cgroup_per_zone *mz, *next_mz = NULL;
	unsigned long reclaimed;
	int loop = 0;
	struct mem_cgroup_tree_per_zone *mctz;
	unsigned long excess;
	unsigned long nr_scanned;

	if (order > 0)
		return 0;

	mctz = soft_limit_tree_node_zone(zone_to_nid(zone), zone_idx(zone));
	/*
	 * This loop can run for a while, especially if mem_cgroups
	 * continuously keep exceeding their soft limit, putting the
	 * system under pressure.
	 */
	do {
		if (next_mz)
			mz = next_mz;
		else
			mz = mem_cgroup_largest_soft_limit_node(mctz);
		if (!mz)
			break;

		nr_scanned = 0;
		reclaimed = mem_cgroup_soft_reclaim(mz->memcg, zone,
						    gfp_mask, &nr_scanned);
		nr_reclaimed += reclaimed;
		*total_scanned += nr_scanned;
		spin_lock_irq(&mctz->lock);
		__mem_cgroup_remove_exceeded(mz, mctz);

		/*
		 * If we failed to reclaim anything from this memory cgroup
		 * it is time to move on to the next cgroup.
		 */
		next_mz = NULL;
		if (!reclaimed)
			next_mz = __mem_cgroup_largest_soft_limit_node(mctz);

		excess = soft_limit_excess(mz->memcg);
		/*
		 * One school of thought says that we should not add
		 * back the node to the tree if reclaim returns 0.
		 * But our reclaim could return 0, simply because due
		 * to priority we are exposing a smaller subset of
		 * memory to reclaim from.  Consider this as a longer
		 * term TODO.
		 */
		/* If excess == 0, no tree ops */
		__mem_cgroup_insert_exceeded(mz, mctz, excess);
		spin_unlock_irq(&mctz->lock);
		css_put(&mz->memcg->css);
		loop++;
		/*
		 * Could not reclaim anything and there are no more
		 * mem cgroups to try or we seem to be looping without
		 * reclaiming anything.
		 */
		if (!nr_reclaimed &&
		    (next_mz == NULL ||
		     loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS))
			break;
	} while (!nr_reclaimed);
	if (next_mz)
		css_put(&next_mz->memcg->css);
	return nr_reclaimed;
}
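
/*
 * For orientation (an assumption about the vmscan.c side, which is
 * not part of this file): global reclaim is expected to give
 * soft-limit reclaim the first shot at each zone, roughly as
 *
 *	nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone, order,
 *					gfp_mask, &nr_soft_scanned);
 *	sc->nr_reclaimed += nr_soft_reclaimed;
 *	sc->nr_scanned += nr_soft_scanned;
 */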

/*
 * Test whether @memcg has children, dead or alive.  Note that this
 * function doesn't care whether @memcg has use_hierarchy enabled and
 * returns %true if there are child csses according to the cgroup
 * hierarchy.  Testing use_hierarchy is the caller's responsibility.
 */
static inline bool memcg_has_children(struct mem_cgroup *memcg)
{
	bool ret;

	/*
	 * The lock does not prevent addition or deletion of children, but
	 * it prevents a new child from being initialized based on this
	 * parent in css_online(), so it's enough to decide whether
	 * hierarchically inherited attributes can still be changed or not.
	 */
	lockdep_assert_held(&memcg_create_mutex);

	rcu_read_lock();
	ret = css_next_child(NULL, &memcg->css);
	rcu_read_unlock();
	return ret;
}

/*
 * Reclaims as many pages from the given memcg as possible and moves
 * the rest to the parent.
 *
 * Caller is responsible for holding css reference for memcg.
 */
static int mem_cgroup_force_empty(struct mem_cgroup *memcg)
{
	int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;

	/* we call try-to-free pages to make this cgroup empty */
	lru_add_drain_all();
	/* try to free all pages in this cgroup */
	while (nr_retries && page_counter_read(&memcg->memory)) {
		int progress;

		if (signal_pending(current))
			return -EINTR;

		progress = try_to_free_mem_cgroup_pages(memcg, 1,
							GFP_KERNEL, true);
		if (!progress) {
			nr_retries--;
			/* maybe some writeback is necessary */
			congestion_wait(BLK_RW_ASYNC, HZ/10);
		}

	}

	return 0;
}

static ssize_t mem_cgroup_force_empty_write(struct kernfs_open_file *of,
					    char *buf, size_t nbytes,
					    loff_t off)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));

	if (mem_cgroup_is_root(memcg))
		return -EINVAL;
	return mem_cgroup_force_empty(memcg) ?: nbytes;
}

static u64 mem_cgroup_hierarchy_read(struct cgroup_subsys_state *css,
				     struct cftype *cft)
{
	return mem_cgroup_from_css(css)->use_hierarchy;
}

static int mem_cgroup_hierarchy_write(struct cgroup_subsys_state *css,
				      struct cftype *cft, u64 val)
{
	int retval = 0;
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
	struct mem_cgroup *parent_memcg = mem_cgroup_from_css(memcg->css.parent);

	mutex_lock(&memcg_create_mutex);

	if (memcg->use_hierarchy == val)
		goto out;

	/*
	 * If parent's use_hierarchy is set, we can't make any modifications
	 * in the child subtrees.  If it is unset, then the change can
	 * occur, provided the current cgroup has no children.
	 *
	 * For the root cgroup, parent_memcg is NULL; we allow the value
	 * to be set if there are no children.
	 */
	if ((!parent_memcg || !parent_memcg->use_hierarchy) &&
	    (val == 1 || val == 0)) {
		if (!memcg_has_children(memcg))
			memcg->use_hierarchy = val;
		else
			retval = -EBUSY;
	} else
		retval = -EINVAL;

out:
	mutex_unlock(&memcg_create_mutex);

	return retval;
}

static unsigned long tree_stat(struct mem_cgroup *memcg,
			       enum mem_cgroup_stat_index idx)
{
	struct mem_cgroup *iter;
	unsigned long val = 0;

	for_each_mem_cgroup_tree(iter, memcg)
		val += mem_cgroup_read_stat(iter, idx);

	return val;
}

static unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
{
	unsigned long val;

	if (mem_cgroup_is_root(memcg)) {
		val = tree_stat(memcg, MEM_CGROUP_STAT_CACHE);
		val += tree_stat(memcg, MEM_CGROUP_STAT_RSS);
		if (swap)
			val += tree_stat(memcg, MEM_CGROUP_STAT_SWAP);
	} else {
		if (!swap)
			val = page_counter_read(&memcg->memory);
		else
			val = page_counter_read(&memcg->memsw);
	}
	return val;
}

enum {
	RES_USAGE,
	RES_LIMIT,
	RES_MAX_USAGE,
	RES_FAILCNT,
	RES_SOFT_LIMIT,
};

static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css,
			       struct cftype *cft)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
	struct page_counter *counter;

	switch (MEMFILE_TYPE(cft->private)) {
	case _MEM:
		counter = &memcg->memory;
		break;
	case _MEMSWAP:
		counter = &memcg->memsw;
		break;
	case _KMEM:
		counter = &memcg->kmem;
		break;
	default:
		BUG();
	}

	switch (MEMFILE_ATTR(cft->private)) {
	case RES_USAGE:
		if (counter == &memcg->memory)
			return (u64)mem_cgroup_usage(memcg, false) * PAGE_SIZE;
		if (counter == &memcg->memsw)
			return (u64)mem_cgroup_usage(memcg, true) * PAGE_SIZE;
		return (u64)page_counter_read(counter) * PAGE_SIZE;
	case RES_LIMIT:
		return (u64)counter->limit * PAGE_SIZE;
	case RES_MAX_USAGE:
		return (u64)counter->watermark * PAGE_SIZE;
	case RES_FAILCNT:
		return counter->failcnt;
	case RES_SOFT_LIMIT:
		return (u64)memcg->soft_limit * PAGE_SIZE;
	default:
		BUG();
	}
}

#ifdef CONFIG_MEMCG_KMEM
static int memcg_activate_kmem(struct mem_cgroup *memcg,
			       unsigned long nr_pages)
{
	int err = 0;
	int memcg_id;

	BUG_ON(memcg->kmemcg_id >= 0);
	BUG_ON(memcg->kmem_acct_activated);
	BUG_ON(memcg->kmem_acct_active);

	/*
	 * For simplicity, we won't allow this to be disabled.  It also can't
	 * be changed if the cgroup has children already, or if tasks had
	 * already joined.
	 *
	 * If tasks join before we set the limit, a person looking at
	 * kmem.usage_in_bytes will have no way to determine when it took
	 * place, which makes the value quite meaningless.
	 *
	 * After it first became limited, changes in the value of the limit are
	 * of course permitted.
	 */
	mutex_lock(&memcg_create_mutex);
	if (cgroup_is_populated(memcg->css.cgroup) ||
	    (memcg->use_hierarchy && memcg_has_children(memcg)))
		err = -EBUSY;
	mutex_unlock(&memcg_create_mutex);
	if (err)
		goto out;

	memcg_id = memcg_alloc_cache_id();
	if (memcg_id < 0) {
		err = memcg_id;
		goto out;
	}

	/*
	 * We couldn't have accounted to this cgroup, because it hasn't
	 * been activated yet, so this should succeed.
	 */
	err = page_counter_limit(&memcg->kmem, nr_pages);
	VM_BUG_ON(err);

	static_key_slow_inc(&memcg_kmem_enabled_key);
	/*
	 * A memory cgroup is considered kmem-active as soon as it gets
	 * kmemcg_id.  Setting the id after enabling static branching will
	 * guarantee no one starts accounting before all call sites are
	 * patched.
	 */
	memcg->kmemcg_id = memcg_id;
	memcg->kmem_acct_activated = true;
	memcg->kmem_acct_active = true;
out:
	return err;
}

static int memcg_update_kmem_limit(struct mem_cgroup *memcg,
				   unsigned long limit)
{
	int ret;

	mutex_lock(&memcg_limit_mutex);
	if (!memcg_kmem_is_active(memcg))
		ret = memcg_activate_kmem(memcg, limit);
	else
		ret = page_counter_limit(&memcg->kmem, limit);
	mutex_unlock(&memcg_limit_mutex);
	return ret;
}

static int memcg_propagate_kmem(struct mem_cgroup *memcg)
{
	int ret = 0;
	struct mem_cgroup *parent = parent_mem_cgroup(memcg);

	if (!parent)
		return 0;

	mutex_lock(&memcg_limit_mutex);
	/*
	 * If the parent cgroup is not kmem-active now, it cannot be
	 * activated after this point, because it has at least one child
	 * already.
	 */
	if (memcg_kmem_is_active(parent))
		ret = memcg_activate_kmem(memcg, PAGE_COUNTER_MAX);
	mutex_unlock(&memcg_limit_mutex);
	return ret;
}
#else
static int memcg_update_kmem_limit(struct mem_cgroup *memcg,
				   unsigned long limit)
{
	return -EINVAL;
}
#endif /* CONFIG_MEMCG_KMEM */

/*
 * The users of this function are the write handlers of the limit
 * files (RES_LIMIT and RES_SOFT_LIMIT).
 */
static ssize_t mem_cgroup_write(struct kernfs_open_file *of,
				char *buf, size_t nbytes, loff_t off)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
	unsigned long nr_pages;
	int ret;

	buf = strstrip(buf);
	ret = page_counter_memparse(buf, "-1", &nr_pages);
	if (ret)
		return ret;

	switch (MEMFILE_ATTR(of_cft(of)->private)) {
	case RES_LIMIT:
		if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */
			ret = -EINVAL;
			break;
		}
		switch (MEMFILE_TYPE(of_cft(of)->private)) {
		case _MEM:
			ret = mem_cgroup_resize_limit(memcg, nr_pages);
			break;
		case _MEMSWAP:
			ret = mem_cgroup_resize_memsw_limit(memcg, nr_pages);
			break;
		case _KMEM:
			ret = memcg_update_kmem_limit(memcg, nr_pages);
			break;
		}
		break;
	case RES_SOFT_LIMIT:
		memcg->soft_limit = nr_pages;
		ret = 0;
		break;
	}
	return ret ?: nbytes;
}

static ssize_t mem_cgroup_reset(struct kernfs_open_file *of, char *buf,
				size_t nbytes, loff_t off)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
	struct page_counter *counter;

	switch (MEMFILE_TYPE(of_cft(of)->private)) {
	case _MEM:
		counter = &memcg->memory;
		break;
	case _MEMSWAP:
		counter = &memcg->memsw;
		break;
	case _KMEM:
		counter = &memcg->kmem;
		break;
	default:
		BUG();
	}

	switch (MEMFILE_ATTR(of_cft(of)->private)) {
	case RES_MAX_USAGE:
		page_counter_reset_watermark(counter);
		break;
	case RES_FAILCNT:
		counter->failcnt = 0;
		break;
	default:
		BUG();
	}

	return nbytes;
}
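
/*
 * Worked example for the write handlers above (illustrative): with a
 * 4K page size, writing the string "512M" to memory.limit_in_bytes is
 * parsed by page_counter_memparse() into nr_pages == 131072, and
 * writing "-1" selects PAGE_COUNTER_MAX, i.e. "unlimited":
 *
 *	echo 512M > memory.limit_in_bytes
 *	echo 0 > memory.max_usage_in_bytes    (any write resets the watermark)
 */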

static u64 mem_cgroup_move_charge_read(struct cgroup_subsys_state *css,
				       struct cftype *cft)
{
	return mem_cgroup_from_css(css)->move_charge_at_immigrate;
}

#ifdef CONFIG_MMU
static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
					struct cftype *cft, u64 val)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);

	if (val & ~MOVE_MASK)
		return -EINVAL;

	/*
	 * No kind of locking is needed here, because ->can_attach() will
	 * check this value once at the beginning of the process and then
	 * carry on with stale data.  This means that changes to this value
	 * will only affect task migrations starting after the change.
	 */
	memcg->move_charge_at_immigrate = val;
	return 0;
}
#else
static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
					struct cftype *cft, u64 val)
{
	return -ENOSYS;
}
#endif

#ifdef CONFIG_NUMA
static int memcg_numa_stat_show(struct seq_file *m, void *v)
{
	struct numa_stat {
		const char *name;
		unsigned int lru_mask;
	};

	static const struct numa_stat stats[] = {
		{ "total", LRU_ALL },
		{ "file", LRU_ALL_FILE },
		{ "anon", LRU_ALL_ANON },
		{ "unevictable", BIT(LRU_UNEVICTABLE) },
	};
	const struct numa_stat *stat;
	int nid;
	unsigned long nr;
	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));

	for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
		nr = mem_cgroup_nr_lru_pages(memcg, stat->lru_mask);
		seq_printf(m, "%s=%lu", stat->name, nr);
		for_each_node_state(nid, N_MEMORY) {
			nr = mem_cgroup_node_nr_lru_pages(memcg, nid,
							  stat->lru_mask);
			seq_printf(m, " N%d=%lu", nid, nr);
		}
		seq_putc(m, '\n');
	}

	for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
		struct mem_cgroup *iter;

		nr = 0;
		for_each_mem_cgroup_tree(iter, memcg)
			nr += mem_cgroup_nr_lru_pages(iter, stat->lru_mask);
		seq_printf(m, "hierarchical_%s=%lu", stat->name, nr);
		for_each_node_state(nid, N_MEMORY) {
			nr = 0;
			for_each_mem_cgroup_tree(iter, memcg)
				nr += mem_cgroup_node_nr_lru_pages(
					iter, nid, stat->lru_mask);
			seq_printf(m, " N%d=%lu", nid, nr);
		}
		seq_putc(m, '\n');
	}

	return 0;
}
#endif /* CONFIG_NUMA */
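
/*
 * Example memory.numa_stat output on a two-node machine (values are
 * page counts and purely illustrative):
 *
 *	total=2048 N0=1536 N1=512
 *	file=1024 N0=768 N1=256
 *	anon=1024 N0=768 N1=256
 *	unevictable=0 N0=0 N1=0
 *	hierarchical_total=4096 N0=3072 N1=1024
 *	...
 */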

static int memcg_stat_show(struct seq_file *m, void *v)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
	unsigned long memory, memsw;
	struct mem_cgroup *mi;
	unsigned int i;

	BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_stat_names) !=
		     MEM_CGROUP_STAT_NSTATS);
	BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_events_names) !=
		     MEM_CGROUP_EVENTS_NSTATS);
	BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_lru_names) != NR_LRU_LISTS);

	for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
		if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account)
			continue;
		seq_printf(m, "%s %lu\n", mem_cgroup_stat_names[i],
			   mem_cgroup_read_stat(memcg, i) * PAGE_SIZE);
	}

	for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++)
		seq_printf(m, "%s %lu\n", mem_cgroup_events_names[i],
			   mem_cgroup_read_events(memcg, i));

	for (i = 0; i < NR_LRU_LISTS; i++)
		seq_printf(m, "%s %lu\n", mem_cgroup_lru_names[i],
			   mem_cgroup_nr_lru_pages(memcg, BIT(i)) * PAGE_SIZE);

	/* Hierarchical information */
	memory = memsw = PAGE_COUNTER_MAX;
	for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) {
		memory = min(memory, mi->memory.limit);
		memsw = min(memsw, mi->memsw.limit);
	}
	seq_printf(m, "hierarchical_memory_limit %llu\n",
		   (u64)memory * PAGE_SIZE);
	if (do_swap_account)
		seq_printf(m, "hierarchical_memsw_limit %llu\n",
			   (u64)memsw * PAGE_SIZE);

	for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
		unsigned long long val = 0;

		if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account)
			continue;
		for_each_mem_cgroup_tree(mi, memcg)
			val += mem_cgroup_read_stat(mi, i) * PAGE_SIZE;
		seq_printf(m, "total_%s %llu\n", mem_cgroup_stat_names[i], val);
	}

	for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) {
		unsigned long long val = 0;

		for_each_mem_cgroup_tree(mi, memcg)
			val += mem_cgroup_read_events(mi, i);
		seq_printf(m, "total_%s %llu\n",
			   mem_cgroup_events_names[i], val);
	}

	for (i = 0; i < NR_LRU_LISTS; i++) {
		unsigned long long val = 0;

		for_each_mem_cgroup_tree(mi, memcg)
			val += mem_cgroup_nr_lru_pages(mi, BIT(i)) * PAGE_SIZE;
		seq_printf(m, "total_%s %llu\n", mem_cgroup_lru_names[i], val);
	}

#ifdef CONFIG_DEBUG_VM
	{
		int nid, zid;
		struct mem_cgroup_per_zone *mz;
		struct zone_reclaim_stat *rstat;
		unsigned long recent_rotated[2] = {0, 0};
		unsigned long recent_scanned[2] = {0, 0};

		for_each_online_node(nid)
			for (zid = 0; zid < MAX_NR_ZONES; zid++) {
				mz = &memcg->nodeinfo[nid]->zoneinfo[zid];
				rstat = &mz->lruvec.reclaim_stat;

				recent_rotated[0] += rstat->recent_rotated[0];
				recent_rotated[1] += rstat->recent_rotated[1];
				recent_scanned[0] += rstat->recent_scanned[0];
				recent_scanned[1] += rstat->recent_scanned[1];
			}
		seq_printf(m, "recent_rotated_anon %lu\n", recent_rotated[0]);
		seq_printf(m, "recent_rotated_file %lu\n", recent_rotated[1]);
		seq_printf(m, "recent_scanned_anon %lu\n", recent_scanned[0]);
		seq_printf(m, "recent_scanned_file %lu\n", recent_scanned[1]);
	}
#endif

	return 0;
}

static u64 mem_cgroup_swappiness_read(struct cgroup_subsys_state *css,
				      struct cftype *cft)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);

	return mem_cgroup_swappiness(memcg);
}

static int mem_cgroup_swappiness_write(struct cgroup_subsys_state *css,
				       struct cftype *cft, u64 val)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);

	if (val > 100)
		return -EINVAL;

	if (css->parent)
		memcg->swappiness = val;
	else
		vm_swappiness = val;

	return 0;
}

static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
{
	struct mem_cgroup_threshold_ary *t;
	unsigned long usage;
	int i;

	rcu_read_lock();
	if (!swap)
		t = rcu_dereference(memcg->thresholds.primary);
	else
		t = rcu_dereference(memcg->memsw_thresholds.primary);

	if (!t)
		goto unlock;

	usage = mem_cgroup_usage(memcg, swap);

	/*
	 * current_threshold points to the threshold just below or equal
	 * to usage.  If that is no longer true, a threshold was crossed
	 * after the last call of __mem_cgroup_threshold().
	 */
	i = t->current_threshold;

	/*
	 * Iterate backward over the array of thresholds starting from
	 * current_threshold and check if a threshold is crossed.
	 * If none of the thresholds below usage is crossed, we read
	 * only one element of the array here.
	 */
	for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--)
		eventfd_signal(t->entries[i].eventfd, 1);

	/* i = current_threshold + 1 */
	i++;

	/*
	 * Iterate forward over the array of thresholds starting from
	 * current_threshold+1 and check if a threshold is crossed.
	 * If none of the thresholds above usage is crossed, we read
	 * only one element of the array here.
	 */
	for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++)
		eventfd_signal(t->entries[i].eventfd, 1);

	/* Update current_threshold */
	t->current_threshold = i - 1;
unlock:
	rcu_read_unlock();
}

static void mem_cgroup_threshold(struct mem_cgroup *memcg)
{
	while (memcg) {
		__mem_cgroup_threshold(memcg, false);
		if (do_swap_account)
			__mem_cgroup_threshold(memcg, true);

		memcg = parent_mem_cgroup(memcg);
	}
}
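
/*
 * Worked example for the walk above (illustrative): with thresholds
 * at {4M, 8M, 16M} and current_threshold == 1 (the 8M entry), a usage
 * of 18M skips the backward loop, signals the 16M eventfd in the
 * forward loop, and leaves current_threshold == 2; a subsequent drop
 * to 3M then runs only the backward loop, signalling the 16M, 8M and
 * 4M eventfds and resetting current_threshold to -1.
 */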

static int compare_thresholds(const void *a, const void *b)
{
	const struct mem_cgroup_threshold *_a = a;
	const struct mem_cgroup_threshold *_b = b;

	if (_a->threshold > _b->threshold)
		return 1;

	if (_a->threshold < _b->threshold)
		return -1;

	return 0;
}

static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg)
{
	struct mem_cgroup_eventfd_list *ev;

	spin_lock(&memcg_oom_lock);

	list_for_each_entry(ev, &memcg->oom_notify, list)
		eventfd_signal(ev->eventfd, 1);

	spin_unlock(&memcg_oom_lock);
	return 0;
}

static void mem_cgroup_oom_notify(struct mem_cgroup *memcg)
{
	struct mem_cgroup *iter;

	for_each_mem_cgroup_tree(iter, memcg)
		mem_cgroup_oom_notify_cb(iter);
}

static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
	struct eventfd_ctx *eventfd, const char *args, enum res_type type)
{
	struct mem_cgroup_thresholds *thresholds;
	struct mem_cgroup_threshold_ary *new;
	unsigned long threshold;
	unsigned long usage;
	int i, size, ret;

	ret = page_counter_memparse(args, "-1", &threshold);
	if (ret)
		return ret;

	mutex_lock(&memcg->thresholds_lock);

	if (type == _MEM) {
		thresholds = &memcg->thresholds;
		usage = mem_cgroup_usage(memcg, false);
	} else if (type == _MEMSWAP) {
		thresholds = &memcg->memsw_thresholds;
		usage = mem_cgroup_usage(memcg, true);
	} else
		BUG();

	/* Check if a threshold was crossed before adding a new one */
	if (thresholds->primary)
		__mem_cgroup_threshold(memcg, type == _MEMSWAP);

	size = thresholds->primary ? thresholds->primary->size + 1 : 1;

	/* Allocate memory for new array of thresholds */
	new = kmalloc(sizeof(*new) + size * sizeof(struct mem_cgroup_threshold),
		      GFP_KERNEL);
	if (!new) {
		ret = -ENOMEM;
		goto unlock;
	}
	new->size = size;

	/* Copy thresholds (if any) to new array */
	if (thresholds->primary) {
		memcpy(new->entries, thresholds->primary->entries, (size - 1) *
		       sizeof(struct mem_cgroup_threshold));
	}

	/* Add new threshold */
	new->entries[size - 1].eventfd = eventfd;
	new->entries[size - 1].threshold = threshold;

	/* Sort thresholds.  Registering a new threshold isn't time-critical */
	sort(new->entries, size, sizeof(struct mem_cgroup_threshold),
	     compare_thresholds, NULL);

	/* Find current threshold */
	new->current_threshold = -1;
	for (i = 0; i < size; i++) {
		if (new->entries[i].threshold <= usage) {
			/*
			 * new->current_threshold will not be used until
			 * rcu_assign_pointer(), so it's safe to increment
			 * it here.
			 */
			++new->current_threshold;
		} else
			break;
	}

	/* Free old spare buffer and save old primary buffer as spare */
	kfree(thresholds->spare);
	thresholds->spare = thresholds->primary;

	rcu_assign_pointer(thresholds->primary, new);

	/* To be sure that nobody uses thresholds */
	synchronize_rcu();

unlock:
	mutex_unlock(&memcg->thresholds_lock);

	return ret;
}

static int mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
	struct eventfd_ctx *eventfd, const char *args)
{
	return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEM);
}

static int memsw_cgroup_usage_register_event(struct mem_cgroup *memcg,
	struct eventfd_ctx *eventfd, const char *args)
{
	return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEMSWAP);
}

static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
	struct eventfd_ctx *eventfd, enum res_type type)
{
	struct mem_cgroup_thresholds *thresholds;
	struct mem_cgroup_threshold_ary *new;
	unsigned long usage;
	int i, j, size;

	mutex_lock(&memcg->thresholds_lock);

	if (type == _MEM) {
		thresholds = &memcg->thresholds;
		usage = mem_cgroup_usage(memcg, false);
	} else if (type == _MEMSWAP) {
		thresholds = &memcg->memsw_thresholds;
		usage = mem_cgroup_usage(memcg, true);
	} else
		BUG();

	if (!thresholds->primary)
		goto unlock;

	/* Check if a threshold was crossed before removing */
	__mem_cgroup_threshold(memcg, type == _MEMSWAP);

	/* Calculate new number of thresholds */
	size = 0;
	for (i = 0; i < thresholds->primary->size; i++) {
		if (thresholds->primary->entries[i].eventfd != eventfd)
			size++;
	}

	new = thresholds->spare;

	/* Set thresholds array to NULL if we don't have thresholds */
	if (!size) {
		kfree(new);
		new = NULL;
		goto swap_buffers;
	}

	new->size = size;

	/* Copy thresholds and find current threshold */
	new->current_threshold = -1;
	for (i = 0, j = 0; i < thresholds->primary->size; i++) {
		if (thresholds->primary->entries[i].eventfd == eventfd)
			continue;

		new->entries[j] = thresholds->primary->entries[i];
		if (new->entries[j].threshold <= usage) {
			/*
			 * new->current_threshold will not be used
			 * until rcu_assign_pointer(), so it's safe to
			 * increment it here.
			 */
			++new->current_threshold;
		}
		j++;
	}

swap_buffers:
	/* Swap primary and spare array */
	thresholds->spare = thresholds->primary;

	rcu_assign_pointer(thresholds->primary, new);

	/* To be sure that nobody uses thresholds */
	synchronize_rcu();

	/* If all events are unregistered, free the spare array */
	if (!new) {
		kfree(thresholds->spare);
		thresholds->spare = NULL;
	}
unlock:
	mutex_unlock(&memcg->thresholds_lock);
}

static void mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
	struct eventfd_ctx *eventfd)
{
	return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEM);
}

static void memsw_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
	struct eventfd_ctx *eventfd)
{
	return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEMSWAP);
}

static int mem_cgroup_oom_register_event(struct mem_cgroup *memcg,
	struct eventfd_ctx *eventfd, const char *args)
{
	struct mem_cgroup_eventfd_list *event;

	event = kmalloc(sizeof(*event), GFP_KERNEL);
	if (!event)
		return -ENOMEM;

	spin_lock(&memcg_oom_lock);

	event->eventfd = eventfd;
	list_add(&event->list, &memcg->oom_notify);

	/* already in OOM? */
	if (memcg->under_oom)
		eventfd_signal(eventfd, 1);
	spin_unlock(&memcg_oom_lock);

	return 0;
}

static void mem_cgroup_oom_unregister_event(struct mem_cgroup *memcg,
	struct eventfd_ctx *eventfd)
{
	struct mem_cgroup_eventfd_list *ev, *tmp;

	spin_lock(&memcg_oom_lock);

	list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) {
		if (ev->eventfd == eventfd) {
			list_del(&ev->list);
			kfree(ev);
		}
	}

	spin_unlock(&memcg_oom_lock);
}

static int mem_cgroup_oom_control_read(struct seq_file *sf, void *v)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(sf));

	seq_printf(sf, "oom_kill_disable %d\n", memcg->oom_kill_disable);
	seq_printf(sf, "under_oom %d\n", (bool)memcg->under_oom);
	return 0;
}

static int mem_cgroup_oom_control_write(struct cgroup_subsys_state *css,
	struct cftype *cft, u64 val)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);

	/* cannot set to root cgroup and only 0 and 1 are allowed */
	if (!css->parent || !((val == 0) || (val == 1)))
		return -EINVAL;

	memcg->oom_kill_disable = val;
	if (!val)
		memcg_oom_recover(memcg);

	return 0;
}

#ifdef CONFIG_MEMCG_KMEM
static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
{
	int ret;

	ret = memcg_propagate_kmem(memcg);
	if (ret)
		return ret;

	return mem_cgroup_sockets_init(memcg, ss);
}

static void memcg_deactivate_kmem(struct mem_cgroup *memcg)
{
	struct cgroup_subsys_state *css;
	struct mem_cgroup *parent, *child;
	int kmemcg_id;

	if (!memcg->kmem_acct_active)
		return;

	/*
	 * Clear the 'active' flag before clearing the memcg_caches array
	 * entries.  Since we take the slab_mutex in
	 * memcg_deactivate_kmem_caches(), it guarantees no cache will be
	 * created for this cgroup after we are done (see
	 * memcg_create_kmem_cache()).
	 */
	memcg->kmem_acct_active = false;

	memcg_deactivate_kmem_caches(memcg);

	kmemcg_id = memcg->kmemcg_id;
	BUG_ON(kmemcg_id < 0);

	parent = parent_mem_cgroup(memcg);
	if (!parent)
		parent = root_mem_cgroup;

	/*
	 * Change kmemcg_id of this cgroup and all its descendants to the
	 * parent's id, and then move all entries from this cgroup's list_lrus
	 * to ones of the parent.  After we have finished, all list_lrus
	 * corresponding to this cgroup are guaranteed to remain empty.  The
	 * ordering is imposed by list_lru_node->lock taken by
	 * memcg_drain_all_list_lrus().
	 */
	rcu_read_lock(); /* can be called from css_free w/o cgroup_mutex */
	css_for_each_descendant_pre(css, &memcg->css) {
		child = mem_cgroup_from_css(css);
		BUG_ON(child->kmemcg_id != kmemcg_id);
		child->kmemcg_id = parent->kmemcg_id;
		if (!memcg->use_hierarchy)
			break;
	}
	rcu_read_unlock();

	memcg_drain_all_list_lrus(kmemcg_id, parent->kmemcg_id);

	memcg_free_cache_id(kmemcg_id);
}

static void memcg_destroy_kmem(struct mem_cgroup *memcg)
{
	if (memcg->kmem_acct_activated) {
		memcg_destroy_kmem_caches(memcg);
		static_key_slow_dec(&memcg_kmem_enabled_key);
		WARN_ON(page_counter_read(&memcg->kmem));
	}
	mem_cgroup_sockets_destroy(memcg);
}
#else
static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
{
	return 0;
}

static void memcg_deactivate_kmem(struct mem_cgroup *memcg)
{
}

static void memcg_destroy_kmem(struct mem_cgroup *memcg)
{
}
#endif

#ifdef CONFIG_CGROUP_WRITEBACK

struct list_head *mem_cgroup_cgwb_list(struct mem_cgroup *memcg)
{
	return &memcg->cgwb_list;
}

static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
{
	return wb_domain_init(&memcg->cgwb_domain, gfp);
}

static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
{
	wb_domain_exit(&memcg->cgwb_domain);
}

static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
{
	wb_domain_size_changed(&memcg->cgwb_domain);
}

struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);

	if (!memcg->css.parent)
		return NULL;

	return &memcg->cgwb_domain;
}

/**
 * mem_cgroup_wb_stats - retrieve writeback related stats from its memcg
 * @wb: bdi_writeback in question
 * @pfilepages: out parameter for number of file pages
 * @pheadroom: out parameter for number of allocatable pages according to memcg
 * @pdirty: out parameter for number of dirty pages
 * @pwriteback: out parameter for number of pages under writeback
 *
 * Determine the numbers of file, headroom, dirty, and writeback pages in
 * @wb's memcg.  File, dirty and writeback are self-explanatory.  Headroom
 * is a bit more involved.
 *
 * A memcg's headroom is "min(max, high) - used".  In the hierarchy, the
 * headroom is calculated as the lowest headroom of itself and the
 * ancestors.  Note that this doesn't consider the actual amount of
 * available memory in the system.  The caller should further cap
 * *@pheadroom accordingly.
 */
void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
			 unsigned long *pheadroom, unsigned long *pdirty,
			 unsigned long *pwriteback)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
	struct mem_cgroup *parent;

	*pdirty = mem_cgroup_read_stat(memcg, MEM_CGROUP_STAT_DIRTY);

	/* this should eventually include NR_UNSTABLE_NFS */
	*pwriteback = mem_cgroup_read_stat(memcg, MEM_CGROUP_STAT_WRITEBACK);
	*pfilepages = mem_cgroup_nr_lru_pages(memcg, (1 << LRU_INACTIVE_FILE) |
						     (1 << LRU_ACTIVE_FILE));
	*pheadroom = PAGE_COUNTER_MAX;

	while ((parent = parent_mem_cgroup(memcg))) {
		unsigned long ceiling = min(memcg->memory.limit, memcg->high);
		unsigned long used = page_counter_read(&memcg->memory);

		*pheadroom = min(*pheadroom, ceiling - min(ceiling, used));
		memcg = parent;
	}
}

#else	/* CONFIG_CGROUP_WRITEBACK */

static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
{
	return 0;
}

static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
{
}

static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
{
}

#endif	/* CONFIG_CGROUP_WRITEBACK */
3859 */ 3860 schedule_work(&event->remove); 3861 } 3862 spin_unlock(&memcg->event_list_lock); 3863 } 3864 3865 return 0; 3866} 3867 3868static void memcg_event_ptable_queue_proc(struct file *file, 3869 wait_queue_head_t *wqh, poll_table *pt) 3870{ 3871 struct mem_cgroup_event *event = 3872 container_of(pt, struct mem_cgroup_event, pt); 3873 3874 event->wqh = wqh; 3875 add_wait_queue(wqh, &event->wait); 3876} 3877 3878/* 3879 * DO NOT USE IN NEW FILES. 3880 * 3881 * Parse input and register new cgroup event handler. 3882 * 3883 * Input must be in format '<event_fd> <control_fd> <args>'. 3884 * Interpretation of args is defined by control file implementation. 3885 */ 3886static ssize_t memcg_write_event_control(struct kernfs_open_file *of, 3887 char *buf, size_t nbytes, loff_t off) 3888{ 3889 struct cgroup_subsys_state *css = of_css(of); 3890 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 3891 struct mem_cgroup_event *event; 3892 struct cgroup_subsys_state *cfile_css; 3893 unsigned int efd, cfd; 3894 struct fd efile; 3895 struct fd cfile; 3896 const char *name; 3897 char *endp; 3898 int ret; 3899 3900 buf = strstrip(buf); 3901 3902 efd = simple_strtoul(buf, &endp, 10); 3903 if (*endp != ' ') 3904 return -EINVAL; 3905 buf = endp + 1; 3906 3907 cfd = simple_strtoul(buf, &endp, 10); 3908 if ((*endp != ' ') && (*endp != '\0')) 3909 return -EINVAL; 3910 buf = endp + 1; 3911 3912 event = kzalloc(sizeof(*event), GFP_KERNEL); 3913 if (!event) 3914 return -ENOMEM; 3915 3916 event->memcg = memcg; 3917 INIT_LIST_HEAD(&event->list); 3918 init_poll_funcptr(&event->pt, memcg_event_ptable_queue_proc); 3919 init_waitqueue_func_entry(&event->wait, memcg_event_wake); 3920 INIT_WORK(&event->remove, memcg_event_remove); 3921 3922 efile = fdget(efd); 3923 if (!efile.file) { 3924 ret = -EBADF; 3925 goto out_kfree; 3926 } 3927 3928 event->eventfd = eventfd_ctx_fileget(efile.file); 3929 if (IS_ERR(event->eventfd)) { 3930 ret = PTR_ERR(event->eventfd); 3931 goto out_put_efile; 3932 } 3933 3934 cfile = fdget(cfd); 3935 if (!cfile.file) { 3936 ret = -EBADF; 3937 goto out_put_eventfd; 3938 } 3939 3940 /* the process need read permission on control file */ 3941 /* AV: shouldn't we check that it's been opened for read instead? */ 3942 ret = inode_permission(file_inode(cfile.file), MAY_READ); 3943 if (ret < 0) 3944 goto out_put_cfile; 3945 3946 /* 3947 * Determine the event callbacks and set them in @event. This used 3948 * to be done via struct cftype but cgroup core no longer knows 3949 * about these events. The following is crude but the whole thing 3950 * is for compatibility anyway. 3951 * 3952 * DO NOT ADD NEW FILES. 
3953 */ 3954 name = cfile.file->f_path.dentry->d_name.name; 3955 3956 if (!strcmp(name, "memory.usage_in_bytes")) { 3957 event->register_event = mem_cgroup_usage_register_event; 3958 event->unregister_event = mem_cgroup_usage_unregister_event; 3959 } else if (!strcmp(name, "memory.oom_control")) { 3960 event->register_event = mem_cgroup_oom_register_event; 3961 event->unregister_event = mem_cgroup_oom_unregister_event; 3962 } else if (!strcmp(name, "memory.pressure_level")) { 3963 event->register_event = vmpressure_register_event; 3964 event->unregister_event = vmpressure_unregister_event; 3965 } else if (!strcmp(name, "memory.memsw.usage_in_bytes")) { 3966 event->register_event = memsw_cgroup_usage_register_event; 3967 event->unregister_event = memsw_cgroup_usage_unregister_event; 3968 } else { 3969 ret = -EINVAL; 3970 goto out_put_cfile; 3971 } 3972 3973 /* 3974 * Verify @cfile should belong to @css. Also, remaining events are 3975 * automatically removed on cgroup destruction but the removal is 3976 * asynchronous, so take an extra ref on @css. 3977 */ 3978 cfile_css = css_tryget_online_from_dir(cfile.file->f_path.dentry->d_parent, 3979 &memory_cgrp_subsys); 3980 ret = -EINVAL; 3981 if (IS_ERR(cfile_css)) 3982 goto out_put_cfile; 3983 if (cfile_css != css) { 3984 css_put(cfile_css); 3985 goto out_put_cfile; 3986 } 3987 3988 ret = event->register_event(memcg, event->eventfd, buf); 3989 if (ret) 3990 goto out_put_css; 3991 3992 efile.file->f_op->poll(efile.file, &event->pt); 3993 3994 spin_lock(&memcg->event_list_lock); 3995 list_add(&event->list, &memcg->event_list); 3996 spin_unlock(&memcg->event_list_lock); 3997 3998 fdput(cfile); 3999 fdput(efile); 4000 4001 return nbytes; 4002 4003out_put_css: 4004 css_put(css); 4005out_put_cfile: 4006 fdput(cfile); 4007out_put_eventfd: 4008 eventfd_ctx_put(event->eventfd); 4009out_put_efile: 4010 fdput(efile); 4011out_kfree: 4012 kfree(event); 4013 4014 return ret; 4015} 4016 4017static struct cftype mem_cgroup_legacy_files[] = { 4018 { 4019 .name = "usage_in_bytes", 4020 .private = MEMFILE_PRIVATE(_MEM, RES_USAGE), 4021 .read_u64 = mem_cgroup_read_u64, 4022 }, 4023 { 4024 .name = "max_usage_in_bytes", 4025 .private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE), 4026 .write = mem_cgroup_reset, 4027 .read_u64 = mem_cgroup_read_u64, 4028 }, 4029 { 4030 .name = "limit_in_bytes", 4031 .private = MEMFILE_PRIVATE(_MEM, RES_LIMIT), 4032 .write = mem_cgroup_write, 4033 .read_u64 = mem_cgroup_read_u64, 4034 }, 4035 { 4036 .name = "soft_limit_in_bytes", 4037 .private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT), 4038 .write = mem_cgroup_write, 4039 .read_u64 = mem_cgroup_read_u64, 4040 }, 4041 { 4042 .name = "failcnt", 4043 .private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT), 4044 .write = mem_cgroup_reset, 4045 .read_u64 = mem_cgroup_read_u64, 4046 }, 4047 { 4048 .name = "stat", 4049 .seq_show = memcg_stat_show, 4050 }, 4051 { 4052 .name = "force_empty", 4053 .write = mem_cgroup_force_empty_write, 4054 }, 4055 { 4056 .name = "use_hierarchy", 4057 .write_u64 = mem_cgroup_hierarchy_write, 4058 .read_u64 = mem_cgroup_hierarchy_read, 4059 }, 4060 { 4061 .name = "cgroup.event_control", /* XXX: for compat */ 4062 .write = memcg_write_event_control, 4063 .flags = CFTYPE_NO_PREFIX | CFTYPE_WORLD_WRITABLE, 4064 }, 4065 { 4066 .name = "swappiness", 4067 .read_u64 = mem_cgroup_swappiness_read, 4068 .write_u64 = mem_cgroup_swappiness_write, 4069 }, 4070 { 4071 .name = "move_charge_at_immigrate", 4072 .read_u64 = mem_cgroup_move_charge_read, 4073 .write_u64 = mem_cgroup_move_charge_write, 
4074 }, 4075 { 4076 .name = "oom_control", 4077 .seq_show = mem_cgroup_oom_control_read, 4078 .write_u64 = mem_cgroup_oom_control_write, 4079 .private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL), 4080 }, 4081 { 4082 .name = "pressure_level", 4083 }, 4084#ifdef CONFIG_NUMA 4085 { 4086 .name = "numa_stat", 4087 .seq_show = memcg_numa_stat_show, 4088 }, 4089#endif 4090#ifdef CONFIG_MEMCG_KMEM 4091 { 4092 .name = "kmem.limit_in_bytes", 4093 .private = MEMFILE_PRIVATE(_KMEM, RES_LIMIT), 4094 .write = mem_cgroup_write, 4095 .read_u64 = mem_cgroup_read_u64, 4096 }, 4097 { 4098 .name = "kmem.usage_in_bytes", 4099 .private = MEMFILE_PRIVATE(_KMEM, RES_USAGE), 4100 .read_u64 = mem_cgroup_read_u64, 4101 }, 4102 { 4103 .name = "kmem.failcnt", 4104 .private = MEMFILE_PRIVATE(_KMEM, RES_FAILCNT), 4105 .write = mem_cgroup_reset, 4106 .read_u64 = mem_cgroup_read_u64, 4107 }, 4108 { 4109 .name = "kmem.max_usage_in_bytes", 4110 .private = MEMFILE_PRIVATE(_KMEM, RES_MAX_USAGE), 4111 .write = mem_cgroup_reset, 4112 .read_u64 = mem_cgroup_read_u64, 4113 }, 4114#ifdef CONFIG_SLABINFO 4115 { 4116 .name = "kmem.slabinfo", 4117 .seq_start = slab_start, 4118 .seq_next = slab_next, 4119 .seq_stop = slab_stop, 4120 .seq_show = memcg_slab_show, 4121 }, 4122#endif 4123#endif 4124 { }, /* terminate */ 4125}; 4126 4127static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node) 4128{ 4129 struct mem_cgroup_per_node *pn; 4130 struct mem_cgroup_per_zone *mz; 4131 int zone, tmp = node; 4132 /* 4133 * This routine is called against possible nodes. 4134 * But it's BUG to call kmalloc() against offline node. 4135 * 4136 * TODO: this routine can waste much memory for nodes which will 4137 * never be onlined. It's better to use memory hotplug callback 4138 * function. 4139 */ 4140 if (!node_state(node, N_NORMAL_MEMORY)) 4141 tmp = -1; 4142 pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, tmp); 4143 if (!pn) 4144 return 1; 4145 4146 for (zone = 0; zone < MAX_NR_ZONES; zone++) { 4147 mz = &pn->zoneinfo[zone]; 4148 lruvec_init(&mz->lruvec); 4149 mz->usage_in_excess = 0; 4150 mz->on_tree = false; 4151 mz->memcg = memcg; 4152 } 4153 memcg->nodeinfo[node] = pn; 4154 return 0; 4155} 4156 4157static void free_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node) 4158{ 4159 kfree(memcg->nodeinfo[node]); 4160} 4161 4162static struct mem_cgroup *mem_cgroup_alloc(void) 4163{ 4164 struct mem_cgroup *memcg; 4165 size_t size; 4166 4167 size = sizeof(struct mem_cgroup); 4168 size += nr_node_ids * sizeof(struct mem_cgroup_per_node *); 4169 4170 memcg = kzalloc(size, GFP_KERNEL); 4171 if (!memcg) 4172 return NULL; 4173 4174 memcg->stat = alloc_percpu(struct mem_cgroup_stat_cpu); 4175 if (!memcg->stat) 4176 goto out_free; 4177 4178 if (memcg_wb_domain_init(memcg, GFP_KERNEL)) 4179 goto out_free_stat; 4180 4181 return memcg; 4182 4183out_free_stat: 4184 free_percpu(memcg->stat); 4185out_free: 4186 kfree(memcg); 4187 return NULL; 4188} 4189 4190/* 4191 * At destroying mem_cgroup, references from swap_cgroup can remain. 4192 * (scanning all at force_empty is too costly...) 4193 * 4194 * Instead of clearing all references at force_empty, we remember 4195 * the number of reference from swap_cgroup and free mem_cgroup when 4196 * it goes down to 0. 4197 * 4198 * Removal of cgroup itself succeeds regardless of refs from swap. 
4199 */
4200
4201static void __mem_cgroup_free(struct mem_cgroup *memcg)
4202{
4203 int node;
4204
4205 mem_cgroup_remove_from_trees(memcg);
4206
4207 for_each_node(node)
4208 free_mem_cgroup_per_zone_info(memcg, node);
4209
4210 free_percpu(memcg->stat);
4211 memcg_wb_domain_exit(memcg);
4212 kfree(memcg);
4213}
4214
4215/*
4216 * Returns the parent mem_cgroup in the memcg hierarchy, or NULL if none (hierarchy not enabled, or @memcg is the root).
4217 */
4218struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
4219{
4220 if (!memcg->memory.parent)
4221 return NULL;
4222 return mem_cgroup_from_counter(memcg->memory.parent, memory);
4223}
4224EXPORT_SYMBOL(parent_mem_cgroup);
4225
4226static struct cgroup_subsys_state * __ref
4227mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
4228{
4229 struct mem_cgroup *memcg;
4230 long error = -ENOMEM;
4231 int node;
4232
4233 memcg = mem_cgroup_alloc();
4234 if (!memcg)
4235 return ERR_PTR(error);
4236
4237 for_each_node(node)
4238 if (alloc_mem_cgroup_per_zone_info(memcg, node))
4239 goto free_out;
4240
4241 /* root ? */
4242 if (parent_css == NULL) {
4243 root_mem_cgroup = memcg;
4244 mem_cgroup_root_css = &memcg->css;
4245 page_counter_init(&memcg->memory, NULL);
4246 memcg->high = PAGE_COUNTER_MAX;
4247 memcg->soft_limit = PAGE_COUNTER_MAX;
4248 page_counter_init(&memcg->memsw, NULL);
4249 page_counter_init(&memcg->kmem, NULL);
4250 }
4251
4252 memcg->last_scanned_node = MAX_NUMNODES;
4253 INIT_LIST_HEAD(&memcg->oom_notify);
4254 memcg->move_charge_at_immigrate = 0;
4255 mutex_init(&memcg->thresholds_lock);
4256 spin_lock_init(&memcg->move_lock);
4257 vmpressure_init(&memcg->vmpressure);
4258 INIT_LIST_HEAD(&memcg->event_list);
4259 spin_lock_init(&memcg->event_list_lock);
4260#ifdef CONFIG_MEMCG_KMEM
4261 memcg->kmemcg_id = -1;
4262#endif
4263#ifdef CONFIG_CGROUP_WRITEBACK
4264 INIT_LIST_HEAD(&memcg->cgwb_list);
4265#endif
4266 return &memcg->css;
4267
4268free_out:
4269 __mem_cgroup_free(memcg);
4270 return ERR_PTR(error);
4271}
4272
4273static int
4274mem_cgroup_css_online(struct cgroup_subsys_state *css)
4275{
4276 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4277 struct mem_cgroup *parent = mem_cgroup_from_css(css->parent);
4278 int ret;
4279
4280 if (css->id > MEM_CGROUP_ID_MAX)
4281 return -ENOSPC;
4282
4283 if (!parent)
4284 return 0;
4285
4286 mutex_lock(&memcg_create_mutex);
4287
4288 memcg->use_hierarchy = parent->use_hierarchy;
4289 memcg->oom_kill_disable = parent->oom_kill_disable;
4290 memcg->swappiness = mem_cgroup_swappiness(parent);
4291
4292 if (parent->use_hierarchy) {
4293 page_counter_init(&memcg->memory, &parent->memory);
4294 memcg->high = PAGE_COUNTER_MAX;
4295 memcg->soft_limit = PAGE_COUNTER_MAX;
4296 page_counter_init(&memcg->memsw, &parent->memsw);
4297 page_counter_init(&memcg->kmem, &parent->kmem);
4298
4299 /*
4300 * No need to take a reference to the parent because cgroup
4301 * core guarantees its existence.
4302 */
4303 } else {
4304 page_counter_init(&memcg->memory, NULL);
4305 memcg->high = PAGE_COUNTER_MAX;
4306 memcg->soft_limit = PAGE_COUNTER_MAX;
4307 page_counter_init(&memcg->memsw, NULL);
4308 page_counter_init(&memcg->kmem, NULL);
4309 /*
4310 * A deeper hierarchy with use_hierarchy == false doesn't make
4311 * much sense, so let the cgroup subsystem know about this
4312 * unfortunate state in our controller.
4313 */ 4314 if (parent != root_mem_cgroup) 4315 memory_cgrp_subsys.broken_hierarchy = true; 4316 } 4317 mutex_unlock(&memcg_create_mutex); 4318 4319 ret = memcg_init_kmem(memcg, &memory_cgrp_subsys); 4320 if (ret) 4321 return ret; 4322 4323 /* 4324 * Make sure the memcg is initialized: mem_cgroup_iter() 4325 * orders reading memcg->initialized against its callers 4326 * reading the memcg members. 4327 */ 4328 smp_store_release(&memcg->initialized, 1); 4329 4330 return 0; 4331} 4332 4333static void mem_cgroup_css_offline(struct cgroup_subsys_state *css) 4334{ 4335 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 4336 struct mem_cgroup_event *event, *tmp; 4337 4338 /* 4339 * Unregister events and notify userspace. 4340 * Notify userspace about cgroup removing only after rmdir of cgroup 4341 * directory to avoid race between userspace and kernelspace. 4342 */ 4343 spin_lock(&memcg->event_list_lock); 4344 list_for_each_entry_safe(event, tmp, &memcg->event_list, list) { 4345 list_del_init(&event->list); 4346 schedule_work(&event->remove); 4347 } 4348 spin_unlock(&memcg->event_list_lock); 4349 4350 vmpressure_cleanup(&memcg->vmpressure); 4351 4352 memcg_deactivate_kmem(memcg); 4353 4354 wb_memcg_offline(memcg); 4355} 4356 4357static void mem_cgroup_css_released(struct cgroup_subsys_state *css) 4358{ 4359 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 4360 4361 invalidate_reclaim_iterators(memcg); 4362} 4363 4364static void mem_cgroup_css_free(struct cgroup_subsys_state *css) 4365{ 4366 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 4367 4368 memcg_destroy_kmem(memcg); 4369 __mem_cgroup_free(memcg); 4370} 4371 4372/** 4373 * mem_cgroup_css_reset - reset the states of a mem_cgroup 4374 * @css: the target css 4375 * 4376 * Reset the states of the mem_cgroup associated with @css. This is 4377 * invoked when the userland requests disabling on the default hierarchy 4378 * but the memcg is pinned through dependency. The memcg should stop 4379 * applying policies and should revert to the vanilla state as it may be 4380 * made visible again. 4381 * 4382 * The current implementation only resets the essential configurations. 4383 * This needs to be expanded to cover all the visible parts. 4384 */ 4385static void mem_cgroup_css_reset(struct cgroup_subsys_state *css) 4386{ 4387 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 4388 4389 mem_cgroup_resize_limit(memcg, PAGE_COUNTER_MAX); 4390 mem_cgroup_resize_memsw_limit(memcg, PAGE_COUNTER_MAX); 4391 memcg_update_kmem_limit(memcg, PAGE_COUNTER_MAX); 4392 memcg->low = 0; 4393 memcg->high = PAGE_COUNTER_MAX; 4394 memcg->soft_limit = PAGE_COUNTER_MAX; 4395 memcg_wb_domain_size_changed(memcg); 4396} 4397 4398#ifdef CONFIG_MMU 4399/* Handlers for move charge at task migration. 
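 *
 * A rough sketch of how the handlers below fit together (all names
 * are the functions defined in this file):
 *
 *	mem_cgroup_can_attach()    -> precharge the whole mm to mc.to
 *	mem_cgroup_move_task()     -> walk the page tables, moving the
 *	                              charges page by page
 *	mem_cgroup_cancel_attach() -> drop leftover precharges if the
 *	                              migration is aborted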
*/
4400static int mem_cgroup_do_precharge(unsigned long count)
4401{
4402 int ret;
4403
4404 /* Try a single bulk charge without reclaim first, kswapd may wake */
4405 ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_DIRECT_RECLAIM, count);
4406 if (!ret) {
4407 mc.precharge += count;
4408 return ret;
4409 }
4410
4411 /* Try charges one by one with reclaim */
4412 while (count--) {
4413 ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_NORETRY, 1);
4414 if (ret)
4415 return ret;
4416 mc.precharge++;
4417 cond_resched();
4418 }
4419 return 0;
4420}
4421
4422/**
4423 * get_mctgt_type - get target type of moving charge
4424 * @vma: the vma the pte being checked belongs to
4425 * @addr: the address corresponding to the pte to be checked
4426 * @ptent: the pte to be checked
4427 * @target: pointer to where the target page or swap entry is stored (may be NULL)
4428 *
4429 * Returns
4430 * 0(MC_TARGET_NONE): if the pte is not a target for move charge.
4431 * 1(MC_TARGET_PAGE): if the page corresponding to this pte is a target for
4432 * move charge. If @target is not NULL, the page is stored in target->page
4433 * with an extra refcount taken (callers must handle it).
4434 * 2(MC_TARGET_SWAP): if the swap entry corresponding to this pte is a
4435 * target for charge migration. If @target is not NULL, the entry is stored
4436 * in target->ent.
4437 *
4438 * Called with pte lock held.
4439 */
4440union mc_target {
4441 struct page *page;
4442 swp_entry_t ent;
4443};
4444
4445enum mc_target_type {
4446 MC_TARGET_NONE = 0,
4447 MC_TARGET_PAGE,
4448 MC_TARGET_SWAP,
4449};
4450
4451static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
4452 unsigned long addr, pte_t ptent)
4453{
4454 struct page *page = vm_normal_page(vma, addr, ptent);
4455
4456 if (!page || !page_mapped(page))
4457 return NULL;
4458 if (PageAnon(page)) {
4459 if (!(mc.flags & MOVE_ANON))
4460 return NULL;
4461 } else {
4462 if (!(mc.flags & MOVE_FILE))
4463 return NULL;
4464 }
4465 if (!get_page_unless_zero(page))
4466 return NULL;
4467
4468 return page;
4469}
4470
4471#ifdef CONFIG_SWAP
4472static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
4473 unsigned long addr, pte_t ptent, swp_entry_t *entry)
4474{
4475 struct page *page = NULL;
4476 swp_entry_t ent = pte_to_swp_entry(ptent);
4477
4478 if (!(mc.flags & MOVE_ANON) || non_swap_entry(ent))
4479 return NULL;
4480 /*
4481 * Because lookup_swap_cache() updates some statistics counters,
4482 * we call find_get_page() with swapper_space directly.
4483 */
4484 page = find_get_page(swap_address_space(ent), ent.val);
4485 if (do_swap_account)
4486 entry->val = ent.val;
4487
4488 return page;
4489}
4490#else
4491static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
4492 unsigned long addr, pte_t ptent, swp_entry_t *entry)
4493{
4494 return NULL;
4495}
4496#endif
4497
4498static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
4499 unsigned long addr, pte_t ptent, swp_entry_t *entry)
4500{
4501 struct page *page = NULL;
4502 struct address_space *mapping;
4503 pgoff_t pgoff;
4504
4505 if (!vma->vm_file) /* anonymous vma */
4506 return NULL;
4507 if (!(mc.flags & MOVE_FILE))
4508 return NULL;
4509
4510 mapping = vma->vm_file->f_mapping;
4511 pgoff = linear_page_index(vma, addr);
4512
4513 /* page is moved even if it's not RSS of this task (page-faulted). */
4514#ifdef CONFIG_SWAP
4515 /* shmem/tmpfs may report page out on swap: account for that too.
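 * A swapped-out shmem page is represented by an exceptional radix
 * tree entry encoding the swap slot: decode it and look the page up
 * in the swap address space instead.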
*/ 4516 if (shmem_mapping(mapping)) { 4517 page = find_get_entry(mapping, pgoff); 4518 if (radix_tree_exceptional_entry(page)) { 4519 swp_entry_t swp = radix_to_swp_entry(page); 4520 if (do_swap_account) 4521 *entry = swp; 4522 page = find_get_page(swap_address_space(swp), swp.val); 4523 } 4524 } else 4525 page = find_get_page(mapping, pgoff); 4526#else 4527 page = find_get_page(mapping, pgoff); 4528#endif 4529 return page; 4530} 4531 4532/** 4533 * mem_cgroup_move_account - move account of the page 4534 * @page: the page 4535 * @nr_pages: number of regular pages (>1 for huge pages) 4536 * @from: mem_cgroup which the page is moved from. 4537 * @to: mem_cgroup which the page is moved to. @from != @to. 4538 * 4539 * The caller must confirm following. 4540 * - page is not on LRU (isolate_page() is useful.) 4541 * - compound_lock is held when nr_pages > 1 4542 * 4543 * This function doesn't do "charge" to new cgroup and doesn't do "uncharge" 4544 * from old cgroup. 4545 */ 4546static int mem_cgroup_move_account(struct page *page, 4547 unsigned int nr_pages, 4548 struct mem_cgroup *from, 4549 struct mem_cgroup *to) 4550{ 4551 unsigned long flags; 4552 int ret; 4553 bool anon; 4554 4555 VM_BUG_ON(from == to); 4556 VM_BUG_ON_PAGE(PageLRU(page), page); 4557 /* 4558 * The page is isolated from LRU. So, collapse function 4559 * will not handle this page. But page splitting can happen. 4560 * Do this check under compound_page_lock(). The caller should 4561 * hold it. 4562 */ 4563 ret = -EBUSY; 4564 if (nr_pages > 1 && !PageTransHuge(page)) 4565 goto out; 4566 4567 /* 4568 * Prevent mem_cgroup_replace_page() from looking at 4569 * page->mem_cgroup of its source page while we change it. 4570 */ 4571 if (!trylock_page(page)) 4572 goto out; 4573 4574 ret = -EINVAL; 4575 if (page->mem_cgroup != from) 4576 goto out_unlock; 4577 4578 anon = PageAnon(page); 4579 4580 spin_lock_irqsave(&from->move_lock, flags); 4581 4582 if (!anon && page_mapped(page)) { 4583 __this_cpu_sub(from->stat->count[MEM_CGROUP_STAT_FILE_MAPPED], 4584 nr_pages); 4585 __this_cpu_add(to->stat->count[MEM_CGROUP_STAT_FILE_MAPPED], 4586 nr_pages); 4587 } 4588 4589 /* 4590 * move_lock grabbed above and caller set from->moving_account, so 4591 * mem_cgroup_update_page_stat() will serialize updates to PageDirty. 4592 * So mapping should be stable for dirty pages. 4593 */ 4594 if (!anon && PageDirty(page)) { 4595 struct address_space *mapping = page_mapping(page); 4596 4597 if (mapping_cap_account_dirty(mapping)) { 4598 __this_cpu_sub(from->stat->count[MEM_CGROUP_STAT_DIRTY], 4599 nr_pages); 4600 __this_cpu_add(to->stat->count[MEM_CGROUP_STAT_DIRTY], 4601 nr_pages); 4602 } 4603 } 4604 4605 if (PageWriteback(page)) { 4606 __this_cpu_sub(from->stat->count[MEM_CGROUP_STAT_WRITEBACK], 4607 nr_pages); 4608 __this_cpu_add(to->stat->count[MEM_CGROUP_STAT_WRITEBACK], 4609 nr_pages); 4610 } 4611 4612 /* 4613 * It is safe to change page->mem_cgroup here because the page 4614 * is referenced, charged, and isolated - we can't race with 4615 * uncharging, charging, migration, or LRU putback. 
4616 */ 4617 4618 /* caller should have done css_get */ 4619 page->mem_cgroup = to; 4620 spin_unlock_irqrestore(&from->move_lock, flags); 4621 4622 ret = 0; 4623 4624 local_irq_disable(); 4625 mem_cgroup_charge_statistics(to, page, nr_pages); 4626 memcg_check_events(to, page); 4627 mem_cgroup_charge_statistics(from, page, -nr_pages); 4628 memcg_check_events(from, page); 4629 local_irq_enable(); 4630out_unlock: 4631 unlock_page(page); 4632out: 4633 return ret; 4634} 4635 4636static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma, 4637 unsigned long addr, pte_t ptent, union mc_target *target) 4638{ 4639 struct page *page = NULL; 4640 enum mc_target_type ret = MC_TARGET_NONE; 4641 swp_entry_t ent = { .val = 0 }; 4642 4643 if (pte_present(ptent)) 4644 page = mc_handle_present_pte(vma, addr, ptent); 4645 else if (is_swap_pte(ptent)) 4646 page = mc_handle_swap_pte(vma, addr, ptent, &ent); 4647 else if (pte_none(ptent)) 4648 page = mc_handle_file_pte(vma, addr, ptent, &ent); 4649 4650 if (!page && !ent.val) 4651 return ret; 4652 if (page) { 4653 /* 4654 * Do only loose check w/o serialization. 4655 * mem_cgroup_move_account() checks the page is valid or 4656 * not under LRU exclusion. 4657 */ 4658 if (page->mem_cgroup == mc.from) { 4659 ret = MC_TARGET_PAGE; 4660 if (target) 4661 target->page = page; 4662 } 4663 if (!ret || !target) 4664 put_page(page); 4665 } 4666 /* There is a swap entry and a page doesn't exist or isn't charged */ 4667 if (ent.val && !ret && 4668 mem_cgroup_id(mc.from) == lookup_swap_cgroup_id(ent)) { 4669 ret = MC_TARGET_SWAP; 4670 if (target) 4671 target->ent = ent; 4672 } 4673 return ret; 4674} 4675 4676#ifdef CONFIG_TRANSPARENT_HUGEPAGE 4677/* 4678 * We don't consider swapping or file mapped pages because THP does not 4679 * support them for now. 4680 * Caller should make sure that pmd_trans_huge(pmd) is true. 
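 *
 * Note that a huge pmd maps one compound page charged as a single
 * unit, so a successful move transfers HPAGE_PMD_NR pages' worth of
 * charge at once (see mem_cgroup_move_charge_pte_range()).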
4681 */ 4682static enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma, 4683 unsigned long addr, pmd_t pmd, union mc_target *target) 4684{ 4685 struct page *page = NULL; 4686 enum mc_target_type ret = MC_TARGET_NONE; 4687 4688 page = pmd_page(pmd); 4689 VM_BUG_ON_PAGE(!page || !PageHead(page), page); 4690 if (!(mc.flags & MOVE_ANON)) 4691 return ret; 4692 if (page->mem_cgroup == mc.from) { 4693 ret = MC_TARGET_PAGE; 4694 if (target) { 4695 get_page(page); 4696 target->page = page; 4697 } 4698 } 4699 return ret; 4700} 4701#else 4702static inline enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma, 4703 unsigned long addr, pmd_t pmd, union mc_target *target) 4704{ 4705 return MC_TARGET_NONE; 4706} 4707#endif 4708 4709static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd, 4710 unsigned long addr, unsigned long end, 4711 struct mm_walk *walk) 4712{ 4713 struct vm_area_struct *vma = walk->vma; 4714 pte_t *pte; 4715 spinlock_t *ptl; 4716 4717 if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) { 4718 if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE) 4719 mc.precharge += HPAGE_PMD_NR; 4720 spin_unlock(ptl); 4721 return 0; 4722 } 4723 4724 if (pmd_trans_unstable(pmd)) 4725 return 0; 4726 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); 4727 for (; addr != end; pte++, addr += PAGE_SIZE) 4728 if (get_mctgt_type(vma, addr, *pte, NULL)) 4729 mc.precharge++; /* increment precharge temporarily */ 4730 pte_unmap_unlock(pte - 1, ptl); 4731 cond_resched(); 4732 4733 return 0; 4734} 4735 4736static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm) 4737{ 4738 unsigned long precharge; 4739 4740 struct mm_walk mem_cgroup_count_precharge_walk = { 4741 .pmd_entry = mem_cgroup_count_precharge_pte_range, 4742 .mm = mm, 4743 }; 4744 down_read(&mm->mmap_sem); 4745 walk_page_range(0, ~0UL, &mem_cgroup_count_precharge_walk); 4746 up_read(&mm->mmap_sem); 4747 4748 precharge = mc.precharge; 4749 mc.precharge = 0; 4750 4751 return precharge; 4752} 4753 4754static int mem_cgroup_precharge_mc(struct mm_struct *mm) 4755{ 4756 unsigned long precharge = mem_cgroup_count_precharge(mm); 4757 4758 VM_BUG_ON(mc.moving_task); 4759 mc.moving_task = current; 4760 return mem_cgroup_do_precharge(precharge); 4761} 4762 4763/* cancels all extra charges on mc.from and mc.to, and wakes up all waiters. */ 4764static void __mem_cgroup_clear_mc(void) 4765{ 4766 struct mem_cgroup *from = mc.from; 4767 struct mem_cgroup *to = mc.to; 4768 4769 /* we must uncharge all the leftover precharges from mc.to */ 4770 if (mc.precharge) { 4771 cancel_charge(mc.to, mc.precharge); 4772 mc.precharge = 0; 4773 } 4774 /* 4775 * we didn't uncharge from mc.from at mem_cgroup_move_account(), so 4776 * we must uncharge here. 4777 */ 4778 if (mc.moved_charge) { 4779 cancel_charge(mc.from, mc.moved_charge); 4780 mc.moved_charge = 0; 4781 } 4782 /* we must fixup refcnts and charges */ 4783 if (mc.moved_swap) { 4784 /* uncharge swap account from the old cgroup */ 4785 if (!mem_cgroup_is_root(mc.from)) 4786 page_counter_uncharge(&mc.from->memsw, mc.moved_swap); 4787 4788 /* 4789 * we charged both to->memory and to->memsw, so we 4790 * should uncharge to->memory. 
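 *
 * The net effect per moved swap entry is thus:
 *	from->memsw -= 1  (uncharged above)
 *	to->memory  -= 1  (uncharged below; to->memsw stays charged
 *			   to keep covering the swap slot)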
4791 */
4792 if (!mem_cgroup_is_root(mc.to))
4793 page_counter_uncharge(&mc.to->memory, mc.moved_swap);
4794
4795 css_put_many(&mc.from->css, mc.moved_swap);
4796
4797 /* we've already done css_get(mc.to) */
4798 mc.moved_swap = 0;
4799 }
4800 memcg_oom_recover(from);
4801 memcg_oom_recover(to);
4802 wake_up_all(&mc.waitq);
4803}
4804
4805static void mem_cgroup_clear_mc(void)
4806{
4807 struct mm_struct *mm = mc.mm;
4808
4809 /*
4810 * we must clear moving_task before waking up waiters at the end of
4811 * task migration.
4812 */
4813 mc.moving_task = NULL;
4814 __mem_cgroup_clear_mc();
4815 spin_lock(&mc.lock);
4816 mc.from = NULL;
4817 mc.to = NULL;
4818 mc.mm = NULL;
4819 spin_unlock(&mc.lock);
4820
4821 mmput(mm);
4822}
4823
4824static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
4825{
4826 struct cgroup_subsys_state *css;
4827 struct mem_cgroup *memcg;
4828 struct mem_cgroup *from;
4829 struct task_struct *leader, *p;
4830 struct mm_struct *mm;
4831 unsigned long move_flags;
4832 int ret = 0;
4833
4834 /* charge immigration isn't supported on the default hierarchy */
4835 if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
4836 return 0;
4837
4838 /*
4839 * Multi-process migrations only happen on the default hierarchy
4840 * where charge immigration is not used. Perform charge
4841 * immigration if @tset contains a leader and whine if there are
4842 * multiple.
4843 */
4844 p = NULL;
4845 cgroup_taskset_for_each_leader(leader, css, tset) {
4846 WARN_ON_ONCE(p);
4847 p = leader;
4848 memcg = mem_cgroup_from_css(css);
4849 }
4850 if (!p)
4851 return 0;
4852
4853 /*
4854 * We are now committed to this value whatever it is. Changes in this
4855 * tunable will only affect upcoming migrations, not the current one.
4856 * So we need to save it, and keep it going.
4857 */
4858 move_flags = READ_ONCE(memcg->move_charge_at_immigrate);
4859 if (!move_flags)
4860 return 0;
4861
4862 from = mem_cgroup_from_task(p);
4863
4864 VM_BUG_ON(from == memcg);
4865
4866 mm = get_task_mm(p);
4867 if (!mm)
4868 return 0;
4869 /* We move charges only when we move an owner of the mm */
4870 if (mm->owner == p) {
4871 VM_BUG_ON(mc.from);
4872 VM_BUG_ON(mc.to);
4873 VM_BUG_ON(mc.precharge);
4874 VM_BUG_ON(mc.moved_charge);
4875 VM_BUG_ON(mc.moved_swap);
4876
4877 spin_lock(&mc.lock);
4878 mc.mm = mm;
4879 mc.from = from;
4880 mc.to = memcg;
4881 mc.flags = move_flags;
4882 spin_unlock(&mc.lock);
4883 /* We set mc.moving_task later */
4884
4885 ret = mem_cgroup_precharge_mc(mm);
4886 if (ret)
4887 mem_cgroup_clear_mc();
4888 } else {
4889 mmput(mm);
4890 }
4891 return ret;
4892}
4893
4894static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
4895{
4896 if (mc.to)
4897 mem_cgroup_clear_mc();
4898}
4899
4900static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
4901 unsigned long addr, unsigned long end,
4902 struct mm_walk *walk)
4903{
4904 int ret = 0;
4905 struct vm_area_struct *vma = walk->vma;
4906 pte_t *pte;
4907 spinlock_t *ptl;
4908 enum mc_target_type target_type;
4909 union mc_target target;
4910 struct page *page;
4911
4912 /*
4913 * We don't take compound_lock() here, but no race with thp splitting
4914 * happens because:
4915 * - if pmd_trans_huge_lock() returns 1, the relevant thp is not
4916 * under splitting, which means there's no concurrent thp split,
4917 * - if another thread runs into split_huge_page() just after we
4918 * entered this if-block, the thread must wait for page table lock
4919 * to be unlocked in __split_huge_page_splitting(), where the main
4920 * part of thp split is not executed yet.
4921 */
4922 if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
4923 if (mc.precharge < HPAGE_PMD_NR) {
4924 spin_unlock(ptl);
4925 return 0;
4926 }
4927 target_type = get_mctgt_type_thp(vma, addr, *pmd, &target);
4928 if (target_type == MC_TARGET_PAGE) {
4929 page = target.page;
4930 if (!isolate_lru_page(page)) {
4931 if (!mem_cgroup_move_account(page, HPAGE_PMD_NR,
4932 mc.from, mc.to)) {
4933 mc.precharge -= HPAGE_PMD_NR;
4934 mc.moved_charge += HPAGE_PMD_NR;
4935 }
4936 putback_lru_page(page);
4937 }
4938 put_page(page);
4939 }
4940 spin_unlock(ptl);
4941 return 0;
4942 }
4943
4944 if (pmd_trans_unstable(pmd))
4945 return 0;
4946retry:
4947 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
4948 for (; addr != end; addr += PAGE_SIZE) {
4949 pte_t ptent = *(pte++);
4950 swp_entry_t ent;
4951
4952 if (!mc.precharge)
4953 break;
4954
4955 switch (get_mctgt_type(vma, addr, ptent, &target)) {
4956 case MC_TARGET_PAGE:
4957 page = target.page;
4958 if (isolate_lru_page(page))
4959 goto put;
4960 if (!mem_cgroup_move_account(page, 1, mc.from, mc.to)) {
4961 mc.precharge--;
4962 /* we uncharge from mc.from later. */
4963 mc.moved_charge++;
4964 }
4965 putback_lru_page(page);
4966put: /* get_mctgt_type() gets the page */
4967 put_page(page);
4968 break;
4969 case MC_TARGET_SWAP:
4970 ent = target.ent;
4971 if (!mem_cgroup_move_swap_account(ent, mc.from, mc.to)) {
4972 mc.precharge--;
4973 /* we fix up refcnts and charges later. */
4974 mc.moved_swap++;
4975 }
4976 break;
4977 default:
4978 break;
4979 }
4980 }
4981 pte_unmap_unlock(pte - 1, ptl);
4982 cond_resched();
4983
4984 if (addr != end) {
4985 /*
4986 * We have consumed all precharges we got in can_attach().
4987 * We try charging one by one, but don't do any additional
4988 * charges to mc.to if charging already failed once in the
4989 * attach() phase.
4990 */
4991 ret = mem_cgroup_do_precharge(1);
4992 if (!ret)
4993 goto retry;
4994 }
4995
4996 return ret;
4997}
4998
4999static void mem_cgroup_move_charge(void)
5000{
5001 struct mm_walk mem_cgroup_move_charge_walk = {
5002 .pmd_entry = mem_cgroup_move_charge_pte_range,
5003 .mm = mc.mm,
5004 };
5005
5006 lru_add_drain_all();
5007 /*
5008 * Signal mem_cgroup_begin_page_stat() to take the memcg's
5009 * move_lock while we're moving its pages to another memcg.
5010 * Then wait for already started RCU-only updates to finish.
5011 */
5012 atomic_inc(&mc.from->moving_account);
5013 synchronize_rcu();
5014retry:
5015 if (unlikely(!down_read_trylock(&mc.mm->mmap_sem))) {
5016 /*
5017 * Someone holding the mmap_sem might be waiting on the
5018 * waitq. So we cancel all extra charges, wake up all waiters,
5019 * and retry. Because we cancel precharges, we might not be able
5020 * to move enough charges, but moving charge is a best-effort
5021 * feature anyway, so it wouldn't be a big problem.
5022 */
5023 __mem_cgroup_clear_mc();
5024 cond_resched();
5025 goto retry;
5026 }
5027 /*
5028 * When we have consumed all precharges and further charging
5029 * fails, the page walk just aborts.
5030 */
5031 walk_page_range(0, ~0UL, &mem_cgroup_move_charge_walk);
5032 up_read(&mc.mm->mmap_sem);
5033 atomic_dec(&mc.from->moving_account);
5034}
5035
5036static void mem_cgroup_move_task(void)
5037{
5038 if (mc.to) {
5039 mem_cgroup_move_charge();
5040 mem_cgroup_clear_mc();
5041 }
5042}
5043#else /* !CONFIG_MMU */
5044static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
5045{
5046 return 0;
5047}
5048static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
5049{
5050}
5051static void mem_cgroup_move_task(void)
5052{
5053}
5054#endif
5055
5056/*
5057 * Cgroup retains root cgroups across [un]mount cycles, making it necessary
5058 * to verify whether we're attached to the default hierarchy on each mount
5059 * attempt.
5060 */
5061static void mem_cgroup_bind(struct cgroup_subsys_state *root_css)
5062{
5063 /*
5064 * use_hierarchy is forced on the default hierarchy. cgroup core
5065 * guarantees that @root doesn't have any children, so turning it
5066 * on for the root memcg is enough.
5067 */ 5068 if (cgroup_subsys_on_dfl(memory_cgrp_subsys)) 5069 root_mem_cgroup->use_hierarchy = true; 5070 else 5071 root_mem_cgroup->use_hierarchy = false; 5072} 5073 5074static u64 memory_current_read(struct cgroup_subsys_state *css, 5075 struct cftype *cft) 5076{ 5077 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 5078 5079 return (u64)page_counter_read(&memcg->memory) * PAGE_SIZE; 5080} 5081 5082static int memory_low_show(struct seq_file *m, void *v) 5083{ 5084 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m)); 5085 unsigned long low = READ_ONCE(memcg->low); 5086 5087 if (low == PAGE_COUNTER_MAX) 5088 seq_puts(m, "max\n"); 5089 else 5090 seq_printf(m, "%llu\n", (u64)low * PAGE_SIZE); 5091 5092 return 0; 5093} 5094 5095static ssize_t memory_low_write(struct kernfs_open_file *of, 5096 char *buf, size_t nbytes, loff_t off) 5097{ 5098 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 5099 unsigned long low; 5100 int err; 5101 5102 buf = strstrip(buf); 5103 err = page_counter_memparse(buf, "max", &low); 5104 if (err) 5105 return err; 5106 5107 memcg->low = low; 5108 5109 return nbytes; 5110} 5111 5112static int memory_high_show(struct seq_file *m, void *v) 5113{ 5114 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m)); 5115 unsigned long high = READ_ONCE(memcg->high); 5116 5117 if (high == PAGE_COUNTER_MAX) 5118 seq_puts(m, "max\n"); 5119 else 5120 seq_printf(m, "%llu\n", (u64)high * PAGE_SIZE); 5121 5122 return 0; 5123} 5124 5125static ssize_t memory_high_write(struct kernfs_open_file *of, 5126 char *buf, size_t nbytes, loff_t off) 5127{ 5128 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 5129 unsigned long nr_pages; 5130 unsigned long high; 5131 int err; 5132 5133 buf = strstrip(buf); 5134 err = page_counter_memparse(buf, "max", &high); 5135 if (err) 5136 return err; 5137 5138 memcg->high = high; 5139 5140 nr_pages = page_counter_read(&memcg->memory); 5141 if (nr_pages > high) 5142 try_to_free_mem_cgroup_pages(memcg, nr_pages - high, 5143 GFP_KERNEL, true); 5144 5145 memcg_wb_domain_size_changed(memcg); 5146 return nbytes; 5147} 5148 5149static int memory_max_show(struct seq_file *m, void *v) 5150{ 5151 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m)); 5152 unsigned long max = READ_ONCE(memcg->memory.limit); 5153 5154 if (max == PAGE_COUNTER_MAX) 5155 seq_puts(m, "max\n"); 5156 else 5157 seq_printf(m, "%llu\n", (u64)max * PAGE_SIZE); 5158 5159 return 0; 5160} 5161 5162static ssize_t memory_max_write(struct kernfs_open_file *of, 5163 char *buf, size_t nbytes, loff_t off) 5164{ 5165 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); 5166 unsigned int nr_reclaims = MEM_CGROUP_RECLAIM_RETRIES; 5167 bool drained = false; 5168 unsigned long max; 5169 int err; 5170 5171 buf = strstrip(buf); 5172 err = page_counter_memparse(buf, "max", &max); 5173 if (err) 5174 return err; 5175 5176 xchg(&memcg->memory.limit, max); 5177 5178 for (;;) { 5179 unsigned long nr_pages = page_counter_read(&memcg->memory); 5180 5181 if (nr_pages <= max) 5182 break; 5183 5184 if (signal_pending(current)) { 5185 err = -EINTR; 5186 break; 5187 } 5188 5189 if (!drained) { 5190 drain_all_stock(memcg); 5191 drained = true; 5192 continue; 5193 } 5194 5195 if (nr_reclaims) { 5196 if (!try_to_free_mem_cgroup_pages(memcg, nr_pages - max, 5197 GFP_KERNEL, true)) 5198 nr_reclaims--; 5199 continue; 5200 } 5201 5202 mem_cgroup_events(memcg, MEMCG_OOM, 1); 5203 if (!mem_cgroup_out_of_memory(memcg, GFP_KERNEL, 0)) 5204 break; 5205 } 5206 5207 
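	/*
	 * The new limit was already published with the xchg() above,
	 * even if reclaim could not push usage below it; size the
	 * writeback domain for it either way.
	 */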
memcg_wb_domain_size_changed(memcg); 5208 return nbytes; 5209} 5210 5211static int memory_events_show(struct seq_file *m, void *v) 5212{ 5213 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m)); 5214 5215 seq_printf(m, "low %lu\n", mem_cgroup_read_events(memcg, MEMCG_LOW)); 5216 seq_printf(m, "high %lu\n", mem_cgroup_read_events(memcg, MEMCG_HIGH)); 5217 seq_printf(m, "max %lu\n", mem_cgroup_read_events(memcg, MEMCG_MAX)); 5218 seq_printf(m, "oom %lu\n", mem_cgroup_read_events(memcg, MEMCG_OOM)); 5219 5220 return 0; 5221} 5222 5223static struct cftype memory_files[] = { 5224 { 5225 .name = "current", 5226 .flags = CFTYPE_NOT_ON_ROOT, 5227 .read_u64 = memory_current_read, 5228 }, 5229 { 5230 .name = "low", 5231 .flags = CFTYPE_NOT_ON_ROOT, 5232 .seq_show = memory_low_show, 5233 .write = memory_low_write, 5234 }, 5235 { 5236 .name = "high", 5237 .flags = CFTYPE_NOT_ON_ROOT, 5238 .seq_show = memory_high_show, 5239 .write = memory_high_write, 5240 }, 5241 { 5242 .name = "max", 5243 .flags = CFTYPE_NOT_ON_ROOT, 5244 .seq_show = memory_max_show, 5245 .write = memory_max_write, 5246 }, 5247 { 5248 .name = "events", 5249 .flags = CFTYPE_NOT_ON_ROOT, 5250 .file_offset = offsetof(struct mem_cgroup, events_file), 5251 .seq_show = memory_events_show, 5252 }, 5253 { } /* terminate */ 5254}; 5255 5256struct cgroup_subsys memory_cgrp_subsys = { 5257 .css_alloc = mem_cgroup_css_alloc, 5258 .css_online = mem_cgroup_css_online, 5259 .css_offline = mem_cgroup_css_offline, 5260 .css_released = mem_cgroup_css_released, 5261 .css_free = mem_cgroup_css_free, 5262 .css_reset = mem_cgroup_css_reset, 5263 .can_attach = mem_cgroup_can_attach, 5264 .cancel_attach = mem_cgroup_cancel_attach, 5265 .post_attach = mem_cgroup_move_task, 5266 .bind = mem_cgroup_bind, 5267 .dfl_cftypes = memory_files, 5268 .legacy_cftypes = mem_cgroup_legacy_files, 5269 .early_init = 0, 5270}; 5271 5272/** 5273 * mem_cgroup_low - check if memory consumption is below the normal range 5274 * @root: the highest ancestor to consider 5275 * @memcg: the memory cgroup to check 5276 * 5277 * Returns %true if memory consumption of @memcg, and that of all 5278 * configurable ancestors up to @root, is below the normal range. 5279 */ 5280bool mem_cgroup_low(struct mem_cgroup *root, struct mem_cgroup *memcg) 5281{ 5282 if (mem_cgroup_disabled()) 5283 return false; 5284 5285 /* 5286 * The toplevel group doesn't have a configurable range, so 5287 * it's never low when looked at directly, and it is not 5288 * considered an ancestor when assessing the hierarchy. 5289 */ 5290 5291 if (memcg == root_mem_cgroup) 5292 return false; 5293 5294 if (page_counter_read(&memcg->memory) >= memcg->low) 5295 return false; 5296 5297 while (memcg != root) { 5298 memcg = parent_mem_cgroup(memcg); 5299 5300 if (memcg == root_mem_cgroup) 5301 break; 5302 5303 if (page_counter_read(&memcg->memory) >= memcg->low) 5304 return false; 5305 } 5306 return true; 5307} 5308 5309/** 5310 * mem_cgroup_try_charge - try charging a page 5311 * @page: page to charge 5312 * @mm: mm context of the victim 5313 * @gfp_mask: reclaim mode 5314 * @memcgp: charged memcg return 5315 * 5316 * Try to charge @page to the memcg that @mm belongs to, reclaiming 5317 * pages according to @gfp_mask if necessary. 5318 * 5319 * Returns 0 on success, with *@memcgp pointing to the charged memcg. 5320 * Otherwise, an error code is returned. 5321 * 5322 * After page->mapping has been set up, the caller must finalize the 5323 * charge with mem_cgroup_commit_charge(). 
Or abort the transaction 5324 * with mem_cgroup_cancel_charge() in case page instantiation fails. 5325 */ 5326int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm, 5327 gfp_t gfp_mask, struct mem_cgroup **memcgp) 5328{ 5329 struct mem_cgroup *memcg = NULL; 5330 unsigned int nr_pages = 1; 5331 int ret = 0; 5332 5333 if (mem_cgroup_disabled()) 5334 goto out; 5335 5336 if (PageSwapCache(page)) { 5337 /* 5338 * Every swap fault against a single page tries to charge the 5339 * page, bail as early as possible. shmem_unuse() encounters 5340 * already charged pages, too. The USED bit is protected by 5341 * the page lock, which serializes swap cache removal, which 5342 * in turn serializes uncharging. 5343 */ 5344 VM_BUG_ON_PAGE(!PageLocked(page), page); 5345 if (page->mem_cgroup) 5346 goto out; 5347 5348 if (do_swap_account) { 5349 swp_entry_t ent = { .val = page_private(page), }; 5350 unsigned short id = lookup_swap_cgroup_id(ent); 5351 5352 rcu_read_lock(); 5353 memcg = mem_cgroup_from_id(id); 5354 if (memcg && !css_tryget_online(&memcg->css)) 5355 memcg = NULL; 5356 rcu_read_unlock(); 5357 } 5358 } 5359 5360 if (PageTransHuge(page)) { 5361 nr_pages <<= compound_order(page); 5362 VM_BUG_ON_PAGE(!PageTransHuge(page), page); 5363 } 5364 5365 if (!memcg) 5366 memcg = get_mem_cgroup_from_mm(mm); 5367 5368 ret = try_charge(memcg, gfp_mask, nr_pages); 5369 5370 css_put(&memcg->css); 5371out: 5372 *memcgp = memcg; 5373 return ret; 5374} 5375 5376/** 5377 * mem_cgroup_commit_charge - commit a page charge 5378 * @page: page to charge 5379 * @memcg: memcg to charge the page to 5380 * @lrucare: page might be on LRU already 5381 * 5382 * Finalize a charge transaction started by mem_cgroup_try_charge(), 5383 * after page->mapping has been set up. This must happen atomically 5384 * as part of the page instantiation, i.e. under the page table lock 5385 * for anonymous pages, under the page lock for page and swap cache. 5386 * 5387 * In addition, the page must not be on the LRU during the commit, to 5388 * prevent racing with task migration. If it might be, use @lrucare. 5389 * 5390 * Use mem_cgroup_cancel_charge() to cancel the transaction instead. 5391 */ 5392void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg, 5393 bool lrucare) 5394{ 5395 unsigned int nr_pages = 1; 5396 5397 VM_BUG_ON_PAGE(!page->mapping, page); 5398 VM_BUG_ON_PAGE(PageLRU(page) && !lrucare, page); 5399 5400 if (mem_cgroup_disabled()) 5401 return; 5402 /* 5403 * Swap faults will attempt to charge the same page multiple 5404 * times. But reuse_swap_page() might have removed the page 5405 * from swapcache already, so we can't check PageSwapCache(). 5406 */ 5407 if (!memcg) 5408 return; 5409 5410 commit_charge(page, memcg, lrucare); 5411 5412 if (PageTransHuge(page)) { 5413 nr_pages <<= compound_order(page); 5414 VM_BUG_ON_PAGE(!PageTransHuge(page), page); 5415 } 5416 5417 local_irq_disable(); 5418 mem_cgroup_charge_statistics(memcg, page, nr_pages); 5419 memcg_check_events(memcg, page); 5420 local_irq_enable(); 5421 5422 if (do_swap_account && PageSwapCache(page)) { 5423 swp_entry_t entry = { .val = page_private(page) }; 5424 /* 5425 * The swap entry might not get freed for a long time, 5426 * let's not wait for it. The page already received a 5427 * memory+swap charge, drop the swap entry duplicate. 
5428 */ 5429 mem_cgroup_uncharge_swap(entry); 5430 } 5431} 5432 5433/** 5434 * mem_cgroup_cancel_charge - cancel a page charge 5435 * @page: page to charge 5436 * @memcg: memcg to charge the page to 5437 * 5438 * Cancel a charge transaction started by mem_cgroup_try_charge(). 5439 */ 5440void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg) 5441{ 5442 unsigned int nr_pages = 1; 5443 5444 if (mem_cgroup_disabled()) 5445 return; 5446 /* 5447 * Swap faults will attempt to charge the same page multiple 5448 * times. But reuse_swap_page() might have removed the page 5449 * from swapcache already, so we can't check PageSwapCache(). 5450 */ 5451 if (!memcg) 5452 return; 5453 5454 if (PageTransHuge(page)) { 5455 nr_pages <<= compound_order(page); 5456 VM_BUG_ON_PAGE(!PageTransHuge(page), page); 5457 } 5458 5459 cancel_charge(memcg, nr_pages); 5460} 5461 5462static void uncharge_batch(struct mem_cgroup *memcg, unsigned long pgpgout, 5463 unsigned long nr_anon, unsigned long nr_file, 5464 unsigned long nr_huge, struct page *dummy_page) 5465{ 5466 unsigned long nr_pages = nr_anon + nr_file; 5467 unsigned long flags; 5468 5469 if (!mem_cgroup_is_root(memcg)) { 5470 page_counter_uncharge(&memcg->memory, nr_pages); 5471 if (do_swap_account) 5472 page_counter_uncharge(&memcg->memsw, nr_pages); 5473 memcg_oom_recover(memcg); 5474 } 5475 5476 local_irq_save(flags); 5477 __this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS], nr_anon); 5478 __this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_CACHE], nr_file); 5479 __this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS_HUGE], nr_huge); 5480 __this_cpu_add(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGOUT], pgpgout); 5481 __this_cpu_add(memcg->stat->nr_page_events, nr_pages); 5482 memcg_check_events(memcg, dummy_page); 5483 local_irq_restore(flags); 5484 5485 if (!mem_cgroup_is_root(memcg)) 5486 css_put_many(&memcg->css, nr_pages); 5487} 5488 5489static void uncharge_list(struct list_head *page_list) 5490{ 5491 struct mem_cgroup *memcg = NULL; 5492 unsigned long nr_anon = 0; 5493 unsigned long nr_file = 0; 5494 unsigned long nr_huge = 0; 5495 unsigned long pgpgout = 0; 5496 struct list_head *next; 5497 struct page *page; 5498 5499 next = page_list->next; 5500 do { 5501 unsigned int nr_pages = 1; 5502 5503 page = list_entry(next, struct page, lru); 5504 next = page->lru.next; 5505 5506 VM_BUG_ON_PAGE(PageLRU(page), page); 5507 VM_BUG_ON_PAGE(page_count(page), page); 5508 5509 if (!page->mem_cgroup) 5510 continue; 5511 5512 /* 5513 * Nobody should be changing or seriously looking at 5514 * page->mem_cgroup at this point, we have fully 5515 * exclusive access to the page. 5516 */ 5517 5518 if (memcg != page->mem_cgroup) { 5519 if (memcg) { 5520 uncharge_batch(memcg, pgpgout, nr_anon, nr_file, 5521 nr_huge, page); 5522 pgpgout = nr_anon = nr_file = nr_huge = 0; 5523 } 5524 memcg = page->mem_cgroup; 5525 } 5526 5527 if (PageTransHuge(page)) { 5528 nr_pages <<= compound_order(page); 5529 VM_BUG_ON_PAGE(!PageTransHuge(page), page); 5530 nr_huge += nr_pages; 5531 } 5532 5533 if (PageAnon(page)) 5534 nr_anon += nr_pages; 5535 else 5536 nr_file += nr_pages; 5537 5538 page->mem_cgroup = NULL; 5539 5540 pgpgout++; 5541 } while (next != page_list); 5542 5543 if (memcg) 5544 uncharge_batch(memcg, pgpgout, nr_anon, nr_file, 5545 nr_huge, page); 5546} 5547 5548/** 5549 * mem_cgroup_uncharge - uncharge a page 5550 * @page: page to uncharge 5551 * 5552 * Uncharge a page previously charged with mem_cgroup_try_charge() and 5553 * mem_cgroup_commit_charge(). 
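 *
 * For orientation, the full charging protocol of a hypothetical
 * page-instantiation path looks roughly like:
 *
 *	if (mem_cgroup_try_charge(page, mm, gfp_mask, &memcg))
 *		goto fail;
 *	... set up page->mapping, map the page ...
 *	if (instantiated)
 *		mem_cgroup_commit_charge(page, memcg, false);
 *	else
 *		mem_cgroup_cancel_charge(page, memcg);
 *	...
 *	mem_cgroup_uncharge(page);	<- when the page is finally freed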
5554 */ 5555void mem_cgroup_uncharge(struct page *page) 5556{ 5557 if (mem_cgroup_disabled()) 5558 return; 5559 5560 /* Don't touch page->lru of any random page, pre-check: */ 5561 if (!page->mem_cgroup) 5562 return; 5563 5564 INIT_LIST_HEAD(&page->lru); 5565 uncharge_list(&page->lru); 5566} 5567 5568/** 5569 * mem_cgroup_uncharge_list - uncharge a list of page 5570 * @page_list: list of pages to uncharge 5571 * 5572 * Uncharge a list of pages previously charged with 5573 * mem_cgroup_try_charge() and mem_cgroup_commit_charge(). 5574 */ 5575void mem_cgroup_uncharge_list(struct list_head *page_list) 5576{ 5577 if (mem_cgroup_disabled()) 5578 return; 5579 5580 if (!list_empty(page_list)) 5581 uncharge_list(page_list); 5582} 5583 5584/** 5585 * mem_cgroup_replace_page - migrate a charge to another page 5586 * @oldpage: currently charged page 5587 * @newpage: page to transfer the charge to 5588 * 5589 * Migrate the charge from @oldpage to @newpage. 5590 * 5591 * Both pages must be locked, @newpage->mapping must be set up. 5592 * Either or both pages might be on the LRU already. 5593 */ 5594void mem_cgroup_replace_page(struct page *oldpage, struct page *newpage) 5595{ 5596 struct mem_cgroup *memcg; 5597 int isolated; 5598 5599 VM_BUG_ON_PAGE(!PageLocked(oldpage), oldpage); 5600 VM_BUG_ON_PAGE(!PageLocked(newpage), newpage); 5601 VM_BUG_ON_PAGE(PageAnon(oldpage) != PageAnon(newpage), newpage); 5602 VM_BUG_ON_PAGE(PageTransHuge(oldpage) != PageTransHuge(newpage), 5603 newpage); 5604 5605 if (mem_cgroup_disabled()) 5606 return; 5607 5608 /* Page cache replacement: new page already charged? */ 5609 if (newpage->mem_cgroup) 5610 return; 5611 5612 /* Swapcache readahead pages can get replaced before being charged */ 5613 memcg = oldpage->mem_cgroup; 5614 if (!memcg) 5615 return; 5616 5617 lock_page_lru(oldpage, &isolated); 5618 oldpage->mem_cgroup = NULL; 5619 unlock_page_lru(oldpage, isolated); 5620 5621 commit_charge(newpage, memcg, true); 5622} 5623 5624/* 5625 * subsys_initcall() for memory controller. 5626 * 5627 * Some parts like hotcpu_notifier() have to be initialized from this context 5628 * because of lock dependencies (cgroup_lock -> cpu hotplug) but basically 5629 * everything that doesn't depend on a specific mem_cgroup structure should 5630 * be initialized from here. 5631 */ 5632static int __init mem_cgroup_init(void) 5633{ 5634 int cpu, node; 5635 5636 hotcpu_notifier(memcg_cpu_hotplug_callback, 0); 5637 5638 for_each_possible_cpu(cpu) 5639 INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work, 5640 drain_local_stock); 5641 5642 for_each_node(node) { 5643 struct mem_cgroup_tree_per_node *rtpn; 5644 int zone; 5645 5646 rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL, 5647 node_online(node) ? node : NUMA_NO_NODE); 5648 5649 for (zone = 0; zone < MAX_NR_ZONES; zone++) { 5650 struct mem_cgroup_tree_per_zone *rtpz; 5651 5652 rtpz = &rtpn->rb_tree_per_zone[zone]; 5653 rtpz->rb_root = RB_ROOT; 5654 spin_lock_init(&rtpz->lock); 5655 } 5656 soft_limit_tree.rb_tree_per_node[node] = rtpn; 5657 } 5658 5659 return 0; 5660} 5661subsys_initcall(mem_cgroup_init); 5662 5663#ifdef CONFIG_MEMCG_SWAP 5664/** 5665 * mem_cgroup_swapout - transfer a memsw charge to swap 5666 * @page: page whose memsw charge to transfer 5667 * @entry: swap entry to move the charge to 5668 * 5669 * Transfer the memsw charge of @page to @entry. 
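 *
 * In counter terms (assuming !mem_cgroup_is_root()): the page's
 * memory charge is released below, while the memsw charge stays in
 * place to keep covering the swap slot; the owning memcg is recorded
 * in the swap_cgroup map so that mem_cgroup_uncharge_swap() can drop
 * the memsw charge once the swap entry is freed.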
5670 */
5671void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
5672{
5673 struct mem_cgroup *memcg;
5674 unsigned short oldid;
5675
5676 VM_BUG_ON_PAGE(PageLRU(page), page);
5677 VM_BUG_ON_PAGE(page_count(page), page);
5678
5679 if (!do_swap_account)
5680 return;
5681
5682 memcg = page->mem_cgroup;
5683
5684 /* Readahead page, never charged */
5685 if (!memcg)
5686 return;
5687
5688 oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg));
5689 VM_BUG_ON_PAGE(oldid, page);
5690 mem_cgroup_swap_statistics(memcg, true);
5691
5692 page->mem_cgroup = NULL;
5693
5694 if (!mem_cgroup_is_root(memcg))
5695 page_counter_uncharge(&memcg->memory, 1);
5696
5697 /*
5698 * Interrupts should be disabled here because the caller holds
5699 * mapping->tree_lock, which is taken with interrupts off. Having
5700 * interrupts disabled is important because it is the only
5701 * synchronisation we have for updating the per-CPU variables.
5702 */
5703 VM_BUG_ON(!irqs_disabled());
5704 mem_cgroup_charge_statistics(memcg, page, -1);
5705 memcg_check_events(memcg, page);
5706}
5707
5708/**
5709 * mem_cgroup_uncharge_swap - uncharge a swap entry
5710 * @entry: swap entry to uncharge
5711 *
5712 * Drop the memsw charge associated with @entry.
5713 */
5714void mem_cgroup_uncharge_swap(swp_entry_t entry)
5715{
5716 struct mem_cgroup *memcg;
5717 unsigned short id;
5718
5719 if (!do_swap_account)
5720 return;
5721
5722 id = swap_cgroup_record(entry, 0);
5723 rcu_read_lock();
5724 memcg = mem_cgroup_from_id(id);
5725 if (memcg) {
5726 if (!mem_cgroup_is_root(memcg))
5727 page_counter_uncharge(&memcg->memsw, 1);
5728 mem_cgroup_swap_statistics(memcg, false);
5729 css_put(&memcg->css);
5730 }
5731 rcu_read_unlock();
5732}
5733
5734/* for remembering the boot option */
5735#ifdef CONFIG_MEMCG_SWAP_ENABLED
5736static int really_do_swap_account __initdata = 1;
5737#else
5738static int really_do_swap_account __initdata;
5739#endif
5740
5741static int __init enable_swap_account(char *s)
5742{
5743 if (!strcmp(s, "1"))
5744 really_do_swap_account = 1;
5745 else if (!strcmp(s, "0"))
5746 really_do_swap_account = 0;
5747 return 1;
5748}
5749__setup("swapaccount=", enable_swap_account);
5750
5751static struct cftype memsw_cgroup_files[] = {
5752 {
5753 .name = "memsw.usage_in_bytes",
5754 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
5755 .read_u64 = mem_cgroup_read_u64,
5756 },
5757 {
5758 .name = "memsw.max_usage_in_bytes",
5759 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
5760 .write = mem_cgroup_reset,
5761 .read_u64 = mem_cgroup_read_u64,
5762 },
5763 {
5764 .name = "memsw.limit_in_bytes",
5765 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
5766 .write = mem_cgroup_write,
5767 .read_u64 = mem_cgroup_read_u64,
5768 },
5769 {
5770 .name = "memsw.failcnt",
5771 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
5772 .write = mem_cgroup_reset,
5773 .read_u64 = mem_cgroup_read_u64,
5774 },
5775 { }, /* terminate */
5776};
5777
5778static int __init mem_cgroup_swap_init(void)
5779{
5780 if (!mem_cgroup_disabled() && really_do_swap_account) {
5781 do_swap_account = 1;
5782 WARN_ON(cgroup_add_legacy_cftypes(&memory_cgrp_subsys,
5783 memsw_cgroup_files));
5784 }
5785 return 0;
5786}
5787subsys_initcall(mem_cgroup_swap_init);
5788
5789#endif /* CONFIG_MEMCG_SWAP */
5790