root/include/linux/list_lru.h

DEFINITIONS

This source file includes the following definitions.
  1. list_lru_shrink_count
  2. list_lru_count
  3. list_lru_shrink_walk
  4. list_lru_shrink_walk_irq
  5. list_lru_walk

/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2013 Red Hat, Inc. and Parallels Inc. All rights reserved.
 * Authors: David Chinner and Glauber Costa
 *
 * Generic LRU infrastructure
 */
#ifndef _LRU_LIST_H
#define _LRU_LIST_H

#include <linux/list.h>
#include <linux/nodemask.h>
#include <linux/shrinker.h>

struct mem_cgroup;

/* list_lru_walk_cb must always return one of these values */
enum lru_status {
        LRU_REMOVED,            /* item removed from list */
        LRU_REMOVED_RETRY,      /* item removed, but lock has been
                                   dropped and reacquired */
        LRU_ROTATE,             /* item referenced, give another pass */
        LRU_SKIP,               /* item cannot be locked, skip */
        LRU_RETRY,              /* item not freeable. May drop the lock
                                   internally, but has to return locked. */
};

struct list_lru_one {
        struct list_head        list;
        /* may become negative during memcg reparenting */
        long                    nr_items;
};

struct list_lru_memcg {
        struct rcu_head         rcu;
        /* array of per cgroup lists, indexed by memcg_cache_id */
        struct list_lru_one     *lru[0];
};

struct list_lru_node {
        /* protects all lists on the node, including per cgroup */
        spinlock_t              lock;
        /* global list, used for the root cgroup in cgroup aware lrus */
        struct list_lru_one     lru;
#ifdef CONFIG_MEMCG_KMEM
        /* for cgroup aware lrus points to per cgroup lists, otherwise NULL */
        struct list_lru_memcg   __rcu *memcg_lrus;
#endif
        long nr_items;
} ____cacheline_aligned_in_smp;

struct list_lru {
        struct list_lru_node    *node;
#ifdef CONFIG_MEMCG_KMEM
        struct list_head        list;
        int                     shrinker_id;
        bool                    memcg_aware;
#endif
};

void list_lru_destroy(struct list_lru *lru);
int __list_lru_init(struct list_lru *lru, bool memcg_aware,
                    struct lock_class_key *key, struct shrinker *shrinker);

#define list_lru_init(lru)                              \
        __list_lru_init((lru), false, NULL, NULL)
#define list_lru_init_key(lru, key)                     \
        __list_lru_init((lru), false, (key), NULL)
#define list_lru_init_memcg(lru, shrinker)              \
        __list_lru_init((lru), true, NULL, shrinker)
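
/*
 * Example (not part of the original header): a minimal sketch of setting up
 * and tearing down a list_lru for a hypothetical object cache.  The names
 * my_cache_lru, my_cache_setup() and my_cache_teardown() are invented for
 * illustration only.  A cgroup aware user would instead pass its shrinker
 * to list_lru_init_memcg() so the per-memcg lists can be tied to that
 * shrinker's id.
 */
static struct list_lru my_cache_lru;

static int my_cache_setup(void)
{
        /* plain, not cgroup aware lru: 0 on success, negative errno on error */
        return list_lru_init(&my_cache_lru);
}

static void my_cache_teardown(void)
{
        /* all objects must already have been removed or reclaimed here */
        list_lru_destroy(&my_cache_lru);
}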

int memcg_update_all_list_lrus(int num_memcgs);
void memcg_drain_all_list_lrus(int src_idx, struct mem_cgroup *dst_memcg);

/**
 * list_lru_add: add an element to the lru list's tail
 * @lru: the lru pointer
 * @item: the item to be added.
 *
 * If the element is already part of a list, this function returns without
 * doing anything. Therefore the caller does not need to keep track of
 * whether or not the element already belongs to a list and is free to
 * update that state lazily. Note however that this holds for *a* list,
 * not *this* list: if the caller organizes itself in a way that elements
 * can be on more than one type of list, it is up to the caller to fully
 * remove the item from the previous list (with list_lru_del() for
 * instance) before moving it to @lru.
 *
 * Return value: true if the list was updated, false otherwise.
 */
bool list_lru_add(struct list_lru *lru, struct list_head *item);
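
/*
 * Example (not part of the original header): a sketch of the lazy insertion
 * pattern described above for a hypothetical cached object embedding a
 * struct list_head.  struct my_object and my_object_put() are invented
 * names, my_cache_lru is the hypothetical lru from the earlier sketch, and
 * atomic_t assumes <linux/atomic.h> is available.
 */
struct my_object {
        struct list_head        lru;    /* linkage used by my_cache_lru */
        atomic_t                refcnt;
        /* ... payload ... */
};

static void my_object_put(struct my_object *obj)
{
        if (atomic_dec_and_test(&obj->refcnt)) {
                /*
                 * No users left: park the object on the LRU so a shrinker
                 * can reclaim it later.  If it is already on the list,
                 * list_lru_add() returns false and does nothing, so the
                 * caller does not have to track that itself.
                 */
                list_lru_add(&my_cache_lru, &obj->lru);
        }
}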

/**
 * list_lru_del: delete an element from the lru list
 * @lru: the lru pointer
 * @item: the item to be deleted.
 *
 * This function works analogously to list_lru_add() in terms of list
 * manipulation. The comments about an element already belonging to
 * a list are also valid for list_lru_del().
 *
 * Return value: true if the list was updated, false otherwise.
 */
bool list_lru_del(struct list_lru *lru, struct list_head *item);
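
/*
 * Example (not part of the original header): the counterpart of the sketch
 * above.  When the hypothetical object becomes actively used again, it is
 * lazily pulled off the LRU with list_lru_del(); if it was never added,
 * the call simply returns false.  Locking against concurrent reclaim is
 * omitted for brevity.
 */
static void my_object_get(struct my_object *obj)
{
        if (atomic_inc_return(&obj->refcnt) == 1) {
                /* first user after being idle: take it off the LRU */
                list_lru_del(&my_cache_lru, &obj->lru);
        }
}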

/**
 * list_lru_count_one: return the number of objects currently held by @lru
 * @lru: the lru pointer.
 * @nid: the node id to count from.
 * @memcg: the cgroup to count from.
 *
 * Always returns a non-negative number, 0 for empty lists. There is no
 * guarantee that the list is not updated while the count is being computed.
 * Callers that want such a guarantee need to provide an outer lock.
 */
unsigned long list_lru_count_one(struct list_lru *lru,
                                 int nid, struct mem_cgroup *memcg);
unsigned long list_lru_count_node(struct list_lru *lru, int nid);

static inline unsigned long list_lru_shrink_count(struct list_lru *lru,
                                                  struct shrink_control *sc)
{
        return list_lru_count_one(lru, sc->nid, sc->memcg);
}
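
/*
 * Example (not part of the original header): a sketch of a shrinker
 * ->count_objects() hook built on list_lru_shrink_count().  The shrink
 * control already carries the node and memcg being targeted, so the helper
 * counts only the matching per-node/per-memcg list.  my_cache_count() and
 * my_cache_lru are invented names from the earlier sketches.
 */
static unsigned long my_cache_count(struct shrinker *shrink,
                                    struct shrink_control *sc)
{
        /* the result may be stale; the list can change while we count */
        return list_lru_shrink_count(&my_cache_lru, sc);
}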

static inline unsigned long list_lru_count(struct list_lru *lru)
{
        long count = 0;
        int nid;

        for_each_node_state(nid, N_NORMAL_MEMORY)
                count += list_lru_count_node(lru, nid);

        return count;
}

void list_lru_isolate(struct list_lru_one *list, struct list_head *item);
void list_lru_isolate_move(struct list_lru_one *list, struct list_head *item,
                           struct list_head *head);

typedef enum lru_status (*list_lru_walk_cb)(struct list_head *item,
                struct list_lru_one *list, spinlock_t *lock, void *cb_arg);
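
/*
 * Example (not part of the original header): a sketch of a list_lru_walk_cb
 * implementation showing how the enum lru_status values are meant to be
 * used.  my_cache_isolate() and struct my_object are invented names from
 * the earlier sketches; the dispose list passed through cb_arg is a
 * caller-owned list_head.
 */
static enum lru_status my_cache_isolate(struct list_head *item,
                                        struct list_lru_one *list,
                                        spinlock_t *lock, void *cb_arg)
{
        struct list_head *dispose = cb_arg;
        struct my_object *obj = container_of(item, struct my_object, lru);

        /* busy objects stay on the LRU and get another pass later */
        if (atomic_read(&obj->refcnt))
                return LRU_ROTATE;

        /*
         * Detach the item from the LRU (keeping nr_items consistent) and
         * queue it on a private list so it can be freed after the walk,
         * outside the lru spinlock.
         */
        list_lru_isolate_move(list, item, dispose);
        return LRU_REMOVED;
}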

/**
 * list_lru_walk_one: walk a list_lru, isolating and disposing freeable items.
 * @lru: the lru pointer.
 * @nid: the node id to scan from.
 * @memcg: the cgroup to scan from.
 * @isolate: callback function that is responsible for deciding what to do with
 *  the item currently being scanned
 * @cb_arg: opaque type that will be passed to @isolate
 * @nr_to_walk: how many items to scan.
 *
 * This function will scan all elements in a particular list_lru, calling the
 * @isolate callback for each of those items, along with the current list
 * spinlock and a caller-provided opaque argument. The @isolate callback can
 * choose to drop the lock internally, but *must* return with the lock held.
 * The callback will return an enum lru_status telling the list_lru
 * infrastructure what to do with the object being scanned.
 *
 * Please note that nr_to_walk does not mean how many objects will be freed,
 * just how many objects will be scanned.
 *
 * Return value: the number of objects effectively removed from the LRU.
 */
unsigned long list_lru_walk_one(struct list_lru *lru,
                                int nid, struct mem_cgroup *memcg,
                                list_lru_walk_cb isolate, void *cb_arg,
                                unsigned long *nr_to_walk);
/**
 * list_lru_walk_one_irq: walk a list_lru, isolating and disposing freeable items.
 * @lru: the lru pointer.
 * @nid: the node id to scan from.
 * @memcg: the cgroup to scan from.
 * @isolate: callback function that is responsible for deciding what to do with
 *  the item currently being scanned
 * @cb_arg: opaque type that will be passed to @isolate
 * @nr_to_walk: how many items to scan.
 *
 * Same as list_lru_walk_one() except that the spinlock is acquired with
 * spin_lock_irq().
 */
unsigned long list_lru_walk_one_irq(struct list_lru *lru,
                                    int nid, struct mem_cgroup *memcg,
                                    list_lru_walk_cb isolate, void *cb_arg,
                                    unsigned long *nr_to_walk);
unsigned long list_lru_walk_node(struct list_lru *lru, int nid,
                                 list_lru_walk_cb isolate, void *cb_arg,
                                 unsigned long *nr_to_walk);

static inline unsigned long
list_lru_shrink_walk(struct list_lru *lru, struct shrink_control *sc,
                     list_lru_walk_cb isolate, void *cb_arg)
{
        return list_lru_walk_one(lru, sc->nid, sc->memcg, isolate, cb_arg,
                                 &sc->nr_to_scan);
}
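
/*
 * Example (not part of the original header): a sketch of the matching
 * shrinker ->scan_objects() hook.  It walks the per-node/per-memcg list
 * selected by the shrink control, lets the invented my_cache_isolate()
 * callback move freeable objects onto a private list, and frees them
 * outside the lru lock.  my_object_free() is another hypothetical helper.
 */
static unsigned long my_cache_scan(struct shrinker *shrink,
                                   struct shrink_control *sc)
{
        LIST_HEAD(dispose);
        unsigned long freed;

        freed = list_lru_shrink_walk(&my_cache_lru, sc,
                                     my_cache_isolate, &dispose);

        while (!list_empty(&dispose)) {
                struct my_object *obj;

                obj = list_first_entry(&dispose, struct my_object, lru);
                list_del_init(&obj->lru);
                my_object_free(obj);            /* hypothetical destructor */
        }
        return freed;
}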

static inline unsigned long
list_lru_shrink_walk_irq(struct list_lru *lru, struct shrink_control *sc,
                         list_lru_walk_cb isolate, void *cb_arg)
{
        return list_lru_walk_one_irq(lru, sc->nid, sc->memcg, isolate, cb_arg,
                                     &sc->nr_to_scan);
}

static inline unsigned long
list_lru_walk(struct list_lru *lru, list_lru_walk_cb isolate,
              void *cb_arg, unsigned long nr_to_walk)
{
        long isolated = 0;
        int nid;

        for_each_node_state(nid, N_NORMAL_MEMORY) {
                isolated += list_lru_walk_node(lru, nid, isolate,
                                               cb_arg, &nr_to_walk);
                if (nr_to_walk <= 0)
                        break;
        }
        return isolated;
}
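
/*
 * Example (not part of the original header): a sketch of draining every
 * node's list at teardown time, before list_lru_destroy(), by reusing the
 * invented my_cache_isolate() callback and my_object_free() helper from
 * the earlier sketches.
 */
static void my_cache_drain(void)
{
        LIST_HEAD(dispose);

        /* ULONG_MAX effectively means "no limit on how many items to scan" */
        list_lru_walk(&my_cache_lru, my_cache_isolate, &dispose, ULONG_MAX);

        while (!list_empty(&dispose)) {
                struct my_object *obj;

                obj = list_first_entry(&dispose, struct my_object, lru);
                list_del_init(&obj->lru);
                my_object_free(obj);
        }
}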
#endif /* _LRU_LIST_H */
