root/mm/swap_slots.c


DEFINITIONS

This source file includes the following definitions.
  1. deactivate_swap_slots_cache
  2. reactivate_swap_slots_cache
  3. disable_swap_slots_cache_lock
  4. __reenable_swap_slots_cache
  5. reenable_swap_slots_cache_unlock
  6. check_cache_active
  7. alloc_swap_slot_cache
  8. drain_slots_cache_cpu
  9. __drain_swap_slots_cache
  10. free_slot_cache
  11. enable_swap_slots_cache
  12. refill_swap_slots_cache
  13. free_swap_slot
  14. get_swap_page

// SPDX-License-Identifier: GPL-2.0
/*
 * Manage cache of swap slots to be used for and returned from
 * swap.
 *
 * Copyright(c) 2016 Intel Corporation.
 *
 * Author: Tim Chen <tim.c.chen@linux.intel.com>
 *
 * We allocate the swap slots from the global pool and put
 * them into local per cpu caches.  This has the advantage
 * of not needing to acquire the swap_info lock every time
 * we need a new slot.
 *
 * There is also the opportunity to simply return the slot
 * to the local caches without needing to acquire the swap_info
 * lock.  We do not reuse the returned slots directly but
 * move them back to the global pool in a batch.  This
 * allows the slots to coalesce and reduce fragmentation.
 *
 * The swap entry allocated is marked with the SWAP_HAS_CACHE
 * flag in swap_map, which prevents it from being allocated
 * again from the global pool.
 *
 * The swap slots cache is protected by a mutex instead of
 * a spin lock as when we search for slots with scan_swap_map,
 * we can possibly sleep.
 */

#include <linux/swap_slots.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/vmalloc.h>
#include <linux/mutex.h>
#include <linux/mm.h>

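/*
 * Global state controlling the per-cpu caches:
 *
 *  swap_slot_cache_initialized - set once the cpu hotplug callbacks that
 *      allocate/free the per-cpu caches have been registered
 *  swap_slot_cache_enabled - cleared by disable_swap_slots_cache_lock(),
 *      set again by __reenable_swap_slots_cache() when usable swap exists
 *  swap_slot_cache_active - toggled at run time by check_cache_active()
 *      according to how much free swap remains in the global pool
 */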
static DEFINE_PER_CPU(struct swap_slots_cache, swp_slots);
static bool     swap_slot_cache_active;
bool    swap_slot_cache_enabled;
static bool     swap_slot_cache_initialized;
static DEFINE_MUTEX(swap_slots_cache_mutex);
/* Serialize swap slots cache enable/disable operations */
static DEFINE_MUTEX(swap_slots_cache_enable_mutex);

static void __drain_swap_slots_cache(unsigned int type);
static void deactivate_swap_slots_cache(void);
static void reactivate_swap_slots_cache(void);

#define use_swap_slot_cache (swap_slot_cache_active && \
                swap_slot_cache_enabled && swap_slot_cache_initialized)
#define SLOTS_CACHE 0x1
#define SLOTS_CACHE_RET 0x2

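/*
 * Deactivating stops per-cpu caching and flushes both the allocation and
 * return caches back to the global pool; reactivating simply allows
 * caching again.  Both run under swap_slots_cache_mutex.
 */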
static void deactivate_swap_slots_cache(void)
{
        mutex_lock(&swap_slots_cache_mutex);
        swap_slot_cache_active = false;
        __drain_swap_slots_cache(SLOTS_CACHE|SLOTS_CACHE_RET);
        mutex_unlock(&swap_slots_cache_mutex);
}

static void reactivate_swap_slots_cache(void)
{
        mutex_lock(&swap_slots_cache_mutex);
        swap_slot_cache_active = true;
        mutex_unlock(&swap_slots_cache_mutex);
}

/* Must not be called with cpu hot plug lock */
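/*
 * Leaves swap_slots_cache_enable_mutex held on return; callers are expected
 * to pair this with reenable_swap_slots_cache_unlock(), which drops it.
 */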
void disable_swap_slots_cache_lock(void)
{
        mutex_lock(&swap_slots_cache_enable_mutex);
        swap_slot_cache_enabled = false;
        if (swap_slot_cache_initialized) {
                /* serialize with cpu hotplug operations */
                get_online_cpus();
                __drain_swap_slots_cache(SLOTS_CACHE|SLOTS_CACHE_RET);
                put_online_cpus();
        }
}

static void __reenable_swap_slots_cache(void)
{
        swap_slot_cache_enabled = has_usable_swap();
}

void reenable_swap_slots_cache_unlock(void)
{
        __reenable_swap_slots_cache();
        mutex_unlock(&swap_slots_cache_enable_mutex);
}

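/*
 * Decide whether per-cpu caching should be used right now: reactivate the
 * cache when free swap exceeds THRESHOLD_ACTIVATE_SWAP_SLOTS_CACHE pages per
 * online cpu, and deactivate (draining the caches) when it falls below
 * THRESHOLD_DEACTIVATE_SWAP_SLOTS_CACHE pages per online cpu.
 */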
static bool check_cache_active(void)
{
        long pages;

        if (!swap_slot_cache_enabled || !swap_slot_cache_initialized)
                return false;

        pages = get_nr_swap_pages();
        if (!swap_slot_cache_active) {
                if (pages > num_online_cpus() *
                    THRESHOLD_ACTIVATE_SWAP_SLOTS_CACHE)
                        reactivate_swap_slots_cache();
                goto out;
        }

        /* if the global pool of free swap slots is too low, deactivate cache */
        if (pages < num_online_cpus() * THRESHOLD_DEACTIVATE_SWAP_SLOTS_CACHE)
                deactivate_swap_slots_cache();
out:
        return swap_slot_cache_active;
}

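/*
 * CPU hotplug "online" callback: allocate the allocation and return slot
 * arrays for @cpu and publish them in its swap_slots_cache.
 */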
static int alloc_swap_slot_cache(unsigned int cpu)
{
        struct swap_slots_cache *cache;
        swp_entry_t *slots, *slots_ret;

        /*
         * Do allocation outside swap_slots_cache_mutex
         * as kvcalloc could trigger reclaim and get_swap_page,
         * which can lock swap_slots_cache_mutex.
         */
        slots = kvcalloc(SWAP_SLOTS_CACHE_SIZE, sizeof(swp_entry_t),
                         GFP_KERNEL);
        if (!slots)
                return -ENOMEM;

        slots_ret = kvcalloc(SWAP_SLOTS_CACHE_SIZE, sizeof(swp_entry_t),
                             GFP_KERNEL);
        if (!slots_ret) {
                kvfree(slots);
                return -ENOMEM;
        }

        mutex_lock(&swap_slots_cache_mutex);
        cache = &per_cpu(swp_slots, cpu);
        if (cache->slots || cache->slots_ret)
                /* cache already allocated */
                goto out;
        if (!cache->lock_initialized) {
                mutex_init(&cache->alloc_lock);
                spin_lock_init(&cache->free_lock);
                cache->lock_initialized = true;
        }
        cache->nr = 0;
        cache->cur = 0;
        cache->n_ret = 0;
        /*
         * We initialized alloc_lock and free_lock earlier.  We use
         * !cache->slots or !cache->slots_ret to know if it is safe to acquire
         * the corresponding lock and use the cache.  Memory barrier below
         * ensures the assumption.
         */
        mb();
        cache->slots = slots;
        slots = NULL;
        cache->slots_ret = slots_ret;
        slots_ret = NULL;
out:
        mutex_unlock(&swap_slots_cache_mutex);
        if (slots)
                kvfree(slots);
        if (slots_ret)
                kvfree(slots_ret);
        return 0;
}

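/*
 * Flush the cached slots of @cpu back to the global pool.  @type selects the
 * allocation cache (SLOTS_CACHE), the return cache (SLOTS_CACHE_RET), or
 * both; @free_slots additionally frees the backing arrays, as is done when
 * the cpu goes offline.
 */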
static void drain_slots_cache_cpu(unsigned int cpu, unsigned int type,
                                  bool free_slots)
{
        struct swap_slots_cache *cache;
        swp_entry_t *slots = NULL;

        cache = &per_cpu(swp_slots, cpu);
        if ((type & SLOTS_CACHE) && cache->slots) {
                mutex_lock(&cache->alloc_lock);
                swapcache_free_entries(cache->slots + cache->cur, cache->nr);
                cache->cur = 0;
                cache->nr = 0;
                if (free_slots && cache->slots) {
                        kvfree(cache->slots);
                        cache->slots = NULL;
                }
                mutex_unlock(&cache->alloc_lock);
        }
        if ((type & SLOTS_CACHE_RET) && cache->slots_ret) {
                spin_lock_irq(&cache->free_lock);
                swapcache_free_entries(cache->slots_ret, cache->n_ret);
                cache->n_ret = 0;
                if (free_slots && cache->slots_ret) {
                        slots = cache->slots_ret;
                        cache->slots_ret = NULL;
                }
                spin_unlock_irq(&cache->free_lock);
                if (slots)
                        kvfree(slots);
        }
}

static void __drain_swap_slots_cache(unsigned int type)
{
        unsigned int cpu;

        /*
         * This function is called during
         *      1) swapoff, when we have to make sure no
         *         leftover slots are in the cache when we remove
         *         a swap device;
         *      2) disabling of the swap slot cache, when we run low
         *         on swap slots while allocating memory and need
         *         to return swap slots to the global pool.
         *
         * We cannot acquire the cpu hot plug lock here as
         * this function can be invoked in the cpu
         * hot plug path:
         * cpu_up -> lock cpu_hotplug -> cpu hotplug state callback
         *   -> memory allocation -> direct reclaim -> get_swap_page
         *   -> drain_swap_slots_cache
         *
         * Hence the loop over currently online cpus below could miss a cpu
         * that is being brought online but not yet marked as online.
         * That is okay as we do not schedule and run anything on a
         * cpu before it has been marked online.  Hence, we will not
         * fill any swap slots in the slots cache of such a cpu.
         * There are no slots on such a cpu that need to be drained.
         */
        for_each_online_cpu(cpu)
                drain_slots_cache_cpu(cpu, type, false);
}

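/* CPU hotplug teardown callback: drain and free this cpu's slot caches */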
static int free_slot_cache(unsigned int cpu)
{
        mutex_lock(&swap_slots_cache_mutex);
        drain_slots_cache_cpu(cpu, SLOTS_CACHE | SLOTS_CACHE_RET, true);
        mutex_unlock(&swap_slots_cache_mutex);
        return 0;
}

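/*
 * Set up the swap slots cache: on the first call, register the cpu hotplug
 * callbacks that allocate/free the per-cpu caches; on every call, (re)enable
 * caching if usable swap is present.
 */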
int enable_swap_slots_cache(void)
{
        int ret = 0;

        mutex_lock(&swap_slots_cache_enable_mutex);
        if (swap_slot_cache_initialized) {
                __reenable_swap_slots_cache();
                goto out_unlock;
        }

        ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "swap_slots_cache",
                                alloc_swap_slot_cache, free_slot_cache);
        if (WARN_ONCE(ret < 0, "Cache allocation failed (%s), operating "
                               "without swap slots cache.\n", __func__))
                goto out_unlock;

        swap_slot_cache_initialized = true;
        __reenable_swap_slots_cache();
out_unlock:
        mutex_unlock(&swap_slots_cache_enable_mutex);
        return 0;
}

/* called with swap slot cache's alloc lock held */
static int refill_swap_slots_cache(struct swap_slots_cache *cache)
{
        if (!use_swap_slot_cache || cache->nr)
                return 0;

        cache->cur = 0;
        if (swap_slot_cache_active)
                cache->nr = get_swap_pages(SWAP_SLOTS_CACHE_SIZE,
                                           cache->slots, 1);

        return cache->nr;
}

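/*
 * Return a swap entry to this cpu's return cache.  Once SWAP_SLOTS_CACHE_SIZE
 * entries have accumulated they are flushed to the global pool in one batch.
 * If the cache is unusable, the entry is freed directly.
 */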
int free_swap_slot(swp_entry_t entry)
{
        struct swap_slots_cache *cache;

        cache = raw_cpu_ptr(&swp_slots);
        if (likely(use_swap_slot_cache && cache->slots_ret)) {
                spin_lock_irq(&cache->free_lock);
                /* Swap slots cache may be deactivated before acquiring lock */
                if (!use_swap_slot_cache || !cache->slots_ret) {
                        spin_unlock_irq(&cache->free_lock);
                        goto direct_free;
                }
                if (cache->n_ret >= SWAP_SLOTS_CACHE_SIZE) {
                        /*
                         * Return slots to global pool.
                         * The current swap_map value is SWAP_HAS_CACHE.
                         * Set it to 0 to indicate it is available for
                         * allocation in global pool
                         */
                        swapcache_free_entries(cache->slots_ret, cache->n_ret);
                        cache->n_ret = 0;
                }
                cache->slots_ret[cache->n_ret++] = entry;
                spin_unlock_irq(&cache->free_lock);
        } else {
direct_free:
                swapcache_free_entries(&entry, 1);
        }

        return 0;
}

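/*
 * Allocate a swap entry for @page.  THP pages bypass the cache and get a
 * cluster directly from the global pool.  Otherwise try this cpu's slot
 * cache, refilling it if needed, and fall back to the global pool.  If
 * charging the entry to the page's memcg fails, the entry is released and
 * a zero entry is returned.
 */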
swp_entry_t get_swap_page(struct page *page)
{
        swp_entry_t entry, *pentry;
        struct swap_slots_cache *cache;

        entry.val = 0;

        if (PageTransHuge(page)) {
                if (IS_ENABLED(CONFIG_THP_SWAP))
                        get_swap_pages(1, &entry, HPAGE_PMD_NR);
                goto out;
        }

        /*
         * Preemption is allowed here, because we may sleep
         * in refill_swap_slots_cache().  But it is safe, because
         * accesses to the per-CPU data structure are protected by the
         * mutex cache->alloc_lock.
         *
         * The alloc path here does not touch cache->slots_ret
         * so cache->free_lock is not taken.
         */
        cache = raw_cpu_ptr(&swp_slots);

        if (likely(check_cache_active() && cache->slots)) {
                mutex_lock(&cache->alloc_lock);
                if (cache->slots) {
repeat:
                        if (cache->nr) {
                                pentry = &cache->slots[cache->cur++];
                                entry = *pentry;
                                pentry->val = 0;
                                cache->nr--;
                        } else {
                                if (refill_swap_slots_cache(cache))
                                        goto repeat;
                        }
                }
                mutex_unlock(&cache->alloc_lock);
                if (entry.val)
                        goto out;
        }

        get_swap_pages(1, &entry, 1);
out:
        if (mem_cgroup_try_charge_swap(page, entry)) {
                put_swap_page(page, entry);
                entry.val = 0;
        }
        return entry;
}
