root/include/linux/swap.h

DEFINITIONS

This source file includes the following definitions:
  1. current_is_kswapd
  2. vm_swap_full
  3. get_nr_swap_pages
  4. put_swap_device
  5. swap_readpage
  6. swp_swap_info
  7. show_swap_cache_info
  8. add_swap_count_continuation
  9. swap_shmem_alloc
  10. swap_duplicate
  11. swap_free
  12. put_swap_page
  13. swap_cluster_readahead
  14. swapin_readahead
  15. swap_writepage
  16. lookup_swap_cache
  17. add_to_swap
  18. add_to_swap_cache
  19. __delete_from_swap_cache
  20. delete_from_swap_cache
  21. page_swapcount
  22. __swap_count
  23. __swp_swapcount
  24. swp_swapcount
  25. try_to_free_swap
  26. get_swap_page
  27. split_swap_cluster
  28. mem_cgroup_swappiness
  29. mem_cgroup_swappiness
  30. mem_cgroup_throttle_swaprate
  31. mem_cgroup_swapout
  32. mem_cgroup_try_charge_swap
  33. mem_cgroup_uncharge_swap
  34. mem_cgroup_get_nr_swap_pages
  35. mem_cgroup_swap_full

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SWAP_H
#define _LINUX_SWAP_H

#include <linux/spinlock.h>
#include <linux/linkage.h>
#include <linux/mmzone.h>
#include <linux/list.h>
#include <linux/memcontrol.h>
#include <linux/sched.h>
#include <linux/node.h>
#include <linux/fs.h>
#include <linux/atomic.h>
#include <linux/page-flags.h>
#include <asm/page.h>

struct notifier_block;

struct bio;

struct pagevec;

#define SWAP_FLAG_PREFER        0x8000  /* set if swap priority specified */
#define SWAP_FLAG_PRIO_MASK     0x7fff
#define SWAP_FLAG_PRIO_SHIFT    0
#define SWAP_FLAG_DISCARD       0x10000 /* enable discard for swap */
#define SWAP_FLAG_DISCARD_ONCE  0x20000 /* discard swap area at swapon-time */
#define SWAP_FLAG_DISCARD_PAGES 0x40000 /* discard page-clusters after use */

#define SWAP_FLAGS_VALID        (SWAP_FLAG_PRIO_MASK | SWAP_FLAG_PREFER | \
                                 SWAP_FLAG_DISCARD | SWAP_FLAG_DISCARD_ONCE | \
                                 SWAP_FLAG_DISCARD_PAGES)
#define SWAP_BATCH 64

static inline int current_is_kswapd(void)
{
        return current->flags & PF_KSWAPD;
}

/*
 * MAX_SWAPFILES defines the maximum number of swap types: things which can
 * be swapped to.  The swap type and the offset into that swap type are
 * encoded into ptes and into pgoff_t's in the swapcache.  Using five bits
 * for the type means that the maximum number of swapcache pages is 2^27
 * on 32-bit-pgoff_t architectures.  And that assumes that the architecture
 * packs the type/offset into the pte as 5/27 as well.
 */
#define MAX_SWAPFILES_SHIFT     5
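
/*
 * Editor's note, an illustrative sketch (not part of this header): under
 * the 5/27 split described above, a swap entry packs as type:5 | offset:27,
 * so a 32-bit pgoff_t caps a swap area at 2^27 pages (512GB with 4K pages).
 * The accessors live in <linux/swapops.h>:
 *
 *	swp_entry_t entry = swp_entry(type, offset);	// pack
 *	unsigned int t    = swp_type(entry);		// top 5 bits
 *	pgoff_t      off  = swp_offset(entry);		// low 27 bits
 */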

/*
 * Use some of the swap type numbers for other purposes. This
 * is a convenient way to hook into the VM to trigger special
 * actions on faults.
 */

/*
 * Unaddressable device memory support. See include/linux/hmm.h and
 * Documentation/vm/hmm.rst. In short: we need struct pages for device
 * memory that is unaddressable (inaccessible) by the CPU, so that we can
 * migrate part of a process's memory to device memory.
 *
 * When a page is migrated from CPU to device, we set the CPU page table
 * entry to a special SWP_DEVICE_* entry.
 */
#ifdef CONFIG_DEVICE_PRIVATE
#define SWP_DEVICE_NUM 2
#define SWP_DEVICE_WRITE (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM)
#define SWP_DEVICE_READ (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM+1)
#else
#define SWP_DEVICE_NUM 0
#endif

/*
 * NUMA node memory migration support
 */
#ifdef CONFIG_MIGRATION
#define SWP_MIGRATION_NUM 2
#define SWP_MIGRATION_READ      (MAX_SWAPFILES + SWP_HWPOISON_NUM)
#define SWP_MIGRATION_WRITE     (MAX_SWAPFILES + SWP_HWPOISON_NUM + 1)
#else
#define SWP_MIGRATION_NUM 0
#endif

/*
 * Handling of hardware poisoned pages with memory corruption.
 */
#ifdef CONFIG_MEMORY_FAILURE
#define SWP_HWPOISON_NUM 1
#define SWP_HWPOISON            MAX_SWAPFILES
#else
#define SWP_HWPOISON_NUM 0
#endif

#define MAX_SWAPFILES \
        ((1 << MAX_SWAPFILES_SHIFT) - SWP_DEVICE_NUM - \
        SWP_MIGRATION_NUM - SWP_HWPOISON_NUM)
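
/*
 * Editor's note, worked example: with CONFIG_DEVICE_PRIVATE,
 * CONFIG_MIGRATION and CONFIG_MEMORY_FAILURE all enabled,
 * MAX_SWAPFILES = (1 << 5) - 2 - 2 - 1 = 27, leaving the top five type
 * values free for the special device, migration and hwpoison entries
 * defined above.
 */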

/*
 * Magic header for a swap area. The first part of the union is
 * what the swap magic looks like for the old (limited to 128MB)
 * swap area format, the second part of the union adds - in the
 * old reserved area - some extra information. Note that the first
 * kilobyte is reserved for boot loader or disk label stuff...
 *
 * Having the magic at the end of the PAGE_SIZE makes detecting swap
 * areas somewhat tricky on machines that support multiple page sizes.
 * For 2.5 we'll probably want to move the magic to just beyond the
 * bootbits...
 */
union swap_header {
        struct {
                char reserved[PAGE_SIZE - 10];
                char magic[10];                 /* SWAP-SPACE or SWAPSPACE2 */
        } magic;
        struct {
                char            bootbits[1024]; /* Space for disklabel etc. */
                __u32           version;
                __u32           last_page;
                __u32           nr_badpages;
                unsigned char   sws_uuid[16];
                unsigned char   sws_volume[16];
                __u32           padding[117];
                __u32           badpages[1];
        } info;
};
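
/*
 * Editor's note, an illustrative sketch (assumed helper, not part of this
 * header): swapon-time code recognizes a swap area by the magic string at
 * the end of the first page, roughly:
 *
 *	static bool swap_header_ok(const union swap_header *sh)
 *	{
 *		return !memcmp(sh->magic.magic, "SWAPSPACE2", 10) ||
 *		       !memcmp(sh->magic.magic, "SWAP-SPACE", 10);
 *	}
 */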

/*
 * current->reclaim_state points to one of these when a task is running
 * memory reclaim
 */
struct reclaim_state {
        unsigned long reclaimed_slab;
};

#ifdef __KERNEL__

struct address_space;
struct sysinfo;
struct writeback_control;
struct zone;

/*
 * A swap extent maps a range of a swapfile's PAGE_SIZE pages onto a range of
 * disk blocks.  A list of swap extents maps the entire swapfile.  (Where the
 * term `swapfile' refers to either a block device or an IS_REG file.  Apart
 * from setup, they're handled identically.)
 *
 * We always assume that blocks are of size PAGE_SIZE.
 */
struct swap_extent {
        struct rb_node rb_node;
        pgoff_t start_page;
        pgoff_t nr_pages;
        sector_t start_block;
};

/*
 * Max bad pages in the new format.
 */
#define MAX_SWAP_BADPAGES \
        ((offsetof(union swap_header, magic.magic) - \
          offsetof(union swap_header, info.badpages)) / sizeof(int))
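
/*
 * Editor's note, worked example with 4K pages:
 * offsetof(union swap_header, magic.magic)   = 4096 - 10 = 4086,
 * offsetof(union swap_header, info.badpages) = 1024 + 12 + 32 + 468 = 1536,
 * so MAX_SWAP_BADPAGES = (4086 - 1536) / 4 = 637 entries.
 */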

enum {
        SWP_USED        = (1 << 0),     /* is slot in swap_info[] used? */
        SWP_WRITEOK     = (1 << 1),     /* ok to write to this swap?    */
        SWP_DISCARDABLE = (1 << 2),     /* blkdev supports discard */
        SWP_DISCARDING  = (1 << 3),     /* now discarding a free cluster */
        SWP_SOLIDSTATE  = (1 << 4),     /* blkdev seeks are cheap */
        SWP_CONTINUED   = (1 << 5),     /* swap_map has count continuation */
        SWP_BLKDEV      = (1 << 6),     /* it's a block device */
        SWP_ACTIVATED   = (1 << 7),     /* set after swap_activate success */
        SWP_FS          = (1 << 8),     /* swap file goes through fs */
        SWP_AREA_DISCARD = (1 << 9),    /* single-time swap area discards */
        SWP_PAGE_DISCARD = (1 << 10),   /* freed swap page-cluster discards */
        SWP_STABLE_WRITES = (1 << 11),  /* never overwrite PG_writeback pages */
        SWP_SYNCHRONOUS_IO = (1 << 12), /* synchronous IO is efficient */
        SWP_VALID       = (1 << 13),    /* swap is valid to be operated on? */
                                        /* add others here before... */
        SWP_SCANNING    = (1 << 14),    /* refcount in scan_swap_map */
};

#define SWAP_CLUSTER_MAX 32UL
#define COMPACT_CLUSTER_MAX SWAP_CLUSTER_MAX

#define SWAP_MAP_MAX    0x3e    /* Max duplication count, in first swap_map */
#define SWAP_MAP_BAD    0x3f    /* Note swap page is bad, in first swap_map */
#define SWAP_HAS_CACHE  0x40    /* Flag page is cached, in first swap_map */
#define SWAP_CONT_MAX   0x7f    /* Max count, in each swap_map continuation */
#define COUNT_CONTINUED 0x80    /* See swap_map continuation for full count */
#define SWAP_MAP_SHMEM  0xbf    /* Owned by shmem/tmpfs, in first swap_map */
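
/*
 * Editor's note, an illustrative sketch: each swap_map byte carries a
 * duplication count in its low bits plus the SWAP_HAS_CACHE flag, so a
 * reader decodes it roughly as:
 *
 *	unsigned char ent = si->swap_map[offset];
 *	bool cached = ent & SWAP_HAS_CACHE;
 *	int count   = ent & ~SWAP_HAS_CACHE;	// up to SWAP_MAP_MAX
 *
 * A count that would exceed SWAP_MAP_MAX sets COUNT_CONTINUED and
 * overflows into continuation pages via add_swap_count_continuation().
 */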

/*
 * We use this to track usage of a cluster. A cluster is a block of swap disk
 * space SWAPFILE_CLUSTER pages long that is naturally aligned on disk. All
 * free clusters are organized into a list. We fetch an entry from the list to
 * get a free cluster.
 *
 * The data field stores the next cluster if the cluster is free, or the
 * cluster usage counter otherwise. The flags field determines if a cluster
 * is free. This is protected by swap_info_struct.lock.
 */
struct swap_cluster_info {
        spinlock_t lock;        /*
                                 * Protect swap_cluster_info fields
                                 * and the swap_info_struct->swap_map
                                 * elements that correspond to this
                                 * swap cluster
                                 */
        unsigned int data:24;
        unsigned int flags:8;
};
#define CLUSTER_FLAG_FREE 1 /* This cluster is free */
#define CLUSTER_FLAG_NEXT_NULL 2 /* This cluster has no next cluster */
#define CLUSTER_FLAG_HUGE 4 /* This cluster is backing a transparent huge page */
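
/*
 * Editor's note, an illustrative sketch: the 24-bit data field is
 * overloaded, so readers check flags first:
 *
 *	if (ci->flags & CLUSTER_FLAG_FREE)
 *		next = ci->data;	// index of the next free cluster
 *	else
 *		inuse = ci->data;	// pages allocated in this cluster
 */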

/*
 * We assign a cluster to each CPU, so each CPU can allocate swap entries
 * from its own cluster and swap out sequentially. The purpose is to
 * optimize swapout throughput.
 */
struct percpu_cluster {
        struct swap_cluster_info index; /* Current cluster index */
        unsigned int next; /* Likely next allocation offset */
};

struct swap_cluster_list {
        struct swap_cluster_info head;
        struct swap_cluster_info tail;
};

/*
 * The in-memory structure used to track swap areas.
 */
struct swap_info_struct {
        unsigned long   flags;          /* SWP_USED etc: see above */
        signed short    prio;           /* swap priority of this type */
        struct plist_node list;         /* entry in swap_active_head */
        signed char     type;           /* strange name for an index */
        unsigned int    max;            /* extent of the swap_map */
        unsigned char *swap_map;        /* vmalloc'ed array of usage counts */
        struct swap_cluster_info *cluster_info; /* cluster info. Only for SSD */
        struct swap_cluster_list free_clusters; /* free clusters list */
        unsigned int lowest_bit;        /* index of first free in swap_map */
        unsigned int highest_bit;       /* index of last free in swap_map */
        unsigned int pages;             /* total of usable pages of swap */
        unsigned int inuse_pages;       /* number of those currently in use */
        unsigned int cluster_next;      /* likely index for next allocation */
        unsigned int cluster_nr;        /* countdown to next cluster search */
        struct percpu_cluster __percpu *percpu_cluster; /* per-CPU swap cluster */
        struct rb_root swap_extent_root;/* root of the swap extent rbtree */
        struct block_device *bdev;      /* swap device or bdev of swap file */
        struct file *swap_file;         /* seldom referenced */
        unsigned int old_block_size;    /* seldom referenced */
#ifdef CONFIG_FRONTSWAP
        unsigned long *frontswap_map;   /* frontswap in-use, one bit per page */
        atomic_t frontswap_pages;       /* frontswap pages in-use counter */
#endif
        spinlock_t lock;                /*
                                         * protect map scan related fields like
                                         * swap_map, lowest_bit, highest_bit,
                                         * inuse_pages, cluster_next,
                                         * cluster_nr, lowest_alloc,
                                         * highest_alloc, free/discard cluster
                                         * list. Other fields are only changed
                                         * at swapon/swapoff, so are protected
                                         * by swap_lock. Changing flags
                                         * requires holding both this lock and
                                         * swap_lock; when both are needed,
                                         * take swap_lock first.
                                         */
        spinlock_t cont_lock;           /*
                                         * protect swap count continuation page
                                         * list.
                                         */
        struct work_struct discard_work; /* discard worker */
        struct swap_cluster_list discard_clusters; /* discard clusters list */
        struct plist_node avail_lists[0]; /*
                                           * entries in swap_avail_heads, one
                                           * entry per node.
                                           * Must be last: the array length is
                                           * nr_node_ids, which is not a fixed
                                           * value, so the trailing space has
                                           * to be allocated dynamically.
                                           * And it has to be an array so that
                                           * plist_for_each_* can work.
                                           */
};
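
/*
 * Editor's note, an illustrative sketch: because avail_lists[] is sized by
 * nr_node_ids, swapon allocates the structure with trailing space, roughly:
 *
 *	p = kvzalloc(sizeof(*p) +
 *		     nr_node_ids * sizeof(p->avail_lists[0]), GFP_KERNEL);
 */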

#ifdef CONFIG_64BIT
#define SWAP_RA_ORDER_CEILING   5
#else
/* Avoid stack overflow, because we need to save part of the page table */
#define SWAP_RA_ORDER_CEILING   3
#define SWAP_RA_PTE_CACHE_SIZE  (1 << SWAP_RA_ORDER_CEILING)
#endif

struct vma_swap_readahead {
        unsigned short win;
        unsigned short offset;
        unsigned short nr_pte;
#ifdef CONFIG_64BIT
        pte_t *ptes;
#else
        pte_t ptes[SWAP_RA_PTE_CACHE_SIZE];
#endif
};

/* linux/mm/workingset.c */
void *workingset_eviction(struct page *page);
void workingset_refault(struct page *page, void *shadow);
void workingset_activation(struct page *page);

/* Only track the nodes of mappings with shadow entries */
void workingset_update_node(struct xa_node *node);
#define mapping_set_update(xas, mapping) do {                           \
        if (!dax_mapping(mapping) && !shmem_mapping(mapping))           \
                xas_set_update(xas, workingset_update_node);            \
} while (0)

/* linux/mm/page_alloc.c */
extern unsigned long totalreserve_pages;
extern unsigned long nr_free_buffer_pages(void);
extern unsigned long nr_free_pagecache_pages(void);

/* Definition of global_zone_page_state not available yet */
#define nr_free_pages() global_zone_page_state(NR_FREE_PAGES)


/* linux/mm/swap.c */
extern void lru_cache_add(struct page *);
extern void lru_cache_add_anon(struct page *page);
extern void lru_cache_add_file(struct page *page);
extern void lru_add_page_tail(struct page *page, struct page *page_tail,
                         struct lruvec *lruvec, struct list_head *head);
extern void activate_page(struct page *);
extern void mark_page_accessed(struct page *);
extern void lru_add_drain(void);
extern void lru_add_drain_cpu(int cpu);
extern void lru_add_drain_all(void);
extern void rotate_reclaimable_page(struct page *page);
extern void deactivate_file_page(struct page *page);
extern void deactivate_page(struct page *page);
extern void mark_page_lazyfree(struct page *page);
extern void swap_setup(void);

extern void lru_cache_add_active_or_unevictable(struct page *page,
                                                struct vm_area_struct *vma);

/* linux/mm/vmscan.c */
extern unsigned long zone_reclaimable_pages(struct zone *zone);
extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
                                        gfp_t gfp_mask, nodemask_t *mask);
extern int __isolate_lru_page(struct page *page, isolate_mode_t mode);
extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
                                                  unsigned long nr_pages,
                                                  gfp_t gfp_mask,
                                                  bool may_swap);
extern unsigned long mem_cgroup_shrink_node(struct mem_cgroup *mem,
                                                gfp_t gfp_mask, bool noswap,
                                                pg_data_t *pgdat,
                                                unsigned long *nr_scanned);
extern unsigned long shrink_all_memory(unsigned long nr_pages);
extern int vm_swappiness;
extern int remove_mapping(struct address_space *mapping, struct page *page);
extern unsigned long vm_total_pages;

extern unsigned long reclaim_pages(struct list_head *page_list);
#ifdef CONFIG_NUMA
extern int node_reclaim_mode;
extern int sysctl_min_unmapped_ratio;
extern int sysctl_min_slab_ratio;
#else
#define node_reclaim_mode 0
#endif

extern int page_evictable(struct page *page);
extern void check_move_unevictable_pages(struct pagevec *pvec);

extern int kswapd_run(int nid);
extern void kswapd_stop(int nid);

#ifdef CONFIG_SWAP

#include <linux/blk_types.h> /* for bio_end_io_t */

/* linux/mm/page_io.c */
extern int swap_readpage(struct page *page, bool do_poll);
extern int swap_writepage(struct page *page, struct writeback_control *wbc);
extern void end_swap_bio_write(struct bio *bio);
extern int __swap_writepage(struct page *page, struct writeback_control *wbc,
        bio_end_io_t end_write_func);
extern int swap_set_page_dirty(struct page *page);

int add_swap_extent(struct swap_info_struct *sis, unsigned long start_page,
                unsigned long nr_pages, sector_t start_block);
int generic_swapfile_activate(struct swap_info_struct *, struct file *,
                sector_t *);

/* linux/mm/swap_state.c */
/* One swap address space for each 64M swap space */
#define SWAP_ADDRESS_SPACE_SHIFT        14
#define SWAP_ADDRESS_SPACE_PAGES        (1 << SWAP_ADDRESS_SPACE_SHIFT)
extern struct address_space *swapper_spaces[];
#define swap_address_space(entry)                           \
        (&swapper_spaces[swp_type(entry)][swp_offset(entry) \
                >> SWAP_ADDRESS_SPACE_SHIFT])
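
/*
 * Editor's note, worked example: one address_space covers
 * SWAP_ADDRESS_SPACE_PAGES = 2^14 pages, i.e. 64MB with 4K pages, and a
 * swap-cache lookup indexes the per-type array by entry, roughly:
 *
 *	struct address_space *as = swap_address_space(entry);
 *	struct page *page = find_get_page(as, swp_offset(entry));
 */
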
extern unsigned long total_swapcache_pages(void);
extern void show_swap_cache_info(void);
extern int add_to_swap(struct page *page);
extern int add_to_swap_cache(struct page *, swp_entry_t, gfp_t);
extern int __add_to_swap_cache(struct page *page, swp_entry_t entry);
extern void __delete_from_swap_cache(struct page *, swp_entry_t entry);
extern void delete_from_swap_cache(struct page *);
extern void free_page_and_swap_cache(struct page *);
extern void free_pages_and_swap_cache(struct page **, int);
extern struct page *lookup_swap_cache(swp_entry_t entry,
                                      struct vm_area_struct *vma,
                                      unsigned long addr);
extern struct page *read_swap_cache_async(swp_entry_t, gfp_t,
                        struct vm_area_struct *vma, unsigned long addr,
                        bool do_poll);
extern struct page *__read_swap_cache_async(swp_entry_t, gfp_t,
                        struct vm_area_struct *vma, unsigned long addr,
                        bool *new_page_allocated);
extern struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t flag,
                                struct vm_fault *vmf);
extern struct page *swapin_readahead(swp_entry_t entry, gfp_t flag,
                                struct vm_fault *vmf);

/* linux/mm/swapfile.c */
extern atomic_long_t nr_swap_pages;
extern long total_swap_pages;
extern atomic_t nr_rotate_swap;
extern bool has_usable_swap(void);

/* Swap 50% full? Release swapcache more aggressively. */
static inline bool vm_swap_full(void)
{
        return atomic_long_read(&nr_swap_pages) * 2 < total_swap_pages;
}

static inline long get_nr_swap_pages(void)
{
        return atomic_long_read(&nr_swap_pages);
}

extern void si_swapinfo(struct sysinfo *);
extern swp_entry_t get_swap_page(struct page *page);
extern void put_swap_page(struct page *page, swp_entry_t entry);
extern swp_entry_t get_swap_page_of_type(int);
extern int get_swap_pages(int n, swp_entry_t swp_entries[], int entry_size);
extern int add_swap_count_continuation(swp_entry_t, gfp_t);
extern void swap_shmem_alloc(swp_entry_t);
extern int swap_duplicate(swp_entry_t);
extern int swapcache_prepare(swp_entry_t);
extern void swap_free(swp_entry_t);
extern void swapcache_free_entries(swp_entry_t *entries, int n);
extern int free_swap_and_cache(swp_entry_t);
extern int swap_type_of(dev_t, sector_t, struct block_device **);
extern unsigned int count_swap_pages(int, int);
extern sector_t map_swap_page(struct page *, struct block_device **);
extern sector_t swapdev_block(int, pgoff_t);
extern int page_swapcount(struct page *);
extern int __swap_count(swp_entry_t entry);
extern int __swp_swapcount(swp_entry_t entry);
extern int swp_swapcount(swp_entry_t entry);
extern struct swap_info_struct *page_swap_info(struct page *);
extern struct swap_info_struct *swp_swap_info(swp_entry_t entry);
extern bool reuse_swap_page(struct page *, int *);
extern int try_to_free_swap(struct page *);
struct backing_dev_info;
extern int init_swap_address_space(unsigned int type, unsigned long nr_pages);
extern void exit_swap_address_space(unsigned int type);
extern struct swap_info_struct *get_swap_device(swp_entry_t entry);

static inline void put_swap_device(struct swap_info_struct *si)
{
        rcu_read_unlock();
}
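
/*
 * Editor's note, an illustrative sketch: get_swap_device() enters an RCU
 * read-side critical section and validates the entry against a racing
 * swapoff; callers pair it with put_swap_device(), roughly:
 *
 *	struct swap_info_struct *si = get_swap_device(entry);
 *	if (si) {
 *		// ... use si; the device cannot be swapped off here ...
 *		put_swap_device(si);
 *	}
 */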

#else /* CONFIG_SWAP */

static inline int swap_readpage(struct page *page, bool do_poll)
{
        return 0;
}

static inline struct swap_info_struct *swp_swap_info(swp_entry_t entry)
{
        return NULL;
}

#define swap_address_space(entry)               (NULL)
#define get_nr_swap_pages()                     0L
#define total_swap_pages                        0L
#define total_swapcache_pages()                 0UL
#define vm_swap_full()                          0

#define si_swapinfo(val) \
        do { (val)->freeswap = (val)->totalswap = 0; } while (0)
/* Only sparc cannot include linux/pagemap.h in this file,
 * so leave put_page and release_pages undeclared... */
#define free_page_and_swap_cache(page) \
        put_page(page)
#define free_pages_and_swap_cache(pages, nr) \
        release_pages((pages), (nr));

static inline void show_swap_cache_info(void)
{
}

#define free_swap_and_cache(e) ({(is_migration_entry(e) || is_device_private_entry(e));})
#define swapcache_prepare(e) ({(is_migration_entry(e) || is_device_private_entry(e));})

static inline int add_swap_count_continuation(swp_entry_t swp, gfp_t gfp_mask)
{
        return 0;
}

static inline void swap_shmem_alloc(swp_entry_t swp)
{
}

static inline int swap_duplicate(swp_entry_t swp)
{
        return 0;
}

static inline void swap_free(swp_entry_t swp)
{
}

static inline void put_swap_page(struct page *page, swp_entry_t swp)
{
}

static inline struct page *swap_cluster_readahead(swp_entry_t entry,
                                gfp_t gfp_mask, struct vm_fault *vmf)
{
        return NULL;
}

static inline struct page *swapin_readahead(swp_entry_t swp, gfp_t gfp_mask,
                        struct vm_fault *vmf)
{
        return NULL;
}

static inline int swap_writepage(struct page *p, struct writeback_control *wbc)
{
        return 0;
}

static inline struct page *lookup_swap_cache(swp_entry_t swp,
                                             struct vm_area_struct *vma,
                                             unsigned long addr)
{
        return NULL;
}

static inline int add_to_swap(struct page *page)
{
        return 0;
}

static inline int add_to_swap_cache(struct page *page, swp_entry_t entry,
                                                        gfp_t gfp_mask)
{
        return -1;
}

static inline void __delete_from_swap_cache(struct page *page,
                                                        swp_entry_t entry)
{
}

static inline void delete_from_swap_cache(struct page *page)
{
}

static inline int page_swapcount(struct page *page)
{
        return 0;
}

static inline int __swap_count(swp_entry_t entry)
{
        return 0;
}

static inline int __swp_swapcount(swp_entry_t entry)
{
        return 0;
}

static inline int swp_swapcount(swp_entry_t entry)
{
        return 0;
}

#define reuse_swap_page(page, total_map_swapcount) \
        (page_trans_huge_mapcount(page, total_map_swapcount) == 1)

static inline int try_to_free_swap(struct page *page)
{
        return 0;
}

static inline swp_entry_t get_swap_page(struct page *page)
{
        swp_entry_t entry;
        entry.val = 0;
        return entry;
}

#endif /* CONFIG_SWAP */

#ifdef CONFIG_THP_SWAP
extern int split_swap_cluster(swp_entry_t entry);
#else
static inline int split_swap_cluster(swp_entry_t entry)
{
        return 0;
}
#endif

#ifdef CONFIG_MEMCG
static inline int mem_cgroup_swappiness(struct mem_cgroup *memcg)
{
        /* Cgroup2 doesn't have per-cgroup swappiness */
        if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
                return vm_swappiness;

        /* root ? */
        if (mem_cgroup_disabled() || mem_cgroup_is_root(memcg))
                return vm_swappiness;

        return memcg->swappiness;
}
#else
static inline int mem_cgroup_swappiness(struct mem_cgroup *mem)
{
        return vm_swappiness;
}
#endif

#if defined(CONFIG_SWAP) && defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP)
extern void mem_cgroup_throttle_swaprate(struct mem_cgroup *memcg, int node,
                                         gfp_t gfp_mask);
#else
static inline void mem_cgroup_throttle_swaprate(struct mem_cgroup *memcg,
                                                int node, gfp_t gfp_mask)
{
}
#endif

#ifdef CONFIG_MEMCG_SWAP
extern void mem_cgroup_swapout(struct page *page, swp_entry_t entry);
extern int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry);
extern void mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages);
extern long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg);
extern bool mem_cgroup_swap_full(struct page *page);
#else
static inline void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
{
}

static inline int mem_cgroup_try_charge_swap(struct page *page,
                                             swp_entry_t entry)
{
        return 0;
}

static inline void mem_cgroup_uncharge_swap(swp_entry_t entry,
                                            unsigned int nr_pages)
{
}

static inline long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
{
        return get_nr_swap_pages();
}

static inline bool mem_cgroup_swap_full(struct page *page)
{
        return vm_swap_full();
}
#endif

#endif /* __KERNEL__ */
#endif /* _LINUX_SWAP_H */
