/*
 * zswap.c - zswap driver file
 *
 * zswap is a backend for frontswap that takes pages that are in the process
 * of being swapped out and attempts to compress and store them in a
 * RAM-based memory pool.  This can result in a significant I/O reduction on
 * the swap device and, in the case where decompressing from RAM is faster
 * than reading from the swap device, can also improve workload performance.
 *
 * Copyright (C) 2012  Seth Jennings <sjenning@linux.vnet.ibm.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
*/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/atomic.h>
#include <linux/frontswap.h>
#include <linux/rbtree.h>
#include <linux/swap.h>
#include <linux/crypto.h>
#include <linux/mempool.h>
#include <linux/zpool.h>

#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <linux/swapops.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>

/*********************************
* statistics
**********************************/
/* Total bytes used by the compressed storage */
static u64 zswap_pool_total_size;
/* The number of compressed pages currently stored in zswap */
static atomic_t zswap_stored_pages = ATOMIC_INIT(0);

/*
 * The statistics below are not protected from concurrent access for
 * performance reasons so they may not be 100% accurate.  However,
 * they do provide useful information on roughly how many times a
 * certain event is occurring.
*/

/* Pool limit was hit (see zswap_max_pool_percent) */
static u64 zswap_pool_limit_hit;
/* Pages written back when pool limit was reached */
static u64 zswap_written_back_pages;
/* Store failed due to a reclaim failure after pool limit was reached */
static u64 zswap_reject_reclaim_fail;
/* Compressed page was too big for the allocator to (optimally) store */
static u64 zswap_reject_compress_poor;
/* Store failed because underlying allocator could not get memory */
static u64 zswap_reject_alloc_fail;
/* Store failed because the entry metadata could not be allocated (rare) */
static u64 zswap_reject_kmemcache_fail;
/* Duplicate store was encountered (rare) */
static u64 zswap_duplicate_entry;

/*********************************
* tunables
**********************************/
/* Enable/disable zswap (disabled by default, fixed at boot for now) */
static bool zswap_enabled __read_mostly;
module_param_named(enabled, zswap_enabled, bool, 0444);
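/*
 * Example: with zswap built into the kernel, enable it by booting with
 * "zswap.enabled=1" on the kernel command line; the 0444 permission
 * above makes the value read-only once the system is up.
 */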

/* Compressor to be used by zswap (fixed at boot for now) */
#define ZSWAP_COMPRESSOR_DEFAULT "lzo"
static char *zswap_compressor = ZSWAP_COMPRESSOR_DEFAULT;
module_param_named(compressor, zswap_compressor, charp, 0444);
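/*
 * Example: boot with "zswap.compressor=lz4" to select a different
 * crypto compressor, assuming the kernel provides one by that name;
 * zswap_comp_init() below falls back to the default if it does not.
 */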

/* The maximum percentage of memory that the compressed pool can occupy */
static unsigned int zswap_max_pool_percent = 20;
module_param_named(max_pool_percent,
			zswap_max_pool_percent, uint, 0644);
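/*
 * Example: unlike the parameters above, this one is 0644 and can be
 * tuned at runtime through sysfs, e.g.:
 *
 *	echo 30 > /sys/module/zswap/parameters/max_pool_percent
 */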

/* Compressed storage to use */
#define ZSWAP_ZPOOL_DEFAULT "zbud"
static char *zswap_zpool_type = ZSWAP_ZPOOL_DEFAULT;
module_param_named(zpool, zswap_zpool_type, charp, 0444);
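/*
 * Example: boot with "zswap.zpool=zsmalloc" to use the zsmalloc
 * allocator instead of zbud, assuming it is compiled in; init_zswap()
 * below falls back to the default zpool if creation fails.
 */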

/* a single zpool is shared by the entire zswap backend */
static struct zpool *zswap_pool;

/*********************************
* compression functions
**********************************/
/* per-cpu compression transforms */
static struct crypto_comp * __percpu *zswap_comp_pcpu_tfms;

enum comp_op {
	ZSWAP_COMPOP_COMPRESS,
	ZSWAP_COMPOP_DECOMPRESS
};

static int zswap_comp_op(enum comp_op op, const u8 *src, unsigned int slen,
				u8 *dst, unsigned int *dlen)
{
	struct crypto_comp *tfm;
	int ret;

	tfm = *per_cpu_ptr(zswap_comp_pcpu_tfms, get_cpu());
	switch (op) {
	case ZSWAP_COMPOP_COMPRESS:
		ret = crypto_comp_compress(tfm, src, slen, dst, dlen);
		break;
	case ZSWAP_COMPOP_DECOMPRESS:
		ret = crypto_comp_decompress(tfm, src, slen, dst, dlen);
		break;
	default:
		ret = -EINVAL;
	}

	put_cpu();
	return ret;
}

static int __init zswap_comp_init(void)
{
	if (!crypto_has_comp(zswap_compressor, 0, 0)) {
		pr_info("%s compressor not available\n", zswap_compressor);
		/* fall back to default compressor */
		zswap_compressor = ZSWAP_COMPRESSOR_DEFAULT;
		if (!crypto_has_comp(zswap_compressor, 0, 0))
			/* can't even load the default compressor */
			return -ENODEV;
	}
	pr_info("using %s compressor\n", zswap_compressor);

	/* alloc percpu transforms */
	zswap_comp_pcpu_tfms = alloc_percpu(struct crypto_comp *);
	if (!zswap_comp_pcpu_tfms)
		return -ENOMEM;
	return 0;
}

static void __init zswap_comp_exit(void)
{
	/* free percpu transforms */
	free_percpu(zswap_comp_pcpu_tfms);
}

/*********************************
* data structures
**********************************/
/*
 * struct zswap_entry
 *
 * This structure contains the metadata for tracking a single compressed
 * page within zswap.
 *
 * rbnode - links the entry into the red-black tree for the appropriate
 *          swap type
 * refcount - the number of outstanding references to the entry. This is
 *            needed to protect against premature freeing of the entry by
 *            concurrent calls to load, invalidate, and writeback.  The lock
 *            for the zswap_tree structure that contains the entry must
 *            be held while changing the refcount.  Since the lock must
 *            be held, there is no reason to also make refcount atomic.
 * offset - the swap offset for the entry.  Index into the red-black tree.
 * handle - zpool allocation handle that stores the compressed page data
 * length - the length in bytes of the compressed page data.  Needed during
 *          decompression
 */
struct zswap_entry {
	struct rb_node rbnode;
	pgoff_t offset;
	int refcount;
	unsigned int length;
	unsigned long handle;
};

struct zswap_header {
	swp_entry_t swpentry;
};
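
/*
 * Note: each zpool allocation made by zswap_frontswap_store() begins
 * with a struct zswap_header followed by the compressed page data; the
 * stored swp_entry_t is what lets zswap_writeback_entry() map a bare
 * zpool handle back to its swap tree and offset.
 */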

/*
 * The tree lock in the zswap_tree struct protects a few things:
 * - the rbtree
 * - the refcount field of each entry in the tree
 */
struct zswap_tree {
	struct rb_root rbroot;
	spinlock_t lock;
};

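/* one tree per swap type, created on demand by zswap_frontswap_init() */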
static struct zswap_tree *zswap_trees[MAX_SWAPFILES];

/*********************************
* zswap entry functions
**********************************/
static struct kmem_cache *zswap_entry_cache;

static int __init zswap_entry_cache_create(void)
{
	zswap_entry_cache = KMEM_CACHE(zswap_entry, 0);
	return zswap_entry_cache == NULL;
}

static void __init zswap_entry_cache_destroy(void)
{
	kmem_cache_destroy(zswap_entry_cache);
}

static struct zswap_entry *zswap_entry_cache_alloc(gfp_t gfp)
{
	struct zswap_entry *entry;
	entry = kmem_cache_alloc(zswap_entry_cache, gfp);
	if (!entry)
		return NULL;
	entry->refcount = 1;
	RB_CLEAR_NODE(&entry->rbnode);
	return entry;
}

static void zswap_entry_cache_free(struct zswap_entry *entry)
{
	kmem_cache_free(zswap_entry_cache, entry);
}

/*********************************
* rbtree functions
**********************************/
static struct zswap_entry *zswap_rb_search(struct rb_root *root, pgoff_t offset)
{
	struct rb_node *node = root->rb_node;
	struct zswap_entry *entry;

	while (node) {
		entry = rb_entry(node, struct zswap_entry, rbnode);
		if (entry->offset > offset)
			node = node->rb_left;
		else if (entry->offset < offset)
			node = node->rb_right;
		else
			return entry;
	}
	return NULL;
}

/*
 * In the case that an entry with the same offset is found, a pointer to
 * the existing entry is stored in dupentry and the function returns -EEXIST
 */
static int zswap_rb_insert(struct rb_root *root, struct zswap_entry *entry,
			struct zswap_entry **dupentry)
{
	struct rb_node **link = &root->rb_node, *parent = NULL;
	struct zswap_entry *myentry;

	while (*link) {
		parent = *link;
		myentry = rb_entry(parent, struct zswap_entry, rbnode);
		if (myentry->offset > entry->offset)
			link = &(*link)->rb_left;
		else if (myentry->offset < entry->offset)
			link = &(*link)->rb_right;
		else {
			*dupentry = myentry;
			return -EEXIST;
		}
	}
	rb_link_node(&entry->rbnode, parent, link);
	rb_insert_color(&entry->rbnode, root);
	return 0;
}

static void zswap_rb_erase(struct rb_root *root, struct zswap_entry *entry)
{
	if (!RB_EMPTY_NODE(&entry->rbnode)) {
		rb_erase(&entry->rbnode, root);
		RB_CLEAR_NODE(&entry->rbnode);
	}
}

/*
 * Carries out the common pattern of freeing an entry's zpool allocation,
 * freeing the entry itself, and decrementing the number of stored pages.
 */
static void zswap_free_entry(struct zswap_entry *entry)
{
	zpool_free(zswap_pool, entry->handle);
	zswap_entry_cache_free(entry);
	atomic_dec(&zswap_stored_pages);
	zswap_pool_total_size = zpool_get_total_size(zswap_pool);
}

/* caller must hold the tree lock */
static void zswap_entry_get(struct zswap_entry *entry)
{
	entry->refcount++;
}

/*
 * Caller must hold the tree lock.
 * Removes the entry from the tree and frees it, if nobody references
 * the entry anymore.
 */
static void zswap_entry_put(struct zswap_tree *tree,
			struct zswap_entry *entry)
{
	int refcount = --entry->refcount;

	BUG_ON(refcount < 0);
	if (refcount == 0) {
		zswap_rb_erase(&tree->rbroot, entry);
		zswap_free_entry(entry);
	}
}

/* caller must hold the tree lock */
static struct zswap_entry *zswap_entry_find_get(struct rb_root *root,
				pgoff_t offset)
{
	struct zswap_entry *entry = NULL;

	entry = zswap_rb_search(root, offset);
	if (entry)
		zswap_entry_get(entry);

	return entry;
}
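
/*
 * A sketch of the typical lookup lifecycle used by the load and
 * writeback paths below: take a reference under the tree lock, drop
 * the lock while working on the entry's zpool data, then retake the
 * lock and put the reference:
 *
 *	spin_lock(&tree->lock);
 *	entry = zswap_entry_find_get(&tree->rbroot, offset);
 *	spin_unlock(&tree->lock);
 *	... use entry->handle, entry->length ...
 *	spin_lock(&tree->lock);
 *	zswap_entry_put(tree, entry);
 *	spin_unlock(&tree->lock);
 */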

/*********************************
* per-cpu code
**********************************/
static DEFINE_PER_CPU(u8 *, zswap_dstmem);

static int __zswap_cpu_notifier(unsigned long action, unsigned long cpu)
{
	struct crypto_comp *tfm;
	u8 *dst;

	switch (action) {
	case CPU_UP_PREPARE:
		tfm = crypto_alloc_comp(zswap_compressor, 0, 0);
		if (IS_ERR(tfm)) {
			pr_err("can't allocate compressor transform\n");
			return NOTIFY_BAD;
		}
		*per_cpu_ptr(zswap_comp_pcpu_tfms, cpu) = tfm;
		dst = kmalloc_node(PAGE_SIZE * 2, GFP_KERNEL, cpu_to_node(cpu));
		if (!dst) {
			pr_err("can't allocate compressor buffer\n");
			crypto_free_comp(tfm);
			*per_cpu_ptr(zswap_comp_pcpu_tfms, cpu) = NULL;
			return NOTIFY_BAD;
		}
		per_cpu(zswap_dstmem, cpu) = dst;
		break;
	case CPU_DEAD:
	case CPU_UP_CANCELED:
		tfm = *per_cpu_ptr(zswap_comp_pcpu_tfms, cpu);
		if (tfm) {
			crypto_free_comp(tfm);
			*per_cpu_ptr(zswap_comp_pcpu_tfms, cpu) = NULL;
		}
		dst = per_cpu(zswap_dstmem, cpu);
		kfree(dst);
		per_cpu(zswap_dstmem, cpu) = NULL;
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static int zswap_cpu_notifier(struct notifier_block *nb,
				unsigned long action, void *pcpu)
{
	unsigned long cpu = (unsigned long)pcpu;
	return __zswap_cpu_notifier(action, cpu);
}

static struct notifier_block zswap_cpu_notifier_block = {
	.notifier_call = zswap_cpu_notifier
};

static int __init zswap_cpu_init(void)
{
	unsigned long cpu;

	cpu_notifier_register_begin();
	for_each_online_cpu(cpu)
		if (__zswap_cpu_notifier(CPU_UP_PREPARE, cpu) != NOTIFY_OK)
			goto cleanup;
	__register_cpu_notifier(&zswap_cpu_notifier_block);
	cpu_notifier_register_done();
	return 0;

cleanup:
	for_each_online_cpu(cpu)
		__zswap_cpu_notifier(CPU_UP_CANCELED, cpu);
	cpu_notifier_register_done();
	return -ENOMEM;
}

/*********************************
* helpers
**********************************/
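/*
 * Worked example (assuming 4KiB pages): with the default
 * max_pool_percent of 20 on a machine with 262144 pages of RAM (1GiB),
 * the pool below is considered full once its compressed storage
 * occupies more than 262144 * 20 / 100 = 52428 pages' worth of bytes.
 */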
static bool zswap_is_full(void)
{
	return totalram_pages * zswap_max_pool_percent / 100 <
		DIV_ROUND_UP(zswap_pool_total_size, PAGE_SIZE);
}

/*********************************
* writeback code
**********************************/
/* return enum for zswap_get_swap_cache_page */
enum zswap_get_swap_ret {
	ZSWAP_SWAPCACHE_NEW,
	ZSWAP_SWAPCACHE_EXIST,
	ZSWAP_SWAPCACHE_FAIL,
};

/*
 * zswap_get_swap_cache_page
 *
 * This is an adaptation of read_swap_cache_async()
 *
 * This function tries to find a page with the given swap entry
 * in the swapper_space address space (the swap cache).  If the page
 * is found, it is returned in retpage.  Otherwise, a page is allocated,
 * added to the swap cache, and returned in retpage.
 *
 * On success, the swap cache page is returned in retpage.
 * Returns ZSWAP_SWAPCACHE_EXIST if the page was already in the swap cache
 * Returns ZSWAP_SWAPCACHE_NEW if a new page was allocated and needs to be
 *     populated; the new page is added to the swap cache and locked
 * Returns ZSWAP_SWAPCACHE_FAIL on error
 */
static int zswap_get_swap_cache_page(swp_entry_t entry,
				struct page **retpage)
{
	struct page *found_page, *new_page = NULL;
	struct address_space *swapper_space = swap_address_space(entry);
	int err;

	*retpage = NULL;
	do {
		/*
		 * First check the swap cache.  Since this is normally
		 * called after lookup_swap_cache() failed, re-calling
		 * that would confuse statistics.
		 */
		found_page = find_get_page(swapper_space, entry.val);
		if (found_page)
			break;

		/*
		 * Get a new page to read into from swap.
		 */
		if (!new_page) {
			new_page = alloc_page(GFP_KERNEL);
			if (!new_page)
				break; /* Out of memory */
		}

		/*
		 * call radix_tree_preload() while we can wait.
		 */
		err = radix_tree_preload(GFP_KERNEL);
		if (err)
			break;

		/*
		 * Swap entry may have been freed since our caller observed it.
		 */
		err = swapcache_prepare(entry);
		if (err == -EEXIST) { /* seems racy */
			radix_tree_preload_end();
			continue;
		}
		if (err) { /* swp entry is obsolete ? */
			radix_tree_preload_end();
			break;
		}

		/* May fail (-ENOMEM) if radix-tree node allocation failed. */
		__set_page_locked(new_page);
		SetPageSwapBacked(new_page);
		err = __add_to_swap_cache(new_page, entry);
		if (likely(!err)) {
			radix_tree_preload_end();
			lru_cache_add_anon(new_page);
			*retpage = new_page;
			return ZSWAP_SWAPCACHE_NEW;
		}
		radix_tree_preload_end();
		ClearPageSwapBacked(new_page);
		__clear_page_locked(new_page);
		/*
		 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
		 * clear SWAP_HAS_CACHE flag.
		 */
		swapcache_free(entry);
	} while (err != -ENOMEM);

	if (new_page)
		page_cache_release(new_page);
	if (!found_page)
		return ZSWAP_SWAPCACHE_FAIL;
	*retpage = found_page;
	return ZSWAP_SWAPCACHE_EXIST;
}

/*
 * Attempts to free an entry by adding a page to the swap cache,
 * decompressing the entry data into the page, and issuing a
 * bio write to write the page back to the swap device.
 *
 * This can be thought of as a "resumed writeback" of the page
 * to the swap device.  We are basically resuming the same swap
 * writeback path that was intercepted with the frontswap_store()
 * in the first place.  After the page has been decompressed into
 * the swap cache, the compressed version stored by zswap can be
 * freed.
 */
static int zswap_writeback_entry(struct zpool *pool, unsigned long handle)
{
	struct zswap_header *zhdr;
	swp_entry_t swpentry;
	struct zswap_tree *tree;
	pgoff_t offset;
	struct zswap_entry *entry;
	struct page *page;
	u8 *src, *dst;
	unsigned int dlen;
	int ret;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_NONE,
	};

	/* extract swpentry from data */
	zhdr = zpool_map_handle(pool, handle, ZPOOL_MM_RO);
	swpentry = zhdr->swpentry; /* here */
	zpool_unmap_handle(pool, handle);
	tree = zswap_trees[swp_type(swpentry)];
	offset = swp_offset(swpentry);

	/* find and ref zswap entry */
	spin_lock(&tree->lock);
	entry = zswap_entry_find_get(&tree->rbroot, offset);
	if (!entry) {
		/* entry was invalidated */
		spin_unlock(&tree->lock);
		return 0;
	}
	spin_unlock(&tree->lock);
	BUG_ON(offset != entry->offset);

	/* try to allocate swap cache page */
	switch (zswap_get_swap_cache_page(swpentry, &page)) {
	case ZSWAP_SWAPCACHE_FAIL: /* no memory or invalidate happened */
		ret = -ENOMEM;
		goto fail;

	case ZSWAP_SWAPCACHE_EXIST:
		/* page is already in the swap cache, ignore for now */
		page_cache_release(page);
		ret = -EEXIST;
		goto fail;

	case ZSWAP_SWAPCACHE_NEW: /* page is locked */
		/* decompress */
		dlen = PAGE_SIZE;
		src = (u8 *)zpool_map_handle(zswap_pool, entry->handle,
				ZPOOL_MM_RO) + sizeof(struct zswap_header);
		dst = kmap_atomic(page);
		ret = zswap_comp_op(ZSWAP_COMPOP_DECOMPRESS, src,
				entry->length, dst, &dlen);
		kunmap_atomic(dst);
		zpool_unmap_handle(zswap_pool, entry->handle);
		BUG_ON(ret);
		BUG_ON(dlen != PAGE_SIZE);

		/* page is up to date */
		SetPageUptodate(page);
	}

	/* move it to the tail of the inactive list after end_writeback */
	SetPageReclaim(page);

	/* start writeback */
	__swap_writepage(page, &wbc, end_swap_bio_write);
	page_cache_release(page);
	zswap_written_back_pages++;

	spin_lock(&tree->lock);
	/* drop local reference */
	zswap_entry_put(tree, entry);

	/*
	 * There are two possible situations for the entry here:
	 * (1) refcount is 1 (normal case): the entry is valid and on the tree
	 * (2) refcount is 0: the entry was freed and removed from the tree
	 *     because an invalidate happened during writeback
	 * Search the tree; if the entry is still there, drop the tree
	 * reference as well.
	 */
	if (entry == zswap_rb_search(&tree->rbroot, offset))
		zswap_entry_put(tree, entry);
	spin_unlock(&tree->lock);

	goto end;

	/*
	 * If we get here because of ZSWAP_SWAPCACHE_EXIST, a load may be
	 * happening concurrently, so it is safe and okay not to free the
	 * entry.  If the following put does free it, it is also okay to
	 * return !0.
	 */
fail:
	spin_lock(&tree->lock);
	zswap_entry_put(tree, entry);
	spin_unlock(&tree->lock);

end:
	return ret;
}

/*********************************
* frontswap hooks
**********************************/
/* attempts to compress and store a single page */
static int zswap_frontswap_store(unsigned type, pgoff_t offset,
				struct page *page)
{
	struct zswap_tree *tree = zswap_trees[type];
	struct zswap_entry *entry, *dupentry;
	int ret;
	unsigned int dlen = PAGE_SIZE, len;
	unsigned long handle;
	char *buf;
	u8 *src, *dst;
	struct zswap_header *zhdr;

	if (!tree) {
		ret = -ENODEV;
		goto reject;
	}

	/* reclaim space if needed */
	if (zswap_is_full()) {
		zswap_pool_limit_hit++;
		if (zpool_shrink(zswap_pool, 1, NULL)) {
			zswap_reject_reclaim_fail++;
			ret = -ENOMEM;
			goto reject;
		}
	}

	/* allocate entry */
	entry = zswap_entry_cache_alloc(GFP_KERNEL);
	if (!entry) {
		zswap_reject_kmemcache_fail++;
		ret = -ENOMEM;
		goto reject;
	}

	/* compress */
	dst = get_cpu_var(zswap_dstmem);
	src = kmap_atomic(page);
	ret = zswap_comp_op(ZSWAP_COMPOP_COMPRESS, src, PAGE_SIZE, dst, &dlen);
	kunmap_atomic(src);
	if (ret) {
		ret = -EINVAL;
		goto freepage;
	}

	/* store */
	len = dlen + sizeof(struct zswap_header);
	ret = zpool_malloc(zswap_pool, len, __GFP_NORETRY | __GFP_NOWARN,
		&handle);
	if (ret == -ENOSPC) {
		zswap_reject_compress_poor++;
		goto freepage;
	}
	if (ret) {
		zswap_reject_alloc_fail++;
		goto freepage;
	}
	zhdr = zpool_map_handle(zswap_pool, handle, ZPOOL_MM_RW);
	zhdr->swpentry = swp_entry(type, offset);
	buf = (u8 *)(zhdr + 1);
	memcpy(buf, dst, dlen);
	zpool_unmap_handle(zswap_pool, handle);
	put_cpu_var(zswap_dstmem);

	/* populate entry */
	entry->offset = offset;
	entry->handle = handle;
	entry->length = dlen;

	/* map */
	spin_lock(&tree->lock);
	do {
		ret = zswap_rb_insert(&tree->rbroot, entry, &dupentry);
		if (ret == -EEXIST) {
			zswap_duplicate_entry++;
			/* remove from rbtree */
			zswap_rb_erase(&tree->rbroot, dupentry);
			zswap_entry_put(tree, dupentry);
		}
	} while (ret == -EEXIST);
	spin_unlock(&tree->lock);

	/* update stats */
	atomic_inc(&zswap_stored_pages);
	zswap_pool_total_size = zpool_get_total_size(zswap_pool);

	return 0;

freepage:
	put_cpu_var(zswap_dstmem);
	zswap_entry_cache_free(entry);
reject:
	return ret;
}

/*
 * Returns 0 if the page was successfully decompressed.
 * Returns -1 if the entry was not found or an error occurred.
 */
static int zswap_frontswap_load(unsigned type, pgoff_t offset,
				struct page *page)
{
	struct zswap_tree *tree = zswap_trees[type];
	struct zswap_entry *entry;
	u8 *src, *dst;
	unsigned int dlen;
	int ret;

	/* find */
	spin_lock(&tree->lock);
	entry = zswap_entry_find_get(&tree->rbroot, offset);
	if (!entry) {
		/* entry was written back */
		spin_unlock(&tree->lock);
		return -1;
	}
	spin_unlock(&tree->lock);

	/* decompress */
	dlen = PAGE_SIZE;
	src = (u8 *)zpool_map_handle(zswap_pool, entry->handle,
			ZPOOL_MM_RO) + sizeof(struct zswap_header);
	dst = kmap_atomic(page);
	ret = zswap_comp_op(ZSWAP_COMPOP_DECOMPRESS, src, entry->length,
		dst, &dlen);
	kunmap_atomic(dst);
	zpool_unmap_handle(zswap_pool, entry->handle);
	BUG_ON(ret);

	spin_lock(&tree->lock);
	zswap_entry_put(tree, entry);
	spin_unlock(&tree->lock);

	return 0;
}

/* frees an entry in zswap */
static void zswap_frontswap_invalidate_page(unsigned type, pgoff_t offset)
{
	struct zswap_tree *tree = zswap_trees[type];
	struct zswap_entry *entry;

	/* find */
	spin_lock(&tree->lock);
	entry = zswap_rb_search(&tree->rbroot, offset);
	if (!entry) {
		/* entry was written back */
		spin_unlock(&tree->lock);
		return;
	}

	/* remove from rbtree */
	zswap_rb_erase(&tree->rbroot, entry);

	/* drop the initial reference from entry creation */
	zswap_entry_put(tree, entry);

	spin_unlock(&tree->lock);
}

/* frees all zswap entries for the given swap type */
static void zswap_frontswap_invalidate_area(unsigned type)
{
	struct zswap_tree *tree = zswap_trees[type];
	struct zswap_entry *entry, *n;

	if (!tree)
		return;

	/* walk the tree and free everything */
	spin_lock(&tree->lock);
	rbtree_postorder_for_each_entry_safe(entry, n, &tree->rbroot, rbnode)
		zswap_free_entry(entry);
	tree->rbroot = RB_ROOT;
	spin_unlock(&tree->lock);
	kfree(tree);
	zswap_trees[type] = NULL;
}

static struct zpool_ops zswap_zpool_ops = {
	.evict = zswap_writeback_entry
};

static void zswap_frontswap_init(unsigned type)
{
	struct zswap_tree *tree;

	tree = kzalloc(sizeof(struct zswap_tree), GFP_KERNEL);
	if (!tree) {
		pr_err("alloc failed, zswap disabled for swap type %d\n", type);
		return;
	}

	tree->rbroot = RB_ROOT;
	spin_lock_init(&tree->lock);
	zswap_trees[type] = tree;
}

static struct frontswap_ops zswap_frontswap_ops = {
	.store = zswap_frontswap_store,
	.load = zswap_frontswap_load,
	.invalidate_page = zswap_frontswap_invalidate_page,
	.invalidate_area = zswap_frontswap_invalidate_area,
	.init = zswap_frontswap_init
};

/*********************************
* debugfs functions
**********************************/
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>

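/*
 * The counters below show up under /sys/kernel/debug/zswap/, assuming
 * debugfs is mounted at the usual location, e.g.:
 *
 *	grep . /sys/kernel/debug/zswap/*
 */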
static struct dentry *zswap_debugfs_root;

static int __init zswap_debugfs_init(void)
{
	if (!debugfs_initialized())
		return -ENODEV;

	zswap_debugfs_root = debugfs_create_dir("zswap", NULL);
	if (!zswap_debugfs_root)
		return -ENOMEM;

	debugfs_create_u64("pool_limit_hit", S_IRUGO,
			zswap_debugfs_root, &zswap_pool_limit_hit);
	debugfs_create_u64("reject_reclaim_fail", S_IRUGO,
			zswap_debugfs_root, &zswap_reject_reclaim_fail);
	debugfs_create_u64("reject_alloc_fail", S_IRUGO,
			zswap_debugfs_root, &zswap_reject_alloc_fail);
	debugfs_create_u64("reject_kmemcache_fail", S_IRUGO,
			zswap_debugfs_root, &zswap_reject_kmemcache_fail);
	debugfs_create_u64("reject_compress_poor", S_IRUGO,
			zswap_debugfs_root, &zswap_reject_compress_poor);
	debugfs_create_u64("written_back_pages", S_IRUGO,
			zswap_debugfs_root, &zswap_written_back_pages);
	debugfs_create_u64("duplicate_entry", S_IRUGO,
			zswap_debugfs_root, &zswap_duplicate_entry);
	debugfs_create_u64("pool_total_size", S_IRUGO,
			zswap_debugfs_root, &zswap_pool_total_size);
	debugfs_create_atomic_t("stored_pages", S_IRUGO,
			zswap_debugfs_root, &zswap_stored_pages);

	return 0;
}

static void __exit zswap_debugfs_exit(void)
{
	debugfs_remove_recursive(zswap_debugfs_root);
}
#else
static int __init zswap_debugfs_init(void)
{
	return 0;
}

static void __exit zswap_debugfs_exit(void) { }
#endif

/*********************************
* module init and exit
**********************************/
static int __init init_zswap(void)
{
	gfp_t gfp = __GFP_NORETRY | __GFP_NOWARN;

	if (!zswap_enabled)
		return 0;

	pr_info("loading zswap\n");

	zswap_pool = zpool_create_pool(zswap_zpool_type, "zswap", gfp,
					&zswap_zpool_ops);
	if (!zswap_pool && strcmp(zswap_zpool_type, ZSWAP_ZPOOL_DEFAULT)) {
		pr_info("%s zpool not available\n", zswap_zpool_type);
		zswap_zpool_type = ZSWAP_ZPOOL_DEFAULT;
		zswap_pool = zpool_create_pool(zswap_zpool_type, "zswap", gfp,
					&zswap_zpool_ops);
	}
	if (!zswap_pool) {
		pr_err("%s zpool not available\n", zswap_zpool_type);
		pr_err("zpool creation failed\n");
		goto error;
	}
	pr_info("using %s pool\n", zswap_zpool_type);

	if (zswap_entry_cache_create()) {
		pr_err("entry cache creation failed\n");
		goto cachefail;
	}
	if (zswap_comp_init()) {
		pr_err("compressor initialization failed\n");
		goto compfail;
	}
	if (zswap_cpu_init()) {
		pr_err("per-cpu initialization failed\n");
		goto pcpufail;
	}

	frontswap_register_ops(&zswap_frontswap_ops);
	if (zswap_debugfs_init())
		pr_warn("debugfs initialization failed\n");
	return 0;
pcpufail:
	zswap_comp_exit();
compfail:
	zswap_entry_cache_destroy();
cachefail:
	zpool_destroy_pool(zswap_pool);
error:
	return -ENOMEM;
}
/* must be late so crypto has time to come up */
late_initcall(init_zswap);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Seth Jennings <sjennings@variantweb.net>");
MODULE_DESCRIPTION("Compressed cache for swap pages");