/*
 * zbud.c
 *
 * Copyright (C) 2013, Seth Jennings, IBM
 *
 * Concepts based on zcache internal zbud allocator by Dan Magenheimer.
 *
 * zbud is a special purpose allocator for storing compressed pages.  Contrary
 * to what its name may suggest, zbud is not a buddy allocator, but rather an
 * allocator that "buddies" two compressed pages together in a single memory
 * page.
 *
 * While this design limits storage density, it has simple and deterministic
 * reclaim properties that make it preferable to a higher density approach when
 * reclaim will be used.
 *
 * zbud works by storing compressed pages, or "zpages", together in pairs in a
 * single memory page called a "zbud page".  The first buddy is "left
 * justified" at the beginning of the zbud page, and the last buddy is "right
 * justified" at the end of the zbud page.  The benefit is that if either
 * buddy is freed, the freed buddy space, coalesced with whatever slack space
 * existed between the buddies, results in the largest possible free region
 * within the zbud page.
 *
 * zbud also provides an attractive lower bound on density.  The ratio of
 * zpages to zbud pages cannot be less than 1.  This ensures that zbud can
 * never "do harm" by using more pages to store zpages than the uncompressed
 * zpages would have used on their own.
 *
 * zbud pages are divided into "chunks".  The size of the chunks is fixed at
 * compile time and determined by NCHUNKS_ORDER below.  Dividing zbud pages
 * into chunks allows organizing unbuddied zbud pages into a manageable number
 * of unbuddied lists according to the number of free chunks available in the
 * zbud page.
 *
 * The zbud API differs from that of conventional allocators in that the
 * allocation function, zbud_alloc(), returns an opaque handle to the user,
 * not a dereferenceable pointer.  The user must map the handle using
 * zbud_map() in order to get a usable pointer by which to access the
 * allocation data and unmap the handle with zbud_unmap() when operations
 * on the allocation data are complete.
 */
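
/*
 * A minimal usage sketch of the API described above (illustrative only; the
 * pool, ops, buffer and length names are hypothetical and error handling is
 * trimmed):
 *
 *	struct zbud_pool *pool = zbud_create_pool(GFP_KERNEL, &my_zbud_ops);
 *	unsigned long handle;
 *
 *	if (zbud_alloc(pool, compressed_len, GFP_KERNEL, &handle) == 0) {
 *		void *dst = zbud_map(pool, handle);
 *
 *		memcpy(dst, compressed_buf, compressed_len);
 *		zbud_unmap(pool, handle);
 *		...
 *		zbud_free(pool, handle);
 *	}
 *	zbud_destroy_pool(pool);
 */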

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/atomic.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/preempt.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/zbud.h>
#include <linux/zpool.h>

/*****************
 * Structures
*****************/
/*
 * NCHUNKS_ORDER determines the internal allocation granularity, effectively
 * adjusting internal fragmentation.  It also determines the number of
 * freelists maintained in each pool.  NCHUNKS_ORDER of 6 means that the
 * allocation granularity will be in chunks of size PAGE_SIZE/64.  Since one
 * chunk in each allocated page is occupied by the zbud header, NCHUNKS works
 * out to 63, the maximum number of free chunks in a zbud page; there will
 * also be 63 freelists per pool.
 */
#define NCHUNKS_ORDER	6

#define CHUNK_SHIFT	(PAGE_SHIFT - NCHUNKS_ORDER)
#define CHUNK_SIZE	(1 << CHUNK_SHIFT)
#define ZHDR_SIZE_ALIGNED CHUNK_SIZE
#define NCHUNKS		((PAGE_SIZE - ZHDR_SIZE_ALIGNED) >> CHUNK_SHIFT)
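
/*
 * Worked example (assuming a 4 KiB page, i.e. PAGE_SHIFT == 12; this is an
 * assumption for illustration, not a requirement of the code):
 *	CHUNK_SHIFT       = 12 - 6 = 6, so CHUNK_SIZE = 64 bytes
 *	ZHDR_SIZE_ALIGNED = 64 bytes (one chunk reserved for the header)
 *	NCHUNKS           = (4096 - 64) >> 6 = 63 usable chunks per zbud page
 */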

/**
 * struct zbud_pool - stores metadata for each zbud pool
 * @lock:	protects all pool fields and first|last_chunks fields of any
 *		zbud page in the pool
 * @unbuddied:	array of lists tracking zbud pages that contain only one buddy;
 *		the list a zbud page is added to depends on the size of
 *		its free region.
 * @buddied:	list tracking the zbud pages that contain two buddies;
 *		these zbud pages are full
 * @lru:	list tracking the zbud pages in LRU order by most recently
 *		added buddy.
 * @pages_nr:	number of zbud pages in the pool.
 * @ops:	pointer to a structure of user defined operations specified at
 *		pool creation time.
 *
 * This structure is allocated at pool creation time and maintains metadata
 * pertaining to a particular zbud pool.
 */
struct zbud_pool {
	spinlock_t lock;
	struct list_head unbuddied[NCHUNKS];
	struct list_head buddied;
	struct list_head lru;
	u64 pages_nr;
	struct zbud_ops *ops;
};

/*
 * struct zbud_header - zbud page metadata occupying the first chunk of each
 *			zbud page.
 * @buddy:	links the zbud page into the unbuddied/buddied lists in the pool
 * @lru:	links the zbud page into the lru list in the pool
 * @first_chunks:	the size of the first buddy in chunks, 0 if free
 * @last_chunks:	the size of the last buddy in chunks, 0 if free
 * @under_reclaim:	zbud page is being reclaimed; freeing is deferred to
 *			the reclaim path
 */
struct zbud_header {
	struct list_head buddy;
	struct list_head lru;
	unsigned int first_chunks;
	unsigned int last_chunks;
	bool under_reclaim;
};
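
/*
 * For illustration, a zbud page is laid out roughly as follows (not to scale;
 * the slack region shrinks as the buddies grow):
 *
 *	+-------------+---------------+....free / slack....+--------------+
 *	| zbud_header |  first buddy  |                    |  last buddy  |
 *	| (one chunk) | first_chunks  |                    | last_chunks  |
 *	+-------------+---------------+....................+--------------+
 *	^ page start                                           page end ^
 */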

/*****************
 * zpool
 ****************/

#ifdef CONFIG_ZPOOL

static int zbud_zpool_evict(struct zbud_pool *pool, unsigned long handle)
{
	return zpool_evict(pool, handle);
}

static struct zbud_ops zbud_zpool_ops = {
	.evict =	zbud_zpool_evict
};

static void *zbud_zpool_create(char *name, gfp_t gfp,
			struct zpool_ops *zpool_ops)
{
	return zbud_create_pool(gfp, zpool_ops ? &zbud_zpool_ops : NULL);
}

static void zbud_zpool_destroy(void *pool)
{
	zbud_destroy_pool(pool);
}

static int zbud_zpool_malloc(void *pool, size_t size, gfp_t gfp,
			unsigned long *handle)
{
	return zbud_alloc(pool, size, gfp, handle);
}

static void zbud_zpool_free(void *pool, unsigned long handle)
{
	zbud_free(pool, handle);
}

static int zbud_zpool_shrink(void *pool, unsigned int pages,
			unsigned int *reclaimed)
{
	unsigned int total = 0;
	int ret = -EINVAL;

	while (total < pages) {
		ret = zbud_reclaim_page(pool, 8);
		if (ret < 0)
			break;
		total++;
	}

	if (reclaimed)
		*reclaimed = total;

	return ret;
}

static void *zbud_zpool_map(void *pool, unsigned long handle,
			enum zpool_mapmode mm)
{
	return zbud_map(pool, handle);
}

static void zbud_zpool_unmap(void *pool, unsigned long handle)
{
	zbud_unmap(pool, handle);
}

static u64 zbud_zpool_total_size(void *pool)
{
	return zbud_get_pool_size(pool) * PAGE_SIZE;
}

static struct zpool_driver zbud_zpool_driver = {
	.type =		"zbud",
	.owner =	THIS_MODULE,
	.create =	zbud_zpool_create,
	.destroy =	zbud_zpool_destroy,
	.malloc =	zbud_zpool_malloc,
	.free =		zbud_zpool_free,
	.shrink =	zbud_zpool_shrink,
	.map =		zbud_zpool_map,
	.unmap =	zbud_zpool_unmap,
	.total_size =	zbud_zpool_total_size,
};

MODULE_ALIAS("zpool-zbud");
#endif /* CONFIG_ZPOOL */
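
/*
 * With the driver registered, users such as zswap reach zbud through the
 * generic zpool layer by asking for the "zbud" type.  A sketch only; the
 * exact zpool_create_pool() signature varies between kernel versions, so
 * treat the call below as an assumption:
 *
 *	struct zpool *zpool = zpool_create_pool("zbud", "my_pool", GFP_KERNEL,
 *						&my_zpool_ops);
 */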

/*****************
 * Helpers
*****************/
/* Just to make the code easier to read */
enum buddy {
	FIRST,
	LAST
};

/* Converts an allocation size in bytes to size in zbud chunks */
static int size_to_chunks(size_t size)
{
	return (size + CHUNK_SIZE - 1) >> CHUNK_SHIFT;
}
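
/*
 * Example, assuming 64-byte chunks: a 100-byte allocation rounds up to
 * (100 + 63) >> 6 = 2 chunks, while a 64-byte allocation needs exactly
 * (64 + 63) >> 6 = 1 chunk.
 */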

#define for_each_unbuddied_list(_iter, _begin) \
	for ((_iter) = (_begin); (_iter) < NCHUNKS; (_iter)++)

/* Initializes the zbud header of a newly allocated zbud page */
static struct zbud_header *init_zbud_page(struct page *page)
{
	struct zbud_header *zhdr = page_address(page);

	zhdr->first_chunks = 0;
	zhdr->last_chunks = 0;
	INIT_LIST_HEAD(&zhdr->buddy);
	INIT_LIST_HEAD(&zhdr->lru);
	zhdr->under_reclaim = false;
	return zhdr;
}

/* Resets the struct page fields and frees the page */
static void free_zbud_page(struct zbud_header *zhdr)
{
	__free_page(virt_to_page(zhdr));
}

/*
 * Encodes the handle of a particular buddy within a zbud page
 * Pool lock should be held as this function accesses first|last_chunks
 */
static unsigned long encode_handle(struct zbud_header *zhdr, enum buddy bud)
{
	unsigned long handle;

	/*
	 * For now, the encoded handle is actually just the pointer to the data
	 * but this might not always be the case.  A little information hiding.
	 * Add CHUNK_SIZE to the handle if it is the first allocation to jump
	 * over the zbud header in the first chunk.
	 */
	handle = (unsigned long)zhdr;
	if (bud == FIRST)
		/* skip over zbud header */
		handle += ZHDR_SIZE_ALIGNED;
	else /* bud == LAST */
		handle += PAGE_SIZE - (zhdr->last_chunks << CHUNK_SHIFT);
	return handle;
}
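
/*
 * Handle layout example (hypothetical address, assuming a 4 KiB page and
 * 64-byte chunks): for a zbud page at 0xffff880000001000, the FIRST buddy's
 * handle is 0xffff880000001040 (one chunk past the header), while a LAST
 * buddy of 3 chunks gets handle 0xffff880000001f40, i.e. 3 * 64 bytes back
 * from the end of the page.
 */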

/* Returns the zbud page where a given handle is stored */
static struct zbud_header *handle_to_zbud_header(unsigned long handle)
{
	return (struct zbud_header *)(handle & PAGE_MASK);
}

/* Returns the number of free chunks in a zbud page */
static int num_free_chunks(struct zbud_header *zhdr)
{
	/*
	 * Rather than branch for different situations, just use the fact that
	 * free buddies have a length of zero to simplify everything.
	 */
	return NCHUNKS - zhdr->first_chunks - zhdr->last_chunks;
}
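
/*
 * Example, assuming NCHUNKS == 63: a zbud page whose first buddy occupies
 * 20 chunks and whose last buddy is free has 63 - 20 - 0 = 43 free chunks,
 * so it is kept on unbuddied[43].
 */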

/*****************
 * API Functions
*****************/
/**
 * zbud_create_pool() - create a new zbud pool
 * @gfp:	gfp flags when allocating the zbud pool structure
 * @ops:	user-defined operations for the zbud pool
 *
 * Return: pointer to the new zbud pool or NULL if the metadata allocation
 * failed.
 */
struct zbud_pool *zbud_create_pool(gfp_t gfp, struct zbud_ops *ops)
{
	struct zbud_pool *pool;
	int i;

	pool = kmalloc(sizeof(struct zbud_pool), gfp);
	if (!pool)
		return NULL;
	spin_lock_init(&pool->lock);
	for_each_unbuddied_list(i, 0)
		INIT_LIST_HEAD(&pool->unbuddied[i]);
	INIT_LIST_HEAD(&pool->buddied);
	INIT_LIST_HEAD(&pool->lru);
	pool->pages_nr = 0;
	pool->ops = ops;
	return pool;
}

/**
 * zbud_destroy_pool() - destroys an existing zbud pool
 * @pool:	the zbud pool to be destroyed
 *
 * The pool should be emptied before this function is called.
 */
void zbud_destroy_pool(struct zbud_pool *pool)
{
	kfree(pool);
}

/**
 * zbud_alloc() - allocates a region of a given size
 * @pool:	zbud pool from which to allocate
 * @size:	size in bytes of the desired allocation
 * @gfp:	gfp flags used if the pool needs to grow
 * @handle:	handle of the new allocation
 *
 * This function will attempt to find a free region in the pool large enough to
 * satisfy the allocation request.  A search of the unbuddied lists is
 * performed first. If no suitable free region is found, then a new page is
 * allocated and added to the pool to satisfy the request.
 *
 * gfp should not set __GFP_HIGHMEM as highmem pages cannot be used
 * as zbud pool pages.
 *
 * Return: 0 if success and handle is set, otherwise -EINVAL if the size or
 * gfp arguments are invalid, -ENOSPC if the allocation cannot fit in a zbud
 * page, or -ENOMEM if the pool was unable to allocate a new page.
 */
int zbud_alloc(struct zbud_pool *pool, size_t size, gfp_t gfp,
			unsigned long *handle)
{
	int chunks, i, freechunks;
	struct zbud_header *zhdr = NULL;
	enum buddy bud;
	struct page *page;

	if (!size || (gfp & __GFP_HIGHMEM))
		return -EINVAL;
	if (size > PAGE_SIZE - ZHDR_SIZE_ALIGNED - CHUNK_SIZE)
		return -ENOSPC;
	chunks = size_to_chunks(size);
	spin_lock(&pool->lock);

	/* First, try to find an unbuddied zbud page. */
	for_each_unbuddied_list(i, chunks) {
		if (!list_empty(&pool->unbuddied[i])) {
			zhdr = list_first_entry(&pool->unbuddied[i],
					struct zbud_header, buddy);
			list_del(&zhdr->buddy);
			if (zhdr->first_chunks == 0)
				bud = FIRST;
			else
				bud = LAST;
			goto found;
		}
	}

	/* Couldn't find unbuddied zbud page, create new one */
	spin_unlock(&pool->lock);
	page = alloc_page(gfp);
	if (!page)
		return -ENOMEM;
	spin_lock(&pool->lock);
	pool->pages_nr++;
	zhdr = init_zbud_page(page);
	bud = FIRST;

found:
	if (bud == FIRST)
		zhdr->first_chunks = chunks;
	else
		zhdr->last_chunks = chunks;

	if (zhdr->first_chunks == 0 || zhdr->last_chunks == 0) {
		/* Add to unbuddied list */
		freechunks = num_free_chunks(zhdr);
		list_add(&zhdr->buddy, &pool->unbuddied[freechunks]);
	} else {
		/* Add to buddied list */
		list_add(&zhdr->buddy, &pool->buddied);
	}

	/* Add/move zbud page to beginning of LRU */
	if (!list_empty(&zhdr->lru))
		list_del(&zhdr->lru);
	list_add(&zhdr->lru, &pool->lru);

	*handle = encode_handle(zhdr, bud);
	spin_unlock(&pool->lock);

	return 0;
}

/**
 * zbud_free() - frees the allocation associated with the given handle
 * @pool:	pool in which the allocation resided
 * @handle:	handle associated with the allocation returned by zbud_alloc()
 *
 * In the case that the zbud page in which the allocation resides is under
 * reclaim, as indicated by the under_reclaim flag in the zbud header, this
 * function only sets the first|last_chunks to 0.  The page is actually freed
 * once both buddies are evicted (see zbud_reclaim_page() below).
 */
void zbud_free(struct zbud_pool *pool, unsigned long handle)
{
	struct zbud_header *zhdr;
	int freechunks;

	spin_lock(&pool->lock);
	zhdr = handle_to_zbud_header(handle);

	/* The first buddy's handle is page aligned once the header is subtracted */
	if ((handle - ZHDR_SIZE_ALIGNED) & ~PAGE_MASK)
		zhdr->last_chunks = 0;
	else
		zhdr->first_chunks = 0;

	if (zhdr->under_reclaim) {
		/* zbud page is under reclaim, reclaim will free */
		spin_unlock(&pool->lock);
		return;
	}

	/* Remove from existing buddy list */
	list_del(&zhdr->buddy);

	if (zhdr->first_chunks == 0 && zhdr->last_chunks == 0) {
		/* zbud page is empty, free */
		list_del(&zhdr->lru);
		free_zbud_page(zhdr);
		pool->pages_nr--;
	} else {
		/* Add to unbuddied list */
		freechunks = num_free_chunks(zhdr);
		list_add(&zhdr->buddy, &pool->unbuddied[freechunks]);
	}

	spin_unlock(&pool->lock);
}

#define list_tail_entry(ptr, type, member) \
	list_entry((ptr)->prev, type, member)

/**
 * zbud_reclaim_page() - evicts allocations from a pool page and frees it
 * @pool:	pool from which a page will attempt to be evicted
 * @retries:	number of pages on the LRU list for which eviction will
 *		be attempted before failing
 *
 * zbud reclaim is different from normal system reclaim in that it is done
 * from the bottom, up.  This is because only the bottom layer, zbud, has
 * information on how the allocations are organized within each zbud page. This
 * has the potential to create interesting locking situations between zbud and
 * the user, however.
 *
 * To avoid these, this is how zbud_reclaim_page() should be called:
 *
 * The user detects a page should be reclaimed and calls zbud_reclaim_page().
 * zbud_reclaim_page() will remove a zbud page from the pool LRU list and call
 * the user-defined eviction handler with the pool and handle as arguments.
 *
 * If the handle can not be evicted, the eviction handler should return
 * non-zero. zbud_reclaim_page() will add the zbud page back to the
 * appropriate list and try the next zbud page on the LRU up to
 * a user defined number of retries.
 *
 * If the handle is successfully evicted, the eviction handler should
 * return 0 _and_ should have called zbud_free() on the handle. zbud_free()
 * contains logic to delay freeing the page if the page is under reclaim,
 * as indicated by the under_reclaim flag in the zbud header.
 *
 * If all buddies in the zbud page are successfully evicted, then the
 * zbud page can be freed.  A sketch of such an eviction handler follows the
 * function body below.
 *
 * Return: 0 if page is successfully freed, otherwise -EINVAL if there are
 * no pages to evict or an eviction handler is not registered, -EAGAIN if
 * the retry limit was hit.
 */
int zbud_reclaim_page(struct zbud_pool *pool, unsigned int retries)
{
	int i, ret, freechunks;
	struct zbud_header *zhdr;
	unsigned long first_handle = 0, last_handle = 0;

	spin_lock(&pool->lock);
	if (!pool->ops || !pool->ops->evict || list_empty(&pool->lru) ||
			retries == 0) {
		spin_unlock(&pool->lock);
		return -EINVAL;
	}
	for (i = 0; i < retries; i++) {
		zhdr = list_tail_entry(&pool->lru, struct zbud_header, lru);
		list_del(&zhdr->lru);
		list_del(&zhdr->buddy);
		/* Protect zbud page against free */
		zhdr->under_reclaim = true;
		/*
		 * We need to encode the handles before unlocking, since we can
		 * race with free that will set (first|last)_chunks to 0
		 */
		first_handle = 0;
		last_handle = 0;
		if (zhdr->first_chunks)
			first_handle = encode_handle(zhdr, FIRST);
		if (zhdr->last_chunks)
			last_handle = encode_handle(zhdr, LAST);
		spin_unlock(&pool->lock);

		/* Issue the eviction callback(s) */
		if (first_handle) {
			ret = pool->ops->evict(pool, first_handle);
			if (ret)
				goto next;
		}
		if (last_handle) {
			ret = pool->ops->evict(pool, last_handle);
			if (ret)
				goto next;
		}
next:
		spin_lock(&pool->lock);
		zhdr->under_reclaim = false;
		if (zhdr->first_chunks == 0 && zhdr->last_chunks == 0) {
			/*
			 * Both buddies are now free, free the zbud page and
			 * return success.
			 */
			free_zbud_page(zhdr);
			pool->pages_nr--;
			spin_unlock(&pool->lock);
			return 0;
		} else if (zhdr->first_chunks == 0 ||
				zhdr->last_chunks == 0) {
			/* add to unbuddied list */
			freechunks = num_free_chunks(zhdr);
			list_add(&zhdr->buddy, &pool->unbuddied[freechunks]);
		} else {
			/* add to buddied list */
			list_add(&zhdr->buddy, &pool->buddied);
		}

		/* add to beginning of LRU */
		list_add(&zhdr->lru, &pool->lru);
	}
	spin_unlock(&pool->lock);
	return -EAGAIN;
}
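
/*
 * A minimal sketch of an eviction handler following the protocol described
 * in the zbud_reclaim_page() comment above.  This is hypothetical user code:
 * the entry lookup and writeback helpers are assumptions, not part of zbud.
 *
 *	static int my_evict(struct zbud_pool *pool, unsigned long handle)
 *	{
 *		struct my_entry *entry = my_lookup_by_handle(handle);
 *
 *		if (!entry)
 *			return -ENOENT;          (can't evict; caller retries)
 *		my_write_back(entry);            (push data to backing store)
 *		zbud_free(pool, handle);         (required on success)
 *		return 0;
 *	}
 */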

/**
 * zbud_map() - maps the allocation associated with the given handle
 * @pool:	pool in which the allocation resides
 * @handle:	handle associated with the allocation to be mapped
 *
 * While trivial for zbud, the mapping functions for other allocators
 * implementing this allocation API could have more complex information encoded
 * in the handle and could create temporary mappings to make the data
 * accessible to the user.
 *
 * Return: a pointer to the mapped allocation
 */
void *zbud_map(struct zbud_pool *pool, unsigned long handle)
{
	return (void *)(handle);
}

/**
 * zbud_unmap() - unmaps the allocation associated with the given handle
 * @pool:	pool in which the allocation resides
 * @handle:	handle associated with the allocation to be unmapped
 */
void zbud_unmap(struct zbud_pool *pool, unsigned long handle)
{
}

/**
 * zbud_get_pool_size() - gets the zbud pool size in pages
 * @pool:	pool whose size is being queried
 *
 * Return: size in pages of the given pool.  The pool lock need not be
 * taken to access pages_nr.
 */
u64 zbud_get_pool_size(struct zbud_pool *pool)
{
	return pool->pages_nr;
}

static int __init init_zbud(void)
{
	/* Make sure the zbud header will fit in one chunk */
	BUILD_BUG_ON(sizeof(struct zbud_header) > ZHDR_SIZE_ALIGNED);
	pr_info("loaded\n");

#ifdef CONFIG_ZPOOL
	zpool_register_driver(&zbud_zpool_driver);
#endif

	return 0;
}

static void __exit exit_zbud(void)
{
#ifdef CONFIG_ZPOOL
	zpool_unregister_driver(&zbud_zpool_driver);
#endif

	pr_info("unloaded\n");
}

module_init(init_zbud);
module_exit(exit_zbud);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Seth Jennings <sjennings@variantweb.net>");
MODULE_DESCRIPTION("Buddy Allocator for Compressed Pages");