This source file includes the following definitions.
- zbud_zpool_evict
- zbud_zpool_create
- zbud_zpool_destroy
- zbud_zpool_malloc
- zbud_zpool_free
- zbud_zpool_shrink
- zbud_zpool_map
- zbud_zpool_unmap
- zbud_zpool_total_size
- size_to_chunks
- init_zbud_page
- free_zbud_page
- encode_handle
- handle_to_zbud_header
- num_free_chunks
- zbud_create_pool
- zbud_destroy_pool
- zbud_alloc
- zbud_free
- zbud_reclaim_page
- zbud_map
- zbud_unmap
- zbud_get_pool_size
- init_zbud
- exit_zbud
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
46
47 #include <linux/atomic.h>
48 #include <linux/list.h>
49 #include <linux/mm.h>
50 #include <linux/module.h>
51 #include <linux/preempt.h>
52 #include <linux/slab.h>
53 #include <linux/spinlock.h>
54 #include <linux/zbud.h>
55 #include <linux/zpool.h>
56
57
58
59
60
61
62
63
64
65
66
67
68
69 #define NCHUNKS_ORDER 6
70
71 #define CHUNK_SHIFT (PAGE_SHIFT - NCHUNKS_ORDER)
72 #define CHUNK_SIZE (1 << CHUNK_SHIFT)
73 #define ZHDR_SIZE_ALIGNED CHUNK_SIZE
74 #define NCHUNKS ((PAGE_SIZE - ZHDR_SIZE_ALIGNED) >> CHUNK_SHIFT)
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94 struct zbud_pool {
95 spinlock_t lock;
96 struct list_head unbuddied[NCHUNKS];
97 struct list_head buddied;
98 struct list_head lru;
99 u64 pages_nr;
100 const struct zbud_ops *ops;
101 #ifdef CONFIG_ZPOOL
102 struct zpool *zpool;
103 const struct zpool_ops *zpool_ops;
104 #endif
105 };
106
107
108
109
110
111
112
113
114
115 struct zbud_header {
116 struct list_head buddy;
117 struct list_head lru;
118 unsigned int first_chunks;
119 unsigned int last_chunks;
120 bool under_reclaim;
121 };
122
123
124
125
126
127 #ifdef CONFIG_ZPOOL
128
129 static int zbud_zpool_evict(struct zbud_pool *pool, unsigned long handle)
130 {
131 if (pool->zpool && pool->zpool_ops && pool->zpool_ops->evict)
132 return pool->zpool_ops->evict(pool->zpool, handle);
133 else
134 return -ENOENT;
135 }
136
137 static const struct zbud_ops zbud_zpool_ops = {
138 .evict = zbud_zpool_evict
139 };
140
141 static void *zbud_zpool_create(const char *name, gfp_t gfp,
142 const struct zpool_ops *zpool_ops,
143 struct zpool *zpool)
144 {
145 struct zbud_pool *pool;
146
147 pool = zbud_create_pool(gfp, zpool_ops ? &zbud_zpool_ops : NULL);
148 if (pool) {
149 pool->zpool = zpool;
150 pool->zpool_ops = zpool_ops;
151 }
152 return pool;
153 }
154
155 static void zbud_zpool_destroy(void *pool)
156 {
157 zbud_destroy_pool(pool);
158 }
159
160 static int zbud_zpool_malloc(void *pool, size_t size, gfp_t gfp,
161 unsigned long *handle)
162 {
163 return zbud_alloc(pool, size, gfp, handle);
164 }
165 static void zbud_zpool_free(void *pool, unsigned long handle)
166 {
167 zbud_free(pool, handle);
168 }
169
170 static int zbud_zpool_shrink(void *pool, unsigned int pages,
171 unsigned int *reclaimed)
172 {
173 unsigned int total = 0;
174 int ret = -EINVAL;
175
176 while (total < pages) {
177 ret = zbud_reclaim_page(pool, 8);
178 if (ret < 0)
179 break;
180 total++;
181 }
182
183 if (reclaimed)
184 *reclaimed = total;
185
186 return ret;
187 }
188
189 static void *zbud_zpool_map(void *pool, unsigned long handle,
190 enum zpool_mapmode mm)
191 {
192 return zbud_map(pool, handle);
193 }
194 static void zbud_zpool_unmap(void *pool, unsigned long handle)
195 {
196 zbud_unmap(pool, handle);
197 }
198
199 static u64 zbud_zpool_total_size(void *pool)
200 {
201 return zbud_get_pool_size(pool) * PAGE_SIZE;
202 }
203
204 static struct zpool_driver zbud_zpool_driver = {
205 .type = "zbud",
206 .owner = THIS_MODULE,
207 .create = zbud_zpool_create,
208 .destroy = zbud_zpool_destroy,
209 .malloc = zbud_zpool_malloc,
210 .free = zbud_zpool_free,
211 .shrink = zbud_zpool_shrink,
212 .map = zbud_zpool_map,
213 .unmap = zbud_zpool_unmap,
214 .total_size = zbud_zpool_total_size,
215 };
216
217 MODULE_ALIAS("zpool-zbud");
218 #endif
219
220
221
222
223
224 enum buddy {
225 FIRST,
226 LAST
227 };
228
229
230 static int size_to_chunks(size_t size)
231 {
232 return (size + CHUNK_SIZE - 1) >> CHUNK_SHIFT;
233 }
234
235 #define for_each_unbuddied_list(_iter, _begin) \
236 for ((_iter) = (_begin); (_iter) < NCHUNKS; (_iter)++)
237
238
239 static struct zbud_header *init_zbud_page(struct page *page)
240 {
241 struct zbud_header *zhdr = page_address(page);
242 zhdr->first_chunks = 0;
243 zhdr->last_chunks = 0;
244 INIT_LIST_HEAD(&zhdr->buddy);
245 INIT_LIST_HEAD(&zhdr->lru);
246 zhdr->under_reclaim = 0;
247 return zhdr;
248 }
249
250
251 static void free_zbud_page(struct zbud_header *zhdr)
252 {
253 __free_page(virt_to_page(zhdr));
254 }
255
256
257
258
259
260 static unsigned long encode_handle(struct zbud_header *zhdr, enum buddy bud)
261 {
262 unsigned long handle;
263
264
265
266
267
268
269
270 handle = (unsigned long)zhdr;
271 if (bud == FIRST)
272
273 handle += ZHDR_SIZE_ALIGNED;
274 else
275 handle += PAGE_SIZE - (zhdr->last_chunks << CHUNK_SHIFT);
276 return handle;
277 }
278
279
280 static struct zbud_header *handle_to_zbud_header(unsigned long handle)
281 {
282 return (struct zbud_header *)(handle & PAGE_MASK);
283 }
284
285
286 static int num_free_chunks(struct zbud_header *zhdr)
287 {
288
289
290
291
292 return NCHUNKS - zhdr->first_chunks - zhdr->last_chunks;
293 }
294
295
296
297
298
299
300
301
302
303
304
305
306 struct zbud_pool *zbud_create_pool(gfp_t gfp, const struct zbud_ops *ops)
307 {
308 struct zbud_pool *pool;
309 int i;
310
311 pool = kzalloc(sizeof(struct zbud_pool), gfp);
312 if (!pool)
313 return NULL;
314 spin_lock_init(&pool->lock);
315 for_each_unbuddied_list(i, 0)
316 INIT_LIST_HEAD(&pool->unbuddied[i]);
317 INIT_LIST_HEAD(&pool->buddied);
318 INIT_LIST_HEAD(&pool->lru);
319 pool->pages_nr = 0;
320 pool->ops = ops;
321 return pool;
322 }
323
324
325
326
327
328
329
330 void zbud_destroy_pool(struct zbud_pool *pool)
331 {
332 kfree(pool);
333 }
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354 int zbud_alloc(struct zbud_pool *pool, size_t size, gfp_t gfp,
355 unsigned long *handle)
356 {
357 int chunks, i, freechunks;
358 struct zbud_header *zhdr = NULL;
359 enum buddy bud;
360 struct page *page;
361
362 if (!size || (gfp & __GFP_HIGHMEM))
363 return -EINVAL;
364 if (size > PAGE_SIZE - ZHDR_SIZE_ALIGNED - CHUNK_SIZE)
365 return -ENOSPC;
366 chunks = size_to_chunks(size);
367 spin_lock(&pool->lock);
368
369
370 zhdr = NULL;
371 for_each_unbuddied_list(i, chunks) {
372 if (!list_empty(&pool->unbuddied[i])) {
373 zhdr = list_first_entry(&pool->unbuddied[i],
374 struct zbud_header, buddy);
375 list_del(&zhdr->buddy);
376 if (zhdr->first_chunks == 0)
377 bud = FIRST;
378 else
379 bud = LAST;
380 goto found;
381 }
382 }
383
384
385 spin_unlock(&pool->lock);
386 page = alloc_page(gfp);
387 if (!page)
388 return -ENOMEM;
389 spin_lock(&pool->lock);
390 pool->pages_nr++;
391 zhdr = init_zbud_page(page);
392 bud = FIRST;
393
394 found:
395 if (bud == FIRST)
396 zhdr->first_chunks = chunks;
397 else
398 zhdr->last_chunks = chunks;
399
400 if (zhdr->first_chunks == 0 || zhdr->last_chunks == 0) {
401
402 freechunks = num_free_chunks(zhdr);
403 list_add(&zhdr->buddy, &pool->unbuddied[freechunks]);
404 } else {
405
406 list_add(&zhdr->buddy, &pool->buddied);
407 }
408
409
410 if (!list_empty(&zhdr->lru))
411 list_del(&zhdr->lru);
412 list_add(&zhdr->lru, &pool->lru);
413
414 *handle = encode_handle(zhdr, bud);
415 spin_unlock(&pool->lock);
416
417 return 0;
418 }
419
420
421
422
423
424
425
426
427
428
429
430 void zbud_free(struct zbud_pool *pool, unsigned long handle)
431 {
432 struct zbud_header *zhdr;
433 int freechunks;
434
435 spin_lock(&pool->lock);
436 zhdr = handle_to_zbud_header(handle);
437
438
439 if ((handle - ZHDR_SIZE_ALIGNED) & ~PAGE_MASK)
440 zhdr->last_chunks = 0;
441 else
442 zhdr->first_chunks = 0;
443
444 if (zhdr->under_reclaim) {
445
446 spin_unlock(&pool->lock);
447 return;
448 }
449
450
451 list_del(&zhdr->buddy);
452
453 if (zhdr->first_chunks == 0 && zhdr->last_chunks == 0) {
454
455 list_del(&zhdr->lru);
456 free_zbud_page(zhdr);
457 pool->pages_nr--;
458 } else {
459
460 freechunks = num_free_chunks(zhdr);
461 list_add(&zhdr->buddy, &pool->unbuddied[freechunks]);
462 }
463
464 spin_unlock(&pool->lock);
465 }
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502 int zbud_reclaim_page(struct zbud_pool *pool, unsigned int retries)
503 {
504 int i, ret, freechunks;
505 struct zbud_header *zhdr;
506 unsigned long first_handle = 0, last_handle = 0;
507
508 spin_lock(&pool->lock);
509 if (!pool->ops || !pool->ops->evict || list_empty(&pool->lru) ||
510 retries == 0) {
511 spin_unlock(&pool->lock);
512 return -EINVAL;
513 }
514 for (i = 0; i < retries; i++) {
515 zhdr = list_last_entry(&pool->lru, struct zbud_header, lru);
516 list_del(&zhdr->lru);
517 list_del(&zhdr->buddy);
518
519 zhdr->under_reclaim = true;
520
521
522
523
524 first_handle = 0;
525 last_handle = 0;
526 if (zhdr->first_chunks)
527 first_handle = encode_handle(zhdr, FIRST);
528 if (zhdr->last_chunks)
529 last_handle = encode_handle(zhdr, LAST);
530 spin_unlock(&pool->lock);
531
532
533 if (first_handle) {
534 ret = pool->ops->evict(pool, first_handle);
535 if (ret)
536 goto next;
537 }
538 if (last_handle) {
539 ret = pool->ops->evict(pool, last_handle);
540 if (ret)
541 goto next;
542 }
543 next:
544 spin_lock(&pool->lock);
545 zhdr->under_reclaim = false;
546 if (zhdr->first_chunks == 0 && zhdr->last_chunks == 0) {
547
548
549
550
551 free_zbud_page(zhdr);
552 pool->pages_nr--;
553 spin_unlock(&pool->lock);
554 return 0;
555 } else if (zhdr->first_chunks == 0 ||
556 zhdr->last_chunks == 0) {
557
558 freechunks = num_free_chunks(zhdr);
559 list_add(&zhdr->buddy, &pool->unbuddied[freechunks]);
560 } else {
561
562 list_add(&zhdr->buddy, &pool->buddied);
563 }
564
565
566 list_add(&zhdr->lru, &pool->lru);
567 }
568 spin_unlock(&pool->lock);
569 return -EAGAIN;
570 }
571
572
573
574
575
576
577
578
579
580
581
582
583
584 void *zbud_map(struct zbud_pool *pool, unsigned long handle)
585 {
586 return (void *)(handle);
587 }
588
589
590
591
592
593
594 void zbud_unmap(struct zbud_pool *pool, unsigned long handle)
595 {
596 }
597
598
599
600
601
602
603
604
605 u64 zbud_get_pool_size(struct zbud_pool *pool)
606 {
607 return pool->pages_nr;
608 }
609
610 static int __init init_zbud(void)
611 {
612
613 BUILD_BUG_ON(sizeof(struct zbud_header) > ZHDR_SIZE_ALIGNED);
614 pr_info("loaded\n");
615
616 #ifdef CONFIG_ZPOOL
617 zpool_register_driver(&zbud_zpool_driver);
618 #endif
619
620 return 0;
621 }
622
623 static void __exit exit_zbud(void)
624 {
625 #ifdef CONFIG_ZPOOL
626 zpool_unregister_driver(&zbud_zpool_driver);
627 #endif
628
629 pr_info("unloaded\n");
630 }
631
632 module_init(init_zbud);
633 module_exit(exit_zbud);
634
635 MODULE_LICENSE("GPL");
636 MODULE_AUTHOR("Seth Jennings <sjennings@variantweb.net>");
637 MODULE_DESCRIPTION("Buddy Allocator for Compressed Pages");