This source file includes the following definitions:
- ion_page_pool_alloc_pages
- ion_page_pool_free_pages
- ion_page_pool_add
- ion_page_pool_remove
- ion_page_pool_alloc
- ion_page_pool_free
- ion_page_pool_total
- ion_page_pool_shrink
- ion_page_pool_create
- ion_page_pool_destroy
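The pool sits between an ION heap and the page allocator: a heap creates a pool for one page order and gfp mask, pulls pages out with ion_page_pool_alloc() (which falls back to alloc_pages() when the cache is empty), and hands freed pages back with ion_page_pool_free() so they can be reused. The lifecycle sketch below is illustrative only and assumes the declarations from ion.h included further down; the order-0 pool, the gfp flags, and example_pool_roundtrip() are not taken from this file.

/*
 * Illustrative only: round-trip one page through an order-0 pool.
 * Real heaps keep their pools for the lifetime of the heap.
 */
static int example_pool_roundtrip(void)
{
        struct ion_page_pool *pool;
        struct page *page;
        int cached;

        pool = ion_page_pool_create(GFP_HIGHUSER | __GFP_ZERO, 0);
        if (!pool)
                return -ENOMEM;

        page = ion_page_pool_alloc(pool);       /* cached page, or a fresh alloc_pages() */
        if (!page) {
                ion_page_pool_destroy(pool);
                return -ENOMEM;
        }

        ion_page_pool_free(pool, page);         /* back into the pool, not the buddy allocator */

        cached = ion_page_pool_shrink(pool, GFP_HIGHUSER, 0);   /* nr_to_scan == 0: just count */
        ion_page_pool_shrink(pool, GFP_HIGHUSER, cached);       /* actually drain the cache */
        ion_page_pool_destroy(pool);
        return 0;
}

The explicit drain matters because ion_page_pool_destroy() only kfree()s the bookkeeping structure; any pages still cached in the pool would otherwise be leaked.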
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/sched/signal.h>

#include "ion.h"

static inline struct page *ion_page_pool_alloc_pages(struct ion_page_pool *pool)
{
        if (fatal_signal_pending(current))
                return NULL;
        return alloc_pages(pool->gfp_mask, pool->order);
}

static void ion_page_pool_free_pages(struct ion_page_pool *pool,
                                     struct page *page)
{
        __free_pages(page, pool->order);
}

static void ion_page_pool_add(struct ion_page_pool *pool, struct page *page)
{
        mutex_lock(&pool->mutex);
        if (PageHighMem(page)) {
                list_add_tail(&page->lru, &pool->high_items);
                pool->high_count++;
        } else {
                list_add_tail(&page->lru, &pool->low_items);
                pool->low_count++;
        }

        mod_node_page_state(page_pgdat(page), NR_KERNEL_MISC_RECLAIMABLE,
                            1 << pool->order);
        mutex_unlock(&pool->mutex);
}

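/* Caller must hold pool->mutex. */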
static struct page *ion_page_pool_remove(struct ion_page_pool *pool, bool high)
{
        struct page *page;

        if (high) {
                BUG_ON(!pool->high_count);
                page = list_first_entry(&pool->high_items, struct page, lru);
                pool->high_count--;
        } else {
                BUG_ON(!pool->low_count);
                page = list_first_entry(&pool->low_items, struct page, lru);
                pool->low_count--;
        }

        list_del(&page->lru);
        mod_node_page_state(page_pgdat(page), NR_KERNEL_MISC_RECLAIMABLE,
                            -(1 << pool->order));
        return page;
}

struct page *ion_page_pool_alloc(struct ion_page_pool *pool)
{
        struct page *page = NULL;

        BUG_ON(!pool);

        mutex_lock(&pool->mutex);
        if (pool->high_count)
                page = ion_page_pool_remove(pool, true);
        else if (pool->low_count)
                page = ion_page_pool_remove(pool, false);
        mutex_unlock(&pool->mutex);

        if (!page)
                page = ion_page_pool_alloc_pages(pool);

        return page;
}

void ion_page_pool_free(struct ion_page_pool *pool, struct page *page)
{
        BUG_ON(pool->order != compound_order(page));

        ion_page_pool_add(pool, page);
}

static int ion_page_pool_total(struct ion_page_pool *pool, bool high)
{
        int count = pool->low_count;

        if (high)
                count += pool->high_count;

        return count << pool->order;
}

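/*
 * Free pooled pages back to the buddy allocator until at least nr_to_scan
 * pages (counted as order-0 pages) have been freed or the pool is empty,
 * and return the number freed.  A nr_to_scan of 0 only reports how many
 * pages the pool could free for this request.  Highmem pages are only
 * freed when gfp_mask allows __GFP_HIGHMEM or when called from kswapd.
 */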
int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask,
                         int nr_to_scan)
{
        int freed = 0;
        bool high;

        if (current_is_kswapd())
                high = true;
        else
                high = !!(gfp_mask & __GFP_HIGHMEM);

        if (nr_to_scan == 0)
                return ion_page_pool_total(pool, high);

        while (freed < nr_to_scan) {
                struct page *page;

                mutex_lock(&pool->mutex);
                if (pool->low_count) {
                        page = ion_page_pool_remove(pool, false);
                } else if (high && pool->high_count) {
                        page = ion_page_pool_remove(pool, true);
                } else {
                        mutex_unlock(&pool->mutex);
                        break;
                }
                mutex_unlock(&pool->mutex);
                ion_page_pool_free_pages(pool, page);
                freed += (1 << pool->order);
        }

        return freed;
}
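The count/scan split above (nr_to_scan == 0 for a size query, non-zero to actually free) maps directly onto the kernel shrinker interface. The sketch below shows how a hypothetical heap wrapper could wire one pool into a shrinker; only ion_page_pool_shrink() comes from this file, while struct my_heap and its callbacks are illustrative. In the in-tree driver this wiring lives in the ION heap core, which forwards its shrinker callbacks to each heap's ->shrink() operation.

#include <linux/shrinker.h>

/* Hypothetical wrapper: one pool driven by one shrinker. */
struct my_heap {
        struct ion_page_pool *pool;
        struct shrinker shrinker;       /* registered elsewhere with register_shrinker() */
};

static unsigned long my_heap_shrink_count(struct shrinker *shrinker,
                                          struct shrink_control *sc)
{
        struct my_heap *heap = container_of(shrinker, struct my_heap, shrinker);

        /* nr_to_scan == 0: report how many pages could be reclaimed */
        return ion_page_pool_shrink(heap->pool, sc->gfp_mask, 0);
}

static unsigned long my_heap_shrink_scan(struct shrinker *shrinker,
                                         struct shrink_control *sc)
{
        struct my_heap *heap = container_of(shrinker, struct my_heap, shrinker);

        /* non-zero nr_to_scan: drop cached pages back to the buddy allocator */
        return ion_page_pool_shrink(heap->pool, sc->gfp_mask, sc->nr_to_scan);
}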

struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order)
{
        struct ion_page_pool *pool = kmalloc(sizeof(*pool), GFP_KERNEL);

        if (!pool)
                return NULL;
        pool->high_count = 0;
        pool->low_count = 0;
        INIT_LIST_HEAD(&pool->low_items);
        INIT_LIST_HEAD(&pool->high_items);
        pool->gfp_mask = gfp_mask | __GFP_COMP;
        pool->order = order;
        mutex_init(&pool->mutex);
        plist_node_init(&pool->list, order);

        return pool;
}

void ion_page_pool_destroy(struct ion_page_pool *pool)
{
        kfree(pool);
}
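Heaps that serve variable-sized buffers typically keep one pool per allocation order, trying the largest order first and falling back to smaller ones. The setup sketch below is illustrative rather than part of this file; the orders array, gfp choices, and my_heap_create_pools() roughly mirror what the ION system heap does but are not taken from it verbatim.

/* Illustrative: one pool per order, largest first. */
static const unsigned int orders[] = { 8, 4, 0 };

static int my_heap_create_pools(struct ion_page_pool **pools)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(orders); i++) {
                gfp_t gfp = GFP_HIGHUSER | __GFP_ZERO;

                /* high-order attempts should fail fast and quietly */
                if (orders[i] > 0)
                        gfp = (gfp | __GFP_NOWARN | __GFP_NORETRY) &
                              ~__GFP_RECLAIM;

                pools[i] = ion_page_pool_create(gfp, orders[i]);
                if (!pools[i])
                        goto err_free_pools;
        }
        return 0;

err_free_pools:
        while (--i >= 0)
                ion_page_pool_destroy(pools[i]);
        return -ENOMEM;
}

Allocation would then walk orders[] from the largest order that fits the request down to order 0, calling ion_page_pool_alloc() on each pool in turn.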