This source file includes the following definitions:
- set_migratetype_isolate
- unset_migratetype_isolate
- __first_valid_page
- start_isolate_page_range
- undo_isolate_page_range
- __test_page_isolated_in_pageblock
- test_pages_isolated
- alloc_migrate_target
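
How these pieces fit together, as a rough sketch of a typical caller (assumptions: start_pfn/end_pfn are pageblock-aligned and lie within a single zone; error handling is elided):

	/* Mark the range MIGRATE_ISOLATE so nothing new is allocated in it. */
	if (start_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE, 0) < 0)
		return -EBUSY;
	/* ... migrate or free every in-use page in the range ... */
	if (!test_pages_isolated(start_pfn, end_pfn, true))
		/* all pages in the range are now free and isolated */;
	/* Restore the original migratetype when done (or on failure). */
	undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);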
// SPDX-License-Identifier: GPL-2.0
/*
 * linux/mm/page_isolation.c
 */

#include <linux/mm.h>
#include <linux/page-isolation.h>
#include <linux/pageblock-flags.h>
#include <linux/memory.h>
#include <linux/hugetlb.h>
#include <linux/page_owner.h>
#include <linux/migrate.h>
#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/page_isolation.h>

static int set_migratetype_isolate(struct page *page, int migratetype, int isol_flags)
{
	struct zone *zone;
	unsigned long flags, pfn;
	struct memory_isolate_notify arg;
	int notifier_ret;
	int ret = -EBUSY;

	zone = page_zone(page);

	spin_lock_irqsave(&zone->lock, flags);

	/*
	 * We assume the caller intended to SET migrate type to isolate.
	 * If it is already set, then someone else must have raced and
	 * set it before us. Return -EBUSY.
	 */
	if (is_migrate_isolate_page(page))
		goto out;

	pfn = page_to_pfn(page);
	arg.start_pfn = pfn;
	arg.nr_pages = pageblock_nr_pages;
	arg.pages_found = 0;

	/*
	 * It may be possible to isolate a pageblock even if the
	 * migratetype is not MIGRATE_MOVABLE. The memory isolation
	 * notifier chain is used by balloon drivers to return the
	 * number of pages in a range that are held by the balloon
	 * driver to shrink memory. If all the pages are accounted for
	 * by balloons, are free, or on the LRU, isolation can continue.
	 * Later, for example, when the memory hotplug notifier runs,
	 * these pages reported as "can be isolated" should be isolated
	 * (freed) by the balloon driver through the memory notifier chain.
	 */
	notifier_ret = memory_isolate_notify(MEM_ISOLATE_COUNT, &arg);
	notifier_ret = notifier_to_errno(notifier_ret);
	if (notifier_ret)
		goto out;
	/*
	 * The pageblock can be isolated only if it contains no unmovable
	 * pages beyond those the notifier accounted for.
	 */
	if (!has_unmovable_pages(zone, page, arg.pages_found, migratetype,
				 isol_flags))
		ret = 0;

out:
	if (!ret) {
		unsigned long nr_pages;
		int mt = get_pageblock_migratetype(page);

		set_pageblock_migratetype(page, MIGRATE_ISOLATE);
		zone->nr_isolate_pageblock++;
		nr_pages = move_freepages_block(zone, page, MIGRATE_ISOLATE,
						NULL);

		__mod_zone_freepage_state(zone, -nr_pages, mt);
	}

	spin_unlock_irqrestore(&zone->lock, flags);
	/*
	 * Drain the per-cpu free lists so pages cached there are returned
	 * to the buddy allocator and land on the isolate freelist.
	 */
	if (!ret)
		drain_all_pages(zone);
	return ret;
}

static void unset_migratetype_isolate(struct page *page, unsigned migratetype)
{
	struct zone *zone;
	unsigned long flags, nr_pages;
	bool isolated_page = false;
	unsigned int order;
	unsigned long pfn, buddy_pfn;
	struct page *buddy;

	zone = page_zone(page);
	spin_lock_irqsave(&zone->lock, flags);
	if (!is_migrate_isolate_page(page))
		goto out;

	/*
	 * Because a free page with order greater than pageblock_order on an
	 * isolated pageblock is restricted from merging due to the freepage
	 * counting problem, it is possible that there is a free buddy page.
	 * move_freepages_block() does not handle merging, so we need another
	 * approach: isolating the page and freeing it again below lets the
	 * buddy allocator merge it with its buddy.
	 */
	if (PageBuddy(page)) {
		order = page_order(page);
		if (order >= pageblock_order) {
			pfn = page_to_pfn(page);
			buddy_pfn = __find_buddy_pfn(pfn, order);
			buddy = page + (buddy_pfn - pfn);

			if (pfn_valid_within(buddy_pfn) &&
			    !is_migrate_isolate_page(buddy)) {
				__isolate_free_page(page, order);
				isolated_page = true;
			}
		}
	}

	/*
	 * If we isolated a free page of more than pageblock_order above,
	 * no free page is left in this pageblock, so the costly freelist
	 * scan in move_freepages_block() can be skipped.
	 */
	if (!isolated_page) {
		nr_pages = move_freepages_block(zone, page, migratetype, NULL);
		__mod_zone_freepage_state(zone, nr_pages, migratetype);
	}
	set_pageblock_migratetype(page, migratetype);
	zone->nr_isolate_pageblock--;
out:
	spin_unlock_irqrestore(&zone->lock, flags);
	if (isolated_page) {
		post_alloc_hook(page, order, __GFP_MOVABLE);
		__free_pages(page, order);
	}
}

static inline struct page *
__first_valid_page(unsigned long pfn, unsigned long nr_pages)
{
	int i;

	for (i = 0; i < nr_pages; i++) {
		struct page *page;

		page = pfn_to_online_page(pfn + i);
		if (!page)
			continue;
		return page;
	}
	return NULL;
}

/**
 * start_isolate_page_range() - make the page-allocation-type of a range of
 * pages MIGRATE_ISOLATE.
 * @start_pfn:		The lower PFN of the range to be isolated.
 * @end_pfn:		The upper PFN of the range to be isolated.
 *			start_pfn/end_pfn must be aligned to pageblock_order.
 * @migratetype:	Migrate type to set in error recovery.
 * @flags:		Flags passed on to set_migratetype_isolate(), e.g. to
 *			skip hwpoisoned pages or to report isolation failures.
 *
 * Making the page-allocation-type of the range MIGRATE_ISOLATE means free
 * pages in the range will never be allocated, and any pages freed in the
 * future will not be allocated again either. If the range includes migrate
 * types other than MOVABLE or CMA, this fails with -EBUSY. To finally
 * isolate all pages in the range, the caller has to free all pages in the
 * range; test_pages_isolated() can be used to check whether that happened.
 *
 * There is no high-level synchronization mechanism that prevents two threads
 * from trying to isolate overlapping ranges. If this happens, one thread
 * will notice pageblocks in the overlapping range already set to isolate
 * and will fail.
 *
 * Returns the number of isolated pageblocks on success, or -EBUSY if any
 * part of the range could not be isolated.
 */
int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
			     unsigned migratetype, int flags)
{
	unsigned long pfn;
	unsigned long undo_pfn;
	struct page *page;
	int nr_isolate_pageblock = 0;

	BUG_ON(!IS_ALIGNED(start_pfn, pageblock_nr_pages));
	BUG_ON(!IS_ALIGNED(end_pfn, pageblock_nr_pages));

	for (pfn = start_pfn;
	     pfn < end_pfn;
	     pfn += pageblock_nr_pages) {
		page = __first_valid_page(pfn, pageblock_nr_pages);
		if (page) {
			if (set_migratetype_isolate(page, migratetype, flags)) {
				undo_pfn = pfn;
				goto undo;
			}
			nr_isolate_pageblock++;
		}
	}
	return nr_isolate_pageblock;
undo:
	for (pfn = start_pfn;
	     pfn < undo_pfn;
	     pfn += pageblock_nr_pages) {
		struct page *page = pfn_to_online_page(pfn);
		if (!page)
			continue;
		unset_migratetype_isolate(page, migratetype);
	}

	return -EBUSY;
}
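
/*
 * Sketch of the caller-side contract above (illustrative only, not code
 * from this file; loosely modeled on how a caller such as memory offlining
 * consumes the return value, with error paths elided):
 *
 *	ret = start_isolate_page_range(start_pfn, end_pfn,
 *				       MIGRATE_MOVABLE, flags);
 *	if (ret < 0)
 *		return ret;		(some pageblock was unmovable)
 *	nr_isolate_pageblock = ret;	(number of blocks to account/undo)
 */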

/*
 * Make isolated pages available again.
 */
void undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
			     unsigned migratetype)
{
	unsigned long pfn;
	struct page *page;

	BUG_ON(!IS_ALIGNED(start_pfn, pageblock_nr_pages));
	BUG_ON(!IS_ALIGNED(end_pfn, pageblock_nr_pages));

	for (pfn = start_pfn;
	     pfn < end_pfn;
	     pfn += pageblock_nr_pages) {
		page = __first_valid_page(pfn, pageblock_nr_pages);
		if (!page || !is_migrate_isolate_page(page))
			continue;
		unset_migratetype_isolate(page, migratetype);
	}
}

/*
 * Test whether all pages in the range are free (i.e. isolated).
 * All pages in [start_pfn...end_pfn) must be in the same zone, and
 * zone->lock must be held before calling this.
 *
 * Returns the last tested pfn.
 */
static unsigned long
__test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn,
				  bool skip_hwpoisoned_pages)
{
	struct page *page;

	while (pfn < end_pfn) {
		if (!pfn_valid_within(pfn)) {
			pfn++;
			continue;
		}
		page = pfn_to_page(pfn);
		if (PageBuddy(page))
			/*
			 * If the page is on a free list, it has to be on
			 * the correct MIGRATE_ISOLATE freelist. There is no
			 * simple way to verify that as VM_BUG_ON(), though.
			 */
			pfn += 1 << page_order(page);
		else if (skip_hwpoisoned_pages && PageHWPoison(page))
			/* A HWPoisoned page cannot also be PageBuddy */
			pfn++;
		else
			break;
	}

	return pfn;
}

/* Caller should ensure that the requested range is in a single zone */
int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
			bool skip_hwpoisoned_pages)
{
	unsigned long pfn, flags;
	struct page *page;
	struct zone *zone;

	/*
	 * Note: pageblock_nr_pages != MAX_ORDER, so chunks of free pages
	 * are not necessarily aligned to pageblock_nr_pages.
	 * Check the migratetype of each pageblock first.
	 */
	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
		page = __first_valid_page(pfn, pageblock_nr_pages);
		if (page && !is_migrate_isolate_page(page))
			break;
	}
	page = __first_valid_page(start_pfn, end_pfn - start_pfn);
	if ((pfn < end_pfn) || !page)
		return -EBUSY;

	/* Check that all pages in the range are free list pages */
	zone = page_zone(page);
	spin_lock_irqsave(&zone->lock, flags);
	pfn = __test_page_isolated_in_pageblock(start_pfn, end_pfn,
						skip_hwpoisoned_pages);
	spin_unlock_irqrestore(&zone->lock, flags);

	trace_test_pages_isolated(start_pfn, end_pfn, pfn);

	return pfn < end_pfn ? -EBUSY : 0;
}

/* Allocation callback for migrating pages out of an isolated range. */
struct page *alloc_migrate_target(struct page *page, unsigned long private)
{
	return new_page_nodemask(page, numa_node_id(), &node_states[N_MEMORY]);
}
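
/*
 * Illustrative sketch (an assumption, not code from this file): a caller
 * that has isolated a range and collected the in-use pages on a list could
 * hand them to the migrate core with alloc_migrate_target() as the
 * allocation callback, roughly:
 *
 *	LIST_HEAD(source);
 *	... isolate the range's LRU pages onto &source ...
 *	ret = migrate_pages(&source, alloc_migrate_target, NULL, 0,
 *			    MIGRATE_SYNC, MR_MEMORY_HOTPLUG);
 */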