/*
 * linux/mm/page_isolation.c
 */

#include <linux/mm.h>
#include <linux/page-isolation.h>
#include <linux/pageblock-flags.h>
#include <linux/memory.h>
#include <linux/hugetlb.h>
#include "internal.h"

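/*
 * Mark a single pageblock MIGRATE_ISOLATE.
 *
 * Returns 0 and moves the pageblock's free pages to the MIGRATE_ISOLATE
 * freelist when the block contains no unmovable pages (taking into account
 * pages accounted for by the memory isolation notifier chain); returns
 * -EBUSY otherwise.
 */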
static int set_migratetype_isolate(struct page *page,
				bool skip_hwpoisoned_pages)
{
	struct zone *zone;
	unsigned long flags, pfn;
	struct memory_isolate_notify arg;
	int notifier_ret;
	int ret = -EBUSY;

	zone = page_zone(page);

	spin_lock_irqsave(&zone->lock, flags);

	pfn = page_to_pfn(page);
	arg.start_pfn = pfn;
	arg.nr_pages = pageblock_nr_pages;
	arg.pages_found = 0;

	/*
	 * It may be possible to isolate a pageblock even if the
	 * migratetype is not MIGRATE_MOVABLE. The memory isolation
	 * notifier chain is used by balloon drivers to return the
	 * number of pages in a range that are held by the balloon
	 * driver to shrink memory. If all the pages are accounted for
	 * by balloons, are free, or are on the LRU, isolation can continue.
	 * Later, for example, when the memory hotplug notifier runs, these
	 * pages reported as "can be isolated" should be isolated (freed)
	 * by the balloon driver through the memory notifier chain.
	 */
	notifier_ret = memory_isolate_notify(MEM_ISOLATE_COUNT, &arg);
	notifier_ret = notifier_to_errno(notifier_ret);
	if (notifier_ret)
		goto out;
	/*
	 * FIXME: memory hotplug does not call shrink_slab() by itself yet,
	 * so we only check MOVABLE pages here.
	 */
	if (!has_unmovable_pages(zone, page, arg.pages_found,
				 skip_hwpoisoned_pages))
		ret = 0;

	/*
	 * "Immobile" means not-on-LRU pages. If there are more immobile
	 * pages than removable-by-driver pages reported by the notifier,
	 * we fail.
	 */

out:
	if (!ret) {
		unsigned long nr_pages;
		int migratetype = get_pageblock_migratetype(page);

		set_pageblock_migratetype(page, MIGRATE_ISOLATE);
		zone->nr_isolate_pageblock++;
		nr_pages = move_freepages_block(zone, page, MIGRATE_ISOLATE);

		__mod_zone_freepage_state(zone, -nr_pages, migratetype);
	}

	spin_unlock_irqrestore(&zone->lock, flags);
	if (!ret)
		drain_all_pages(zone);
	return ret;
}

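/*
 * Undo set_migratetype_isolate(): restore the pageblock's original
 * migratetype and put its free pages back on the matching freelist.
 * If a free page of order >= pageblock_order sits at the head of the block,
 * it is briefly isolated here and re-freed outside the zone lock so the
 * buddy allocator can merge it with its neighbour.
 */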
static void unset_migratetype_isolate(struct page *page, unsigned migratetype)
{
	struct zone *zone;
	unsigned long flags, nr_pages;
	struct page *isolated_page = NULL;
	unsigned int order;
	unsigned long page_idx, buddy_idx;
	struct page *buddy;

	zone = page_zone(page);
	spin_lock_irqsave(&zone->lock, flags);
	if (get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
		goto out;

	/*
	 * Because a free page larger than pageblock_order on an isolated
	 * pageblock is not allowed to merge (to keep the freepage counting
	 * correct), a mergeable free buddy page may still exist.
	 * move_freepages_block() does not handle merging, so we need another
	 * approach: isolating and re-freeing the page lets the buddy
	 * allocator merge it.
	 */
	if (PageBuddy(page)) {
		order = page_order(page);
		if (order >= pageblock_order) {
			page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1);
			buddy_idx = __find_buddy_index(page_idx, order);
			buddy = page + (buddy_idx - page_idx);

			if (pfn_valid_within(page_to_pfn(buddy)) &&
			    !is_migrate_isolate_page(buddy)) {
				__isolate_free_page(page, order);
				kernel_map_pages(page, (1 << order), 1);
				set_page_refcounted(page);
				isolated_page = page;
			}
		}
	}

	/*
	 * If we isolated a free page larger than pageblock_order above,
	 * there is no other free page left in the pageblock, so we can skip
	 * the costly free-list scan that move_freepages_block() would do.
	 */
	if (!isolated_page) {
		nr_pages = move_freepages_block(zone, page, migratetype);
		__mod_zone_freepage_state(zone, nr_pages, migratetype);
	}
	set_pageblock_migratetype(page, migratetype);
	zone->nr_isolate_pageblock--;
out:
	spin_unlock_irqrestore(&zone->lock, flags);
	if (isolated_page)
		__free_pages(isolated_page, order);
}

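/*
 * Return the first page with a valid pfn in [pfn, pfn + nr_pages), or NULL
 * if no pfn in the range is valid.
 */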
static inline struct page *
__first_valid_page(unsigned long pfn, unsigned long nr_pages)
{
	int i;
	for (i = 0; i < nr_pages; i++)
		if (pfn_valid_within(pfn + i))
			break;
	if (unlikely(i == nr_pages))
		return NULL;
	return pfn_to_page(pfn + i);
}

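/*
 * A rough sketch of how the entry points below are meant to be used together.
 * The migration of in-use pages belongs to the caller (for example memory
 * hotplug or contiguous-range allocators) and is only hinted at here:
 *
 *	if (start_isolate_page_range(start_pfn, end_pfn,
 *				     MIGRATE_MOVABLE, true))
 *		return -EBUSY;
 *	... migrate or free every page in [start_pfn, end_pfn) ...
 *	if (test_pages_isolated(start_pfn, end_pfn, true) == 0)
 *		... the range now holds only isolated free pages ...
 *	undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
 */
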
/*
 * start_isolate_page_range() -- make the page allocation type of a range of
 * pages MIGRATE_ISOLATE.
 * @start_pfn: The lower PFN of the range to be isolated.
 * @end_pfn: The upper PFN of the range to be isolated.
 * @migratetype: migrate type to restore on error recovery.
 * @skip_hwpoisoned_pages: if true, hwpoisoned pages do not count as unmovable.
 *
 * Making the page allocation type MIGRATE_ISOLATE means that free pages in
 * the range will never be allocated: neither the pages that are free now nor
 * any pages freed into the range later.
 *
 * start_pfn/end_pfn must be pageblock-aligned.
 * Returns 0 on success and -EBUSY if any part of the range cannot be isolated.
 */
int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
			     unsigned migratetype, bool skip_hwpoisoned_pages)
{
	unsigned long pfn;
	unsigned long undo_pfn;
	struct page *page;

	BUG_ON((start_pfn) & (pageblock_nr_pages - 1));
	BUG_ON((end_pfn) & (pageblock_nr_pages - 1));

	for (pfn = start_pfn;
	     pfn < end_pfn;
	     pfn += pageblock_nr_pages) {
		page = __first_valid_page(pfn, pageblock_nr_pages);
		if (page &&
		    set_migratetype_isolate(page, skip_hwpoisoned_pages)) {
			undo_pfn = pfn;
			goto undo;
		}
	}
	return 0;
undo:
	for (pfn = start_pfn;
	     pfn < undo_pfn;
	     pfn += pageblock_nr_pages)
		unset_migratetype_isolate(pfn_to_page(pfn), migratetype);

	return -EBUSY;
}

/*
 * Make isolated pages available again.
 */
int undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
			    unsigned migratetype)
{
	unsigned long pfn;
	struct page *page;
	BUG_ON((start_pfn) & (pageblock_nr_pages - 1));
	BUG_ON((end_pfn) & (pageblock_nr_pages - 1));
	for (pfn = start_pfn;
	     pfn < end_pfn;
	     pfn += pageblock_nr_pages) {
		page = __first_valid_page(pfn, pageblock_nr_pages);
		if (!page || get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
			continue;
		unset_migratetype_isolate(page, migratetype);
	}
	return 0;
}
/*
 * Test whether all pages in the range are free (i.e., isolated).
 * All pages in [start_pfn...end_pfn) must be in the same zone.
 * zone->lock must be held before calling this.
 *
 * Returns 1 if all pages in the range are isolated.
 */
static int
__test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn,
				  bool skip_hwpoisoned_pages)
{
	struct page *page;

	while (pfn < end_pfn) {
		if (!pfn_valid_within(pfn)) {
			pfn++;
			continue;
		}
		page = pfn_to_page(pfn);
		if (PageBuddy(page))
			/*
			 * If the page is on a free list, it has to be on
			 * the correct MIGRATE_ISOLATE freelist. There is no
			 * simple way to verify that with a VM_BUG_ON(), though.
			 */
			pfn += 1 << page_order(page);
		else if (skip_hwpoisoned_pages && PageHWPoison(page))
			/* A HWPoisoned page cannot also be PageBuddy */
			pfn++;
		else
			break;
	}
	if (pfn < end_pfn)
		return 0;
	return 1;
}

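/*
 * Check that every page in [start_pfn, end_pfn) is a free page sitting on
 * the MIGRATE_ISOLATE freelist (or, if @skip_hwpoisoned_pages, hwpoisoned).
 *
 * The caller is expected to have isolated the range with
 * start_isolate_page_range() first. Returns 0 if the whole range is
 * isolated, -EBUSY otherwise.
 */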
int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
			bool skip_hwpoisoned_pages)
{
	unsigned long pfn, flags;
	struct page *page;
	struct zone *zone;
	int ret;

	/*
	 * Note: pageblock_nr_pages != MAX_ORDER, so chunks of free pages are
	 * not necessarily aligned to pageblock_nr_pages.
	 * Check the migratetype of each pageblock first.
	 */
	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
		page = __first_valid_page(pfn, pageblock_nr_pages);
		if (page && get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
			break;
	}
	page = __first_valid_page(start_pfn, end_pfn - start_pfn);
	if ((pfn < end_pfn) || !page)
		return -EBUSY;
	/* Check all pages are free or marked as ISOLATED */
	zone = page_zone(page);
	spin_lock_irqsave(&zone->lock, flags);
	ret = __test_page_isolated_in_pageblock(start_pfn, end_pfn,
						skip_hwpoisoned_pages);
	spin_unlock_irqrestore(&zone->lock, flags);
	return ret ? 0 : -EBUSY;
}

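/*
 * Allocation callback for migrating pages out of a range: pick a destination
 * page for @page. Hugepages get a fresh hugepage from the next online node;
 * everything else gets a plain movable page. The @private and @resultp
 * arguments required by the migration callback signature are unused here.
 */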
struct page *alloc_migrate_target(struct page *page, unsigned long private,
				  int **resultp)
{
	gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE;

	/*
	 * TODO: allocate a destination hugepage from the nearest neighbor
	 * node, in accordance with the memory policy of the user process,
	 * if possible. For now, as a simple workaround, we use the next
	 * node as the destination.
	 */
	if (PageHuge(page)) {
		int node = next_online_node(page_to_nid(page));
		if (node == MAX_NUMNODES)
			node = first_online_node;
		return alloc_huge_page_node(page_hstate(compound_head(page)),
					    node);
	}

	if (PageHighMem(page))
		gfp_mask |= __GFP_HIGHMEM;

	return alloc_page(gfp_mask);
}