/*
 * Procedures for maintaining information about logical memory blocks.
 *
 * Peter Bergner, IBM Corp.	June 2001.
 * Copyright (C) 2001 Peter Bergner.
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/poison.h>
#include <linux/pfn.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/memblock.h>

#include <asm/sections.h>
#include <linux/kmemleak.h>
#include <linux/io.h>

#include "internal.h"

static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
static struct memblock_region memblock_physmem_init_regions[INIT_PHYSMEM_REGIONS] __initdata_memblock;
#endif

struct memblock memblock __initdata_memblock = {
	.memory.regions		= memblock_memory_init_regions,
	.memory.cnt		= 1,	/* empty dummy entry */
	.memory.max		= INIT_MEMBLOCK_REGIONS,

	.reserved.regions	= memblock_reserved_init_regions,
	.reserved.cnt		= 1,	/* empty dummy entry */
	.reserved.max		= INIT_MEMBLOCK_REGIONS,

#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
	.physmem.regions	= memblock_physmem_init_regions,
	.physmem.cnt		= 1,	/* empty dummy entry */
	.physmem.max		= INIT_PHYSMEM_REGIONS,
#endif

	.bottom_up		= false,
	.current_limit		= MEMBLOCK_ALLOC_ANYWHERE,
};

int memblock_debug __initdata_memblock;
#ifdef CONFIG_MOVABLE_NODE
bool movable_node_enabled __initdata_memblock = false;
#endif
static int memblock_can_resize __initdata_memblock;
static int memblock_memory_in_slab __initdata_memblock = 0;
static int memblock_reserved_in_slab __initdata_memblock = 0;

/* inline so we don't get a warning when pr_debug is compiled out */
static __init_memblock const char *
memblock_type_name(struct memblock_type *type)
{
	if (type == &memblock.memory)
		return "memory";
	else if (type == &memblock.reserved)
		return "reserved";
	else
		return "unknown";
}

/* adjust *@size so that (@base + *@size) doesn't overflow, return new size */
static inline phys_addr_t memblock_cap_size(phys_addr_t base, phys_addr_t *size)
{
	return *size = min(*size, (phys_addr_t)ULLONG_MAX - base);
}
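
/*
 * Worked example (illustrative, not part of the original file): with a
 * 32-bit phys_addr_t, memblock_cap_size() clips a range that would wrap
 * the end of the physical address space:
 *
 *	phys_addr_t base = 0xfffff000;
 *	phys_addr_t size = 0x2000;	   // base + size would overflow
 *
 *	memblock_cap_size(base, &size);	   // size is capped to 0xfff
 *
 * so that base + size stays representable in phys_addr_t.
 */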

/*
 * Address comparison utilities
 */
static unsigned long __init_memblock memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1,
				       phys_addr_t base2, phys_addr_t size2)
{
	return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
}

static long __init_memblock memblock_overlaps_region(struct memblock_type *type,
					phys_addr_t base, phys_addr_t size)
{
	unsigned long i;

	for (i = 0; i < type->cnt; i++) {
		phys_addr_t rgnbase = type->regions[i].base;
		phys_addr_t rgnsize = type->regions[i].size;
		if (memblock_addrs_overlap(base, size, rgnbase, rgnsize))
			break;
	}

	return (i < type->cnt) ? i : -1;
}

/**
 * __memblock_find_range_bottom_up - find free area utility in bottom-up
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * Utility called from memblock_find_in_range_node(), find free area bottom-up.
 *
 * RETURNS:
 * Found address on success, 0 on failure.
 */
static phys_addr_t __init_memblock
__memblock_find_range_bottom_up(phys_addr_t start, phys_addr_t end,
				phys_addr_t size, phys_addr_t align, int nid)
{
	phys_addr_t this_start, this_end, cand;
	u64 i;

	for_each_free_mem_range(i, nid, &this_start, &this_end, NULL) {
		this_start = clamp(this_start, start, end);
		this_end = clamp(this_end, start, end);

		cand = round_up(this_start, align);
		if (cand < this_end && this_end - cand >= size)
			return cand;
	}

	return 0;
}

/**
 * __memblock_find_range_top_down - find free area utility, in top-down
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * Utility called from memblock_find_in_range_node(), find free area top-down.
 *
 * RETURNS:
 * Found address on success, 0 on failure.
 */
static phys_addr_t __init_memblock
__memblock_find_range_top_down(phys_addr_t start, phys_addr_t end,
			       phys_addr_t size, phys_addr_t align, int nid)
{
	phys_addr_t this_start, this_end, cand;
	u64 i;

	for_each_free_mem_range_reverse(i, nid, &this_start, &this_end, NULL) {
		this_start = clamp(this_start, start, end);
		this_end = clamp(this_end, start, end);

		if (this_end < size)
			continue;

		cand = round_down(this_end - size, align);
		if (cand >= this_start)
			return cand;
	}

	return 0;
}

/**
 * memblock_find_in_range_node - find free area in given range and node
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * Find @size free area aligned to @align in the specified range and node.
 *
 * When allocation direction is bottom-up, the @start should be greater
 * than the end of the kernel image. Otherwise, it will be trimmed. The
 * reason is that we want the bottom-up allocation just near the kernel
 * image so it is highly likely that the allocated memory and the kernel
 * will reside in the same node.
 *
 * If bottom-up allocation fails, top-down allocation is tried instead.
 *
 * RETURNS:
 * Found address on success, 0 on failure.
 */
phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size,
					phys_addr_t align, phys_addr_t start,
					phys_addr_t end, int nid)
{
	phys_addr_t kernel_end, ret;

	/* pump up @end */
	if (end == MEMBLOCK_ALLOC_ACCESSIBLE)
		end = memblock.current_limit;

	/* avoid allocating the first page */
	start = max_t(phys_addr_t, start, PAGE_SIZE);
	end = max(start, end);
	kernel_end = __pa_symbol(_end);

	/*
	 * try bottom-up allocation only when bottom-up mode
	 * is set and @end is above the kernel image.
	 */
	if (memblock_bottom_up() && end > kernel_end) {
		phys_addr_t bottom_up_start;

		/* make sure we will allocate above the kernel */
		bottom_up_start = max(start, kernel_end);

		/* ok, try bottom-up allocation first */
		ret = __memblock_find_range_bottom_up(bottom_up_start, end,
						      size, align, nid);
		if (ret)
			return ret;

		/*
		 * we always limit bottom-up allocation above the kernel,
		 * but top-down allocation doesn't have the limit, so
		 * retrying top-down allocation may succeed when bottom-up
		 * allocation failed.
		 *
		 * bottom-up allocation is expected to fail very rarely,
		 * so we use WARN_ONCE() here to see the stack trace if
		 * a failure happens.
		 */
		WARN_ONCE(1, "memblock: bottom-up allocation failed, memory hotunplug may be affected\n");
	}

	return __memblock_find_range_top_down(start, end, size, align, nid);
}

/**
 * memblock_find_in_range - find free area in given range
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
 * @size: size of free area to find
 * @align: alignment of free area to find
 *
 * Find @size free area aligned to @align in the specified range.
 *
 * RETURNS:
 * Found address on success, 0 on failure.
 */
phys_addr_t __init_memblock memblock_find_in_range(phys_addr_t start,
					phys_addr_t end, phys_addr_t size,
					phys_addr_t align)
{
	return memblock_find_in_range_node(size, align, start, end,
					    NUMA_NO_NODE);
}
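
/*
 * Usage sketch (illustrative only): the typical find-then-reserve pattern
 * in early-boot callers, assuming @crash_size and @crash_align are
 * caller-provided values:
 *
 *	crash_base = memblock_find_in_range(0, SZ_4G, crash_size, crash_align);
 *	if (crash_base)
 *		memblock_reserve(crash_base, crash_size);
 *
 * Note that memblock_find_in_range() only locates a candidate area;
 * nothing is taken until memblock_reserve() succeeds.
 */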

static void __init_memblock memblock_remove_region(struct memblock_type *type, unsigned long r)
{
	type->total_size -= type->regions[r].size;
	memmove(&type->regions[r], &type->regions[r + 1],
		(type->cnt - (r + 1)) * sizeof(type->regions[r]));
	type->cnt--;

	/* Special case for empty arrays */
	if (type->cnt == 0) {
		WARN_ON(type->total_size != 0);
		type->cnt = 1;
		type->regions[0].base = 0;
		type->regions[0].size = 0;
		type->regions[0].flags = 0;
		memblock_set_region_node(&type->regions[0], MAX_NUMNODES);
	}
}

#ifdef CONFIG_ARCH_DISCARD_MEMBLOCK

phys_addr_t __init_memblock get_allocated_memblock_reserved_regions_info(
					phys_addr_t *addr)
{
	if (memblock.reserved.regions == memblock_reserved_init_regions)
		return 0;

	*addr = __pa(memblock.reserved.regions);

	return PAGE_ALIGN(sizeof(struct memblock_region) *
			  memblock.reserved.max);
}

phys_addr_t __init_memblock get_allocated_memblock_memory_regions_info(
					phys_addr_t *addr)
{
	if (memblock.memory.regions == memblock_memory_init_regions)
		return 0;

	*addr = __pa(memblock.memory.regions);

	return PAGE_ALIGN(sizeof(struct memblock_region) *
			  memblock.memory.max);
}

#endif

/**
 * memblock_double_array - double the size of the memblock regions array
 * @type: memblock type of the regions array being doubled
 * @new_area_start: starting address of memory range to avoid overlap with
 * @new_area_size: size of memory range to avoid overlap with
 *
 * Double the size of the @type regions array. If memblock is being used to
 * allocate memory for a new reserved regions array and there is a previously
 * allocated memory range [@new_area_start,@new_area_start+@new_area_size]
 * waiting to be reserved, ensure the memory used by the new array does
 * not overlap.
 *
 * RETURNS:
 * 0 on success, -1 on failure.
 */
static int __init_memblock memblock_double_array(struct memblock_type *type,
						phys_addr_t new_area_start,
						phys_addr_t new_area_size)
{
	struct memblock_region *new_array, *old_array;
	phys_addr_t old_alloc_size, new_alloc_size;
	phys_addr_t old_size, new_size, addr;
	int use_slab = slab_is_available();
	int *in_slab;

	/* We don't allow resizing until we know about the reserved regions
	 * of memory that aren't suitable for allocation
	 */
	if (!memblock_can_resize)
		return -1;

	/* Calculate new doubled size */
	old_size = type->max * sizeof(struct memblock_region);
	new_size = old_size << 1;
	/*
	 * We need to allocate the new array aligned to PAGE_SIZE,
	 * so that we can free it completely later.
	 */
	old_alloc_size = PAGE_ALIGN(old_size);
	new_alloc_size = PAGE_ALIGN(new_size);

	/* Retrieve the slab flag */
	if (type == &memblock.memory)
		in_slab = &memblock_memory_in_slab;
	else
		in_slab = &memblock_reserved_in_slab;

	/* Try to find some space for it.
	 *
	 * WARNING: We assume that either slab_is_available() is true and we
	 * use the slab, or we use MEMBLOCK for allocations. That means that
	 * this is unsafe to use when bootmem is currently active (unless
	 * bootmem itself is implemented on top of MEMBLOCK which isn't the
	 * case yet)
	 *
	 * This should however not be an issue for now, as we currently only
	 * call into MEMBLOCK while it's still active, or much later when slab
	 * is active for memory hotplug operations
	 */
	if (use_slab) {
		new_array = kmalloc(new_size, GFP_KERNEL);
		addr = new_array ? __pa(new_array) : 0;
	} else {
		/* only exclude range when trying to double reserved.regions */
		if (type != &memblock.reserved)
			new_area_start = new_area_size = 0;

		addr = memblock_find_in_range(new_area_start + new_area_size,
						memblock.current_limit,
						new_alloc_size, PAGE_SIZE);
		if (!addr && new_area_size)
			addr = memblock_find_in_range(0,
				min(new_area_start, memblock.current_limit),
				new_alloc_size, PAGE_SIZE);

		new_array = addr ? __va(addr) : NULL;
	}
	if (!addr) {
		pr_err("memblock: Failed to double %s array from %ld to %ld entries !\n",
		       memblock_type_name(type), type->max, type->max * 2);
		return -1;
	}

	memblock_dbg("memblock: %s is doubled to %ld at [%#010llx-%#010llx]",
			memblock_type_name(type), type->max * 2, (u64)addr,
			(u64)addr + new_size - 1);

	/*
	 * Found space, we now need to move the array over before we add the
	 * reserved region since it may be our reserved array itself that is
	 * full.
	 */
	memcpy(new_array, type->regions, old_size);
	memset(new_array + type->max, 0, old_size);
	old_array = type->regions;
	type->regions = new_array;
	type->max <<= 1;

	/* Free old array. We needn't free it if the array is the static one */
	if (*in_slab)
		kfree(old_array);
	else if (old_array != memblock_memory_init_regions &&
		 old_array != memblock_reserved_init_regions)
		memblock_free(__pa(old_array), old_alloc_size);

	/*
	 * Reserve the new array if it comes from memblock.  Otherwise, we
	 * needn't do it
	 */
	if (!use_slab)
		BUG_ON(memblock_reserve(addr, new_alloc_size));

	/* Update slab flag */
	*in_slab = use_slab;

	return 0;
}

/**
 * memblock_merge_regions - merge neighboring compatible regions
 * @type: memblock type to scan
 *
 * Scan @type and merge neighboring compatible regions.
 */
static void __init_memblock memblock_merge_regions(struct memblock_type *type)
{
	int i = 0;

	/* cnt never goes below 1 */
	while (i < type->cnt - 1) {
		struct memblock_region *this = &type->regions[i];
		struct memblock_region *next = &type->regions[i + 1];

		if (this->base + this->size != next->base ||
		    memblock_get_region_node(this) !=
		    memblock_get_region_node(next) ||
		    this->flags != next->flags) {
			BUG_ON(this->base + this->size > next->base);
			i++;
			continue;
		}

		this->size += next->size;
		/* move forward from next + 1, index of which is i + 2 */
		memmove(next, next + 1, (type->cnt - (i + 2)) * sizeof(*next));
		type->cnt--;
	}
}

/**
 * memblock_insert_region - insert new memblock region
 * @type:	memblock type to insert into
 * @idx:	index for the insertion point
 * @base:	base address of the new region
 * @size:	size of the new region
 * @nid:	node id of the new region
 * @flags:	flags of the new region
 *
 * Insert new memblock region [@base,@base+@size) into @type at @idx.
 * @type must already have extra room to accommodate the new region.
 */
static void __init_memblock memblock_insert_region(struct memblock_type *type,
						   int idx, phys_addr_t base,
						   phys_addr_t size,
						   int nid, unsigned long flags)
{
	struct memblock_region *rgn = &type->regions[idx];

	BUG_ON(type->cnt >= type->max);
	memmove(rgn + 1, rgn, (type->cnt - idx) * sizeof(*rgn));
	rgn->base = base;
	rgn->size = size;
	rgn->flags = flags;
	memblock_set_region_node(rgn, nid);
	type->cnt++;
	type->total_size += size;
}

/**
 * memblock_add_range - add new memblock region
 * @type: memblock type to add new region into
 * @base: base address of the new region
 * @size: size of the new region
 * @nid: nid of the new region
 * @flags: flags of the new region
 *
 * Add new memblock region [@base,@base+@size) into @type.  The new region
 * is allowed to overlap with existing ones - overlaps don't affect already
 * existing regions.  @type is guaranteed to be minimal (all neighbouring
 * compatible regions are merged) after the addition.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init_memblock memblock_add_range(struct memblock_type *type,
				phys_addr_t base, phys_addr_t size,
				int nid, unsigned long flags)
{
	bool insert = false;
	phys_addr_t obase = base;
	phys_addr_t end = base + memblock_cap_size(base, &size);
	int i, nr_new;

	if (!size)
		return 0;

	/* special case for empty array */
	if (type->regions[0].size == 0) {
		WARN_ON(type->cnt != 1 || type->total_size);
		type->regions[0].base = base;
		type->regions[0].size = size;
		type->regions[0].flags = flags;
		memblock_set_region_node(&type->regions[0], nid);
		type->total_size = size;
		return 0;
	}
repeat:
	/*
	 * The following is executed twice.  Once with %false @insert and
	 * then with %true.  The first counts the number of regions needed
	 * to accommodate the new area.  The second actually inserts them.
	 */
	base = obase;
	nr_new = 0;

	for (i = 0; i < type->cnt; i++) {
		struct memblock_region *rgn = &type->regions[i];
		phys_addr_t rbase = rgn->base;
		phys_addr_t rend = rbase + rgn->size;

		if (rbase >= end)
			break;
		if (rend <= base)
			continue;
		/*
		 * @rgn overlaps.  If it separates the lower part of new
		 * area, insert that portion.
		 */
		if (rbase > base) {
			nr_new++;
			if (insert)
				memblock_insert_region(type, i++, base,
						       rbase - base, nid,
						       flags);
		}
		/* area below @rend is dealt with, forget about it */
		base = min(rend, end);
	}

	/* insert the remaining portion */
	if (base < end) {
		nr_new++;
		if (insert)
			memblock_insert_region(type, i, base, end - base,
					       nid, flags);
	}

	/*
	 * If this was the first round, resize array and repeat for actual
	 * insertions; otherwise, merge and return.
	 */
	if (!insert) {
		while (type->cnt + nr_new > type->max)
			if (memblock_double_array(type, obase, size) < 0)
				return -ENOMEM;
		insert = true;
		goto repeat;
	} else {
		memblock_merge_regions(type);
		return 0;
	}
}

int __init_memblock memblock_add_node(phys_addr_t base, phys_addr_t size,
				       int nid)
{
	return memblock_add_range(&memblock.memory, base, size, nid, 0);
}

static int __init_memblock memblock_add_region(phys_addr_t base,
						phys_addr_t size,
						int nid,
						unsigned long flags)
{
	struct memblock_type *_rgn = &memblock.memory;

	memblock_dbg("memblock_add: [%#016llx-%#016llx] flags %#02lx %pF\n",
		     (unsigned long long)base,
		     (unsigned long long)base + size - 1,
		     flags, (void *)_RET_IP_);

	return memblock_add_range(_rgn, base, size, nid, flags);
}

int __init_memblock memblock_add(phys_addr_t base, phys_addr_t size)
{
	return memblock_add_region(base, size, MAX_NUMNODES, 0);
}
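
/*
 * Usage sketch (illustrative only): arch setup code typically registers
 * RAM banks from the firmware memory map and then reserves the kernel
 * image, assuming @bank_base and @bank_size come from the boot loader:
 *
 *	memblock_add(bank_base, bank_size);
 *	memblock_reserve(__pa(_stext), _end - _stext);
 */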

/**
 * memblock_isolate_range - isolate given range into disjoint memblocks
 * @type: memblock type to isolate range for
 * @base: base of range to isolate
 * @size: size of range to isolate
 * @start_rgn: out parameter for the start of isolated region
 * @end_rgn: out parameter for the end of isolated region
 *
 * Walk @type and ensure that regions don't cross the boundaries defined by
 * [@base,@base+@size).  Crossing regions are split at the boundaries,
 * which may create at most two more regions.  The index of the first
 * region inside the range is returned in *@start_rgn and end in *@end_rgn.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
static int __init_memblock memblock_isolate_range(struct memblock_type *type,
					phys_addr_t base, phys_addr_t size,
					int *start_rgn, int *end_rgn)
{
	phys_addr_t end = base + memblock_cap_size(base, &size);
	int i;

	*start_rgn = *end_rgn = 0;

	if (!size)
		return 0;

	/* we'll create at most two more regions */
	while (type->cnt + 2 > type->max)
		if (memblock_double_array(type, base, size) < 0)
			return -ENOMEM;

	for (i = 0; i < type->cnt; i++) {
		struct memblock_region *rgn = &type->regions[i];
		phys_addr_t rbase = rgn->base;
		phys_addr_t rend = rbase + rgn->size;

		if (rbase >= end)
			break;
		if (rend <= base)
			continue;

		if (rbase < base) {
			/*
			 * @rgn intersects from below.  Split and continue
			 * to process the next region - the new top half.
			 */
			rgn->base = base;
			rgn->size -= base - rbase;
			type->total_size -= base - rbase;
			memblock_insert_region(type, i, rbase, base - rbase,
					       memblock_get_region_node(rgn),
					       rgn->flags);
		} else if (rend > end) {
			/*
			 * @rgn intersects from above.  Split and redo the
			 * current region - the new bottom half.
			 */
			rgn->base = end;
			rgn->size -= end - rbase;
			type->total_size -= end - rbase;
			memblock_insert_region(type, i--, rbase, end - rbase,
					       memblock_get_region_node(rgn),
					       rgn->flags);
		} else {
			/* @rgn is fully contained, record it */
			if (!*end_rgn)
				*start_rgn = i;
			*end_rgn = i + 1;
		}
	}

	return 0;
}

int __init_memblock memblock_remove_range(struct memblock_type *type,
					  phys_addr_t base, phys_addr_t size)
{
	int start_rgn, end_rgn;
	int i, ret;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = end_rgn - 1; i >= start_rgn; i--)
		memblock_remove_region(type, i);
	return 0;
}

int __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size)
{
	return memblock_remove_range(&memblock.memory, base, size);
}

int __init_memblock memblock_free(phys_addr_t base, phys_addr_t size)
{
	memblock_dbg("   memblock_free: [%#016llx-%#016llx] %pF\n",
		     (unsigned long long)base,
		     (unsigned long long)base + size - 1,
		     (void *)_RET_IP_);

	kmemleak_free_part(__va(base), size);
	return memblock_remove_range(&memblock.reserved, base, size);
}

static int __init_memblock memblock_reserve_region(phys_addr_t base,
						   phys_addr_t size,
						   int nid,
						   unsigned long flags)
{
	struct memblock_type *type = &memblock.reserved;

	memblock_dbg("memblock_reserve: [%#016llx-%#016llx] flags %#02lx %pF\n",
		     (unsigned long long)base,
		     (unsigned long long)base + size - 1,
		     flags, (void *)_RET_IP_);

	return memblock_add_range(type, base, size, nid, flags);
}

int __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size)
{
	return memblock_reserve_region(base, size, MAX_NUMNODES, 0);
}

/**
 * memblock_setclr_flag - set or clear a flag on a memory region
 * @base: base address of the region
 * @size: size of the region
 * @set: set (1) or clear (0) the flag
 * @flag: the flag to update
 *
 * This function isolates region [@base, @base + @size), and sets/clears flag
 *
 * Return 0 on success, -errno on failure.
 */
static int __init_memblock memblock_setclr_flag(phys_addr_t base,
				phys_addr_t size, int set, int flag)
{
	struct memblock_type *type = &memblock.memory;
	int i, ret, start_rgn, end_rgn;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = start_rgn; i < end_rgn; i++)
		if (set)
			memblock_set_region_flags(&type->regions[i], flag);
		else
			memblock_clear_region_flags(&type->regions[i], flag);

	memblock_merge_regions(type);
	return 0;
}

/**
 * memblock_mark_hotplug - Mark hotpluggable memory with flag MEMBLOCK_HOTPLUG.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return 0 on success, -errno on failure.
 */
int __init_memblock memblock_mark_hotplug(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(base, size, 1, MEMBLOCK_HOTPLUG);
}

/**
 * memblock_clear_hotplug - Clear flag MEMBLOCK_HOTPLUG for a specified region.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return 0 on success, -errno on failure.
 */
int __init_memblock memblock_clear_hotplug(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(base, size, 0, MEMBLOCK_HOTPLUG);
}
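
/*
 * Usage sketch (illustrative only): marking a firmware-reported
 * hotpluggable range so that allocations skip it when movable_node is
 * enabled, assuming @base, @size and @hotpluggable come from the
 * affinity table parser:
 *
 *	memblock_add(base, size);
 *	if (hotpluggable)
 *		memblock_mark_hotplug(base, size);
 */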

/**
 * __next_mem_range - next function for for_each_free_mem_range() etc.
 * @idx: pointer to u64 loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @type_a: pointer to memblock_type from where the range is taken
 * @type_b: pointer to memblock_type which excludes memory from being taken
 * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @out_nid: ptr to int for nid of the range, can be %NULL
 *
 * Find the first area from *@idx which matches @nid, fill the out
 * parameters, and update *@idx for the next iteration.  The lower 32bit of
 * *@idx contains index into type_a and the upper 32bit indexes the
 * areas before each region in type_b.  For example, if type_b regions
 * look like the following,
 *
 *	0:[0-16), 1:[32-48), 2:[128-130)
 *
 * The upper 32bit indexes the following regions.
 *
 *	0:[0-0), 1:[16-32), 2:[48-128), 3:[130-MAX)
 *
 * As both region arrays are sorted, the function advances the two indices
 * in lockstep and returns each intersection.
 */
void __init_memblock __next_mem_range(u64 *idx, int nid,
				      struct memblock_type *type_a,
				      struct memblock_type *type_b,
				      phys_addr_t *out_start,
				      phys_addr_t *out_end, int *out_nid)
{
	int idx_a = *idx & 0xffffffff;
	int idx_b = *idx >> 32;

	if (WARN_ONCE(nid == MAX_NUMNODES,
	"Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
		nid = NUMA_NO_NODE;

	for (; idx_a < type_a->cnt; idx_a++) {
		struct memblock_region *m = &type_a->regions[idx_a];

		phys_addr_t m_start = m->base;
		phys_addr_t m_end = m->base + m->size;
		int	    m_nid = memblock_get_region_node(m);

		/* only memory regions are associated with nodes, check it */
		if (nid != NUMA_NO_NODE && nid != m_nid)
			continue;

		/* skip hotpluggable memory regions if needed */
		if (movable_node_is_enabled() && memblock_is_hotpluggable(m))
			continue;

		if (!type_b) {
			if (out_start)
				*out_start = m_start;
			if (out_end)
				*out_end = m_end;
			if (out_nid)
				*out_nid = m_nid;
			idx_a++;
			*idx = (u32)idx_a | (u64)idx_b << 32;
			return;
		}

		/* scan areas before each reservation */
		for (; idx_b < type_b->cnt + 1; idx_b++) {
			struct memblock_region *r;
			phys_addr_t r_start;
			phys_addr_t r_end;

			r = &type_b->regions[idx_b];
			r_start = idx_b ? r[-1].base + r[-1].size : 0;
			r_end = idx_b < type_b->cnt ?
				r->base : ULLONG_MAX;

			/*
			 * if idx_b advanced past idx_a,
			 * break out to advance idx_a
			 */
			if (r_start >= m_end)
				break;
			/* if the two regions intersect, we're done */
			if (m_start < r_end) {
				if (out_start)
					*out_start =
						max(m_start, r_start);
				if (out_end)
					*out_end = min(m_end, r_end);
				if (out_nid)
					*out_nid = m_nid;
				/*
				 * The region which ends first is
				 * advanced for the next iteration.
				 */
				if (m_end <= r_end)
					idx_a++;
				else
					idx_b++;
				*idx = (u32)idx_a | (u64)idx_b << 32;
				return;
			}
		}
	}

	/* signal end of iteration */
	*idx = ULLONG_MAX;
}
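
/*
 * Usage sketch (illustrative only): callers normally go through the
 * iterator macro instead of calling __next_mem_range() directly:
 *
 *	phys_addr_t start, end;
 *	u64 i;
 *
 *	for_each_free_mem_range(i, NUMA_NO_NODE, &start, &end, NULL)
 *		pr_info("free: [%pa-%pa]\n", &start, &end);
 *
 * Each iteration yields one intersection of memblock.memory minus
 * memblock.reserved, in ascending address order.
 */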

/**
 * __next_mem_range_rev - generic next function for for_each_*_range_rev()
 *
 * Finds the next range from type_a which is not marked as unsuitable
 * in type_b.
 *
 * @idx: pointer to u64 loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @type_a: pointer to memblock_type from where the range is taken
 * @type_b: pointer to memblock_type which excludes memory from being taken
 * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @out_nid: ptr to int for nid of the range, can be %NULL
 *
 * Reverse of __next_mem_range().
 */
void __init_memblock __next_mem_range_rev(u64 *idx, int nid,
					  struct memblock_type *type_a,
					  struct memblock_type *type_b,
					  phys_addr_t *out_start,
					  phys_addr_t *out_end, int *out_nid)
{
	int idx_a = *idx & 0xffffffff;
	int idx_b = *idx >> 32;

	if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
		nid = NUMA_NO_NODE;

	if (*idx == (u64)ULLONG_MAX) {
		idx_a = type_a->cnt - 1;
		if (type_b != NULL)
			idx_b = type_b->cnt;
		else
			idx_b = 0;	/* @type_b may be NULL, see below */
	}

	for (; idx_a >= 0; idx_a--) {
		struct memblock_region *m = &type_a->regions[idx_a];

		phys_addr_t m_start = m->base;
		phys_addr_t m_end = m->base + m->size;
		int m_nid = memblock_get_region_node(m);

		/* only memory regions are associated with nodes, check it */
		if (nid != NUMA_NO_NODE && nid != m_nid)
			continue;

		/* skip hotpluggable memory regions if needed */
		if (movable_node_is_enabled() && memblock_is_hotpluggable(m))
			continue;

		if (!type_b) {
			if (out_start)
				*out_start = m_start;
			if (out_end)
				*out_end = m_end;
			if (out_nid)
				*out_nid = m_nid;
			idx_a--;	/* step backwards: this is the reverse iterator */
			*idx = (u32)idx_a | (u64)idx_b << 32;
			return;
		}

		/* scan areas before each reservation */
		for (; idx_b >= 0; idx_b--) {
			struct memblock_region *r;
			phys_addr_t r_start;
			phys_addr_t r_end;

			r = &type_b->regions[idx_b];
			r_start = idx_b ? r[-1].base + r[-1].size : 0;
			r_end = idx_b < type_b->cnt ?
				r->base : ULLONG_MAX;
			/*
			 * if idx_b advanced past idx_a,
			 * break out to advance idx_a
			 */
			if (r_end <= m_start)
				break;
			/* if the two regions intersect, we're done */
			if (m_end > r_start) {
				if (out_start)
					*out_start = max(m_start, r_start);
				if (out_end)
					*out_end = min(m_end, r_end);
				if (out_nid)
					*out_nid = m_nid;
				if (m_start >= r_start)
					idx_a--;
				else
					idx_b--;
				*idx = (u32)idx_a | (u64)idx_b << 32;
				return;
			}
		}
	}
	/* signal end of iteration */
	*idx = ULLONG_MAX;
}

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
/*
 * Common iterator interface used to define for_each_mem_pfn_range().
 */
void __init_memblock __next_mem_pfn_range(int *idx, int nid,
				unsigned long *out_start_pfn,
				unsigned long *out_end_pfn, int *out_nid)
{
	struct memblock_type *type = &memblock.memory;
	struct memblock_region *r;

	while (++*idx < type->cnt) {
		r = &type->regions[*idx];

		if (PFN_UP(r->base) >= PFN_DOWN(r->base + r->size))
			continue;
		if (nid == MAX_NUMNODES || nid == r->nid)
			break;
	}
	if (*idx >= type->cnt) {
		*idx = -1;
		return;
	}

	if (out_start_pfn)
		*out_start_pfn = PFN_UP(r->base);
	if (out_end_pfn)
		*out_end_pfn = PFN_DOWN(r->base + r->size);
	if (out_nid)
		*out_nid = r->nid;
}

/**
 * memblock_set_node - set node ID on memblock regions
 * @base: base of area to set node ID for
 * @size: size of area to set node ID for
 * @type: memblock type to set node ID for
 * @nid: node ID to set
 *
 * Set the nid of memblock @type regions in [@base,@base+@size) to @nid.
 * Regions which cross the area boundaries are split as necessary.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init_memblock memblock_set_node(phys_addr_t base, phys_addr_t size,
				      struct memblock_type *type, int nid)
{
	int start_rgn, end_rgn;
	int i, ret;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = start_rgn; i < end_rgn; i++)
		memblock_set_region_node(&type->regions[i], nid);

	memblock_merge_regions(type);
	return 0;
}
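
/*
 * Usage sketch (illustrative only): NUMA init code stamps node IDs onto
 * the memory type once affinity information has been parsed, assuming
 * @start, @end and @nid describe one parsed affinity entry:
 *
 *	memblock_set_node(start, end - start, &memblock.memory, nid);
 */
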
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */

static phys_addr_t __init memblock_alloc_range_nid(phys_addr_t size,
					phys_addr_t align, phys_addr_t start,
					phys_addr_t end, int nid)
{
	phys_addr_t found;

	if (!align)
		align = SMP_CACHE_BYTES;

	found = memblock_find_in_range_node(size, align, start, end, nid);
	if (found && !memblock_reserve(found, size)) {
		/*
		 * The min_count is set to 0 so that memblock allocations are
		 * never reported as leaks.
		 */
		kmemleak_alloc(__va(found), size, 0, 0);
		return found;
	}
	return 0;
}

phys_addr_t __init memblock_alloc_range(phys_addr_t size, phys_addr_t align,
					phys_addr_t start, phys_addr_t end)
{
	return memblock_alloc_range_nid(size, align, start, end, NUMA_NO_NODE);
}

static phys_addr_t __init memblock_alloc_base_nid(phys_addr_t size,
					phys_addr_t align, phys_addr_t max_addr,
					int nid)
{
	return memblock_alloc_range_nid(size, align, 0, max_addr, nid);
}

phys_addr_t __init memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid)
{
	return memblock_alloc_base_nid(size, align, MEMBLOCK_ALLOC_ACCESSIBLE, nid);
}

phys_addr_t __init __memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
{
	return memblock_alloc_base_nid(size, align, max_addr, NUMA_NO_NODE);
}

phys_addr_t __init memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
{
	phys_addr_t alloc;

	alloc = __memblock_alloc_base(size, align, max_addr);

	if (alloc == 0)
		panic("ERROR: Failed to allocate 0x%llx bytes below 0x%llx.\n",
		      (unsigned long long) size, (unsigned long long) max_addr);

	return alloc;
}

phys_addr_t __init memblock_alloc(phys_addr_t size, phys_addr_t align)
{
	return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
}

phys_addr_t __init memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid)
{
	phys_addr_t res = memblock_alloc_nid(size, align, nid);

	if (res)
		return res;
	return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
}
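
/*
 * Usage sketch (illustrative only): a physical allocation followed by
 * conversion to a usable virtual pointer; unlike the
 * memblock_virt_alloc_*() family below, the phys allocators do not zero
 * the memory for the caller:
 *
 *	phys_addr_t pa;
 *
 *	pa = __memblock_alloc_base(PAGE_SIZE, PAGE_SIZE,
 *				   MEMBLOCK_ALLOC_ACCESSIBLE);
 *	if (pa)
 *		memset(phys_to_virt(pa), 0, PAGE_SIZE);
 */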

/**
 * memblock_virt_alloc_internal - allocate boot memory block
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region to allocate (phys address)
 * @max_addr: the upper bound of the memory region to allocate (phys address)
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * The @min_addr limit is dropped if it can not be satisfied and the allocation
 * will fall back to memory below @min_addr. Also, allocation may fall back
 * to any node in the system if the specified node can not
 * hold the requested memory.
 *
 * The allocation is performed from memory region limited by
 * memblock.current_limit if @max_addr == %BOOTMEM_ALLOC_ACCESSIBLE.
 *
 * The memory block is aligned on SMP_CACHE_BYTES if @align == 0.
 *
 * The phys address of allocated boot memory block is converted to virtual and
 * allocated memory is reset to 0.
 *
 * In addition, the function sets the min_count to 0 using kmemleak_alloc
 * for the allocated boot memory block, so that it is never reported as leaks.
 *
 * RETURNS:
 * Virtual address of allocated memory block on success, NULL on failure.
 */
static void * __init memblock_virt_alloc_internal(
				phys_addr_t size, phys_addr_t align,
				phys_addr_t min_addr, phys_addr_t max_addr,
				int nid)
{
	phys_addr_t alloc;
	void *ptr;

	if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
		nid = NUMA_NO_NODE;

	/*
	 * Detect any accidental use of these APIs after slab is ready, as at
	 * this moment memblock may be deinitialized already and its
	 * internal data may be destroyed (after execution of free_all_bootmem)
	 */
	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc_node(size, GFP_NOWAIT, nid);

	if (!align)
		align = SMP_CACHE_BYTES;

	if (max_addr > memblock.current_limit)
		max_addr = memblock.current_limit;

again:
	alloc = memblock_find_in_range_node(size, align, min_addr, max_addr,
					    nid);
	if (alloc)
		goto done;

	if (nid != NUMA_NO_NODE) {
		alloc = memblock_find_in_range_node(size, align, min_addr,
						    max_addr,  NUMA_NO_NODE);
		if (alloc)
			goto done;
	}

	if (min_addr) {
		min_addr = 0;
		goto again;
	} else {
		goto error;
	}

done:
	memblock_reserve(alloc, size);
	ptr = phys_to_virt(alloc);
	memset(ptr, 0, size);

	/*
	 * The min_count is set to 0 so that bootmem allocated blocks
	 * are never reported as leaks. This is because many of these blocks
	 * are only referred via the physical address which is not
	 * looked up by kmemleak.
	 */
	kmemleak_alloc(ptr, size, 0, 0);

	return ptr;

error:
	return NULL;
}

/**
 * memblock_virt_alloc_try_nid_nopanic - allocate boot memory block
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region from where the allocation
 *	  is preferred (phys address)
 * @max_addr: the upper bound of the memory region from where the allocation
 *	      is preferred (phys address), or %BOOTMEM_ALLOC_ACCESSIBLE to
 *	      allocate only from memory limited by memblock.current_limit value
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * Public version of memblock_virt_alloc_internal() which provides
 * additional debug information (including caller info), if enabled.
 *
 * RETURNS:
 * Virtual address of allocated memory block on success, NULL on failure.
 */
void * __init memblock_virt_alloc_try_nid_nopanic(
				phys_addr_t size, phys_addr_t align,
				phys_addr_t min_addr, phys_addr_t max_addr,
				int nid)
{
	memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=0x%llx max_addr=0x%llx %pF\n",
		     __func__, (u64)size, (u64)align, nid, (u64)min_addr,
		     (u64)max_addr, (void *)_RET_IP_);
	return memblock_virt_alloc_internal(size, align, min_addr,
					     max_addr, nid);
}

/**
 * memblock_virt_alloc_try_nid - allocate boot memory block with panicking
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region from where the allocation
 *	  is preferred (phys address)
 * @max_addr: the upper bound of the memory region from where the allocation
 *	      is preferred (phys address), or %BOOTMEM_ALLOC_ACCESSIBLE to
 *	      allocate only from memory limited by memblock.current_limit value
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * Public panicking version of memblock_virt_alloc_try_nid_nopanic()
 * which provides debug information (including caller info), if enabled,
 * and panics if the request can not be satisfied.
 *
 * RETURNS:
 * Virtual address of allocated memory block on success, NULL on failure.
 */
void * __init memblock_virt_alloc_try_nid(
			phys_addr_t size, phys_addr_t align,
			phys_addr_t min_addr, phys_addr_t max_addr,
			int nid)
{
	void *ptr;

	memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=0x%llx max_addr=0x%llx %pF\n",
		     __func__, (u64)size, (u64)align, nid, (u64)min_addr,
		     (u64)max_addr, (void *)_RET_IP_);
	ptr = memblock_virt_alloc_internal(size, align,
					   min_addr, max_addr, nid);
	if (ptr)
		return ptr;

	panic("%s: Failed to allocate %llu bytes align=0x%llx nid=%d from=0x%llx max_addr=0x%llx\n",
	      __func__, (u64)size, (u64)align, nid, (u64)min_addr,
	      (u64)max_addr);
	return NULL;
}
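
/*
 * Usage sketch (illustrative only): allocating a zeroed, preferably
 * node-local table during early boot, assuming @sz and @nid are
 * caller-provided:
 *
 *	table = memblock_virt_alloc_try_nid_nopanic(sz, SMP_CACHE_BYTES,
 *						    0, BOOTMEM_ALLOC_ACCESSIBLE,
 *						    nid);
 *
 * The nopanic variant returns NULL on failure so the caller can fall
 * back; the returned memory is already zeroed by
 * memblock_virt_alloc_internal().
 */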

/**
 * __memblock_free_early - free boot memory block
 * @base: phys starting address of the boot memory block
 * @size: size of the boot memory block in bytes
 *
 * Free boot memory block previously allocated by memblock_virt_alloc_xx() API.
 * The freed memory will not be released to the buddy allocator.
 */
void __init __memblock_free_early(phys_addr_t base, phys_addr_t size)
{
	memblock_dbg("%s: [%#016llx-%#016llx] %pF\n",
		     __func__, (u64)base, (u64)base + size - 1,
		     (void *)_RET_IP_);
	kmemleak_free_part(__va(base), size);
	memblock_remove_range(&memblock.reserved, base, size);
}

/**
 * __memblock_free_late - free bootmem block pages directly to buddy allocator
 * @base: phys starting address of the boot memory block
 * @size: size of the boot memory block in bytes
 *
 * This is only useful when the bootmem allocator has already been torn
 * down, but we are still initializing the system.  Pages are released directly
 * to the buddy allocator, no bootmem metadata is updated because it is gone.
 */
void __init __memblock_free_late(phys_addr_t base, phys_addr_t size)
{
	u64 cursor, end;

	memblock_dbg("%s: [%#016llx-%#016llx] %pF\n",
		     __func__, (u64)base, (u64)base + size - 1,
		     (void *)_RET_IP_);
	kmemleak_free_part(__va(base), size);
	cursor = PFN_UP(base);
	end = PFN_DOWN(base + size);

	for (; cursor < end; cursor++) {
		__free_pages_bootmem(pfn_to_page(cursor), cursor, 0);
		totalram_pages++;
	}
}
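
/*
 * Usage sketch (illustrative only): releasing a no-longer-needed early
 * reservation after the bootmem allocator is gone, assuming @start and
 * @size describe a range that was memblock_reserve()d earlier:
 *
 *	__memblock_free_late(start, size);	// pages go to the buddy lists
 */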

/*
 * Remaining API functions
 */

phys_addr_t __init memblock_phys_mem_size(void)
{
	return memblock.memory.total_size;
}

phys_addr_t __init memblock_mem_size(unsigned long limit_pfn)
{
	unsigned long pages = 0;
	struct memblock_region *r;
	unsigned long start_pfn, end_pfn;

	for_each_memblock(memory, r) {
		start_pfn = memblock_region_memory_base_pfn(r);
		end_pfn = memblock_region_memory_end_pfn(r);
		start_pfn = min_t(unsigned long, start_pfn, limit_pfn);
		end_pfn = min_t(unsigned long, end_pfn, limit_pfn);
		pages += end_pfn - start_pfn;
	}

	return PFN_PHYS(pages);
}

/* lowest address */
phys_addr_t __init_memblock memblock_start_of_DRAM(void)
{
	return memblock.memory.regions[0].base;
}

phys_addr_t __init_memblock memblock_end_of_DRAM(void)
{
	int idx = memblock.memory.cnt - 1;

	return (memblock.memory.regions[idx].base + memblock.memory.regions[idx].size);
}

void __init memblock_enforce_memory_limit(phys_addr_t limit)
{
	phys_addr_t max_addr = (phys_addr_t)ULLONG_MAX;
	struct memblock_region *r;

	if (!limit)
		return;

	/* find out max address */
	for_each_memblock(memory, r) {
		if (limit <= r->size) {
			max_addr = r->base + limit;
			break;
		}
		limit -= r->size;
	}

	/* truncate both memory and reserved regions */
	memblock_remove_range(&memblock.memory, max_addr,
			      (phys_addr_t)ULLONG_MAX);
	memblock_remove_range(&memblock.reserved, max_addr,
			      (phys_addr_t)ULLONG_MAX);
}

static int __init_memblock memblock_search(struct memblock_type *type, phys_addr_t addr)
{
	unsigned int left = 0, right = type->cnt;

	do {
		unsigned int mid = (right + left) / 2;

		if (addr < type->regions[mid].base)
			right = mid;
		else if (addr >= (type->regions[mid].base +
				  type->regions[mid].size))
			left = mid + 1;
		else
			return mid;
	} while (left < right);
	return -1;
}

int __init memblock_is_reserved(phys_addr_t addr)
{
	return memblock_search(&memblock.reserved, addr) != -1;
}

int __init_memblock memblock_is_memory(phys_addr_t addr)
{
	return memblock_search(&memblock.memory, addr) != -1;
}

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
int __init_memblock memblock_search_pfn_nid(unsigned long pfn,
			 unsigned long *start_pfn, unsigned long *end_pfn)
{
	struct memblock_type *type = &memblock.memory;
	int mid = memblock_search(type, PFN_PHYS(pfn));

	if (mid == -1)
		return -1;

	*start_pfn = PFN_DOWN(type->regions[mid].base);
	*end_pfn = PFN_DOWN(type->regions[mid].base + type->regions[mid].size);

	return type->regions[mid].nid;
}
#endif

/**
 * memblock_is_region_memory - check if a region is a subset of memory
 * @base: base of region to check
 * @size: size of region to check
 *
 * Check if the region [@base, @base+@size) is a subset of a memory block.
 *
 * RETURNS:
 * 0 if false, non-zero if true
 */
int __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size)
{
	int idx = memblock_search(&memblock.memory, base);
	phys_addr_t end = base + memblock_cap_size(base, &size);

	if (idx == -1)
		return 0;
	return memblock.memory.regions[idx].base <= base &&
		(memblock.memory.regions[idx].base +
		 memblock.memory.regions[idx].size) >= end;
}

/**
 * memblock_is_region_reserved - check if a region intersects reserved memory
 * @base: base of region to check
 * @size: size of region to check
 *
 * Check if the region [@base, @base+@size) intersects a reserved memory block.
 *
 * RETURNS:
 * 0 if false, non-zero if true
 */
int __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size)
{
	memblock_cap_size(base, &size);
	return memblock_overlaps_region(&memblock.reserved, base, size) >= 0;
}

void __init_memblock memblock_trim_memory(phys_addr_t align)
{
	phys_addr_t start, end, orig_start, orig_end;
	struct memblock_region *r;

	for_each_memblock(memory, r) {
		orig_start = r->base;
		orig_end = r->base + r->size;
		start = round_up(orig_start, align);
		end = round_down(orig_end, align);

		if (start == orig_start && end == orig_end)
			continue;

		if (start < end) {
			r->base = start;
			r->size = end - start;
		} else {
			memblock_remove_region(&memblock.memory,
					       r - memblock.memory.regions);
			r--;
		}
	}
}

void __init_memblock memblock_set_current_limit(phys_addr_t limit)
{
	memblock.current_limit = limit;
}

phys_addr_t __init_memblock memblock_get_current_limit(void)
{
	return memblock.current_limit;
}

static void __init_memblock memblock_dump(struct memblock_type *type, char *name)
{
	unsigned long long base, size;
	unsigned long flags;
	int i;

	pr_info(" %s.cnt  = 0x%lx\n", name, type->cnt);

	for (i = 0; i < type->cnt; i++) {
		struct memblock_region *rgn = &type->regions[i];
		char nid_buf[32] = "";

		base = rgn->base;
		size = rgn->size;
		flags = rgn->flags;
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
		if (memblock_get_region_node(rgn) != MAX_NUMNODES)
			snprintf(nid_buf, sizeof(nid_buf), " on node %d",
				 memblock_get_region_node(rgn));
#endif
		pr_info(" %s[%#x]\t[%#016llx-%#016llx], %#llx bytes%s flags: %#lx\n",
			name, i, base, base + size - 1, size, nid_buf, flags);
	}
}

void __init_memblock __memblock_dump_all(void)
{
	pr_info("MEMBLOCK configuration:\n");
	pr_info(" memory size = %#llx reserved size = %#llx\n",
		(unsigned long long)memblock.memory.total_size,
		(unsigned long long)memblock.reserved.total_size);

	memblock_dump(&memblock.memory, "memory");
	memblock_dump(&memblock.reserved, "reserved");
}

void __init memblock_allow_resize(void)
{
	memblock_can_resize = 1;
}

static int __init early_memblock(char *p)
{
	if (p && strstr(p, "debug"))
		memblock_debug = 1;
	return 0;
}
early_param("memblock", early_memblock);
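
/*
 * Usage sketch (illustrative only): booting with
 *
 *	memblock=debug
 *
 * on the kernel command line sets memblock_debug and enables the
 * memblock_dbg() tracing used throughout this file.
 */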

#if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_ARCH_DISCARD_MEMBLOCK)

static int memblock_debug_show(struct seq_file *m, void *private)
{
	struct memblock_type *type = m->private;
	struct memblock_region *reg;
	int i;

	for (i = 0; i < type->cnt; i++) {
		reg = &type->regions[i];
		seq_printf(m, "%4d: ", i);
		if (sizeof(phys_addr_t) == 4)
			seq_printf(m, "0x%08lx..0x%08lx\n",
				   (unsigned long)reg->base,
				   (unsigned long)(reg->base + reg->size - 1));
		else
			seq_printf(m, "0x%016llx..0x%016llx\n",
				   (unsigned long long)reg->base,
				   (unsigned long long)(reg->base + reg->size - 1));
	}
	return 0;
}

static int memblock_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, memblock_debug_show, inode->i_private);
}

static const struct file_operations memblock_debug_fops = {
	.open = memblock_debug_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int __init memblock_init_debugfs(void)
{
	struct dentry *root = debugfs_create_dir("memblock", NULL);
	if (!root)
		return -ENXIO;
	debugfs_create_file("memory", S_IRUGO, root, &memblock.memory, &memblock_debug_fops);
	debugfs_create_file("reserved", S_IRUGO, root, &memblock.reserved, &memblock_debug_fops);
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
	debugfs_create_file("physmem", S_IRUGO, root, &memblock.physmem, &memblock_debug_fops);
#endif

	return 0;
}
__initcall(memblock_init_debugfs);

#endif /* CONFIG_DEBUG_FS */