/*
 * Procedures for maintaining information about logical memory blocks.
 *
 * Peter Bergner, IBM Corp.	June 2001.
 * Copyright (C) 2001 Peter Bergner.
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/poison.h>
#include <linux/pfn.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/memblock.h>

#include <asm-generic/sections.h>
#include <linux/io.h>

#include "internal.h"

static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
static struct memblock_region memblock_physmem_init_regions[INIT_PHYSMEM_REGIONS] __initdata_memblock;
#endif

struct memblock memblock __initdata_memblock = {
	.memory.regions		= memblock_memory_init_regions,
	.memory.cnt		= 1,	/* empty dummy entry */
	.memory.max		= INIT_MEMBLOCK_REGIONS,

	.reserved.regions	= memblock_reserved_init_regions,
	.reserved.cnt		= 1,	/* empty dummy entry */
	.reserved.max		= INIT_MEMBLOCK_REGIONS,

#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
	.physmem.regions	= memblock_physmem_init_regions,
	.physmem.cnt		= 1,	/* empty dummy entry */
	.physmem.max		= INIT_PHYSMEM_REGIONS,
#endif

	.bottom_up		= false,
	.current_limit		= MEMBLOCK_ALLOC_ANYWHERE,
};

int memblock_debug __initdata_memblock;
#ifdef CONFIG_MOVABLE_NODE
bool movable_node_enabled __initdata_memblock = false;
#endif
static bool system_has_some_mirror __initdata_memblock = false;
static int memblock_can_resize __initdata_memblock;
static int memblock_memory_in_slab __initdata_memblock = 0;
static int memblock_reserved_in_slab __initdata_memblock = 0;

ulong __init_memblock choose_memblock_flags(void)
{
	return system_has_some_mirror ? MEMBLOCK_MIRROR : MEMBLOCK_NONE;
}

/* inline so we don't get a warning when pr_debug is compiled out */
static __init_memblock const char *
memblock_type_name(struct memblock_type *type)
{
	if (type == &memblock.memory)
		return "memory";
	else if (type == &memblock.reserved)
		return "reserved";
	else
		return "unknown";
}

/* adjust *@size so that (@base + *@size) doesn't overflow, return new size */
static inline phys_addr_t memblock_cap_size(phys_addr_t base, phys_addr_t *size)
{
	return *size = min(*size, (phys_addr_t)ULLONG_MAX - base);
}
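
/*
 * Example (assuming a 64-bit phys_addr_t): with base = ULLONG_MAX - 0x1000
 * and *size = 0x3000, base + *size would overflow, so *size is clamped to
 * 0x1000.  Callers may then use base + memblock_cap_size(base, &size) as a
 * safe exclusive end address.
 */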

/*
 * Address comparison utilities
 */
static unsigned long __init_memblock memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1,
				       phys_addr_t base2, phys_addr_t size2)
{
	return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
}

bool __init_memblock memblock_overlaps_region(struct memblock_type *type,
					phys_addr_t base, phys_addr_t size)
{
	unsigned long i;

	for (i = 0; i < type->cnt; i++) {
		phys_addr_t rgnbase = type->regions[i].base;
		phys_addr_t rgnsize = type->regions[i].size;
		if (memblock_addrs_overlap(base, size, rgnbase, rgnsize))
			break;
	}

	return i < type->cnt;
}

/**
 * __memblock_find_range_bottom_up - find free area utility in bottom-up
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Utility called from memblock_find_in_range_node(), find free area bottom-up.
 *
 * RETURNS:
 * Found address on success, 0 on failure.
 */
static phys_addr_t __init_memblock
__memblock_find_range_bottom_up(phys_addr_t start, phys_addr_t end,
				phys_addr_t size, phys_addr_t align, int nid,
				ulong flags)
{
	phys_addr_t this_start, this_end, cand;
	u64 i;

	for_each_free_mem_range(i, nid, flags, &this_start, &this_end, NULL) {
		this_start = clamp(this_start, start, end);
		this_end = clamp(this_end, start, end);

		cand = round_up(this_start, align);
		if (cand < this_end && this_end - cand >= size)
			return cand;
	}

	return 0;
}

/**
 * __memblock_find_range_top_down - find free area utility, in top-down
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Utility called from memblock_find_in_range_node(), find free area top-down.
 *
 * RETURNS:
 * Found address on success, 0 on failure.
 */
static phys_addr_t __init_memblock
__memblock_find_range_top_down(phys_addr_t start, phys_addr_t end,
			       phys_addr_t size, phys_addr_t align, int nid,
			       ulong flags)
{
	phys_addr_t this_start, this_end, cand;
	u64 i;

	for_each_free_mem_range_reverse(i, nid, flags, &this_start, &this_end,
					NULL) {
		this_start = clamp(this_start, start, end);
		this_end = clamp(this_end, start, end);

		if (this_end < size)
			continue;

		cand = round_down(this_end - size, align);
		if (cand >= this_start)
			return cand;
	}

	return 0;
}

/**
 * memblock_find_in_range_node - find free area in given range and node
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Find @size free area aligned to @align in the specified range and node.
 *
 * When the allocation direction is bottom-up, @start should be above the
 * end of the kernel image; otherwise it is bumped up to it.  The reason
 * is that we want bottom-up allocations to land just above the kernel
 * image, so it is highly likely that the allocated memory and the
 * kernel will reside on the same node.
 *
 * If bottom-up allocation fails, allocation is retried top-down.
 *
 * RETURNS:
 * Found address on success, 0 on failure.
 */
phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size,
					phys_addr_t align, phys_addr_t start,
					phys_addr_t end, int nid, ulong flags)
{
	phys_addr_t kernel_end, ret;

	/* pump up @end */
	if (end == MEMBLOCK_ALLOC_ACCESSIBLE)
		end = memblock.current_limit;

	/* avoid allocating the first page */
	start = max_t(phys_addr_t, start, PAGE_SIZE);
	end = max(start, end);
	kernel_end = __pa_symbol(_end);

	/*
	 * try bottom-up allocation only when bottom-up mode
	 * is set and @end is above the kernel image.
	 */
	if (memblock_bottom_up() && end > kernel_end) {
		phys_addr_t bottom_up_start;

		/* make sure we will allocate above the kernel */
		bottom_up_start = max(start, kernel_end);

		/* ok, try bottom-up allocation first */
		ret = __memblock_find_range_bottom_up(bottom_up_start, end,
						      size, align, nid, flags);
		if (ret)
			return ret;

		/*
		 * we always limit bottom-up allocation above the kernel,
		 * but top-down allocation doesn't have the limit, so
		 * retrying top-down allocation may succeed when bottom-up
		 * allocation failed.
		 *
		 * bottom-up allocation is expected to fail very rarely, so
		 * we use WARN_ONCE() here to see the stack trace if a
		 * failure happens.
		 */
		WARN_ONCE(1, "memblock: bottom-up allocation failed, "
			     "memory hotunplug may be affected\n");
	}

	return __memblock_find_range_top_down(start, end, size, align, nid,
					      flags);
}
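
/*
 * Typical use of the finder above (sketch): pass MEMBLOCK_ALLOC_ACCESSIBLE
 * as @end to search everything below memblock.current_limit, e.g.:
 *
 *	addr = memblock_find_in_range_node(SZ_1M, SZ_4K, 0,
 *					   MEMBLOCK_ALLOC_ACCESSIBLE,
 *					   nid, MEMBLOCK_NONE);
 *
 * Note that this only finds a suitable range; the caller still has to
 * memblock_reserve() it before the memory is actually claimed.
 */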

/**
 * memblock_find_in_range - find free area in given range
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
 * @size: size of free area to find
 * @align: alignment of free area to find
 *
 * Find @size free area aligned to @align in the specified range.
 *
 * RETURNS:
 * Found address on success, 0 on failure.
 */
phys_addr_t __init_memblock memblock_find_in_range(phys_addr_t start,
					phys_addr_t end, phys_addr_t size,
					phys_addr_t align)
{
	phys_addr_t ret;
	ulong flags = choose_memblock_flags();

again:
	ret = memblock_find_in_range_node(size, align, start, end,
					    NUMA_NO_NODE, flags);

	if (!ret && (flags & MEMBLOCK_MIRROR)) {
		pr_warn("Could not allocate %pap bytes of mirrored memory\n",
			&size);
		flags &= ~MEMBLOCK_MIRROR;
		goto again;
	}

	return ret;
}

static void __init_memblock memblock_remove_region(struct memblock_type *type, unsigned long r)
{
	type->total_size -= type->regions[r].size;
	memmove(&type->regions[r], &type->regions[r + 1],
		(type->cnt - (r + 1)) * sizeof(type->regions[r]));
	type->cnt--;

	/* Special case for empty arrays */
	if (type->cnt == 0) {
		WARN_ON(type->total_size != 0);
		type->cnt = 1;
		type->regions[0].base = 0;
		type->regions[0].size = 0;
		type->regions[0].flags = 0;
		memblock_set_region_node(&type->regions[0], MAX_NUMNODES);
	}
}

#ifdef CONFIG_ARCH_DISCARD_MEMBLOCK

phys_addr_t __init_memblock get_allocated_memblock_reserved_regions_info(
					phys_addr_t *addr)
{
	if (memblock.reserved.regions == memblock_reserved_init_regions)
		return 0;

	*addr = __pa(memblock.reserved.regions);

	return PAGE_ALIGN(sizeof(struct memblock_region) *
			  memblock.reserved.max);
}

phys_addr_t __init_memblock get_allocated_memblock_memory_regions_info(
					phys_addr_t *addr)
{
	if (memblock.memory.regions == memblock_memory_init_regions)
		return 0;

	*addr = __pa(memblock.memory.regions);

	return PAGE_ALIGN(sizeof(struct memblock_region) *
			  memblock.memory.max);
}

#endif

/**
 * memblock_double_array - double the size of the memblock regions array
 * @type: memblock type of the regions array being doubled
 * @new_area_start: starting address of memory range to avoid overlap with
 * @new_area_size: size of memory range to avoid overlap with
 *
 * Double the size of the @type regions array. If memblock is being used to
 * allocate memory for a new reserved regions array and there is a previously
 * allocated memory range [@new_area_start,@new_area_start+@new_area_size]
 * waiting to be reserved, ensure the memory used by the new array does
 * not overlap.
 *
 * RETURNS:
 * 0 on success, -1 on failure.
 */
static int __init_memblock memblock_double_array(struct memblock_type *type,
						phys_addr_t new_area_start,
						phys_addr_t new_area_size)
{
	struct memblock_region *new_array, *old_array;
	phys_addr_t old_alloc_size, new_alloc_size;
	phys_addr_t old_size, new_size, addr;
	int use_slab = slab_is_available();
	int *in_slab;

	/* We don't allow resizing until we know about the reserved regions
	 * of memory that aren't suitable for allocation
	 */
	if (!memblock_can_resize)
		return -1;

	/* Calculate new doubled size */
	old_size = type->max * sizeof(struct memblock_region);
	new_size = old_size << 1;
	/*
	 * We need to allocate the new array aligned to PAGE_SIZE,
	 * so we can free it completely later.
	 */
	old_alloc_size = PAGE_ALIGN(old_size);
	new_alloc_size = PAGE_ALIGN(new_size);

	/* Retrieve the slab flag */
	if (type == &memblock.memory)
		in_slab = &memblock_memory_in_slab;
	else
		in_slab = &memblock_reserved_in_slab;

	/* Try to find some space for it.
	 *
	 * WARNING: We assume that either slab_is_available() and we use it or
	 * we use MEMBLOCK for allocations. That means that this is unsafe to
	 * use when bootmem is currently active (unless bootmem itself is
	 * implemented on top of MEMBLOCK which isn't the case yet)
	 *
	 * This should however not be an issue for now, as we currently only
	 * call into MEMBLOCK while it's still active, or much later when slab
	 * is active for memory hotplug operations
	 */
	if (use_slab) {
		new_array = kmalloc(new_size, GFP_KERNEL);
		addr = new_array ? __pa(new_array) : 0;
	} else {
		/* only exclude range when trying to double reserved.regions */
		if (type != &memblock.reserved)
			new_area_start = new_area_size = 0;

		addr = memblock_find_in_range(new_area_start + new_area_size,
						memblock.current_limit,
						new_alloc_size, PAGE_SIZE);
		if (!addr && new_area_size)
			addr = memblock_find_in_range(0,
				min(new_area_start, memblock.current_limit),
				new_alloc_size, PAGE_SIZE);

		new_array = addr ? __va(addr) : NULL;
	}
	if (!addr) {
		pr_err("memblock: Failed to double %s array from %ld to %ld entries !\n",
		       memblock_type_name(type), type->max, type->max * 2);
		return -1;
	}

	memblock_dbg("memblock: %s is doubled to %ld at [%#010llx-%#010llx]",
			memblock_type_name(type), type->max * 2, (u64)addr,
			(u64)addr + new_size - 1);

	/*
	 * Found space, we now need to move the array over before we add the
	 * reserved region since it may be our reserved array itself that is
	 * full.
	 */
	memcpy(new_array, type->regions, old_size);
	memset(new_array + type->max, 0, old_size);
	old_array = type->regions;
	type->regions = new_array;
	type->max <<= 1;

	/* Free old array. We needn't free it if the array is the static one */
	if (*in_slab)
		kfree(old_array);
	else if (old_array != memblock_memory_init_regions &&
		 old_array != memblock_reserved_init_regions)
		memblock_free(__pa(old_array), old_alloc_size);

	/*
	 * Reserve the new array if that comes from the memblock.  Otherwise, we
	 * needn't do it
	 */
	if (!use_slab)
		BUG_ON(memblock_reserve(addr, new_alloc_size));

	/* Update slab flag */
	*in_slab = use_slab;

	return 0;
}

/**
 * memblock_merge_regions - merge neighboring compatible regions
 * @type: memblock type to scan
 *
 * Scan @type and merge neighboring compatible regions.
 */
static void __init_memblock memblock_merge_regions(struct memblock_type *type)
{
	int i = 0;

	/* cnt never goes below 1 */
	while (i < type->cnt - 1) {
		struct memblock_region *this = &type->regions[i];
		struct memblock_region *next = &type->regions[i + 1];

		if (this->base + this->size != next->base ||
		    memblock_get_region_node(this) !=
		    memblock_get_region_node(next) ||
		    this->flags != next->flags) {
			BUG_ON(this->base + this->size > next->base);
			i++;
			continue;
		}

		this->size += next->size;
		/* move forward from next + 1, index of which is i + 2 */
		memmove(next, next + 1, (type->cnt - (i + 2)) * sizeof(*next));
		type->cnt--;
	}
}
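
/*
 * For example, two regions [0x1000, 0x2000) and [0x2000, 0x3000) with the
 * same node id and flags are merged above into a single [0x1000, 0x3000)
 * region, while adjacent regions that differ in nid or flags are left
 * alone.
 */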

/**
 * memblock_insert_region - insert new memblock region
 * @type:	memblock type to insert into
 * @idx:	index for the insertion point
 * @base:	base address of the new region
 * @size:	size of the new region
 * @nid:	node id of the new region
 * @flags:	flags of the new region
 *
 * Insert new memblock region [@base,@base+@size) into @type at @idx.
 * @type must already have extra room to accommodate the new region.
 */
static void __init_memblock memblock_insert_region(struct memblock_type *type,
						   int idx, phys_addr_t base,
						   phys_addr_t size,
						   int nid, unsigned long flags)
{
	struct memblock_region *rgn = &type->regions[idx];

	BUG_ON(type->cnt >= type->max);
	memmove(rgn + 1, rgn, (type->cnt - idx) * sizeof(*rgn));
	rgn->base = base;
	rgn->size = size;
	rgn->flags = flags;
	memblock_set_region_node(rgn, nid);
	type->cnt++;
	type->total_size += size;
}

/**
 * memblock_add_range - add new memblock region
 * @type: memblock type to add new region into
 * @base: base address of the new region
 * @size: size of the new region
 * @nid: nid of the new region
 * @flags: flags of the new region
 *
 * Add new memblock region [@base,@base+@size) into @type.  The new region
 * is allowed to overlap with existing ones - overlaps don't affect already
 * existing regions.  @type is guaranteed to be minimal (all neighbouring
 * compatible regions are merged) after the addition.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init_memblock memblock_add_range(struct memblock_type *type,
				phys_addr_t base, phys_addr_t size,
				int nid, unsigned long flags)
{
	bool insert = false;
	phys_addr_t obase = base;
	phys_addr_t end = base + memblock_cap_size(base, &size);
	int i, nr_new;

	if (!size)
		return 0;

	/* special case for empty array */
	if (type->regions[0].size == 0) {
		WARN_ON(type->cnt != 1 || type->total_size);
		type->regions[0].base = base;
		type->regions[0].size = size;
		type->regions[0].flags = flags;
		memblock_set_region_node(&type->regions[0], nid);
		type->total_size = size;
		return 0;
	}
repeat:
	/*
	 * The following is executed twice.  Once with %false @insert and
	 * then with %true.  The first counts the number of regions needed
	 * to accommodate the new area.  The second actually inserts them.
	 */
	base = obase;
	nr_new = 0;

	for (i = 0; i < type->cnt; i++) {
		struct memblock_region *rgn = &type->regions[i];
		phys_addr_t rbase = rgn->base;
		phys_addr_t rend = rbase + rgn->size;

		if (rbase >= end)
			break;
		if (rend <= base)
			continue;
		/*
		 * @rgn overlaps.  If it separates the lower part of new
		 * area, insert that portion.
		 */
		if (rbase > base) {
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
			WARN_ON(nid != memblock_get_region_node(rgn));
#endif
			WARN_ON(flags != rgn->flags);
			nr_new++;
			if (insert)
				memblock_insert_region(type, i++, base,
						       rbase - base, nid,
						       flags);
		}
		/* area below @rend is dealt with, forget about it */
		base = min(rend, end);
	}

	/* insert the remaining portion */
	if (base < end) {
		nr_new++;
		if (insert)
			memblock_insert_region(type, i, base, end - base,
					       nid, flags);
	}

	/*
	 * If this was the first round, resize array and repeat for actual
	 * insertions; otherwise, merge and return.
	 */
	if (!insert) {
		while (type->cnt + nr_new > type->max)
			if (memblock_double_array(type, obase, size) < 0)
				return -ENOMEM;
		insert = true;
		goto repeat;
	} else {
		memblock_merge_regions(type);
		return 0;
	}
}
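
/*
 * Example of the two-pass scheme above: adding [0x1000, 0x4000) to a type
 * that already holds [0x2000, 0x3000) first counts two new regions,
 * [0x1000, 0x2000) and [0x3000, 0x4000), doubling the array if needed;
 * the second pass inserts them and memblock_merge_regions() then
 * coalesces the three neighbouring, compatible regions into one.
 */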

int __init_memblock memblock_add_node(phys_addr_t base, phys_addr_t size,
				       int nid)
{
	return memblock_add_range(&memblock.memory, base, size, nid, 0);
}

static int __init_memblock memblock_add_region(phys_addr_t base,
						phys_addr_t size,
						int nid,
						unsigned long flags)
{
	struct memblock_type *type = &memblock.memory;

	memblock_dbg("memblock_add: [%#016llx-%#016llx] flags %#02lx %pF\n",
		     (unsigned long long)base,
		     (unsigned long long)base + size - 1,
		     flags, (void *)_RET_IP_);

	return memblock_add_range(type, base, size, nid, flags);
}

int __init_memblock memblock_add(phys_addr_t base, phys_addr_t size)
{
	return memblock_add_region(base, size, MAX_NUMNODES, 0);
}

/**
 * memblock_isolate_range - isolate given range into disjoint memblocks
 * @type: memblock type to isolate range for
 * @base: base of range to isolate
 * @size: size of range to isolate
 * @start_rgn: out parameter for the start of isolated region
 * @end_rgn: out parameter for the end of isolated region
 *
 * Walk @type and ensure that regions don't cross the boundaries defined by
 * [@base,@base+@size).  Crossing regions are split at the boundaries,
 * which may create at most two more regions.  The index of the first
 * region inside the range is returned in *@start_rgn and end in *@end_rgn.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
static int __init_memblock memblock_isolate_range(struct memblock_type *type,
					phys_addr_t base, phys_addr_t size,
					int *start_rgn, int *end_rgn)
{
	phys_addr_t end = base + memblock_cap_size(base, &size);
	int i;

	*start_rgn = *end_rgn = 0;

	if (!size)
		return 0;

	/* we'll create at most two more regions */
	while (type->cnt + 2 > type->max)
		if (memblock_double_array(type, base, size) < 0)
			return -ENOMEM;

	for (i = 0; i < type->cnt; i++) {
		struct memblock_region *rgn = &type->regions[i];
		phys_addr_t rbase = rgn->base;
		phys_addr_t rend = rbase + rgn->size;

		if (rbase >= end)
			break;
		if (rend <= base)
			continue;

		if (rbase < base) {
			/*
			 * @rgn intersects from below.  Split and continue
			 * to process the next region - the new top half.
			 */
			rgn->base = base;
			rgn->size -= base - rbase;
			type->total_size -= base - rbase;
			memblock_insert_region(type, i, rbase, base - rbase,
					       memblock_get_region_node(rgn),
					       rgn->flags);
		} else if (rend > end) {
			/*
			 * @rgn intersects from above.  Split and redo the
			 * current region - the new bottom half.
			 */
			rgn->base = end;
			rgn->size -= end - rbase;
			type->total_size -= end - rbase;
			memblock_insert_region(type, i--, rbase, end - rbase,
					       memblock_get_region_node(rgn),
					       rgn->flags);
		} else {
			/* @rgn is fully contained, record it */
			if (!*end_rgn)
				*start_rgn = i;
			*end_rgn = i + 1;
		}
	}

	return 0;
}
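
/*
 * For example, isolating [0x2000, 0x6000) from a single region
 * [0x0000, 0x8000) splits it into [0x0000, 0x2000), [0x2000, 0x6000) and
 * [0x6000, 0x8000); *start_rgn and *end_rgn then delimit the middle
 * region so that callers such as memblock_remove_range() operate on
 * whole regions only.
 */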

static int __init_memblock memblock_remove_range(struct memblock_type *type,
					  phys_addr_t base, phys_addr_t size)
{
	int start_rgn, end_rgn;
	int i, ret;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = end_rgn - 1; i >= start_rgn; i--)
		memblock_remove_region(type, i);
	return 0;
}

int __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size)
{
	return memblock_remove_range(&memblock.memory, base, size);
}


int __init_memblock memblock_free(phys_addr_t base, phys_addr_t size)
{
	memblock_dbg("   memblock_free: [%#016llx-%#016llx] %pF\n",
		     (unsigned long long)base,
		     (unsigned long long)base + size - 1,
		     (void *)_RET_IP_);

	kmemleak_free_part(__va(base), size);
	return memblock_remove_range(&memblock.reserved, base, size);
}

static int __init_memblock memblock_reserve_region(phys_addr_t base,
						   phys_addr_t size,
						   int nid,
						   unsigned long flags)
{
	struct memblock_type *type = &memblock.reserved;

	memblock_dbg("memblock_reserve: [%#016llx-%#016llx] flags %#02lx %pF\n",
		     (unsigned long long)base,
		     (unsigned long long)base + size - 1,
		     flags, (void *)_RET_IP_);

	return memblock_add_range(type, base, size, nid, flags);
}

int __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size)
{
	return memblock_reserve_region(base, size, MAX_NUMNODES, 0);
}

/**
 * memblock_setclr_flag - set or clear flag for a memory region
 * @base: base address of the region
 * @size: size of the region
 * @set: set (1) or clear (0) the flag
 * @flag: the flag to update
 *
 * This function isolates region [@base, @base + @size), and sets/clears flag.
 *
 * Return 0 on success, -errno on failure.
 */
static int __init_memblock memblock_setclr_flag(phys_addr_t base,
				phys_addr_t size, int set, int flag)
{
	struct memblock_type *type = &memblock.memory;
	int i, ret, start_rgn, end_rgn;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = start_rgn; i < end_rgn; i++)
		if (set)
			memblock_set_region_flags(&type->regions[i], flag);
		else
			memblock_clear_region_flags(&type->regions[i], flag);

	memblock_merge_regions(type);
	return 0;
}

/**
 * memblock_mark_hotplug - Mark hotpluggable memory with flag MEMBLOCK_HOTPLUG.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return 0 on success, -errno on failure.
 */
int __init_memblock memblock_mark_hotplug(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(base, size, 1, MEMBLOCK_HOTPLUG);
}

/**
 * memblock_clear_hotplug - Clear flag MEMBLOCK_HOTPLUG for a specified region.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return 0 on success, -errno on failure.
 */
int __init_memblock memblock_clear_hotplug(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(base, size, 0, MEMBLOCK_HOTPLUG);
}

/**
 * memblock_mark_mirror - Mark mirrored memory with flag MEMBLOCK_MIRROR.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return 0 on success, -errno on failure.
 */
int __init_memblock memblock_mark_mirror(phys_addr_t base, phys_addr_t size)
{
	system_has_some_mirror = true;

	return memblock_setclr_flag(base, size, 1, MEMBLOCK_MIRROR);
}


/**
 * __next_reserved_mem_region - next function for for_each_reserved_region()
 * @idx: pointer to u64 loop variable
 * @out_start: ptr to phys_addr_t for start address of the region, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the region, can be %NULL
 *
 * Iterate over all reserved memory regions.
 */
void __init_memblock __next_reserved_mem_region(u64 *idx,
					   phys_addr_t *out_start,
					   phys_addr_t *out_end)
{
	struct memblock_type *type = &memblock.reserved;

	if (*idx >= 0 && *idx < type->cnt) {
		struct memblock_region *r = &type->regions[*idx];
		phys_addr_t base = r->base;
		phys_addr_t size = r->size;

		if (out_start)
			*out_start = base;
		if (out_end)
			*out_end = base + size;

		*idx += 1;
		return;
	}

	/* signal end of iteration */
	*idx = ULLONG_MAX;
}

/**
 * __next_mem_range - next function for for_each_free_mem_range() etc.
 * @idx: pointer to u64 loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @type_a: pointer to memblock_type from where the range is taken
 * @type_b: pointer to memblock_type which excludes memory from being taken
 * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @out_nid: ptr to int for nid of the range, can be %NULL
 *
 * Find the first area from *@idx which matches @nid, fill the out
 * parameters, and update *@idx for the next iteration.  The lower 32bit of
 * *@idx contains index into type_a and the upper 32bit indexes the
 * areas before each region in type_b.	For example, if type_b regions
 * look like the following,
 *
 *	0:[0-16), 1:[32-48), 2:[128-130)
 *
 * The upper 32bit indexes the following regions.
 *
 *	0:[0-0), 1:[16-32), 2:[48-128), 3:[130-MAX)
 *
 * As both region arrays are sorted, the function advances the two indices
 * in lockstep and returns each intersection.
 */
void __init_memblock __next_mem_range(u64 *idx, int nid, ulong flags,
				      struct memblock_type *type_a,
				      struct memblock_type *type_b,
				      phys_addr_t *out_start,
				      phys_addr_t *out_end, int *out_nid)
{
	int idx_a = *idx & 0xffffffff;
	int idx_b = *idx >> 32;

	if (WARN_ONCE(nid == MAX_NUMNODES,
	"Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
		nid = NUMA_NO_NODE;

	for (; idx_a < type_a->cnt; idx_a++) {
		struct memblock_region *m = &type_a->regions[idx_a];

		phys_addr_t m_start = m->base;
		phys_addr_t m_end = m->base + m->size;
		int	    m_nid = memblock_get_region_node(m);

		/* only memory regions are associated with nodes, check it */
		if (nid != NUMA_NO_NODE && nid != m_nid)
			continue;

		/* skip hotpluggable memory regions if needed */
		if (movable_node_is_enabled() && memblock_is_hotpluggable(m))
			continue;

		/* if we want mirror memory skip non-mirror memory regions */
		if ((flags & MEMBLOCK_MIRROR) && !memblock_is_mirror(m))
			continue;

		if (!type_b) {
			if (out_start)
				*out_start = m_start;
			if (out_end)
				*out_end = m_end;
			if (out_nid)
				*out_nid = m_nid;
			idx_a++;
			*idx = (u32)idx_a | (u64)idx_b << 32;
			return;
		}

		/* scan areas before each reservation */
		for (; idx_b < type_b->cnt + 1; idx_b++) {
			struct memblock_region *r;
			phys_addr_t r_start;
			phys_addr_t r_end;

			r = &type_b->regions[idx_b];
			r_start = idx_b ? r[-1].base + r[-1].size : 0;
			r_end = idx_b < type_b->cnt ?
				r->base : ULLONG_MAX;

			/*
			 * if idx_b advanced past idx_a,
			 * break out to advance idx_a
			 */
			if (r_start >= m_end)
				break;
			/* if the two regions intersect, we're done */
			if (m_start < r_end) {
				if (out_start)
					*out_start =
						max(m_start, r_start);
				if (out_end)
					*out_end = min(m_end, r_end);
				if (out_nid)
					*out_nid = m_nid;
				/*
				 * The region which ends first is
				 * advanced for the next iteration.
				 */
				if (m_end <= r_end)
					idx_a++;
				else
					idx_b++;
				*idx = (u32)idx_a | (u64)idx_b << 32;
				return;
			}
		}
	}

	/* signal end of iteration */
	*idx = ULLONG_MAX;
}
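
/*
 * This iterator backs for_each_free_mem_range(); a typical walk over all
 * free (memory minus reserved) ranges looks like this (sketch):
 *
 *	u64 i;
 *	phys_addr_t start, end;
 *
 *	for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE,
 *				&start, &end, NULL)
 *		pr_info("free range: [%pa-%pa]\n", &start, &end);
 */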

/**
 * __next_mem_range_rev - generic next function for for_each_*_range_rev()
 *
 * Finds the next range from type_a which is not marked as unsuitable
 * in type_b.
 *
 * @idx: pointer to u64 loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @type_a: pointer to memblock_type from where the range is taken
 * @type_b: pointer to memblock_type which excludes memory from being taken
 * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @out_nid: ptr to int for nid of the range, can be %NULL
 *
 * Reverse of __next_mem_range().
 */
void __init_memblock __next_mem_range_rev(u64 *idx, int nid, ulong flags,
					  struct memblock_type *type_a,
					  struct memblock_type *type_b,
					  phys_addr_t *out_start,
					  phys_addr_t *out_end, int *out_nid)
{
	int idx_a = *idx & 0xffffffff;
	int idx_b = *idx >> 32;

	if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
		nid = NUMA_NO_NODE;

	if (*idx == (u64)ULLONG_MAX) {
		idx_a = type_a->cnt - 1;
		idx_b = type_b->cnt;
	}

	for (; idx_a >= 0; idx_a--) {
		struct memblock_region *m = &type_a->regions[idx_a];

		phys_addr_t m_start = m->base;
		phys_addr_t m_end = m->base + m->size;
		int m_nid = memblock_get_region_node(m);

		/* only memory regions are associated with nodes, check it */
		if (nid != NUMA_NO_NODE && nid != m_nid)
			continue;

		/* skip hotpluggable memory regions if needed */
		if (movable_node_is_enabled() && memblock_is_hotpluggable(m))
			continue;

		/* if we want mirror memory skip non-mirror memory regions */
		if ((flags & MEMBLOCK_MIRROR) && !memblock_is_mirror(m))
			continue;

		if (!type_b) {
			if (out_start)
				*out_start = m_start;
			if (out_end)
				*out_end = m_end;
			if (out_nid)
				*out_nid = m_nid;
			idx_a--;
			*idx = (u32)idx_a | (u64)idx_b << 32;
			return;
		}

		/* scan areas before each reservation */
		for (; idx_b >= 0; idx_b--) {
			struct memblock_region *r;
			phys_addr_t r_start;
			phys_addr_t r_end;

			r = &type_b->regions[idx_b];
			r_start = idx_b ? r[-1].base + r[-1].size : 0;
			r_end = idx_b < type_b->cnt ?
				r->base : ULLONG_MAX;
			/*
			 * if idx_b advanced past idx_a,
			 * break out to advance idx_a
			 */
			if (r_end <= m_start)
				break;
			/* if the two regions intersect, we're done */
			if (m_end > r_start) {
				if (out_start)
					*out_start = max(m_start, r_start);
				if (out_end)
					*out_end = min(m_end, r_end);
				if (out_nid)
					*out_nid = m_nid;
				if (m_start >= r_start)
					idx_a--;
				else
					idx_b--;
				*idx = (u32)idx_a | (u64)idx_b << 32;
				return;
			}
		}
	}
	/* signal end of iteration */
	*idx = ULLONG_MAX;
}

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
/*
 * Common iterator interface used to define for_each_mem_pfn_range().
 */
void __init_memblock __next_mem_pfn_range(int *idx, int nid,
				unsigned long *out_start_pfn,
				unsigned long *out_end_pfn, int *out_nid)
{
	struct memblock_type *type = &memblock.memory;
	struct memblock_region *r;

	while (++*idx < type->cnt) {
		r = &type->regions[*idx];

		if (PFN_UP(r->base) >= PFN_DOWN(r->base + r->size))
			continue;
		if (nid == MAX_NUMNODES || nid == r->nid)
			break;
	}
	if (*idx >= type->cnt) {
		*idx = -1;
		return;
	}

	if (out_start_pfn)
		*out_start_pfn = PFN_UP(r->base);
	if (out_end_pfn)
		*out_end_pfn = PFN_DOWN(r->base + r->size);
	if (out_nid)
		*out_nid = r->nid;
}

/**
 * memblock_set_node - set node ID on memblock regions
 * @base: base of area to set node ID for
 * @size: size of area to set node ID for
 * @type: memblock type to set node ID for
 * @nid: node ID to set
 *
 * Set the nid of memblock @type regions in [@base,@base+@size) to @nid.
 * Regions which cross the area boundaries are split as necessary.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init_memblock memblock_set_node(phys_addr_t base, phys_addr_t size,
				      struct memblock_type *type, int nid)
{
	int start_rgn, end_rgn;
	int i, ret;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = start_rgn; i < end_rgn; i++)
		memblock_set_region_node(&type->regions[i], nid);

	memblock_merge_regions(type);
	return 0;
}
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */

static phys_addr_t __init memblock_alloc_range_nid(phys_addr_t size,
					phys_addr_t align, phys_addr_t start,
					phys_addr_t end, int nid, ulong flags)
{
	phys_addr_t found;

	if (!align)
		align = SMP_CACHE_BYTES;

	found = memblock_find_in_range_node(size, align, start, end, nid,
					    flags);
	if (found && !memblock_reserve(found, size)) {
		/*
		 * The min_count is set to 0 so that memblock allocations are
		 * never reported as leaks.
		 */
		kmemleak_alloc(__va(found), size, 0, 0);
		return found;
	}
	return 0;
}

phys_addr_t __init memblock_alloc_range(phys_addr_t size, phys_addr_t align,
					phys_addr_t start, phys_addr_t end,
					ulong flags)
{
	return memblock_alloc_range_nid(size, align, start, end, NUMA_NO_NODE,
					flags);
}

static phys_addr_t __init memblock_alloc_base_nid(phys_addr_t size,
					phys_addr_t align, phys_addr_t max_addr,
					int nid, ulong flags)
{
	return memblock_alloc_range_nid(size, align, 0, max_addr, nid, flags);
}

phys_addr_t __init memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid)
{
	ulong flags = choose_memblock_flags();
	phys_addr_t ret;

again:
	ret = memblock_alloc_base_nid(size, align, MEMBLOCK_ALLOC_ACCESSIBLE,
				      nid, flags);

	if (!ret && (flags & MEMBLOCK_MIRROR)) {
		flags &= ~MEMBLOCK_MIRROR;
		goto again;
	}
	return ret;
}

phys_addr_t __init __memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
{
	return memblock_alloc_base_nid(size, align, max_addr, NUMA_NO_NODE,
				       MEMBLOCK_NONE);
}

phys_addr_t __init memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
{
	phys_addr_t alloc;

	alloc = __memblock_alloc_base(size, align, max_addr);

	if (alloc == 0)
		panic("ERROR: Failed to allocate 0x%llx bytes below 0x%llx.\n",
		      (unsigned long long) size, (unsigned long long) max_addr);

	return alloc;
}

phys_addr_t __init memblock_alloc(phys_addr_t size, phys_addr_t align)
{
	return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
}

phys_addr_t __init memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid)
{
	phys_addr_t res = memblock_alloc_nid(size, align, nid);

	if (res)
		return res;
	return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
}
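
/*
 * Sketch of typical early-boot use of the physical allocators above:
 *
 *	phys_addr_t pa = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
 *
 * This returns page-aligned physical memory below memblock.current_limit
 * and panics on failure, while memblock_alloc_try_nid() first prefers
 * @nid and only then falls back to any node.
 */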

/**
 * memblock_virt_alloc_internal - allocate boot memory block
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region to allocate (phys address)
 * @max_addr: the upper bound of the memory region to allocate (phys address)
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * The @min_addr limit is dropped if it can not be satisfied and the allocation
 * will fall back to memory below @min_addr. Also, allocation may fall back
 * to any node in the system if the specified node can not
 * hold the requested memory.
 *
 * The allocation is performed from memory region limited by
 * memblock.current_limit if @max_addr == %BOOTMEM_ALLOC_ACCESSIBLE.
 *
 * The memory block is aligned on SMP_CACHE_BYTES if @align == 0.
 *
 * The phys address of allocated boot memory block is converted to virtual and
 * allocated memory is reset to 0.
 *
 * In addition, function sets the min_count to 0 using kmemleak_alloc for
 * allocated boot memory block, so that it is never reported as leaks.
 *
 * RETURNS:
 * Virtual address of allocated memory block on success, NULL on failure.
 */
static void * __init memblock_virt_alloc_internal(
				phys_addr_t size, phys_addr_t align,
				phys_addr_t min_addr, phys_addr_t max_addr,
				int nid)
{
	phys_addr_t alloc;
	void *ptr;
	ulong flags = choose_memblock_flags();

	if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
		nid = NUMA_NO_NODE;

	/*
	 * Detect any accidental use of these APIs after slab is ready, as at
	 * this moment memblock may be deinitialized already and its
	 * internal data may be destroyed (after execution of free_all_bootmem)
	 */
	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc_node(size, GFP_NOWAIT, nid);

	if (!align)
		align = SMP_CACHE_BYTES;

	if (max_addr > memblock.current_limit)
		max_addr = memblock.current_limit;

again:
	alloc = memblock_find_in_range_node(size, align, min_addr, max_addr,
					    nid, flags);
	if (alloc)
		goto done;

	if (nid != NUMA_NO_NODE) {
		alloc = memblock_find_in_range_node(size, align, min_addr,
						    max_addr, NUMA_NO_NODE,
						    flags);
		if (alloc)
			goto done;
	}

	if (min_addr) {
		min_addr = 0;
		goto again;
	}

	if (flags & MEMBLOCK_MIRROR) {
		flags &= ~MEMBLOCK_MIRROR;
		pr_warn("Could not allocate %pap bytes of mirrored memory\n",
			&size);
		goto again;
	}

	return NULL;
done:
	memblock_reserve(alloc, size);
	ptr = phys_to_virt(alloc);
	memset(ptr, 0, size);

	/*
	 * The min_count is set to 0 so that bootmem allocated blocks
	 * are never reported as leaks. This is because many of these blocks
	 * are only referred via the physical address which is not
	 * looked up by kmemleak.
	 */
	kmemleak_alloc(ptr, size, 0, 0);

	return ptr;
}

/**
 * memblock_virt_alloc_try_nid_nopanic - allocate boot memory block
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region from where the allocation
 *	  is preferred (phys address)
 * @max_addr: the upper bound of the memory region from where the allocation
 *	      is preferred (phys address), or %BOOTMEM_ALLOC_ACCESSIBLE to
 *	      allocate only from memory limited by memblock.current_limit value
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * Public version of memblock_virt_alloc_internal() which provides
 * additional debug information (including caller info), if enabled.
 *
 * RETURNS:
 * Virtual address of allocated memory block on success, NULL on failure.
 */
void * __init memblock_virt_alloc_try_nid_nopanic(
				phys_addr_t size, phys_addr_t align,
				phys_addr_t min_addr, phys_addr_t max_addr,
				int nid)
{
	memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=0x%llx max_addr=0x%llx %pF\n",
		     __func__, (u64)size, (u64)align, nid, (u64)min_addr,
		     (u64)max_addr, (void *)_RET_IP_);
	return memblock_virt_alloc_internal(size, align, min_addr,
					     max_addr, nid);
}

/**
 * memblock_virt_alloc_try_nid - allocate boot memory block with panicking
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region from where the allocation
 *	  is preferred (phys address)
 * @max_addr: the upper bound of the memory region from where the allocation
 *	      is preferred (phys address), or %BOOTMEM_ALLOC_ACCESSIBLE to
 *	      allocate only from memory limited by memblock.current_limit value
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * Public panicking version of memblock_virt_alloc_internal() which
 * provides debug information (including caller info), if enabled, and
 * panics if the request cannot be satisfied.
 *
 * RETURNS:
 * Virtual address of allocated memory block on success, NULL on failure.
 */
void * __init memblock_virt_alloc_try_nid(
			phys_addr_t size, phys_addr_t align,
			phys_addr_t min_addr, phys_addr_t max_addr,
			int nid)
{
	void *ptr;

	memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=0x%llx max_addr=0x%llx %pF\n",
		     __func__, (u64)size, (u64)align, nid, (u64)min_addr,
		     (u64)max_addr, (void *)_RET_IP_);
	ptr = memblock_virt_alloc_internal(size, align,
					   min_addr, max_addr, nid);
	if (ptr)
		return ptr;

	panic("%s: Failed to allocate %llu bytes align=0x%llx nid=%d from=0x%llx max_addr=0x%llx\n",
	      __func__, (u64)size, (u64)align, nid, (u64)min_addr,
	      (u64)max_addr);
	return NULL;
}

/**
 * __memblock_free_early - free boot memory block
 * @base: phys starting address of the boot memory block
 * @size: size of the boot memory block in bytes
 *
 * Free a boot memory block previously allocated by the memblock_virt_alloc_xx()
 * API.  The freed memory will not be released to the buddy allocator.
 */
void __init __memblock_free_early(phys_addr_t base, phys_addr_t size)
{
	memblock_dbg("%s: [%#016llx-%#016llx] %pF\n",
		     __func__, (u64)base, (u64)base + size - 1,
		     (void *)_RET_IP_);
	kmemleak_free_part(__va(base), size);
	memblock_remove_range(&memblock.reserved, base, size);
}

/**
 * __memblock_free_late - free bootmem block pages directly to buddy allocator
 * @base: phys starting address of the boot memory block
 * @size: size of the boot memory block in bytes
 *
 * This is only useful when the bootmem allocator has already been torn
 * down, but we are still initializing the system.  Pages are released directly
 * to the buddy allocator, no bootmem metadata is updated because it is gone.
 */
void __init __memblock_free_late(phys_addr_t base, phys_addr_t size)
{
	u64 cursor, end;

	memblock_dbg("%s: [%#016llx-%#016llx] %pF\n",
		     __func__, (u64)base, (u64)base + size - 1,
		     (void *)_RET_IP_);
	kmemleak_free_part(__va(base), size);
	cursor = PFN_UP(base);
	end = PFN_DOWN(base + size);

	for (; cursor < end; cursor++) {
		__free_pages_bootmem(pfn_to_page(cursor), cursor, 0);
		totalram_pages++;
	}
}

/*
 * Remaining API functions
 */

phys_addr_t __init memblock_phys_mem_size(void)
{
	return memblock.memory.total_size;
}

phys_addr_t __init memblock_mem_size(unsigned long limit_pfn)
{
	unsigned long pages = 0;
	struct memblock_region *r;
	unsigned long start_pfn, end_pfn;

	for_each_memblock(memory, r) {
		start_pfn = memblock_region_memory_base_pfn(r);
		end_pfn = memblock_region_memory_end_pfn(r);
		start_pfn = min_t(unsigned long, start_pfn, limit_pfn);
		end_pfn = min_t(unsigned long, end_pfn, limit_pfn);
		pages += end_pfn - start_pfn;
	}

	return PFN_PHYS(pages);
}

/* lowest address */
phys_addr_t __init_memblock memblock_start_of_DRAM(void)
{
	return memblock.memory.regions[0].base;
}

phys_addr_t __init_memblock memblock_end_of_DRAM(void)
{
	int idx = memblock.memory.cnt - 1;

	return (memblock.memory.regions[idx].base + memblock.memory.regions[idx].size);
}

void __init memblock_enforce_memory_limit(phys_addr_t limit)
{
	phys_addr_t max_addr = (phys_addr_t)ULLONG_MAX;
	struct memblock_region *r;

	if (!limit)
		return;

	/* find out max address */
	for_each_memblock(memory, r) {
		if (limit <= r->size) {
			max_addr = r->base + limit;
			break;
		}
		limit -= r->size;
	}

	/* truncate both memory and reserved regions */
	memblock_remove_range(&memblock.memory, max_addr,
			      (phys_addr_t)ULLONG_MAX);
	memblock_remove_range(&memblock.reserved, max_addr,
			      (phys_addr_t)ULLONG_MAX);
}

static int __init_memblock memblock_search(struct memblock_type *type, phys_addr_t addr)
{
	unsigned int left = 0, right = type->cnt;

	do {
		unsigned int mid = (right + left) / 2;

		if (addr < type->regions[mid].base)
			right = mid;
		else if (addr >= (type->regions[mid].base +
				  type->regions[mid].size))
			left = mid + 1;
		else
			return mid;
	} while (left < right);
	return -1;
}

int __init memblock_is_reserved(phys_addr_t addr)
{
	return memblock_search(&memblock.reserved, addr) != -1;
}

int __init_memblock memblock_is_memory(phys_addr_t addr)
{
	return memblock_search(&memblock.memory, addr) != -1;
}

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
int __init_memblock memblock_search_pfn_nid(unsigned long pfn,
			 unsigned long *start_pfn, unsigned long *end_pfn)
{
	struct memblock_type *type = &memblock.memory;
	int mid = memblock_search(type, PFN_PHYS(pfn));

	if (mid == -1)
		return -1;

	*start_pfn = PFN_DOWN(type->regions[mid].base);
	*end_pfn = PFN_DOWN(type->regions[mid].base + type->regions[mid].size);

	return type->regions[mid].nid;
}
#endif

/**
 * memblock_is_region_memory - check if a region is a subset of memory
 * @base: base of region to check
 * @size: size of region to check
 *
 * Check if the region [@base, @base+@size) is a subset of a memory block.
 *
 * RETURNS:
 * 0 if false, non-zero if true
 */
int __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size)
{
	int idx = memblock_search(&memblock.memory, base);
	phys_addr_t end = base + memblock_cap_size(base, &size);

	if (idx == -1)
		return 0;
	return memblock.memory.regions[idx].base <= base &&
		(memblock.memory.regions[idx].base +
		 memblock.memory.regions[idx].size) >= end;
}

/**
 * memblock_is_region_reserved - check if a region intersects reserved memory
 * @base: base of region to check
 * @size: size of region to check
 *
 * Check if the region [@base, @base+@size) intersects a reserved memory block.
 *
 * RETURNS:
 * True if they intersect, false if not.
 */
bool __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size)
{
	memblock_cap_size(base, &size);
	return memblock_overlaps_region(&memblock.reserved, base, size);
}

void __init_memblock memblock_trim_memory(phys_addr_t align)
{
	phys_addr_t start, end, orig_start, orig_end;
	struct memblock_region *r;

	for_each_memblock(memory, r) {
		orig_start = r->base;
		orig_end = r->base + r->size;
		start = round_up(orig_start, align);
		end = round_down(orig_end, align);

		if (start == orig_start && end == orig_end)
			continue;

		if (start < end) {
			r->base = start;
			r->size = end - start;
		} else {
			memblock_remove_region(&memblock.memory,
					       r - memblock.memory.regions);
			r--;
		}
	}
}

void __init_memblock memblock_set_current_limit(phys_addr_t limit)
{
	memblock.current_limit = limit;
}

phys_addr_t __init_memblock memblock_get_current_limit(void)
{
	return memblock.current_limit;
}

static void __init_memblock memblock_dump(struct memblock_type *type, char *name)
{
	unsigned long long base, size;
	unsigned long flags;
	int i;

	pr_info(" %s.cnt  = 0x%lx\n", name, type->cnt);

	for (i = 0; i < type->cnt; i++) {
		struct memblock_region *rgn = &type->regions[i];
		char nid_buf[32] = "";

		base = rgn->base;
		size = rgn->size;
		flags = rgn->flags;
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
		if (memblock_get_region_node(rgn) != MAX_NUMNODES)
			snprintf(nid_buf, sizeof(nid_buf), " on node %d",
				 memblock_get_region_node(rgn));
#endif
		pr_info(" %s[%#x]\t[%#016llx-%#016llx], %#llx bytes%s flags: %#lx\n",
			name, i, base, base + size - 1, size, nid_buf, flags);
	}
}

void __init_memblock __memblock_dump_all(void)
{
	pr_info("MEMBLOCK configuration:\n");
	pr_info(" memory size = %#llx reserved size = %#llx\n",
		(unsigned long long)memblock.memory.total_size,
		(unsigned long long)memblock.reserved.total_size);

	memblock_dump(&memblock.memory, "memory");
	memblock_dump(&memblock.reserved, "reserved");
}

void __init memblock_allow_resize(void)
{
	memblock_can_resize = 1;
}

static int __init early_memblock(char *p)
{
	if (p && strstr(p, "debug"))
		memblock_debug = 1;
	return 0;
}
early_param("memblock", early_memblock);

#if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_ARCH_DISCARD_MEMBLOCK)

static int memblock_debug_show(struct seq_file *m, void *private)
{
	struct memblock_type *type = m->private;
	struct memblock_region *reg;
	int i;

	for (i = 0; i < type->cnt; i++) {
		reg = &type->regions[i];
		seq_printf(m, "%4d: ", i);
		if (sizeof(phys_addr_t) == 4)
			seq_printf(m, "0x%08lx..0x%08lx\n",
				   (unsigned long)reg->base,
				   (unsigned long)(reg->base + reg->size - 1));
		else
			seq_printf(m, "0x%016llx..0x%016llx\n",
				   (unsigned long long)reg->base,
				   (unsigned long long)(reg->base + reg->size - 1));
	}
	return 0;
}

static int memblock_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, memblock_debug_show, inode->i_private);
}

static const struct file_operations memblock_debug_fops = {
	.open = memblock_debug_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int __init memblock_init_debugfs(void)
{
	struct dentry *root = debugfs_create_dir("memblock", NULL);
	if (!root)
		return -ENXIO;
	debugfs_create_file("memory", S_IRUGO, root, &memblock.memory, &memblock_debug_fops);
	debugfs_create_file("reserved", S_IRUGO, root, &memblock.reserved, &memblock_debug_fops);
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
	debugfs_create_file("physmem", S_IRUGO, root, &memblock.physmem, &memblock_debug_fops);
#endif

	return 0;
}
__initcall(memblock_init_debugfs);

#endif /* CONFIG_DEBUG_FS */
