/*
 * bootmem - A boot-time physical memory allocator and configurator
 *
 * Copyright (C) 1999 Ingo Molnar
 *                1999 Kanoj Sarcar, SGI
 *                2008 Johannes Weiner
 *
 * Access to this subsystem has to be serialized externally (which is true
 * for the boot process anyway).
 */
#include <linux/init.h>
#include <linux/pfn.h>
#include <linux/slab.h>
#include <linux/bootmem.h>
#include <linux/export.h>
#include <linux/kmemleak.h>
#include <linux/range.h>
#include <linux/memblock.h>
#include <linux/bug.h>
#include <linux/io.h>

#include <asm/processor.h>

#include "internal.h"

#ifndef CONFIG_NEED_MULTIPLE_NODES
struct pglist_data __refdata contig_page_data = {
	.bdata = &bootmem_node_data[0]
};
EXPORT_SYMBOL(contig_page_data);
#endif

unsigned long max_low_pfn;
unsigned long min_low_pfn;
unsigned long max_pfn;

bootmem_data_t bootmem_node_data[MAX_NUMNODES] __initdata;

static struct list_head bdata_list __initdata = LIST_HEAD_INIT(bdata_list);

static int bootmem_debug;

static int __init bootmem_debug_setup(char *buf)
{
	bootmem_debug = 1;
	return 0;
}
early_param("bootmem_debug", bootmem_debug_setup);

#define bdebug(fmt, args...) ({				\
	if (unlikely(bootmem_debug))			\
		printk(KERN_INFO			\
			"bootmem::%s " fmt,		\
			__func__, ## args);		\
})

static unsigned long __init bootmap_bytes(unsigned long pages)
{
	unsigned long bytes = DIV_ROUND_UP(pages, 8);

	return ALIGN(bytes, sizeof(long));
}

/**
 * bootmem_bootmap_pages - calculate bitmap size in pages
 * @pages: number of pages the bitmap has to represent
 */
unsigned long __init bootmem_bootmap_pages(unsigned long pages)
{
	unsigned long bytes = bootmap_bytes(pages);

	return PAGE_ALIGN(bytes) >> PAGE_SHIFT;
}

/*
 * link bdata in order
 */
static void __init link_bootmem(bootmem_data_t *bdata)
{
	bootmem_data_t *ent;

	list_for_each_entry(ent, &bdata_list, list) {
		if (bdata->node_min_pfn < ent->node_min_pfn) {
			list_add_tail(&bdata->list, &ent->list);
			return;
		}
	}

	list_add_tail(&bdata->list, &bdata_list);
}

/*
 * Called once to set up the allocator itself.
 */
static unsigned long __init init_bootmem_core(bootmem_data_t *bdata,
	unsigned long mapstart, unsigned long start, unsigned long end)
{
	unsigned long mapsize;

	mminit_validate_memmodel_limits(&start, &end);
	bdata->node_bootmem_map = phys_to_virt(PFN_PHYS(mapstart));
	bdata->node_min_pfn = start;
	bdata->node_low_pfn = end;
	link_bootmem(bdata);

	/*
	 * Initially all pages are reserved - setup_arch() has to
	 * register free RAM areas explicitly.
	 */
	mapsize = bootmap_bytes(end - start);
	memset(bdata->node_bootmem_map, 0xff, mapsize);

	bdebug("nid=%td start=%lx map=%lx end=%lx mapsize=%lx\n",
		bdata - bootmem_node_data, start, mapstart, end, mapsize);

	return mapsize;
}
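
/*
 * Worked example (illustrative, not part of this file): bitmap sizing and
 * a typical bring-up sequence.  With 4 KiB pages, describing 1 GiB of RAM
 * takes 262144 bits; bootmap_bytes() rounds 262144/8 = 32768 bytes up to a
 * long, and bootmem_bootmap_pages() turns that into 8 bitmap pages.  An
 * arch's setup_arch() might then do roughly the following -- 'map_pfn',
 * 'start_pfn' and 'end_pfn' are hypothetical values the arch discovered,
 * and real arch code differs in detail:
 *
 *	unsigned long pages = bootmem_bootmap_pages(end_pfn - start_pfn);
 *	// place the bitmap in 'pages' free page frames starting at map_pfn
 *	init_bootmem_node(NODE_DATA(0), map_pfn, start_pfn, end_pfn);
 *	// everything starts out reserved; register the usable RAM ...
 *	free_bootmem(PFN_PHYS(start_pfn), PFN_PHYS(end_pfn - start_pfn));
 *	// ... then re-reserve the bitmap (and kernel image, etc.)
 *	reserve_bootmem(PFN_PHYS(map_pfn), pages << PAGE_SHIFT,
 *			BOOTMEM_DEFAULT);
 */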

/**
 * init_bootmem_node - register a node as boot memory
 * @pgdat: node to register
 * @freepfn: pfn where the bitmap for this node is to be placed
 * @startpfn: first pfn on the node
 * @endpfn: first pfn after the node
 *
 * Returns the number of bytes needed to hold the bitmap for this node.
 */
unsigned long __init init_bootmem_node(pg_data_t *pgdat, unsigned long freepfn,
				unsigned long startpfn, unsigned long endpfn)
{
	return init_bootmem_core(pgdat->bdata, freepfn, startpfn, endpfn);
}

/**
 * init_bootmem - register boot memory
 * @start: pfn where the bitmap is to be placed
 * @pages: number of available physical pages
 *
 * Returns the number of bytes needed to hold the bitmap.
 */
unsigned long __init init_bootmem(unsigned long start, unsigned long pages)
{
	max_low_pfn = pages;
	min_low_pfn = start;
	return init_bootmem_core(NODE_DATA(0)->bdata, start, 0, pages);
}

/*
 * free_bootmem_late - free bootmem pages directly to page allocator
 * @physaddr: starting physical address of the range
 * @size: size of the range in bytes
 *
 * This is only useful when the bootmem allocator has already been torn
 * down, but we are still initializing the system.  Pages are given directly
 * to the page allocator, no bootmem metadata is updated because it is gone.
 */
void __init free_bootmem_late(unsigned long physaddr, unsigned long size)
{
	unsigned long cursor, end;

	kmemleak_free_part(__va(physaddr), size);

	cursor = PFN_UP(physaddr);
	end = PFN_DOWN(physaddr + size);

	for (; cursor < end; cursor++) {
		__free_pages_bootmem(pfn_to_page(cursor), cursor, 0);
		totalram_pages++;
	}
}
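
/*
 * Illustrative note: free_bootmem_late() bypasses the bitmap entirely and
 * hands pages straight to the buddy allocator, so it is only meaningful
 * after free_all_bootmem() has torn the bitmap down.  A hypothetical
 * late-init caller releasing a firmware-reserved region might look like
 * this (the names are made up for illustration):
 *
 *	if (!fw_region_still_needed())
 *		free_bootmem_late(fw_region_phys, fw_region_size);
 *
 * Note the inward rounding above: only page frames wholly inside the
 * range are freed, so a partially covered page stays reserved.
 */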

static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
{
	struct page *page;
	unsigned long *map, start, end, pages, cur, count = 0;

	if (!bdata->node_bootmem_map)
		return 0;

	map = bdata->node_bootmem_map;
	start = bdata->node_min_pfn;
	end = bdata->node_low_pfn;

	bdebug("nid=%td start=%lx end=%lx\n",
		bdata - bootmem_node_data, start, end);

	while (start < end) {
		unsigned long idx, vec;
		unsigned shift;

		idx = start - bdata->node_min_pfn;
		shift = idx & (BITS_PER_LONG - 1);
		/*
		 * vec holds at most BITS_PER_LONG map bits,
		 * bit 0 corresponds to start.
		 */
		vec = ~map[idx / BITS_PER_LONG];

		if (shift) {
			vec >>= shift;
			if (end - start >= BITS_PER_LONG)
				vec |= ~map[idx / BITS_PER_LONG + 1] <<
					(BITS_PER_LONG - shift);
		}
		/*
		 * If we have a properly aligned and fully unreserved
		 * BITS_PER_LONG block of pages in front of us, free
		 * it in one go.
		 */
		if (IS_ALIGNED(start, BITS_PER_LONG) && vec == ~0UL) {
			int order = ilog2(BITS_PER_LONG);

			__free_pages_bootmem(pfn_to_page(start), start, order);
			count += BITS_PER_LONG;
			start += BITS_PER_LONG;
		} else {
			cur = start;

			start = ALIGN(start + 1, BITS_PER_LONG);
			while (vec && cur != start) {
				if (vec & 1) {
					page = pfn_to_page(cur);
					__free_pages_bootmem(page, cur, 0);
					count++;
				}
				vec >>= 1;
				++cur;
			}
		}
	}

	cur = bdata->node_min_pfn;
	page = virt_to_page(bdata->node_bootmem_map);
	pages = bdata->node_low_pfn - bdata->node_min_pfn;
	pages = bootmem_bootmap_pages(pages);
	count += pages;
	while (pages--)
		__free_pages_bootmem(page++, cur++, 0);

	bdebug("nid=%td released=%lx\n", bdata - bootmem_node_data, count);

	return count;
}

static int reset_managed_pages_done __initdata;

void reset_node_managed_pages(pg_data_t *pgdat)
{
	struct zone *z;

	for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
		z->managed_pages = 0;
}

void __init reset_all_zones_managed_pages(void)
{
	struct pglist_data *pgdat;

	if (reset_managed_pages_done)
		return;

	for_each_online_pgdat(pgdat)
		reset_node_managed_pages(pgdat);

	reset_managed_pages_done = 1;
}

/**
 * free_all_bootmem - release free pages to the buddy allocator
 *
 * Returns the number of pages actually released.
 */
unsigned long __init free_all_bootmem(void)
{
	unsigned long total_pages = 0;
	bootmem_data_t *bdata;

	reset_all_zones_managed_pages();

	list_for_each_entry(bdata, &bdata_list, list)
		total_pages += free_all_bootmem_core(bdata);

	totalram_pages += total_pages;

	return total_pages;
}

static void __init __free(bootmem_data_t *bdata,
			unsigned long sidx, unsigned long eidx)
{
	unsigned long idx;

	bdebug("nid=%td start=%lx end=%lx\n", bdata - bootmem_node_data,
		sidx + bdata->node_min_pfn,
		eidx + bdata->node_min_pfn);

	if (bdata->hint_idx > sidx)
		bdata->hint_idx = sidx;

	for (idx = sidx; idx < eidx; idx++)
		if (!test_and_clear_bit(idx, bdata->node_bootmem_map))
			BUG();
}

static int __init __reserve(bootmem_data_t *bdata, unsigned long sidx,
			unsigned long eidx, int flags)
{
	unsigned long idx;
	int exclusive = flags & BOOTMEM_EXCLUSIVE;

	bdebug("nid=%td start=%lx end=%lx flags=%x\n",
		bdata - bootmem_node_data,
		sidx + bdata->node_min_pfn,
		eidx + bdata->node_min_pfn,
		flags);

	for (idx = sidx; idx < eidx; idx++)
		if (test_and_set_bit(idx, bdata->node_bootmem_map)) {
			if (exclusive) {
				__free(bdata, sidx, idx);
				return -EBUSY;
			}
			bdebug("silent double reserve of PFN %lx\n",
				idx + bdata->node_min_pfn);
		}
	return 0;
}

static int __init mark_bootmem_node(bootmem_data_t *bdata,
				unsigned long start, unsigned long end,
				int reserve, int flags)
{
	unsigned long sidx, eidx;

	bdebug("nid=%td start=%lx end=%lx reserve=%d flags=%x\n",
		bdata - bootmem_node_data, start, end, reserve, flags);

	BUG_ON(start < bdata->node_min_pfn);
	BUG_ON(end > bdata->node_low_pfn);

	sidx = start - bdata->node_min_pfn;
	eidx = end - bdata->node_min_pfn;

	if (reserve)
		return __reserve(bdata, sidx, eidx, flags);
	else
		__free(bdata, sidx, eidx);
	return 0;
}
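
/*
 * Worked example (illustrative): in the bitmap a set bit means "reserved"
 * and a clear bit means "free"; sidx/eidx are indices relative to
 * node_min_pfn.  __reserve() with BOOTMEM_EXCLUSIVE treats an already-set
 * bit as a conflict: it rolls back the bits it set so far via __free()
 * and returns -EBUSY, while without the flag a double reservation is
 * merely logged.  So if indices 5-8 are free and index 9 is reserved:
 *
 *	__reserve(bdata, 5, 10, BOOTMEM_EXCLUSIVE);	// -EBUSY, 5-8 restored
 *	__reserve(bdata, 5, 10, 0);	// 0, index 9 silently double-reserved
 */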
static int __init mark_bootmem(unsigned long start, unsigned long end,
				int reserve, int flags)
{
	unsigned long pos;
	bootmem_data_t *bdata;

	pos = start;
	list_for_each_entry(bdata, &bdata_list, list) {
		int err;
		unsigned long max;

		if (pos < bdata->node_min_pfn ||
		    pos >= bdata->node_low_pfn) {
			BUG_ON(pos != start);
			continue;
		}

		max = min(bdata->node_low_pfn, end);

		err = mark_bootmem_node(bdata, pos, max, reserve, flags);
		if (reserve && err) {
			mark_bootmem(start, pos, 0, 0);
			return err;
		}

		if (max == end)
			return 0;
		pos = bdata->node_low_pfn;
	}
	BUG();
}

/**
 * free_bootmem_node - mark a page range as usable
 * @pgdat: node the range resides on
 * @physaddr: starting address of the range
 * @size: size of the range in bytes
 *
 * Partial pages will be considered reserved and left as they are.
 *
 * The range must reside completely on the specified node.
 */
void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
			unsigned long size)
{
	unsigned long start, end;

	kmemleak_free_part(__va(physaddr), size);

	start = PFN_UP(physaddr);
	end = PFN_DOWN(physaddr + size);

	mark_bootmem_node(pgdat->bdata, start, end, 0, 0);
}

/**
 * free_bootmem - mark a page range as usable
 * @physaddr: starting physical address of the range
 * @size: size of the range in bytes
 *
 * Partial pages will be considered reserved and left as they are.
 *
 * The range must be contiguous but may span node boundaries.
 */
void __init free_bootmem(unsigned long physaddr, unsigned long size)
{
	unsigned long start, end;

	kmemleak_free_part(__va(physaddr), size);

	start = PFN_UP(physaddr);
	end = PFN_DOWN(physaddr + size);

	mark_bootmem(start, end, 0, 0);
}

/**
 * reserve_bootmem_node - mark a page range as reserved
 * @pgdat: node the range resides on
 * @physaddr: starting address of the range
 * @size: size of the range in bytes
 * @flags: reservation flags (see linux/bootmem.h)
 *
 * Partial pages will be reserved.
 *
 * The range must reside completely on the specified node.
 */
int __init reserve_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
				unsigned long size, int flags)
{
	unsigned long start, end;

	start = PFN_DOWN(physaddr);
	end = PFN_UP(physaddr + size);

	return mark_bootmem_node(pgdat->bdata, start, end, 1, flags);
}

/**
 * reserve_bootmem - mark a page range as reserved
 * @addr: starting address of the range
 * @size: size of the range in bytes
 * @flags: reservation flags (see linux/bootmem.h)
 *
 * Partial pages will be reserved.
 *
 * The range must be contiguous but may span node boundaries.
 */
int __init reserve_bootmem(unsigned long addr, unsigned long size,
			int flags)
{
	unsigned long start, end;

	start = PFN_DOWN(addr);
	end = PFN_UP(addr + size);

	return mark_bootmem(start, end, 1, flags);
}
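
/*
 * Worked example (illustrative): the free and reserve paths round in
 * opposite directions so that a partially covered page always ends up
 * reserved.  With 4 KiB pages:
 *
 *	free_bootmem(0x1800, 0x2000);
 *		// PFN_UP(0x1800) = 2, PFN_DOWN(0x3800) = 3: frees pfn 2 only
 *	reserve_bootmem(0x1800, 0x2000, BOOTMEM_DEFAULT);
 *		// PFN_DOWN(0x1800) = 1, PFN_UP(0x3800) = 4: reserves pfns 1-3
 */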

static unsigned long __init align_idx(struct bootmem_data *bdata,
				unsigned long idx, unsigned long step)
{
	unsigned long base = bdata->node_min_pfn;

	/*
	 * Align the index with respect to the node start so that the
	 * combination of both satisfies the requested alignment.
	 */

	return ALIGN(base + idx, step) - base;
}

static unsigned long __init align_off(struct bootmem_data *bdata,
				unsigned long off, unsigned long align)
{
	unsigned long base = PFN_PHYS(bdata->node_min_pfn);

	/* Same as align_idx for byte offsets */

	return ALIGN(base + off, align) - base;
}

static void * __init alloc_bootmem_bdata(struct bootmem_data *bdata,
					unsigned long size, unsigned long align,
					unsigned long goal, unsigned long limit)
{
	unsigned long fallback = 0;
	unsigned long min, max, start, sidx, midx, step;

	bdebug("nid=%td size=%lx [%lu pages] align=%lx goal=%lx limit=%lx\n",
		bdata - bootmem_node_data, size, PAGE_ALIGN(size) >> PAGE_SHIFT,
		align, goal, limit);

	BUG_ON(!size);
	BUG_ON(align & (align - 1));
	BUG_ON(limit && goal + size > limit);

	if (!bdata->node_bootmem_map)
		return NULL;

	min = bdata->node_min_pfn;
	max = bdata->node_low_pfn;

	goal >>= PAGE_SHIFT;
	limit >>= PAGE_SHIFT;

	if (limit && max > limit)
		max = limit;
	if (max <= min)
		return NULL;

	step = max(align >> PAGE_SHIFT, 1UL);

	if (goal && min < goal && goal < max)
		start = ALIGN(goal, step);
	else
		start = ALIGN(min, step);

	sidx = start - bdata->node_min_pfn;
	midx = max - bdata->node_min_pfn;

	if (bdata->hint_idx > sidx) {
		/*
		 * Handle the valid case of sidx being zero and still
		 * catch the fallback below.
		 */
		fallback = sidx + 1;
		sidx = align_idx(bdata, bdata->hint_idx, step);
	}

	while (1) {
		int merge;
		void *region;
		unsigned long eidx, i, start_off, end_off;
find_block:
		sidx = find_next_zero_bit(bdata->node_bootmem_map, midx, sidx);
		sidx = align_idx(bdata, sidx, step);
		eidx = sidx + PFN_UP(size);

		if (sidx >= midx || eidx > midx)
			break;

		for (i = sidx; i < eidx; i++)
			if (test_bit(i, bdata->node_bootmem_map)) {
				sidx = align_idx(bdata, i, step);
				if (sidx == i)
					sidx += step;
				goto find_block;
			}

		if (bdata->last_end_off & (PAGE_SIZE - 1) &&
				PFN_DOWN(bdata->last_end_off) + 1 == sidx)
			start_off = align_off(bdata, bdata->last_end_off, align);
		else
			start_off = PFN_PHYS(sidx);

		merge = PFN_DOWN(start_off) < sidx;
		end_off = start_off + size;

		bdata->last_end_off = end_off;
		bdata->hint_idx = PFN_UP(end_off);

		/*
		 * Reserve the area now:
		 */
		if (__reserve(bdata, PFN_DOWN(start_off) + merge,
				PFN_UP(end_off), BOOTMEM_EXCLUSIVE))
			BUG();

		region = phys_to_virt(PFN_PHYS(bdata->node_min_pfn) +
				start_off);
		memset(region, 0, size);
		/*
		 * The min_count is set to 0 so that bootmem allocated blocks
		 * are never reported as leaks.
		 */
		kmemleak_alloc(region, size, 0, 0);
		return region;
	}

	if (fallback) {
		sidx = align_idx(bdata, fallback - 1, step);
		fallback = 0;
		goto find_block;
	}

	return NULL;
}
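
/*
 * Illustrative note on the merge path above: last_end_off remembers where
 * the previous allocation ended, so consecutive sub-page allocations can
 * share a page frame instead of each burning a whole one.  For example,
 * two back-to-back alloc_bootmem_bdata() calls for 512 bytes with 512-byte
 * alignment return offsets 0 and 512 within the same page: the second call
 * sees PFN_DOWN(last_end_off) + 1 == sidx, starts at the aligned old end,
 * and 'merge' excludes the shared page from being reserved a second time.
 */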

static void * __init alloc_bootmem_core(unsigned long size,
					unsigned long align,
					unsigned long goal,
					unsigned long limit)
{
	bootmem_data_t *bdata;
	void *region;

	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc(size, GFP_NOWAIT);

	list_for_each_entry(bdata, &bdata_list, list) {
		if (goal && bdata->node_low_pfn <= PFN_DOWN(goal))
			continue;
		if (limit && bdata->node_min_pfn >= PFN_DOWN(limit))
			break;

		region = alloc_bootmem_bdata(bdata, size, align, goal, limit);
		if (region)
			return region;
	}

	return NULL;
}

static void * __init ___alloc_bootmem_nopanic(unsigned long size,
					unsigned long align,
					unsigned long goal,
					unsigned long limit)
{
	void *ptr;

restart:
	ptr = alloc_bootmem_core(size, align, goal, limit);
	if (ptr)
		return ptr;
	if (goal) {
		goal = 0;
		goto restart;
	}

	return NULL;
}

/**
 * __alloc_bootmem_nopanic - allocate boot memory without panicking
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may happen on any node in the system.
 *
 * Returns NULL on failure.
 */
void * __init __alloc_bootmem_nopanic(unsigned long size, unsigned long align,
					unsigned long goal)
{
	unsigned long limit = 0;

	return ___alloc_bootmem_nopanic(size, align, goal, limit);
}

static void * __init ___alloc_bootmem(unsigned long size, unsigned long align,
				unsigned long goal, unsigned long limit)
{
	void *mem = ___alloc_bootmem_nopanic(size, align, goal, limit);

	if (mem)
		return mem;
	/*
	 * Whoops, we cannot satisfy the allocation request.
	 */
	printk(KERN_ALERT "bootmem alloc of %lu bytes failed!\n", size);
	panic("Out of memory");
	return NULL;
}
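
/*
 * Illustrative note: the allocators layer as goal -> no-goal -> panic.
 * ___alloc_bootmem_nopanic() first honors @goal, then retries the whole
 * node list with goal == 0; only the panicking wrappers turn the final
 * NULL into "Out of memory".  A common calling pattern in early setup
 * code steers allocations above the DMA zone, e.g.:
 *
 *	ptr = __alloc_bootmem(size, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS));
 *
 * which prefers memory above MAX_DMA_ADDRESS but falls back to any
 * address rather than fail.
 */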

/**
 * __alloc_bootmem - allocate boot memory
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may happen on any node in the system.
 *
 * The function panics if the request can not be satisfied.
 */
void * __init __alloc_bootmem(unsigned long size, unsigned long align,
				unsigned long goal)
{
	unsigned long limit = 0;

	return ___alloc_bootmem(size, align, goal, limit);
}

void * __init ___alloc_bootmem_node_nopanic(pg_data_t *pgdat,
				unsigned long size, unsigned long align,
				unsigned long goal, unsigned long limit)
{
	void *ptr;

	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc(size, GFP_NOWAIT);
again:

	/* do not panic in alloc_bootmem_bdata() */
	if (limit && goal + size > limit)
		limit = 0;

	ptr = alloc_bootmem_bdata(pgdat->bdata, size, align, goal, limit);
	if (ptr)
		return ptr;

	ptr = alloc_bootmem_core(size, align, goal, limit);
	if (ptr)
		return ptr;

	if (goal) {
		goal = 0;
		goto again;
	}

	return NULL;
}

void * __init __alloc_bootmem_node_nopanic(pg_data_t *pgdat, unsigned long size,
					unsigned long align, unsigned long goal)
{
	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);

	return ___alloc_bootmem_node_nopanic(pgdat, size, align, goal, 0);
}

void * __init ___alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
				unsigned long align, unsigned long goal,
				unsigned long limit)
{
	void *ptr;

	/* pass the limit through; dropping it here would defeat the
	 * ARCH_LOW_ADDRESS_LIMIT cap of the _low callers below */
	ptr = ___alloc_bootmem_node_nopanic(pgdat, size, align, goal, limit);
	if (ptr)
		return ptr;

	printk(KERN_ALERT "bootmem alloc of %lu bytes failed!\n", size);
	panic("Out of memory");
	return NULL;
}
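
/*
 * Illustrative note: the node-aware path tries the requested node first
 * via alloc_bootmem_bdata(), then every node in pfn order via
 * alloc_bootmem_core(), and only then drops @goal and starts over.  So a
 * caller like
 *
 *	ptr = __alloc_bootmem_node(NODE_DATA(nid), size, align, goal);
 *
 * gets node-local memory when possible but silently falls back to any
 * node rather than failing, and panics only if all of that fails.
 */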

/**
 * __alloc_bootmem_node - allocate boot memory from a specific node
 * @pgdat: node to allocate from
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may fall back to any node in the system if the specified node
 * can not hold the requested memory.
 *
 * The function panics if the request can not be satisfied.
 */
void * __init __alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
				unsigned long align, unsigned long goal)
{
	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);

	return ___alloc_bootmem_node(pgdat, size, align, goal, 0);
}

void * __init __alloc_bootmem_node_high(pg_data_t *pgdat, unsigned long size,
					unsigned long align, unsigned long goal)
{
#ifdef MAX_DMA32_PFN
	unsigned long end_pfn;

	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);

	/* update the goal with respect to MAX_DMA32_PFN */
	end_pfn = pgdat_end_pfn(pgdat);

	/*
	 * Retarget the goal above the DMA32 boundary if the node extends
	 * at least 128 MB worth of pages beyond it.
	 */
	if (end_pfn > MAX_DMA32_PFN + (128 << (20 - PAGE_SHIFT)) &&
	    (goal >> PAGE_SHIFT) < MAX_DMA32_PFN) {
		void *ptr;
		unsigned long new_goal;

		new_goal = MAX_DMA32_PFN << PAGE_SHIFT;
		ptr = alloc_bootmem_bdata(pgdat->bdata, size, align,
						new_goal, 0);
		if (ptr)
			return ptr;
	}
#endif

	return __alloc_bootmem_node(pgdat, size, align, goal);
}

#ifndef ARCH_LOW_ADDRESS_LIMIT
#define ARCH_LOW_ADDRESS_LIMIT	0xffffffffUL
#endif

/**
 * __alloc_bootmem_low - allocate low boot memory
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may happen on any node in the system.
 *
 * The function panics if the request can not be satisfied.
 */
void * __init __alloc_bootmem_low(unsigned long size, unsigned long align,
				unsigned long goal)
{
	return ___alloc_bootmem(size, align, goal, ARCH_LOW_ADDRESS_LIMIT);
}

void * __init __alloc_bootmem_low_nopanic(unsigned long size,
					unsigned long align,
					unsigned long goal)
{
	return ___alloc_bootmem_nopanic(size, align, goal,
					ARCH_LOW_ADDRESS_LIMIT);
}

/**
 * __alloc_bootmem_low_node - allocate low boot memory from a specific node
 * @pgdat: node to allocate from
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may fall back to any node in the system if the specified node
 * can not hold the requested memory.
 *
 * The function panics if the request can not be satisfied.
 */
void * __init __alloc_bootmem_low_node(pg_data_t *pgdat, unsigned long size,
					unsigned long align, unsigned long goal)
{
	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);

	return ___alloc_bootmem_node(pgdat, size, align,
					goal, ARCH_LOW_ADDRESS_LIMIT);
}
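
/*
 * Illustrative note: the _low variants cap every search at
 * ARCH_LOW_ADDRESS_LIMIT (4 GiB unless the arch overrides it), which is
 * what early users with 32-bit addressing constraints want.  For example,
 * a bounce-buffer pool that must be device-addressable might be set up
 * roughly like this (the pool name is made up):
 *
 *	io_pool = __alloc_bootmem_low(pool_bytes, PAGE_SIZE, 0);
 *
 * With goal == 0 the first fit below the limit is used; unlike the
 * _nopanic variant, a failure here panics instead of returning NULL.
 */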