root/arch/x86/mm/numa.c

DEFINITIONS

This source file includes the following definitions.
  1. numa_setup
  2. numa_cpu_node
  3. numa_set_node
  4. numa_clear_node
  5. setup_node_to_cpumask_map
  6. numa_add_memblk_to
  7. numa_remove_memblk_from
  8. numa_add_memblk
  9. alloc_node_data
  10. numa_cleanup_meminfo
  11. numa_nodemask_from_meminfo
  12. numa_reset_distance
  13. numa_alloc_distance
  14. numa_set_distance
  15. __node_distance
  16. numa_meminfo_cover_memory
  17. numa_clear_kernel_node_hotplug
  18. numa_register_memblks
  19. numa_init_array
  20. numa_init
  21. dummy_numa_init
  22. x86_numa_init
  23. init_memory_less_node
  24. init_cpu_to_node
  25. numa_add_cpu
  26. numa_remove_cpu
  27. __cpu_to_node
  28. early_cpu_to_node
  29. debug_cpumask_set_cpu
  30. numa_set_cpumask
  31. numa_add_cpu
  32. numa_remove_cpu
  33. cpumask_of_node
  34. memory_add_physaddr_to_nid

   1 // SPDX-License-Identifier: GPL-2.0-only
   2 /* Common code for 32 and 64-bit NUMA */
   3 #include <linux/acpi.h>
   4 #include <linux/kernel.h>
   5 #include <linux/mm.h>
   6 #include <linux/string.h>
   7 #include <linux/init.h>
   8 #include <linux/memblock.h>
   9 #include <linux/mmzone.h>
  10 #include <linux/ctype.h>
  11 #include <linux/nodemask.h>
  12 #include <linux/sched.h>
  13 #include <linux/topology.h>
  14 
  15 #include <asm/e820/api.h>
  16 #include <asm/proto.h>
  17 #include <asm/dma.h>
  18 #include <asm/amd_nb.h>
  19 
  20 #include "numa_internal.h"
  21 
  22 int numa_off;
  23 nodemask_t numa_nodes_parsed __initdata;
  24 
  25 struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
  26 EXPORT_SYMBOL(node_data);
  27 
  28 static struct numa_meminfo numa_meminfo
  29 #ifndef CONFIG_MEMORY_HOTPLUG
  30 __initdata
  31 #endif
  32 ;
  33 
  34 static int numa_distance_cnt;
  35 static u8 *numa_distance;
  36 
  37 static __init int numa_setup(char *opt)
  38 {
  39         if (!opt)
  40                 return -EINVAL;
  41         if (!strncmp(opt, "off", 3))
  42                 numa_off = 1;
  43 #ifdef CONFIG_NUMA_EMU
  44         if (!strncmp(opt, "fake=", 5))
  45                 numa_emu_cmdline(opt + 5);
  46 #endif
  47 #ifdef CONFIG_ACPI_NUMA
  48         if (!strncmp(opt, "noacpi", 6))
  49                 acpi_numa = -1;
  50 #endif
  51         return 0;
  52 }
  53 early_param("numa", numa_setup);
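
/*
 * Usage sketch (illustrative, not part of the upstream file): the "numa="
 * early parameter handled above accepts the following forms on the kernel
 * command line; everything around "numa=" in these boot lines is hypothetical.
 *
 *     linux /vmlinuz root=/dev/sda1 numa=off      (force numa_off, dummy init)
 *     linux /vmlinuz root=/dev/sda1 numa=fake=4   (CONFIG_NUMA_EMU emulation)
 *     linux /vmlinuz root=/dev/sda1 numa=noacpi   (CONFIG_ACPI_NUMA: ignore SRAT)
 */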
  54 
  55 /*
  56  * apicid, cpu, node mappings
  57  */
  58 s16 __apicid_to_node[MAX_LOCAL_APIC] = {
  59         [0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE
  60 };
  61 
  62 int numa_cpu_node(int cpu)
  63 {
  64         int apicid = early_per_cpu(x86_cpu_to_apicid, cpu);
  65 
  66         if (apicid != BAD_APICID)
  67                 return __apicid_to_node[apicid];
  68         return NUMA_NO_NODE;
  69 }
  70 
  71 cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
  72 EXPORT_SYMBOL(node_to_cpumask_map);
  73 
  74 /*
  75  * Map cpu index to node index
  76  */
  77 DEFINE_EARLY_PER_CPU(int, x86_cpu_to_node_map, NUMA_NO_NODE);
  78 EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_node_map);
  79 
  80 void numa_set_node(int cpu, int node)
  81 {
  82         int *cpu_to_node_map = early_per_cpu_ptr(x86_cpu_to_node_map);
  83 
  84         /* early setting, no percpu area yet */
  85         if (cpu_to_node_map) {
  86                 cpu_to_node_map[cpu] = node;
  87                 return;
  88         }
  89 
  90 #ifdef CONFIG_DEBUG_PER_CPU_MAPS
  91         if (cpu >= nr_cpu_ids || !cpu_possible(cpu)) {
  92                 printk(KERN_ERR "numa_set_node: invalid cpu# (%d)\n", cpu);
  93                 dump_stack();
  94                 return;
  95         }
  96 #endif
  97         per_cpu(x86_cpu_to_node_map, cpu) = node;
  98 
  99         set_cpu_numa_node(cpu, node);
 100 }
 101 
 102 void numa_clear_node(int cpu)
 103 {
 104         numa_set_node(cpu, NUMA_NO_NODE);
 105 }
 106 
 107 /*
 108  * Allocate node_to_cpumask_map based on number of available nodes
 109  * Requires node_possible_map to be valid.
 110  *
 111  * Note: cpumask_of_node() is not valid until after this is done.
 112  * (Use CONFIG_DEBUG_PER_CPU_MAPS to check this.)
 113  */
 114 void __init setup_node_to_cpumask_map(void)
 115 {
 116         unsigned int node;
 117 
 118         /* setup nr_node_ids if not done yet */
 119         if (nr_node_ids == MAX_NUMNODES)
 120                 setup_nr_node_ids();
 121 
 122         /* allocate the map */
 123         for (node = 0; node < nr_node_ids; node++)
 124                 alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]);
 125 
 126         /* cpumask_of_node() will now work */
 127         pr_debug("Node to cpumask map for %u nodes\n", nr_node_ids);
 128 }
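
/*
 * Illustrative consumer (a sketch, not part of the upstream file): once the
 * per-node cpumasks above are allocated and CPUs have been added with
 * numa_add_cpu(), a hypothetical caller could walk a node's CPUs like this.
 */
static void __maybe_unused example_walk_node_cpus(int nid)
{
        int cpu;

        for_each_cpu(cpu, cpumask_of_node(nid))
                pr_info("cpu %d belongs to node %d\n", cpu, nid);
}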
 129 
 130 static int __init numa_add_memblk_to(int nid, u64 start, u64 end,
 131                                      struct numa_meminfo *mi)
 132 {
 133         /* ignore zero length blks */
 134         if (start == end)
 135                 return 0;
 136 
 137         /* whine about and ignore invalid blks */
 138         if (start > end || nid < 0 || nid >= MAX_NUMNODES) {
 139                 pr_warn("Warning: invalid memblk node %d [mem %#010Lx-%#010Lx]\n",
 140                         nid, start, end - 1);
 141                 return 0;
 142         }
 143 
 144         if (mi->nr_blks >= NR_NODE_MEMBLKS) {
 145                 pr_err("too many memblk ranges\n");
 146                 return -EINVAL;
 147         }
 148 
 149         mi->blk[mi->nr_blks].start = start;
 150         mi->blk[mi->nr_blks].end = end;
 151         mi->blk[mi->nr_blks].nid = nid;
 152         mi->nr_blks++;
 153         return 0;
 154 }
 155 
 156 /**
 157  * numa_remove_memblk_from - Remove one numa_memblk from a numa_meminfo
 158  * @idx: Index of memblk to remove
 159  * @mi: numa_meminfo to remove memblk from
 160  *
 161  * Remove @idx'th numa_memblk from @mi by shifting @mi->blk[] and
 162  * decrementing @mi->nr_blks.
 163  */
 164 void __init numa_remove_memblk_from(int idx, struct numa_meminfo *mi)
 165 {
 166         mi->nr_blks--;
 167         memmove(&mi->blk[idx], &mi->blk[idx + 1],
 168                 (mi->nr_blks - idx) * sizeof(mi->blk[0]));
 169 }
 170 
 171 /**
 172  * numa_add_memblk - Add one numa_memblk to numa_meminfo
 173  * @nid: NUMA node ID of the new memblk
 174  * @start: Start address of the new memblk
 175  * @end: End address of the new memblk
 176  *
 177  * Add a new memblk to the default numa_meminfo.
 178  *
 179  * RETURNS:
 180  * 0 on success, -errno on failure.
 181  */
 182 int __init numa_add_memblk(int nid, u64 start, u64 end)
 183 {
 184         return numa_add_memblk_to(nid, start, end, &numa_meminfo);
 185 }
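
/*
 * Caller sketch (hypothetical two-node layout, not from any real platform):
 * a detection routine of the kind passed to numa_init() below is expected to
 * mark its parsed nodes and register their memory ranges roughly as follows.
 */
static int __init __maybe_unused example_numa_init(void)
{
        /* Two nodes of 4 GiB each, described as [start, end) physical ranges. */
        node_set(0, numa_nodes_parsed);
        node_set(1, numa_nodes_parsed);

        if (numa_add_memblk(0, 0x000000000ULL, 0x100000000ULL) < 0 ||
            numa_add_memblk(1, 0x100000000ULL, 0x200000000ULL) < 0)
                return -EINVAL;

        return 0;
}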
 186 
 187 /* Allocate NODE_DATA for a node on the local memory */
 188 static void __init alloc_node_data(int nid)
 189 {
 190         const size_t nd_size = roundup(sizeof(pg_data_t), PAGE_SIZE);
 191         u64 nd_pa;
 192         void *nd;
 193         int tnid;
 194 
 195         /*
 196          * Allocate node data.  Try node-local memory and then any node.
 197          * Never allocate in DMA zone.
 198          */
 199         nd_pa = memblock_phys_alloc_try_nid(nd_size, SMP_CACHE_BYTES, nid);
 200         if (!nd_pa) {
 201                 pr_err("Cannot find %zu bytes in any node (initial node: %d)\n",
 202                        nd_size, nid);
 203                 return;
 204         }
 205         nd = __va(nd_pa);
 206 
 207         /* report and initialize */
 208         printk(KERN_INFO "NODE_DATA(%d) allocated [mem %#010Lx-%#010Lx]\n", nid,
 209                nd_pa, nd_pa + nd_size - 1);
 210         tnid = early_pfn_to_nid(nd_pa >> PAGE_SHIFT);
 211         if (tnid != nid)
 212                 printk(KERN_INFO "    NODE_DATA(%d) on node %d\n", nid, tnid);
 213 
 214         node_data[nid] = nd;
 215         memset(NODE_DATA(nid), 0, sizeof(pg_data_t));
 216 
 217         node_set_online(nid);
 218 }
 219 
 220 /**
 221  * numa_cleanup_meminfo - Cleanup a numa_meminfo
 222  * @mi: numa_meminfo to clean up
 223  *
 224  * Sanitize @mi by merging and removing unnecessary memblks.  Also check for
 225  * conflicts and clear unused memblks.
 226  *
 227  * RETURNS:
 228  * 0 on success, -errno on failure.
 229  */
 230 int __init numa_cleanup_meminfo(struct numa_meminfo *mi)
 231 {
 232         const u64 low = 0;
 233         const u64 high = PFN_PHYS(max_pfn);
 234         int i, j, k;
 235 
 236         /* first, trim all entries */
 237         for (i = 0; i < mi->nr_blks; i++) {
 238                 struct numa_memblk *bi = &mi->blk[i];
 239 
 240                 /* make sure all blocks are inside the limits */
 241                 bi->start = max(bi->start, low);
 242                 bi->end = min(bi->end, high);
 243 
  244                 /* and there's no empty or non-existent block */
 245                 if (bi->start >= bi->end ||
 246                     !memblock_overlaps_region(&memblock.memory,
 247                         bi->start, bi->end - bi->start))
 248                         numa_remove_memblk_from(i--, mi);
 249         }
 250 
 251         /* merge neighboring / overlapping entries */
 252         for (i = 0; i < mi->nr_blks; i++) {
 253                 struct numa_memblk *bi = &mi->blk[i];
 254 
 255                 for (j = i + 1; j < mi->nr_blks; j++) {
 256                         struct numa_memblk *bj = &mi->blk[j];
 257                         u64 start, end;
 258 
 259                         /*
 260                          * See whether there are overlapping blocks.  Whine
 261                          * about but allow overlaps of the same nid.  They
 262                          * will be merged below.
 263                          */
 264                         if (bi->end > bj->start && bi->start < bj->end) {
 265                                 if (bi->nid != bj->nid) {
 266                                         pr_err("node %d [mem %#010Lx-%#010Lx] overlaps with node %d [mem %#010Lx-%#010Lx]\n",
 267                                                bi->nid, bi->start, bi->end - 1,
 268                                                bj->nid, bj->start, bj->end - 1);
 269                                         return -EINVAL;
 270                                 }
 271                                 pr_warn("Warning: node %d [mem %#010Lx-%#010Lx] overlaps with itself [mem %#010Lx-%#010Lx]\n",
 272                                         bi->nid, bi->start, bi->end - 1,
 273                                         bj->start, bj->end - 1);
 274                         }
 275 
 276                         /*
 277                          * Join together blocks on the same node, holes
 278                          * between which don't overlap with memory on other
 279                          * nodes.
 280                          */
 281                         if (bi->nid != bj->nid)
 282                                 continue;
 283                         start = min(bi->start, bj->start);
 284                         end = max(bi->end, bj->end);
 285                         for (k = 0; k < mi->nr_blks; k++) {
 286                                 struct numa_memblk *bk = &mi->blk[k];
 287 
 288                                 if (bi->nid == bk->nid)
 289                                         continue;
 290                                 if (start < bk->end && end > bk->start)
 291                                         break;
 292                         }
 293                         if (k < mi->nr_blks)
 294                                 continue;
 295                         printk(KERN_INFO "NUMA: Node %d [mem %#010Lx-%#010Lx] + [mem %#010Lx-%#010Lx] -> [mem %#010Lx-%#010Lx]\n",
 296                                bi->nid, bi->start, bi->end - 1, bj->start,
 297                                bj->end - 1, start, end - 1);
 298                         bi->start = start;
 299                         bi->end = end;
 300                         numa_remove_memblk_from(j--, mi);
 301                 }
 302         }
 303 
 304         /* clear unused ones */
 305         for (i = mi->nr_blks; i < ARRAY_SIZE(mi->blk); i++) {
 306                 mi->blk[i].start = mi->blk[i].end = 0;
 307                 mi->blk[i].nid = NUMA_NO_NODE;
 308         }
 309 
 310         return 0;
 311 }
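
/*
 * Worked example (hypothetical input): with blk[0] = node 0
 * [0x00000000-0x80000000) and blk[1] = node 0 [0x40000000-0xc0000000), the
 * overlap only triggers the "overlaps with itself" warning because both
 * blocks belong to node 0; the inner k-loop finds no other node's memory
 * inside the merged span, so the two entries collapse into a single node 0
 * [0x00000000-0xc0000000) block and nr_blks drops by one.
 */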
 312 
 313 /*
 314  * Set nodes, which have memory in @mi, in *@nodemask.
 315  */
 316 static void __init numa_nodemask_from_meminfo(nodemask_t *nodemask,
 317                                               const struct numa_meminfo *mi)
 318 {
 319         int i;
 320 
 321         for (i = 0; i < ARRAY_SIZE(mi->blk); i++)
 322                 if (mi->blk[i].start != mi->blk[i].end &&
 323                     mi->blk[i].nid != NUMA_NO_NODE)
 324                         node_set(mi->blk[i].nid, *nodemask);
 325 }
 326 
 327 /**
 328  * numa_reset_distance - Reset NUMA distance table
 329  *
 330  * The current table is freed.  The next numa_set_distance() call will
 331  * create a new one.
 332  */
 333 void __init numa_reset_distance(void)
 334 {
 335         size_t size = numa_distance_cnt * numa_distance_cnt * sizeof(numa_distance[0]);
 336 
 337         /* numa_distance could be 1LU marking allocation failure, test cnt */
 338         if (numa_distance_cnt)
 339                 memblock_free(__pa(numa_distance), size);
 340         numa_distance_cnt = 0;
 341         numa_distance = NULL;   /* enable table creation */
 342 }
 343 
 344 static int __init numa_alloc_distance(void)
 345 {
 346         nodemask_t nodes_parsed;
 347         size_t size;
 348         int i, j, cnt = 0;
 349         u64 phys;
 350 
 351         /* size the new table and allocate it */
 352         nodes_parsed = numa_nodes_parsed;
 353         numa_nodemask_from_meminfo(&nodes_parsed, &numa_meminfo);
 354 
 355         for_each_node_mask(i, nodes_parsed)
 356                 cnt = i;
 357         cnt++;
 358         size = cnt * cnt * sizeof(numa_distance[0]);
 359 
 360         phys = memblock_find_in_range(0, PFN_PHYS(max_pfn_mapped),
 361                                       size, PAGE_SIZE);
 362         if (!phys) {
 363                 pr_warn("Warning: can't allocate distance table!\n");
 364                 /* don't retry until explicitly reset */
 365                 numa_distance = (void *)1LU;
 366                 return -ENOMEM;
 367         }
 368         memblock_reserve(phys, size);
 369 
 370         numa_distance = __va(phys);
 371         numa_distance_cnt = cnt;
 372 
 373         /* fill with the default distances */
 374         for (i = 0; i < cnt; i++)
 375                 for (j = 0; j < cnt; j++)
 376                         numa_distance[i * cnt + j] = i == j ?
 377                                 LOCAL_DISTANCE : REMOTE_DISTANCE;
 378         printk(KERN_DEBUG "NUMA: Initialized distance table, cnt=%d\n", cnt);
 379 
 380         return 0;
 381 }
 382 
 383 /**
 384  * numa_set_distance - Set NUMA distance from one NUMA to another
 385  * @from: the 'from' node to set distance
 386  * @to: the 'to'  node to set distance
 387  * @distance: NUMA distance
 388  *
 389  * Set the distance from node @from to @to to @distance.  If distance table
 390  * doesn't exist, one which is large enough to accommodate all the currently
 391  * known nodes will be created.
 392  *
  393  * If such a table cannot be allocated, a warning is printed and further
 394  * calls are ignored until the distance table is reset with
 395  * numa_reset_distance().
 396  *
 397  * If @from or @to is higher than the highest known node or lower than zero
 398  * at the time of table creation or @distance doesn't make sense, the call
 399  * is ignored.
 400  * This is to allow simplification of specific NUMA config implementations.
 401  */
 402 void __init numa_set_distance(int from, int to, int distance)
 403 {
 404         if (!numa_distance && numa_alloc_distance() < 0)
 405                 return;
 406 
 407         if (from >= numa_distance_cnt || to >= numa_distance_cnt ||
 408                         from < 0 || to < 0) {
 409                 pr_warn_once("Warning: node ids are out of bound, from=%d to=%d distance=%d\n",
 410                              from, to, distance);
 411                 return;
 412         }
 413 
 414         if ((u8)distance != distance ||
 415             (from == to && distance != LOCAL_DISTANCE)) {
 416                 pr_warn_once("Warning: invalid distance parameter, from=%d to=%d distance=%d\n",
 417                              from, to, distance);
 418                 return;
 419         }
 420 
 421         numa_distance[from * numa_distance_cnt + to] = distance;
 422 }
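
/*
 * A sketch of how SLIT-style firmware distances would be fed through this
 * interface (the 2x2 matrix below is illustrative, not from a real table).
 */
static void __init __maybe_unused example_load_distances(void)
{
        static const int d[2][2] = { { 10, 21 }, { 21, 10 } };
        int i, j;

        for (i = 0; i < 2; i++)
                for (j = 0; j < 2; j++)
                        numa_set_distance(i, j, d[i][j]);

        /*
         * __node_distance(0, 1) now returns 21; nodes outside the table
         * fall back to LOCAL_DISTANCE/REMOTE_DISTANCE (see below).
         */
}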
 423 
 424 int __node_distance(int from, int to)
 425 {
 426         if (from >= numa_distance_cnt || to >= numa_distance_cnt)
 427                 return from == to ? LOCAL_DISTANCE : REMOTE_DISTANCE;
 428         return numa_distance[from * numa_distance_cnt + to];
 429 }
 430 EXPORT_SYMBOL(__node_distance);
 431 
 432 /*
 433  * Sanity check to catch more bad NUMA configurations (they are amazingly
 434  * common).  Make sure the nodes cover all memory.
 435  */
 436 static bool __init numa_meminfo_cover_memory(const struct numa_meminfo *mi)
 437 {
 438         u64 numaram, e820ram;
 439         int i;
 440 
 441         numaram = 0;
 442         for (i = 0; i < mi->nr_blks; i++) {
 443                 u64 s = mi->blk[i].start >> PAGE_SHIFT;
 444                 u64 e = mi->blk[i].end >> PAGE_SHIFT;
 445                 numaram += e - s;
 446                 numaram -= __absent_pages_in_range(mi->blk[i].nid, s, e);
 447                 if ((s64)numaram < 0)
 448                         numaram = 0;
 449         }
 450 
 451         e820ram = max_pfn - absent_pages_in_range(0, max_pfn);
 452 
 453         /* We seem to lose 3 pages somewhere. Allow 1M of slack. */
 454         if ((s64)(e820ram - numaram) >= (1 << (20 - PAGE_SHIFT))) {
 455                 printk(KERN_ERR "NUMA: nodes only cover %LuMB of your %LuMB e820 RAM. Not used.\n",
 456                        (numaram << PAGE_SHIFT) >> 20,
 457                        (e820ram << PAGE_SHIFT) >> 20);
 458                 return false;
 459         }
 460         return true;
 461 }
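
/*
 * The slack above is 1 MiB expressed in pages: with the x86 PAGE_SHIFT of 12,
 * (1 << (20 - PAGE_SHIFT)) == 256 pages.  An uncovered remainder of fewer
 * than 256 pages (4 KiB each) is tolerated; at 1 MiB or more of uncovered
 * e820 RAM the NUMA configuration is rejected.
 */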
 462 
 463 /*
 464  * Mark all currently memblock-reserved physical memory (which covers the
 465  * kernel's own memory ranges) as hot-unswappable.
 466  */
 467 static void __init numa_clear_kernel_node_hotplug(void)
 468 {
 469         nodemask_t reserved_nodemask = NODE_MASK_NONE;
 470         struct memblock_region *mb_region;
 471         int i;
 472 
 473         /*
 474          * We have to do some preprocessing of memblock regions, to
 475          * make them suitable for reservation.
 476          *
 477          * At this time, all memory regions reserved by memblock are
 478          * used by the kernel, but those regions are not split up
 479          * along node boundaries yet, and don't necessarily have their
 480          * node ID set yet either.
 481          *
 482          * So iterate over all memory known to the x86 architecture,
 483          * and use those ranges to set the nid in memblock.reserved.
 484          * This will split up the memblock regions along node
 485          * boundaries and will set the node IDs as well.
 486          */
 487         for (i = 0; i < numa_meminfo.nr_blks; i++) {
 488                 struct numa_memblk *mb = numa_meminfo.blk + i;
 489                 int ret;
 490 
 491                 ret = memblock_set_node(mb->start, mb->end - mb->start, &memblock.reserved, mb->nid);
 492                 WARN_ON_ONCE(ret);
 493         }
 494 
 495         /*
 496          * Now go over all reserved memblock regions, to construct a
 497          * node mask of all kernel reserved memory areas.
 498          *
 499          * [ Note, when booting with mem=nn[kMG] or in a kdump kernel,
 500          *   numa_meminfo might not include all memblock.reserved
 501          *   memory ranges, because quirks such as trim_snb_memory()
 502          *   reserve specific pages for Sandy Bridge graphics. ]
 503          */
 504         for_each_memblock(reserved, mb_region) {
 505                 if (mb_region->nid != MAX_NUMNODES)
 506                         node_set(mb_region->nid, reserved_nodemask);
 507         }
 508 
 509         /*
 510          * Finally, clear the MEMBLOCK_HOTPLUG flag for all memory
 511          * belonging to the reserved node mask.
 512          *
 513          * Note that this will include memory regions that reside
 514          * on nodes that contain kernel memory - entire nodes
 515          * become hot-unpluggable:
 516          */
 517         for (i = 0; i < numa_meminfo.nr_blks; i++) {
 518                 struct numa_memblk *mb = numa_meminfo.blk + i;
 519 
 520                 if (!node_isset(mb->nid, reserved_nodemask))
 521                         continue;
 522 
 523                 memblock_clear_hotplug(mb->start, mb->end - mb->start);
 524         }
 525 }
 526 
 527 static int __init numa_register_memblks(struct numa_meminfo *mi)
 528 {
 529         unsigned long uninitialized_var(pfn_align);
 530         int i, nid;
 531 
 532         /* Account for nodes with cpus and no memory */
 533         node_possible_map = numa_nodes_parsed;
 534         numa_nodemask_from_meminfo(&node_possible_map, mi);
 535         if (WARN_ON(nodes_empty(node_possible_map)))
 536                 return -EINVAL;
 537 
 538         for (i = 0; i < mi->nr_blks; i++) {
 539                 struct numa_memblk *mb = &mi->blk[i];
 540                 memblock_set_node(mb->start, mb->end - mb->start,
 541                                   &memblock.memory, mb->nid);
 542         }
 543 
 544         /*
  545          * Very early in boot the kernel has to use some memory, e.g. for
  546          * loading the kernel image. We cannot prevent this anyway. So any
 547          * node the kernel resides in should be un-hotpluggable.
 548          *
 549          * And when we come here, alloc node data won't fail.
 550          */
 551         numa_clear_kernel_node_hotplug();
 552 
 553         /*
 554          * If sections array is gonna be used for pfn -> nid mapping, check
 555          * whether its granularity is fine enough.
 556          */
 557 #ifdef NODE_NOT_IN_PAGE_FLAGS
 558         pfn_align = node_map_pfn_alignment();
 559         if (pfn_align && pfn_align < PAGES_PER_SECTION) {
 560                 printk(KERN_WARNING "Node alignment %LuMB < min %LuMB, rejecting NUMA config\n",
 561                        PFN_PHYS(pfn_align) >> 20,
 562                        PFN_PHYS(PAGES_PER_SECTION) >> 20);
 563                 return -EINVAL;
 564         }
 565 #endif
 566         if (!numa_meminfo_cover_memory(mi))
 567                 return -EINVAL;
 568 
 569         /* Finally register nodes. */
 570         for_each_node_mask(nid, node_possible_map) {
 571                 u64 start = PFN_PHYS(max_pfn);
 572                 u64 end = 0;
 573 
 574                 for (i = 0; i < mi->nr_blks; i++) {
 575                         if (nid != mi->blk[i].nid)
 576                                 continue;
 577                         start = min(mi->blk[i].start, start);
 578                         end = max(mi->blk[i].end, end);
 579                 }
 580 
 581                 if (start >= end)
 582                         continue;
 583 
 584                 /*
 585                  * Don't confuse VM with a node that doesn't have the
 586                  * minimum amount of memory:
 587                  */
 588                 if (end && (end - start) < NODE_MIN_SIZE)
 589                         continue;
 590 
 591                 alloc_node_data(nid);
 592         }
 593 
 594         /* Dump memblock with node info and return. */
 595         memblock_dump_all();
 596         return 0;
 597 }
 598 
 599 /*
 600  * There are unfortunately some poorly designed mainboards around that
 601  * only connect memory to a single CPU. This breaks the 1:1 cpu->node
 602  * mapping. To avoid this fill in the mapping for all possible CPUs,
 603  * as the number of CPUs is not known yet. We round robin the existing
 604  * nodes.
 605  */
 606 static void __init numa_init_array(void)
 607 {
 608         int rr, i;
 609 
 610         rr = first_node(node_online_map);
 611         for (i = 0; i < nr_cpu_ids; i++) {
 612                 if (early_cpu_to_node(i) != NUMA_NO_NODE)
 613                         continue;
 614                 numa_set_node(i, rr);
 615                 rr = next_node_in(rr, node_online_map);
 616         }
 617 }
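
/*
 * Worked example (hypothetical system): with nodes 0 and 1 online and eight
 * possible CPUs still at NUMA_NO_NODE, the round robin above maps CPUs
 * 0, 2, 4, 6 to node 0 and CPUs 1, 3, 5, 7 to node 1.
 */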
 618 
 619 static int __init numa_init(int (*init_func)(void))
 620 {
 621         int i;
 622         int ret;
 623 
 624         for (i = 0; i < MAX_LOCAL_APIC; i++)
 625                 set_apicid_to_node(i, NUMA_NO_NODE);
 626 
 627         nodes_clear(numa_nodes_parsed);
 628         nodes_clear(node_possible_map);
 629         nodes_clear(node_online_map);
 630         memset(&numa_meminfo, 0, sizeof(numa_meminfo));
 631         WARN_ON(memblock_set_node(0, ULLONG_MAX, &memblock.memory,
 632                                   MAX_NUMNODES));
 633         WARN_ON(memblock_set_node(0, ULLONG_MAX, &memblock.reserved,
 634                                   MAX_NUMNODES));
 635         /* In case that parsing SRAT failed. */
 636         WARN_ON(memblock_clear_hotplug(0, ULLONG_MAX));
 637         numa_reset_distance();
 638 
 639         ret = init_func();
 640         if (ret < 0)
 641                 return ret;
 642 
 643         /*
 644          * We reset memblock back to the top-down direction
 645          * here because if we configured ACPI_NUMA, we have
 646          * parsed SRAT in init_func(). It is ok to have the
  647          * reset here even if we didn't configure ACPI_NUMA
  648          * or acpi numa init fails and falls back to dummy
 649          * numa init.
 650          */
 651         memblock_set_bottom_up(false);
 652 
 653         ret = numa_cleanup_meminfo(&numa_meminfo);
 654         if (ret < 0)
 655                 return ret;
 656 
 657         numa_emulation(&numa_meminfo, numa_distance_cnt);
 658 
 659         ret = numa_register_memblks(&numa_meminfo);
 660         if (ret < 0)
 661                 return ret;
 662 
 663         for (i = 0; i < nr_cpu_ids; i++) {
 664                 int nid = early_cpu_to_node(i);
 665 
 666                 if (nid == NUMA_NO_NODE)
 667                         continue;
 668                 if (!node_online(nid))
 669                         numa_clear_node(i);
 670         }
 671         numa_init_array();
 672 
 673         return 0;
 674 }
 675 
 676 /**
 677  * dummy_numa_init - Fallback dummy NUMA init
 678  *
 679  * Used if there's no underlying NUMA architecture, NUMA initialization
 680  * fails, or NUMA is disabled on the command line.
 681  *
 682  * Must online at least one node and add memory blocks that cover all
 683  * allowed memory.  This function must not fail.
 684  */
 685 static int __init dummy_numa_init(void)
 686 {
 687         printk(KERN_INFO "%s\n",
 688                numa_off ? "NUMA turned off" : "No NUMA configuration found");
 689         printk(KERN_INFO "Faking a node at [mem %#018Lx-%#018Lx]\n",
 690                0LLU, PFN_PHYS(max_pfn) - 1);
 691 
 692         node_set(0, numa_nodes_parsed);
 693         numa_add_memblk(0, 0, PFN_PHYS(max_pfn));
 694 
 695         return 0;
 696 }
 697 
 698 /**
 699  * x86_numa_init - Initialize NUMA
 700  *
 701  * Try each configured NUMA initialization method until one succeeds.  The
  702  * last fallback is a dummy single node config encompassing the whole memory and
 703  * never fails.
 704  */
 705 void __init x86_numa_init(void)
 706 {
 707         if (!numa_off) {
 708 #ifdef CONFIG_ACPI_NUMA
 709                 if (!numa_init(x86_acpi_numa_init))
 710                         return;
 711 #endif
 712 #ifdef CONFIG_AMD_NUMA
 713                 if (!numa_init(amd_numa_init))
 714                         return;
 715 #endif
 716         }
 717 
 718         numa_init(dummy_numa_init);
 719 }
 720 
 721 static void __init init_memory_less_node(int nid)
 722 {
 723         unsigned long zones_size[MAX_NR_ZONES] = {0};
 724         unsigned long zholes_size[MAX_NR_ZONES] = {0};
 725 
  726         /* Allocate and initialize node data. Memory-less node is now online. */
 727         alloc_node_data(nid);
 728         free_area_init_node(nid, zones_size, 0, zholes_size);
 729 
 730         /*
 731          * All zonelists will be built later in start_kernel() after per cpu
 732          * areas are initialized.
 733          */
 734 }
 735 
 736 /*
 737  * Setup early cpu_to_node.
 738  *
  739  * Populate cpu_to_node[] only if the x86_cpu_to_apicid[]
  740  * and apicid_to_node[] tables have valid entries for a CPU.
 741  * This means we skip cpu_to_node[] initialisation for NUMA
 742  * emulation and faking node case (when running a kernel compiled
 743  * for NUMA on a non NUMA box), which is OK as cpu_to_node[]
 744  * is already initialized in a round robin manner at numa_init_array,
 745  * prior to this call, and this initialization is good enough
 746  * for the fake NUMA cases.
 747  *
 748  * Called before the per_cpu areas are setup.
 749  */
 750 void __init init_cpu_to_node(void)
 751 {
 752         int cpu;
 753         u16 *cpu_to_apicid = early_per_cpu_ptr(x86_cpu_to_apicid);
 754 
 755         BUG_ON(cpu_to_apicid == NULL);
 756 
 757         for_each_possible_cpu(cpu) {
 758                 int node = numa_cpu_node(cpu);
 759 
 760                 if (node == NUMA_NO_NODE)
 761                         continue;
 762 
 763                 if (!node_online(node))
 764                         init_memory_less_node(node);
 765 
 766                 numa_set_node(cpu, node);
 767         }
 768 }
 769 
 770 #ifndef CONFIG_DEBUG_PER_CPU_MAPS
 771 
 772 # ifndef CONFIG_NUMA_EMU
 773 void numa_add_cpu(int cpu)
 774 {
 775         cpumask_set_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
 776 }
 777 
 778 void numa_remove_cpu(int cpu)
 779 {
 780         cpumask_clear_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
 781 }
 782 # endif /* !CONFIG_NUMA_EMU */
 783 
 784 #else   /* !CONFIG_DEBUG_PER_CPU_MAPS */
 785 
 786 int __cpu_to_node(int cpu)
 787 {
 788         if (early_per_cpu_ptr(x86_cpu_to_node_map)) {
 789                 printk(KERN_WARNING
 790                         "cpu_to_node(%d): usage too early!\n", cpu);
 791                 dump_stack();
 792                 return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
 793         }
 794         return per_cpu(x86_cpu_to_node_map, cpu);
 795 }
 796 EXPORT_SYMBOL(__cpu_to_node);
 797 
 798 /*
 799  * Same function as cpu_to_node() but used if called before the
 800  * per_cpu areas are setup.
 801  */
 802 int early_cpu_to_node(int cpu)
 803 {
 804         if (early_per_cpu_ptr(x86_cpu_to_node_map))
 805                 return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
 806 
 807         if (!cpu_possible(cpu)) {
 808                 printk(KERN_WARNING
 809                         "early_cpu_to_node(%d): no per_cpu area!\n", cpu);
 810                 dump_stack();
 811                 return NUMA_NO_NODE;
 812         }
 813         return per_cpu(x86_cpu_to_node_map, cpu);
 814 }
 815 
 816 void debug_cpumask_set_cpu(int cpu, int node, bool enable)
 817 {
 818         struct cpumask *mask;
 819 
 820         if (node == NUMA_NO_NODE) {
 821                 /* early_cpu_to_node() already emits a warning and trace */
 822                 return;
 823         }
 824         mask = node_to_cpumask_map[node];
 825         if (!mask) {
 826                 pr_err("node_to_cpumask_map[%i] NULL\n", node);
 827                 dump_stack();
 828                 return;
 829         }
 830 
 831         if (enable)
 832                 cpumask_set_cpu(cpu, mask);
 833         else
 834                 cpumask_clear_cpu(cpu, mask);
 835 
 836         printk(KERN_DEBUG "%s cpu %d node %d: mask now %*pbl\n",
 837                 enable ? "numa_add_cpu" : "numa_remove_cpu",
 838                 cpu, node, cpumask_pr_args(mask));
 839         return;
 840 }
 841 
 842 # ifndef CONFIG_NUMA_EMU
 843 static void numa_set_cpumask(int cpu, bool enable)
 844 {
 845         debug_cpumask_set_cpu(cpu, early_cpu_to_node(cpu), enable);
 846 }
 847 
 848 void numa_add_cpu(int cpu)
 849 {
 850         numa_set_cpumask(cpu, true);
 851 }
 852 
 853 void numa_remove_cpu(int cpu)
 854 {
 855         numa_set_cpumask(cpu, false);
 856 }
 857 # endif /* !CONFIG_NUMA_EMU */
 858 
 859 /*
 860  * Returns a pointer to the bitmask of CPUs on Node 'node'.
 861  */
 862 const struct cpumask *cpumask_of_node(int node)
 863 {
 864         if ((unsigned)node >= nr_node_ids) {
 865                 printk(KERN_WARNING
 866                         "cpumask_of_node(%d): (unsigned)node >= nr_node_ids(%u)\n",
 867                         node, nr_node_ids);
 868                 dump_stack();
 869                 return cpu_none_mask;
 870         }
 871         if (node_to_cpumask_map[node] == NULL) {
 872                 printk(KERN_WARNING
 873                         "cpumask_of_node(%d): no node_to_cpumask_map!\n",
 874                         node);
 875                 dump_stack();
 876                 return cpu_online_mask;
 877         }
 878         return node_to_cpumask_map[node];
 879 }
 880 EXPORT_SYMBOL(cpumask_of_node);
 881 
 882 #endif  /* !CONFIG_DEBUG_PER_CPU_MAPS */
 883 
 884 #ifdef CONFIG_MEMORY_HOTPLUG
 885 int memory_add_physaddr_to_nid(u64 start)
 886 {
 887         struct numa_meminfo *mi = &numa_meminfo;
 888         int nid = mi->blk[0].nid;
 889         int i;
 890 
 891         for (i = 0; i < mi->nr_blks; i++)
 892                 if (mi->blk[i].start <= start && mi->blk[i].end > start)
 893                         nid = mi->blk[i].nid;
 894         return nid;
 895 }
 896 EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
 897 #endif
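
/*
 * Worked example (hypothetical layout): with node 0 covering
 * [0x0-0x100000000) and node 1 covering [0x100000000-0x200000000),
 * memory_add_physaddr_to_nid(0x180000000) returns 1, while an address
 * outside every known block falls back to mi->blk[0].nid.
 */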
