root/arch/powerpc/mm/init_64.c


DEFINITIONS

This source file includes the following definitions.
  1. vmemmap_section_start
  2. vmemmap_populated
  3. vmemmap_list_alloc
  4. vmemmap_list_populate
  5. altmap_cross_boundary
  6. vmemmap_populate
  7. vmemmap_list_free
  8. vmemmap_free
  9. register_page_bootmem_memmap
  10. parse_disable_radix
  11. early_check_vec5
  12. mmu_early_init_devtree

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Dave Engebretsen <engebret@us.ibm.com>
 *      Rework for PPC64 port.
 */

#undef DEBUG

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/highmem.h>
#include <linux/idr.h>
#include <linux/nodemask.h>
#include <linux/module.h>
#include <linux/poison.h>
#include <linux/memblock.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/of_fdt.h>
#include <linux/libfdt.h>
#include <linux/memremap.h>

#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <linux/uaccess.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/tlb.h>
#include <asm/eeh.h>
#include <asm/processor.h>
#include <asm/mmzone.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/iommu.h>
#include <asm/vdso.h>

#include <mm/mmu_decl.h>

phys_addr_t memstart_addr = ~0;
EXPORT_SYMBOL_GPL(memstart_addr);
phys_addr_t kernstart_addr;
EXPORT_SYMBOL_GPL(kernstart_addr);

#ifdef CONFIG_SPARSEMEM_VMEMMAP
/*
 * Given an address within the vmemmap, determine the pfn of the page that
 * represents the start of the section it is within.  Note that we have to
 * do this by hand as the proffered address may not be correctly aligned.
 * Subtraction of non-aligned pointers produces undefined results.
 */
static unsigned long __meminit vmemmap_section_start(unsigned long page)
{
        unsigned long offset = page - ((unsigned long)(vmemmap));

        /* Return the pfn of the start of the section. */
        return (offset / sizeof(struct page)) & PAGE_SECTION_MASK;
}
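
/*
 * Worked example (illustrative only; the constants below are assumptions,
 * not values taken from this file): with sizeof(struct page) == 64 and
 * PAGES_PER_SECTION == 256, an address at vmemmap + 0x40040 gives
 * offset / sizeof(struct page) == 0x1001, which PAGE_SECTION_MASK rounds
 * down to the section-start pfn 0x1000.
 */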

/*
 * Check if this vmemmap page is already initialised.  If any section
 * which overlaps this vmemmap page is initialised then this page is
 * initialised already.
 */
static int __meminit vmemmap_populated(unsigned long start, int page_size)
{
        unsigned long end = start + page_size;
        start = (unsigned long)(pfn_to_page(vmemmap_section_start(start)));

        for (; start < end; start += (PAGES_PER_SECTION * sizeof(struct page)))
                if (pfn_valid(page_to_pfn((struct page *)start)))
                        return 1;

        return 0;
}
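
/*
 * Note (illustrative, with assumed values): each loop iteration above
 * advances by one memory section's worth of struct pages.  With
 * PAGES_PER_SECTION == 256 and sizeof(struct page) == 64 that is a
 * 16 KiB stride through the vmemmap virtual space.
 */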

/*
 * vmemmap virtual address space management does not have a traditional
 * page table to track which virtual struct pages are backed by a physical
 * mapping.  The virtual to physical mappings are instead tracked in a
 * simple linked list.  'vmemmap_list' maintains the entire vmemmap
 * physical mapping at all times, whereas the 'next' list maintains the
 * available vmemmap_backing structures which have been deleted from
 * 'vmemmap_list' during system runtime (memory hotplug remove operation).
 * The freed 'vmemmap_backing' structures are reused later when new
 * requests come in, without allocating fresh memory.  The same pointer
 * also tracks the allocated 'vmemmap_backing' structures, as we allocate
 * one full page of memory at a time when we don't have any.
 */
struct vmemmap_backing *vmemmap_list;
static struct vmemmap_backing *next;
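
/*
 * For reference (the real declaration lives in the powerpc pgalloc
 * headers, not here; the field names below are simply the ones this
 * file uses):
 *
 *     struct vmemmap_backing {
 *             struct vmemmap_backing *list;
 *             unsigned long phys;
 *             unsigned long virt_addr;
 *     };
 */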

/*
 * The same pointer 'next' tracks individual chunks inside the allocated
 * full page during boot time, and again tracks the freed nodes during
 * runtime.  This would be racy, but the two uses never overlap because
 * they are separated by the boot process.  It would be a problem if a
 * memory hotplug operation somehow happened during boot!
 */
static int num_left;
static int num_freed;
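
/*
 * Bookkeeping sketch (derived from the code below): at boot,
 * vmemmap_list_alloc() grabs a whole page, sets num_left to
 * PAGE_SIZE / sizeof(struct vmemmap_backing) and hands out chunks by
 * advancing 'next'.  At runtime, entries unlinked by vmemmap_list_free()
 * are pushed onto 'next' and counted in num_freed, to be handed back out
 * before any new page is allocated.
 */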

static __meminit struct vmemmap_backing * vmemmap_list_alloc(int node)
{
        struct vmemmap_backing *vmem_back;
        /* get from freed entries first */
        if (num_freed) {
                num_freed--;
                vmem_back = next;
                next = next->list;

                return vmem_back;
        }

        /* allocate a page when required and hand out chunks */
        if (!num_left) {
                next = vmemmap_alloc_block(PAGE_SIZE, node);
                if (unlikely(!next)) {
                        WARN_ON(1);
                        return NULL;
                }
                num_left = PAGE_SIZE / sizeof(struct vmemmap_backing);
        }

        num_left--;

        return next++;
}

static __meminit void vmemmap_list_populate(unsigned long phys,
                                            unsigned long start,
                                            int node)
{
        struct vmemmap_backing *vmem_back;

        vmem_back = vmemmap_list_alloc(node);
        if (unlikely(!vmem_back)) {
                WARN_ON(1);
                return;
        }

        vmem_back->phys = phys;
        vmem_back->virt_addr = start;
        vmem_back->list = vmemmap_list;

        vmemmap_list = vmem_back;
}

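/*
 * Return true if mapping page_size worth of struct pages starting at
 * 'start' would step outside the pfn range covered by the altmap, in
 * which case the caller falls back to a regular allocation.
 */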
static bool altmap_cross_boundary(struct vmem_altmap *altmap, unsigned long start,
                                unsigned long page_size)
{
        unsigned long nr_pfn = page_size / sizeof(struct page);
        unsigned long start_pfn = page_to_pfn((struct page *)start);

        if ((start_pfn + nr_pfn) > altmap->end_pfn)
                return true;

        if (start_pfn < altmap->base_pfn)
                return true;

        return false;
}

int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
                struct vmem_altmap *altmap)
{
        unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;

        /* Align to the page size of the linear mapping. */
        start = _ALIGN_DOWN(start, page_size);

        pr_debug("vmemmap_populate %lx..%lx, node %d\n", start, end, node);

        for (; start < end; start += page_size) {
                void *p = NULL;
                int rc;

                if (vmemmap_populated(start, page_size))
                        continue;

                /*
                 * Allocate from the altmap first if we have one. This may
                 * fail due to alignment issues when using 16MB hugepages, so
                 * fall back to system memory if the altmap allocation fails.
                 */
                if (altmap && !altmap_cross_boundary(altmap, start, page_size)) {
                        p = altmap_alloc_block_buf(page_size, altmap);
                        if (!p)
                                pr_debug("altmap block allocation failed, falling back to system memory");
                }
                if (!p)
                        p = vmemmap_alloc_block_buf(page_size, node);
                if (!p)
                        return -ENOMEM;

                vmemmap_list_populate(__pa(p), start, node);

                pr_debug("      * %016lx..%016lx allocated at %p\n",
                         start, start + page_size, p);

                rc = vmemmap_create_mapping(start, page_size, __pa(p));
                if (rc < 0) {
                        pr_warn("%s: Unable to create vmemmap mapping: %d\n",
                                __func__, rc);
                        return -EFAULT;
                }
        }

        return 0;
}
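
/*
 * For orientation (hedged; the exact call chain varies by kernel version):
 * vmemmap_populate() is invoked from the generic sparse-vmemmap code when a
 * memory section's struct page array needs backing, e.g. during sparse
 * memory initialisation at boot or when memory is hot-added.
 */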

#ifdef CONFIG_MEMORY_HOTPLUG
static unsigned long vmemmap_list_free(unsigned long start)
{
        struct vmemmap_backing *vmem_back, *vmem_back_prev;

        vmem_back_prev = vmem_back = vmemmap_list;

        /* look for it with prev pointer recorded */
        for (; vmem_back; vmem_back = vmem_back->list) {
                if (vmem_back->virt_addr == start)
                        break;
                vmem_back_prev = vmem_back;
        }

        if (unlikely(!vmem_back)) {
                WARN_ON(1);
                return 0;
        }

        /* remove it from vmemmap_list */
        if (vmem_back == vmemmap_list) /* remove head */
                vmemmap_list = vmem_back->list;
        else
                vmem_back_prev->list = vmem_back->list;

        /* make 'next' point to this freed entry */
        vmem_back->list = next;
        next = vmem_back;
        num_freed++;

        return vmem_back->phys;
}
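
/*
 * The physical address returned above is what vmemmap_free() uses to
 * decide whether the backing memory came from an altmap, from bootmem
 * (reserved pages), or from the page allocator.
 */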

void __ref vmemmap_free(unsigned long start, unsigned long end,
                struct vmem_altmap *altmap)
{
        unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;
        unsigned long page_order = get_order(page_size);
        unsigned long alt_start = ~0, alt_end = ~0;
        unsigned long base_pfn;

        start = _ALIGN_DOWN(start, page_size);
        if (altmap) {
                alt_start = altmap->base_pfn;
                alt_end = altmap->base_pfn + altmap->reserve +
                          altmap->free + altmap->alloc + altmap->align;
        }

        pr_debug("vmemmap_free %lx...%lx\n", start, end);

        for (; start < end; start += page_size) {
                unsigned long nr_pages, addr;
                struct page *page;

                /*
                 * The section has already been marked as invalid, so if
                 * vmemmap_populated() returns true, some other section
                 * still uses this page; skip it.
                 */
                if (vmemmap_populated(start, page_size))
                        continue;

                addr = vmemmap_list_free(start);
                if (!addr)
                        continue;

                page = pfn_to_page(addr >> PAGE_SHIFT);
                nr_pages = 1 << page_order;
                base_pfn = PHYS_PFN(addr);

                if (base_pfn >= alt_start && base_pfn < alt_end) {
                        vmem_altmap_free(altmap, nr_pages);
                } else if (PageReserved(page)) {
                        /* allocated from bootmem */
                        if (page_size < PAGE_SIZE) {
                                /*
                                 * this shouldn't happen, but if it is
                                 * the case, leave the memory there
                                 */
                                WARN_ON_ONCE(1);
                        } else {
                                while (nr_pages--)
                                        free_reserved_page(page++);
                        }
                } else {
                        free_pages((unsigned long)(__va(addr)), page_order);
                }

                vmemmap_remove_mapping(start, page_size);
        }
}
#endif
void register_page_bootmem_memmap(unsigned long section_nr,
                                  struct page *start_page, unsigned long size)
{
}

#endif /* CONFIG_SPARSEMEM_VMEMMAP */

#ifdef CONFIG_PPC_BOOK3S_64
static bool disable_radix = !IS_ENABLED(CONFIG_PPC_RADIX_MMU_DEFAULT);

static int __init parse_disable_radix(char *p)
{
        bool val;

        if (!p)
                val = true;
        else if (kstrtobool(p, &val))
                return -EINVAL;

        disable_radix = val;

        return 0;
}
early_param("disable_radix", parse_disable_radix);
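
/*
 * Usage note (illustrative): booting with "disable_radix" (no argument) or
 * "disable_radix=yes" on the kernel command line forces the hash MMU,
 * while "disable_radix=no" keeps radix eligible; kstrtobool() accepts the
 * usual y/n/1/0/on/off spellings.
 */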

/*
 * If we're running under a hypervisor, we need to check the contents of
 * /chosen/ibm,architecture-vec-5 to see if the hypervisor is willing to do
 * radix.  If not, we clear the radix feature bit so we fall back to hash.
 */
static void __init early_check_vec5(void)
{
        unsigned long root, chosen;
        int size;
        const u8 *vec5;
        u8 mmu_supported;

        root = of_get_flat_dt_root();
        chosen = of_get_flat_dt_subnode_by_name(root, "chosen");
        if (chosen == -FDT_ERR_NOTFOUND) {
                cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
                return;
        }
        vec5 = of_get_flat_dt_prop(chosen, "ibm,architecture-vec-5", &size);
        if (!vec5) {
                cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
                return;
        }
        if (size <= OV5_INDX(OV5_MMU_SUPPORT)) {
                cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
                return;
        }

        /* Check for supported configuration */
        mmu_supported = vec5[OV5_INDX(OV5_MMU_SUPPORT)] &
                        OV5_FEAT(OV5_MMU_SUPPORT);
        if (mmu_supported == OV5_FEAT(OV5_MMU_RADIX)) {
                /* Hypervisor only supports radix - check enabled && GTSE */
                if (!early_radix_enabled()) {
                        pr_warn("WARNING: Ignoring cmdline option disable_radix\n");
                }
                if (!(vec5[OV5_INDX(OV5_RADIX_GTSE)] &
                                                OV5_FEAT(OV5_RADIX_GTSE))) {
                        pr_warn("WARNING: Hypervisor doesn't support RADIX with GTSE\n");
                }
                /* Do radix anyway - the hypervisor said we had to */
                cur_cpu_spec->mmu_features |= MMU_FTR_TYPE_RADIX;
        } else if (mmu_supported == OV5_FEAT(OV5_MMU_HASH)) {
                /* Hypervisor only supports hash - disable radix */
                cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
        }
}
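
/*
 * Summary of the decision above (derived from the code, listed here for
 * quick reference):
 *   - no /chosen node, no vec-5 property, or property too short: clear radix
 *   - OV5_MMU_SUPPORT says radix-only: force radix, warning if disable_radix
 *     was given or GTSE is missing
 *   - OV5_MMU_SUPPORT says hash-only: clear radix
 *   - anything else: leave the feature bits untouched
 */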

void __init mmu_early_init_devtree(void)
{
        /* Disable radix mode based on kernel command line. */
        if (disable_radix)
                cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;

        /*
         * Check /chosen/ibm,architecture-vec-5 if running as a guest.
         * When running bare-metal, we can use radix if we like
         * even though the ibm,architecture-vec-5 property created by
         * skiboot doesn't have the necessary bits set.
         */
        if (!(mfmsr() & MSR_HV))
                early_check_vec5();

        if (early_radix_enabled())
                radix__early_init_devtree();
        else
                hash__early_init_devtree();
}
#endif /* CONFIG_PPC_BOOK3S_64 */
