root/arch/powerpc/mm/nohash/tlb.c


DEFINITIONS

This source file includes the following definitions.
  1. mmu_get_tsize
  2. mmu_get_tsize
  3. local_flush_tlb_mm
  4. __local_flush_tlb_page
  5. local_flush_tlb_page
  6. do_flush_tlb_mm_ipi
  7. do_flush_tlb_page_ipi
  8. flush_tlb_mm
  9. __flush_tlb_page
  10. flush_tlb_page
  11. early_init_mmu_47x
  12. flush_tlb_kernel_range
  13. flush_tlb_range
  14. tlb_flush
  15. tlb_flush_pgtable
  16. setup_page_sizes
  17. setup_mmu_htw
  18. early_init_this_mmu
  19. early_init_mmu_global
  20. early_mmu_set_memory_limit
  21. early_init_mmu
  22. early_init_mmu_secondary
  23. setup_initial_memory_limit
  24. early_init_mmu

   1 // SPDX-License-Identifier: GPL-2.0-or-later
   2 /*
   3  * This file contains the routines for TLB flushing.
   4  * On machines where the MMU does not use a hash table to store virtual to
   5  * physical translations (i.e., SW loaded TLBs or Book3E compliant processors;
   6  * this does -not- include the 603, however, which shares the implementation with
   7  * hash based processors)
   8  *
   9  *  -- BenH
  10  *
  11  * Copyright 2008,2009 Ben Herrenschmidt <benh@kernel.crashing.org>
  12  *                     IBM Corp.
  13  *
  14  *  Derived from arch/ppc/mm/init.c:
  15  *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
  16  *
  17  *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
  18  *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
  19  *    Copyright (C) 1996 Paul Mackerras
  20  *
  21  *  Derived from "arch/i386/mm/init.c"
  22  *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
  23  */
  24 
  25 #include <linux/kernel.h>
  26 #include <linux/export.h>
  27 #include <linux/mm.h>
  28 #include <linux/init.h>
  29 #include <linux/highmem.h>
  30 #include <linux/pagemap.h>
  31 #include <linux/preempt.h>
  32 #include <linux/spinlock.h>
  33 #include <linux/memblock.h>
  34 #include <linux/of_fdt.h>
  35 #include <linux/hugetlb.h>
  36 
  37 #include <asm/tlbflush.h>
  38 #include <asm/tlb.h>
  39 #include <asm/code-patching.h>
  40 #include <asm/cputhreads.h>
  41 #include <asm/hugetlb.h>
  42 #include <asm/paca.h>
  43 
  44 #include <mm/mmu_decl.h>
  45 
  46 /*
  47  * This struct lists the sw-supported page sizes.  The hardware MMU may support
  48  * other sizes not listed here.  The .ind field is only used on MMUs that have
  49  * indirect page table entries.
  50  */
  51 #if defined(CONFIG_PPC_BOOK3E_MMU) || defined(CONFIG_PPC_8xx)
  52 #ifdef CONFIG_PPC_FSL_BOOK3E
  53 struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = {
  54         [MMU_PAGE_4K] = {
  55                 .shift  = 12,
  56                 .enc    = BOOK3E_PAGESZ_4K,
  57         },
  58         [MMU_PAGE_2M] = {
  59                 .shift  = 21,
  60                 .enc    = BOOK3E_PAGESZ_2M,
  61         },
  62         [MMU_PAGE_4M] = {
  63                 .shift  = 22,
  64                 .enc    = BOOK3E_PAGESZ_4M,
  65         },
  66         [MMU_PAGE_16M] = {
  67                 .shift  = 24,
  68                 .enc    = BOOK3E_PAGESZ_16M,
  69         },
  70         [MMU_PAGE_64M] = {
  71                 .shift  = 26,
  72                 .enc    = BOOK3E_PAGESZ_64M,
  73         },
  74         [MMU_PAGE_256M] = {
  75                 .shift  = 28,
  76                 .enc    = BOOK3E_PAGESZ_256M,
  77         },
  78         [MMU_PAGE_1G] = {
  79                 .shift  = 30,
  80                 .enc    = BOOK3E_PAGESZ_1GB,
  81         },
  82 };
  83 #elif defined(CONFIG_PPC_8xx)
  84 struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = {
  85         /* we only manage 4k and 16k pages as normal pages */
  86 #ifdef CONFIG_PPC_4K_PAGES
  87         [MMU_PAGE_4K] = {
  88                 .shift  = 12,
  89         },
  90 #else
  91         [MMU_PAGE_16K] = {
  92                 .shift  = 14,
  93         },
  94 #endif
  95         [MMU_PAGE_512K] = {
  96                 .shift  = 19,
  97         },
  98         [MMU_PAGE_8M] = {
  99                 .shift  = 23,
 100         },
 101 };
 102 #else
 103 struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = {
 104         [MMU_PAGE_4K] = {
 105                 .shift  = 12,
 106                 .ind    = 20,
 107                 .enc    = BOOK3E_PAGESZ_4K,
 108         },
 109         [MMU_PAGE_16K] = {
 110                 .shift  = 14,
 111                 .enc    = BOOK3E_PAGESZ_16K,
 112         },
 113         [MMU_PAGE_64K] = {
 114                 .shift  = 16,
 115                 .ind    = 28,
 116                 .enc    = BOOK3E_PAGESZ_64K,
 117         },
 118         [MMU_PAGE_1M] = {
 119                 .shift  = 20,
 120                 .enc    = BOOK3E_PAGESZ_1M,
 121         },
 122         [MMU_PAGE_16M] = {
 123                 .shift  = 24,
 124                 .ind    = 36,
 125                 .enc    = BOOK3E_PAGESZ_16M,
 126         },
 127         [MMU_PAGE_256M] = {
 128                 .shift  = 28,
 129                 .enc    = BOOK3E_PAGESZ_256M,
 130         },
 131         [MMU_PAGE_1G] = {
 132                 .shift  = 30,
 133                 .enc    = BOOK3E_PAGESZ_1GB,
 134         },
 135 };
 136 #endif /* CONFIG_PPC_FSL_BOOK3E */
 137 
 138 static inline int mmu_get_tsize(int psize)
 139 {
 140         return mmu_psize_defs[psize].enc;
 141 }
 142 #else
 143 static inline int mmu_get_tsize(int psize)
 144 {
 145         /* This isn't used on !Book3E for now */
 146         return 0;
 147 }
 148 #endif /* CONFIG_PPC_BOOK3E_MMU || CONFIG_PPC_8xx */
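
A minimal, hypothetical usage sketch (example_show_psize() is not part of this
file and assumes a configuration in which mmu_psize_defs[] and mmu_get_tsize()
above are populated): the table is consulted by indexing it with a software
page-size constant and reading the .shift and .enc fields.

    /* Hypothetical helper, for illustration only: dump what the kernel
     * knows about one software page-size index. */
    static void example_show_psize(int psize)
    {
            struct mmu_psize_def *def = &mmu_psize_defs[psize];

            if (!def->shift)        /* a zero shift means the size is unsupported */
                    return;

            /* 1UL << shift is the size in bytes; mmu_get_tsize() returns the
             * hardware tsize encoding passed to the TLB invalidate helpers. */
            pr_info("psize %d: %lu KB, tsize %d\n",
                    psize, 1UL << (def->shift - 10), mmu_get_tsize(psize));
    }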
 149 
 150 /* The variables below are currently only used on 64-bit Book3E
 151  * though this will probably be made common with other nohash
 152  * implementations at some point
 153  */
 154 #ifdef CONFIG_PPC64
 155 
 156 int mmu_linear_psize;           /* Page size used for the linear mapping */
 157 int mmu_pte_psize;              /* Page size used for PTE pages */
 158 int mmu_vmemmap_psize;          /* Page size used for the virtual mem map */
 159 int book3e_htw_mode;            /* HW tablewalk?  Value is PPC_HTW_* */
 160 unsigned long linear_map_top;   /* Top of linear mapping */
 161 
 162 
 163 /*
 164  * Number of bytes to add to SPRN_SPRG_TLB_EXFRAME on crit/mcheck/debug
 165  * exceptions.  This is used for bolted and e6500 TLB miss handlers which
 166  * do not modify this SPRG in the TLB miss code; for other TLB miss handlers,
 167  * this is set to zero.
 168  */
 169 int extlb_level_exc;
 170 
 171 #endif /* CONFIG_PPC64 */
 172 
 173 #ifdef CONFIG_PPC_FSL_BOOK3E
 174 /* next_tlbcam_idx is used to round-robin tlbcam entry assignment */
 175 DEFINE_PER_CPU(int, next_tlbcam_idx);
 176 EXPORT_PER_CPU_SYMBOL(next_tlbcam_idx);
 177 #endif
 178 
 179 /*
 180  * Base TLB flushing operations:
 181  *
 182  *  - flush_tlb_mm(mm) flushes the specified mm context TLB's
 183  *  - flush_tlb_page(vma, vmaddr) flushes one page
 184  *  - flush_tlb_range(vma, start, end) flushes a range of pages
 185  *  - flush_tlb_kernel_range(start, end) flushes kernel pages
 186  *
 187  *  - local_* variants of page and mm only apply to the current
 188  *    processor
 189  */
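
A rough sketch of how these entry points are typically used (the helper below
is hypothetical and not part of this file; ptep_get_and_clear() and
pte_present() are assumed from the generic mm code): update the page table
first, then flush, so the stale translation cannot be re-fetched afterwards.

    /* Hypothetical caller: drop one user mapping, then shoot down its TLB entry. */
    static void example_zap_one_page(struct vm_area_struct *vma,
                                     unsigned long addr, pte_t *ptep)
    {
            /* Clear the PTE first so a racing access faults instead of
             * silently re-loading the old translation... */
            pte_t old = ptep_get_and_clear(vma->vm_mm, addr, ptep);

            /* ...then invalidate any cached copy.  flush_tlb_page() picks
             * the local, IPI or tlbivax-broadcast method as appropriate. */
            if (pte_present(old))
                    flush_tlb_page(vma, addr);
    }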
 190 
 191 /*
 192  * These are the base non-SMP variants of page and mm flushing
 193  */
 194 void local_flush_tlb_mm(struct mm_struct *mm)
 195 {
 196         unsigned int pid;
 197 
 198         preempt_disable();
 199         pid = mm->context.id;
 200         if (pid != MMU_NO_CONTEXT)
 201                 _tlbil_pid(pid);
 202         preempt_enable();
 203 }
 204 EXPORT_SYMBOL(local_flush_tlb_mm);
 205 
 206 void __local_flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
 207                             int tsize, int ind)
 208 {
 209         unsigned int pid;
 210 
 211         preempt_disable();
 212         pid = mm ? mm->context.id : 0;
 213         if (pid != MMU_NO_CONTEXT)
 214                 _tlbil_va(vmaddr, pid, tsize, ind);
 215         preempt_enable();
 216 }
 217 
 218 void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
 219 {
 220         __local_flush_tlb_page(vma ? vma->vm_mm : NULL, vmaddr,
 221                                mmu_get_tsize(mmu_virtual_psize), 0);
 222 }
 223 EXPORT_SYMBOL(local_flush_tlb_page);
 224 
 225 /*
 226  * And here are the SMP non-local implementations
 227  */
 228 #ifdef CONFIG_SMP
 229 
 230 static DEFINE_RAW_SPINLOCK(tlbivax_lock);
 231 
 232 struct tlb_flush_param {
 233         unsigned long addr;
 234         unsigned int pid;
 235         unsigned int tsize;
 236         unsigned int ind;
 237 };
 238 
 239 static void do_flush_tlb_mm_ipi(void *param)
 240 {
 241         struct tlb_flush_param *p = param;
 242 
 243         _tlbil_pid(p ? p->pid : 0);
 244 }
 245 
 246 static void do_flush_tlb_page_ipi(void *param)
 247 {
 248         struct tlb_flush_param *p = param;
 249 
 250         _tlbil_va(p->addr, p->pid, p->tsize, p->ind);
 251 }
 252 
 253 
 254 /* Note on invalidations and PID:
 255  *
 256  * We snapshot the PID with preempt disabled. At this point, it can still
 257  * change either because:
 258  * - our context is being stolen (PID -> NO_CONTEXT) on another CPU
 259  * - we are invalidating some target that isn't currently running here
 260  *   and is concurrently acquiring a new PID on another CPU
 261  * - some other CPU is re-acquiring a lost PID for this mm
 262  * etc...
 263  *
 264  * However, this shouldn't be a problem as we only guarantee
 265  * invalidation of TLB entries present prior to this call, so we
 266  * don't care about the PID changing, and invalidating a stale PID
 267  * is generally harmless.
 268  */
 269 
 270 void flush_tlb_mm(struct mm_struct *mm)
 271 {
 272         unsigned int pid;
 273 
 274         preempt_disable();
 275         pid = mm->context.id;
 276         if (unlikely(pid == MMU_NO_CONTEXT))
 277                 goto no_context;
 278         if (!mm_is_core_local(mm)) {
 279                 struct tlb_flush_param p = { .pid = pid };
 280                 /* Ignores smp_processor_id() even if set. */
 281                 smp_call_function_many(mm_cpumask(mm),
 282                                        do_flush_tlb_mm_ipi, &p, 1);
 283         }
 284         _tlbil_pid(pid);
 285  no_context:
 286         preempt_enable();
 287 }
 288 EXPORT_SYMBOL(flush_tlb_mm);
 289 
 290 void __flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
 291                       int tsize, int ind)
 292 {
 293         struct cpumask *cpu_mask;
 294         unsigned int pid;
 295 
 296         /*
 297          * This function as well as __local_flush_tlb_page() must only be called
 298          * for user contexts.
 299          */
 300         if (WARN_ON(!mm))
 301                 return;
 302 
 303         preempt_disable();
 304         pid = mm->context.id;
 305         if (unlikely(pid == MMU_NO_CONTEXT))
 306                 goto bail;
 307         cpu_mask = mm_cpumask(mm);
 308         if (!mm_is_core_local(mm)) {
 309                 /* If broadcast tlbivax is supported, use it */
 310                 if (mmu_has_feature(MMU_FTR_USE_TLBIVAX_BCAST)) {
 311                         int lock = mmu_has_feature(MMU_FTR_LOCK_BCAST_INVAL);
 312                         if (lock)
 313                                 raw_spin_lock(&tlbivax_lock);
 314                         _tlbivax_bcast(vmaddr, pid, tsize, ind);
 315                         if (lock)
 316                                 raw_spin_unlock(&tlbivax_lock);
 317                         goto bail;
 318                 } else {
 319                         struct tlb_flush_param p = {
 320                                 .pid = pid,
 321                                 .addr = vmaddr,
 322                                 .tsize = tsize,
 323                                 .ind = ind,
 324                         };
 325                         /* Ignores smp_processor_id() even if set in cpu_mask */
 326                         smp_call_function_many(cpu_mask,
 327                                                do_flush_tlb_page_ipi, &p, 1);
 328                 }
 329         }
 330         _tlbil_va(vmaddr, pid, tsize, ind);
 331  bail:
 332         preempt_enable();
 333 }
 334 
 335 void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
 336 {
 337 #ifdef CONFIG_HUGETLB_PAGE
 338         if (vma && is_vm_hugetlb_page(vma))
 339                 flush_hugetlb_page(vma, vmaddr);
 340 #endif
 341 
 342         __flush_tlb_page(vma ? vma->vm_mm : NULL, vmaddr,
 343                          mmu_get_tsize(mmu_virtual_psize), 0);
 344 }
 345 EXPORT_SYMBOL(flush_tlb_page);
 346 
 347 #endif /* CONFIG_SMP */
 348 
 349 #ifdef CONFIG_PPC_47x
 350 void __init early_init_mmu_47x(void)
 351 {
 352 #ifdef CONFIG_SMP
 353         unsigned long root = of_get_flat_dt_root();
 354         if (of_get_flat_dt_prop(root, "cooperative-partition", NULL))
 355                 mmu_clear_feature(MMU_FTR_USE_TLBIVAX_BCAST);
 356 #endif /* CONFIG_SMP */
 357 }
 358 #endif /* CONFIG_PPC_47x */
 359 
 360 /*
 361  * Flush kernel TLB entries in the given range
 362  */
 363 void flush_tlb_kernel_range(unsigned long start, unsigned long end)
 364 {
 365 #ifdef CONFIG_SMP
 366         preempt_disable();
 367         smp_call_function(do_flush_tlb_mm_ipi, NULL, 1);
 368         _tlbil_pid(0);
 369         preempt_enable();
 370 #else
 371         _tlbil_pid(0);
 372 #endif
 373 }
 374 EXPORT_SYMBOL(flush_tlb_kernel_range);
 375 
 376 /*
 377  * Currently, for range flushing, we just do a full mm flush. This should
 378  * be optimized based on a threshold on the size of the range, since
 379  * some implementations can stack multiple tlbivax before a tlbsync, but
 380  * for now we keep it that way.
 381  */
 382 void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 383                      unsigned long end)
 384 
 385 {
 386         if (end - start == PAGE_SIZE && !(start & ~PAGE_MASK))
 387                 flush_tlb_page(vma, start);
 388         else
 389                 flush_tlb_mm(vma->vm_mm);
 390 }
 391 EXPORT_SYMBOL(flush_tlb_range);
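
A hedged sketch of the threshold idea mentioned in the comment above (the
constant and the function name are invented for illustration; real code would
also need to handle huge pages and mixed page sizes): flush page by page below
a size cut-off, fall back to a full PID flush beyond it.

    /* Hypothetical variant: per-page flushes for small ranges only. */
    #define EXAMPLE_FLUSH_RANGE_THRESHOLD   (32 * PAGE_SIZE)

    static void example_flush_tlb_range(struct vm_area_struct *vma,
                                        unsigned long start, unsigned long end)
    {
            if (end - start <= EXAMPLE_FLUSH_RANGE_THRESHOLD) {
                    unsigned long addr;

                    for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE)
                            flush_tlb_page(vma, addr);
            } else {
                    flush_tlb_mm(vma->vm_mm);
            }
    }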
 392 
 393 void tlb_flush(struct mmu_gather *tlb)
 394 {
 395         flush_tlb_mm(tlb->mm);
 396 }
 397 
 398 /*
 399  * Below are functions specific to the 64-bit variant of Book3E though that
 400  * may change in the future
 401  */
 402 
 403 #ifdef CONFIG_PPC64
 404 
 405 /*
 406  * Handling of virtual linear page tables or indirect TLB entries
 407  * flushing when PTE pages are freed
 408  */
 409 void tlb_flush_pgtable(struct mmu_gather *tlb, unsigned long address)
 410 {
 411         int tsize = mmu_psize_defs[mmu_pte_psize].enc;
 412 
 413         if (book3e_htw_mode != PPC_HTW_NONE) {
 414                 unsigned long start = address & PMD_MASK;
 415                 unsigned long end = address + PMD_SIZE;
 416                 unsigned long size = 1UL << mmu_psize_defs[mmu_pte_psize].shift;
 417 
 418                 /* This isn't the most optimal; ideally we would factor out the
 419                  * whole preempt & CPU mask mucking around, or even the IPI, but
 420                  * it will do for now
 421                  */
 422                 while (start < end) {
 423                         __flush_tlb_page(tlb->mm, start, tsize, 1);
 424                         start += size;
 425                 }
 426         } else {
 427                 unsigned long rmask = 0xf000000000000000ul;
 428                 unsigned long rid = (address & rmask) | 0x1000000000000000ul;
 429                 unsigned long vpte = address & ~rmask;
 430 
 431                 vpte = (vpte >> (PAGE_SHIFT - 3)) & ~0xffful;
 432                 vpte |= rid;
 433                 __flush_tlb_page(tlb->mm, vpte, tsize, 0);
 434         }
 435 }
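
A worked illustration of the non-HTW branch above, assuming 4K base pages
(PAGE_SHIFT = 12) and an arbitrary user address; the values only show how the
faulting address becomes the virtual address of its PTE in the virtual linear
page table.

    /* Illustrative values only:
     *   address = 0x0000000012345000                 (top nibble 0: user region)
     *   rid     = (address & 0xf000000000000000) | 0x1000000000000000
     *           = 0x1000000000000000
     *   vpte    = (address & ~0xf000000000000000) >> (PAGE_SHIFT - 3)
     *           = 0x0000000012345000 >> 9            (each PTE is 8 bytes, 2^3)
     *           = 0x0000000000091a28
     *   vpte   &= ~0xfff  ->  0x0000000000091000     (a full page of PTEs:
     *                                                 512 entries = 2M of VA)
     *   vpte   |= rid     ->  0x1000000000091000     (passed to
     *                                                 __flush_tlb_page(), ind=0)
     */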
 436 
 437 static void setup_page_sizes(void)
 438 {
 439         unsigned int tlb0cfg;
 440         unsigned int tlb0ps;
 441         unsigned int eptcfg;
 442         int i, psize;
 443 
 444 #ifdef CONFIG_PPC_FSL_BOOK3E
 445         unsigned int mmucfg = mfspr(SPRN_MMUCFG);
 446         int fsl_mmu = mmu_has_feature(MMU_FTR_TYPE_FSL_E);
 447 
 448         if (fsl_mmu && (mmucfg & MMUCFG_MAVN) == MMUCFG_MAVN_V1) {
 449                 unsigned int tlb1cfg = mfspr(SPRN_TLB1CFG);
 450                 unsigned int min_pg, max_pg;
 451 
 452                 min_pg = (tlb1cfg & TLBnCFG_MINSIZE) >> TLBnCFG_MINSIZE_SHIFT;
 453                 max_pg = (tlb1cfg & TLBnCFG_MAXSIZE) >> TLBnCFG_MAXSIZE_SHIFT;
 454 
 455                 for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
 456                         struct mmu_psize_def *def;
 457                         unsigned int shift;
 458 
 459                         def = &mmu_psize_defs[psize];
 460                         shift = def->shift;
 461 
 462                         if (shift == 0 || shift & 1)
 463                                 continue;
 464 
 465                         /* adjust to be in terms of 4^shift KB */
 466                         shift = (shift - 10) >> 1;
 467 
 468                         if ((shift >= min_pg) && (shift <= max_pg))
 469                                 def->flags |= MMU_PAGE_SIZE_DIRECT;
 470                 }
 471 
 472                 goto out;
 473         }
 474 
 475         if (fsl_mmu && (mmucfg & MMUCFG_MAVN) == MMUCFG_MAVN_V2) {
 476                 u32 tlb1cfg, tlb1ps;
 477 
 478                 tlb0cfg = mfspr(SPRN_TLB0CFG);
 479                 tlb1cfg = mfspr(SPRN_TLB1CFG);
 480                 tlb1ps = mfspr(SPRN_TLB1PS);
 481                 eptcfg = mfspr(SPRN_EPTCFG);
 482 
 483                 if ((tlb1cfg & TLBnCFG_IND) && (tlb0cfg & TLBnCFG_PT))
 484                         book3e_htw_mode = PPC_HTW_E6500;
 485 
 486                 /*
 487                  * We expect 4K subpage size and unrestricted indirect size.
 488                  * The lack of a restriction on indirect size is a Freescale
 489                  * extension, indicated by PSn = 0 but SPSn != 0.
 490                  */
 491                 if (eptcfg != 2)
 492                         book3e_htw_mode = PPC_HTW_NONE;
 493 
 494                 for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
 495                         struct mmu_psize_def *def = &mmu_psize_defs[psize];
 496 
 497                         if (!def->shift)
 498                                 continue;
 499 
 500                         if (tlb1ps & (1U << (def->shift - 10))) {
 501                                 def->flags |= MMU_PAGE_SIZE_DIRECT;
 502 
 503                                 if (book3e_htw_mode && psize == MMU_PAGE_2M)
 504                                         def->flags |= MMU_PAGE_SIZE_INDIRECT;
 505                         }
 506                 }
 507 
 508                 goto out;
 509         }
 510 #endif
 511 
 512         tlb0cfg = mfspr(SPRN_TLB0CFG);
 513         tlb0ps = mfspr(SPRN_TLB0PS);
 514         eptcfg = mfspr(SPRN_EPTCFG);
 515 
 516         /* Look for supported direct sizes */
 517         for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
 518                 struct mmu_psize_def *def = &mmu_psize_defs[psize];
 519 
 520                 if (tlb0ps & (1U << (def->shift - 10)))
 521                         def->flags |= MMU_PAGE_SIZE_DIRECT;
 522         }
 523 
 524         /* Indirect page sizes supported ? */
 525         if ((tlb0cfg & TLBnCFG_IND) == 0 ||
 526             (tlb0cfg & TLBnCFG_PT) == 0)
 527                 goto out;
 528 
 529         book3e_htw_mode = PPC_HTW_IBM;
 530 
 531         /* Now, we only deal with one IND page size for each
 532          * direct size. Hopefully all implementations today are
 533          * unambiguous, but we might want to be careful in the
 534          * future.
 535          */
 536         for (i = 0; i < 3; i++) {
 537                 unsigned int ps, sps;
 538 
 539                 sps = eptcfg & 0x1f;
 540                 eptcfg >>= 5;
 541                 ps = eptcfg & 0x1f;
 542                 eptcfg >>= 5;
 543                 if (!ps || !sps)
 544                         continue;
 545                 for (psize = 0; psize < MMU_PAGE_COUNT; psize++) {
 546                         struct mmu_psize_def *def = &mmu_psize_defs[psize];
 547 
 548                         if (ps == (def->shift - 10))
 549                                 def->flags |= MMU_PAGE_SIZE_INDIRECT;
 550                         if (sps == (def->shift - 10))
 551                                 def->ind = ps + 10;
 552                 }
 553         }
 554 
 555 out:
 556         /* Cleanup array and print summary */
 557         pr_info("MMU: Supported page sizes\n");
 558         for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
 559                 struct mmu_psize_def *def = &mmu_psize_defs[psize];
 560                 const char *__page_type_names[] = {
 561                         "unsupported",
 562                         "direct",
 563                         "indirect",
 564                         "direct & indirect"
 565                 };
 566                 if (def->flags == 0) {
 567                         def->shift = 0; 
 568                         continue;
 569                 }
 570                 pr_info("  %8ld KB as %s\n", 1ul << (def->shift - 10),
 571                         __page_type_names[def->flags & 0x3]);
 572         }
 573 }
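
A short worked illustration of the two hardware encodings checked above
(values are examples only): MMU architecture version 1.0 encodes the TLBnCFG
MIN/MAXSIZE fields as powers of four KB, while version 2.0 and the generic
path use the TLBnPS bitmaps with one bit per power-of-two KB size.

    /* Illustrative values only:
     *   MAV 1.0:  16M page, shift = 24  ->  (24 - 10) >> 1 = 7, 4^7 KB = 16M,
     *             kept iff min_pg <= 7 <= max_pg.  An odd shift such as 2M
     *             (shift = 21) cannot be expressed as 4^k KB and is skipped.
     *
     *   TLBnPS:   4K page,  shift = 12  ->  bit  2 must be set
     *             16M page, shift = 24  ->  bit 14 must be set
     */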
 574 
 575 static void setup_mmu_htw(void)
 576 {
 577         /*
 578          * If we want to use HW tablewalk, enable it by patching the TLB miss
 579          * handlers to branch to the one dedicated to it.
 580          */
 581 
 582         switch (book3e_htw_mode) {
 583         case PPC_HTW_IBM:
 584                 patch_exception(0x1c0, exc_data_tlb_miss_htw_book3e);
 585                 patch_exception(0x1e0, exc_instruction_tlb_miss_htw_book3e);
 586                 break;
 587 #ifdef CONFIG_PPC_FSL_BOOK3E
 588         case PPC_HTW_E6500:
 589                 extlb_level_exc = EX_TLB_SIZE;
 590                 patch_exception(0x1c0, exc_data_tlb_miss_e6500_book3e);
 591                 patch_exception(0x1e0, exc_instruction_tlb_miss_e6500_book3e);
 592                 break;
 593 #endif
 594         }
 595         pr_info("MMU: Book3E HW tablewalk %s\n",
 596                 book3e_htw_mode != PPC_HTW_NONE ? "enabled" : "not supported");
 597 }
 598 
 599 /*
 600  * Early initialization of the MMU TLB code
 601  */
 602 static void early_init_this_mmu(void)
 603 {
 604         unsigned int mas4;
 605 
 606         /* Set MAS4 based on page table setting */
 607 
 608         mas4 = 0x4 << MAS4_WIMGED_SHIFT;
 609         switch (book3e_htw_mode) {
 610         case PPC_HTW_E6500:
 611                 mas4 |= MAS4_INDD;
 612                 mas4 |= BOOK3E_PAGESZ_2M << MAS4_TSIZED_SHIFT;
 613                 mas4 |= MAS4_TLBSELD(1);
 614                 mmu_pte_psize = MMU_PAGE_2M;
 615                 break;
 616 
 617         case PPC_HTW_IBM:
 618                 mas4 |= MAS4_INDD;
 619                 mas4 |= BOOK3E_PAGESZ_1M << MAS4_TSIZED_SHIFT;
 620                 mmu_pte_psize = MMU_PAGE_1M;
 621                 break;
 622 
 623         case PPC_HTW_NONE:
 624                 mas4 |= BOOK3E_PAGESZ_4K << MAS4_TSIZED_SHIFT;
 625                 mmu_pte_psize = mmu_virtual_psize;
 626                 break;
 627         }
 628         mtspr(SPRN_MAS4, mas4);
 629 
 630 #ifdef CONFIG_PPC_FSL_BOOK3E
 631         if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
 632                 unsigned int num_cams;
 633                 bool map = true;
 634 
 635                 /* use a quarter of the TLBCAM for bolted linear map */
 636                 num_cams = (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) / 4;
 637 
 638                 /*
 639                  * Only do the mapping once per core, or else the
 640                  * transient mapping would cause problems.
 641                  */
 642 #ifdef CONFIG_SMP
 643                 if (hweight32(get_tensr()) > 1)
 644                         map = false;
 645 #endif
 646 
 647                 if (map)
 648                         linear_map_top = map_mem_in_cams(linear_map_top,
 649                                                          num_cams, false);
 650         }
 651 #endif
 652 
 653         /* A sync won't hurt us after mucking around with
 654          * the MMU configuration
 655          */
 656         mb();
 657 }
 658 
 659 static void __init early_init_mmu_global(void)
 660 {
 661         /* XXX This will have to be decided at runtime, but right
 662          * now our boot and TLB miss code hard wires it. Ideally
 663          * we should find out a suitable page size and patch the
 664          * TLB miss code (either that or use the PACA to store
 665          * the value we want)
 666          */
 667         mmu_linear_psize = MMU_PAGE_1G;
 668 
 669         /* XXX This should be decided at runtime based on supported
 670          * page sizes in the TLB, but for now let's assume 16M is
 671          * always there and a good fit (which it probably is)
 672          *
 673          * Freescale booke only supports 4K pages in TLB0, so use that.
 674          */
 675         if (mmu_has_feature(MMU_FTR_TYPE_FSL_E))
 676                 mmu_vmemmap_psize = MMU_PAGE_4K;
 677         else
 678                 mmu_vmemmap_psize = MMU_PAGE_16M;
 679 
 680         /* XXX This code only checks for TLB 0 capabilities and doesn't
 681          *     check what page size combos are supported by the HW. It
 682          *     also doesn't handle the case where a separate array holds
 683          *     the IND entries from the array loaded by the PT.
 684          */
 685         /* Look for supported page sizes */
 686         setup_page_sizes();
 687 
 688         /* Look for HW tablewalk support */
 689         setup_mmu_htw();
 690 
 691 #ifdef CONFIG_PPC_FSL_BOOK3E
 692         if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
 693                 if (book3e_htw_mode == PPC_HTW_NONE) {
 694                         extlb_level_exc = EX_TLB_SIZE;
 695                         patch_exception(0x1c0, exc_data_tlb_miss_bolted_book3e);
 696                         patch_exception(0x1e0,
 697                                 exc_instruction_tlb_miss_bolted_book3e);
 698                 }
 699         }
 700 #endif
 701 
 702         /* Set the global containing the top of the linear mapping
 703          * for use by the TLB miss code
 704          */
 705         linear_map_top = memblock_end_of_DRAM();
 706 
 707         ioremap_bot = IOREMAP_BASE;
 708 }
 709 
 710 static void __init early_mmu_set_memory_limit(void)
 711 {
 712 #ifdef CONFIG_PPC_FSL_BOOK3E
 713         if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
 714                 /*
 715                  * Limit memory so we don't have linear faults.
 716                  * Unlike memblock_set_current_limit, which limits
 717                  * memory available during early boot, this permanently
 718                  * reduces the memory available to Linux.  We need to
 719                  * do this because highmem is not supported on 64-bit.
 720                  */
 721                 memblock_enforce_memory_limit(linear_map_top);
 722         }
 723 #endif
 724 
 725         memblock_set_current_limit(linear_map_top);
 726 }
 727 
 728 /* boot cpu only */
 729 void __init early_init_mmu(void)
 730 {
 731         early_init_mmu_global();
 732         early_init_this_mmu();
 733         early_mmu_set_memory_limit();
 734 }
 735 
 736 void early_init_mmu_secondary(void)
 737 {
 738         early_init_this_mmu();
 739 }
 740 
 741 void setup_initial_memory_limit(phys_addr_t first_memblock_base,
 742                                 phys_addr_t first_memblock_size)
 743 {
 744         /* On non-FSL Embedded 64-bit, we adjust the RMA size to match
 745          * the bolted TLB entry. We know for now that only 1G
 746          * entries are supported though that may eventually
 747          * change.
 748          *
 749          * On FSL Embedded 64-bit, usually all RAM is bolted, but with
 750          * unusual memory sizes it's possible for some RAM to not be mapped
 751          * (such RAM is not used at all by Linux, since we don't support
 752          * highmem on 64-bit).  We limit ppc64_rma_size to what would be
 753          * mappable if this memblock is the only one.  Additional memblocks
 754          * can only increase, not decrease, the amount that ends up getting
 755          * mapped.  We still limit max to 1G even if we'll eventually map
 756          * more.  This is due to what the early init code is set up to do.
 757          *
 758          * We crop it to the size of the first MEMBLOCK to
 759          * avoid going over total available memory just in case...
 760          */
 761 #ifdef CONFIG_PPC_FSL_BOOK3E
 762         if (early_mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
 763                 unsigned long linear_sz;
 764                 unsigned int num_cams;
 765 
 766                 /* use a quarter of the TLBCAM for bolted linear map */
 767                 num_cams = (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) / 4;
 768 
 769                 linear_sz = map_mem_in_cams(first_memblock_size, num_cams,
 770                                             true);
 771 
 772                 ppc64_rma_size = min_t(u64, linear_sz, 0x40000000);
 773         } else
 774 #endif
 775                 ppc64_rma_size = min_t(u64, first_memblock_size, 0x40000000);
 776 
 777         /* Finally limit subsequent allocations */
 778         memblock_set_current_limit(first_memblock_base + ppc64_rma_size);
 779 }
 780 #else /* ! CONFIG_PPC64 */
 781 void __init early_init_mmu(void)
 782 {
 783 #ifdef CONFIG_PPC_47x
 784         early_init_mmu_47x();
 785 #endif
 786 
 787 #ifdef CONFIG_PPC_MM_SLICES
 788         mm_ctx_set_slb_addr_limit(&init_mm.context, SLB_ADDR_LIMIT_DEFAULT);
 789 #endif
 790 }
 791 #endif /* CONFIG_PPC64 */
