root/arch/mips/mm/tlb-r4k.c


DEFINITIONS

This source file includes the following definitions.
  1. flush_micro_tlb
  2. flush_micro_tlb_vm
  3. local_flush_tlb_all
  4. local_flush_tlb_range
  5. local_flush_tlb_kernel_range
  6. local_flush_tlb_page
  7. local_flush_tlb_one
  8. __update_tlb
  9. add_wired_entry
  10. has_transparent_hugepage
  11. add_temporary_entry
  12. set_ntlb
  13. r4k_tlb_configure
  14. tlb_init
  15. r4k_tlb_pm_notifier
  16. r4k_tlb_init_pm

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1997, 1998, 1999, 2000 Ralf Baechle ralf@gnu.org
 * Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2002 MIPS Technologies, Inc.  All rights reserved.
 */
#include <linux/cpu_pm.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/export.h>

#include <asm/cpu.h>
#include <asm/cpu-type.h>
#include <asm/bootinfo.h>
#include <asm/hazards.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>
#include <asm/tlbmisc.h>

extern void build_tlb_refill_handler(void);

/*
 * LOONGSON-2 has a 4 entry itlb which is a subset of jtlb, LOONGSON-3 has
 * a 4 entry itlb and a 4 entry dtlb which are subsets of jtlb. Unfortunately,
 * itlb/dtlb are not totally transparent to software.
 */
static inline void flush_micro_tlb(void)
{
        switch (current_cpu_type()) {
        case CPU_LOONGSON2:
                write_c0_diag(LOONGSON_DIAG_ITLB);
                break;
        case CPU_LOONGSON3:
                write_c0_diag(LOONGSON_DIAG_ITLB | LOONGSON_DIAG_DTLB);
                break;
        default:
                break;
        }
}

static inline void flush_micro_tlb_vm(struct vm_area_struct *vma)
{
        if (vma->vm_flags & VM_EXEC)
                flush_micro_tlb();
}

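/*
 * Flush the entire TLB on the local CPU.  When the core implements the
 * tlbinvf instruction and no entries are wired, the VTLB and each FTLB set
 * are invalidated directly; otherwise every entry is overwritten with a
 * unique VPN2 in unmapped space (UNIQUE_ENTRYHI) so that no two entries
 * can ever match the same address.
 */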
void local_flush_tlb_all(void)
{
        unsigned long flags;
        unsigned long old_ctx;
        int entry, ftlbhighset;

        local_irq_save(flags);
        /* Save old context and create impossible VPN2 value */
        old_ctx = read_c0_entryhi();
        htw_stop();
        write_c0_entrylo0(0);
        write_c0_entrylo1(0);

        entry = num_wired_entries();

        /*
         * Blast 'em all away.
         * If there are any wired entries, fall back to iterating
         */
        if (cpu_has_tlbinv && !entry) {
                if (current_cpu_data.tlbsizevtlb) {
                        write_c0_index(0);
                        mtc0_tlbw_hazard();
                        tlbinvf();  /* invalidate VTLB */
                }
                ftlbhighset = current_cpu_data.tlbsizevtlb +
                        current_cpu_data.tlbsizeftlbsets;
                for (entry = current_cpu_data.tlbsizevtlb;
                     entry < ftlbhighset;
                     entry++) {
                        write_c0_index(entry);
                        mtc0_tlbw_hazard();
                        tlbinvf();  /* invalidate one FTLB set */
                }
        } else {
                while (entry < current_cpu_data.tlbsize) {
                        /* Make sure all entries differ. */
                        write_c0_entryhi(UNIQUE_ENTRYHI(entry));
                        write_c0_index(entry);
                        mtc0_tlbw_hazard();
                        tlb_write_indexed();
                        entry++;
                }
        }
        tlbw_use_hazard();
        write_c0_entryhi(old_ctx);
        htw_start();
        flush_micro_tlb();
        local_irq_restore(flags);
}
EXPORT_SYMBOL(local_flush_tlb_all);

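/*
 * Flush a user address range.  Each TLB entry maps an even/odd pair of
 * pages, so the range is rounded to a PAGE_SIZE << 1 boundary and walked
 * with that stride.  If the range covers too large a fraction of the TLB
 * it is cheaper to drop the mm's context and let entries be refilled on
 * demand.
 */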
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
        unsigned long end)
{
        struct mm_struct *mm = vma->vm_mm;
        int cpu = smp_processor_id();

        if (cpu_context(cpu, mm) != 0) {
                unsigned long size, flags;

                local_irq_save(flags);
                start = round_down(start, PAGE_SIZE << 1);
                end = round_up(end, PAGE_SIZE << 1);
                size = (end - start) >> (PAGE_SHIFT + 1);
                if (size <= (current_cpu_data.tlbsizeftlbsets ?
                             current_cpu_data.tlbsize / 8 :
                             current_cpu_data.tlbsize / 2)) {
                        unsigned long old_entryhi, uninitialized_var(old_mmid);
                        int newpid = cpu_asid(cpu, mm);

                        old_entryhi = read_c0_entryhi();
                        if (cpu_has_mmid) {
                                old_mmid = read_c0_memorymapid();
                                write_c0_memorymapid(newpid);
                        }

                        htw_stop();
                        while (start < end) {
                                int idx;

                                if (cpu_has_mmid)
                                        write_c0_entryhi(start);
                                else
                                        write_c0_entryhi(start | newpid);
                                start += (PAGE_SIZE << 1);
                                mtc0_tlbw_hazard();
                                tlb_probe();
                                tlb_probe_hazard();
                                idx = read_c0_index();
                                write_c0_entrylo0(0);
                                write_c0_entrylo1(0);
                                if (idx < 0)
                                        continue;
                                /* Make sure all entries differ. */
                                write_c0_entryhi(UNIQUE_ENTRYHI(idx));
                                mtc0_tlbw_hazard();
                                tlb_write_indexed();
                        }
                        tlbw_use_hazard();
                        write_c0_entryhi(old_entryhi);
                        if (cpu_has_mmid)
                                write_c0_memorymapid(old_mmid);
                        htw_start();
                } else {
                        drop_mmu_context(mm);
                }
                flush_micro_tlb();
                local_irq_restore(flags);
        }
}

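/*
 * Flush a range of kernel addresses.  Kernel mappings are created with the
 * global bit set, so the probe matches them regardless of the ASID left in
 * EntryHi; ranges that would touch too much of the TLB fall back to
 * local_flush_tlb_all().
 */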
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
        unsigned long size, flags;

        local_irq_save(flags);
        size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
        size = (size + 1) >> 1;
        if (size <= (current_cpu_data.tlbsizeftlbsets ?
                     current_cpu_data.tlbsize / 8 :
                     current_cpu_data.tlbsize / 2)) {
                int pid = read_c0_entryhi();

                start &= (PAGE_MASK << 1);
                end += ((PAGE_SIZE << 1) - 1);
                end &= (PAGE_MASK << 1);
                htw_stop();

                while (start < end) {
                        int idx;

                        write_c0_entryhi(start);
                        start += (PAGE_SIZE << 1);
                        mtc0_tlbw_hazard();
                        tlb_probe();
                        tlb_probe_hazard();
                        idx = read_c0_index();
                        write_c0_entrylo0(0);
                        write_c0_entrylo1(0);
                        if (idx < 0)
                                continue;
                        /* Make sure all entries differ. */
                        write_c0_entryhi(UNIQUE_ENTRYHI(idx));
                        mtc0_tlbw_hazard();
                        tlb_write_indexed();
                }
                tlbw_use_hazard();
                write_c0_entryhi(pid);
                htw_start();
        } else {
                local_flush_tlb_all();
        }
        flush_micro_tlb();
        local_irq_restore(flags);
}

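/*
 * Flush the single even/odd page pair containing 'page' for the given
 * vma's mm.  The entry is located with a probe against the mm's ASID (or
 * MMID) and, if present, overwritten with a unique unmapped VPN2.
 */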
void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
        int cpu = smp_processor_id();

        if (cpu_context(cpu, vma->vm_mm) != 0) {
                unsigned long uninitialized_var(old_mmid);
                unsigned long flags, old_entryhi;
                int idx;

                page &= (PAGE_MASK << 1);
                local_irq_save(flags);
                old_entryhi = read_c0_entryhi();
                htw_stop();
                if (cpu_has_mmid) {
                        old_mmid = read_c0_memorymapid();
                        write_c0_entryhi(page);
                        write_c0_memorymapid(cpu_asid(cpu, vma->vm_mm));
                } else {
                        write_c0_entryhi(page | cpu_asid(cpu, vma->vm_mm));
                }
                mtc0_tlbw_hazard();
                tlb_probe();
                tlb_probe_hazard();
                idx = read_c0_index();
                write_c0_entrylo0(0);
                write_c0_entrylo1(0);
                if (idx < 0)
                        goto finish;
                /* Make sure all entries differ. */
                write_c0_entryhi(UNIQUE_ENTRYHI(idx));
                mtc0_tlbw_hazard();
                tlb_write_indexed();
                tlbw_use_hazard();

        finish:
                write_c0_entryhi(old_entryhi);
                if (cpu_has_mmid)
                        write_c0_memorymapid(old_mmid);
                htw_start();
                flush_micro_tlb_vm(vma);
                local_irq_restore(flags);
        }
}

/*
 * This one is only used for pages with the global bit set so we don't care
 * much about the ASID.
 */
void local_flush_tlb_one(unsigned long page)
{
        unsigned long flags;
        int oldpid, idx;

        local_irq_save(flags);
        oldpid = read_c0_entryhi();
        htw_stop();
        page &= (PAGE_MASK << 1);
        write_c0_entryhi(page);
        mtc0_tlbw_hazard();
        tlb_probe();
        tlb_probe_hazard();
        idx = read_c0_index();
        write_c0_entrylo0(0);
        write_c0_entrylo1(0);
        if (idx >= 0) {
                /* Make sure all entries differ. */
                write_c0_entryhi(UNIQUE_ENTRYHI(idx));
                mtc0_tlbw_hazard();
                tlb_write_indexed();
                tlbw_use_hazard();
        }
        write_c0_entryhi(oldpid);
        htw_start();
        flush_micro_tlb();
        local_irq_restore(flags);
}

/*
 * We will need multiple versions of update_mmu_cache(), one that just
 * updates the TLB with the new pte(s), and another which also checks
 * for the R4k "end of page" hardware bug and works around it.
 */
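/*
 * Preload the TLB entry covering 'address' from the page tables after a
 * fault.  EntryLo0/EntryLo1 are loaded from the even/odd PTE pair (or from
 * a single huge-page PMD), and the entry is written back either at the
 * index found by the probe or, if the probe missed, at a random index.
 */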
void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
{
        unsigned long flags;
        pgd_t *pgdp;
        pud_t *pudp;
        pmd_t *pmdp;
        pte_t *ptep;
        int idx, pid;

        /*
         * Handle the debugger faulting in pages for the debuggee.
         */
        if (current->active_mm != vma->vm_mm)
                return;

        local_irq_save(flags);

        htw_stop();
        address &= (PAGE_MASK << 1);
        if (cpu_has_mmid) {
                write_c0_entryhi(address);
        } else {
                pid = read_c0_entryhi() & cpu_asid_mask(&current_cpu_data);
                write_c0_entryhi(address | pid);
        }
        pgdp = pgd_offset(vma->vm_mm, address);
        mtc0_tlbw_hazard();
        tlb_probe();
        tlb_probe_hazard();
        pudp = pud_offset(pgdp, address);
        pmdp = pmd_offset(pudp, address);
        idx = read_c0_index();
#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
        /* this could be a huge page  */
        if (pmd_huge(*pmdp)) {
                unsigned long lo;
                write_c0_pagemask(PM_HUGE_MASK);
                ptep = (pte_t *)pmdp;
                lo = pte_to_entrylo(pte_val(*ptep));
                write_c0_entrylo0(lo);
                write_c0_entrylo1(lo + (HPAGE_SIZE >> 7));

                mtc0_tlbw_hazard();
                if (idx < 0)
                        tlb_write_random();
                else
                        tlb_write_indexed();
                tlbw_use_hazard();
                write_c0_pagemask(PM_DEFAULT_MASK);
        } else
#endif
        {
                ptep = pte_offset_map(pmdp, address);

#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
#ifdef CONFIG_XPA
                write_c0_entrylo0(pte_to_entrylo(ptep->pte_high));
                if (cpu_has_xpa)
                        writex_c0_entrylo0(ptep->pte_low & _PFNX_MASK);
                ptep++;
                write_c0_entrylo1(pte_to_entrylo(ptep->pte_high));
                if (cpu_has_xpa)
                        writex_c0_entrylo1(ptep->pte_low & _PFNX_MASK);
#else
                write_c0_entrylo0(ptep->pte_high);
                ptep++;
                write_c0_entrylo1(ptep->pte_high);
#endif
#else
                write_c0_entrylo0(pte_to_entrylo(pte_val(*ptep++)));
                write_c0_entrylo1(pte_to_entrylo(pte_val(*ptep)));
#endif
                mtc0_tlbw_hazard();
                if (idx < 0)
                        tlb_write_random();
                else
                        tlb_write_indexed();
        }
        tlbw_use_hazard();
        htw_start();
        flush_micro_tlb_vm(vma);
        local_irq_restore(flags);
}

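/*
 * Install a permanent wired entry, bumping the c0_wired count so the new
 * slot is never chosen by a random replacement.  Used by platform code that
 * needs a fixed translation present at all times; not supported on XPA
 * kernels (the function panics there).
 */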
void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
                     unsigned long entryhi, unsigned long pagemask)
{
#ifdef CONFIG_XPA
        panic("Broken for XPA kernels");
#else
        unsigned int uninitialized_var(old_mmid);
        unsigned long flags;
        unsigned long wired;
        unsigned long old_pagemask;
        unsigned long old_ctx;

        local_irq_save(flags);
        if (cpu_has_mmid) {
                old_mmid = read_c0_memorymapid();
                write_c0_memorymapid(MMID_KERNEL_WIRED);
        }
        /* Save old context and create impossible VPN2 value */
        old_ctx = read_c0_entryhi();
        htw_stop();
        old_pagemask = read_c0_pagemask();
        wired = num_wired_entries();
        write_c0_wired(wired + 1);
        write_c0_index(wired);
        tlbw_use_hazard();      /* What is the hazard here? */
        write_c0_pagemask(pagemask);
        write_c0_entryhi(entryhi);
        write_c0_entrylo0(entrylo0);
        write_c0_entrylo1(entrylo1);
        mtc0_tlbw_hazard();
        tlb_write_indexed();
        tlbw_use_hazard();

        write_c0_entryhi(old_ctx);
        if (cpu_has_mmid)
                write_c0_memorymapid(old_mmid);
        tlbw_use_hazard();      /* What is the hazard here? */
        htw_start();
        write_c0_pagemask(old_pagemask);
        local_flush_tlb_all();
        local_irq_restore(flags);
#endif
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

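/*
 * Probe for huge page support by writing PM_HUGE_MASK to c0_pagemask and
 * reading it back: a core that cannot map pages of that size will not
 * retain the value.  The result is cached after the first (__init) call.
 */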
int has_transparent_hugepage(void)
{
        static unsigned int mask = -1;

        if (mask == -1) {       /* first call comes during __init */
                unsigned long flags;

                local_irq_save(flags);
                write_c0_pagemask(PM_HUGE_MASK);
                back_to_back_c0_hazard();
                mask = read_c0_pagemask();
                write_c0_pagemask(PM_DEFAULT_MASK);
                local_irq_restore(flags);
        }
        return mask == PM_HUGE_MASK;
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE  */

/*
 * Used for loading TLB entries before trap_init() has started, when we
 * don't actually want to add a wired entry which remains throughout the
 * lifetime of the system
 */

int temp_tlb_entry;

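/*
 * Temporary entries are handed out from the top of the TLB downwards
 * (temp_tlb_entry is initialised to tlbsize - 1 in r4k_tlb_configure())
 * and are never allowed to collide with the wired region.
 */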
__init int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
                               unsigned long entryhi, unsigned long pagemask)
{
        int ret = 0;
        unsigned long flags;
        unsigned long wired;
        unsigned long old_pagemask;
        unsigned long old_ctx;

        local_irq_save(flags);
        /* Save old context and create impossible VPN2 value */
        htw_stop();
        old_ctx = read_c0_entryhi();
        old_pagemask = read_c0_pagemask();
        wired = num_wired_entries();
        if (--temp_tlb_entry < wired) {
                printk(KERN_WARNING
                       "No TLB space left for add_temporary_entry\n");
                ret = -ENOSPC;
                goto out;
        }

        write_c0_index(temp_tlb_entry);
        write_c0_pagemask(pagemask);
        write_c0_entryhi(entryhi);
        write_c0_entrylo0(entrylo0);
        write_c0_entrylo1(entrylo1);
        mtc0_tlbw_hazard();
        tlb_write_indexed();
        tlbw_use_hazard();

        write_c0_entryhi(old_ctx);
        write_c0_pagemask(old_pagemask);
        htw_start();
out:
        local_irq_restore(flags);
        return ret;
}

static int ntlb;
static int __init set_ntlb(char *str)
{
        get_option(&str, &ntlb);
        return 1;
}

__setup("ntlb=", set_ntlb);

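/*
 * The "ntlb=" command line parameter limits the number of TLB entries the
 * kernel will use; tlb_init() wires off the remainder (see below).
 */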
/*
 * Configure TLB (for init or after a CPU has been powered off).
 */
static void r4k_tlb_configure(void)
{
        /*
         * You should never change this register:
         *   - On R4600 1.7 the tlbp never hits for pages smaller than
         *     the value in the c0_pagemask register.
         *   - The entire mm handling assumes the c0_pagemask register to
         *     be set to fixed-size pages.
         */
        write_c0_pagemask(PM_DEFAULT_MASK);
        back_to_back_c0_hazard();
        if (read_c0_pagemask() != PM_DEFAULT_MASK)
                panic("MMU doesn't support PAGE_SIZE=0x%lx", PAGE_SIZE);

        write_c0_wired(0);
        if (current_cpu_type() == CPU_R10000 ||
            current_cpu_type() == CPU_R12000 ||
            current_cpu_type() == CPU_R14000 ||
            current_cpu_type() == CPU_R16000)
                write_c0_framemask(0);

        if (cpu_has_rixi) {
                /*
                 * Enable the no read, no exec bits, and enable large physical
                 * address.
                 */
#ifdef CONFIG_64BIT
                set_c0_pagegrain(PG_RIE | PG_XIE | PG_ELPA);
#else
                set_c0_pagegrain(PG_RIE | PG_XIE);
#endif
        }

        temp_tlb_entry = current_cpu_data.tlbsize - 1;

        /* From this point on the ARC firmware is dead.  */
        local_flush_tlb_all();

        /* Did I tell you that ARC SUCKS?  */
}

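/*
 * Per-CPU TLB bring-up: program the MMU, honour any "ntlb=" restriction by
 * wiring off the excess entries, and build the TLB refill handler.
 */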
void tlb_init(void)
{
        r4k_tlb_configure();

        if (ntlb) {
                if (ntlb > 1 && ntlb <= current_cpu_data.tlbsize) {
                        int wired = current_cpu_data.tlbsize - ntlb;
                        write_c0_wired(wired);
                        write_c0_index(wired-1);
                        printk("Restricting TLB to %d entries\n", ntlb);
                } else
                        printk("Ignoring invalid argument ntlb=%d\n", ntlb);
        }

        build_tlb_refill_handler();
}

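/*
 * CPU PM notifier: when a CPU exits a low-power state (or fails to enter
 * one) its MMU configuration may have been lost, so reconfigure the TLB.
 */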
static int r4k_tlb_pm_notifier(struct notifier_block *self, unsigned long cmd,
                               void *v)
{
        switch (cmd) {
        case CPU_PM_ENTER_FAILED:
        case CPU_PM_EXIT:
                r4k_tlb_configure();
                break;
        }

        return NOTIFY_OK;
}

static struct notifier_block r4k_tlb_pm_notifier_block = {
        .notifier_call = r4k_tlb_pm_notifier,
};

static int __init r4k_tlb_init_pm(void)
{
        return cpu_pm_register_notifier(&r4k_tlb_pm_notifier_block);
}
arch_initcall(r4k_tlb_init_pm);
