root/arch/sh/mm/tlb-pteaex.c


DEFINITIONS

This source file includes the following definitions.
  1. __update_tlb
  2. local_flush_tlb_one
  3. local_flush_tlb_all

/*
 * arch/sh/mm/tlb-pteaex.c
 *
 * TLB operations for SH-X3 CPUs featuring PTE ASID Extensions.
 *
 * Copyright (C) 2009 Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>

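/*
 * Load the freshly updated PTE for @address into the TLB so that the
 * access which faulted can be restarted without a further TLB miss.
 */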
void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
        unsigned long flags, pteval, vpn;

        /*
         * Handle the debugger faulting in for the debuggee.
         */
        if (vma && current->active_mm != vma->vm_mm)
                return;

        local_irq_save(flags);

        /* Set PTEH register */
        vpn = address & MMU_VPN_MASK;
        __raw_writel(vpn, MMU_PTEH);

        /*
         * Set PTEAEX: with PTE ASID extensions the current ASID lives in
         * its own register rather than in the low bits of PTEH.
         */
        __raw_writel(get_asid(), MMU_PTEAEX);

        pteval = pte.pte_low;

        /* Set PTEA register */
#ifdef CONFIG_X2TLB
        /*
         * For the extended mode TLB this is trivial: only the ESZ and
         * EPR bits need to be written out to PTEA, with the remainder of
         * the protection bits (with the exception of the compat-mode SZ
         * and PR bits, which are cleared) being written out in PTEL.
         */
        __raw_writel(pte.pte_high, MMU_PTEA);
#endif

        /* Set PTEL register */
        pteval &= _PAGE_FLAGS_HARDWARE_MASK; /* drop software flags */
#ifdef CONFIG_CACHE_WRITETHROUGH
        pteval |= _PAGE_WT;
#endif
        /* conveniently, we want all the software flags to be 0 anyway */
        __raw_writel(pteval, MMU_PTEL);

        /*
         * Load the TLB: ldtlb copies the PTEH/PTEL (and PTEA) register
         * contents into the UTLB entry selected by MMUCR.URC.
         */
        asm volatile("ldtlb": /* no output */ : /* no input */ : "memory");
        local_irq_restore(flags);
}

/*
 * While SH-X2 extended TLB mode splits out the memory-mapped I/UTLB
 * data arrays, SH-X3 cores with PTEAEX split out the memory-mapped
 * address arrays. In compat mode the second array is inaccessible, while
 * in extended mode, the legacy 8-bit ASID field in address array 1 has
 * undefined behaviour.
 */
void local_flush_tlb_one(unsigned long asid, unsigned long page)
{
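        /*
         * The memory-mapped TLB address arrays have to be poked from the
         * uncached (P2) mapping, hence the jump below.
         */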
        jump_to_uncached();
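        /*
         * These are associative writes (MMU_PAGE_ASSOC_BIT): the entry
         * matching the given VPN/ASID pair is looked up and, as the data
         * written has the valid bit clear, invalidated. With PTEAEX the
         * ASID for the comparison goes through the second address array.
         */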
        __raw_writel(page, MMU_UTLB_ADDRESS_ARRAY | MMU_PAGE_ASSOC_BIT);
        __raw_writel(asid, MMU_UTLB_ADDRESS_ARRAY2 | MMU_PAGE_ASSOC_BIT);
        __raw_writel(page, MMU_ITLB_ADDRESS_ARRAY | MMU_PAGE_ASSOC_BIT);
        __raw_writel(asid, MMU_ITLB_ADDRESS_ARRAY2 | MMU_PAGE_ASSOC_BIT);
        back_to_cached();
}

void local_flush_tlb_all(void)
{
        unsigned long flags, status;
        int i;

        /*
         * Flush the entire TLB.
         */
        local_irq_save(flags);
        jump_to_uncached();

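        /*
         * MMUCR.URB marks the start of the wired (non-replaceable) UTLB
         * entries, so only the entries below it are flushed here. A URB
         * of zero means nothing is wired and the whole UTLB is cleared.
         */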
        status = __raw_readl(MMUCR);
        status = ((status & MMUCR_URB) >> MMUCR_URB_SHIFT);

        if (status == 0)
                status = MMUCR_URB_NENTRIES;

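        /* Writing zero to an address array slot clears that entry's valid bit. */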
        for (i = 0; i < status; i++)
                __raw_writel(0x0, MMU_UTLB_ADDRESS_ARRAY | (i << 8));

        for (i = 0; i < 4; i++)
                __raw_writel(0x0, MMU_ITLB_ADDRESS_ARRAY | (i << 8));

        back_to_cached();
        ctrl_barrier();
        local_irq_restore(flags);
}
