arch/powerpc/mm/nohash/book3e_hugetlbpage.c


DEFINITIONS

This source file includes the following definitions:
  1. tlb1_next
  2. book3e_tlb_lock
  3. book3e_tlb_unlock
  4. tlb1_next
  5. book3e_tlb_lock
  6. book3e_tlb_unlock
  7. book3e_tlb_exists
  8. book3e_hugetlb_preload
  9. update_mmu_cache
  10. flush_hugetlb_page

// SPDX-License-Identifier: GPL-2.0
/*
 * PPC Huge TLB Page Support for Book3E MMU
 *
 * Copyright (C) 2009 David Gibson, IBM Corporation.
 * Copyright (C) 2011 Becky Bruce, Freescale Semiconductor
 *
 */
#include <linux/mm.h>
#include <linux/hugetlb.h>

#include <asm/mmu.h>

#ifdef CONFIG_PPC64
#include <asm/paca.h>

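/*
 * Pick the next victim slot in TLB1 for this core: esel_next cycles
 * round-robin through [esel_first, esel_max) in the per-core
 * tlb_core_data, and the current value is returned as the entry to use.
 */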
static inline int tlb1_next(void)
{
        struct paca_struct *paca = get_paca();
        struct tlb_core_data *tcd;
        int this, next;

        tcd = paca->tcd_ptr;
        this = tcd->esel_next;

        next = this + 1;
        if (next >= tcd->esel_max)
                next = tcd->esel_first;

        tcd->esel_next = next;
        return this;
}

static inline void book3e_tlb_lock(void)
{
        struct paca_struct *paca = get_paca();
        unsigned long tmp;
        int token = smp_processor_id() + 1;

        /*
         * Besides being unnecessary in the absence of SMT, this
         * check prevents trying to do lbarx/stbcx. on e5500 which
         * doesn't implement either feature.
         */
        if (!cpu_has_feature(CPU_FTR_SMT))
                return;

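        /*
         * Spin until the per-core lock byte in the tlb_core_data is
         * claimed with our token (cpu id + 1): the loop at 1: tries an
         * atomic lbarx/stbcx. claim, and the loop at 2: busy-waits with
         * plain loads while another thread holds the lock.
         */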
        asm volatile("1: lbarx %0, 0, %1;"
                     "cmpwi %0, 0;"
                     "bne 2f;"
                     "stbcx. %2, 0, %1;"
                     "bne 1b;"
                     "b 3f;"
                     "2: lbzx %0, 0, %1;"
                     "cmpwi %0, 0;"
                     "bne 2b;"
                     "b 1b;"
                     "3:"
                     : "=&r" (tmp)
                     : "r" (&paca->tcd_ptr->lock), "r" (token)
                     : "memory");
}

static inline void book3e_tlb_unlock(void)
{
        struct paca_struct *paca = get_paca();

        if (!cpu_has_feature(CPU_FTR_SMT))
                return;

        isync();
        paca->tcd_ptr->lock = 0;
}
#else
static inline int tlb1_next(void)
{
        int index, ncams;

        ncams = mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY;

        index = this_cpu_read(next_tlbcam_idx);

        /* Just round-robin the entries and wrap when we hit the end */
        if (unlikely(index == ncams - 1))
                __this_cpu_write(next_tlbcam_idx, tlbcam_index);
        else
                __this_cpu_inc(next_tlbcam_idx);

        return index;
}

static inline void book3e_tlb_lock(void)
{
}

static inline void book3e_tlb_unlock(void)
{
}
#endif

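/*
 * Probe the TLB (via tlbsx) for an existing translation of @ea under
 * address-space id @pid; returns 1 if one is found, 0 otherwise.
 */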
static inline int book3e_tlb_exists(unsigned long ea, unsigned long pid)
{
        int found = 0;

        mtspr(SPRN_MAS6, pid << 16);
        if (mmu_has_feature(MMU_FTR_USE_TLBRSRV)) {
                asm volatile(
                        "li     %0,0\n"
                        "tlbsx. 0,%1\n"
                        "bne    1f\n"
                        "li     %0,1\n"
                        "1:\n"
                        : "=&r"(found) : "r"(ea));
        } else {
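                /*
                 * Here tlbsx does not set a condition code, so the
                 * search result is read back from SPR 0x271 (SPRN_MAS1)
                 * and the MAS1 valid bit is shifted down from bit 31 to
                 * bit 0.
                 */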
                asm volatile(
                        "tlbsx  0,%1\n"
                        "mfspr  %0,0x271\n"
                        "srwi   %0,%0,31\n"
                        : "=&r"(found) : "r"(ea));
        }

        return found;
}

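/*
 * Preload a TLB1 entry for a user huge page: pick a victim slot with
 * tlb1_next(), build MAS1/MAS2/MAS7_MAS3 values from the VMA and PTE,
 * and write the entry with tlbwe, unless a matching entry is already
 * present.
 */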
static void
book3e_hugetlb_preload(struct vm_area_struct *vma, unsigned long ea, pte_t pte)
{
        unsigned long mas1, mas2;
        u64 mas7_3;
        unsigned long psize, tsize, shift;
        unsigned long flags;
        struct mm_struct *mm;
        int index;

        if (unlikely(is_kernel_addr(ea)))
                return;

        mm = vma->vm_mm;

        psize = vma_mmu_pagesize(vma);
        shift = __ilog2(psize);
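        /*
         * TSIZE encodes the page size as 2^tsize KB, so a page of
         * 2^shift bytes is encoded as shift - 10; e.g. a 4 MB huge page
         * (shift == 22) gets tsize == 12.
         */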
        tsize = shift - 10;
        /*
         * We can't be interrupted while we're setting up the MAS
         * registers or after we've confirmed that no TLB entry exists.
         */
        local_irq_save(flags);

        book3e_tlb_lock();

        if (unlikely(book3e_tlb_exists(ea, mm->context.id))) {
                book3e_tlb_unlock();
                local_irq_restore(flags);
                return;
        }

        /* We have to use the CAM(TLB1) on FSL parts for hugepages */
        index = tlb1_next();
        mtspr(SPRN_MAS0, MAS0_ESEL(index) | MAS0_TLBSEL(1));

        mas1 = MAS1_VALID | MAS1_TID(mm->context.id) | MAS1_TSIZE(tsize);
        mas2 = ea & ~((1UL << shift) - 1);
        mas2 |= (pte_val(pte) >> PTE_WIMGE_SHIFT) & MAS2_WIMGE_MASK;
        mas7_3 = (u64)pte_pfn(pte) << PAGE_SHIFT;
        mas7_3 |= (pte_val(pte) >> PTE_BAP_SHIFT) & MAS3_BAP_MASK;
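        /*
         * If the PTE is not dirty, drop the supervisor and user write
         * permission bits so the first store faults and the generic code
         * can mark the page dirty before writes go through this entry.
         */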
        if (!pte_dirty(pte))
                mas7_3 &= ~(MAS3_SW|MAS3_UW);

        mtspr(SPRN_MAS1, mas1);
        mtspr(SPRN_MAS2, mas2);

        if (mmu_has_feature(MMU_FTR_USE_PAIRED_MAS)) {
                mtspr(SPRN_MAS7_MAS3, mas7_3);
        } else {
                if (mmu_has_feature(MMU_FTR_BIG_PHYS))
                        mtspr(SPRN_MAS7, upper_32_bits(mas7_3));
                mtspr(SPRN_MAS3, lower_32_bits(mas7_3));
        }

        asm volatile ("tlbwe");

        book3e_tlb_unlock();
        local_irq_restore(flags);
}

/*
 * This is called at the end of handling a user page fault, when the
 * fault has been handled by updating a PTE in the linux page tables.
 *
 * This must always be called with the pte lock held.
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
{
        if (is_vm_hugetlb_page(vma))
                book3e_hugetlb_preload(vma, address, *ptep);
}

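/*
 * Flush the TLB entry covering @vmaddr for this huge-page VMA; the tsize
 * argument is derived from the hstate page shift in the same way as in
 * the preload path above.
 */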
void flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
        struct hstate *hstate = hstate_file(vma->vm_file);
        unsigned long tsize = huge_page_shift(hstate) - 10;

        __flush_tlb_page(vma->vm_mm, vmaddr, tsize, 0);
}
