/*
 * PPC Huge TLB Page Support for Book3E MMU
 *
 * Copyright (C) 2009 David Gibson, IBM Corporation.
 * Copyright (C) 2011 Becky Bruce, Freescale Semiconductor
 *
 */
#include <linux/mm.h>
#include <linux/hugetlb.h>

#ifdef CONFIG_PPC_FSL_BOOK3E
#ifdef CONFIG_PPC64
/* Round-robin through the TLB1 entry range reserved in the per-core data */
static inline int tlb1_next(void)
{
	struct paca_struct *paca = get_paca();
	struct tlb_core_data *tcd;
	int this, next;

	tcd = paca->tcd_ptr;
	this = tcd->esel_next;

	next = this + 1;
	if (next >= tcd->esel_max)
		next = tcd->esel_first;

	tcd->esel_next = next;
	return this;
}
#else
static inline int tlb1_next(void)
{
	int index, ncams;

	ncams = mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY;

	index = this_cpu_read(next_tlbcam_idx);

	/* Just round-robin the entries and wrap when we hit the end */
	if (unlikely(index == ncams - 1))
		__this_cpu_write(next_tlbcam_idx, tlbcam_index);
	else
		__this_cpu_inc(next_tlbcam_idx);

	return index;
}
#endif /* !PPC64 */
#endif /* FSL */

static inline int mmu_get_tsize(int psize)
{
	return mmu_psize_defs[psize].enc;
}

/* Return non-zero if the TLB already has an entry translating @ea for @pid */
static inline int book3e_tlb_exists(unsigned long ea, unsigned long pid)
{
	int found = 0;

	mtspr(SPRN_MAS6, pid << 16);
	if (mmu_has_feature(MMU_FTR_USE_TLBRSRV)) {
		asm volatile(
			"li	%0,0\n"
			"tlbsx.	0,%1\n"
			"bne	1f\n"
			"li	%0,1\n"
			"1:\n"
			: "=&r"(found) : "r"(ea));
	} else {
		/* Read MAS1 (SPR 0x271) and extract the VALID bit */
		asm volatile(
			"tlbsx	0,%1\n"
			"mfspr	%0,0x271\n"
			"srwi	%0,%0,31\n"
			: "=&r"(found) : "r"(ea));
	}

	return found;
}

void book3e_hugetlb_preload(struct vm_area_struct *vma, unsigned long ea,
			    pte_t pte)
{
	unsigned long mas1, mas2;
	u64 mas7_3;
	unsigned long psize, tsize, shift;
	unsigned long flags;
	struct mm_struct *mm;

#ifdef CONFIG_PPC_FSL_BOOK3E
	int index;
#endif

	if (unlikely(is_kernel_addr(ea)))
		return;

	mm = vma->vm_mm;

#ifdef CONFIG_PPC_MM_SLICES
	psize = get_slice_psize(mm, ea);
	tsize = mmu_get_tsize(psize);
	shift = mmu_psize_defs[psize].shift;
#else
	psize = vma_mmu_pagesize(vma);
	shift = __ilog2(psize);
	tsize = shift - 10;
#endif

	/*
	 * We can't be interrupted while we're setting up the MAS
	 * registers or after we've confirmed that no tlb exists.
	 */
	local_irq_save(flags);

	if (unlikely(book3e_tlb_exists(ea, mm->context.id))) {
		local_irq_restore(flags);
		return;
	}

#ifdef CONFIG_PPC_FSL_BOOK3E
	/* We have to use the CAM(TLB1) on FSL parts for hugepages */
	index = tlb1_next();
	mtspr(SPRN_MAS0, MAS0_ESEL(index) | MAS0_TLBSEL(1));
#endif

	mas1 = MAS1_VALID | MAS1_TID(mm->context.id) | MAS1_TSIZE(tsize);
	mas2 = ea & ~((1UL << shift) - 1);
	mas2 |= (pte_val(pte) >> PTE_WIMGE_SHIFT) & MAS2_WIMGE_MASK;
	mas7_3 = (u64)pte_pfn(pte) << PAGE_SHIFT;
	mas7_3 |= (pte_val(pte) >> PTE_BAP_SHIFT) & MAS3_BAP_MASK;
	if (!pte_dirty(pte))
		mas7_3 &= ~(MAS3_SW|MAS3_UW);

	mtspr(SPRN_MAS1, mas1);
	mtspr(SPRN_MAS2, mas2);

	if (mmu_has_feature(MMU_FTR_USE_PAIRED_MAS)) {
		mtspr(SPRN_MAS7_MAS3, mas7_3);
	} else {
		if (mmu_has_feature(MMU_FTR_BIG_PHYS))
			mtspr(SPRN_MAS7, upper_32_bits(mas7_3));
		mtspr(SPRN_MAS3, lower_32_bits(mas7_3));
	}

	asm volatile ("tlbwe");

	local_irq_restore(flags);
}

void flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
	struct hstate *hstate = hstate_file(vma->vm_file);
	unsigned long tsize = huge_page_shift(hstate) - 10;

	__flush_tlb_page(vma->vm_mm, vmaddr, tsize, 0);
}
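
/*
 * Illustrative sketch, not part of the original file: a stand-alone,
 * user-space rendition of the two bits of arithmetic this file relies on,
 * namely the round-robin selection of the next TLB1 entry (cf. tlb1_next())
 * and masking the effective address down to a huge-page boundary (cf. the
 * MAS2 setup above).  The names demo_next_entry() and demo_huge_base() are
 * hypothetical, and the #if 0 guard keeps this sketch out of any build.
 */
#if 0
#include <stdio.h>

/* Round-robin between first and max, returning the slot to use this time */
static int demo_next_entry(int *next, int first, int max)
{
	int this = *next;

	*next = (this + 1 >= max) ? first : this + 1;
	return this;
}

/* Mask an address down to the start of its 2^shift-byte huge page */
static unsigned long demo_huge_base(unsigned long ea, unsigned int shift)
{
	return ea & ~((1UL << shift) - 1);
}

int main(void)
{
	int next = 62, i;

	/* Entries 60..63 are handed out as 62, 63, 60, 61, 62, ... */
	for (i = 0; i < 5; i++)
		printf("slot %d\n", demo_next_entry(&next, 60, 64));

	/* Base of the 4M huge page (shift = 22) containing 0x10123456 */
	printf("base %#lx\n", demo_huge_base(0x10123456UL, 22));
	return 0;
}
#endif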