Lines matching refs:tlb (cross-reference of the tlb identifier through the kernel's mm/memory.c: mmu_gather batching, page-table teardown, and the zap/unmap paths)

183 static int tlb_next_batch(struct mmu_gather *tlb)  in tlb_next_batch()  argument
187 batch = tlb->active; in tlb_next_batch()
189 tlb->active = batch->next; in tlb_next_batch()
193 if (tlb->batch_count == MAX_GATHER_BATCH_COUNT) in tlb_next_batch()
200 tlb->batch_count++; in tlb_next_batch()
205 tlb->active->next = batch; in tlb_next_batch()
206 tlb->active = batch; in tlb_next_batch()
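
The fragments above cover both paths through tlb_next_batch(): reuse an already-chained batch, or allocate a fresh one until MAX_GATHER_BATCH_COUNT is hit. A reconstruction consistent with these lines follows; the allocation call and the batch-size constants are assumptions from this kernel era, not part of the listing:

	static int tlb_next_batch(struct mmu_gather *tlb)
	{
		struct mmu_gather_batch *batch;

		/* Reuse a batch chained on by an earlier gather pass. */
		batch = tlb->active;
		if (batch->next) {
			tlb->active = batch->next;
			return 1;
		}

		/* Cap the memory pinned in batches; 0 tells the caller to flush. */
		if (tlb->batch_count == MAX_GATHER_BATCH_COUNT)
			return 0;

		batch = (void *)__get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);
		if (!batch)
			return 0;

		tlb->batch_count++;
		batch->next = NULL;
		batch->nr   = 0;
		batch->max  = MAX_GATHER_BATCH;

		tlb->active->next = batch;
		tlb->active = batch;

		return 1;
	}
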
216 void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end) in tlb_gather_mmu() argument
218 tlb->mm = mm; in tlb_gather_mmu()
221 tlb->fullmm = !(start | (end+1)); in tlb_gather_mmu()
222 tlb->need_flush_all = 0; in tlb_gather_mmu()
223 tlb->local.next = NULL; in tlb_gather_mmu()
224 tlb->local.nr = 0; in tlb_gather_mmu()
225 tlb->local.max = ARRAY_SIZE(tlb->__pages); in tlb_gather_mmu()
226 tlb->active = &tlb->local; in tlb_gather_mmu()
227 tlb->batch_count = 0; in tlb_gather_mmu()
230 tlb->batch = NULL; in tlb_gather_mmu()
233 __tlb_reset_range(tlb); in tlb_gather_mmu()
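
The !(start | (end+1)) test in tlb_gather_mmu() sets tlb->fullmm only for the whole-address-space case: it is true exactly when start == 0 and end == ~0UL. So a caller tearing down an entire mm (as the exit paths do) would arm the gatherer like this:

	/* Full-mm teardown: !(0 | (-1UL + 1)) == !(0 | 0) == 1,
	 * so tlb->fullmm is set and per-range tracking is skipped. */
	tlb_gather_mmu(&tlb, mm, 0, -1);
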
236 static void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb) in tlb_flush_mmu_tlbonly() argument
238 if (!tlb->end) in tlb_flush_mmu_tlbonly()
241 tlb_flush(tlb); in tlb_flush_mmu_tlbonly()
242 mmu_notifier_invalidate_range(tlb->mm, tlb->start, tlb->end); in tlb_flush_mmu_tlbonly()
244 tlb_table_flush(tlb); in tlb_flush_mmu_tlbonly()
246 __tlb_reset_range(tlb); in tlb_flush_mmu_tlbonly()
249 static void tlb_flush_mmu_free(struct mmu_gather *tlb) in tlb_flush_mmu_free() argument
253 for (batch = &tlb->local; batch && batch->nr; batch = batch->next) { in tlb_flush_mmu_free()
257 tlb->active = &tlb->local; in tlb_flush_mmu_free()
260 void tlb_flush_mmu(struct mmu_gather *tlb) in tlb_flush_mmu() argument
262 tlb_flush_mmu_tlbonly(tlb); in tlb_flush_mmu()
263 tlb_flush_mmu_free(tlb); in tlb_flush_mmu()
270 void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end) in tlb_finish_mmu() argument
274 tlb_flush_mmu(tlb); in tlb_finish_mmu()
279 for (batch = tlb->local.next; batch; batch = next) { in tlb_finish_mmu()
283 tlb->local.next = NULL; in tlb_finish_mmu()
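
tlb_gather_mmu(), tlb_flush_mmu() and tlb_finish_mmu() bracket every batched unmap: gather arms the structure, tlb_flush_mmu() does the TLB shootdown and frees the queued pages, and tlb_finish_mmu() performs a final flush and releases the extra batches. A minimal lifecycle sketch, modeled on exit-style callers (the floor/ceiling constants are assumptions, not from the listing):

	struct mmu_gather tlb;

	tlb_gather_mmu(&tlb, mm, start, end);	/* arm the gatherer */
	unmap_vmas(&tlb, vma, start, end);	/* clear PTEs, queue the pages */
	free_pgtables(&tlb, vma, FIRST_USER_ADDRESS,
		      USER_PGTABLES_CEILING);	/* queue page-table pages too */
	tlb_finish_mmu(&tlb, start, end);	/* flush TLB, then free everything */
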
292 int __tlb_remove_page(struct mmu_gather *tlb, struct page *page) in __tlb_remove_page() argument
296 VM_BUG_ON(!tlb->end); in __tlb_remove_page()
298 batch = tlb->active; in __tlb_remove_page()
301 if (!tlb_next_batch(tlb)) in __tlb_remove_page()
303 batch = tlb->active; in __tlb_remove_page()
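
These lines give __tlb_remove_page() its contract: queue the page on the active batch, move to (or allocate) the next batch when full, and return 0 when no space is left so the caller knows to flush and retry. A reconstruction consistent with the fragments (the slots-remaining return value is an assumption from this era):

	int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
	{
		struct mmu_gather_batch *batch;

		VM_BUG_ON(!tlb->end);	/* pages may only be queued with a flush pending */

		batch = tlb->active;
		if (batch->nr == batch->max) {
			if (!tlb_next_batch(tlb))
				return 0;	/* caller must tlb_flush_mmu() and retry */
			batch = tlb->active;
		}

		batch->pages[batch->nr++] = page;
		return batch->max - batch->nr;	/* slots left in the active batch */
	}
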
349 void tlb_table_flush(struct mmu_gather *tlb) in tlb_table_flush() argument
351 struct mmu_table_batch **batch = &tlb->batch; in tlb_table_flush()
359 void tlb_remove_table(struct mmu_gather *tlb, void *table) in tlb_remove_table() argument
361 struct mmu_table_batch **batch = &tlb->batch; in tlb_remove_table()
367 if (atomic_read(&tlb->mm->mm_users) < 2) { in tlb_remove_table()
382 tlb_table_flush(tlb); in tlb_remove_table()
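
tlb_remove_table() exists for architectures where lockless walkers (such as fast GUP) may still be traversing a page table after it is unlinked, so freeing must wait out a grace period. The mm_users < 2 check is the fast path: a single-threaded mm has no concurrent walkers, so the table can be freed at once. A sketch of the batching these fragments imply; __tlb_remove_table(), tlb_remove_table_one() and MAX_TABLE_BATCH are this era's helpers, assumed here rather than shown in the listing:

	void tlb_remove_table(struct mmu_gather *tlb, void *table)
	{
		struct mmu_table_batch **batch = &tlb->batch;

		/* Sole user: nobody else can be walking these tables. */
		if (atomic_read(&tlb->mm->mm_users) < 2) {
			__tlb_remove_table(table);
			return;
		}

		if (*batch == NULL) {
			*batch = (struct mmu_table_batch *)
				__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
			if (*batch == NULL) {
				/* No room to batch: free via a one-off grace period. */
				tlb_remove_table_one(table);
				return;
			}
			(*batch)->nr = 0;
		}

		(*batch)->tables[(*batch)->nr++] = table;
		if ((*batch)->nr == MAX_TABLE_BATCH)
			tlb_table_flush(tlb);
	}
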
391 static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd, in free_pte_range() argument
396 pte_free_tlb(tlb, token, addr); in free_pte_range()
397 atomic_long_dec(&tlb->mm->nr_ptes); in free_pte_range()
400 static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud, in free_pmd_range() argument
414 free_pte_range(tlb, pmd, addr); in free_pmd_range()
430 pmd_free_tlb(tlb, pmd, start); in free_pmd_range()
431 mm_dec_nr_pmds(tlb->mm); in free_pmd_range()
434 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd, in free_pud_range() argument
448 free_pmd_range(tlb, pud, addr, next, floor, ceiling); in free_pud_range()
464 pud_free_tlb(tlb, pud, start); in free_pud_range()
470 void free_pgd_range(struct mmu_gather *tlb, in free_pgd_range() argument
519 pgd = pgd_offset(tlb->mm, addr); in free_pgd_range()
524 free_pud_range(tlb, pgd, addr, next, floor, ceiling); in free_pgd_range()
528 void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma, in free_pgtables() argument
543 hugetlb_free_pgd_range(tlb, addr, vma->vm_end, in free_pgtables()
556 free_pgd_range(tlb, addr, vma->vm_end, in free_pgtables()
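
Every level of this teardown follows the same shape: clear the entry one level up, then hand the now-unreachable table page to the gatherer so it is only freed after the TLB flush. The leaf case is small enough to reconstruct in full from the fragments (pmd_pgtable() and pmd_clear() are the standard helpers, assumed):

	static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
				   unsigned long addr)
	{
		pgtable_t token = pmd_pgtable(*pmd);	/* the PTE page itself */
		pmd_clear(pmd);				/* unlink before freeing */
		pte_free_tlb(tlb, token, addr);		/* queue on the gatherer */
		atomic_long_dec(&tlb->mm->nr_ptes);
	}
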
1072 static unsigned long zap_pte_range(struct mmu_gather *tlb, in zap_pte_range() argument
1077 struct mm_struct *mm = tlb->mm; in zap_pte_range()
1111 tlb->fullmm); in zap_pte_range()
1112 tlb_remove_tlb_entry(tlb, pte, addr); in zap_pte_range()
1130 if (unlikely(!__tlb_remove_page(tlb, page))) { in zap_pte_range()
1156 pte_clear_not_present_full(mm, addr, pte, tlb->fullmm); in zap_pte_range()
1164 tlb_flush_mmu_tlbonly(tlb); in zap_pte_range()
1175 tlb_flush_mmu_free(tlb); in zap_pte_range()
1184 static inline unsigned long zap_pmd_range(struct mmu_gather *tlb, in zap_pmd_range() argument
1198 if (!rwsem_is_locked(&tlb->mm->mmap_sem)) { in zap_pmd_range()
1207 } else if (zap_huge_pmd(tlb, vma, pmd, addr)) in zap_pmd_range()
1220 next = zap_pte_range(tlb, vma, pmd, addr, next, details); in zap_pmd_range()
1228 static inline unsigned long zap_pud_range(struct mmu_gather *tlb, in zap_pud_range() argument
1241 next = zap_pmd_range(tlb, vma, pud, addr, next, details); in zap_pud_range()
1247 static void unmap_page_range(struct mmu_gather *tlb, in unmap_page_range() argument
1259 tlb_start_vma(tlb, vma); in unmap_page_range()
1265 next = zap_pud_range(tlb, vma, pgd, addr, next, details); in unmap_page_range()
1267 tlb_end_vma(tlb, vma); in unmap_page_range()
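
The zap path walks the tree top-down, unmap_page_range() → zap_pud_range() → zap_pmd_range() → zap_pte_range(), with tlb_start_vma()/tlb_end_vma() bracketing each VMA. The middle levels are plain skip-and-recurse loops; a reconstruction of zap_pud_range() consistent with the fragments (the walk helpers are the generic ones, assumed):

	static inline unsigned long zap_pud_range(struct mmu_gather *tlb,
					struct vm_area_struct *vma, pgd_t *pgd,
					unsigned long addr, unsigned long end,
					struct zap_details *details)
	{
		pud_t *pud;
		unsigned long next;

		pud = pud_offset(pgd, addr);
		do {
			next = pud_addr_end(addr, end);
			if (pud_none_or_clear_bad(pud))
				continue;	/* nothing mapped at this PUD */
			next = zap_pmd_range(tlb, vma, pud, addr, next, details);
		} while (pud++, addr = next, addr != end);

		return addr;
	}
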
1271 static void unmap_single_vma(struct mmu_gather *tlb, in unmap_single_vma() argument
1306 __unmap_hugepage_range_final(tlb, vma, start, end, NULL); in unmap_single_vma()
1310 unmap_page_range(tlb, vma, start, end, details); in unmap_single_vma()
1332 void unmap_vmas(struct mmu_gather *tlb, in unmap_vmas() argument
1340 unmap_single_vma(tlb, vma, start_addr, end_addr, NULL); in unmap_vmas()
1357 struct mmu_gather tlb; in zap_page_range() local
1361 tlb_gather_mmu(&tlb, mm, start, end); in zap_page_range()
1365 unmap_single_vma(&tlb, vma, start, end, details); in zap_page_range()
1367 tlb_finish_mmu(&tlb, start, end); in zap_page_range()
1383 struct mmu_gather tlb; in zap_page_range_single() local
1387 tlb_gather_mmu(&tlb, mm, address, end); in zap_page_range_single()
1390 unmap_single_vma(&tlb, vma, address, end, details); in zap_page_range_single()
1392 tlb_finish_mmu(&tlb, address, end); in zap_page_range_single()
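
Both wrappers package the whole gather/unmap/finish sequence for callers that only hold a VMA and a range. For example, an MADV_DONTNEED-style caller can drop every page in a range with one call (the surrounding context is assumed, not part of the listing):

	/* Drop all PTEs, and the pages they map, in [start, end);
	 * the mmu_gather machinery above batches and flushes for us. */
	zap_page_range(vma, start, end - start, NULL);
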