Lines matching refs: tlb (mm/memory.c)
184 static bool tlb_next_batch(struct mmu_gather *tlb) in tlb_next_batch() argument
188 batch = tlb->active; in tlb_next_batch()
190 tlb->active = batch->next; in tlb_next_batch()
194 if (tlb->batch_count == MAX_GATHER_BATCH_COUNT) in tlb_next_batch()
201 tlb->batch_count++; in tlb_next_batch()
206 tlb->active->next = batch; in tlb_next_batch()
207 tlb->active = batch; in tlb_next_batch()
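Taken together, the matches above are the spine of tlb_next_batch(): reuse the next batch already on the chain, give up once MAX_GATHER_BATCH_COUNT batches exist, otherwise allocate and link a fresh page-sized batch. A sketch of the full function, with the unmatched lines filled in from this era's source (the GFP flags and batch field initialisation are reconstructions from context, not quoted search output):

    static bool tlb_next_batch(struct mmu_gather *tlb)
    {
        struct mmu_gather_batch *batch;

        /* Reuse a batch left over from an earlier fill/flush cycle. */
        batch = tlb->active;
        if (batch->next) {
            tlb->active = batch->next;
            return true;
        }

        /* Cap the chain so one huge munmap cannot pin unbounded memory. */
        if (tlb->batch_count == MAX_GATHER_BATCH_COUNT)
            return false;

        batch = (void *)__get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);
        if (!batch)
            return false;

        tlb->batch_count++;
        batch->next = NULL;
        batch->nr   = 0;
        batch->max  = MAX_GATHER_BATCH;

        tlb->active->next = batch;
        tlb->active = batch;

        return true;
    }

A false return does not fail the unmap: __tlb_remove_page() reports it upward and the caller flushes early, as the zap_pte_range() lines further down show.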
217 void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end) in tlb_gather_mmu() argument
219 tlb->mm = mm; in tlb_gather_mmu()
222 tlb->fullmm = !(start | (end+1)); in tlb_gather_mmu()
223 tlb->need_flush_all = 0; in tlb_gather_mmu()
224 tlb->local.next = NULL; in tlb_gather_mmu()
225 tlb->local.nr = 0; in tlb_gather_mmu()
226 tlb->local.max = ARRAY_SIZE(tlb->__pages); in tlb_gather_mmu()
227 tlb->active = &tlb->local; in tlb_gather_mmu()
228 tlb->batch_count = 0; in tlb_gather_mmu()
231 tlb->batch = NULL; in tlb_gather_mmu()
234 __tlb_reset_range(tlb); in tlb_gather_mmu()
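tlb_gather_mmu() only primes the gather; nothing is flushed yet. The subtle matched line is 222: (start | (end + 1)) can be zero only when start == 0 and end == ~0UL, so fullmm is set exactly for a whole-address-space teardown (process exit), where the architecture may flush the entire TLB instead of a range. A sketch with the elided lines restored; the CONFIG_HAVE_RCU_TABLE_FREE guard around line 231 is an assumption from this era's source:

    void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
                unsigned long start, unsigned long end)
    {
        tlb->mm = mm;

        /* start == 0 && end == ~0UL is the only input for which
         * (start | (end + 1)) == 0: a full-mm teardown. */
        tlb->fullmm = !(start | (end + 1));
        tlb->need_flush_all = 0;

        /* Begin gathering into the small batch embedded in the gather. */
        tlb->local.next = NULL;
        tlb->local.nr   = 0;
        tlb->local.max  = ARRAY_SIZE(tlb->__pages);
        tlb->active     = &tlb->local;
        tlb->batch_count = 0;

    #ifdef CONFIG_HAVE_RCU_TABLE_FREE
        tlb->batch = NULL;
    #endif

        __tlb_reset_range(tlb);
    }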
237 static void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb) in tlb_flush_mmu_tlbonly() argument
239 if (!tlb->end) in tlb_flush_mmu_tlbonly()
242 tlb_flush(tlb); in tlb_flush_mmu_tlbonly()
243 mmu_notifier_invalidate_range(tlb->mm, tlb->start, tlb->end); in tlb_flush_mmu_tlbonly()
245 tlb_table_flush(tlb); in tlb_flush_mmu_tlbonly()
247 __tlb_reset_range(tlb); in tlb_flush_mmu_tlbonly()
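The matched lines of tlb_flush_mmu_tlbonly() encode an ordering that matters: nothing happens if no PTE was queued since the last reset (tlb->end == 0); otherwise the architecture TLB flush comes first, then the MMU notifiers (secondary MMUs such as KVM or IOMMUs), then the range is reset for the next fill. Sketched in full; the CONFIG_HAVE_RCU_TABLE_FREE guard around the tlb_table_flush() call is a reconstruction:

    static void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
    {
        /* Nothing queued since the last flush. */
        if (!tlb->end)
            return;

        tlb_flush(tlb);        /* arch-specific TLB invalidation */
        mmu_notifier_invalidate_range(tlb->mm, tlb->start, tlb->end);
    #ifdef CONFIG_HAVE_RCU_TABLE_FREE
        tlb_table_flush(tlb);
    #endif
        __tlb_reset_range(tlb);
    }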
250 static void tlb_flush_mmu_free(struct mmu_gather *tlb) in tlb_flush_mmu_free() argument
254 for (batch = &tlb->local; batch && batch->nr; batch = batch->next) { in tlb_flush_mmu_free()
258 tlb->active = &tlb->local; in tlb_flush_mmu_free()
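tlb_flush_mmu_free() is the second half: once the translations are gone, the gathered pages can really be released. Sketch, assuming the free_pages_and_swap_cache() call elided between lines 254 and 258:

    static void tlb_flush_mmu_free(struct mmu_gather *tlb)
    {
        struct mmu_gather_batch *batch;

        /* Safe only after tlb_flush_mmu_tlbonly(): no CPU can still
         * hold a live translation to any of these pages. */
        for (batch = &tlb->local; batch && batch->nr; batch = batch->next) {
            free_pages_and_swap_cache(batch->pages, batch->nr);
            batch->nr = 0;
        }
        tlb->active = &tlb->local;
    }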
261 void tlb_flush_mmu(struct mmu_gather *tlb) in tlb_flush_mmu() argument
263 tlb_flush_mmu_tlbonly(tlb); in tlb_flush_mmu()
264 tlb_flush_mmu_free(tlb); in tlb_flush_mmu()
271 void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end) in tlb_finish_mmu() argument
275 tlb_flush_mmu(tlb); in tlb_finish_mmu()
280 for (batch = tlb->local.next; batch; batch = next) { in tlb_finish_mmu()
284 tlb->local.next = NULL; in tlb_finish_mmu()
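tlb_flush_mmu() (lines 263-264) is just those two halves called in order. tlb_finish_mmu() then performs a final flush and tears the gather down; a sketch with the elided lines filled in (the check_pgt_cache() and free_pages() calls are reconstructions from this era):

    void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
    {
        struct mmu_gather_batch *batch, *next;

        tlb_flush_mmu(tlb);    /* flush and free anything still pending */

        /* keep the page table cache within bounds */
        check_pgt_cache();

        /* Free the overflow batches; tlb->local is embedded in the
         * mmu_gather itself and needs no freeing. */
        for (batch = tlb->local.next; batch; batch = next) {
            next = batch->next;
            free_pages((unsigned long)batch, 0);
        }
        tlb->local.next = NULL;
    }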
293 int __tlb_remove_page(struct mmu_gather *tlb, struct page *page) in __tlb_remove_page() argument
297 VM_BUG_ON(!tlb->end); in __tlb_remove_page()
299 batch = tlb->active; in __tlb_remove_page()
302 if (!tlb_next_batch(tlb)) in __tlb_remove_page()
304 batch = tlb->active; in __tlb_remove_page()
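__tlb_remove_page() is the producer side. The contract visible in the matched lines: the caller must already have queued a flush range (the VM_BUG_ON at line 297), and a zero return means the batch chain is full and the caller must flush before adding more. Sketch; the return-value convention is the one this era used, later kernels changed it:

    int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
    {
        struct mmu_gather_batch *batch;

        VM_BUG_ON(!tlb->end);    /* a PTE must have been queued for flush */

        batch = tlb->active;
        batch->pages[batch->nr++] = page;
        if (batch->nr == batch->max) {
            if (!tlb_next_batch(tlb))
                return 0;        /* full: caller must flush first */
            batch = tlb->active;
        }
        VM_BUG_ON_PAGE(batch->nr > batch->max, page);

        /* Slots still free in the active batch. */
        return batch->max - batch->nr;
    }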
350 void tlb_table_flush(struct mmu_gather *tlb) in tlb_table_flush() argument
352 struct mmu_table_batch **batch = &tlb->batch; in tlb_table_flush()
360 void tlb_remove_table(struct mmu_gather *tlb, void *table) in tlb_remove_table() argument
362 struct mmu_table_batch **batch = &tlb->batch; in tlb_remove_table()
368 if (atomic_read(&tlb->mm->mm_users) < 2) { in tlb_remove_table()
383 tlb_table_flush(tlb); in tlb_remove_table()
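Lines 350-383 belong to the CONFIG_HAVE_RCU_TABLE_FREE path, where page-table pages must outlive concurrent lockless walkers (e.g. fast GUP). Line 368 is the shortcut: with fewer than two mm_users no such walker can exist, so the table is freed synchronously. A sketch of both functions; the RCU callback name and the tlb_remove_table_one() fallback are filled in from context:

    void tlb_table_flush(struct mmu_gather *tlb)
    {
        struct mmu_table_batch **batch = &tlb->batch;

        if (*batch) {
            /* Defer the frees past an RCU grace period so any
             * lockless walker in flight still sees valid tables. */
            call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu);
            *batch = NULL;
        }
    }

    void tlb_remove_table(struct mmu_gather *tlb, void *table)
    {
        struct mmu_table_batch **batch = &tlb->batch;

        /* Fewer than two users of this mm: no concurrent page-table
         * walk is possible, free the table page immediately. */
        if (atomic_read(&tlb->mm->mm_users) < 2) {
            __tlb_remove_table(table);
            return;
        }

        if (*batch == NULL) {
            *batch = (struct mmu_table_batch *)
                __get_free_page(GFP_NOWAIT | __GFP_NOWARN);
            if (*batch == NULL) {
                /* No memory to batch: wait for walkers and
                 * free this one table synchronously. */
                tlb_remove_table_one(table);
                return;
            }
            (*batch)->nr = 0;
        }
        (*batch)->tables[(*batch)->nr++] = table;
        if ((*batch)->nr == MAX_TABLE_BATCH)
            tlb_table_flush(tlb);
    }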
392 static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd, in free_pte_range() argument
397 pte_free_tlb(tlb, token, addr); in free_pte_range()
398 atomic_long_dec(&tlb->mm->nr_ptes); in free_pte_range()
401 static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud, in free_pmd_range() argument
415 free_pte_range(tlb, pmd, addr); in free_pmd_range()
431 pmd_free_tlb(tlb, pmd, start); in free_pmd_range()
432 mm_dec_nr_pmds(tlb->mm); in free_pmd_range()
435 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd, in free_pud_range() argument
449 free_pmd_range(tlb, pud, addr, next, floor, ceiling); in free_pud_range()
465 pud_free_tlb(tlb, pud, start); in free_pud_range()
471 void free_pgd_range(struct mmu_gather *tlb, in free_pgd_range() argument
520 pgd = pgd_offset(tlb->mm, addr); in free_pgd_range()
525 free_pud_range(tlb, pgd, addr, next, floor, ceiling); in free_pgd_range()
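free_pte_range() through free_pgd_range() (lines 392-525) apply one pattern at each level of the page-table tree: walk the entries in [addr, end), recurse into present ones, then free the table page itself only if the floor/ceiling window permits. The lower two levels sketched as representatives; the PUD and PGD levels repeat the same shape, and the floor/ceiling comparisons below are reconstructed from this era's source:

    static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
                   unsigned long addr)
    {
        pgtable_t token = pmd_pgtable(*pmd);
        pmd_clear(pmd);
        pte_free_tlb(tlb, token, addr);    /* gathered, not freed yet */
        atomic_long_dec(&tlb->mm->nr_ptes);
    }

    static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
                      unsigned long addr, unsigned long end,
                      unsigned long floor, unsigned long ceiling)
    {
        pmd_t *pmd;
        unsigned long next;
        unsigned long start = addr;

        pmd = pmd_offset(pud, addr);
        do {
            next = pmd_addr_end(addr, end);
            if (pmd_none_or_clear_bad(pmd))
                continue;
            free_pte_range(tlb, pmd, addr);
        } while (pmd++, addr = next, addr != end);

        /* Free the PMD page itself only if the whole PUD-aligned span
         * it maps fits inside [floor, ceiling); a neighbouring VMA
         * outside that window may still be using it. */
        start &= PUD_MASK;
        if (start < floor)
            return;
        if (ceiling) {
            ceiling &= PUD_MASK;
            if (!ceiling)
                return;
        }
        if (end - 1 > ceiling - 1)
            return;

        pmd = pmd_offset(pud, start);
        pud_clear(pud);
        pmd_free_tlb(tlb, pmd, start);
        mm_dec_nr_pmds(tlb->mm);
    }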
529 void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma, in free_pgtables() argument
544 hugetlb_free_pgd_range(tlb, addr, vma->vm_end, in free_pgtables()
557 free_pgd_range(tlb, addr, vma->vm_end, in free_pgtables()
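free_pgtables() is the caller one level up: it walks the VMA list after the pages have already been zapped, unhooks each VMA from rmap, and bounds each free_pgd_range()/hugetlb_free_pgd_range() call with the neighbour's vm_start as ceiling, so a still-live neighbour's tables survive. Sketch; the rmap unlinking and the VMA-coalescing loop are filled in from context:

    void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma,
               unsigned long floor, unsigned long ceiling)
    {
        while (vma) {
            struct vm_area_struct *next = vma->vm_next;
            unsigned long addr = vma->vm_start;

            /* Hide the vma from rmap and truncation before its
             * page tables go away. */
            unlink_anon_vmas(vma);
            unlink_file_vma(vma);

            if (is_vm_hugetlb_page(vma)) {
                hugetlb_free_pgd_range(tlb, addr, vma->vm_end,
                    floor, next ? next->vm_start : ceiling);
            } else {
                /* Coalesce nearby non-hugetlb vmas into a
                 * single call down the freeing path. */
                while (next && next->vm_start <= vma->vm_end + PMD_SIZE
                       && !is_vm_hugetlb_page(next)) {
                    vma = next;
                    next = vma->vm_next;
                    unlink_anon_vmas(vma);
                    unlink_file_vma(vma);
                }
                free_pgd_range(tlb, addr, vma->vm_end,
                    floor, next ? next->vm_start : ceiling);
            }
            vma = next;
        }
    }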
1113 static unsigned long zap_pte_range(struct mmu_gather *tlb, in zap_pte_range() argument
1118 struct mm_struct *mm = tlb->mm; in zap_pte_range()
1152 tlb->fullmm); in zap_pte_range()
1153 tlb_remove_tlb_entry(tlb, pte, addr); in zap_pte_range()
1171 if (unlikely(!__tlb_remove_page(tlb, page))) { in zap_pte_range()
1197 pte_clear_not_present_full(mm, addr, pte, tlb->fullmm); in zap_pte_range()
1205 tlb_flush_mmu_tlbonly(tlb); in zap_pte_range()
1216 tlb_flush_mmu_free(tlb); in zap_pte_range()
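zap_pte_range() is where the gather's back-pressure shows. When __tlb_remove_page() returns 0 (line 1171) the scan stops; the TLB is flushed while the PTE lock is still held (line 1205) so no CPU can keep using a stale translation, the lock is dropped, the gathered pages are freed (line 1216), and the loop restarts where it broke off. A condensed sketch of that skeleton; rmap/RSS accounting, lazy-MMU bracketing, and the swap-entry details are elided, and the rest is reconstructed from this era's source:

    static unsigned long zap_pte_range(struct mmu_gather *tlb,
            struct vm_area_struct *vma, pmd_t *pmd,
            unsigned long addr, unsigned long end,
            struct zap_details *details)
    {
        struct mm_struct *mm = tlb->mm;
        int force_flush = 0;
        spinlock_t *ptl;
        pte_t *start_pte, *pte;

    again:
        start_pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
        pte = start_pte;
        do {
            pte_t ptent = *pte;
            if (pte_none(ptent))
                continue;
            if (pte_present(ptent)) {
                struct page *page = vm_normal_page(vma, addr, ptent);
                ptent = ptep_get_and_clear_full(mm, addr, pte,
                                tlb->fullmm);
                tlb_remove_tlb_entry(tlb, pte, addr);
                if (unlikely(!page))
                    continue;
                /* rmap and RSS accounting elided ... */
                if (unlikely(!__tlb_remove_page(tlb, page))) {
                    force_flush = 1;    /* gather is full */
                    addr += PAGE_SIZE;
                    break;
                }
                continue;
            }
            /* swap/migration entry handling elided ... */
            pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
        } while (pte++, addr += PAGE_SIZE, addr != end);

        /* Flush the TLB before the PTE lock drops, so no other CPU
         * can still translate to a page we are about to free. */
        if (force_flush)
            tlb_flush_mmu_tlbonly(tlb);
        pte_unmap_unlock(start_pte, ptl);

        /* The gather filled mid-range: free the pages now that the
         * lock is released, then go back for the rest of the range. */
        if (force_flush) {
            force_flush = 0;
            tlb_flush_mmu_free(tlb);
            if (addr != end)
                goto again;
        }
        return addr;
    }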
1225 static inline unsigned long zap_pmd_range(struct mmu_gather *tlb, in zap_pmd_range() argument
1239 if (!rwsem_is_locked(&tlb->mm->mmap_sem)) { in zap_pmd_range()
1248 } else if (zap_huge_pmd(tlb, vma, pmd, addr)) in zap_pmd_range()
1261 next = zap_pte_range(tlb, vma, pmd, addr, next, details); in zap_pmd_range()
1269 static inline unsigned long zap_pud_range(struct mmu_gather *tlb, in zap_pud_range() argument
1282 next = zap_pmd_range(tlb, vma, pud, addr, next, details); in zap_pud_range()
1288 static void unmap_page_range(struct mmu_gather *tlb, in unmap_page_range() argument
1300 tlb_start_vma(tlb, vma); in unmap_page_range()
1306 next = zap_pud_range(tlb, vma, pgd, addr, next, details); in unmap_page_range()
1308 tlb_end_vma(tlb, vma); in unmap_page_range()
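Above the PTE level the zap is a mechanical walk; the matched lines 1300 and 1308 bracket each VMA with tlb_start_vma()/tlb_end_vma(), arch hooks that let architectures with per-VMA flushing delimit their work. A sketch of the PGD-level walk (zap_pud_range() and zap_pmd_range() repeat it downward, with the THP split-or-zap special case at the PMD level, lines 1239-1248):

    static void unmap_page_range(struct mmu_gather *tlb,
                 struct vm_area_struct *vma,
                 unsigned long addr, unsigned long end,
                 struct zap_details *details)
    {
        pgd_t *pgd;
        unsigned long next;

        BUG_ON(addr >= end);
        tlb_start_vma(tlb, vma);    /* arch hook: per-VMA flush setup */
        pgd = pgd_offset(vma->vm_mm, addr);
        do {
            next = pgd_addr_end(addr, end);
            if (pgd_none_or_clear_bad(pgd))
                continue;
            next = zap_pud_range(tlb, vma, pgd, addr, next, details);
        } while (pgd++, addr = next, addr != end);
        tlb_end_vma(tlb, vma);      /* arch hook: per-VMA flush done */
    }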
1312 static void unmap_single_vma(struct mmu_gather *tlb, in unmap_single_vma() argument
1347 __unmap_hugepage_range_final(tlb, vma, start, end, NULL); in unmap_single_vma()
1351 unmap_page_range(tlb, vma, start, end, details); in unmap_single_vma()
1373 void unmap_vmas(struct mmu_gather *tlb, in unmap_vmas() argument
1381 unmap_single_vma(tlb, vma, start_addr, end_addr, NULL); in unmap_vmas()
1398 struct mmu_gather tlb; in zap_page_range() local
1402 tlb_gather_mmu(&tlb, mm, start, end); in zap_page_range()
1406 unmap_single_vma(&tlb, vma, start, end, details); in zap_page_range()
1408 tlb_finish_mmu(&tlb, start, end); in zap_page_range()
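zap_page_range() shows the intended lifecycle of the whole API in one place: drain the LRU pagevecs, tlb_gather_mmu(), notify secondary MMUs, unmap each VMA, end the notification, tlb_finish_mmu(). Sketch with the elided lines filled in (the notifier calls and hiwater update are reconstructions from this era); zap_page_range_single() below follows the same sequence for a single VMA, and unmap_vmas() is the same VMA loop used at exit/munmap time:

    void zap_page_range(struct vm_area_struct *vma, unsigned long start,
            unsigned long size, struct zap_details *details)
    {
        struct mm_struct *mm = vma->vm_mm;
        struct mmu_gather tlb;
        unsigned long end = start + size;

        lru_add_drain();
        tlb_gather_mmu(&tlb, mm, start, end);
        update_hiwater_rss(mm);
        mmu_notifier_invalidate_range_start(mm, start, end);
        for ( ; vma && vma->vm_start < end; vma = vma->vm_next)
            unmap_single_vma(&tlb, vma, start, end, details);
        mmu_notifier_invalidate_range_end(mm, start, end);
        tlb_finish_mmu(&tlb, start, end);
    }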
1424 struct mmu_gather tlb; in zap_page_range_single() local
1428 tlb_gather_mmu(&tlb, mm, address, end); in zap_page_range_single()
1431 unmap_single_vma(&tlb, vma, address, end, details); in zap_page_range_single()
1433 tlb_finish_mmu(&tlb, address, end); in zap_page_range_single()