Lines matching refs: pt
241 static void psb_mmu_free_pt(struct psb_mmu_pt *pt) in psb_mmu_free_pt() argument
243 __free_page(pt->p); in psb_mmu_free_pt()
244 kfree(pt); in psb_mmu_free_pt()
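Taken together, the matches outline the lifetime of a page-table (pt) object. Below is a minimal sketch of the structure these helpers operate on, reconstructed only from the fields the matched lines touch (p, v, pd, index, count), plus the two-line free path from lines 241-244; the driver's own header remains authoritative.

/* Sketch of the page-table bookkeeping object, assuming only the
 * fields referenced by the matches above. */
struct psb_mmu_pt {
        struct psb_mmu_pd *pd;   /* owning page directory */
        uint32_t index;          /* slot occupied in pd->tables[] / the PDE array */
        uint32_t count;          /* number of valid PTEs held in this table */
        struct page *p;          /* backing page holding the PTEs */
        uint32_t *v;             /* kmap_atomic() view of p while mapped */
};

/* Freeing mirrors allocation: release the backing page, then the
 * bookkeeping record (lines 241-244). */
static void psb_mmu_free_pt(struct psb_mmu_pt *pt)
{
        __free_page(pt->p);
        kfree(pt);
}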
252 struct psb_mmu_pt *pt; in psb_mmu_free_pagedir() local
265 pt = pd->tables[i]; in psb_mmu_free_pagedir()
266 if (pt) in psb_mmu_free_pagedir()
267 psb_mmu_free_pt(pt); in psb_mmu_free_pagedir()
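Directory teardown (lines 265-267) just visits every directory slot and frees whatever table is attached. A hedged sketch of that loop; the bound of 1024 slots is an assumption for a classic two-level 32-bit layout, the driver uses its own constant.

static void psb_mmu_free_pagedir_tables_sketch(struct psb_mmu_pd *pd)
{
        int i;

        for (i = 0; i < 1024; ++i) {
                struct psb_mmu_pt *pt = pd->tables[i];

                if (pt)                 /* slot may never have been populated */
                        psb_mmu_free_pt(pt);
        }
}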
280 struct psb_mmu_pt *pt = kmalloc(sizeof(*pt), GFP_KERNEL); in psb_mmu_alloc_pt() local
289 if (!pt) in psb_mmu_alloc_pt()
292 pt->p = alloc_page(GFP_DMA32); in psb_mmu_alloc_pt()
293 if (!pt->p) { in psb_mmu_alloc_pt()
294 kfree(pt); in psb_mmu_alloc_pt()
300 v = kmap_atomic(pt->p); in psb_mmu_alloc_pt()
319 pt->count = 0; in psb_mmu_alloc_pt()
320 pt->pd = pd; in psb_mmu_alloc_pt()
321 pt->index = 0; in psb_mmu_alloc_pt()
323 return pt; in psb_mmu_alloc_pt()
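Allocation pairs a kmalloc'ed record with a GFP_DMA32 backing page, maps the page, fills every slot with the directory's invalid PTE so stray lookups fault predictably, and starts the reference count at zero (lines 280-323). A condensed sketch; the locking and cache-flush work of the real function is omitted.

/* Condensed sketch of psb_mmu_alloc_pt(); the real function also takes
 * the driver lock and clflushes the fresh table when a hardware
 * context is bound. */
static struct psb_mmu_pt *psb_mmu_alloc_pt_sketch(struct psb_mmu_pd *pd)
{
        struct psb_mmu_pt *pt = kmalloc(sizeof(*pt), GFP_KERNEL);
        uint32_t *v;
        int i;

        if (!pt)
                return NULL;

        pt->p = alloc_page(GFP_DMA32);  /* table must be reachable by the device */
        if (!pt->p) {
                kfree(pt);
                return NULL;
        }

        v = kmap_atomic(pt->p);
        for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
                v[i] = pd->invalid_pte; /* every entry starts invalid */
        kunmap_atomic(v);

        pt->count = 0;                  /* no live PTEs yet */
        pt->pd = pd;
        pt->index = 0;                  /* set for real when linked into pd */
        return pt;
}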
330 struct psb_mmu_pt *pt; in psb_mmu_pt_alloc_map_lock() local
335 pt = pd->tables[index]; in psb_mmu_pt_alloc_map_lock()
336 while (!pt) { in psb_mmu_pt_alloc_map_lock()
338 pt = psb_mmu_alloc_pt(pd); in psb_mmu_pt_alloc_map_lock()
339 if (!pt) in psb_mmu_pt_alloc_map_lock()
345 psb_mmu_free_pt(pt); in psb_mmu_pt_alloc_map_lock()
347 pt = pd->tables[index]; in psb_mmu_pt_alloc_map_lock()
352 pd->tables[index] = pt; in psb_mmu_pt_alloc_map_lock()
353 v[index] = (page_to_pfn(pt->p) << 12) | pd->pd_mask; in psb_mmu_pt_alloc_map_lock()
354 pt->index = index; in psb_mmu_pt_alloc_map_lock()
362 pt->v = kmap_atomic(pt->p); in psb_mmu_pt_alloc_map_lock()
363 return pt; in psb_mmu_pt_alloc_map_lock()
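psb_mmu_pt_alloc_map_lock() demand-allocates the table covering addr: the while (!pt) retry at line 336 handles the race where another thread installs a table while the lock is dropped for allocation (the loser frees its copy at line 345). Once the winner links the table in, the PDE written at line 353 packs the table's page-frame number with the directory's flag bits, and the function returns with the table kmapped (line 362) and the lock held. A sketch of that PDE encoding, assuming the usual 4 KiB frames:

/* Sketch: how the PDE written at line 353 is composed.  Shifting the
 * PFN by 12 places it in the address bits of a 4 KiB-aligned entry;
 * pd_mask supplies the valid/flag bits the hardware walker expects. */
static inline uint32_t psb_mmu_make_pde_sketch(struct psb_mmu_pt *pt,
                                               struct psb_mmu_pd *pd)
{
        return (page_to_pfn(pt->p) << 12) | pd->pd_mask;
}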
370 struct psb_mmu_pt *pt; in psb_mmu_pt_map_lock() local
374 pt = pd->tables[index]; in psb_mmu_pt_map_lock()
375 if (!pt) { in psb_mmu_pt_map_lock()
379 pt->v = kmap_atomic(pt->p); in psb_mmu_pt_map_lock()
380 return pt; in psb_mmu_pt_map_lock()
383 static void psb_mmu_pt_unmap_unlock(struct psb_mmu_pt *pt) in psb_mmu_pt_unmap_unlock() argument
385 struct psb_mmu_pd *pd = pt->pd; in psb_mmu_pt_unmap_unlock()
388 kunmap_atomic(pt->v); in psb_mmu_pt_unmap_unlock()
389 if (pt->count == 0) { in psb_mmu_pt_unmap_unlock()
391 v[pt->index] = pd->invalid_pde; in psb_mmu_pt_unmap_unlock()
392 pd->tables[pt->index] = NULL; in psb_mmu_pt_unmap_unlock()
395 psb_mmu_clflush(pd->driver, (void *)&v[pt->index]); in psb_mmu_pt_unmap_unlock()
398 kunmap_atomic(pt->v); in psb_mmu_pt_unmap_unlock()
400 psb_mmu_free_pt(pt); in psb_mmu_pt_unmap_unlock()
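The unmap/unlock path is where the per-table reference count pays off: when count has dropped to zero (line 389) the PDE is rewritten to invalid_pde, the pd->tables[] slot is cleared, the modified PDE is clflushed so the device sees it, and the table is freed (line 400). A condensed sketch of that tail, with the driver lock and hardware-context check elided.

/* Condensed sketch of the teardown half of psb_mmu_pt_unmap_unlock();
 * the real function holds the driver lock across this and only
 * flushes when a hardware context is attached. */
static void psb_mmu_pt_unmap_unlock_sketch(struct psb_mmu_pt *pt)
{
        struct psb_mmu_pd *pd = pt->pd;

        kunmap_atomic(pt->v);                   /* drop the PTE mapping first */
        if (pt->count == 0) {                   /* last live entry went away */
                uint32_t *v = kmap_atomic(pd->p);

                v[pt->index] = pd->invalid_pde; /* detach the table's PDE */
                pd->tables[pt->index] = NULL;
                psb_mmu_clflush(pd->driver, (void *)&v[pt->index]);
                kunmap_atomic(v);
                psb_mmu_free_pt(pt);            /* nothing references it now */
        }
}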
406 static inline void psb_mmu_set_pte(struct psb_mmu_pt *pt, unsigned long addr, in psb_mmu_set_pte() argument
409 pt->v[psb_mmu_pt_index(addr)] = pte; in psb_mmu_set_pte()
412 static inline void psb_mmu_invalidate_pte(struct psb_mmu_pt *pt, in psb_mmu_invalidate_pte() argument
415 pt->v[psb_mmu_pt_index(addr)] = pt->pd->invalid_pte; in psb_mmu_invalidate_pte()
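Both single-entry helpers reduce to indexing the mapped table: psb_mmu_pt_index() extracts the PTE slot from the address, and the entry is either overwritten with the caller's pte or reset to the directory's invalid_pte. A minimal model of that index math; the 4 KiB page size and 10-bit index are assumptions matching a classic two-level 32-bit layout, the driver's own macro is authoritative.

/* Model of the PTE indexing these inline helpers rely on. */
static inline uint32_t psb_mmu_pt_index_model(unsigned long addr)
{
        return (addr >> 12) & 0x3ff;    /* assumed: bits 21..12 select the PTE */
}

static inline void psb_mmu_set_pte_model(struct psb_mmu_pt *pt,
                                         unsigned long addr, uint32_t pte)
{
        pt->v[psb_mmu_pt_index_model(addr)] = pte;
}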
513 struct psb_mmu_pt *pt; in psb_mmu_flush_ptes() local
542 pt = psb_mmu_pt_map_lock(pd, addr); in psb_mmu_flush_ptes()
543 if (!pt) in psb_mmu_flush_ptes()
546 psb_clflush(&pt->v[psb_mmu_pt_index(addr)]); in psb_mmu_flush_ptes()
550 psb_mmu_pt_unmap_unlock(pt); in psb_mmu_flush_ptes()
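psb_mmu_flush_ptes() walks the affected range, maps each covering table with psb_mmu_pt_map_lock() (line 542), skips holes where no table was ever allocated (line 543), clflushes the cache line of every touched PTE (line 546) so the device's page walker reads current entries, and unmaps (line 550). A sketch of the per-table step; the rows/pages arithmetic and the has_clflush fallback are omitted, and 'end' is an assumed parameter name for the first address past this table's range.

static void psb_mmu_flush_ptes_sketch(struct psb_mmu_pd *pd,
                                      unsigned long addr, unsigned long end)
{
        struct psb_mmu_pt *pt = psb_mmu_pt_map_lock(pd, addr);

        if (!pt)
                return;                 /* hole: nothing to flush */
        for (; addr < end; addr += PAGE_SIZE)
                psb_clflush(&pt->v[psb_mmu_pt_index(addr)]);
        psb_mmu_pt_unmap_unlock(pt);
}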
568 struct psb_mmu_pt *pt; in psb_mmu_remove_pfn_sequence() local
581 pt = psb_mmu_pt_alloc_map_lock(pd, addr); in psb_mmu_remove_pfn_sequence()
582 if (!pt) in psb_mmu_remove_pfn_sequence()
585 psb_mmu_invalidate_pte(pt, addr); in psb_mmu_remove_pfn_sequence()
586 --pt->count; in psb_mmu_remove_pfn_sequence()
588 psb_mmu_pt_unmap_unlock(pt); in psb_mmu_remove_pfn_sequence()
608 struct psb_mmu_pt *pt; in psb_mmu_remove_pages() local
637 pt = psb_mmu_pt_map_lock(pd, addr); in psb_mmu_remove_pages()
638 if (!pt) in psb_mmu_remove_pages()
641 psb_mmu_invalidate_pte(pt, addr); in psb_mmu_remove_pages()
642 --pt->count; in psb_mmu_remove_pages()
645 psb_mmu_pt_unmap_unlock(pt); in psb_mmu_remove_pages()
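Unmapping follows the same walk (lines 637-645): for each page the covering table is looked up, the PTE is reset to invalid, and the table's count is decremented so psb_mmu_pt_unmap_unlock() can reclaim it once it empties. A sketch of that walk; the real code batches all PTEs of one table under a single map_lock, the sketch maps per page for brevity.

/* Sketch of the removal walk shared in spirit by
 * psb_mmu_remove_pfn_sequence() and psb_mmu_remove_pages(). */
static void psb_mmu_remove_range_sketch(struct psb_mmu_pd *pd,
                                        unsigned long start,
                                        unsigned long end)
{
        struct psb_mmu_pt *pt;
        unsigned long addr;

        for (addr = start; addr < end; addr += PAGE_SIZE) {
                pt = psb_mmu_pt_map_lock(pd, addr);
                if (!pt)
                        continue;               /* never mapped: skip the hole */
                psb_mmu_invalidate_pte(pt, addr);
                --pt->count;                    /* one fewer live entry */
                psb_mmu_pt_unmap_unlock(pt);    /* frees the table at count 0 */
        }
}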
664 struct psb_mmu_pt *pt; in psb_mmu_insert_pfn_sequence() local
679 pt = psb_mmu_pt_alloc_map_lock(pd, addr); in psb_mmu_insert_pfn_sequence()
680 if (!pt) { in psb_mmu_insert_pfn_sequence()
686 psb_mmu_set_pte(pt, addr, pte); in psb_mmu_insert_pfn_sequence()
687 pt->count++; in psb_mmu_insert_pfn_sequence()
689 psb_mmu_pt_unmap_unlock(pt); in psb_mmu_insert_pfn_sequence()
711 struct psb_mmu_pt *pt; in psb_mmu_insert_pages() local
743 pt = psb_mmu_pt_alloc_map_lock(pd, addr); in psb_mmu_insert_pages()
744 if (!pt) in psb_mmu_insert_pages()
749 psb_mmu_set_pte(pt, addr, pte); in psb_mmu_insert_pages()
750 pt->count++; in psb_mmu_insert_pages()
752 psb_mmu_pt_unmap_unlock(pt); in psb_mmu_insert_pages()
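Insertion mirrors removal (lines 743-752): the table covering each address is looked up or created with psb_mmu_pt_alloc_map_lock(), the new PTE is written, and count is incremented so the table stays pinned while entries are live. A sketch of the loop, one map/unmap per page for brevity; psb_mmu_mask_pte() (packing a PFN and type flags into a PTE value) is assumed from context and does not appear in the matches above.

static int psb_mmu_insert_range_sketch(struct psb_mmu_pd *pd,
                                       struct page **pages,
                                       unsigned long addr,
                                       int num_pages, int type)
{
        struct psb_mmu_pt *pt;
        uint32_t pte;
        int i;

        for (i = 0; i < num_pages; ++i, addr += PAGE_SIZE) {
                pt = psb_mmu_pt_alloc_map_lock(pd, addr);
                if (!pt)
                        return -ENOMEM;         /* table allocation failed */
                pte = psb_mmu_mask_pte(page_to_pfn(pages[i]), type);
                psb_mmu_set_pte(pt, addr, pte);
                pt->count++;                    /* keep the table pinned */
                psb_mmu_pt_unmap_unlock(pt);
        }
        return 0;
}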
777 struct psb_mmu_pt *pt; in psb_mmu_virtual_to_pfn() local
782 pt = psb_mmu_pt_map_lock(pd, virtual); in psb_mmu_virtual_to_pfn()
783 if (!pt) { in psb_mmu_virtual_to_pfn()
801 tmp = pt->v[psb_mmu_pt_index(virtual)]; in psb_mmu_virtual_to_pfn()
808 psb_mmu_pt_unmap_unlock(pt); in psb_mmu_virtual_to_pfn()
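Address translation is a read-only version of the same walk (lines 782-808): map the table for the address, read the PTE, and shift the frame address down to a PFN; the !pt branch at line 783 covers the case where no table was ever allocated. A sketch of the hit path, leaving out that miss path and the validity check against invalid_pte.

static unsigned long psb_mmu_virtual_to_pfn_sketch(struct psb_mmu_pd *pd,
                                                   uint32_t virtual)
{
        struct psb_mmu_pt *pt = psb_mmu_pt_map_lock(pd, virtual);
        uint32_t tmp = 0;

        if (pt) {
                tmp = pt->v[psb_mmu_pt_index(virtual)];
                psb_mmu_pt_unmap_unlock(pt);
        }
        return tmp >> PAGE_SHIFT;       /* strip flag bits, keep the frame */
}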