Lines matching refs: dma_dom
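
All hits are in the AMD IOMMU driver (drivers/iommu/amd_iommu.c, as it looked before the aperture allocator was replaced by the common IOVA allocator). struct dma_ops_domain wraps a protection_domain together with the DMA-address allocator state: an array of aperture ranges (each with an allocation bitmap, an offset, and cached pte_pages), the running aperture_size, a next_address search hint, and a need_flush flag. After each cluster of hits below, a short reconstruction of the surrounding function is given; anything in those sketches that does not appear verbatim in a hit is inferred from context and should be treated as an assumption.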
283 static void alloc_unity_mapping(struct dma_ops_domain *dma_dom, in alloc_unity_mapping() argument
290 if (addr < dma_dom->aperture_size) in alloc_unity_mapping()
292 dma_dom->aperture[0]->bitmap); in alloc_unity_mapping()
300 struct dma_ops_domain *dma_dom) in init_unity_mappings_for_device() argument
310 alloc_unity_mapping(dma_dom, e); in init_unity_mappings_for_device()
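
The two functions above implement unity mappings: address ranges that firmware requires to be identity-mapped for a device, and which must therefore never be handed out by the DMA address allocator. A sketch consistent with the fragments; struct unity_map_entry's address_start/address_end/devid_start/devid_end fields, get_device_id() and the amd_iommu_unity_map list are filled in from context:

/*
 * Reserve one firmware-defined unity range in the allocator bitmap so
 * dma_ops_alloc_addresses() can never return an address inside it.
 * Only aperture[0] is touched; in this sketch unity ranges are assumed
 * to lie inside the initial aperture range.
 */
static void alloc_unity_mapping(struct dma_ops_domain *dma_dom,
                                struct unity_map_entry *e)
{
        u64 addr;

        for (addr = e->address_start; addr < e->address_end;
             addr += PAGE_SIZE) {
                if (addr < dma_dom->aperture_size)
                        __set_bit(addr >> PAGE_SHIFT,
                                  dma_dom->aperture[0]->bitmap);
        }
}

/* Apply every unity entry whose device-id range covers this device. */
static void init_unity_mappings_for_device(struct device *dev,
                                           struct dma_ops_domain *dma_dom)
{
        struct unity_map_entry *e;
        u16 devid = get_device_id(dev);

        list_for_each_entry(e, &amd_iommu_unity_map, list) {
                if (devid < e->devid_start || devid > e->devid_end)
                        continue;
                alloc_unity_mapping(dma_dom, e);
        }
}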
1440 static int alloc_new_range(struct dma_ops_domain *dma_dom, in alloc_new_range() argument
1443 int index = dma_dom->aperture_size >> APERTURE_RANGE_SHIFT; in alloc_new_range()
1454 dma_dom->aperture[index] = kzalloc(sizeof(struct aperture_range), gfp); in alloc_new_range()
1455 if (!dma_dom->aperture[index]) in alloc_new_range()
1458 dma_dom->aperture[index]->bitmap = (void *)get_zeroed_page(gfp); in alloc_new_range()
1459 if (!dma_dom->aperture[index]->bitmap) in alloc_new_range()
1462 dma_dom->aperture[index]->offset = dma_dom->aperture_size; in alloc_new_range()
1465 unsigned long address = dma_dom->aperture_size; in alloc_new_range()
1470 pte = alloc_pte(&dma_dom->domain, address, PAGE_SIZE, in alloc_new_range()
1475 dma_dom->aperture[index]->pte_pages[i] = pte_page; in alloc_new_range()
1481 old_size = dma_dom->aperture_size; in alloc_new_range()
1482 dma_dom->aperture_size += APERTURE_RANGE_SIZE; in alloc_new_range()
1486 dma_dom->aperture_size > MSI_ADDR_BASE_LO) { in alloc_new_range()
1493 dma_ops_reserve_addresses(dma_dom, spage, pages); in alloc_new_range()
1499 iommu->exclusion_start >= dma_dom->aperture[index]->offset in alloc_new_range()
1500 && iommu->exclusion_start < dma_dom->aperture_size) { in alloc_new_range()
1506 dma_ops_reserve_addresses(dma_dom, startpage, pages); in alloc_new_range()
1516 for (i = dma_dom->aperture[index]->offset; in alloc_new_range()
1517 i < dma_dom->aperture_size; in alloc_new_range()
1519 u64 *pte = fetch_pte(&dma_dom->domain, i, &pte_pgsize); in alloc_new_range()
1523 dma_ops_reserve_addresses(dma_dom, i >> PAGE_SHIFT, in alloc_new_range()
1527 update_domain(&dma_dom->domain); in alloc_new_range()
1532 update_domain(&dma_dom->domain); in alloc_new_range()
1534 free_page((unsigned long)dma_dom->aperture[index]->bitmap); in alloc_new_range()
1536 kfree(dma_dom->aperture[index]); in alloc_new_range()
1537 dma_dom->aperture[index] = NULL; in alloc_new_range()
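
alloc_new_range() is where the aperture grows: each call appends one APERTURE_RANGE_SIZE slice of IOVA space, then walls off anything inside the new slice that must not be allocated from (the MSI doorbell window, the IOMMU exclusion range, and pages already mapped by unity entries). A condensed reconstruction; the declarations, the APERTURE_MAX_RANGES bounds check and the for_each_iommu() walk are filled in from context:

static int alloc_new_range(struct dma_ops_domain *dma_dom,
                           bool populate, gfp_t gfp)
{
        int index = dma_dom->aperture_size >> APERTURE_RANGE_SHIFT;
        unsigned long i, old_size, pte_pgsize;
        struct amd_iommu *iommu;

        if (index >= APERTURE_MAX_RANGES)
                return -ENOMEM;

        /* Descriptor and allocation bitmap for the new range. */
        dma_dom->aperture[index] = kzalloc(sizeof(struct aperture_range), gfp);
        if (!dma_dom->aperture[index])
                return -ENOMEM;

        dma_dom->aperture[index]->bitmap = (void *)get_zeroed_page(gfp);
        if (!dma_dom->aperture[index]->bitmap)
                goto out_free;

        dma_dom->aperture[index]->offset = dma_dom->aperture_size;

        if (populate) {
                /* Pre-allocate the page-table pages for the range so a
                 * later GFP_ATOMIC mapping cannot fail on them; remember
                 * each one in pte_pages[]. */
                unsigned long address = dma_dom->aperture_size;
                int j, num_ptes = APERTURE_RANGE_PAGES / 512;
                u64 *pte, *pte_page;

                for (j = 0; j < num_ptes; ++j) {
                        pte = alloc_pte(&dma_dom->domain, address, PAGE_SIZE,
                                        &pte_page, gfp);
                        if (!pte)
                                goto out_free;
                        dma_dom->aperture[index]->pte_pages[j] = pte_page;
                        address += APERTURE_RANGE_SIZE / 64;
                }
        }

        old_size                = dma_dom->aperture_size;
        dma_dom->aperture_size += APERTURE_RANGE_SIZE;

        /* Keep the MSI doorbell window unallocatable. */
        if (old_size < MSI_ADDR_BASE_LO &&
            dma_dom->aperture_size > MSI_ADDR_BASE_LO) {
                unsigned long spage = MSI_ADDR_BASE_LO >> PAGE_SHIFT;
                int pages = iommu_num_pages(MSI_ADDR_BASE_LO, 0x10000,
                                            PAGE_SIZE);
                dma_ops_reserve_addresses(dma_dom, spage, pages);
        }

        /* Same for any IOMMU exclusion range inside the new slice. */
        for_each_iommu(iommu) {
                if (iommu->exclusion_start &&
                    iommu->exclusion_start >= dma_dom->aperture[index]->offset
                    && iommu->exclusion_start < dma_dom->aperture_size) {
                        unsigned long startpage;
                        int pages = iommu_num_pages(iommu->exclusion_start,
                                                    iommu->exclusion_length,
                                                    PAGE_SIZE);
                        startpage = iommu->exclusion_start >> PAGE_SHIFT;
                        dma_ops_reserve_addresses(dma_dom, startpage, pages);
                }
        }

        /* Pages already present in the page table (unity mappings)
         * must be marked reserved in the allocator, too. */
        for (i = dma_dom->aperture[index]->offset;
             i < dma_dom->aperture_size;
             i += pte_pgsize) {
                u64 *pte = fetch_pte(&dma_dom->domain, i, &pte_pgsize);
                if (!pte || !IOMMU_PTE_PRESENT(*pte))
                        continue;
                dma_ops_reserve_addresses(dma_dom, i >> PAGE_SHIFT,
                                          pte_pgsize >> PAGE_SHIFT);
        }

        update_domain(&dma_dom->domain);
        return 0;

out_free:
        update_domain(&dma_dom->domain);
        free_page((unsigned long)dma_dom->aperture[index]->bitmap);
        kfree(dma_dom->aperture[index]);
        dma_dom->aperture[index] = NULL;
        return -ENOMEM;
}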
1844 struct dma_ops_domain *dma_dom; in dma_ops_domain_alloc() local
1846 dma_dom = kzalloc(sizeof(struct dma_ops_domain), GFP_KERNEL); in dma_ops_domain_alloc()
1847 if (!dma_dom) in dma_ops_domain_alloc()
1850 if (protection_domain_init(&dma_dom->domain)) in dma_ops_domain_alloc()
1853 dma_dom->domain.mode = PAGE_MODE_2_LEVEL; in dma_ops_domain_alloc()
1854 dma_dom->domain.pt_root = (void *)get_zeroed_page(GFP_KERNEL); in dma_ops_domain_alloc()
1855 dma_dom->domain.flags = PD_DMA_OPS_MASK; in dma_ops_domain_alloc()
1856 dma_dom->domain.priv = dma_dom; in dma_ops_domain_alloc()
1857 if (!dma_dom->domain.pt_root) in dma_ops_domain_alloc()
1860 dma_dom->need_flush = false; in dma_ops_domain_alloc()
1862 add_domain_to_list(&dma_dom->domain); in dma_ops_domain_alloc()
1864 if (alloc_new_range(dma_dom, true, GFP_KERNEL)) in dma_ops_domain_alloc()
1871 dma_dom->aperture[0]->bitmap[0] = 1; in dma_ops_domain_alloc()
1872 dma_dom->next_address = 0; in dma_ops_domain_alloc()
1875 return dma_dom; in dma_ops_domain_alloc()
1878 dma_ops_domain_free(dma_dom); in dma_ops_domain_alloc()
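
dma_ops_domain_alloc() stitches the pieces together: allocate the wrapper, initialize the embedded protection_domain, give it a 2-level page-table root, flag it as a dma_ops domain, and grow the first aperture range. A reconstruction consistent with the hits; protection_domain_init(), add_domain_to_list() and dma_ops_domain_free() are taken on trust from the listing:

static struct dma_ops_domain *dma_ops_domain_alloc(void)
{
        struct dma_ops_domain *dma_dom;

        dma_dom = kzalloc(sizeof(struct dma_ops_domain), GFP_KERNEL);
        if (!dma_dom)
                return NULL;

        if (protection_domain_init(&dma_dom->domain))
                goto free_dma_dom;

        dma_dom->domain.mode    = PAGE_MODE_2_LEVEL;
        dma_dom->domain.pt_root = (void *)get_zeroed_page(GFP_KERNEL);
        dma_dom->domain.flags   = PD_DMA_OPS_MASK;
        dma_dom->domain.priv    = dma_dom;
        if (!dma_dom->domain.pt_root)
                goto free_dma_dom;

        dma_dom->need_flush = false;

        add_domain_to_list(&dma_dom->domain);

        /* Create the first 128 MB aperture range, populated. */
        if (alloc_new_range(dma_dom, true, GFP_KERNEL))
                goto free_dma_dom;

        /* Mark page 0 as allocated so a DMA address of 0 is never
         * returned and can serve as the error value. */
        dma_dom->aperture[0]->bitmap[0] = 1;
        dma_dom->next_address = 0;

        return dma_dom;

free_dma_dom:
        dma_ops_domain_free(dma_dom);
        return NULL;
}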
2437 struct dma_ops_domain *dma_dom, in __map_single() argument
2462 address = dma_ops_alloc_addresses(dev, dma_dom, pages, align_mask, in __map_single()
2470 dma_dom->next_address = dma_dom->aperture_size; in __map_single()
2472 if (alloc_new_range(dma_dom, false, GFP_ATOMIC)) in __map_single()
2484 ret = dma_ops_domain_map(dma_dom, start, paddr, dir); in __map_single()
2495 if (unlikely(dma_dom->need_flush && !amd_iommu_unmap_flush)) { in __map_single()
2496 domain_flush_tlb(&dma_dom->domain); in __map_single()
2497 dma_dom->need_flush = false; in __map_single()
2499 domain_flush_pages(&dma_dom->domain, address, size); in __map_single()
2508 dma_ops_domain_unmap(dma_dom, start); in __map_single()
2511 dma_ops_free_addresses(dma_dom, address, pages); in __map_single()
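
__map_single() is the allocator's consumer: reserve a run of IOVA pages, map them page by page, then flush lazily. The retry path is the interesting part: when dma_ops_alloc_addresses() fails, next_address is bumped to the old aperture end so the retry scans only the freshly added range. A trimmed sketch (statistics counters omitted; DMA_ERROR_CODE, amd_iommu_np_cache and the helpers are from context):

static dma_addr_t __map_single(struct device *dev,
                               struct dma_ops_domain *dma_dom,
                               phys_addr_t paddr, size_t size,
                               int dir, bool align, u64 dma_mask)
{
        dma_addr_t offset = paddr & ~PAGE_MASK;
        dma_addr_t address, start, ret;
        unsigned long align_mask = 0;
        unsigned int pages;
        int i;

        pages = iommu_num_pages(paddr, size, PAGE_SIZE);
        paddr &= PAGE_MASK;

        if (align)
                align_mask = (1UL << get_order(size)) - 1;

retry:
        address = dma_ops_alloc_addresses(dev, dma_dom, pages, align_mask,
                                          dma_mask);
        if (unlikely(address == DMA_ERROR_CODE)) {
                /* Let the allocator scan only the new range on retry. */
                dma_dom->next_address = dma_dom->aperture_size;

                if (alloc_new_range(dma_dom, false, GFP_ATOMIC))
                        goto out;

                /* Aperture grew by one range; try again. */
                goto retry;
        }

        start = address;
        for (i = 0; i < pages; ++i) {
                ret = dma_ops_domain_map(dma_dom, start, paddr, dir);
                if (ret == DMA_ERROR_CODE)
                        goto out_unmap;

                paddr += PAGE_SIZE;
                start += PAGE_SIZE;
        }
        address += offset;

        if (unlikely(dma_dom->need_flush && !amd_iommu_unmap_flush)) {
                /* A lazy unmap flush is pending: flush the whole TLB once. */
                domain_flush_tlb(&dma_dom->domain);
                dma_dom->need_flush = false;
        } else if (unlikely(amd_iommu_np_cache))
                /* IOMMU caches non-present entries: flush the new pages. */
                domain_flush_pages(&dma_dom->domain, address, size);

out:
        return address;

out_unmap:
        /* Roll back the partially created mapping. */
        for (--i; i >= 0; --i) {
                start -= PAGE_SIZE;
                dma_ops_domain_unmap(dma_dom, start);
        }

        dma_ops_free_addresses(dma_dom, address, pages);
        return DMA_ERROR_CODE;
}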
2520 static void __unmap_single(struct dma_ops_domain *dma_dom, in __unmap_single() argument
2530 (dma_addr + size > dma_dom->aperture_size)) in __unmap_single()
2539 dma_ops_domain_unmap(dma_dom, start); in __unmap_single()
2545 dma_ops_free_addresses(dma_dom, dma_addr, pages); in __unmap_single()
2547 if (amd_iommu_unmap_flush || dma_dom->need_flush) { in __unmap_single()
2548 domain_flush_pages(&dma_dom->domain, flush_addr, size); in __unmap_single()
2549 dma_dom->need_flush = false; in __unmap_single()
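
__unmap_single() mirrors the map path: sanity-check the handle against the aperture, unmap page by page, release the bitmap bits, and either flush immediately (amd_iommu_unmap_flush) or leave need_flush set for the next map. Reconstruction with the statistics counter dropped:

static void __unmap_single(struct dma_ops_domain *dma_dom,
                           dma_addr_t dma_addr, size_t size, int dir)
{
        dma_addr_t flush_addr;
        dma_addr_t i, start;
        unsigned int pages;

        /* Reject the error cookie and anything outside the aperture. */
        if ((dma_addr == DMA_ERROR_CODE) ||
            (dma_addr + size > dma_dom->aperture_size))
                return;

        flush_addr = dma_addr;
        pages      = iommu_num_pages(dma_addr, size, PAGE_SIZE);
        dma_addr  &= PAGE_MASK;
        start      = dma_addr;

        for (i = 0; i < pages; ++i) {
                dma_ops_domain_unmap(dma_dom, start);
                start += PAGE_SIZE;
        }

        dma_ops_free_addresses(dma_dom, dma_addr, pages);

        if (amd_iommu_unmap_flush || dma_dom->need_flush) {
                domain_flush_pages(&dma_dom->domain, flush_addr, size);
                dma_dom->need_flush = false;
        }
}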