Lines matching references to pd in drivers/infiniband/hw/usnic/usnic_uiom.c:
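These references are from the usNIC userspace IOMMU memory (uiom) layer; every line below touches the driver's protection-domain object through the pd pointer. As orientation before the listing, here is a hedged reconstruction of the fields those references imply; it is inferred from the lines below, not copied from the driver's header, and exact types (for instance rb_root versus rb_root_cached) may differ:

#include <linux/iommu.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>

/* Inferred shape of the protection domain; see the driver's header
 * for the authoritative definition. */
struct usnic_uiom_pd {
        struct iommu_domain *domain;   /* IOMMU mappings: lines 209, 474, 506 */
        spinlock_t lock;               /* guards the fields below */
        struct rb_root rb_root;        /* registered intervals: 231, 383, 399 */
        struct list_head devs;         /* attached devices: 485, 518, 537, 571 */
        int dev_cnt;                   /* length of devs: 519, 552, 565 */
};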
198 struct usnic_uiom_pd *pd) in usnic_uiom_unmap_sorted_intervals() argument
209 iommu_unmap(pd->domain, va, PAGE_SIZE); in usnic_uiom_unmap_sorted_intervals()
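Lines 198-209 show the unmap helper: the caller passes the owning pd, and every page of every interval is unmapped from pd->domain one PAGE_SIZE at a time. A minimal sketch of that loop, assuming a simplified interval node (the field and type names here are illustrative, not the driver's):

#include <linux/iommu.h>
#include <linux/list.h>
#include <linux/mm.h>

struct interval_node {                 /* hypothetical, simplified */
        struct list_head link;
        unsigned long start;           /* first virtual page number */
        unsigned long last;            /* last virtual page number */
};

static void unmap_sorted_intervals(struct list_head *intervals,
                                   struct usnic_uiom_pd *pd)
{
        struct interval_node *node;
        unsigned long vpn;

        list_for_each_entry(node, intervals, link) {
                /* One page per call, as line 209 does. */
                for (vpn = node->start; vpn <= node->last; vpn++)
                        iommu_unmap(pd->domain, vpn << PAGE_SHIFT,
                                    PAGE_SIZE);
        }
}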
216 static void __usnic_uiom_reg_release(struct usnic_uiom_pd *pd, in __usnic_uiom_reg_release() argument
230 spin_lock(&pd->lock); in __usnic_uiom_reg_release()
231 usnic_uiom_remove_interval(&pd->rb_root, vpn_start, in __usnic_uiom_reg_release()
233 usnic_uiom_unmap_sorted_intervals(&rm_intervals, pd); in __usnic_uiom_reg_release()
243 spin_unlock(&pd->lock); in __usnic_uiom_reg_release()
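Lines 216-243 are the release path: under pd->lock, the registration's virtual-page range is removed from the pd's interval tree, and the intervals that fall out (rm_intervals, line 233) go to the unmap helper above. A condensed sketch of that ordering; the vpn parameters and the trailing arguments of the remove call past what line 231 shows are assumptions:

/* Assumed prototypes; only the leading arguments are visible above. */
int usnic_uiom_remove_interval(struct rb_root *root, unsigned long start,
                               unsigned long last, struct list_head *removed);
void usnic_uiom_unmap_sorted_intervals(struct list_head *intervals,
                                       struct usnic_uiom_pd *pd);

static void reg_release(struct usnic_uiom_pd *pd, unsigned long vpn_start,
                        unsigned long vpn_last)
{
        LIST_HEAD(rm_intervals);

        spin_lock(&pd->lock);                          /* line 230 */
        /* Drop [vpn_start, vpn_last] from the tree; sub-intervals no
         * longer covered by any registration land on rm_intervals. */
        usnic_uiom_remove_interval(&pd->rb_root, vpn_start,
                                   vpn_last, &rm_intervals);
        usnic_uiom_unmap_sorted_intervals(&rm_intervals, pd);
        spin_unlock(&pd->lock);                        /* line 243 */
}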
257 struct usnic_uiom_pd *pd = uiomr->pd; in usnic_uiom_map_sorted_intervals() local
286 err = iommu_map(pd->domain, va_start, pa_start, in usnic_uiom_map_sorted_intervals()
303 err = iommu_map(pd->domain, va_start, pa_start, in usnic_uiom_map_sorted_intervals()
332 usnic_uiom_unmap_sorted_intervals(intervals, pd); in usnic_uiom_map_sorted_intervals()
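Lines 257-332 are the mirror-image map path: pd comes from uiomr->pd (line 257), physically contiguous page runs are coalesced and mapped into pd->domain (the two iommu_map sites at 286 and 303 flush a run mid-interval and at the interval boundary, respectively), and line 332 tears everything back down if a map fails. A minimal sketch of the coalesce-and-map idea, assuming the five-argument iommu_map of the kernels this listing comes from and a hypothetical per-page physical-address array:

#include <linux/iommu.h>
#include <linux/mm.h>

/* Hypothetical helper, not the driver's exact loop: map npages pages
 * starting at iova va, merging physically contiguous neighbours into
 * single iommu_map calls. */
static int map_contig_runs(struct usnic_uiom_pd *pd, unsigned long va,
                           phys_addr_t *pa, unsigned long npages, int prot)
{
        unsigned long i, run = 0;
        int err;

        for (i = 1; i <= npages; i++) {
                /* Flush when contiguity breaks or pages run out. */
                if (i == npages || pa[i] != pa[i - 1] + PAGE_SIZE) {
                        err = iommu_map(pd->domain, va + run * PAGE_SIZE,
                                        pa[run], (i - run) * PAGE_SIZE,
                                        prot);
                        if (err)
                                return err;  /* caller unmaps, as line 332 */
                        run = i;
                }
        }
        return 0;
}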
336 struct usnic_uiom_reg *usnic_uiom_reg_get(struct usnic_uiom_pd *pd, in usnic_uiom_reg_get() argument
369 uiomr->pd = pd; in usnic_uiom_reg_get()
379 spin_lock(&pd->lock); in usnic_uiom_reg_get()
383 &pd->rb_root, in usnic_uiom_reg_get()
399 err = usnic_uiom_insert_interval(&pd->rb_root, vpn_start, vpn_last, in usnic_uiom_reg_get()
408 spin_unlock(&pd->lock); in usnic_uiom_reg_get()
413 usnic_uiom_unmap_sorted_intervals(&sorted_diff_intervals, pd); in usnic_uiom_reg_get()
418 spin_unlock(&pd->lock); in usnic_uiom_reg_get()
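Lines 336-418 show the registration path and its unwind discipline: uiomr->pd is recorded (369), pd->lock is taken (379), the not-yet-mapped sub-ranges are computed against pd->rb_root (383), those diffs are mapped, the covering interval is inserted into the tree (399), and the lock is dropped (408). On failure, line 413 unmaps the freshly mapped diff intervals before the error-path unlock at 418, so the unmap runs under the same pd->lock hold and a concurrent caller can never see a tree entry whose mapping has been torn down. A schematic of that pattern; the helper names and prototypes are simplified stand-ins (the real ones carry protection and fault flags):

#include <linux/err.h>

struct usnic_uiom_reg {                        /* minimal slice; line 369 */
        struct usnic_uiom_pd *pd;
};

int get_intervals_diff(struct rb_root *root, unsigned long start,
                       unsigned long last, struct list_head *diff);
int map_sorted_intervals(struct list_head *intervals,
                         struct usnic_uiom_reg *uiomr);
int insert_interval(struct rb_root *root, unsigned long start,
                    unsigned long last);
void unmap_sorted_intervals(struct list_head *intervals,
                            struct usnic_uiom_pd *pd);

static struct usnic_uiom_reg *reg_get_range(struct usnic_uiom_reg *uiomr,
                                            unsigned long vpn_start,
                                            unsigned long vpn_last)
{
        struct usnic_uiom_pd *pd = uiomr->pd;
        LIST_HEAD(diff);
        int err;

        spin_lock(&pd->lock);                          /* line 379 */

        err = get_intervals_diff(&pd->rb_root, vpn_start, vpn_last, &diff);
        if (err)
                goto out_unlock;

        /* The map helper cleans up after itself on failure (line 332),
         * so a plain unlock suffices here. */
        err = map_sorted_intervals(&diff, uiomr);
        if (err)
                goto out_unlock;

        err = insert_interval(&pd->rb_root, vpn_start, vpn_last); /* 399 */
        if (err)
                goto out_unmap;

        spin_unlock(&pd->lock);                        /* line 408 */
        return uiomr;

out_unmap:
        unmap_sorted_intervals(&diff, pd);             /* line 413 */
out_unlock:
        spin_unlock(&pd->lock);                        /* line 418 */
        return ERR_PTR(err);
}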
429 __usnic_uiom_reg_release(uiomr->pd, uiomr, 1); in usnic_uiom_reg_release()
467 struct usnic_uiom_pd *pd; in usnic_uiom_alloc_pd() local
470 pd = kzalloc(sizeof(*pd), GFP_KERNEL); in usnic_uiom_alloc_pd()
471 if (!pd) in usnic_uiom_alloc_pd()
474 pd->domain = domain = iommu_domain_alloc(&pci_bus_type); in usnic_uiom_alloc_pd()
477 PTR_ERR(pd->domain)); in usnic_uiom_alloc_pd()
478 kfree(pd); in usnic_uiom_alloc_pd()
482 iommu_set_fault_handler(pd->domain, usnic_uiom_dma_fault, NULL); in usnic_uiom_alloc_pd()
484 spin_lock_init(&pd->lock); in usnic_uiom_alloc_pd()
485 INIT_LIST_HEAD(&pd->devs); in usnic_uiom_alloc_pd()
487 return pd; in usnic_uiom_alloc_pd()
490 void usnic_uiom_dealloc_pd(struct usnic_uiom_pd *pd) in usnic_uiom_dealloc_pd() argument
492 iommu_domain_free(pd->domain); in usnic_uiom_dealloc_pd()
493 kfree(pd); in usnic_uiom_dealloc_pd()
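Lines 467-493 pair the pd constructor and destructor: the constructor zero-allocates the pd, allocates an IOMMU domain on the PCI bus (474), installs a DMA fault handler (482), and initializes the lock and device list (484-485); the destructor frees the domain and then the pd. A condensed sketch assuming the bus-based iommu_domain_alloc(&pci_bus_type) API visible on line 474 (newer kernels have moved away from bus-based domain allocation); it treats a NULL domain as -ENOMEM, whereas line 477's PTR_ERR suggests the original also handled ERR_PTR returns:

#include <linux/err.h>
#include <linux/iommu.h>
#include <linux/pci.h>
#include <linux/slab.h>

int usnic_uiom_dma_fault(struct iommu_domain *domain, struct device *dev,
                         unsigned long iova, int flags, void *token);

static struct usnic_uiom_pd *alloc_pd(void)
{
        struct usnic_uiom_pd *pd;

        pd = kzalloc(sizeof(*pd), GFP_KERNEL);
        if (!pd)
                return ERR_PTR(-ENOMEM);

        pd->domain = iommu_domain_alloc(&pci_bus_type);    /* line 474 */
        if (!pd->domain) {
                kfree(pd);
                return ERR_PTR(-ENOMEM);
        }

        iommu_set_fault_handler(pd->domain, usnic_uiom_dma_fault, NULL);
        spin_lock_init(&pd->lock);
        INIT_LIST_HEAD(&pd->devs);
        return pd;
}

static void dealloc_pd(struct usnic_uiom_pd *pd)
{
        /* Caller must have detached every device first. */
        iommu_domain_free(pd->domain);
        kfree(pd);
}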
496 int usnic_uiom_attach_dev_to_pd(struct usnic_uiom_pd *pd, struct device *dev) in usnic_uiom_attach_dev_to_pd() argument
506 err = iommu_attach_device(pd->domain, dev); in usnic_uiom_attach_dev_to_pd()
517 spin_lock(&pd->lock); in usnic_uiom_attach_dev_to_pd()
518 list_add_tail(&uiom_dev->link, &pd->devs); in usnic_uiom_attach_dev_to_pd()
519 pd->dev_cnt++; in usnic_uiom_attach_dev_to_pd()
520 spin_unlock(&pd->lock); in usnic_uiom_attach_dev_to_pd()
525 iommu_detach_device(pd->domain, dev); in usnic_uiom_attach_dev_to_pd()
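Lines 496-525 attach a device: iommu_attach_device binds dev to pd->domain (506), and only then is the bookkeeping entry linked onto pd->devs with dev_cnt bumped, both under pd->lock (517-520). The detach at 525 sits on the error path, implying some check after the attach can still fail. A sketch, with the usnic_uiom_dev wrapper inferred (its link field appears at lines 518 and 537; the dev field is an assumption):

#include <linux/iommu.h>
#include <linux/slab.h>

struct usnic_uiom_dev {                        /* inferred wrapper */
        struct list_head link;
        struct device *dev;
};

static int attach_dev_to_pd(struct usnic_uiom_pd *pd, struct device *dev)
{
        struct usnic_uiom_dev *uiom_dev;
        int err;

        uiom_dev = kzalloc(sizeof(*uiom_dev), GFP_KERNEL);
        if (!uiom_dev)
                return -ENOMEM;
        uiom_dev->dev = dev;

        err = iommu_attach_device(pd->domain, dev);    /* line 506 */
        if (err)
                goto out_free;

        spin_lock(&pd->lock);                          /* lines 517-520 */
        list_add_tail(&uiom_dev->link, &pd->devs);
        pd->dev_cnt++;
        spin_unlock(&pd->lock);
        return 0;

out_free:
        kfree(uiom_dev);
        return err;
}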
531 void usnic_uiom_detach_dev_from_pd(struct usnic_uiom_pd *pd, struct device *dev) in usnic_uiom_detach_dev_from_pd() argument
536 spin_lock(&pd->lock); in usnic_uiom_detach_dev_from_pd()
537 list_for_each_entry(uiom_dev, &pd->devs, link) { in usnic_uiom_detach_dev_from_pd()
547 spin_unlock(&pd->lock); in usnic_uiom_detach_dev_from_pd()
552 pd->dev_cnt--; in usnic_uiom_detach_dev_from_pd()
553 spin_unlock(&pd->lock); in usnic_uiom_detach_dev_from_pd()
555 return iommu_detach_device(pd->domain, dev); in usnic_uiom_detach_dev_from_pd()
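Lines 531-555 are the inverse: the wrapper for dev is searched for on pd->devs under pd->lock; if it is missing, the lock is dropped and the function bails (547); otherwise the entry is unlinked, dev_cnt is decremented (552), and the lock is released before iommu_detach_device runs (555), keeping the IOMMU call outside the spinlock. Note that line 555's return of a void call is legal C; it simply tail-calls the void iommu_detach_device from a void function. A sketch, reusing the wrapper above:

static void detach_dev_from_pd(struct usnic_uiom_pd *pd, struct device *dev)
{
        struct usnic_uiom_dev *uiom_dev;
        bool found = false;

        spin_lock(&pd->lock);                          /* line 536 */
        list_for_each_entry(uiom_dev, &pd->devs, link) {
                if (uiom_dev->dev == dev) {
                        found = true;
                        break;
                }
        }

        if (!found) {
                spin_unlock(&pd->lock);                /* line 547 */
                return;
        }

        list_del(&uiom_dev->link);
        pd->dev_cnt--;                                 /* line 552 */
        spin_unlock(&pd->lock);

        kfree(uiom_dev);
        iommu_detach_device(pd->domain, dev);          /* line 555 */
}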
558 struct device **usnic_uiom_get_dev_list(struct usnic_uiom_pd *pd) in usnic_uiom_get_dev_list() argument
564 spin_lock(&pd->lock); in usnic_uiom_get_dev_list()
565 devs = kcalloc(pd->dev_cnt + 1, sizeof(*devs), GFP_ATOMIC); in usnic_uiom_get_dev_list()
571 list_for_each_entry(uiom_dev, &pd->devs, link) { in usnic_uiom_get_dev_list()
575 spin_unlock(&pd->lock); in usnic_uiom_get_dev_list()
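Lines 558-575 snapshot the attached devices into a NULL-terminated array for callers that cannot hold pd->lock. The kcalloc at 565 uses GFP_ATOMIC because pd->lock is already held (564), and the pd->dev_cnt + 1 sizing leaves room for the terminating NULL. A sketch of the same pattern:

#include <linux/err.h>
#include <linux/slab.h>

static struct device **get_dev_list(struct usnic_uiom_pd *pd)
{
        struct usnic_uiom_dev *uiom_dev;
        struct device **devs;
        int i = 0;

        spin_lock(&pd->lock);                          /* line 564 */
        /* +1 for the NULL terminator; atomic because the lock is held. */
        devs = kcalloc(pd->dev_cnt + 1, sizeof(*devs), GFP_ATOMIC);
        if (!devs) {
                devs = ERR_PTR(-ENOMEM);
                goto out;
        }

        list_for_each_entry(uiom_dev, &pd->devs, link)
                devs[i++] = uiom_dev->dev;
out:
        spin_unlock(&pd->lock);                        /* line 575 */
        return devs;
}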