Lines matching refs:pd (cross-reference hits for the identifier pd) in drivers/infiniband/hw/usnic/usnic_uiom.c
198 struct usnic_uiom_pd *pd) in usnic_uiom_unmap_sorted_intervals() argument
209 iommu_unmap(pd->domain, va, PAGE_SIZE); in usnic_uiom_unmap_sorted_intervals()
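Lines 198 and 209 above sit in usnic_uiom_unmap_sorted_intervals(), which walks a list of page-number intervals and tears the IOMMU mappings down one page at a time. A minimal sketch of that loop follows; it assumes the usual kernel headers (linux/iommu.h, linux/list.h, linux/slab.h, linux/err.h) and the driver's own types, as do all the sketches below. The interval field names (start, last, link) are assumptions, not confirmed by the listing:

/* Sketch: unmap every page covered by a sorted interval list.
 * Interval field names (start, last, link) are assumed. */
static void usnic_uiom_unmap_sorted_intervals(struct list_head *intervals,
                                                struct usnic_uiom_pd *pd)
{
        struct usnic_uiom_interval_node *interval, *tmp;
        unsigned long va, size;

        list_for_each_entry_safe(interval, tmp, intervals, link) {
                va = interval->start << PAGE_SHIFT;
                size = (interval->last - interval->start + 1) << PAGE_SHIFT;
                while (size > 0) {
                        /* line 209: one PAGE_SIZE unmap per iteration */
                        iommu_unmap(pd->domain, va, PAGE_SIZE);
                        va += PAGE_SIZE;
                        size -= PAGE_SIZE;
                }
        }
}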
216 static void __usnic_uiom_reg_release(struct usnic_uiom_pd *pd, in __usnic_uiom_reg_release() argument
230 spin_lock(&pd->lock); in __usnic_uiom_reg_release()
231 usnic_uiom_remove_interval(&pd->rb_root, vpn_start, in __usnic_uiom_reg_release()
233 usnic_uiom_unmap_sorted_intervals(&rm_intervals, pd); in __usnic_uiom_reg_release()
243 spin_unlock(&pd->lock); in __usnic_uiom_reg_release()
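Lines 216 through 243 cover __usnic_uiom_reg_release(): under pd->lock the registration's page range is removed from the pd's interval tree and whatever intervals fall out are unmapped. A hedged sketch of that locking pattern; how vpn_start and vpn_last are derived from the registration, and the page put-back driven by the dirty flag, are assumptions:

static void __usnic_uiom_reg_release(struct usnic_uiom_pd *pd,
                                        struct usnic_uiom_reg *uiomr,
                                        int dirty)
{
        LIST_HEAD(rm_intervals);
        unsigned long vpn_start, vpn_last;

        /* assumed: bounds come from the registration's va/npages fields */
        vpn_start = (uiomr->va & PAGE_MASK) >> PAGE_SHIFT;
        vpn_last = vpn_start + uiomr->npages - 1;

        spin_lock(&pd->lock);                           /* line 230 */
        usnic_uiom_remove_interval(&pd->rb_root, vpn_start,
                                        vpn_last, &rm_intervals);
        usnic_uiom_unmap_sorted_intervals(&rm_intervals, pd);
        /* freeing the removed interval nodes and putting the pinned
         * pages back (honoring dirty) happens here in the original */
        spin_unlock(&pd->lock);                         /* line 243 */
}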
257 struct usnic_uiom_pd *pd = uiomr->pd; in usnic_uiom_map_sorted_intervals() local
286 err = iommu_map(pd->domain, va_start, pa_start, in usnic_uiom_map_sorted_intervals()
303 err = iommu_map(pd->domain, va_start, pa_start, in usnic_uiom_map_sorted_intervals()
332 usnic_uiom_unmap_sorted_intervals(intervals, pd); in usnic_uiom_map_sorted_intervals()
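Lines 257 through 332 belong to usnic_uiom_map_sorted_intervals(), the mapping counterpart: it pulls the protection domain out of the registration (line 257), issues iommu_map() calls (lines 286 and 303, where the original coalesces physically contiguous pages into larger mappings), and on failure unwinds through the unmap helper (line 332). A page-at-a-time sketch of that shape; usnic_pa_of_vpn() is a hypothetical helper standing in for the elided walk over the registration's pinned page chunks, and the protection flags are assumptions:

/* hypothetical helper: physical address backing a user page number;
 * the real code derives this from the registration's pinned pages */
static phys_addr_t usnic_pa_of_vpn(struct usnic_uiom_reg *uiomr,
                                        unsigned long vpn);

static int usnic_uiom_map_sorted_intervals(struct list_head *intervals,
                                                struct usnic_uiom_reg *uiomr)
{
        struct usnic_uiom_pd *pd = uiomr->pd;           /* line 257 */
        struct usnic_uiom_interval_node *interval;
        unsigned long vpn;
        int err = 0;

        list_for_each_entry(interval, intervals, link) {
                /* the original coalesces physically contiguous pages into
                 * fewer, larger iommu_map() calls (lines 286 and 303);
                 * page-at-a-time shown here for shape only */
                for (vpn = interval->start; vpn <= interval->last; vpn++) {
                        err = iommu_map(pd->domain, vpn << PAGE_SHIFT,
                                        usnic_pa_of_vpn(uiomr, vpn),
                                        PAGE_SIZE,
                                        IOMMU_READ | IOMMU_WRITE); /* assumed */
                        if (err)
                                goto err_out;
                }
        }

        return 0;

err_out:
        /* line 332: roll back everything mapped so far */
        usnic_uiom_unmap_sorted_intervals(intervals, pd);
        return err;
}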
336 struct usnic_uiom_reg *usnic_uiom_reg_get(struct usnic_uiom_pd *pd, in usnic_uiom_reg_get() argument
369 uiomr->pd = pd; in usnic_uiom_reg_get()
379 spin_lock(&pd->lock); in usnic_uiom_reg_get()
383 &pd->rb_root, in usnic_uiom_reg_get()
399 err = usnic_uiom_insert_interval(&pd->rb_root, vpn_start, vpn_last, in usnic_uiom_reg_get()
408 spin_unlock(&pd->lock); in usnic_uiom_reg_get()
413 usnic_uiom_unmap_sorted_intervals(&sorted_diff_intervals, pd); in usnic_uiom_reg_get()
418 spin_unlock(&pd->lock); in usnic_uiom_reg_get()
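Lines 336 through 418 outline usnic_uiom_reg_get(), the registration entry point: the new registration records its pd (line 369), then under pd->lock the requested page range is diffed against pd->rb_root (line 383) so only not-yet-mapped pages get mapped, the interval is inserted (line 399), and the lock is dropped on success (line 408); the error path unmaps the partially built difference intervals (line 413) before unlocking (line 418). A condensed sketch; the helper parameter lists, the page pinning, and the diff computation are assumptions or elided:

struct usnic_uiom_reg *usnic_uiom_reg_get(struct usnic_uiom_pd *pd,
                                                unsigned long addr,
                                                size_t size, int writable)
{
        struct usnic_uiom_reg *uiomr;
        unsigned long vpn_start, vpn_last;
        LIST_HEAD(sorted_diff_intervals);
        int flags = writable ? IOMMU_READ | IOMMU_WRITE : IOMMU_READ;
        int err;

        uiomr = kmalloc(sizeof(*uiomr), GFP_KERNEL);
        if (!uiomr)
                return ERR_PTR(-ENOMEM);

        vpn_start = (addr & PAGE_MASK) >> PAGE_SHIFT;
        vpn_last = (addr + size - 1) >> PAGE_SHIFT;
        uiomr->pd = pd;                                 /* line 369 */
        /* page pinning elided */

        spin_lock(&pd->lock);                           /* line 379 */
        /* line 383: diff the request against pd->rb_root into
         * sorted_diff_intervals, then map only the difference (elided) */
        err = usnic_uiom_insert_interval(&pd->rb_root, vpn_start,
                                                vpn_last, flags); /* line 399 */
        if (err)
                goto err_unmap;
        spin_unlock(&pd->lock);                         /* line 408 */
        return uiomr;

err_unmap:
        /* line 413: tear down the partially built mappings */
        usnic_uiom_unmap_sorted_intervals(&sorted_diff_intervals, pd);
        spin_unlock(&pd->lock);                         /* line 418 */
        kfree(uiomr);
        return ERR_PTR(err);
}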
429 __usnic_uiom_reg_release(uiomr->pd, uiomr, 1); in usnic_uiom_reg_release()
467 struct usnic_uiom_pd *pd; in usnic_uiom_alloc_pd() local
470 pd = kzalloc(sizeof(*pd), GFP_KERNEL); in usnic_uiom_alloc_pd()
471 if (!pd) in usnic_uiom_alloc_pd()
474 pd->domain = domain = iommu_domain_alloc(&pci_bus_type); in usnic_uiom_alloc_pd()
477 kfree(pd); in usnic_uiom_alloc_pd()
481 iommu_set_fault_handler(pd->domain, usnic_uiom_dma_fault, NULL); in usnic_uiom_alloc_pd()
483 spin_lock_init(&pd->lock); in usnic_uiom_alloc_pd()
484 INIT_LIST_HEAD(&pd->devs); in usnic_uiom_alloc_pd()
486 return pd; in usnic_uiom_alloc_pd()
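Lines 467 through 486 show essentially all of usnic_uiom_alloc_pd(): allocate the pd, allocate an IOMMU domain on the PCI bus, install a DMA fault handler, and initialize the lock and the attached-device list. Reassembled from the listing; only the error return values and the domain pointer's declaration are assumptions:

struct usnic_uiom_pd *usnic_uiom_alloc_pd(void)
{
        struct usnic_uiom_pd *pd;
        struct iommu_domain *domain;

        pd = kzalloc(sizeof(*pd), GFP_KERNEL);          /* line 470 */
        if (!pd)
                return ERR_PTR(-ENOMEM);                /* assumed */

        pd->domain = domain = iommu_domain_alloc(&pci_bus_type);
        if (!domain) {
                kfree(pd);                              /* line 477 */
                return ERR_PTR(-ENOMEM);                /* assumed */
        }

        iommu_set_fault_handler(pd->domain, usnic_uiom_dma_fault, NULL);

        spin_lock_init(&pd->lock);
        INIT_LIST_HEAD(&pd->devs);

        return pd;
}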
489 void usnic_uiom_dealloc_pd(struct usnic_uiom_pd *pd) in usnic_uiom_dealloc_pd() argument
491 iommu_domain_free(pd->domain); in usnic_uiom_dealloc_pd()
492 kfree(pd); in usnic_uiom_dealloc_pd()
495 int usnic_uiom_attach_dev_to_pd(struct usnic_uiom_pd *pd, struct device *dev) in usnic_uiom_attach_dev_to_pd() argument
505 err = iommu_attach_device(pd->domain, dev); in usnic_uiom_attach_dev_to_pd()
516 spin_lock(&pd->lock); in usnic_uiom_attach_dev_to_pd()
517 list_add_tail(&uiom_dev->link, &pd->devs); in usnic_uiom_attach_dev_to_pd()
518 pd->dev_cnt++; in usnic_uiom_attach_dev_to_pd()
519 spin_unlock(&pd->lock); in usnic_uiom_attach_dev_to_pd()
524 iommu_detach_device(pd->domain, dev); in usnic_uiom_attach_dev_to_pd()
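Lines 495 through 524 trace usnic_uiom_attach_dev_to_pd(): attach the device to the pd's IOMMU domain, then under pd->lock add a per-device bookkeeping node to pd->devs and bump dev_cnt; the failure path at line 524 detaches again. A sketch, with the usnic_uiom_dev node assumed to hold just the device pointer and a list link, and the post-attach capability check assumed from context:

int usnic_uiom_attach_dev_to_pd(struct usnic_uiom_pd *pd, struct device *dev)
{
        struct usnic_uiom_dev *uiom_dev;
        int err;

        uiom_dev = kzalloc(sizeof(*uiom_dev), GFP_ATOMIC);
        if (!uiom_dev)
                return -ENOMEM;
        uiom_dev->dev = dev;

        err = iommu_attach_device(pd->domain, dev);     /* line 505 */
        if (err)
                goto out_free_dev;

        /* assumed: the original rejects configurations without
         * cache-coherent DMA between attach and list insertion */
        if (!iommu_capable(dev->bus, IOMMU_CAP_CACHE_COHERENCY)) {
                err = -EFAULT;
                goto out_detach;
        }

        spin_lock(&pd->lock);                           /* line 516 */
        list_add_tail(&uiom_dev->link, &pd->devs);
        pd->dev_cnt++;                                  /* line 518 */
        spin_unlock(&pd->lock);                         /* line 519 */

        return 0;

out_detach:
        iommu_detach_device(pd->domain, dev);           /* line 524 */
out_free_dev:
        kfree(uiom_dev);
        return err;
}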
530 void usnic_uiom_detach_dev_from_pd(struct usnic_uiom_pd *pd, struct device *dev) in usnic_uiom_detach_dev_from_pd() argument
535 spin_lock(&pd->lock); in usnic_uiom_detach_dev_from_pd()
536 list_for_each_entry(uiom_dev, &pd->devs, link) { in usnic_uiom_detach_dev_from_pd()
546 spin_unlock(&pd->lock); in usnic_uiom_detach_dev_from_pd()
551 pd->dev_cnt--; in usnic_uiom_detach_dev_from_pd()
552 spin_unlock(&pd->lock); in usnic_uiom_detach_dev_from_pd()
554 return iommu_detach_device(pd->domain, dev); in usnic_uiom_detach_dev_from_pd()
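Lines 530 through 554 are usnic_uiom_detach_dev_from_pd(): under pd->lock it searches pd->devs for the node tracking this device (the unlock at line 546 covers the not-found early exit), then unlinks it, decrements dev_cnt, drops the lock, and detaches the device from the IOMMU domain. Note that line 554 returns the value of iommu_detach_device(), a void call; returning a void expression from a void function is legal C, just unusual. Reassembled, with the bookkeeping node's fields assumed:

void usnic_uiom_detach_dev_from_pd(struct usnic_uiom_pd *pd,
                                        struct device *dev)
{
        struct usnic_uiom_dev *uiom_dev;
        int found = 0;

        spin_lock(&pd->lock);                           /* line 535 */
        list_for_each_entry(uiom_dev, &pd->devs, link) {
                if (uiom_dev->dev == dev) {
                        found = 1;
                        break;
                }
        }

        if (!found) {
                spin_unlock(&pd->lock);                 /* line 546 */
                return;
        }

        list_del(&uiom_dev->link);
        kfree(uiom_dev);
        pd->dev_cnt--;                                  /* line 551 */
        spin_unlock(&pd->lock);                         /* line 552 */

        iommu_detach_device(pd->domain, dev);           /* line 554 */
}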
557 struct device **usnic_uiom_get_dev_list(struct usnic_uiom_pd *pd) in usnic_uiom_get_dev_list() argument
563 spin_lock(&pd->lock); in usnic_uiom_get_dev_list()
564 devs = kcalloc(pd->dev_cnt + 1, sizeof(*devs), GFP_ATOMIC); in usnic_uiom_get_dev_list()
570 list_for_each_entry(uiom_dev, &pd->devs, link) { in usnic_uiom_get_dev_list()
574 spin_unlock(&pd->lock); in usnic_uiom_get_dev_list()
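Lines 557 through 574 close the file with usnic_uiom_get_dev_list(), which snapshots the attached devices as a NULL-terminated array. kcalloc() uses GFP_ATOMIC because the allocation happens under the spinlock taken at line 563, where sleeping is not allowed, and the +1 leaves a zeroed slot as the terminating NULL. Reassembled, with the failure branch assumed:

struct device **usnic_uiom_get_dev_list(struct usnic_uiom_pd *pd)
{
        struct usnic_uiom_dev *uiom_dev;
        struct device **devs;
        int i = 0;

        spin_lock(&pd->lock);                           /* line 563 */
        /* GFP_ATOMIC: no sleeping under pd->lock; +1 for the
         * NULL terminator that kcalloc's zeroing provides */
        devs = kcalloc(pd->dev_cnt + 1, sizeof(*devs), GFP_ATOMIC);
        if (!devs) {
                devs = ERR_PTR(-ENOMEM);                /* assumed */
                goto out;
        }

        list_for_each_entry(uiom_dev, &pd->devs, link)  /* line 570 */
                devs[i++] = uiom_dev->dev;
out:
        spin_unlock(&pd->lock);                         /* line 574 */
        return devs;
}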