Lines matching refs: pq — cross-reference hits for the user SDMA queue pointer in drivers/infiniband/hw/ipath/ipath_user_sdma.c. The leading number on each hit is the source line; each hit names its enclosing function and notes whether pq is a local or an argument there. Statements that wrap in the source appear truncated at the matching line.
103 struct ipath_user_sdma_queue *pq = in ipath_user_sdma_queue_create() local
106 if (!pq) in ipath_user_sdma_queue_create()
109 pq->counter = 0; in ipath_user_sdma_queue_create()
110 pq->sent_counter = 0; in ipath_user_sdma_queue_create()
111 INIT_LIST_HEAD(&pq->sent); in ipath_user_sdma_queue_create()
113 mutex_init(&pq->lock); in ipath_user_sdma_queue_create()
115 snprintf(pq->pkt_slab_name, sizeof(pq->pkt_slab_name), in ipath_user_sdma_queue_create()
117 pq->pkt_slab = kmem_cache_create(pq->pkt_slab_name, in ipath_user_sdma_queue_create()
121 if (!pq->pkt_slab) in ipath_user_sdma_queue_create()
124 snprintf(pq->header_cache_name, sizeof(pq->header_cache_name), in ipath_user_sdma_queue_create()
126 pq->header_cache = dma_pool_create(pq->header_cache_name, in ipath_user_sdma_queue_create()
130 if (!pq->header_cache) in ipath_user_sdma_queue_create()
133 pq->dma_pages_root = RB_ROOT; in ipath_user_sdma_queue_create()
138 kmem_cache_destroy(pq->pkt_slab); in ipath_user_sdma_queue_create()
140 kfree(pq); in ipath_user_sdma_queue_create()
141 pq = NULL; in ipath_user_sdma_queue_create()
144 return pq; in ipath_user_sdma_queue_create()
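
Reassembled, hits 103-144 trace the whole constructor: allocate the queue, initialize the counters, sent list, and mutex, create a per-port slab for packet descriptors and a coherent DMA pool for headers, and unwind in reverse order on failure. A minimal sketch built from those fragments; the argument list, the kmalloc call, the name format strings, the IPATH_USER_SDMA_EXP_HEADER_LENGTH constant, and the goto labels do not appear in the listing and are assumptions:

/* Sketch of ipath_user_sdma_queue_create(), reconstructed from the hits
 * above; details not visible in the listing are assumptions. */
struct ipath_user_sdma_queue *
ipath_user_sdma_queue_create(struct device *dev, int unit, int port, int sport)
{
	struct ipath_user_sdma_queue *pq =
		kmalloc(sizeof(struct ipath_user_sdma_queue), GFP_KERNEL);

	if (!pq)
		goto done;

	pq->counter = 0;		/* packets queued over the queue's lifetime */
	pq->sent_counter = 0;		/* packets known to have completed */
	INIT_LIST_HEAD(&pq->sent);	/* packets handed to the hardware */

	mutex_init(&pq->lock);		/* serializes writev/clean/drain */

	/* per-queue slab for packet descriptors */
	snprintf(pq->pkt_slab_name, sizeof(pq->pkt_slab_name),
		 "ipath-user-sdma-pkts-%u-%02u.%02u", unit, port, sport);
	pq->pkt_slab = kmem_cache_create(pq->pkt_slab_name,
					 sizeof(struct ipath_user_sdma_pkt),
					 0, 0, NULL);
	if (!pq->pkt_slab)
		goto err_kfree;

	/* coherent DMA pool for PBC/header buffers */
	snprintf(pq->header_cache_name, sizeof(pq->header_cache_name),
		 "ipath-user-sdma-headers-%u-%02u.%02u", unit, port, sport);
	pq->header_cache = dma_pool_create(pq->header_cache_name, dev,
					   IPATH_USER_SDMA_EXP_HEADER_LENGTH,
					   4, 0);
	if (!pq->header_cache)
		goto err_slab;

	pq->dma_pages_root = RB_ROOT;	/* rbtree of pinned user pages */
	goto done;

err_slab:
	kmem_cache_destroy(pq->pkt_slab);	/* unwind in reverse order */
err_kfree:
	kfree(pq);
	pq = NULL;
done:
	return pq;
}
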
248 struct ipath_user_sdma_queue *pq, in ipath_user_sdma_free_pkt_frag() argument
270 dma_pool_free(pq->header_cache, in ipath_user_sdma_free_pkt_frag()
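
Hit 270 is the reason ipath_user_sdma_free_pkt_frag() takes pq at all: header fragments must be returned to the queue's DMA pool rather than freed generically. A hedged sketch, assuming fragment bookkeeping lives in a pkt->addr[] array (the page, kvaddr, addr, and dma_mapped field names are assumptions, and the real page-release path likely has more cases than shown):

static void ipath_user_sdma_free_pkt_frag(struct device *dev,
					  struct ipath_user_sdma_queue *pq,
					  struct ipath_user_sdma_pkt *pkt,
					  int i)
{
	if (pkt->addr[i].page) {
		/* payload fragment: undo the DMA mapping and page pin */
		if (pkt->addr[i].dma_mapped)
			dma_unmap_page(dev, pkt->addr[i].addr,
				       PAGE_SIZE, DMA_TO_DEVICE);
		put_page(pkt->addr[i].page);
	} else if (pkt->addr[i].kvaddr) {
		/* header fragment: return it to the per-queue pool */
		dma_pool_free(pq->header_cache,
			      pkt->addr[i].kvaddr, pkt->addr[i].addr);
	}
}
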
322 struct ipath_user_sdma_queue *pq, in ipath_user_sdma_pin_pkt() argument
345 ipath_user_sdma_free_pkt_frag(&dd->pcidev->dev, pq, pkt, idx); in ipath_user_sdma_pin_pkt()
352 struct ipath_user_sdma_queue *pq, in ipath_user_sdma_init_payload() argument
362 ret = ipath_user_sdma_pin_pkt(dd, pq, pkt, iov, niov); in ipath_user_sdma_init_payload()
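
Hits 345 and 362 show the payload path: ipath_user_sdma_init_payload() delegates to ipath_user_sdma_pin_pkt(), whose failure path releases every fragment pinned so far through free_pkt_frag(). A sketch; ipath_user_sdma_pin_pages() is a hypothetical stand-in for the per-iovec pinning step, pkt->naddr is an assumed fragment count, and the real init_payload signature may carry extra size hints not visible here:

static int ipath_user_sdma_pin_pkt(const struct ipath_devdata *dd,
				   struct ipath_user_sdma_queue *pq,
				   struct ipath_user_sdma_pkt *pkt,
				   const struct iovec *iov,
				   unsigned long niov)
{
	unsigned long idx;
	int ret = 0;

	for (idx = 0; idx < niov; idx++) {
		/* hypothetical helper: pin and DMA-map this iovec's pages */
		ret = ipath_user_sdma_pin_pages(dd, pkt,
						(unsigned long)iov[idx].iov_base,
						iov[idx].iov_len);
		if (ret < 0)
			goto free_pkt;
	}
	return 0;

free_pkt:
	/* partial failure: release everything pinned so far (hit 345) */
	for (idx = 0; idx < pkt->naddr; idx++)
		ipath_user_sdma_free_pkt_frag(&dd->pcidev->dev, pq, pkt, idx);
	return ret;
}

static int ipath_user_sdma_init_payload(const struct ipath_devdata *dd,
					struct ipath_user_sdma_queue *pq,
					struct ipath_user_sdma_pkt *pkt,
					const struct iovec *iov,
					unsigned long niov)
{
	return ipath_user_sdma_pin_pkt(dd, pq, pkt, iov, niov);	/* hit 362 */
}
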
369 struct ipath_user_sdma_queue *pq, in ipath_user_sdma_free_pkt_list() argument
378 ipath_user_sdma_free_pkt_frag(dev, pq, pkt, i); in ipath_user_sdma_free_pkt_list()
380 kmem_cache_free(pq->pkt_slab, pkt); in ipath_user_sdma_free_pkt_list()
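
free_pkt_list() pairs the two allocators from the constructor: every fragment is released through free_pkt_frag(), then the descriptor itself goes back to the slab. Sketch reassembled from hits 378-380 (the pkt->naddr count and the list linkage field name are assumptions):

static void ipath_user_sdma_free_pkt_list(struct device *dev,
					  struct ipath_user_sdma_queue *pq,
					  struct list_head *list)
{
	struct ipath_user_sdma_pkt *pkt, *pkt_next;

	list_for_each_entry_safe(pkt, pkt_next, list, list) {
		int i;

		/* headers return to the DMA pool, pages get unpinned */
		for (i = 0; i < pkt->naddr; i++)
			ipath_user_sdma_free_pkt_frag(dev, pq, pkt, i);

		/* the descriptor came from the per-queue slab */
		kmem_cache_free(pq->pkt_slab, pkt);
	}
}
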
392 struct ipath_user_sdma_queue *pq, in ipath_user_sdma_queue_pkts() argument
407 u32 counter = pq->counter; in ipath_user_sdma_queue_pkts()
424 pkt = kmem_cache_alloc(pq->pkt_slab, GFP_KERNEL); in ipath_user_sdma_queue_pkts()
437 pbc = dma_pool_alloc(pq->header_cache, GFP_KERNEL, in ipath_user_sdma_queue_pkts()
522 ret = ipath_user_sdma_init_payload(dd, pq, pkt, in ipath_user_sdma_queue_pkts()
546 dma_pool_free(pq->header_cache, pbc, dma_addr); in ipath_user_sdma_queue_pkts()
548 kmem_cache_free(pq->pkt_slab, pkt); in ipath_user_sdma_queue_pkts()
550 ipath_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, list); in ipath_user_sdma_queue_pkts()
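
queue_pkts() is where both constructor-created allocators are drawn from, and hits 546-550 show the matching three-level unwind: free the header buffer, free the descriptor, then free any packets already built. A condensed sketch; header copy and validation are elided, ipath_user_sdma_parse_header() is a hypothetical stand-in for them, the counter tagging is inferred from hit 407, and the return convention (iovecs consumed) is an assumption chosen to keep the writev sketch further down consistent:

static int ipath_user_sdma_queue_pkts(const struct ipath_devdata *dd,
				      struct ipath_user_sdma_queue *pq,
				      struct list_head *list,
				      const struct iovec *iov,
				      unsigned long dim,
				      unsigned long mxp)
{
	u32 counter = pq->counter;	/* hit 407: tag packets from here */
	unsigned long idx = 0;
	int npkts = 0;
	int ret;

	while (idx < dim && npkts < mxp) {
		struct ipath_user_sdma_pkt *pkt;
		dma_addr_t dma_addr;
		long nfrags;
		u32 *pbc;

		/* descriptor from the per-queue slab (hit 424) */
		pkt = kmem_cache_alloc(pq->pkt_slab, GFP_KERNEL);
		if (!pkt) {
			ret = -ENOMEM;
			goto free_list;
		}

		/* PBC/header buffer from the DMA pool (hit 437) */
		pbc = dma_pool_alloc(pq->header_cache, GFP_KERNEL, &dma_addr);
		if (!pbc) {
			ret = -ENOMEM;
			goto free_pkt;
		}

		/* hypothetical stand-in for the elided copy_from_user()
		 * and header sanity checks; returns payload iovec count */
		nfrags = ipath_user_sdma_parse_header(dd, iov + idx, pbc);
		if (nfrags < 0) {
			ret = nfrags;
			goto free_pbc;
		}

		/* pin the payload pages (hit 522) */
		ret = ipath_user_sdma_init_payload(dd, pq, pkt,
						   iov + idx + 1, nfrags);
		if (ret < 0)
			goto free_pbc;

		pkt->counter = counter + npkts;
		list_add_tail(&pkt->list, list);
		npkts++;
		idx += nfrags + 1;	/* header iovec plus payload iovecs */
		continue;

free_pbc:
		dma_pool_free(pq->header_cache, pbc, dma_addr);	/* hit 546 */
free_pkt:
		kmem_cache_free(pq->pkt_slab, pkt);		/* hit 548 */
free_list:
		ipath_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, list);
		return ret;					/* hit 550 */
	}

	return idx;	/* iovecs consumed; an assumption, see lead-in */
}
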
555 static void ipath_user_sdma_set_complete_counter(struct ipath_user_sdma_queue *pq, in ipath_user_sdma_set_complete_counter() argument
558 pq->sent_counter = c; in ipath_user_sdma_set_complete_counter()
563 struct ipath_user_sdma_queue *pq) in ipath_user_sdma_queue_clean() argument
572 list_for_each_entry_safe(pkt, pkt_prev, &pq->sent, list) { in ipath_user_sdma_queue_clean()
591 ipath_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &free_list); in ipath_user_sdma_queue_clean()
592 ipath_user_sdma_set_complete_counter(pq, counter); in ipath_user_sdma_queue_clean()
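
queue_clean() walks pq->sent in order, moves completed packets onto a private list, frees them, and records the counter of the last completed packet so userspace can observe progress; set_complete_counter() (hit 558) is just that one store. A sketch, assuming each packet records the descriptor-ring position it was added at (pkt->added and dd->ipath_sdma_descq_removed are assumptions):

/* call with pq->lock held */
static int ipath_user_sdma_queue_clean(const struct ipath_devdata *dd,
				       struct ipath_user_sdma_queue *pq)
{
	struct ipath_user_sdma_pkt *pkt, *pkt_prev;
	struct list_head free_list;
	int ret = 0;

	INIT_LIST_HEAD(&free_list);

	list_for_each_entry_safe(pkt, pkt_prev, &pq->sent, list) {
		/* stop at the first packet the hardware hasn't retired */
		if ((s64)(dd->ipath_sdma_descq_removed - pkt->added) < 0)
			break;

		list_move_tail(&pkt->list, &free_list);
		ret++;
	}

	if (!list_empty(&free_list)) {
		/* the last completed packet defines the new counter */
		pkt = list_entry(free_list.prev,
				 struct ipath_user_sdma_pkt, list);

		ipath_user_sdma_free_pkt_list(&dd->pcidev->dev, pq,
					      &free_list);	/* hit 591 */
		ipath_user_sdma_set_complete_counter(pq, pkt->counter);
	}

	return ret;
}
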
598 void ipath_user_sdma_queue_destroy(struct ipath_user_sdma_queue *pq) in ipath_user_sdma_queue_destroy() argument
600 if (!pq) in ipath_user_sdma_queue_destroy()
603 kmem_cache_destroy(pq->pkt_slab); in ipath_user_sdma_queue_destroy()
604 dma_pool_destroy(pq->header_cache); in ipath_user_sdma_queue_destroy()
605 kfree(pq); in ipath_user_sdma_queue_destroy()
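
The destructor is almost fully visible in the listing; reassembled it reads as below (only the early return is inferred from the !pq check at hit 600). Teardown mirrors creation in reverse, and the queue must be drained first: kmem_cache_destroy() complains if packet descriptors are still outstanding.

void ipath_user_sdma_queue_destroy(struct ipath_user_sdma_queue *pq)
{
	if (!pq)
		return;

	kmem_cache_destroy(pq->pkt_slab);
	dma_pool_destroy(pq->header_cache);
	kfree(pq);
}
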
623 struct ipath_user_sdma_queue *pq) in ipath_user_sdma_queue_drain() argument
627 if (!pq) in ipath_user_sdma_queue_drain()
631 mutex_lock(&pq->lock); in ipath_user_sdma_queue_drain()
632 if (list_empty(&pq->sent)) { in ipath_user_sdma_queue_drain()
633 mutex_unlock(&pq->lock); in ipath_user_sdma_queue_drain()
637 ipath_user_sdma_queue_clean(dd, pq); in ipath_user_sdma_queue_drain()
638 mutex_unlock(&pq->lock); in ipath_user_sdma_queue_drain()
642 if (!list_empty(&pq->sent)) { in ipath_user_sdma_queue_drain()
647 mutex_lock(&pq->lock); in ipath_user_sdma_queue_drain()
648 list_splice_init(&pq->sent, &free_list); in ipath_user_sdma_queue_drain()
649 ipath_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &free_list); in ipath_user_sdma_queue_drain()
650 mutex_unlock(&pq->lock); in ipath_user_sdma_queue_drain()
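
queue_drain() polls under the mutex: as long as packets remain on pq->sent it runs queue_clean(), and if the hardware never retires them it finally splices the list off and frees it unconditionally (hits 648-649). Sketch; the retry count and sleep interval are assumptions:

void ipath_user_sdma_queue_drain(struct ipath_devdata *dd,
				 struct ipath_user_sdma_queue *pq)
{
	int i;

	if (!pq)
		return;

	for (i = 0; i < 100; i++) {	/* bounded wait; count is an assumption */
		mutex_lock(&pq->lock);
		if (list_empty(&pq->sent)) {
			mutex_unlock(&pq->lock);
			return;
		}
		ipath_user_sdma_queue_clean(dd, pq);
		mutex_unlock(&pq->lock);
		msleep(10);
	}

	if (!list_empty(&pq->sent)) {
		struct list_head free_list;

		/* hardware never completed them: reclaim by force */
		INIT_LIST_HEAD(&free_list);
		mutex_lock(&pq->lock);
		list_splice_init(&pq->sent, &free_list);
		ipath_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &free_list);
		mutex_unlock(&pq->lock);
	}
}
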
708 struct ipath_user_sdma_queue *pq, in ipath_user_sdma_push_pkts() argument
773 list_move_tail(&pkt->list, &pq->sent); in ipath_user_sdma_push_pkts()
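
push_pkts() is the hand-off point: descriptors are written into the hardware ring under the device's SDMA spinlock, and each accepted packet migrates from the caller's build list to pq->sent (hit 773), where queue_clean() will later find it. A heavily condensed sketch; the spinlock name, the free-count helper, and ipath_user_sdma_write_descs() are assumptions (the last is a hypothetical stand-in for the per-fragment descriptor writes), and the real function also checks link state:

/* call with pq->lock held */
static int ipath_user_sdma_push_pkts(struct ipath_devdata *dd,
				     struct ipath_user_sdma_queue *pq,
				     struct list_head *pktlist)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&dd->ipath_sdma_lock, flags);

	while (!list_empty(pktlist)) {
		struct ipath_user_sdma_pkt *pkt =
			list_first_entry(pktlist,
					 struct ipath_user_sdma_pkt, list);

		/* stop when the descriptor ring can't take this packet */
		if (pkt->naddr > ipath_sdma_descq_freecnt(dd))
			break;

		/* hypothetical stand-in for writing pkt's fragments
		 * into the hardware descriptor ring */
		ipath_user_sdma_write_descs(dd, pkt);

		/* now owned by the hardware: track it on the sent list */
		list_move_tail(&pkt->list, &pq->sent);
		ret++;
	}

	spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
	return ret;
}
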
792 struct ipath_user_sdma_queue *pq, in ipath_user_sdma_writev() argument
802 mutex_lock(&pq->lock); in ipath_user_sdma_writev()
806 ipath_user_sdma_queue_clean(dd, pq); in ipath_user_sdma_writev()
812 ret = ipath_user_sdma_queue_pkts(dd, pq, &list, iov, dim, mxp); in ipath_user_sdma_writev()
829 ipath_user_sdma_queue_clean(dd, pq); in ipath_user_sdma_writev()
832 ret = ipath_user_sdma_push_pkts(dd, pq, &list); in ipath_user_sdma_writev()
837 pq->counter += ret; in ipath_user_sdma_writev()
847 ipath_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &list); in ipath_user_sdma_writev()
848 mutex_unlock(&pq->lock); in ipath_user_sdma_writev()
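
writev() is the top-level entry and shows the whole lifecycle under one mutex hold: opportunistically reap completions, build up to mxp packets per batch, push them, and credit pq->counter with what was accepted (hit 837); whatever is left on the local list at exit is freed (hit 847). Sketch; the batch size and the lazy-clean heuristic before the push are assumptions:

int ipath_user_sdma_writev(struct ipath_devdata *dd,
			   struct ipath_user_sdma_queue *pq,
			   const struct iovec *iov,
			   unsigned long dim)
{
	struct list_head list;
	int npkts = 0;
	int ret = 0;

	INIT_LIST_HEAD(&list);

	mutex_lock(&pq->lock);

	/* reap anything the hardware finished since the last call */
	ipath_user_sdma_queue_clean(dd, pq);

	while (dim) {
		const unsigned long mxp = 8;	/* batch size; an assumption */

		ret = ipath_user_sdma_queue_pkts(dd, pq, &list, iov, dim, mxp);
		if (ret <= 0)
			goto done_unlock;

		dim -= ret;	/* iovecs consumed by this batch */
		iov += ret;

		/* lazily reap again if the ring is getting full
		 * (the exact heuristic is an assumption) */
		if (ipath_sdma_descq_freecnt(dd) < 2 * mxp)
			ipath_user_sdma_queue_clean(dd, pq);

		ret = ipath_user_sdma_push_pkts(dd, pq, &list);
		if (ret < 0)
			goto done_unlock;

		npkts += ret;
		pq->counter += ret;	/* hit 837: credit what was pushed */

		if (!list_empty(&list))	/* ring full: give up for now */
			goto done_unlock;
	}

done_unlock:
	if (!list_empty(&list))
		ipath_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &list);
	mutex_unlock(&pq->lock);

	return (ret < 0) ? ret : npkts;
}
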
854 struct ipath_user_sdma_queue *pq) in ipath_user_sdma_make_progress() argument
858 mutex_lock(&pq->lock); in ipath_user_sdma_make_progress()
860 ret = ipath_user_sdma_queue_clean(dd, pq); in ipath_user_sdma_make_progress()
861 mutex_unlock(&pq->lock); in ipath_user_sdma_make_progress()
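
make_progress() gives other kernel paths a way to reap completions without going through writev: the same queue_clean(), just wrapped in the mutex. Reassembled from hits 858-861:

int ipath_user_sdma_make_progress(struct ipath_devdata *dd,
				  struct ipath_user_sdma_queue *pq)
{
	int ret;

	mutex_lock(&pq->lock);
	ret = ipath_user_sdma_queue_clean(dd, pq);
	mutex_unlock(&pq->lock);

	return ret;
}
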
866 u32 ipath_user_sdma_complete_counter(const struct ipath_user_sdma_queue *pq) in ipath_user_sdma_complete_counter() argument
868 return pq->sent_counter; in ipath_user_sdma_complete_counter()
871 u32 ipath_user_sdma_inflight_counter(struct ipath_user_sdma_queue *pq) in ipath_user_sdma_inflight_counter() argument
873 return pq->counter; in ipath_user_sdma_inflight_counter()
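
The two accessors expose the counter pair initialized in the constructor: pq->counter only grows in writev() (hit 837) and pq->sent_counter only advances via queue_clean(), so a caller comparing them can tell how many queued packets are still in flight (as suggested by the counter tagging at hit 407, matching values mean everything queued has been reaped). Reassembled from hits 868 and 873:

u32 ipath_user_sdma_complete_counter(const struct ipath_user_sdma_queue *pq)
{
	return pq->sent_counter;	/* counter of the last completed packet */
}

u32 ipath_user_sdma_inflight_counter(struct ipath_user_sdma_queue *pq)
{
	return pq->counter;		/* counter of the last queued packet */
}
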