Lines Matching refs:pq

81 	struct qib_user_sdma_queue *pq;	/* which pq this pkt belongs to */  member
185 struct qib_user_sdma_queue *pq = in qib_user_sdma_queue_create() local
189 if (!pq) in qib_user_sdma_queue_create()
192 pq->counter = 0; in qib_user_sdma_queue_create()
193 pq->sent_counter = 0; in qib_user_sdma_queue_create()
194 pq->num_pending = 0; in qib_user_sdma_queue_create()
195 pq->num_sending = 0; in qib_user_sdma_queue_create()
196 pq->added = 0; in qib_user_sdma_queue_create()
197 pq->sdma_rb_node = NULL; in qib_user_sdma_queue_create()
199 INIT_LIST_HEAD(&pq->sent); in qib_user_sdma_queue_create()
200 spin_lock_init(&pq->sent_lock); in qib_user_sdma_queue_create()
201 mutex_init(&pq->lock); in qib_user_sdma_queue_create()
203 snprintf(pq->pkt_slab_name, sizeof(pq->pkt_slab_name), in qib_user_sdma_queue_create()
205 pq->pkt_slab = kmem_cache_create(pq->pkt_slab_name, in qib_user_sdma_queue_create()
209 if (!pq->pkt_slab) in qib_user_sdma_queue_create()
212 snprintf(pq->header_cache_name, sizeof(pq->header_cache_name), in qib_user_sdma_queue_create()
214 pq->header_cache = dma_pool_create(pq->header_cache_name, in qib_user_sdma_queue_create()
218 if (!pq->header_cache) in qib_user_sdma_queue_create()
221 pq->dma_pages_root = RB_ROOT; in qib_user_sdma_queue_create()
242 pq->sdma_rb_node = sdma_rb_node; in qib_user_sdma_queue_create()
247 dma_pool_destroy(pq->header_cache); in qib_user_sdma_queue_create()
249 kmem_cache_destroy(pq->pkt_slab); in qib_user_sdma_queue_create()
251 kfree(pq); in qib_user_sdma_queue_create()
252 pq = NULL; in qib_user_sdma_queue_create()
255 return pq; in qib_user_sdma_queue_create()
277 static void *qib_user_sdma_alloc_header(struct qib_user_sdma_queue *pq, in qib_user_sdma_alloc_header() argument
283 hdr = dma_pool_alloc(pq->header_cache, GFP_KERNEL, in qib_user_sdma_alloc_header()
300 struct qib_user_sdma_queue *pq, in qib_user_sdma_page_to_frags() argument
437 pbcvaddr = qib_user_sdma_alloc_header(pq, pbclen, &pbcdaddr); in qib_user_sdma_page_to_frags()
562 struct qib_user_sdma_queue *pq, in qib_user_sdma_coalesce() argument
595 ret = qib_user_sdma_page_to_frags(dd, pq, pkt, in qib_user_sdma_coalesce()
620 struct qib_user_sdma_queue *pq, in qib_user_sdma_free_pkt_frag() argument
652 dma_pool_free(pq->header_cache, in qib_user_sdma_free_pkt_frag()
663 struct qib_user_sdma_queue *pq, in qib_user_sdma_pin_pages() argument
691 ret = qib_user_sdma_page_to_frags(dd, pq, pkt, in qib_user_sdma_pin_pages()
720 struct qib_user_sdma_queue *pq, in qib_user_sdma_pin_pkt() argument
732 ret = qib_user_sdma_pin_pages(dd, pq, pkt, addr, in qib_user_sdma_pin_pkt()
743 qib_user_sdma_free_pkt_frag(&dd->pcidev->dev, pq, pkt, idx); in qib_user_sdma_pin_pkt()
762 struct qib_user_sdma_queue *pq, in qib_user_sdma_init_payload() argument
771 ret = qib_user_sdma_coalesce(dd, pq, pkt, iov, niov); in qib_user_sdma_init_payload()
773 ret = qib_user_sdma_pin_pkt(dd, pq, pkt, iov, niov); in qib_user_sdma_init_payload()
780 struct qib_user_sdma_queue *pq, in qib_user_sdma_free_pkt_list() argument
789 qib_user_sdma_free_pkt_frag(dev, pq, pkt, i); in qib_user_sdma_free_pkt_list()
794 kmem_cache_free(pq->pkt_slab, pkt); in qib_user_sdma_free_pkt_list()
808 struct qib_user_sdma_queue *pq, in qib_user_sdma_queue_pkts() argument
822 u32 counter = pq->counter; in qib_user_sdma_queue_pkts()
845 pbc = qib_user_sdma_alloc_header(pq, len, &dma_addr); in qib_user_sdma_queue_pkts()
962 pkt = kmem_cache_alloc(pq->pkt_slab, GFP_KERNEL); in qib_user_sdma_queue_pkts()
987 ret = qib_user_sdma_init_payload(dd, pq, pkt, in qib_user_sdma_queue_pkts()
1016 pkt->pq = pq; in qib_user_sdma_queue_pkts()
1031 kmem_cache_free(pq->pkt_slab, pkt); in qib_user_sdma_queue_pkts()
1034 dma_pool_free(pq->header_cache, pbc, dma_addr); in qib_user_sdma_queue_pkts()
1038 qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, list); in qib_user_sdma_queue_pkts()
1043 static void qib_user_sdma_set_complete_counter(struct qib_user_sdma_queue *pq, in qib_user_sdma_set_complete_counter() argument
1046 pq->sent_counter = c; in qib_user_sdma_set_complete_counter()
1051 struct qib_user_sdma_queue *pq) in qib_user_sdma_queue_clean() argument
1060 if (!pq->num_sending) in qib_user_sdma_queue_clean()
1070 spin_lock_irqsave(&pq->sent_lock, flags); in qib_user_sdma_queue_clean()
1071 list_for_each_entry_safe(pkt, pkt_prev, &pq->sent, list) { in qib_user_sdma_queue_clean()
1081 pq->num_sending--; in qib_user_sdma_queue_clean()
1083 spin_unlock_irqrestore(&pq->sent_lock, flags); in qib_user_sdma_queue_clean()
1092 qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &free_list); in qib_user_sdma_queue_clean()
1093 qib_user_sdma_set_complete_counter(pq, counter); in qib_user_sdma_queue_clean()
1099 void qib_user_sdma_queue_destroy(struct qib_user_sdma_queue *pq) in qib_user_sdma_queue_destroy() argument
1101 if (!pq) in qib_user_sdma_queue_destroy()
1104 pq->sdma_rb_node->refcount--; in qib_user_sdma_queue_destroy()
1105 if (pq->sdma_rb_node->refcount == 0) { in qib_user_sdma_queue_destroy()
1106 rb_erase(&pq->sdma_rb_node->node, &qib_user_sdma_rb_root); in qib_user_sdma_queue_destroy()
1107 kfree(pq->sdma_rb_node); in qib_user_sdma_queue_destroy()
1109 dma_pool_destroy(pq->header_cache); in qib_user_sdma_queue_destroy()
1110 kmem_cache_destroy(pq->pkt_slab); in qib_user_sdma_queue_destroy()
1111 kfree(pq); in qib_user_sdma_queue_destroy()
1129 struct qib_user_sdma_queue *pq) in qib_user_sdma_queue_drain() argument
1135 if (!pq) in qib_user_sdma_queue_drain()
1139 mutex_lock(&pq->lock); in qib_user_sdma_queue_drain()
1140 if (!pq->num_pending && !pq->num_sending) { in qib_user_sdma_queue_drain()
1141 mutex_unlock(&pq->lock); in qib_user_sdma_queue_drain()
1145 qib_user_sdma_queue_clean(ppd, pq); in qib_user_sdma_queue_drain()
1146 mutex_unlock(&pq->lock); in qib_user_sdma_queue_drain()
1150 if (pq->num_pending || pq->num_sending) { in qib_user_sdma_queue_drain()
1155 mutex_lock(&pq->lock); in qib_user_sdma_queue_drain()
1160 if (pq->num_pending) { in qib_user_sdma_queue_drain()
1163 if (pkt->pq == pq) { in qib_user_sdma_queue_drain()
1164 list_move_tail(&pkt->list, &pq->sent); in qib_user_sdma_queue_drain()
1165 pq->num_pending--; in qib_user_sdma_queue_drain()
1166 pq->num_sending++; in qib_user_sdma_queue_drain()
1174 list_splice_init(&pq->sent, &free_list); in qib_user_sdma_queue_drain()
1175 pq->num_sending = 0; in qib_user_sdma_queue_drain()
1176 qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &free_list); in qib_user_sdma_queue_drain()
1177 mutex_unlock(&pq->lock); in qib_user_sdma_queue_drain()
1302 pkt->pq->added = pkt->added; in qib_user_sdma_send_desc()
1303 pkt->pq->num_pending--; in qib_user_sdma_send_desc()
1304 spin_lock(&pkt->pq->sent_lock); in qib_user_sdma_send_desc()
1305 pkt->pq->num_sending++; in qib_user_sdma_send_desc()
1306 list_move_tail(&pkt->list, &pkt->pq->sent); in qib_user_sdma_send_desc()
1307 spin_unlock(&pkt->pq->sent_lock); in qib_user_sdma_send_desc()
1325 struct qib_user_sdma_queue *pq, in qib_user_sdma_push_pkts() argument
1334 if (pq->sdma_rb_node->refcount > 1) { in qib_user_sdma_push_pkts()
1340 pq->num_pending += count; in qib_user_sdma_push_pkts()
1354 pq->num_pending += count; in qib_user_sdma_push_pkts()
1377 struct qib_user_sdma_queue *pq, in qib_user_sdma_writev() argument
1389 mutex_lock(&pq->lock); in qib_user_sdma_writev()
1396 if (pq->added > ppd->sdma_descq_removed) in qib_user_sdma_writev()
1399 if (pq->num_sending) in qib_user_sdma_writev()
1400 qib_user_sdma_queue_clean(ppd, pq); in qib_user_sdma_writev()
1406 ret = qib_user_sdma_queue_pkts(dd, ppd, pq, in qib_user_sdma_writev()
1422 if (pq->num_sending) in qib_user_sdma_writev()
1423 qib_user_sdma_queue_clean(ppd, pq); in qib_user_sdma_writev()
1426 ret = qib_user_sdma_push_pkts(ppd, pq, &list, mxp); in qib_user_sdma_writev()
1431 pq->counter += mxp; in qib_user_sdma_writev()
1438 qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &list); in qib_user_sdma_writev()
1439 mutex_unlock(&pq->lock); in qib_user_sdma_writev()
1445 struct qib_user_sdma_queue *pq) in qib_user_sdma_make_progress() argument
1449 mutex_lock(&pq->lock); in qib_user_sdma_make_progress()
1451 ret = qib_user_sdma_queue_clean(ppd, pq); in qib_user_sdma_make_progress()
1452 mutex_unlock(&pq->lock); in qib_user_sdma_make_progress()
1457 u32 qib_user_sdma_complete_counter(const struct qib_user_sdma_queue *pq) in qib_user_sdma_complete_counter() argument
1459 return pq ? pq->sent_counter : 0; in qib_user_sdma_complete_counter()
1462 u32 qib_user_sdma_inflight_counter(struct qib_user_sdma_queue *pq) in qib_user_sdma_inflight_counter() argument
1464 return pq ? pq->counter : 0; in qib_user_sdma_inflight_counter()
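
The matches above appear to cover drivers/infiniband/hw/qib/qib_user_sdma.c and, taken together, trace the life cycle of a struct qib_user_sdma_queue: qib_user_sdma_queue_create() allocates the queue plus its packet slab and header DMA pool, qib_user_sdma_writev() builds and pushes packets while bumping pq->counter, qib_user_sdma_make_progress()/qib_user_sdma_queue_clean() reap completions into pq->sent_counter, and qib_user_sdma_queue_drain()/qib_user_sdma_queue_destroy() tear everything down. The sketch below is a minimal, hypothetical caller written against the signatures these call sites imply (roughly what qib_user_sdma.h exports); the example_sdma_* wrapper names and the qib_ctxtdata/qib_pportdata plumbing are illustrative assumptions, not the driver's actual file-ops code.

/*
 * Hypothetical caller sketch: driving one user SDMA queue per context,
 * following the qib_user_sdma_* call sites matched above.  The wrapper
 * names and struct plumbing are assumptions for illustration only.
 */
#include <linux/printk.h>
#include <linux/uio.h>
#include "qib.h"		/* struct qib_devdata, qib_ctxtdata, qib_pportdata */
#include "qib_user_sdma.h"	/* struct qib_user_sdma_queue and its API */

/* Open path: one queue per (unit, ctxt, subctxt); returns NULL on failure. */
static struct qib_user_sdma_queue *
example_sdma_open(struct qib_devdata *dd, struct qib_ctxtdata *rcd, int subctxt)
{
	return qib_user_sdma_queue_create(&dd->pcidev->dev, dd->unit,
					  rcd->ctxt, subctxt);
}

/* Write path: hand a user iovec to the queue; pq->counter advances as
 * packets are queued (line 1431 above). */
static int example_sdma_write(struct qib_ctxtdata *rcd,
			      struct qib_user_sdma_queue *pq,
			      const struct iovec *iov, unsigned long dim)
{
	return qib_user_sdma_writev(rcd, pq, iov, dim);
}

/* Progress path: reap completed descriptors and report the counters. */
static void example_sdma_progress(struct qib_pportdata *ppd,
				  struct qib_user_sdma_queue *pq)
{
	qib_user_sdma_make_progress(ppd, pq);	/* runs queue_clean under pq->lock */

	pr_debug("sdma inflight %u complete %u\n",
		 qib_user_sdma_inflight_counter(pq),
		 qib_user_sdma_complete_counter(pq));
}

/* Close path: wait out pending/sending packets, then free the slab,
 * DMA pool and the queue itself. */
static void example_sdma_close(struct qib_pportdata *ppd,
			       struct qib_user_sdma_queue *pq)
{
	qib_user_sdma_queue_drain(ppd, pq);
	qib_user_sdma_queue_destroy(pq);	/* tolerates NULL, per the !pq check at 1101 */
}

Note that qib_user_sdma_queue_destroy() also drops the shared sdma_rb_node refcount (lines 1104-1107), so drain-then-destroy is the expected teardown order.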