Lines matching refs:pkt (cross-reference of the identifier pkt in qib_user_sdma.c)

258 static void qib_user_sdma_init_frag(struct qib_user_sdma_pkt *pkt,  in qib_user_sdma_init_frag()  argument
265 pkt->addr[i].offset = offset; in qib_user_sdma_init_frag()
266 pkt->addr[i].length = len; in qib_user_sdma_init_frag()
267 pkt->addr[i].first_desc = first_desc; in qib_user_sdma_init_frag()
268 pkt->addr[i].last_desc = last_desc; in qib_user_sdma_init_frag()
269 pkt->addr[i].put_page = put_page; in qib_user_sdma_init_frag()
270 pkt->addr[i].dma_mapped = dma_mapped; in qib_user_sdma_init_frag()
271 pkt->addr[i].page = page; in qib_user_sdma_init_frag()
272 pkt->addr[i].kvaddr = kvaddr; in qib_user_sdma_init_frag()
273 pkt->addr[i].addr = dma_addr; in qib_user_sdma_init_frag()
274 pkt->addr[i].dma_length = dma_length; in qib_user_sdma_init_frag()
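The assignments above in qib_user_sdma_init_frag() fill in one fragment of a packet. A minimal sketch of a pkt->addr[] entry, reconstructed only from the members referenced in this listing; the field types, ordering, and the struct name are assumptions, not the kernel's actual definition:

	/* Per-fragment entry of pkt->addr[], as implied by the assignments
	 * in qib_user_sdma_init_frag() above (types are guesses). */
	struct frag_entry_sketch {
		u16 offset;		/* offset into kvaddr/page for this frag */
		u16 length;		/* payload bytes carried by this frag */
		u8  first_desc;		/* first descriptor of an sdma packet */
		u8  last_desc;		/* last descriptor of an sdma packet */
		u8  put_page;		/* drop the page reference when freed */
		u8  dma_mapped;		/* needs dma unmapping when freed */
		struct page *page;	/* backing page, NULL for header buffers */
		void *kvaddr;		/* kernel mapping, used for header fixups */
		dma_addr_t addr;	/* bus address handed to the sdma engine */
		u16 dma_length;		/* length used when (un)mapping */
	};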
301 struct qib_user_sdma_pkt *pkt, in qib_user_sdma_page_to_frags() argument
343 if (pkt->tiddma && len > pkt->tidsm[pkt->tidsmidx].length) in qib_user_sdma_page_to_frags()
344 newlen = pkt->tidsm[pkt->tidsmidx].length; in qib_user_sdma_page_to_frags()
356 if ((pkt->payload_size + newlen) >= pkt->frag_size) { in qib_user_sdma_page_to_frags()
357 newlen = pkt->frag_size - pkt->payload_size; in qib_user_sdma_page_to_frags()
359 } else if (pkt->tiddma) { in qib_user_sdma_page_to_frags()
360 if (newlen == pkt->tidsm[pkt->tidsmidx].length) in qib_user_sdma_page_to_frags()
363 if (newlen == pkt->bytes_togo) in qib_user_sdma_page_to_frags()
368 qib_user_sdma_init_frag(pkt, pkt->naddr, /* index */ in qib_user_sdma_page_to_frags()
374 pkt->bytes_togo -= newlen; in qib_user_sdma_page_to_frags()
375 pkt->payload_size += newlen; in qib_user_sdma_page_to_frags()
376 pkt->naddr++; in qib_user_sdma_page_to_frags()
377 if (pkt->naddr == pkt->addrlimit) { in qib_user_sdma_page_to_frags()
383 if (pkt->bytes_togo == 0) { in qib_user_sdma_page_to_frags()
386 if (!pkt->addr[pkt->index].addr) { in qib_user_sdma_page_to_frags()
387 pkt->addr[pkt->index].addr = in qib_user_sdma_page_to_frags()
389 pkt->addr[pkt->index].kvaddr, in qib_user_sdma_page_to_frags()
390 pkt->addr[pkt->index].dma_length, in qib_user_sdma_page_to_frags()
393 pkt->addr[pkt->index].addr)) { in qib_user_sdma_page_to_frags()
397 pkt->addr[pkt->index].dma_mapped = 1; in qib_user_sdma_page_to_frags()
404 if (pkt->tiddma) { in qib_user_sdma_page_to_frags()
405 pkt->tidsm[pkt->tidsmidx].length -= newlen; in qib_user_sdma_page_to_frags()
406 if (pkt->tidsm[pkt->tidsmidx].length) { in qib_user_sdma_page_to_frags()
407 pkt->tidsm[pkt->tidsmidx].offset += newlen; in qib_user_sdma_page_to_frags()
409 pkt->tidsmidx++; in qib_user_sdma_page_to_frags()
410 if (pkt->tidsmidx == pkt->tidsmcount) { in qib_user_sdma_page_to_frags()
436 pbclen = pkt->addr[pkt->index].length; in qib_user_sdma_page_to_frags()
443 pbc16 = (__le16 *)pkt->addr[pkt->index].kvaddr; in qib_user_sdma_page_to_frags()
450 pbc16[0] = cpu_to_le16(le16_to_cpu(pbc16[0])-(pkt->bytes_togo>>2)); in qib_user_sdma_page_to_frags()
455 if (pkt->tiddma) { in qib_user_sdma_page_to_frags()
477 if (!pkt->addr[pkt->index].addr) { in qib_user_sdma_page_to_frags()
478 pkt->addr[pkt->index].addr = in qib_user_sdma_page_to_frags()
480 pkt->addr[pkt->index].kvaddr, in qib_user_sdma_page_to_frags()
481 pkt->addr[pkt->index].dma_length, in qib_user_sdma_page_to_frags()
484 pkt->addr[pkt->index].addr)) { in qib_user_sdma_page_to_frags()
488 pkt->addr[pkt->index].dma_mapped = 1; in qib_user_sdma_page_to_frags()
496 pbc16[0] = cpu_to_le16(le16_to_cpu(pbc16[0])-(pkt->payload_size>>2)); in qib_user_sdma_page_to_frags()
501 if (pkt->tiddma) { in qib_user_sdma_page_to_frags()
505 (pkt->tidsm[pkt->tidsmidx].tid<<QLOGIC_IB_I_TID_SHIFT) + in qib_user_sdma_page_to_frags()
506 (pkt->tidsm[pkt->tidsmidx].offset>>2)); in qib_user_sdma_page_to_frags()
509 hdr->uwords[2] += pkt->payload_size; in qib_user_sdma_page_to_frags()
521 if (pkt->tiddma) in qib_user_sdma_page_to_frags()
524 seqnum.pkt++; in qib_user_sdma_page_to_frags()
528 qib_user_sdma_init_frag(pkt, pkt->naddr, /* index */ in qib_user_sdma_page_to_frags()
534 pkt->index = pkt->naddr; in qib_user_sdma_page_to_frags()
535 pkt->payload_size = 0; in qib_user_sdma_page_to_frags()
536 pkt->naddr++; in qib_user_sdma_page_to_frags()
537 if (pkt->naddr == pkt->addrlimit) { in qib_user_sdma_page_to_frags()
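qib_user_sdma_page_to_frags() carves a user page into fragments, clamping each candidate length so a fragment never crosses the current TID session member (for tid-sdma packets) and never pushes the accumulated payload past frag_size; the remaining bookkeeping (bytes_togo, payload_size, naddr against addrlimit) follows directly from the lines above. A simplified, user-space model of just the clamping arithmetic (hypothetical helper, plain integer types, no DMA or header handling):

	#include <stdint.h>

	/* Model of the length clamping seen at lines 343-357 above: limit
	 * first by the current TID session member, then by the space left
	 * before the packet reaches frag_size. */
	static uint32_t clamp_frag_len(uint32_t len, int tiddma,
				       uint32_t tidsm_len_left,
				       uint32_t payload_size,
				       uint32_t frag_size)
	{
		uint32_t newlen = len;

		if (tiddma && newlen > tidsm_len_left)
			newlen = tidsm_len_left;	/* stop at the TID boundary */
		if (payload_size + newlen >= frag_size)
			newlen = frag_size - payload_size; /* stop at frag_size */
		return newlen;
	}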
563 struct qib_user_sdma_pkt *pkt, in qib_user_sdma_coalesce() argument
595 ret = qib_user_sdma_page_to_frags(dd, pq, pkt, in qib_user_sdma_coalesce()
621 struct qib_user_sdma_pkt *pkt, in qib_user_sdma_free_pkt_frag() argument
626 if (pkt->addr[i].page) { in qib_user_sdma_free_pkt_frag()
628 if (pkt->addr[i].dma_mapped) in qib_user_sdma_free_pkt_frag()
630 pkt->addr[i].addr, in qib_user_sdma_free_pkt_frag()
631 pkt->addr[i].dma_length, in qib_user_sdma_free_pkt_frag()
634 if (pkt->addr[i].kvaddr) in qib_user_sdma_free_pkt_frag()
635 kunmap(pkt->addr[i].page); in qib_user_sdma_free_pkt_frag()
637 if (pkt->addr[i].put_page) in qib_user_sdma_free_pkt_frag()
638 put_page(pkt->addr[i].page); in qib_user_sdma_free_pkt_frag()
640 __free_page(pkt->addr[i].page); in qib_user_sdma_free_pkt_frag()
641 } else if (pkt->addr[i].kvaddr) { in qib_user_sdma_free_pkt_frag()
643 if (pkt->addr[i].dma_mapped) { in qib_user_sdma_free_pkt_frag()
646 pkt->addr[i].addr, in qib_user_sdma_free_pkt_frag()
647 pkt->addr[i].dma_length, in qib_user_sdma_free_pkt_frag()
649 kfree(pkt->addr[i].kvaddr); in qib_user_sdma_free_pkt_frag()
650 } else if (pkt->addr[i].addr) { in qib_user_sdma_free_pkt_frag()
653 pkt->addr[i].kvaddr, pkt->addr[i].addr); in qib_user_sdma_free_pkt_frag()
656 kfree(pkt->addr[i].kvaddr); in qib_user_sdma_free_pkt_frag()
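The free path above distinguishes page-backed fragments from kmalloc'ed and pool-allocated header buffers. A condensed reading of qib_user_sdma_free_pkt_frag(); because this cross-reference only lists the lines that mention pkt, the unmap and pool calls (dma_unmap_page, dma_unmap_single, dma_pool_free) and the pq->header_cache pool name are my assumptions about the elided lines:

	if (pkt->addr[i].page) {			/* page-backed fragment */
		if (pkt->addr[i].dma_mapped)
			dma_unmap_page(dev, pkt->addr[i].addr,
				       pkt->addr[i].dma_length, DMA_TO_DEVICE);
		if (pkt->addr[i].kvaddr)
			kunmap(pkt->addr[i].page);
		if (pkt->addr[i].put_page)
			put_page(pkt->addr[i].page);	/* pinned user page */
		else
			__free_page(pkt->addr[i].page);	/* driver-owned page */
	} else if (pkt->addr[i].kvaddr) {		/* header buffer, no page */
		if (pkt->addr[i].dma_mapped) {
			dma_unmap_single(dev, pkt->addr[i].addr,
					 pkt->addr[i].dma_length, DMA_TO_DEVICE);
			kfree(pkt->addr[i].kvaddr);
		} else if (pkt->addr[i].addr) {
			/* header that came from a dma pool */
			dma_pool_free(pq->header_cache,
				      pkt->addr[i].kvaddr, pkt->addr[i].addr);
		} else {
			kfree(pkt->addr[i].kvaddr);
		}
	}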
664 struct qib_user_sdma_pkt *pkt, in qib_user_sdma_pin_pages() argument
691 ret = qib_user_sdma_page_to_frags(dd, pq, pkt, in qib_user_sdma_pin_pages()
721 struct qib_user_sdma_pkt *pkt, in qib_user_sdma_pin_pkt() argument
732 ret = qib_user_sdma_pin_pages(dd, pq, pkt, addr, in qib_user_sdma_pin_pkt()
742 for (idx = 1; idx < pkt->naddr; idx++) in qib_user_sdma_pin_pkt()
743 qib_user_sdma_free_pkt_frag(&dd->pcidev->dev, pq, pkt, idx); in qib_user_sdma_pin_pkt()
748 if (pkt->addr[0].dma_mapped) { in qib_user_sdma_pin_pkt()
750 pkt->addr[0].addr, in qib_user_sdma_pin_pkt()
751 pkt->addr[0].dma_length, in qib_user_sdma_pin_pkt()
753 pkt->addr[0].addr = 0; in qib_user_sdma_pin_pkt()
754 pkt->addr[0].dma_mapped = 0; in qib_user_sdma_pin_pkt()
763 struct qib_user_sdma_pkt *pkt, in qib_user_sdma_init_payload() argument
769 if (pkt->frag_size == pkt->bytes_togo && in qib_user_sdma_init_payload()
770 npages >= ARRAY_SIZE(pkt->addr)) in qib_user_sdma_init_payload()
771 ret = qib_user_sdma_coalesce(dd, pq, pkt, iov, niov); in qib_user_sdma_init_payload()
773 ret = qib_user_sdma_pin_pkt(dd, pq, pkt, iov, niov); in qib_user_sdma_init_payload()
783 struct qib_user_sdma_pkt *pkt, *pkt_next; in qib_user_sdma_free_pkt_list() local
785 list_for_each_entry_safe(pkt, pkt_next, list, list) { in qib_user_sdma_free_pkt_list()
788 for (i = 0; i < pkt->naddr; i++) in qib_user_sdma_free_pkt_list()
789 qib_user_sdma_free_pkt_frag(dev, pq, pkt, i); in qib_user_sdma_free_pkt_list()
791 if (pkt->largepkt) in qib_user_sdma_free_pkt_list()
792 kfree(pkt); in qib_user_sdma_free_pkt_list()
794 kmem_cache_free(pq->pkt_slab, pkt); in qib_user_sdma_free_pkt_list()
819 struct qib_user_sdma_pkt *pkt = NULL; in qib_user_sdma_queue_pkts() local
914 pktsize = sizeof(*pkt) + sizeof(pkt->addr[0])*n; in qib_user_sdma_queue_pkts()
929 pkt = kmalloc(pktsize+tidsmsize, GFP_KERNEL); in qib_user_sdma_queue_pkts()
930 if (!pkt) { in qib_user_sdma_queue_pkts()
934 pkt->largepkt = 1; in qib_user_sdma_queue_pkts()
935 pkt->frag_size = frag_size; in qib_user_sdma_queue_pkts()
936 pkt->addrlimit = n + ARRAY_SIZE(pkt->addr); in qib_user_sdma_queue_pkts()
939 char *tidsm = (char *)pkt + pktsize; in qib_user_sdma_queue_pkts()
947 pkt->tidsm = in qib_user_sdma_queue_pkts()
949 pkt->tidsmcount = tidsmsize/ in qib_user_sdma_queue_pkts()
951 pkt->tidsmidx = 0; in qib_user_sdma_queue_pkts()
962 pkt = kmem_cache_alloc(pq->pkt_slab, GFP_KERNEL); in qib_user_sdma_queue_pkts()
963 if (!pkt) { in qib_user_sdma_queue_pkts()
967 pkt->largepkt = 0; in qib_user_sdma_queue_pkts()
968 pkt->frag_size = bytes_togo; in qib_user_sdma_queue_pkts()
969 pkt->addrlimit = ARRAY_SIZE(pkt->addr); in qib_user_sdma_queue_pkts()
971 pkt->bytes_togo = bytes_togo; in qib_user_sdma_queue_pkts()
972 pkt->payload_size = 0; in qib_user_sdma_queue_pkts()
973 pkt->counter = counter; in qib_user_sdma_queue_pkts()
974 pkt->tiddma = tiddma; in qib_user_sdma_queue_pkts()
977 qib_user_sdma_init_frag(pkt, 0, /* index */ in qib_user_sdma_queue_pkts()
983 pkt->index = 0; in qib_user_sdma_queue_pkts()
984 pkt->naddr = 1; in qib_user_sdma_queue_pkts()
987 ret = qib_user_sdma_init_payload(dd, pq, pkt, in qib_user_sdma_queue_pkts()
995 pkt->addr[0].last_desc = 1; in qib_user_sdma_queue_pkts()
1009 pkt->addr[0].addr = dma_addr; in qib_user_sdma_queue_pkts()
1010 pkt->addr[0].dma_mapped = 1; in qib_user_sdma_queue_pkts()
1016 pkt->pq = pq; in qib_user_sdma_queue_pkts()
1017 pkt->index = 0; /* reset index for push on hw */ in qib_user_sdma_queue_pkts()
1018 *ndesc += pkt->naddr; in qib_user_sdma_queue_pkts()
1020 list_add_tail(&pkt->list, list); in qib_user_sdma_queue_pkts()
1028 if (pkt->largepkt) in qib_user_sdma_queue_pkts()
1029 kfree(pkt); in qib_user_sdma_queue_pkts()
1031 kmem_cache_free(pq->pkt_slab, pkt); in qib_user_sdma_queue_pkts()
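qib_user_sdma_queue_pkts() above uses two allocation paths, and its error path at lines 1028-1031 (like qib_user_sdma_free_pkt_list() earlier) frees accordingly: oversized packets come from kmalloc() and are flagged with largepkt, ordinary ones come from the per-queue slab cache. A minimal sketch of that pairing ("oversized" is a placeholder condition, error handling trimmed; assumes pq->pkt_slab is a struct kmem_cache *):

	/* allocate: large packets get a kmalloc'ed pkt with extra addr[] and
	 * tidsm space appended, small ones come from the slab cache */
	if (oversized) {
		pkt = kmalloc(pktsize + tidsmsize, GFP_KERNEL);
		if (!pkt)
			return -ENOMEM;
		pkt->largepkt = 1;
		pkt->frag_size = frag_size;
	} else {
		pkt = kmem_cache_alloc(pq->pkt_slab, GFP_KERNEL);
		if (!pkt)
			return -ENOMEM;
		pkt->largepkt = 0;
		pkt->frag_size = bytes_togo;	/* whole payload fits one frag */
	}

	/* free: mirror the allocation, keyed off pkt->largepkt */
	if (pkt->largepkt)
		kfree(pkt);
	else
		kmem_cache_free(pq->pkt_slab, pkt);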
1055 struct qib_user_sdma_pkt *pkt; in qib_user_sdma_queue_clean() local
1071 list_for_each_entry_safe(pkt, pkt_prev, &pq->sent, list) { in qib_user_sdma_queue_clean()
1072 s64 descd = ppd->sdma_descq_removed - pkt->added; in qib_user_sdma_queue_clean()
1077 list_move_tail(&pkt->list, &free_list); in qib_user_sdma_queue_clean()
1088 pkt = list_entry(free_list.prev, in qib_user_sdma_queue_clean()
1090 counter = pkt->counter; in qib_user_sdma_queue_clean()
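The completion check in qib_user_sdma_queue_clean() compares how far the hardware's removed-descriptor counter has advanced past the point recorded when the packet was queued (pkt->added, set in qib_user_sdma_send_desc() below). A minimal model of that test; the listing only shows the subtraction and the list move, so the stop-on-incomplete branch is my reading, and the signed 64-bit difference is what keeps the comparison valid as the counters grow:

	/* complete once every descriptor this packet contributed has been
	 * retired by the engine; otherwise stop scanning the sent list */
	s64 descd = ppd->sdma_descq_removed - pkt->added;

	if (descd < 0)
		break;
	list_move_tail(&pkt->list, &free_list);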
1151 struct qib_user_sdma_pkt *pkt; in qib_user_sdma_queue_drain() local
1161 list_for_each_entry_safe(pkt, pkt_prev, in qib_user_sdma_queue_drain()
1163 if (pkt->pq == pq) { in qib_user_sdma_queue_drain()
1164 list_move_tail(&pkt->list, &pq->sent); in qib_user_sdma_queue_drain()
1212 struct qib_user_sdma_pkt *pkt, int idx, in qib_user_sdma_send_frag() argument
1215 const u64 addr = (u64) pkt->addr[idx].addr + in qib_user_sdma_send_frag()
1216 (u64) pkt->addr[idx].offset; in qib_user_sdma_send_frag()
1217 const u64 dwlen = (u64) pkt->addr[idx].length / 4; in qib_user_sdma_send_frag()
1224 if (pkt->addr[idx].first_desc) in qib_user_sdma_send_frag()
1226 if (pkt->addr[idx].last_desc) { in qib_user_sdma_send_frag()
1255 struct qib_user_sdma_pkt *pkt = in qib_user_sdma_send_desc() local
1262 for (i = pkt->index; i < pkt->naddr && nfree; i++) { in qib_user_sdma_send_desc()
1263 qib_user_sdma_send_frag(ppd, pkt, i, ofs, tail, gen); in qib_user_sdma_send_desc()
1264 ofs += pkt->addr[i].length >> 2; in qib_user_sdma_send_desc()
1274 if (pkt->addr[i].last_desc == 0) in qib_user_sdma_send_desc()
1284 for (j = pkt->index; j <= i; j++) { in qib_user_sdma_send_desc()
1291 c += i + 1 - pkt->index; in qib_user_sdma_send_desc()
1292 pkt->index = i + 1; /* index for next first */ in qib_user_sdma_send_desc()
1300 if (pkt->index == pkt->naddr) { in qib_user_sdma_send_desc()
1301 pkt->added = ppd->sdma_descq_added; in qib_user_sdma_send_desc()
1302 pkt->pq->added = pkt->added; in qib_user_sdma_send_desc()
1303 pkt->pq->num_pending--; in qib_user_sdma_send_desc()
1304 spin_lock(&pkt->pq->sent_lock); in qib_user_sdma_send_desc()
1305 pkt->pq->num_sending++; in qib_user_sdma_send_desc()
1306 list_move_tail(&pkt->list, &pkt->pq->sent); in qib_user_sdma_send_desc()
1307 spin_unlock(&pkt->pq->sent_lock); in qib_user_sdma_send_desc()
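Taken together, these references trace a packet's life cycle through the user sdma queue. A compact summary, inferred from the function names and list operations in this cross-reference:

	/*
	 * qib_user_sdma_queue_pkts()    - allocate pkt, build pkt->addr[]
	 *                                 fragments from the user iovecs
	 * qib_user_sdma_send_desc()     - push fragments to the sdma
	 *                                 descriptor ring, record pkt->added,
	 *                                 move pkt to pq->sent
	 * qib_user_sdma_queue_clean()   - once the ring has retired those
	 *                                 descriptors, move pkt to a free list
	 * qib_user_sdma_free_pkt_list() - unmap/unpin every fragment, then
	 *                                 kfree() or return pkt to the slab
	 */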