Lines matching references to dd (ipath_user_sdma.c)
175 static int ipath_user_sdma_coalesce(const struct ipath_devdata *dd, in ipath_user_sdma_coalesce() argument
208 dma_addr = dma_map_page(&dd->pcidev->dev, page, 0, len, in ipath_user_sdma_coalesce()
210 if (dma_mapping_error(&dd->pcidev->dev, dma_addr)) { in ipath_user_sdma_coalesce()
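Lines 208-210 are the map-then-verify idiom that recurs through this file: dma_map_page() can hand back a poisoned cookie rather than failing loudly, so every mapping is tested with dma_mapping_error() before the hardware ever sees the address. A minimal sketch of the idiom; the unwind shown (freeing the coalesce page, returning -ENOMEM) is an assumption:

    /* Map the coalesced payload page for device reads and verify the
     * handle; an unchecked mapping error would hand the SDMA engine a
     * bogus bus address. The error path below is assumed. */
    dma_addr_t dma_addr = dma_map_page(&dd->pcidev->dev, page, 0, len,
                                       DMA_TO_DEVICE);
    if (dma_mapping_error(&dd->pcidev->dev, dma_addr)) {
        __free_page(page);
        return -ENOMEM;
    }

The same check guards the other dma_map_page() calls below (lines 299-303 and 508-510).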
275 static int ipath_user_sdma_pin_pages(const struct ipath_devdata *dd, in ipath_user_sdma_pin_pages() argument
299 dma_map_page(&dd->pcidev->dev, in ipath_user_sdma_pin_pages()
303 if (dma_mapping_error(&dd->pcidev->dev, dma_addr)) { in ipath_user_sdma_pin_pages()
321 static int ipath_user_sdma_pin_pkt(const struct ipath_devdata *dd, in ipath_user_sdma_pin_pkt() argument
334 ret = ipath_user_sdma_pin_pages(dd, pkt, in ipath_user_sdma_pin_pkt()
345 ipath_user_sdma_free_pkt_frag(&dd->pcidev->dev, pq, pkt, idx); in ipath_user_sdma_pin_pkt()
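Line 345 is the failure path of the pinning step: fragments already pinned and mapped must be released before the error propagates. A sketch of that unwind, assuming pkt->naddr counts the fragments populated before the failure:

    /* Unwind a partially built packet: release each recorded fragment
     * (DMA unmap plus user-page unpin) via the per-fragment helper. */
    for (idx = 0; idx < pkt->naddr; idx++)
        ipath_user_sdma_free_pkt_frag(&dd->pcidev->dev, pq, pkt, idx);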
351 static int ipath_user_sdma_init_payload(const struct ipath_devdata *dd, in ipath_user_sdma_init_payload() argument
360 ret = ipath_user_sdma_coalesce(dd, pkt, iov, niov); in ipath_user_sdma_init_payload()
362 ret = ipath_user_sdma_pin_pkt(dd, pq, pkt, iov, niov); in ipath_user_sdma_init_payload()
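Lines 360-362 are the two arms of payload setup: either copy the user iovec into one freshly allocated page (coalesce) or pin the user pages in place and DMA from them directly. A sketch of the dispatch; the threshold shown, one fragment slot per page in pkt->addr, is an assumption:

    /* If the payload spans more pages than the packet's fragment table
     * can describe, fall back to copying it into a single page;
     * otherwise pin the user memory and map it in place. */
    if (npages >= ARRAY_SIZE(pkt->addr))
        ret = ipath_user_sdma_coalesce(dd, pkt, iov, niov);
    else
        ret = ipath_user_sdma_pin_pkt(dd, pq, pkt, iov, niov);

Coalescing trades one copy for fewer descriptors; pinning avoids the copy at the cost of one mapped fragment per user page.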
391 static int ipath_user_sdma_queue_pkts(const struct ipath_devdata *dd, in ipath_user_sdma_queue_pkts() argument
508 dma_addr = dma_map_page(&dd->pcidev->dev, in ipath_user_sdma_queue_pkts()
510 if (dma_mapping_error(&dd->pcidev->dev, dma_addr)) { in ipath_user_sdma_queue_pkts()
522 ret = ipath_user_sdma_init_payload(dd, pq, pkt, in ipath_user_sdma_queue_pkts()
540 dma_unmap_page(&dd->pcidev->dev, dma_addr, len, DMA_TO_DEVICE); in ipath_user_sdma_queue_pkts()
550 ipath_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, list); in ipath_user_sdma_queue_pkts()
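Lines 508-550 show the per-packet build and its two-level unwind: the header page is mapped first (508-510), the payload attached second (522); a payload failure undoes this packet's header mapping (540), and a failure anywhere releases the whole list built so far (550). A condensed sketch of that ordering; the argument names past pkt are assumptions:

    /* Header mapping exists by this point; on payload failure, undo it,
     * then drop every packet already queued on the local list. */
    ret = ipath_user_sdma_init_payload(dd, pq, pkt, iov + idx,
                                       nfrags, npages);
    if (ret < 0) {
        dma_unmap_page(&dd->pcidev->dev, dma_addr, len, DMA_TO_DEVICE);
        ipath_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, list);
        return ret;
    }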
562 static int ipath_user_sdma_queue_clean(const struct ipath_devdata *dd, in ipath_user_sdma_queue_clean() argument
573 s64 descd = dd->ipath_sdma_descq_removed - pkt->added; in ipath_user_sdma_queue_clean()
591 ipath_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &free_list); in ipath_user_sdma_queue_clean()
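Line 573 is the completion test at the heart of the reaper: dd->ipath_sdma_descq_removed counts descriptors the hardware has retired since the ring was created, and pkt->added latches that counter's value when the packet was posted (line 772). A sketch of the scan, assuming in-flight packets sit in posting order on a pq->sent list:

    /* Reap completed packets: once the retired-descriptor count reaches
     * a packet's posting stamp, all of its descriptors are done. Signed
     * 64-bit arithmetic keeps the comparison correct as both counters
     * grow without bound. */
    struct ipath_user_sdma_pkt *pkt, *pkt_prev;
    LIST_HEAD(free_list);

    list_for_each_entry_safe(pkt, pkt_prev, &pq->sent, list) {
        s64 descd = dd->ipath_sdma_descq_removed - pkt->added;

        if (descd < 0)
            break;          /* posted later, so not done either */

        list_move_tail(&pkt->list, &free_list);
    }
    ipath_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &free_list);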
609 static int ipath_user_sdma_hwqueue_clean(struct ipath_devdata *dd) in ipath_user_sdma_hwqueue_clean() argument
614 spin_lock_irqsave(&dd->ipath_sdma_lock, flags); in ipath_user_sdma_hwqueue_clean()
615 ret = ipath_sdma_make_progress(dd); in ipath_user_sdma_hwqueue_clean()
616 spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags); in ipath_user_sdma_hwqueue_clean()
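Lines 614-616 are the entire body of the hardware-queue helper, annotated below. The IRQ-safe lock variant suggests ipath_sdma_make_progress() also runs under this lock from interrupt context; that is an inference from the irqsave usage, not something these lines prove:

    /* Serialize with the interrupt-side users of the SDMA state, then
     * let the engine retire whatever descriptors it has finished. */
    spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
    ret = ipath_sdma_make_progress(dd);
    spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);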
622 void ipath_user_sdma_queue_drain(struct ipath_devdata *dd, in ipath_user_sdma_queue_drain() argument
636 ipath_user_sdma_hwqueue_clean(dd); in ipath_user_sdma_queue_drain()
637 ipath_user_sdma_queue_clean(dd, pq); in ipath_user_sdma_queue_drain()
649 ipath_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &free_list); in ipath_user_sdma_queue_drain()
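The drain path (lines 622-649) repeats the two clean steps until nothing in-flight remains, then force-frees whatever is left (649). A sketch of the retry loop; the retry bound, the sleep interval, and the pq->sent emptiness test are assumptions:

    /* Poll the queue empty: nudge hardware, reap completions, and give
     * the engine time between attempts. Bounded so a wedged engine
     * cannot hang the drain forever. */
    int i;

    for (i = 0; i < 100; i++) {
        if (list_empty(&pq->sent))
            break;
        ipath_user_sdma_hwqueue_clean(dd);
        ipath_user_sdma_queue_clean(dd, pq);
        msleep(10);
    }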
654 static inline __le64 ipath_sdma_make_desc0(struct ipath_devdata *dd, in ipath_sdma_make_desc0() argument
660 ((dd->ipath_sdma_generation & 3ULL) << 30) | in ipath_sdma_make_desc0()
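Line 660 reveals the generation scheme: descriptor word 0 carries a 2-bit generation tag in bits 31:30, incremented each time the tail wraps (line 746), so the engine can tell a freshly written slot from a stale one left over from the previous lap around the ring. A sketch of the word's assembly; only the generation field is confirmed by line 660, the remaining field placements are assumptions for illustration:

    /* Pack descriptor word 0: buffer address, generation tag, length in
     * dwords, and dword offset, byte-swapped for the device. */
    return cpu_to_le64(((addr & 0xfffffffcULL) << 32) |             /* phys addr  */
                       ((dd->ipath_sdma_generation & 3ULL) << 30) | /* generation */
                       ((dwlen & 0x7ffULL) << 16) |                 /* dword len  */
                       (dwoffset & 0x7ffULL));                      /* dword offs */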
684 static void ipath_user_sdma_send_frag(struct ipath_devdata *dd, in ipath_user_sdma_send_frag() argument
694 descqp = &dd->ipath_sdma_descq[tail].qw[0]; in ipath_user_sdma_send_frag()
696 descq0 = ipath_sdma_make_desc0(dd, addr, dwlen, ofs); in ipath_user_sdma_send_frag()
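Lines 694-696 place one fragment into the ring: word 0 comes from the helper above, and a second quadword (not captured by this listing) presumably carries the rest of the buffer address. A sketch, with the qw[1] contents an assumption:

    /* Write both halves of the 16-byte descriptor at the current tail
     * slot; qw[1] holding the upper address bits is assumed. */
    __le64 *descqp = &dd->ipath_sdma_descq[tail].qw[0];

    descqp[0] = ipath_sdma_make_desc0(dd, addr, dwlen, ofs);
    descqp[1] = cpu_to_le64(addr >> 32);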
707 static int ipath_user_sdma_push_pkts(struct ipath_devdata *dd, in ipath_user_sdma_push_pkts() argument
718 if (unlikely(!(dd->ipath_flags & IPATH_LINKACTIVE))) in ipath_user_sdma_push_pkts()
721 spin_lock_irqsave(&dd->ipath_sdma_lock, flags); in ipath_user_sdma_push_pkts()
723 if (unlikely(dd->ipath_sdma_status & IPATH_SDMA_ABORT_MASK)) { in ipath_user_sdma_push_pkts()
728 tail = dd->ipath_sdma_descq_tail; in ipath_user_sdma_push_pkts()
737 if (pkt->naddr > ipath_sdma_descq_freecnt(dd)) in ipath_user_sdma_push_pkts()
741 ipath_user_sdma_send_frag(dd, pkt, i, ofs, tail); in ipath_user_sdma_push_pkts()
744 if (++tail == dd->ipath_sdma_descq_cnt) { in ipath_user_sdma_push_pkts()
746 ++dd->ipath_sdma_generation; in ipath_user_sdma_push_pkts()
750 if ((ofs<<2) > dd->ipath_ibmaxlen) { in ipath_user_sdma_push_pkts()
752 ofs<<2, dd->ipath_ibmaxlen); in ipath_user_sdma_push_pkts()
764 dd->ipath_sdma_descq[dtail].qw[0] |= in ipath_user_sdma_push_pkts()
766 if (++dtail == dd->ipath_sdma_descq_cnt) in ipath_user_sdma_push_pkts()
771 dd->ipath_sdma_descq_added += pkt->naddr; in ipath_user_sdma_push_pkts()
772 pkt->added = dd->ipath_sdma_descq_added; in ipath_user_sdma_push_pkts()
779 if (dd->ipath_sdma_descq_tail != tail) { in ipath_user_sdma_push_pkts()
781 ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmatail, tail); in ipath_user_sdma_push_pkts()
782 dd->ipath_sdma_descq_tail = tail; in ipath_user_sdma_push_pkts()
786 spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags); in ipath_user_sdma_push_pkts()
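Lines 718-786 are the posting protocol: bail out unless the link is active (718) and no abort is pending (723), refuse a packet that exceeds the free-slot count (737), write each fragment at the advancing tail, wrap the tail and bump the generation together (744-746), sanity-check the accumulated dword length against the IB maximum (750-752), flag the first descriptor (764), and only then publish the new tail to hardware in a single register write (781). A condensed sketch of the inner loop and the doorbell; the fragment length field name and the write barrier are assumptions:

    /* Post every fragment of the packet, wrapping the software tail and
     * advancing the generation tag in lockstep so stale slots are
     * distinguishable after the wrap. */
    for (i = 0; i < pkt->naddr; i++) {
        ipath_user_sdma_send_frag(dd, pkt, i, ofs, tail);
        ofs += pkt->addr[i].length >> 2;       /* running dword offset */

        if (++tail == dd->ipath_sdma_descq_cnt) {
            tail = 0;
            ++dd->ipath_sdma_generation;
        }
    }

    /* One doorbell per batch: make the descriptor stores visible before
     * hardware learns the new tail (the barrier is assumed). */
    if (dd->ipath_sdma_descq_tail != tail) {
        wmb();
        ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmatail, tail);
        dd->ipath_sdma_descq_tail = tail;
    }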
791 int ipath_user_sdma_writev(struct ipath_devdata *dd, in ipath_user_sdma_writev() argument
804 if (dd->ipath_sdma_descq_added != dd->ipath_sdma_descq_removed) { in ipath_user_sdma_writev()
805 ipath_user_sdma_hwqueue_clean(dd); in ipath_user_sdma_writev()
806 ipath_user_sdma_queue_clean(dd, pq); in ipath_user_sdma_writev()
812 ret = ipath_user_sdma_queue_pkts(dd, pq, &list, iov, dim, mxp); in ipath_user_sdma_writev()
827 if (ipath_sdma_descq_freecnt(dd) < ret * 4) { in ipath_user_sdma_writev()
828 ipath_user_sdma_hwqueue_clean(dd); in ipath_user_sdma_writev()
829 ipath_user_sdma_queue_clean(dd, pq); in ipath_user_sdma_writev()
832 ret = ipath_user_sdma_push_pkts(dd, pq, &list); in ipath_user_sdma_writev()
847 ipath_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &list); in ipath_user_sdma_writev()
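ipath_user_sdma_writev() (lines 791-847) strings the stages together: reap any backlog before building new work (804-806), convert the user iovecs into a packet list (812), reap again if free descriptors look tight (827-829, where the 4-per-packet margin presumably covers header plus fragments), push, and free anything that never made it onto the ring (847). A condensed sketch, locking omitted:

    /* Reap first so a long-running sender cannot exhaust the ring. */
    if (dd->ipath_sdma_descq_added != dd->ipath_sdma_descq_removed) {
        ipath_user_sdma_hwqueue_clean(dd);
        ipath_user_sdma_queue_clean(dd, pq);
    }

    ret = ipath_user_sdma_queue_pkts(dd, pq, &list, iov, dim, mxp);
    if (ret <= 0)
        goto done;

    /* Heuristic headroom check before committing the batch. */
    if (ipath_sdma_descq_freecnt(dd) < ret * 4) {
        ipath_user_sdma_hwqueue_clean(dd);
        ipath_user_sdma_queue_clean(dd, pq);
    }

    ret = ipath_user_sdma_push_pkts(dd, pq, &list);
done:
    if (!list_empty(&list))
        ipath_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &list);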
853 int ipath_user_sdma_make_progress(struct ipath_devdata *dd, in ipath_user_sdma_make_progress() argument
859 ipath_user_sdma_hwqueue_clean(dd); in ipath_user_sdma_make_progress()
860 ret = ipath_user_sdma_queue_clean(dd, pq); in ipath_user_sdma_make_progress()