Lines matching refs:qp — references to the NTB transport queue pair (qp) in drivers/ntb/ntb_transport.c
106 struct ntb_transport_qp *qp; member
134 void (*tx_handler)(struct ntb_transport_qp *qp, void *qp_data,
144 void (*rx_handler)(struct ntb_transport_qp *qp, void *qp_data,
249 #define QP_TO_MW(nt, qp) ((qp) % nt->mw_count) argument
439 struct ntb_transport_qp *qp; in debugfs_read() local
443 qp = filp->private_data; in debugfs_read()
445 if (!qp || !qp->link_is_up) in debugfs_read()
458 "rx_bytes - \t%llu\n", qp->rx_bytes); in debugfs_read()
460 "rx_pkts - \t%llu\n", qp->rx_pkts); in debugfs_read()
462 "rx_memcpy - \t%llu\n", qp->rx_memcpy); in debugfs_read()
464 "rx_async - \t%llu\n", qp->rx_async); in debugfs_read()
466 "rx_ring_empty - %llu\n", qp->rx_ring_empty); in debugfs_read()
468 "rx_err_no_buf - %llu\n", qp->rx_err_no_buf); in debugfs_read()
470 "rx_err_oflow - \t%llu\n", qp->rx_err_oflow); in debugfs_read()
472 "rx_err_ver - \t%llu\n", qp->rx_err_ver); in debugfs_read()
474 "rx_buff - \t0x%p\n", qp->rx_buff); in debugfs_read()
476 "rx_index - \t%u\n", qp->rx_index); in debugfs_read()
478 "rx_max_entry - \t%u\n\n", qp->rx_max_entry); in debugfs_read()
481 "tx_bytes - \t%llu\n", qp->tx_bytes); in debugfs_read()
483 "tx_pkts - \t%llu\n", qp->tx_pkts); in debugfs_read()
485 "tx_memcpy - \t%llu\n", qp->tx_memcpy); in debugfs_read()
487 "tx_async - \t%llu\n", qp->tx_async); in debugfs_read()
489 "tx_ring_full - \t%llu\n", qp->tx_ring_full); in debugfs_read()
491 "tx_err_no_buf - %llu\n", qp->tx_err_no_buf); in debugfs_read()
493 "tx_mw - \t0x%p\n", qp->tx_mw); in debugfs_read()
495 "tx_index (H) - \t%u\n", qp->tx_index); in debugfs_read()
498 qp->remote_rx_info->entry); in debugfs_read()
500 "tx_max_entry - \t%u\n", qp->tx_max_entry); in debugfs_read()
503 ntb_transport_tx_free_entry(qp)); in debugfs_read()
509 qp->tx_dma_chan ? "Yes" : "No"); in debugfs_read()
512 qp->rx_dma_chan ? "Yes" : "No"); in debugfs_read()
515 qp->link_is_up ? "Up" : "Down"); in debugfs_read()
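
The stats dump above follows the usual debugfs pattern: format everything into a kernel buffer with scnprintf() and copy it out with simple_read_from_buffer(). A condensed sketch of that shape (the buffer size and the two counters shown are illustrative, not the full list printed at lines 458-515):

        static ssize_t debugfs_read(struct file *filp, char __user *ubuf,
                                    size_t count, loff_t *offp)
        {
                struct ntb_transport_qp *qp = filp->private_data;
                char *buf;
                ssize_t ret, out_offset;

                if (!qp || !qp->link_is_up)
                        return 0;

                buf = kmalloc(1024, GFP_KERNEL);        /* size illustrative */
                if (!buf)
                        return -ENOMEM;

                out_offset = scnprintf(buf, 1024,
                                       "rx_bytes - \t%llu\n", qp->rx_bytes);
                out_offset += scnprintf(buf + out_offset, 1024 - out_offset,
                                        "tx_bytes - \t%llu\n", qp->tx_bytes);

                ret = simple_read_from_buffer(ubuf, count, offp, buf, out_offset);
                kfree(buf);
                return ret;
        }
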
587 struct ntb_transport_qp *qp = &nt->qp_vec[qp_num]; in ntb_transport_setup_qp_mw() local
608 qp->rx_buff = mw->virt_addr + rx_size * (qp_num / mw_count); in ntb_transport_setup_qp_mw()
611 qp->remote_rx_info = qp->rx_buff + rx_size; in ntb_transport_setup_qp_mw()
614 qp->rx_max_frame = min(transport_mtu, rx_size / 2); in ntb_transport_setup_qp_mw()
615 qp->rx_max_entry = rx_size / qp->rx_max_frame; in ntb_transport_setup_qp_mw()
616 qp->rx_index = 0; in ntb_transport_setup_qp_mw()
618 qp->remote_rx_info->entry = qp->rx_max_entry - 1; in ntb_transport_setup_qp_mw()
621 for (i = 0; i < qp->rx_max_entry; i++) { in ntb_transport_setup_qp_mw()
622 void *offset = (qp->rx_buff + qp->rx_max_frame * (i + 1) - in ntb_transport_setup_qp_mw()
627 qp->rx_pkts = 0; in ntb_transport_setup_qp_mw()
628 qp->tx_pkts = 0; in ntb_transport_setup_qp_mw()
629 qp->tx_index = 0; in ntb_transport_setup_qp_mw()
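
Lines 608-629 carve each memory window into per-QP slices and size the receive ring: the tail of the slice holds remote_rx_info, each frame is capped at transport_mtu and at half the slice (guaranteeing at least two entries), and rx_max_entry frames fit in what remains. A standalone worked example of that arithmetic, with hypothetical numbers (1 MiB window, 4 QPs on it, the driver's default 0x10000 MTU):

        #include <stdio.h>

        int main(void)
        {
                unsigned int mw_size = 1 << 20;    /* hypothetical MW size */
                unsigned int num_qps_mw = 4;       /* QPs sharing this MW */
                unsigned int transport_mtu = 0x10000;
                unsigned int rx_info_size = 4;     /* sizeof(struct ntb_rx_info) */

                unsigned int rx_size = mw_size / num_qps_mw;  /* per-QP slice */
                rx_size -= rx_info_size;           /* tail holds remote_rx_info */

                /* frame capped at MTU and at half the slice (>= 2 entries) */
                unsigned int rx_max_frame = transport_mtu < rx_size / 2 ?
                                            transport_mtu : rx_size / 2;
                unsigned int rx_max_entry = rx_size / rx_max_frame;

                /* prints rx_size=262140 rx_max_frame=65536 rx_max_entry=3 */
                printf("rx_size=%u rx_max_frame=%u rx_max_entry=%u\n",
                       rx_size, rx_max_frame, rx_max_entry);
                return 0;
        }
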
709 static void ntb_qp_link_down_reset(struct ntb_transport_qp *qp) in ntb_qp_link_down_reset() argument
711 qp->link_is_up = false; in ntb_qp_link_down_reset()
713 qp->tx_index = 0; in ntb_qp_link_down_reset()
714 qp->rx_index = 0; in ntb_qp_link_down_reset()
715 qp->rx_bytes = 0; in ntb_qp_link_down_reset()
716 qp->rx_pkts = 0; in ntb_qp_link_down_reset()
717 qp->rx_ring_empty = 0; in ntb_qp_link_down_reset()
718 qp->rx_err_no_buf = 0; in ntb_qp_link_down_reset()
719 qp->rx_err_oflow = 0; in ntb_qp_link_down_reset()
720 qp->rx_err_ver = 0; in ntb_qp_link_down_reset()
721 qp->rx_memcpy = 0; in ntb_qp_link_down_reset()
722 qp->rx_async = 0; in ntb_qp_link_down_reset()
723 qp->tx_bytes = 0; in ntb_qp_link_down_reset()
724 qp->tx_pkts = 0; in ntb_qp_link_down_reset()
725 qp->tx_ring_full = 0; in ntb_qp_link_down_reset()
726 qp->tx_err_no_buf = 0; in ntb_qp_link_down_reset()
727 qp->tx_memcpy = 0; in ntb_qp_link_down_reset()
728 qp->tx_async = 0; in ntb_qp_link_down_reset()
731 static void ntb_qp_link_cleanup(struct ntb_transport_qp *qp) in ntb_qp_link_cleanup() argument
733 struct ntb_transport_ctx *nt = qp->transport; in ntb_qp_link_cleanup()
736 dev_info(&pdev->dev, "qp %d: Link Cleanup\n", qp->qp_num); in ntb_qp_link_cleanup()
738 cancel_delayed_work_sync(&qp->link_work); in ntb_qp_link_cleanup()
739 ntb_qp_link_down_reset(qp); in ntb_qp_link_cleanup()
741 if (qp->event_handler) in ntb_qp_link_cleanup()
742 qp->event_handler(qp->cb_data, qp->link_is_up); in ntb_qp_link_cleanup()
747 struct ntb_transport_qp *qp = container_of(work, in ntb_qp_link_cleanup_work() local
750 struct ntb_transport_ctx *nt = qp->transport; in ntb_qp_link_cleanup_work()
752 ntb_qp_link_cleanup(qp); in ntb_qp_link_cleanup_work()
755 schedule_delayed_work(&qp->link_work, in ntb_qp_link_cleanup_work()
759 static void ntb_qp_link_down(struct ntb_transport_qp *qp) in ntb_qp_link_down() argument
761 schedule_work(&qp->link_cleanup); in ntb_qp_link_down()
766 struct ntb_transport_qp *qp; in ntb_transport_link_cleanup() local
775 qp = &nt->qp_vec[i]; in ntb_transport_link_cleanup()
776 ntb_qp_link_cleanup(qp); in ntb_transport_link_cleanup()
777 cancel_work_sync(&qp->link_cleanup); in ntb_transport_link_cleanup()
778 cancel_delayed_work_sync(&qp->link_work); in ntb_transport_link_cleanup()
875 struct ntb_transport_qp *qp = &nt->qp_vec[i]; in ntb_transport_link_work() local
879 if (qp->client_ready) in ntb_transport_link_work()
880 schedule_delayed_work(&qp->link_work, 0); in ntb_transport_link_work()
896 struct ntb_transport_qp *qp = container_of(work, in ntb_qp_link_work() local
899 struct pci_dev *pdev = qp->ndev->pdev; in ntb_qp_link_work()
900 struct ntb_transport_ctx *nt = qp->transport; in ntb_qp_link_work()
907 ntb_peer_spad_write(nt->ndev, QP_LINKS, val | BIT(qp->qp_num)); in ntb_qp_link_work()
914 if (val & BIT(qp->qp_num)) { in ntb_qp_link_work()
915 dev_info(&pdev->dev, "qp %d: Link Up\n", qp->qp_num); in ntb_qp_link_work()
916 qp->link_is_up = true; in ntb_qp_link_work()
918 if (qp->event_handler) in ntb_qp_link_work()
919 qp->event_handler(qp->cb_data, qp->link_is_up); in ntb_qp_link_work()
921 tasklet_schedule(&qp->rxc_db_work); in ntb_qp_link_work()
923 schedule_delayed_work(&qp->link_work, in ntb_qp_link_work()
930 struct ntb_transport_qp *qp; in ntb_transport_init_queue() local
942 qp = &nt->qp_vec[qp_num]; in ntb_transport_init_queue()
943 qp->qp_num = qp_num; in ntb_transport_init_queue()
944 qp->transport = nt; in ntb_transport_init_queue()
945 qp->ndev = nt->ndev; in ntb_transport_init_queue()
946 qp->client_ready = false; in ntb_transport_init_queue()
947 qp->event_handler = NULL; in ntb_transport_init_queue()
948 ntb_qp_link_down_reset(qp); in ntb_transport_init_queue()
961 qp->tx_mw = nt->mw_vec[mw_num].vbase + qp_offset; in ntb_transport_init_queue()
962 if (!qp->tx_mw) in ntb_transport_init_queue()
965 qp->tx_mw_phys = mw_base + qp_offset; in ntb_transport_init_queue()
966 if (!qp->tx_mw_phys) in ntb_transport_init_queue()
970 qp->rx_info = qp->tx_mw + tx_size; in ntb_transport_init_queue()
973 qp->tx_max_frame = min(transport_mtu, tx_size / 2); in ntb_transport_init_queue()
974 qp->tx_max_entry = tx_size / qp->tx_max_frame; in ntb_transport_init_queue()
980 qp->debugfs_dir = debugfs_create_dir(debugfs_name, in ntb_transport_init_queue()
983 qp->debugfs_stats = debugfs_create_file("stats", S_IRUSR, in ntb_transport_init_queue()
984 qp->debugfs_dir, qp, in ntb_transport_init_queue()
987 qp->debugfs_dir = NULL; in ntb_transport_init_queue()
988 qp->debugfs_stats = NULL; in ntb_transport_init_queue()
991 INIT_DELAYED_WORK(&qp->link_work, ntb_qp_link_work); in ntb_transport_init_queue()
992 INIT_WORK(&qp->link_cleanup, ntb_qp_link_cleanup_work); in ntb_transport_init_queue()
994 spin_lock_init(&qp->ntb_rx_q_lock); in ntb_transport_init_queue()
995 spin_lock_init(&qp->ntb_tx_free_q_lock); in ntb_transport_init_queue()
997 INIT_LIST_HEAD(&qp->rx_post_q); in ntb_transport_init_queue()
998 INIT_LIST_HEAD(&qp->rx_pend_q); in ntb_transport_init_queue()
999 INIT_LIST_HEAD(&qp->rx_free_q); in ntb_transport_init_queue()
1000 INIT_LIST_HEAD(&qp->tx_free_q); in ntb_transport_init_queue()
1002 tasklet_init(&qp->rxc_db_work, ntb_transport_rxc_db, in ntb_transport_init_queue()
1003 (unsigned long)qp); in ntb_transport_init_queue()
1132 struct ntb_transport_qp *qp; in ntb_transport_free() local
1144 qp = &nt->qp_vec[i]; in ntb_transport_free()
1146 ntb_transport_free_queue(qp); in ntb_transport_free()
1147 debugfs_remove_recursive(qp->debugfs_dir); in ntb_transport_free()
1165 static void ntb_complete_rxc(struct ntb_transport_qp *qp) in ntb_complete_rxc() argument
1172 spin_lock_irqsave(&qp->ntb_rx_q_lock, irqflags); in ntb_complete_rxc()
1174 while (!list_empty(&qp->rx_post_q)) { in ntb_complete_rxc()
1175 entry = list_first_entry(&qp->rx_post_q, in ntb_complete_rxc()
1181 iowrite32(entry->index, &qp->rx_info->entry); in ntb_complete_rxc()
1186 list_move_tail(&entry->entry, &qp->rx_free_q); in ntb_complete_rxc()
1188 spin_unlock_irqrestore(&qp->ntb_rx_q_lock, irqflags); in ntb_complete_rxc()
1190 if (qp->rx_handler && qp->client_ready) in ntb_complete_rxc()
1191 qp->rx_handler(qp, qp->cb_data, cb_data, len); in ntb_complete_rxc()
1193 spin_lock_irqsave(&qp->ntb_rx_q_lock, irqflags); in ntb_complete_rxc()
1196 spin_unlock_irqrestore(&qp->ntb_rx_q_lock, irqflags); in ntb_complete_rxc()
1205 ntb_complete_rxc(entry->qp); in ntb_rx_copy_callback()
1224 struct ntb_transport_qp *qp = entry->qp; in ntb_async_rx() local
1225 struct dma_chan *chan = qp->rx_dma_chan; in ntb_async_rx()
1282 qp->last_cookie = cookie; in ntb_async_rx()
1284 qp->rx_async++; in ntb_async_rx()
1294 qp->rx_memcpy++; in ntb_async_rx()
1297 static int ntb_process_rxc(struct ntb_transport_qp *qp) in ntb_process_rxc() argument
1303 offset = qp->rx_buff + qp->rx_max_frame * qp->rx_index; in ntb_process_rxc()
1304 hdr = offset + qp->rx_max_frame - sizeof(struct ntb_payload_header); in ntb_process_rxc()
1306 dev_dbg(&qp->ndev->pdev->dev, "qp %d: RX ver %u len %d flags %x\n", in ntb_process_rxc()
1307 qp->qp_num, hdr->ver, hdr->len, hdr->flags); in ntb_process_rxc()
1310 dev_dbg(&qp->ndev->pdev->dev, "done flag not set\n"); in ntb_process_rxc()
1311 qp->rx_ring_empty++; in ntb_process_rxc()
1316 dev_dbg(&qp->ndev->pdev->dev, "link down flag set\n"); in ntb_process_rxc()
1317 ntb_qp_link_down(qp); in ntb_process_rxc()
1322 if (hdr->ver != (u32)qp->rx_pkts) { in ntb_process_rxc()
1323 dev_dbg(&qp->ndev->pdev->dev, in ntb_process_rxc()
1325 qp->rx_pkts, hdr->ver); in ntb_process_rxc()
1326 qp->rx_err_ver++; in ntb_process_rxc()
1330 entry = ntb_list_mv(&qp->ntb_rx_q_lock, &qp->rx_pend_q, &qp->rx_post_q); in ntb_process_rxc()
1332 dev_dbg(&qp->ndev->pdev->dev, "no receive buffer\n"); in ntb_process_rxc()
1333 qp->rx_err_no_buf++; in ntb_process_rxc()
1338 entry->index = qp->rx_index; in ntb_process_rxc()
1341 dev_dbg(&qp->ndev->pdev->dev, in ntb_process_rxc()
1344 qp->rx_err_oflow++; in ntb_process_rxc()
1349 ntb_complete_rxc(qp); in ntb_process_rxc()
1351 dev_dbg(&qp->ndev->pdev->dev, in ntb_process_rxc()
1353 qp->rx_index, hdr->ver, hdr->len, entry->len); in ntb_process_rxc()
1355 qp->rx_bytes += hdr->len; in ntb_process_rxc()
1356 qp->rx_pkts++; in ntb_process_rxc()
1363 qp->rx_index++; in ntb_process_rxc()
1364 qp->rx_index %= qp->rx_max_entry; in ntb_process_rxc()
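
Lines 1303-1304 fix the on-window frame layout both sides rely on: each ring slot is rx_max_frame bytes, payload at the front and a small header at the tail of the slot, so the receiver can poll the done flag at a known offset before it knows the payload length. A sketch of that layout (the field names match the hdr->ver/len/flags uses above; DESC_DONE_FLAG and LINK_DOWN_FLAG are the driver's flag bits):

        struct ntb_payload_header {
                unsigned int ver;       /* checked against (u32)qp->rx_pkts */
                unsigned int len;       /* payload bytes in this slot */
                unsigned int flags;     /* DESC_DONE_FLAG, LINK_DOWN_FLAG */
        };

        /* header of slot 'idx', as computed at lines 1303-1304 */
        static struct ntb_payload_header *slot_hdr(void *rx_buff,
                                                   unsigned int rx_max_frame,
                                                   unsigned int idx)
        {
                void *offset = rx_buff + rx_max_frame * idx;

                return offset + rx_max_frame - sizeof(struct ntb_payload_header);
        }
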
1371 struct ntb_transport_qp *qp = (void *)data; in ntb_transport_rxc_db() local
1374 dev_dbg(&qp->ndev->pdev->dev, "%s: doorbell %d received\n", in ntb_transport_rxc_db()
1375 __func__, qp->qp_num); in ntb_transport_rxc_db()
1380 for (i = 0; i < qp->rx_max_entry; i++) { in ntb_transport_rxc_db()
1381 rc = ntb_process_rxc(qp); in ntb_transport_rxc_db()
1386 if (i && qp->rx_dma_chan) in ntb_transport_rxc_db()
1387 dma_async_issue_pending(qp->rx_dma_chan); in ntb_transport_rxc_db()
1389 if (i == qp->rx_max_entry) { in ntb_transport_rxc_db()
1391 tasklet_schedule(&qp->rxc_db_work); in ntb_transport_rxc_db()
1392 } else if (ntb_db_read(qp->ndev) & BIT_ULL(qp->qp_num)) { in ntb_transport_rxc_db()
1394 ntb_db_clear(qp->ndev, BIT_ULL(qp->qp_num)); in ntb_transport_rxc_db()
1396 ntb_db_read(qp->ndev); in ntb_transport_rxc_db()
1402 tasklet_schedule(&qp->rxc_db_work); in ntb_transport_rxc_db()
1409 struct ntb_transport_qp *qp = entry->qp; in ntb_tx_copy_callback() local
1414 ntb_peer_db_set(qp->ndev, BIT_ULL(qp->qp_num)); in ntb_tx_copy_callback()
1421 qp->tx_bytes += entry->len; in ntb_tx_copy_callback()
1423 if (qp->tx_handler) in ntb_tx_copy_callback()
1424 qp->tx_handler(qp, qp->cb_data, entry->cb_data, in ntb_tx_copy_callback()
1428 ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry, &qp->tx_free_q); in ntb_tx_copy_callback()
1449 static void ntb_async_tx(struct ntb_transport_qp *qp, in ntb_async_tx() argument
1454 struct dma_chan *chan = qp->tx_dma_chan; in ntb_async_tx()
1464 offset = qp->tx_mw + qp->tx_max_frame * qp->tx_index; in ntb_async_tx()
1465 hdr = offset + qp->tx_max_frame - sizeof(struct ntb_payload_header); in ntb_async_tx()
1469 iowrite32((u32)qp->tx_pkts, &hdr->ver); in ntb_async_tx()
1478 dest = qp->tx_mw_phys + qp->tx_max_frame * qp->tx_index; in ntb_async_tx()
1513 qp->tx_async++; in ntb_async_tx()
1522 qp->tx_memcpy++; in ntb_async_tx()
1525 static int ntb_process_tx(struct ntb_transport_qp *qp, in ntb_process_tx() argument
1528 if (qp->tx_index == qp->remote_rx_info->entry) { in ntb_process_tx()
1529 qp->tx_ring_full++; in ntb_process_tx()
1533 if (entry->len > qp->tx_max_frame - sizeof(struct ntb_payload_header)) { in ntb_process_tx()
1534 if (qp->tx_handler) in ntb_process_tx()
1535 qp->tx_handler(qp, qp->cb_data, NULL, -EIO); in ntb_process_tx()
1537 ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry, in ntb_process_tx()
1538 &qp->tx_free_q); in ntb_process_tx()
1542 ntb_async_tx(qp, entry); in ntb_process_tx()
1544 qp->tx_index++; in ntb_process_tx()
1545 qp->tx_index %= qp->tx_max_entry; in ntb_process_tx()
1547 qp->tx_pkts++; in ntb_process_tx()
1552 static void ntb_send_link_down(struct ntb_transport_qp *qp) in ntb_send_link_down() argument
1554 struct pci_dev *pdev = qp->ndev->pdev; in ntb_send_link_down()
1558 if (!qp->link_is_up) in ntb_send_link_down()
1561 dev_info(&pdev->dev, "qp %d: Send Link Down\n", qp->qp_num); in ntb_send_link_down()
1564 entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q); in ntb_send_link_down()
1578 rc = ntb_process_tx(qp, entry); in ntb_send_link_down()
1581 qp->qp_num); in ntb_send_link_down()
1583 ntb_qp_link_down_reset(qp); in ntb_send_link_down()
1613 struct ntb_transport_qp *qp; in ntb_transport_create_queue() local
1633 qp = &nt->qp_vec[free_queue]; in ntb_transport_create_queue()
1634 qp_bit = BIT_ULL(qp->qp_num); in ntb_transport_create_queue()
1638 qp->cb_data = data; in ntb_transport_create_queue()
1639 qp->rx_handler = handlers->rx_handler; in ntb_transport_create_queue()
1640 qp->tx_handler = handlers->tx_handler; in ntb_transport_create_queue()
1641 qp->event_handler = handlers->event_handler; in ntb_transport_create_queue()
1647 qp->tx_dma_chan = in ntb_transport_create_queue()
1650 if (!qp->tx_dma_chan) in ntb_transport_create_queue()
1653 qp->rx_dma_chan = in ntb_transport_create_queue()
1656 if (!qp->rx_dma_chan) in ntb_transport_create_queue()
1659 qp->tx_dma_chan = NULL; in ntb_transport_create_queue()
1660 qp->rx_dma_chan = NULL; in ntb_transport_create_queue()
1664 qp->tx_dma_chan ? "DMA" : "CPU"); in ntb_transport_create_queue()
1667 qp->rx_dma_chan ? "DMA" : "CPU"); in ntb_transport_create_queue()
1674 entry->qp = qp; in ntb_transport_create_queue()
1675 ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry, in ntb_transport_create_queue()
1676 &qp->rx_free_q); in ntb_transport_create_queue()
1684 entry->qp = qp; in ntb_transport_create_queue()
1685 ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry, in ntb_transport_create_queue()
1686 &qp->tx_free_q); in ntb_transport_create_queue()
1689 ntb_db_clear(qp->ndev, qp_bit); in ntb_transport_create_queue()
1690 ntb_db_clear_mask(qp->ndev, qp_bit); in ntb_transport_create_queue()
1692 dev_info(&pdev->dev, "NTB Transport QP %d created\n", qp->qp_num); in ntb_transport_create_queue()
1694 return qp; in ntb_transport_create_queue()
1697 while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q))) in ntb_transport_create_queue()
1700 while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q))) in ntb_transport_create_queue()
1702 if (qp->tx_dma_chan) in ntb_transport_create_queue()
1703 dma_release_channel(qp->tx_dma_chan); in ntb_transport_create_queue()
1704 if (qp->rx_dma_chan) in ntb_transport_create_queue()
1705 dma_release_channel(qp->rx_dma_chan); in ntb_transport_create_queue()
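
Lines 1613-1705 show the full queue bring-up: pick a free QP, wire the client's handlers, optionally grab TX/RX DMA channels (falling back to CPU copies), preallocate rx/tx entry descriptors, and unmask the QP's doorbell bit. A hedged sketch of how a client drives this API, modeled loosely on ntb_netdev; every "example_*" name, RX_RING_DEPTH, and RX_BUF_SIZE below is hypothetical:

        static void example_rx(struct ntb_transport_qp *qp, void *qp_data,
                               void *data, int len)
        {
                /* consume len bytes at data, then repost the buffer */
                ntb_transport_rx_enqueue(qp, data, data, RX_BUF_SIZE);
        }

        static void example_tx(struct ntb_transport_qp *qp, void *qp_data,
                               void *data, int len)
        {
                /* data is the cb pointer given to tx_enqueue; recycle it */
        }

        static void example_event(void *client_data, int link_is_up)
        {
                pr_info("example: qp link %s\n", link_is_up ? "up" : "down");
        }

        static const struct ntb_queue_handlers example_handlers = {
                .rx_handler     = example_rx,
                .tx_handler     = example_tx,
                .event_handler  = example_event,
        };

        /* in the client's probe path: */
        qp = ntb_transport_create_queue(example_priv, client_dev,
                                        &example_handlers);
        if (!qp)
                return -EIO;

        for (i = 0; i < RX_RING_DEPTH; i++)     /* pre-post receive buffers */
                ntb_transport_rx_enqueue(qp, bufs[i], bufs[i], RX_BUF_SIZE);

        ntb_transport_link_up(qp);      /* sets client_ready, kicks link_work */
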
1718 void ntb_transport_free_queue(struct ntb_transport_qp *qp) in ntb_transport_free_queue() argument
1724 if (!qp) in ntb_transport_free_queue()
1727 pdev = qp->ndev->pdev; in ntb_transport_free_queue()
1729 if (qp->tx_dma_chan) { in ntb_transport_free_queue()
1730 struct dma_chan *chan = qp->tx_dma_chan; in ntb_transport_free_queue()
1734 qp->tx_dma_chan = NULL; in ntb_transport_free_queue()
1739 dma_sync_wait(chan, qp->last_cookie); in ntb_transport_free_queue()
1744 if (qp->rx_dma_chan) { in ntb_transport_free_queue()
1745 struct dma_chan *chan = qp->rx_dma_chan; in ntb_transport_free_queue()
1749 qp->rx_dma_chan = NULL; in ntb_transport_free_queue()
1754 dma_sync_wait(chan, qp->last_cookie); in ntb_transport_free_queue()
1759 qp_bit = BIT_ULL(qp->qp_num); in ntb_transport_free_queue()
1761 ntb_db_set_mask(qp->ndev, qp_bit); in ntb_transport_free_queue()
1762 tasklet_disable(&qp->rxc_db_work); in ntb_transport_free_queue()
1764 cancel_delayed_work_sync(&qp->link_work); in ntb_transport_free_queue()
1766 qp->cb_data = NULL; in ntb_transport_free_queue()
1767 qp->rx_handler = NULL; in ntb_transport_free_queue()
1768 qp->tx_handler = NULL; in ntb_transport_free_queue()
1769 qp->event_handler = NULL; in ntb_transport_free_queue()
1771 while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q))) in ntb_transport_free_queue()
1774 while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_pend_q))) { in ntb_transport_free_queue()
1779 while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_post_q))) { in ntb_transport_free_queue()
1784 while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q))) in ntb_transport_free_queue()
1787 qp->transport->qp_bitmap_free |= qp_bit; in ntb_transport_free_queue()
1789 dev_info(&pdev->dev, "NTB Transport QP %d freed\n", qp->qp_num); in ntb_transport_free_queue()
1803 void *ntb_transport_rx_remove(struct ntb_transport_qp *qp, unsigned int *len) in ntb_transport_rx_remove() argument
1808 if (!qp || qp->client_ready) in ntb_transport_rx_remove()
1811 entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_pend_q); in ntb_transport_rx_remove()
1818 ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry, &qp->rx_free_q); in ntb_transport_rx_remove()
1836 int ntb_transport_rx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data, in ntb_transport_rx_enqueue() argument
1841 if (!qp) in ntb_transport_rx_enqueue()
1844 entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q); in ntb_transport_rx_enqueue()
1853 ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry, &qp->rx_pend_q); in ntb_transport_rx_enqueue()
1855 tasklet_schedule(&qp->rxc_db_work); in ntb_transport_rx_enqueue()
1874 int ntb_transport_tx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data, in ntb_transport_tx_enqueue() argument
1880 if (!qp || !qp->link_is_up || !len) in ntb_transport_tx_enqueue()
1883 entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q); in ntb_transport_tx_enqueue()
1885 qp->tx_err_no_buf++; in ntb_transport_tx_enqueue()
1894 rc = ntb_process_tx(qp, entry); in ntb_transport_tx_enqueue()
1896 ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry, in ntb_transport_tx_enqueue()
1897 &qp->tx_free_q); in ntb_transport_tx_enqueue()
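
On the transmit side (lines 1883-1897), ntb_transport_tx_enqueue() pulls a descriptor off tx_free_q, and if ntb_process_tx() fails (ring full at line 1528, oversized frame at line 1533) the descriptor goes back on the free list and a negative errno is returned to the caller. A hedged flow-control sketch in the style of ntb_netdev's transmit path (the ndev/skb usage is that client's convention, not part of this API):

        rc = ntb_transport_tx_enqueue(qp, skb, skb->data, skb->len);
        if (rc < 0) {
                /* no free entry or ring full: stop the queue and let
                 * tx_handler completions drain the ring before resuming */
                netif_stop_queue(ndev);
        }
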
1909 void ntb_transport_link_up(struct ntb_transport_qp *qp) in ntb_transport_link_up() argument
1911 if (!qp) in ntb_transport_link_up()
1914 qp->client_ready = true; in ntb_transport_link_up()
1916 if (qp->transport->link_is_up) in ntb_transport_link_up()
1917 schedule_delayed_work(&qp->link_work, 0); in ntb_transport_link_up()
1929 void ntb_transport_link_down(struct ntb_transport_qp *qp) in ntb_transport_link_down() argument
1933 if (!qp) in ntb_transport_link_down()
1936 qp->client_ready = false; in ntb_transport_link_down()
1938 val = ntb_spad_read(qp->ndev, QP_LINKS); in ntb_transport_link_down()
1940 ntb_peer_spad_write(qp->ndev, QP_LINKS, in ntb_transport_link_down()
1941 val & ~BIT(qp->qp_num)); in ntb_transport_link_down()
1943 if (qp->link_is_up) in ntb_transport_link_down()
1944 ntb_send_link_down(qp); in ntb_transport_link_down()
1946 cancel_delayed_work_sync(&qp->link_work); in ntb_transport_link_down()
1958 bool ntb_transport_link_query(struct ntb_transport_qp *qp) in ntb_transport_link_query() argument
1960 if (!qp) in ntb_transport_link_query()
1963 return qp->link_is_up; in ntb_transport_link_query()
1975 unsigned char ntb_transport_qp_num(struct ntb_transport_qp *qp) in ntb_transport_qp_num() argument
1977 if (!qp) in ntb_transport_qp_num()
1980 return qp->qp_num; in ntb_transport_qp_num()
1992 unsigned int ntb_transport_max_size(struct ntb_transport_qp *qp) in ntb_transport_max_size() argument
1998 if (!qp) in ntb_transport_max_size()
2001 rx_chan = qp->rx_dma_chan; in ntb_transport_max_size()
2002 tx_chan = qp->tx_dma_chan; in ntb_transport_max_size()
2008 max_size = qp->tx_max_frame - sizeof(struct ntb_payload_header); in ntb_transport_max_size()
2015 unsigned int ntb_transport_tx_free_entry(struct ntb_transport_qp *qp) in ntb_transport_tx_free_entry() argument
2017 unsigned int head = qp->tx_index; in ntb_transport_tx_free_entry()
2018 unsigned int tail = qp->remote_rx_info->entry; in ntb_transport_tx_free_entry()
2020 return tail > head ? tail - head : qp->tx_max_entry + tail - head; in ntb_transport_tx_free_entry()
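
The free-entry computation at line 2020 is plain modular ring arithmetic: head is the local tx_index, tail is the last slot index the peer has completed (published back through the memory window, see line 1181). A standalone demonstration with example values:

        #include <stdio.h>

        static unsigned int tx_free(unsigned int head, unsigned int tail,
                                    unsigned int max_entry)
        {
                return tail > head ? tail - head : max_entry + tail - head;
        }

        int main(void)
        {
                /* 8-entry ring: producer at 6, consumer acked through 2 */
                printf("%u\n", tx_free(6, 2, 8));   /* wraps: 8 + 2 - 6 = 4 */
                printf("%u\n", tx_free(1, 7, 8));   /* no wrap: 7 - 1 = 6 */
                return 0;
        }
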
2027 struct ntb_transport_qp *qp; in ntb_transport_doorbell_callback() local
2036 qp = &nt->qp_vec[qp_num]; in ntb_transport_doorbell_callback()
2038 tasklet_schedule(&qp->rxc_db_work); in ntb_transport_doorbell_callback()