Lines matching refs: tx_cb (drivers/net/ethernet/qlogic/qla3xxx.c)
1915 struct ql_tx_buf_cb *tx_cb; in ql_process_mac_tx_intr() local
1923 tx_cb = &qdev->tx_buf[mac_rsp->transaction_id]; in ql_process_mac_tx_intr()
1934 if (tx_cb->seg_count == 0) { in ql_process_mac_tx_intr()
1943 dma_unmap_addr(&tx_cb->map[0], mapaddr), in ql_process_mac_tx_intr()
1944 dma_unmap_len(&tx_cb->map[0], maplen), in ql_process_mac_tx_intr()
1946 tx_cb->seg_count--; in ql_process_mac_tx_intr()
1947 if (tx_cb->seg_count) { in ql_process_mac_tx_intr()
1948 for (i = 1; i < tx_cb->seg_count; i++) { in ql_process_mac_tx_intr()
1950 dma_unmap_addr(&tx_cb->map[i], in ql_process_mac_tx_intr()
1952 dma_unmap_len(&tx_cb->map[i], maplen), in ql_process_mac_tx_intr()
1957 qdev->ndev->stats.tx_bytes += tx_cb->skb->len; in ql_process_mac_tx_intr()
1960 dev_kfree_skb_irq(tx_cb->skb); in ql_process_mac_tx_intr()
1961 tx_cb->skb = NULL; in ql_process_mac_tx_intr()
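
The ql_process_mac_tx_intr() hits above are the TX completion path: the response IOCB's transaction id indexes qdev->tx_buf[], segment 0 (the linear skb data) is unmapped first, any remaining fragment segments follow, the byte counter is bumped, and the skb is freed with dev_kfree_skb_irq() because this runs in interrupt context. A minimal sketch of that teardown pattern, assuming the driver's ql3_adapter and ql_tx_buf_cb definitions from qla3xxx.h (the helper name tx_complete_sketch is mine, not the driver's):

	#include <linux/dma-mapping.h>
	#include <linux/netdevice.h>
	#include "qla3xxx.h"	/* struct ql3_adapter, struct ql_tx_buf_cb */

	static void tx_complete_sketch(struct ql3_adapter *qdev,
				       struct ql_tx_buf_cb *tx_cb)
	{
		int i;

		/* Segment 0 is the linear skb area mapped with dma_map_single(). */
		dma_unmap_single(&qdev->pdev->dev,
				 dma_unmap_addr(&tx_cb->map[0], mapaddr),
				 dma_unmap_len(&tx_cb->map[0], maplen),
				 DMA_TO_DEVICE);
		tx_cb->seg_count--;

		/* Any remaining recorded segments are page fragments. */
		for (i = 1; i < tx_cb->seg_count; i++)
			dma_unmap_page(&qdev->pdev->dev,
				       dma_unmap_addr(&tx_cb->map[i], mapaddr),
				       dma_unmap_len(&tx_cb->map[i], maplen),
				       DMA_TO_DEVICE);

		qdev->ndev->stats.tx_bytes += tx_cb->skb->len;

		/* Interrupt context, so the _irq variant frees the skb. */
		dev_kfree_skb_irq(tx_cb->skb);
		tx_cb->skb = NULL;
	}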
2305 struct ql_tx_buf_cb *tx_cb, in ql_send_map() argument
2317 seg_cnt = tx_cb->seg_count; in ql_send_map()
2335 dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map); in ql_send_map()
2336 dma_unmap_len_set(&tx_cb->map[seg], maplen, len); in ql_send_map()
2344 oal = tx_cb->oal; in ql_send_map()
2375 dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map); in ql_send_map()
2376 dma_unmap_len_set(&tx_cb->map[seg], maplen, in ql_send_map()
2397 dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map); in ql_send_map()
2398 dma_unmap_len_set(&tx_cb->map[seg], maplen, skb_frag_size(frag)); in ql_send_map()
2412 oal = tx_cb->oal; in ql_send_map()
2426 dma_unmap_addr(&tx_cb->map[seg], mapaddr), in ql_send_map()
2427 dma_unmap_len(&tx_cb->map[seg], maplen), in ql_send_map()
2434 dma_unmap_addr(&tx_cb->map[seg], mapaddr), in ql_send_map()
2435 dma_unmap_len(&tx_cb->map[seg], maplen), in ql_send_map()
2440 dma_unmap_addr(&tx_cb->map[0], mapaddr), in ql_send_map()
2441 			 dma_unmap_len(&tx_cb->map[0], maplen), in ql_send_map()
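On the transmit side, ql_send_map() records every DMA mapping in tx_cb->map[] via dma_unmap_addr_set()/dma_unmap_len_set() so the completion and error paths can undo them later; when a packet has more fragments than the IOCB holds inline, the per-slot tx_cb->oal (outbound address list, lines 2344 and 2412) takes the overflow. A condensed sketch of the record-then-back-out idiom, omitting the OAL overflow handling (the helper name map_skb_sketch is illustrative):

	#include <linux/dma-mapping.h>
	#include <linux/skbuff.h>
	#include "qla3xxx.h"

	static int map_skb_sketch(struct ql3_adapter *qdev,
				  struct ql_tx_buf_cb *tx_cb, struct sk_buff *skb)
	{
		struct device *dev = &qdev->pdev->dev;
		int seg = 0, i;
		dma_addr_t map;

		/* Map the linear area and record it for the unmap paths. */
		map = dma_map_single(dev, skb->data, skb_headlen(skb),
				     DMA_TO_DEVICE);
		if (dma_mapping_error(dev, map))
			return NETDEV_TX_BUSY;
		dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map);
		dma_unmap_len_set(&tx_cb->map[seg], maplen, skb_headlen(skb));
		seg++;

		/* One map[] entry per page fragment. */
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++, seg++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			map = skb_frag_dma_map(dev, frag, 0, skb_frag_size(frag),
					       DMA_TO_DEVICE);
			if (dma_mapping_error(dev, map))
				goto map_error;
			dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map);
			dma_unmap_len_set(&tx_cb->map[seg], maplen,
					  skb_frag_size(frag));
		}
		return NETDEV_TX_OK;

	map_error:
		/* Back out everything mapped so far, fragments first. */
		while (--seg >= 1)
			dma_unmap_page(dev,
				       dma_unmap_addr(&tx_cb->map[seg], mapaddr),
				       dma_unmap_len(&tx_cb->map[seg], maplen),
				       DMA_TO_DEVICE);
		dma_unmap_single(dev,
				 dma_unmap_addr(&tx_cb->map[0], mapaddr),
				 dma_unmap_len(&tx_cb->map[0], maplen),
				 DMA_TO_DEVICE);
		return NETDEV_TX_BUSY;
	}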
2465 struct ql_tx_buf_cb *tx_cb; in ql3xxx_send() local
2472 tx_cb = &qdev->tx_buf[qdev->req_producer_index]; in ql3xxx_send()
2473 tx_cb->seg_count = ql_get_seg_count(qdev, in ql3xxx_send()
2475 if (tx_cb->seg_count == -1) { in ql3xxx_send()
2480 mac_iocb_ptr = tx_cb->queue_entry; in ql3xxx_send()
2487 tx_cb->skb = skb; in ql3xxx_send()
2492 if (ql_send_map(qdev, mac_iocb_ptr, tx_cb, skb) != NETDEV_TX_OK) { in ql3xxx_send()
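ql3xxx_send() is the driver's transmit entry point (.ndo_start_xmit): it claims the control block at the current request-queue producer slot, computes the segment count, attaches the skb, and hands off to ql_send_map(). A sketch of that shape, assuming ql_get_seg_count() and the qdev fields shown in the matches above; error handling here is an approximation, not the driver's exact policy:

	static netdev_tx_t send_sketch(struct sk_buff *skb, struct net_device *ndev)
	{
		struct ql3_adapter *qdev = netdev_priv(ndev);
		struct ob_mac_iocb_req *mac_iocb_ptr;
		struct ql_tx_buf_cb *tx_cb;

		/* The producer index selects the next free control block. */
		tx_cb = &qdev->tx_buf[qdev->req_producer_index];
		tx_cb->seg_count = ql_get_seg_count(qdev,
						    skb_shinfo(skb)->nr_frags);
		if (tx_cb->seg_count == -1) {
			netdev_err(ndev, "%s: invalid segment count!\n", __func__);
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;	/* drop: skb cannot be described */
		}

		mac_iocb_ptr = tx_cb->queue_entry;
		tx_cb->skb = skb;

		if (ql_send_map(qdev, mac_iocb_ptr, tx_cb, skb) != NETDEV_TX_OK)
			return NETDEV_TX_BUSY;

		/* ... fill the IOCB, advance the producer, ring the doorbell */
		return NETDEV_TX_OK;
	}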
2807 struct ql_tx_buf_cb *tx_cb; in ql_free_send_free_list() local
2810 tx_cb = &qdev->tx_buf[0]; in ql_free_send_free_list()
2812 kfree(tx_cb->oal); in ql_free_send_free_list()
2813 tx_cb->oal = NULL; in ql_free_send_free_list()
2814 tx_cb++; in ql_free_send_free_list()
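
ql_free_send_free_list() walks the whole tx_buf array once, releasing the 512-byte OAL allocated per slot; clearing the pointer keeps a repeated teardown harmless. A sketch, with NUM_REQ_Q_ENTRIES standing in for the driver's request-queue depth (assumed here):

	static void free_send_list_sketch(struct ql3_adapter *qdev)
	{
		struct ql_tx_buf_cb *tx_cb = &qdev->tx_buf[0];
		int i;

		for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {
			kfree(tx_cb->oal);	/* kfree(NULL) is a no-op */
			tx_cb->oal = NULL;
			tx_cb++;
		}
	}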
2820 struct ql_tx_buf_cb *tx_cb; in ql_create_send_free_list() local
2827 tx_cb = &qdev->tx_buf[i]; in ql_create_send_free_list()
2828 tx_cb->skb = NULL; in ql_create_send_free_list()
2829 tx_cb->queue_entry = req_q_curr; in ql_create_send_free_list()
2831 tx_cb->oal = kmalloc(512, GFP_KERNEL); in ql_create_send_free_list()
2832 if (tx_cb->oal == NULL) in ql_create_send_free_list()
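
ql_create_send_free_list() is the matching setup: each control block is paired with its fixed IOCB slot in the request queue and given its own OAL up front, so the hot transmit path never allocates. A sketch, assuming the request-queue base lives at qdev->req_q_virt_addr:

	static int create_send_list_sketch(struct ql3_adapter *qdev)
	{
		struct ob_mac_iocb_req *req_q_curr = qdev->req_q_virt_addr;
		int i;

		for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {
			struct ql_tx_buf_cb *tx_cb = &qdev->tx_buf[i];

			tx_cb->skb = NULL;
			/* Fixed 1:1 pairing with the request-queue IOCB slot. */
			tx_cb->queue_entry = req_q_curr++;
			tx_cb->oal = kmalloc(512, GFP_KERNEL);
			if (!tx_cb->oal)
				return -ENOMEM;	/* caller unwinds via the free path */
		}
		return 0;
	}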
3624 struct ql_tx_buf_cb *tx_cb; in ql_reset_work() local
3638 tx_cb = &qdev->tx_buf[i]; in ql_reset_work()
3639 if (tx_cb->skb) { in ql_reset_work()
3643 dma_unmap_addr(&tx_cb->map[0], in ql_reset_work()
3645 dma_unmap_len(&tx_cb->map[0], maplen), in ql_reset_work()
3647 for (j = 1; j < tx_cb->seg_count; j++) { in ql_reset_work()
3649 dma_unmap_addr(&tx_cb->map[j], in ql_reset_work()
3651 dma_unmap_len(&tx_cb->map[j], in ql_reset_work()
3655 dev_kfree_skb(tx_cb->skb); in ql_reset_work()
3656 tx_cb->skb = NULL; in ql_reset_work()
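
Finally, ql_reset_work() reclaims any skb still parked in a control block when the adapter is reset. The unmap walk mirrors the completion handler, but this runs from a workqueue in process context, so the skb is freed with plain dev_kfree_skb() rather than dev_kfree_skb_irq(). A sketch of that loop (helper name is illustrative):

	static void reset_drain_sketch(struct ql3_adapter *qdev)
	{
		int i, j;

		for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {
			struct ql_tx_buf_cb *tx_cb = &qdev->tx_buf[i];

			if (!tx_cb->skb)
				continue;	/* slot already reaped */
			dma_unmap_single(&qdev->pdev->dev,
					 dma_unmap_addr(&tx_cb->map[0], mapaddr),
					 dma_unmap_len(&tx_cb->map[0], maplen),
					 DMA_TO_DEVICE);
			for (j = 1; j < tx_cb->seg_count; j++)
				dma_unmap_page(&qdev->pdev->dev,
					       dma_unmap_addr(&tx_cb->map[j], mapaddr),
					       dma_unmap_len(&tx_cb->map[j], maplen),
					       DMA_TO_DEVICE);
			dev_kfree_skb(tx_cb->skb);	/* process context */
			tx_cb->skb = NULL;
		}
	}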