Lines matching refs: tx_cb

1917 	struct ql_tx_buf_cb *tx_cb;  in ql_process_mac_tx_intr()  local
1925 tx_cb = &qdev->tx_buf[mac_rsp->transaction_id]; in ql_process_mac_tx_intr()
1936 if (tx_cb->seg_count == 0) { in ql_process_mac_tx_intr()
1945 dma_unmap_addr(&tx_cb->map[0], mapaddr), in ql_process_mac_tx_intr()
1946 dma_unmap_len(&tx_cb->map[0], maplen), in ql_process_mac_tx_intr()
1948 tx_cb->seg_count--; in ql_process_mac_tx_intr()
1949 if (tx_cb->seg_count) { in ql_process_mac_tx_intr()
1950 for (i = 1; i < tx_cb->seg_count; i++) { in ql_process_mac_tx_intr()
1952 dma_unmap_addr(&tx_cb->map[i], in ql_process_mac_tx_intr()
1954 dma_unmap_len(&tx_cb->map[i], maplen), in ql_process_mac_tx_intr()
1959 qdev->ndev->stats.tx_bytes += tx_cb->skb->len; in ql_process_mac_tx_intr()
1962 dev_kfree_skb_irq(tx_cb->skb); in ql_process_mac_tx_intr()
1963 tx_cb->skb = NULL; in ql_process_mac_tx_intr()
2307 struct ql_tx_buf_cb *tx_cb, in ql_send_map() argument
2319 seg_cnt = tx_cb->seg_count; in ql_send_map()
2337 dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map); in ql_send_map()
2338 dma_unmap_len_set(&tx_cb->map[seg], maplen, len); in ql_send_map()
2346 oal = tx_cb->oal; in ql_send_map()
2377 dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map); in ql_send_map()
2378 dma_unmap_len_set(&tx_cb->map[seg], maplen, in ql_send_map()
2399 dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map); in ql_send_map()
2400 dma_unmap_len_set(&tx_cb->map[seg], maplen, skb_frag_size(frag)); in ql_send_map()
2414 oal = tx_cb->oal; in ql_send_map()
2428 dma_unmap_addr(&tx_cb->map[seg], mapaddr), in ql_send_map()
2429 dma_unmap_len(&tx_cb->map[seg], maplen), in ql_send_map()
2436 dma_unmap_addr(&tx_cb->map[seg], mapaddr), in ql_send_map()
2437 dma_unmap_len(&tx_cb->map[seg], maplen), in ql_send_map()
2442 dma_unmap_addr(&tx_cb->map[0], mapaddr), in ql_send_map()
2443 dma_unmap_addr(&tx_cb->map[0], maplen), in ql_send_map()
2467 struct ql_tx_buf_cb *tx_cb; in ql3xxx_send() local
2474 tx_cb = &qdev->tx_buf[qdev->req_producer_index]; in ql3xxx_send()
2475 tx_cb->seg_count = ql_get_seg_count(qdev, in ql3xxx_send()
2477 if (tx_cb->seg_count == -1) { in ql3xxx_send()
2482 mac_iocb_ptr = tx_cb->queue_entry; in ql3xxx_send()
2489 tx_cb->skb = skb; in ql3xxx_send()
2494 if (ql_send_map(qdev, mac_iocb_ptr, tx_cb, skb) != NETDEV_TX_OK) { in ql3xxx_send()
2809 struct ql_tx_buf_cb *tx_cb; in ql_free_send_free_list() local
2812 tx_cb = &qdev->tx_buf[0]; in ql_free_send_free_list()
2814 kfree(tx_cb->oal); in ql_free_send_free_list()
2815 tx_cb->oal = NULL; in ql_free_send_free_list()
2816 tx_cb++; in ql_free_send_free_list()
2822 struct ql_tx_buf_cb *tx_cb; in ql_create_send_free_list() local
2829 tx_cb = &qdev->tx_buf[i]; in ql_create_send_free_list()
2830 tx_cb->skb = NULL; in ql_create_send_free_list()
2831 tx_cb->queue_entry = req_q_curr; in ql_create_send_free_list()
2833 tx_cb->oal = kmalloc(512, GFP_KERNEL); in ql_create_send_free_list()
2834 if (tx_cb->oal == NULL) in ql_create_send_free_list()
3626 struct ql_tx_buf_cb *tx_cb; in ql_reset_work() local
3640 tx_cb = &qdev->tx_buf[i]; in ql_reset_work()
3641 if (tx_cb->skb) { in ql_reset_work()
3645 dma_unmap_addr(&tx_cb->map[0], in ql_reset_work()
3647 dma_unmap_len(&tx_cb->map[0], maplen), in ql_reset_work()
3649 for (j = 1; j < tx_cb->seg_count; j++) { in ql_reset_work()
3651 dma_unmap_addr(&tx_cb->map[j], in ql_reset_work()
3653 dma_unmap_len(&tx_cb->map[j], in ql_reset_work()
3657 dev_kfree_skb(tx_cb->skb); in ql_reset_work()
3658 tx_cb->skb = NULL; in ql_reset_work()