Searched refs:rx_q (Results 1 – 14 of 14) sorted by relevance
172 struct mt7601u_rx_queue *q = &dev->rx_q; in mt7601u_rx_get_pending_entry()
193 struct mt7601u_rx_queue *q = &dev->rx_q; in mt7601u_complete_rx()
370 for (i = 0; i < dev->rx_q.entries; i++) { in mt7601u_kill_rx()
371 int next = dev->rx_q.end; in mt7601u_kill_rx()
374 usb_poison_urb(dev->rx_q.e[next].urb); in mt7601u_kill_rx()
406 for (i = 0; i < dev->rx_q.entries; i++) { in mt7601u_submit_rx()
407 ret = mt7601u_submit_rx_buf(dev, &dev->rx_q.e[i], GFP_KERNEL); in mt7601u_submit_rx()
419 for (i = 0; i < dev->rx_q.entries; i++) { in mt7601u_free_rx()
420 __free_pages(dev->rx_q.e[i].p, MT_RX_ORDER); in mt7601u_free_rx()
421 usb_free_urb(dev->rx_q.e[i].urb); in mt7601u_free_rx()
[all …]
213 struct mt7601u_rx_queue rx_q; member
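The mt7601u hits above trace one lifecycle: a fixed array of rx entries, each holding a preallocated URB and a high-order page buffer, armed in a loop at start, poisoned on teardown, then freed. The sketch below is a simplified, hypothetical model of that flow, not the driver's actual code; the field names (entries, e[], urb, p) come from the hits, while struct rx_entry, struct rx_queue, the MT_RX_ORDER value and submit_rx_buf() are illustrative stand-ins.

/*
 * Hypothetical, simplified model of the mt7601u rx queue lifecycle.
 * Field names (entries, e[], urb, p) match the search hits above;
 * everything else is a stand-in for illustration.
 */
#include <linux/usb.h>
#include <linux/gfp.h>

#define MT_RX_ORDER 3			/* stand-in buffer order */

struct rx_entry {
	struct urb *urb;		/* preallocated USB request block */
	struct page *p;			/* order-MT_RX_ORDER receive buffer */
};

struct rx_queue {
	struct rx_entry e[16];		/* fixed ring of rx buffers */
	unsigned int start, end;	/* consumer/producer positions */
	unsigned int entries;		/* valid slots in e[] */
};

/* Stand-in for mt7601u_submit_rx_buf(): arms one entry's URB. */
static int submit_rx_buf(struct rx_queue *q, struct rx_entry *e, gfp_t gfp);

/* Arm every preallocated buffer, as mt7601u_submit_rx() does. */
static int submit_rx(struct rx_queue *q)
{
	int i, ret;

	for (i = 0; i < q->entries; i++) {
		ret = submit_rx_buf(q, &q->e[i], GFP_KERNEL);
		if (ret)
			return ret;
	}
	return 0;
}

/*
 * Teardown mirrors mt7601u_kill_rx() plus mt7601u_free_rx(): poison
 * the in-flight URBs first so no completion races with the free below.
 */
static void kill_and_free_rx(struct rx_queue *q)
{
	int i;

	for (i = 0; i < q->entries; i++)
		usb_poison_urb(q->e[i].urb);

	for (i = 0; i < q->entries; i++) {
		__free_pages(q->e[i].p, MT_RX_ORDER);
		usb_free_urb(q->e[i].urb);
	}
}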
303 #define HWM_GET_RX_USED(smc) ((int)(smc)->hw.fp.rx_q[QUEUE_R1].rx_used)
317 #define HWM_GET_RX_FREE(smc) ((int)(smc)->hw.fp.rx_q[QUEUE_R1].rx_free-1)
333 (smc)->hw.fp.rx_q[QUEUE_R1].rx_curr_put
351 if ((low_water) >= (smc)->hw.fp.rx_q[QUEUE_R1].rx_used) {\
196 struct s_smt_rx_queue rx_q[USED_QUEUES] ; member
86 struct sk_buff_head rx_q; member
414 while ((skb = skb_dequeue(&fw_dnld->rx_q))) { in fw_dnld_rx_work()
466 skb_queue_head_init(&priv->fw_dnld.rx_q); in nfcmrvl_fw_dnld_init()
486 skb_queue_tail(&priv->fw_dnld.rx_q, skb); in nfcmrvl_fw_dnld_recv_frame()
215 struct sk_buff_head rx_q; /* RX queue */ member
272 while ((skb = skb_dequeue(&fmdev->rx_q))) { in recv_tasklet()
1465 skb_queue_tail(&fmdev->rx_q, skb); in fm_st_receive()
1554 skb_queue_head_init(&fmdev->rx_q); in fmc_prepare()
1607 skb_queue_purge(&fmdev->rx_q); in fmc_release()
449 skb_queue_purge(&ndev->rx_q); in nci_open_device()
474 skb_queue_purge(&ndev->rx_q); in nci_close_device()
1145 skb_queue_head_init(&ndev->rx_q); in nci_register_device()
1214 skb_queue_tail(&ndev->rx_q, skb); in nci_recv_frame()
1391 while ((skb = skb_dequeue(&ndev->rx_q))) { in nci_rx_work()
722 if (smc->hw.fp.rx_q[QUEUE_R1].rx_used > 0) { in fddi_isr()
1423 r = smc->hw.fp.rx_q[QUEUE_R1].rx_curr_put ; in hwm_rx_frag()
1434 smc->hw.fp.rx_q[QUEUE_R1].rx_free-- ; in hwm_rx_frag()
1435 smc->hw.fp.rx_q[QUEUE_R1].rx_used++ ; in hwm_rx_frag()
1436 smc->hw.fp.rx_q[QUEUE_R1].rx_curr_put = r->rxd_next ; in hwm_rx_frag()
283 smc->hw.fp.rx[QUEUE_R1] = queue = &smc->hw.fp.rx_q[QUEUE_R1] ; in init_rx()
290 smc->hw.fp.rx[QUEUE_R2] = queue = &smc->hw.fp.rx_q[QUEUE_R2] ; in init_rx()
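Both skfp groups above (the HWM_GET_RX_* macros and the hwmtm.c hits) describe one descriptor-ring bookkeeping scheme: hwm_rx_frag() hands a descriptor to the hardware by advancing rx_curr_put and moving one buffer from the free count to the used count, which is what the macros read back. A condensed, hypothetical sketch of that accounting, keeping only the fields that appear in the results:

/*
 * Condensed model of the skfp rx descriptor accounting seen in
 * hwm_rx_frag(); descriptor setup itself is elided, and the struct
 * names here are simplified stand-ins.
 */
struct rxd {
	struct rxd *rxd_next;		/* circular descriptor ring */
};

struct rx_queue {
	struct rxd *rx_curr_put;	/* next descriptor handed to the HW */
	int rx_free;			/* descriptors still available */
	int rx_used;			/* descriptors owned by the HW */
};

/*
 * Give the current descriptor to the hardware, as hwm_rx_frag() does:
 * one buffer moves from "free" to "used" and the put pointer advances,
 * which is exactly what HWM_GET_RX_USED()/HWM_GET_RX_FREE() report.
 */
static void rx_frag(struct rx_queue *q)
{
	struct rxd *r = q->rx_curr_put;

	/* ... fill r with buffer address/length, set the HW-own bit ... */
	q->rx_free--;
	q->rx_used++;
	q->rx_curr_put = r->rxd_next;
}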
231 struct sk_buff_head rx_q; member
1542 skb_queue_purge(&hdev->rx_q); in hci_dev_do_open()
1728 skb_queue_purge(&hdev->rx_q); in hci_dev_do_close()
1796 skb_queue_purge(&hdev->rx_q); in hci_dev_do_reset()
3305 skb_queue_head_init(&hdev->rx_q); in hci_alloc_dev()
3553 skb_queue_tail(&hdev->rx_q, skb); in hci_recv_frame()
3569 skb_queue_tail(&hdev->rx_q, skb); in hci_recv_diag()
4440 while ((skb = skb_dequeue(&hdev->rx_q))) { in hci_rx_work()
330 struct sk_buff_head rx_q; member
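The remaining hits (nfcmrvl fw_dnld, fmdrv, nci and hci) all instantiate the same sk_buff_head pattern: the transport enqueues frames with skb_queue_tail() and defers processing to a work item or tasklet that drains the queue with skb_dequeue(), while the open/close/reset paths flush leftovers with skb_queue_purge(). A minimal generic sketch of that lifecycle; mydev, my_rx_work() and the other my_* names are hypothetical stand-ins for the per-driver equivalents:

/*
 * Generic shape of the sk_buff_head rx_q pattern shared by the hci,
 * nci, nfcmrvl and fmdrv hits above; mydev and the my_* functions are
 * stand-ins for illustration.
 */
#include <linux/skbuff.h>
#include <linux/workqueue.h>

struct mydev {
	struct sk_buff_head rx_q;	/* frames waiting to be processed */
	struct work_struct rx_work;
};

/*
 * Interrupt/transport context: queue the frame and defer processing,
 * as hci_recv_frame() and nci_recv_frame() do.
 */
static void my_recv_frame(struct mydev *dev, struct sk_buff *skb)
{
	skb_queue_tail(&dev->rx_q, skb);
	schedule_work(&dev->rx_work);
}

/* Process context: drain the queue, as hci_rx_work()/nci_rx_work() do. */
static void my_rx_work(struct work_struct *work)
{
	struct mydev *dev = container_of(work, struct mydev, rx_work);
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&dev->rx_q)))
		kfree_skb(skb);		/* real drivers parse the frame here */
}

static void my_init(struct mydev *dev)
{
	skb_queue_head_init(&dev->rx_q);	/* as in hci_alloc_dev() */
	INIT_WORK(&dev->rx_work, my_rx_work);
}

static void my_close(struct mydev *dev)
{
	cancel_work_sync(&dev->rx_work);
	skb_queue_purge(&dev->rx_q);	/* drop anything still queued */
}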