Lines Matching refs:queue (Xen netfront driver, drivers/net/xen-netfront.c)

203 static struct sk_buff *xennet_get_rx_skb(struct netfront_queue *queue, in xennet_get_rx_skb() argument
207 struct sk_buff *skb = queue->rx_skbs[i]; in xennet_get_rx_skb()
208 queue->rx_skbs[i] = NULL; in xennet_get_rx_skb()
212 static grant_ref_t xennet_get_rx_ref(struct netfront_queue *queue, in xennet_get_rx_ref() argument
216 grant_ref_t ref = queue->grant_rx_ref[i]; in xennet_get_rx_ref()
217 queue->grant_rx_ref[i] = GRANT_INVALID_REF; in xennet_get_rx_ref()
233 struct netfront_queue *queue = (struct netfront_queue *)data; in rx_refill_timeout() local
234 napi_schedule(&queue->napi); in rx_refill_timeout()
237 static int netfront_tx_slot_available(struct netfront_queue *queue) in netfront_tx_slot_available() argument
239 return (queue->tx.req_prod_pvt - queue->tx.rsp_cons) < in netfront_tx_slot_available()
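
The test on line 239 leans on the shared-ring convention that req_prod_pvt and rsp_cons are free-running indices, so their unsigned difference is the number of requests still in flight; the threshold it is compared against sits on the following line, which is elided from this listing. A minimal standalone sketch of the idea, where RING_SIZE and SLOTS_PER_PACKET are hypothetical placeholders rather than the driver's real constants:

/* Hedged sketch, not the driver's exact code: free-running 32-bit indices
 * wrap naturally, so the unsigned difference counts in-flight requests even
 * across wrap-around.  RING_SIZE and SLOTS_PER_PACKET are made-up values. */
enum { RING_SIZE = 256, SLOTS_PER_PACKET = 18 };

static int tx_slots_available(unsigned int req_prod_pvt, unsigned int rsp_cons)
{
	unsigned int in_flight = req_prod_pvt - rsp_cons;

	/* leave room for one maximally fragmented packet */
	return in_flight < RING_SIZE - SLOTS_PER_PACKET;
}
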
243 static void xennet_maybe_wake_tx(struct netfront_queue *queue) in xennet_maybe_wake_tx() argument
245 struct net_device *dev = queue->info->netdev; in xennet_maybe_wake_tx()
246 struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, queue->id); in xennet_maybe_wake_tx()
249 netfront_tx_slot_available(queue) && in xennet_maybe_wake_tx()
251 netif_tx_wake_queue(netdev_get_tx_queue(dev, queue->id)); in xennet_maybe_wake_tx()
255 static struct sk_buff *xennet_alloc_one_rx_buffer(struct netfront_queue *queue) in xennet_alloc_one_rx_buffer() argument
260 skb = __netdev_alloc_skb(queue->info->netdev, in xennet_alloc_one_rx_buffer()
275 skb->dev = queue->info->netdev; in xennet_alloc_one_rx_buffer()
281 static void xennet_alloc_rx_buffers(struct netfront_queue *queue) in xennet_alloc_rx_buffers() argument
283 RING_IDX req_prod = queue->rx.req_prod_pvt; in xennet_alloc_rx_buffers()
286 if (unlikely(!netif_carrier_ok(queue->info->netdev))) in xennet_alloc_rx_buffers()
289 for (req_prod = queue->rx.req_prod_pvt; in xennet_alloc_rx_buffers()
290 req_prod - queue->rx.rsp_cons < NET_RX_RING_SIZE; in xennet_alloc_rx_buffers()
298 skb = xennet_alloc_one_rx_buffer(queue); in xennet_alloc_rx_buffers()
304 BUG_ON(queue->rx_skbs[id]); in xennet_alloc_rx_buffers()
305 queue->rx_skbs[id] = skb; in xennet_alloc_rx_buffers()
307 ref = gnttab_claim_grant_reference(&queue->gref_rx_head); in xennet_alloc_rx_buffers()
309 queue->grant_rx_ref[id] = ref; in xennet_alloc_rx_buffers()
313 req = RING_GET_REQUEST(&queue->rx, req_prod); in xennet_alloc_rx_buffers()
315 queue->info->xbdev->otherend_id, in xennet_alloc_rx_buffers()
323 queue->rx.req_prod_pvt = req_prod; in xennet_alloc_rx_buffers()
326 if (req_prod - queue->rx.rsp_cons < NET_RX_SLOTS_MIN) { in xennet_alloc_rx_buffers()
327 mod_timer(&queue->rx_refill_timer, jiffies + (HZ/10)); in xennet_alloc_rx_buffers()
333 RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->rx, notify); in xennet_alloc_rx_buffers()
335 notify_remote_via_irq(queue->rx_irq); in xennet_alloc_rx_buffers()
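
Lines 326-327 and 333-335 show two standard idioms: if the RX ring ends up shallower than NET_RX_SLOTS_MIN, the refill is retried from rx_refill_timer (which just reschedules NAPI, lines 233-234), and the new requests are published with RING_PUSH_REQUESTS_AND_CHECK_NOTIFY so the backend is only interrupted when it asked to be. A simplified sketch of what that producer-side check amounts to; struct front_ring/shared_ring here are made-up stand-ins for the Xen ring types, and the real macro in the Xen ring headers differs in detail:

/* Hedged sketch of the "push and maybe notify" idiom.  req_event is where
 * the consumer records the request index it wants to be woken at. */
struct shared_ring { unsigned int req_prod, req_event; };
struct front_ring  { unsigned int req_prod_pvt; struct shared_ring *sring; };

static int push_requests_and_check_notify(struct front_ring *r)
{
	unsigned int old = r->sring->req_prod;
	unsigned int new = r->req_prod_pvt;

	/* make request contents visible before the producer index */
	__sync_synchronize();
	r->sring->req_prod = new;
	/* and the producer index visible before we sample req_event */
	__sync_synchronize();

	/* notify only if req_event falls inside the batch just published */
	return (unsigned int)(new - r->sring->req_event) <
	       (unsigned int)(new - old);
}
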
343 struct netfront_queue *queue = NULL; in xennet_open() local
346 queue = &np->queues[i]; in xennet_open()
347 napi_enable(&queue->napi); in xennet_open()
349 spin_lock_bh(&queue->rx_lock); in xennet_open()
351 xennet_alloc_rx_buffers(queue); in xennet_open()
352 queue->rx.sring->rsp_event = queue->rx.rsp_cons + 1; in xennet_open()
353 if (RING_HAS_UNCONSUMED_RESPONSES(&queue->rx)) in xennet_open()
354 napi_schedule(&queue->napi); in xennet_open()
356 spin_unlock_bh(&queue->rx_lock); in xennet_open()
364 static void xennet_tx_buf_gc(struct netfront_queue *queue) in xennet_tx_buf_gc() argument
370 BUG_ON(!netif_carrier_ok(queue->info->netdev)); in xennet_tx_buf_gc()
373 prod = queue->tx.sring->rsp_prod; in xennet_tx_buf_gc()
376 for (cons = queue->tx.rsp_cons; cons != prod; cons++) { in xennet_tx_buf_gc()
379 txrsp = RING_GET_RESPONSE(&queue->tx, cons); in xennet_tx_buf_gc()
384 skb = queue->tx_skbs[id].skb; in xennet_tx_buf_gc()
386 queue->grant_tx_ref[id]) != 0)) { in xennet_tx_buf_gc()
392 queue->grant_tx_ref[id], GNTMAP_readonly); in xennet_tx_buf_gc()
394 &queue->gref_tx_head, queue->grant_tx_ref[id]); in xennet_tx_buf_gc()
395 queue->grant_tx_ref[id] = GRANT_INVALID_REF; in xennet_tx_buf_gc()
396 queue->grant_tx_page[id] = NULL; in xennet_tx_buf_gc()
397 add_id_to_freelist(&queue->tx_skb_freelist, queue->tx_skbs, id); in xennet_tx_buf_gc()
401 queue->tx.rsp_cons = prod; in xennet_tx_buf_gc()
411 queue->tx.sring->rsp_event = in xennet_tx_buf_gc()
412 prod + ((queue->tx.sring->req_prod - prod) >> 1) + 1; in xennet_tx_buf_gc()
414 } while ((cons == prod) && (prod != queue->tx.sring->rsp_prod)); in xennet_tx_buf_gc()
416 xennet_maybe_wake_tx(queue); in xennet_tx_buf_gc()
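
The loop visible across lines 373-414 is the consumer-side counterpart: drain responses up to rsp_prod, reclaim each slot's grant and skb, then set rsp_event roughly half a ring ahead so the backend batches its completion interrupts, and re-read rsp_prod once more to catch responses that landed while rsp_event was being moved. A condensed sketch of that control flow, reusing the queue fields already shown above; prod and cons are RING_IDX locals, barriers are elided, and reclaim_tx_slot() is a hypothetical helper standing in for the per-slot work on lines 379-397:

/* Hedged sketch of the completion-reaping loop.  Setting rsp_event past the
 * current producer throttles interrupts; the while condition closes the race
 * with responses that arrived after rsp_event was updated. */
do {
	prod = queue->tx.sring->rsp_prod;

	for (cons = queue->tx.rsp_cons; cons != prod; cons++)
		reclaim_tx_slot(queue, cons);	/* hypothetical helper */

	queue->tx.rsp_cons = prod;

	/* ask for the next interrupt only after roughly half of the still
	 * outstanding requests have completed */
	queue->tx.sring->rsp_event =
		prod + ((queue->tx.sring->req_prod - prod) >> 1) + 1;
} while ((cons == prod) && (prod != queue->tx.sring->rsp_prod));
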
420 struct netfront_queue *queue, struct sk_buff *skb, in xennet_make_one_txreq() argument
429 id = get_id_from_freelist(&queue->tx_skb_freelist, queue->tx_skbs); in xennet_make_one_txreq()
430 tx = RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++); in xennet_make_one_txreq()
431 ref = gnttab_claim_grant_reference(&queue->gref_tx_head); in xennet_make_one_txreq()
434 gnttab_grant_foreign_access_ref(ref, queue->info->xbdev->otherend_id, in xennet_make_one_txreq()
437 queue->tx_skbs[id].skb = skb; in xennet_make_one_txreq()
438 queue->grant_tx_page[id] = page; in xennet_make_one_txreq()
439 queue->grant_tx_ref[id] = ref; in xennet_make_one_txreq()
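
get_id_from_freelist() on line 429, and its add_id_to_freelist() counterpart on lines 397 and 1072, manage slot ids by threading a free list through the tx_skbs[] array itself: an unused entry stores the index of the next free entry instead of an skb pointer. A sketch of that scheme; the names get_id()/put_id() and the exact union layout are illustrative rather than copied from the driver, and struct sk_buff comes from <linux/skbuff.h> in the real code:

/* Hedged sketch: the slot array doubles as a singly linked free list, so id
 * allocation and release are O(1) with no extra storage. */
struct sk_buff;			/* <linux/skbuff.h> in the real driver */

union skb_entry {
	struct sk_buff *skb;	/* slot in use: the queued skb */
	unsigned long link;	/* slot free: index of the next free slot */
};

static unsigned short get_id(unsigned int *head, union skb_entry *list)
{
	unsigned short id = *head;

	*head = list[id].link;		/* pop the head of the free list */
	return id;
}

static void put_id(unsigned int *head, union skb_entry *list,
		   unsigned short id)
{
	list[id].link = *head;		/* push the slot back */
	*head = id;
}
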
451 struct netfront_queue *queue, struct xen_netif_tx_request *tx, in xennet_make_txreqs() argument
461 tx = xennet_make_one_txreq(queue, skb_get(skb), in xennet_make_txreqs()
526 struct netfront_queue *queue = NULL; in xennet_start_xmit() local
535 queue = &np->queues[queue_index]; in xennet_start_xmit()
559 spin_lock_irqsave(&queue->tx_lock, flags); in xennet_start_xmit()
564 spin_unlock_irqrestore(&queue->tx_lock, flags); in xennet_start_xmit()
569 first_tx = tx = xennet_make_one_txreq(queue, skb, in xennet_start_xmit()
587 RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++); in xennet_start_xmit()
603 tx = xennet_make_txreqs(queue, tx, skb, page, offset, len); in xennet_start_xmit()
608 tx = xennet_make_txreqs(queue, tx, skb, in xennet_start_xmit()
616 RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->tx, notify); in xennet_start_xmit()
618 notify_remote_via_irq(queue->tx_irq); in xennet_start_xmit()
626 xennet_tx_buf_gc(queue); in xennet_start_xmit()
628 if (!netfront_tx_slot_available(queue)) in xennet_start_xmit()
629 netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id)); in xennet_start_xmit()
631 spin_unlock_irqrestore(&queue->tx_lock, flags); in xennet_start_xmit()
646 struct netfront_queue *queue; in xennet_close() local
649 queue = &np->queues[i]; in xennet_close()
650 napi_disable(&queue->napi); in xennet_close()
655 static void xennet_move_rx_slot(struct netfront_queue *queue, struct sk_buff *skb, in xennet_move_rx_slot() argument
658 int new = xennet_rxidx(queue->rx.req_prod_pvt); in xennet_move_rx_slot()
660 BUG_ON(queue->rx_skbs[new]); in xennet_move_rx_slot()
661 queue->rx_skbs[new] = skb; in xennet_move_rx_slot()
662 queue->grant_rx_ref[new] = ref; in xennet_move_rx_slot()
663 RING_GET_REQUEST(&queue->rx, queue->rx.req_prod_pvt)->id = new; in xennet_move_rx_slot()
664 RING_GET_REQUEST(&queue->rx, queue->rx.req_prod_pvt)->gref = ref; in xennet_move_rx_slot()
665 queue->rx.req_prod_pvt++; in xennet_move_rx_slot()
668 static int xennet_get_extras(struct netfront_queue *queue, in xennet_get_extras() argument
674 struct device *dev = &queue->info->netdev->dev; in xennet_get_extras()
675 RING_IDX cons = queue->rx.rsp_cons; in xennet_get_extras()
690 RING_GET_RESPONSE(&queue->rx, ++cons); in xennet_get_extras()
703 skb = xennet_get_rx_skb(queue, cons); in xennet_get_extras()
704 ref = xennet_get_rx_ref(queue, cons); in xennet_get_extras()
705 xennet_move_rx_slot(queue, skb, ref); in xennet_get_extras()
708 queue->rx.rsp_cons = cons; in xennet_get_extras()
712 static int xennet_get_responses(struct netfront_queue *queue, in xennet_get_responses() argument
718 struct device *dev = &queue->info->netdev->dev; in xennet_get_responses()
719 RING_IDX cons = queue->rx.rsp_cons; in xennet_get_responses()
720 struct sk_buff *skb = xennet_get_rx_skb(queue, cons); in xennet_get_responses()
721 grant_ref_t ref = xennet_get_rx_ref(queue, cons); in xennet_get_responses()
728 err = xennet_get_extras(queue, extras, rp); in xennet_get_responses()
729 cons = queue->rx.rsp_cons; in xennet_get_responses()
738 xennet_move_rx_slot(queue, skb, ref); in xennet_get_responses()
759 gnttab_release_grant_reference(&queue->gref_rx_head, ref); in xennet_get_responses()
774 rx = RING_GET_RESPONSE(&queue->rx, cons + slots); in xennet_get_responses()
775 skb = xennet_get_rx_skb(queue, cons + slots); in xennet_get_responses()
776 ref = xennet_get_rx_ref(queue, cons + slots); in xennet_get_responses()
787 queue->rx.rsp_cons = cons + slots; in xennet_get_responses()
821 static RING_IDX xennet_fill_frags(struct netfront_queue *queue, in xennet_fill_frags() argument
826 RING_IDX cons = queue->rx.rsp_cons; in xennet_fill_frags()
831 RING_GET_RESPONSE(&queue->rx, ++cons); in xennet_fill_frags()
876 static int handle_incoming_queue(struct netfront_queue *queue, in handle_incoming_queue() argument
879 struct netfront_stats *rx_stats = this_cpu_ptr(queue->info->rx_stats); in handle_incoming_queue()
890 skb->protocol = eth_type_trans(skb, queue->info->netdev); in handle_incoming_queue()
893 if (checksum_setup(queue->info->netdev, skb)) { in handle_incoming_queue()
896 queue->info->netdev->stats.rx_errors++; in handle_incoming_queue()
906 napi_gro_receive(&queue->napi, skb); in handle_incoming_queue()
914 struct netfront_queue *queue = container_of(napi, struct netfront_queue, napi); in xennet_poll() local
915 struct net_device *dev = queue->info->netdev; in xennet_poll()
927 spin_lock(&queue->rx_lock); in xennet_poll()
933 rp = queue->rx.sring->rsp_prod; in xennet_poll()
936 i = queue->rx.rsp_cons; in xennet_poll()
939 memcpy(rx, RING_GET_RESPONSE(&queue->rx, i), sizeof(*rx)); in xennet_poll()
942 err = xennet_get_responses(queue, &rinfo, rp, &tmpq); in xennet_poll()
949 i = queue->rx.rsp_cons; in xennet_poll()
961 queue->rx.rsp_cons += skb_queue_len(&tmpq); in xennet_poll()
975 i = xennet_fill_frags(queue, skb, &tmpq); in xennet_poll()
984 queue->rx.rsp_cons = ++i; in xennet_poll()
990 work_done -= handle_incoming_queue(queue, &rxq); in xennet_poll()
992 xennet_alloc_rx_buffers(queue); in xennet_poll()
999 RING_FINAL_CHECK_FOR_RESPONSES(&queue->rx, more_to_do); in xennet_poll()
1004 spin_unlock(&queue->rx_lock); in xennet_poll()
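
Lines 999-1004 hint at the standard NAPI exit sequence: if the budget was not exhausted, complete NAPI, re-arm the ring's response event with RING_FINAL_CHECK_FOR_RESPONSES, and reschedule if responses raced in meanwhile. Roughly, as a hedged reconstruction that reuses the names from the listing (budget is the NAPI poll parameter; the driver's exact ordering and locking differ by kernel version):

/* Hedged sketch of the poll-completion pattern.  napi_complete() stops
 * polling; RING_FINAL_CHECK_FOR_RESPONSES() re-enables the ring's response
 * event and reports whether anything slipped in before it did. */
if (work_done < budget) {
	int more_to_do = 0;

	napi_complete(napi);
	RING_FINAL_CHECK_FOR_RESPONSES(&queue->rx, more_to_do);
	if (more_to_do)
		napi_schedule(napi);
}
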
1055 static void xennet_release_tx_bufs(struct netfront_queue *queue) in xennet_release_tx_bufs() argument
1062 if (skb_entry_is_link(&queue->tx_skbs[i])) in xennet_release_tx_bufs()
1065 skb = queue->tx_skbs[i].skb; in xennet_release_tx_bufs()
1066 get_page(queue->grant_tx_page[i]); in xennet_release_tx_bufs()
1067 gnttab_end_foreign_access(queue->grant_tx_ref[i], in xennet_release_tx_bufs()
1069 (unsigned long)page_address(queue->grant_tx_page[i])); in xennet_release_tx_bufs()
1070 queue->grant_tx_page[i] = NULL; in xennet_release_tx_bufs()
1071 queue->grant_tx_ref[i] = GRANT_INVALID_REF; in xennet_release_tx_bufs()
1072 add_id_to_freelist(&queue->tx_skb_freelist, queue->tx_skbs, i); in xennet_release_tx_bufs()
1077 static void xennet_release_rx_bufs(struct netfront_queue *queue) in xennet_release_rx_bufs() argument
1081 spin_lock_bh(&queue->rx_lock); in xennet_release_rx_bufs()
1087 skb = queue->rx_skbs[id]; in xennet_release_rx_bufs()
1091 ref = queue->grant_rx_ref[id]; in xennet_release_rx_bufs()
1103 queue->grant_rx_ref[id] = GRANT_INVALID_REF; in xennet_release_rx_bufs()
1108 spin_unlock_bh(&queue->rx_lock); in xennet_release_rx_bufs()
1169 struct netfront_queue *queue = dev_id; in xennet_tx_interrupt() local
1172 spin_lock_irqsave(&queue->tx_lock, flags); in xennet_tx_interrupt()
1173 xennet_tx_buf_gc(queue); in xennet_tx_interrupt()
1174 spin_unlock_irqrestore(&queue->tx_lock, flags); in xennet_tx_interrupt()
1181 struct netfront_queue *queue = dev_id; in xennet_rx_interrupt() local
1182 struct net_device *dev = queue->info->netdev; in xennet_rx_interrupt()
1185 RING_HAS_UNCONSUMED_RESPONSES(&queue->rx))) in xennet_rx_interrupt()
1186 napi_schedule(&queue->napi); in xennet_rx_interrupt()
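
The two handlers split the work along the same lines as the locks: the TX interrupt reaps completions under tx_lock, while the RX interrupt only schedules NAPI. When one event channel is shared for both directions (setup_netfront_single(), below), the driver binds a combined handler that does not appear in this listing because it never touches queue directly; it chains the two, along these lines:

/* Hedged sketch of the combined handler used with a single event channel;
 * irqreturn_t and IRQ_HANDLED come from <linux/interrupt.h>. */
static irqreturn_t xennet_interrupt(int irq, void *dev_id)
{
	xennet_tx_interrupt(irq, dev_id);
	xennet_rx_interrupt(irq, dev_id);

	return IRQ_HANDLED;
}
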
1345 struct netfront_queue *queue = &info->queues[i]; in xennet_disconnect_backend() local
1347 if (queue->tx_irq && (queue->tx_irq == queue->rx_irq)) in xennet_disconnect_backend()
1348 unbind_from_irqhandler(queue->tx_irq, queue); in xennet_disconnect_backend()
1349 if (queue->tx_irq && (queue->tx_irq != queue->rx_irq)) { in xennet_disconnect_backend()
1350 unbind_from_irqhandler(queue->tx_irq, queue); in xennet_disconnect_backend()
1351 unbind_from_irqhandler(queue->rx_irq, queue); in xennet_disconnect_backend()
1353 queue->tx_evtchn = queue->rx_evtchn = 0; in xennet_disconnect_backend()
1354 queue->tx_irq = queue->rx_irq = 0; in xennet_disconnect_backend()
1357 napi_synchronize(&queue->napi); in xennet_disconnect_backend()
1359 xennet_release_tx_bufs(queue); in xennet_disconnect_backend()
1360 xennet_release_rx_bufs(queue); in xennet_disconnect_backend()
1361 gnttab_free_grant_references(queue->gref_tx_head); in xennet_disconnect_backend()
1362 gnttab_free_grant_references(queue->gref_rx_head); in xennet_disconnect_backend()
1365 xennet_end_access(queue->tx_ring_ref, queue->tx.sring); in xennet_disconnect_backend()
1366 xennet_end_access(queue->rx_ring_ref, queue->rx.sring); in xennet_disconnect_backend()
1368 queue->tx_ring_ref = GRANT_INVALID_REF; in xennet_disconnect_backend()
1369 queue->rx_ring_ref = GRANT_INVALID_REF; in xennet_disconnect_backend()
1370 queue->tx.sring = NULL; in xennet_disconnect_backend()
1371 queue->rx.sring = NULL; in xennet_disconnect_backend()
1413 static int setup_netfront_single(struct netfront_queue *queue) in setup_netfront_single() argument
1417 err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->tx_evtchn); in setup_netfront_single()
1421 err = bind_evtchn_to_irqhandler(queue->tx_evtchn, in setup_netfront_single()
1423 0, queue->info->netdev->name, queue); in setup_netfront_single()
1426 queue->rx_evtchn = queue->tx_evtchn; in setup_netfront_single()
1427 queue->rx_irq = queue->tx_irq = err; in setup_netfront_single()
1432 xenbus_free_evtchn(queue->info->xbdev, queue->tx_evtchn); in setup_netfront_single()
1433 queue->tx_evtchn = 0; in setup_netfront_single()
1438 static int setup_netfront_split(struct netfront_queue *queue) in setup_netfront_split() argument
1442 err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->tx_evtchn); in setup_netfront_split()
1445 err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->rx_evtchn); in setup_netfront_split()
1449 snprintf(queue->tx_irq_name, sizeof(queue->tx_irq_name), in setup_netfront_split()
1450 "%s-tx", queue->name); in setup_netfront_split()
1451 err = bind_evtchn_to_irqhandler(queue->tx_evtchn, in setup_netfront_split()
1453 0, queue->tx_irq_name, queue); in setup_netfront_split()
1456 queue->tx_irq = err; in setup_netfront_split()
1458 snprintf(queue->rx_irq_name, sizeof(queue->rx_irq_name), in setup_netfront_split()
1459 "%s-rx", queue->name); in setup_netfront_split()
1460 err = bind_evtchn_to_irqhandler(queue->rx_evtchn, in setup_netfront_split()
1462 0, queue->rx_irq_name, queue); in setup_netfront_split()
1465 queue->rx_irq = err; in setup_netfront_split()
1470 unbind_from_irqhandler(queue->tx_irq, queue); in setup_netfront_split()
1471 queue->tx_irq = 0; in setup_netfront_split()
1473 xenbus_free_evtchn(queue->info->xbdev, queue->rx_evtchn); in setup_netfront_split()
1474 queue->rx_evtchn = 0; in setup_netfront_split()
1476 xenbus_free_evtchn(queue->info->xbdev, queue->tx_evtchn); in setup_netfront_split()
1477 queue->tx_evtchn = 0; in setup_netfront_split()
1483 struct netfront_queue *queue, unsigned int feature_split_evtchn) in setup_netfront() argument
1490 queue->tx_ring_ref = GRANT_INVALID_REF; in setup_netfront()
1491 queue->rx_ring_ref = GRANT_INVALID_REF; in setup_netfront()
1492 queue->rx.sring = NULL; in setup_netfront()
1493 queue->tx.sring = NULL; in setup_netfront()
1502 FRONT_RING_INIT(&queue->tx, txs, PAGE_SIZE); in setup_netfront()
1507 queue->tx_ring_ref = gref; in setup_netfront()
1516 FRONT_RING_INIT(&queue->rx, rxs, PAGE_SIZE); in setup_netfront()
1521 queue->rx_ring_ref = gref; in setup_netfront()
1524 err = setup_netfront_split(queue); in setup_netfront()
1530 err = setup_netfront_single(queue); in setup_netfront()
1541 gnttab_end_foreign_access_ref(queue->rx_ring_ref, 0); in setup_netfront()
1545 gnttab_end_foreign_access_ref(queue->tx_ring_ref, 0); in setup_netfront()
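
Lines 1524 and 1530 are the two halves of the event-channel choice in setup_netfront(): try split TX/RX channels when the backend advertises support, otherwise fall back to a single shared channel. The surrounding condition sits on lines elided from this listing, but the intent reads roughly as:

/* Hedged sketch of the fallback around lines 1524-1530; the exact condition
 * lives on lines not shown here. */
if (feature_split_evtchn)
	err = setup_netfront_split(queue);
/* fall back to one shared event channel if split channels are not
 * advertised, or if setting them up failed */
if (!feature_split_evtchn || err)
	err = setup_netfront_single(queue);
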
1556 static int xennet_init_queue(struct netfront_queue *queue) in xennet_init_queue() argument
1561 spin_lock_init(&queue->tx_lock); in xennet_init_queue()
1562 spin_lock_init(&queue->rx_lock); in xennet_init_queue()
1564 init_timer(&queue->rx_refill_timer); in xennet_init_queue()
1565 queue->rx_refill_timer.data = (unsigned long)queue; in xennet_init_queue()
1566 queue->rx_refill_timer.function = rx_refill_timeout; in xennet_init_queue()
1568 snprintf(queue->name, sizeof(queue->name), "%s-q%u", in xennet_init_queue()
1569 queue->info->netdev->name, queue->id); in xennet_init_queue()
1572 queue->tx_skb_freelist = 0; in xennet_init_queue()
1574 skb_entry_set_link(&queue->tx_skbs[i], i+1); in xennet_init_queue()
1575 queue->grant_tx_ref[i] = GRANT_INVALID_REF; in xennet_init_queue()
1576 queue->grant_tx_page[i] = NULL; in xennet_init_queue()
1581 queue->rx_skbs[i] = NULL; in xennet_init_queue()
1582 queue->grant_rx_ref[i] = GRANT_INVALID_REF; in xennet_init_queue()
1587 &queue->gref_tx_head) < 0) { in xennet_init_queue()
1595 &queue->gref_rx_head) < 0) { in xennet_init_queue()
1604 gnttab_free_grant_references(queue->gref_tx_head); in xennet_init_queue()
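
The tail of xennet_init_queue() (lines 1587-1604) pre-claims a ring's worth of grant references for each direction so the data path never has to allocate them; if the RX batch cannot be claimed, the already-claimed TX batch is released. A sketch of that pattern, where claim_queue_grants() and the -ENOMEM return value are assumptions, gnttab_alloc_grant_references()/gnttab_free_grant_references() are the real grant-table APIs, and the driver's own types and ring-size constants are assumed to be in scope:

/* Hedged sketch of the per-queue grant pre-allocation. */
static int claim_queue_grants(struct netfront_queue *queue)
{
	/* one grant reference per TX slot */
	if (gnttab_alloc_grant_references(NET_TX_RING_SIZE,
					  &queue->gref_tx_head) < 0)
		return -ENOMEM;

	/* one per RX slot, releasing the TX batch on failure */
	if (gnttab_alloc_grant_references(NET_RX_RING_SIZE,
					  &queue->gref_rx_head) < 0) {
		gnttab_free_grant_references(queue->gref_tx_head);
		return -ENOMEM;
	}

	return 0;
}
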
1609 static int write_queue_xenstore_keys(struct netfront_queue *queue, in write_queue_xenstore_keys() argument
1616 struct xenbus_device *dev = queue->info->xbdev; in write_queue_xenstore_keys()
1632 dev->nodename, queue->id); in write_queue_xenstore_keys()
1639 queue->tx_ring_ref); in write_queue_xenstore_keys()
1646 queue->rx_ring_ref); in write_queue_xenstore_keys()
1655 if (queue->tx_evtchn == queue->rx_evtchn) { in write_queue_xenstore_keys()
1658 "event-channel", "%u", queue->tx_evtchn); in write_queue_xenstore_keys()
1666 "event-channel-tx", "%u", queue->tx_evtchn); in write_queue_xenstore_keys()
1673 "event-channel-rx", "%u", queue->rx_evtchn); in write_queue_xenstore_keys()
1698 struct netfront_queue *queue = &info->queues[i]; in xennet_destroy_queues() local
1701 napi_disable(&queue->napi); in xennet_destroy_queues()
1702 del_timer_sync(&queue->rx_refill_timer); in xennet_destroy_queues()
1703 netif_napi_del(&queue->napi); in xennet_destroy_queues()
1726 struct netfront_queue *queue = &info->queues[i]; in xennet_create_queues() local
1728 queue->id = i; in xennet_create_queues()
1729 queue->info = info; in xennet_create_queues()
1731 ret = xennet_init_queue(queue); in xennet_create_queues()
1739 netif_napi_add(queue->info->netdev, &queue->napi, in xennet_create_queues()
1742 napi_enable(&queue->napi); in xennet_create_queues()
1766 struct netfront_queue *queue = NULL; in talk_to_netback() local
1801 queue = &info->queues[i]; in talk_to_netback()
1802 err = setup_netfront(dev, queue, feature_split_evtchn); in talk_to_netback()
1841 queue = &info->queues[i]; in talk_to_netback()
1842 err = write_queue_xenstore_keys(queue, &xbt, 1); /* hierarchical */ in talk_to_netback()
1919 struct netfront_queue *queue = NULL; in xennet_connect() local
1951 queue = &np->queues[j]; in xennet_connect()
1953 notify_remote_via_irq(queue->tx_irq); in xennet_connect()
1954 if (queue->tx_irq != queue->rx_irq) in xennet_connect()
1955 notify_remote_via_irq(queue->rx_irq); in xennet_connect()
1957 spin_lock_irq(&queue->tx_lock); in xennet_connect()
1958 xennet_tx_buf_gc(queue); in xennet_connect()
1959 spin_unlock_irq(&queue->tx_lock); in xennet_connect()
1961 spin_lock_bh(&queue->rx_lock); in xennet_connect()
1962 xennet_alloc_rx_buffers(queue); in xennet_connect()
1963 spin_unlock_bh(&queue->rx_lock); in xennet_connect()