Lines matching refs: vi

Identifier cross-reference for 'vi' (the per-device struct virtnet_info pointer) in the Linux virtio network driver, drivers/net/virtio_net.c. Each hit shows the source line number, the matching line, the enclosing function, and whether 'vi' is declared there as a local or received as an argument.
218 struct virtnet_info *vi = vq->vdev->priv; in skb_xmit_done() local
224 netif_wake_subqueue(vi->dev, vq2txq(vq)); in skb_xmit_done()
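skb_xmit_done() is the TX virtqueue callback: it recovers vi from the virtqueue's owning device and re-wakes the matching transmit queue. The vq2txq()/vq2rxq() helpers it relies on encode the virtio-net queue layout (rx0, tx0, rx1, tx1, ..., with an optional trailing control vq). A minimal sketch of that mapping, assuming the conventional layout (the real helpers are defined near the top of the file):

    /* Queue pair i owns vqs 2i (rx) and 2i+1 (tx); the ctrl vq, if any, is last. */
    static int vq2txq(struct virtqueue *vq)
    {
            return (vq->index - 1) / 2;
    }

    static int vq2rxq(struct virtqueue *vq)
    {
            return vq->index / 2;
    }

    static int txq2vq(int txq)
    {
            return txq * 2 + 1;
    }

    static int rxq2vq(int rxq)
    {
            return rxq * 2;
    }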
246 static struct sk_buff *page_to_skb(struct virtnet_info *vi, in page_to_skb() argument
259 skb = netdev_alloc_skb_ip_align(vi->dev, GOOD_COPY_LEN); in page_to_skb()
265 hdr_len = vi->hdr_len; in page_to_skb()
266 if (vi->mergeable_rx_bufs) in page_to_skb()
285 if (vi->mergeable_rx_bufs) { in page_to_skb()
320 static struct sk_buff *receive_small(struct virtnet_info *vi, void *buf, unsigned int len) in receive_small() argument
324 len -= vi->hdr_len; in receive_small()
331 struct virtnet_info *vi, in receive_big() argument
337 struct sk_buff *skb = page_to_skb(vi, rq, page, 0, len, PAGE_SIZE); in receive_big()
351 struct virtnet_info *vi, in receive_mergeable() argument
358 u16 num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers); in receive_mergeable()
363 struct sk_buff *head_skb = page_to_skb(vi, rq, page, offset, len, in receive_mergeable()
376 virtio16_to_cpu(vi->vdev, in receive_mergeable()
438 static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq, in receive_buf() argument
441 struct net_device *dev = vi->dev; in receive_buf()
442 struct virtnet_stats *stats = this_cpu_ptr(vi->stats); in receive_buf()
446 if (unlikely(len < vi->hdr_len + ETH_HLEN)) { in receive_buf()
449 if (vi->mergeable_rx_bufs) { in receive_buf()
453 } else if (vi->big_packets) { in receive_buf()
461 if (vi->mergeable_rx_bufs) in receive_buf()
462 skb = receive_mergeable(dev, vi, rq, (unsigned long)buf, len); in receive_buf()
463 else if (vi->big_packets) in receive_buf()
464 skb = receive_big(dev, vi, rq, buf, len); in receive_buf()
466 skb = receive_small(vi, buf, len); in receive_buf()
481 virtio16_to_cpu(vi->vdev, hdr->hdr.csum_start), in receive_buf()
482 virtio16_to_cpu(vi->vdev, hdr->hdr.csum_offset))) in receive_buf()
513 skb_shinfo(skb)->gso_size = virtio16_to_cpu(vi->vdev, in receive_buf()
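The virtio16_to_cpu() conversions at lines 481-482 and 513 exist because header fields arrive in virtio byte order: guest-native for legacy devices, little-endian for VIRTIO 1.0 devices. A sketch of the checksum branch those lines belong to, reconstructed from the hits (surrounding error handling elided):

    if (hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
            /* Device left the checksum partial; record where to finish it. */
            if (!skb_partial_csum_set(skb,
                            virtio16_to_cpu(vi->vdev, hdr->hdr.csum_start),
                            virtio16_to_cpu(vi->vdev, hdr->hdr.csum_offset)))
                    goto frame_err;
    }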
535 static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq, in add_recvbuf_small() argument
542 skb = __netdev_alloc_skb_ip_align(vi->dev, GOOD_PACKET_LEN, gfp); in add_recvbuf_small()
550 sg_set_buf(rq->sg, hdr, vi->hdr_len); in add_recvbuf_small()
560 static int add_recvbuf_big(struct virtnet_info *vi, struct receive_queue *rq, in add_recvbuf_big() argument
593 sg_set_buf(&rq->sg[0], p, vi->hdr_len); in add_recvbuf_big()
661 static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq, in try_fill_recv() argument
669 if (vi->mergeable_rx_bufs) in try_fill_recv()
671 else if (vi->big_packets) in try_fill_recv()
672 err = add_recvbuf_big(vi, rq, gfp); in try_fill_recv()
674 err = add_recvbuf_small(vi, rq, gfp); in try_fill_recv()
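try_fill_recv() is the single refill entry point; the three add_recvbuf_*() variants match the three receive-buffer strategies the device can negotiate (mergeable buffers, "big" page-chain packets, or single small skbs). A condensed sketch of its shape, reconstructed from the hits above:

    static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq,
                              gfp_t gfp)
    {
            int err;
            bool oom;

            do {
                    if (vi->mergeable_rx_bufs)
                            err = add_recvbuf_mergeable(rq, gfp);
                    else if (vi->big_packets)
                            err = add_recvbuf_big(vi, rq, gfp);
                    else
                            err = add_recvbuf_small(vi, rq, gfp);

                    oom = err == -ENOMEM;
                    if (err)
                            break;
            } while (rq->vq->num_free);     /* stop once the ring is full */
            virtqueue_kick(rq->vq);
            return !oom;                    /* false tells callers to retry later */
    }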
686 struct virtnet_info *vi = rvq->vdev->priv; in skb_recv_done() local
687 struct receive_queue *rq = &vi->rq[vq2rxq(rvq)]; in skb_recv_done()
714 struct virtnet_info *vi = in refill_work() local
719 for (i = 0; i < vi->curr_queue_pairs; i++) { in refill_work()
720 struct receive_queue *rq = &vi->rq[i]; in refill_work()
723 still_empty = !try_fill_recv(vi, rq, GFP_KERNEL); in refill_work()
730 schedule_delayed_work(&vi->refill, HZ/2); in refill_work()
736 struct virtnet_info *vi = rq->vq->vdev->priv; in virtnet_receive() local
742 receive_buf(vi, rq, buf, len); in virtnet_receive()
747 if (!try_fill_recv(vi, rq, GFP_ATOMIC)) in virtnet_receive()
748 schedule_delayed_work(&vi->refill, 0); in virtnet_receive()
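virtnet_receive() runs in NAPI (softirq) context, so it refills with GFP_ATOMIC; if that fails it schedules refill_work(), which retries from process context with GFP_KERNEL and, per line 730, re-arms itself every HZ/2 jiffies while any ring remains empty. The relevant fragment, roughly:

    /* In virtnet_receive(): cannot sleep here, so fall back to the worker. */
    if (rq->vq->num_free > virtqueue_get_vring_size(rq->vq) / 2) {
            if (!try_fill_recv(vi, rq, GFP_ATOMIC))
                    schedule_delayed_work(&vi->refill, 0);
    }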
782 struct virtnet_info *vi = rq->vq->vdev->priv; in virtnet_busy_poll() local
785 if (!(vi->status & VIRTIO_NET_S_LINK_UP)) in virtnet_busy_poll()
815 struct virtnet_info *vi = netdev_priv(dev); in virtnet_open() local
818 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_open()
819 if (i < vi->curr_queue_pairs) in virtnet_open()
821 if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL)) in virtnet_open()
822 schedule_delayed_work(&vi->refill, 0); in virtnet_open()
823 virtnet_napi_enable(&vi->rq[i]); in virtnet_open()
833 struct virtnet_info *vi = sq->vq->vdev->priv; in free_old_xmit_skbs() local
834 struct virtnet_stats *stats = this_cpu_ptr(vi->stats); in free_old_xmit_skbs()
852 struct virtnet_info *vi = sq->vq->vdev->priv; in xmit_skb() local
854 unsigned hdr_len = vi->hdr_len; in xmit_skb()
857 pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest); in xmit_skb()
859 can_push = vi->any_header_sg && in xmit_skb()
871 hdr->hdr.csum_start = cpu_to_virtio16(vi->vdev, in xmit_skb()
873 hdr->hdr.csum_offset = cpu_to_virtio16(vi->vdev, in xmit_skb()
881 hdr->hdr.hdr_len = cpu_to_virtio16(vi->vdev, skb_headlen(skb)); in xmit_skb()
882 hdr->hdr.gso_size = cpu_to_virtio16(vi->vdev, in xmit_skb()
899 if (vi->mergeable_rx_bufs) in xmit_skb()
917 struct virtnet_info *vi = netdev_priv(dev); in start_xmit() local
919 struct send_queue *sq = &vi->sq[qnum]; in start_xmit()
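In xmit_skb(), the any_header_sg test at line 859 drives an optimization: when the device accepts any descriptor layout (VIRTIO_F_ANY_LAYOUT) and the skb has unshared, suitably aligned headroom, the virtio-net header is placed inside the skb's head so header and packet go out as one contiguous buffer, saving a descriptor per packet. A sketch of the decision, with the alignment check abbreviated:

    can_push = vi->any_header_sg &&
               !skb_header_cloned(skb) &&
               skb_headroom(skb) >= hdr_len;        /* plus an alignment check */
    if (can_push)
            hdr = (struct virtio_net_hdr_mrg_rxbuf *)(skb->data - hdr_len);
    else
            hdr = skb_vnet_hdr(skb);        /* fall back to the skb's cb area */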
981 static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd, in virtnet_send_command() argument
988 BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ)); in virtnet_send_command()
990 vi->ctrl_status = ~0; in virtnet_send_command()
991 vi->ctrl_hdr.class = class; in virtnet_send_command()
992 vi->ctrl_hdr.cmd = cmd; in virtnet_send_command()
994 sg_init_one(&hdr, &vi->ctrl_hdr, sizeof(vi->ctrl_hdr)); in virtnet_send_command()
1001 sg_init_one(&stat, &vi->ctrl_status, sizeof(vi->ctrl_status)); in virtnet_send_command()
1005 virtqueue_add_sgs(vi->cvq, sgs, out_num, 1, vi, GFP_ATOMIC); in virtnet_send_command()
1007 if (unlikely(!virtqueue_kick(vi->cvq))) in virtnet_send_command()
1008 return vi->ctrl_status == VIRTIO_NET_OK; in virtnet_send_command()
1013 while (!virtqueue_get_buf(vi->cvq, &tmp) && in virtnet_send_command()
1014 !virtqueue_is_broken(vi->cvq)) in virtnet_send_command()
1017 return vi->ctrl_status == VIRTIO_NET_OK; in virtnet_send_command()
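virtnet_send_command() shows the whole control-virtqueue protocol in one place: a device-readable header naming the command class and code, an optional device-readable payload, and a single device-writable status byte, all queued as one chained buffer. The function then kicks the device and busy-waits, since control commands are rare and synchronous. Reassembled from the hits above:

    struct scatterlist hdr, stat, *sgs[4];
    unsigned out_num = 0, tmp;

    vi->ctrl_status = ~0;                   /* poison; device writes VIRTIO_NET_OK */
    vi->ctrl_hdr.class = class;
    vi->ctrl_hdr.cmd = cmd;

    sg_init_one(&hdr, &vi->ctrl_hdr, sizeof(vi->ctrl_hdr));
    sgs[out_num++] = &hdr;
    if (out)
            sgs[out_num++] = out;           /* command-specific payload */
    sg_init_one(&stat, &vi->ctrl_status, sizeof(vi->ctrl_status));
    sgs[out_num] = &stat;                   /* the one device-writable element */

    virtqueue_add_sgs(vi->cvq, sgs, out_num, 1, vi, GFP_ATOMIC);
    if (unlikely(!virtqueue_kick(vi->cvq)))
            return vi->ctrl_status == VIRTIO_NET_OK;

    /* Spin until the device returns the buffer (or the vq breaks). */
    while (!virtqueue_get_buf(vi->cvq, &tmp) &&
           !virtqueue_is_broken(vi->cvq))
            cpu_relax();

    return vi->ctrl_status == VIRTIO_NET_OK;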
1022 struct virtnet_info *vi = netdev_priv(dev); in virtnet_set_mac_address() local
1023 struct virtio_device *vdev = vi->vdev; in virtnet_set_mac_address()
1034 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC, in virtnet_set_mac_address()
1059 struct virtnet_info *vi = netdev_priv(dev); in virtnet_stats() local
1064 struct virtnet_stats *stats = per_cpu_ptr(vi->stats, cpu); in virtnet_stats()
1097 struct virtnet_info *vi = netdev_priv(dev); in virtnet_netpoll() local
1100 for (i = 0; i < vi->curr_queue_pairs; i++) in virtnet_netpoll()
1101 napi_schedule(&vi->rq[i].napi); in virtnet_netpoll()
1105 static void virtnet_ack_link_announce(struct virtnet_info *vi) in virtnet_ack_link_announce() argument
1108 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_ANNOUNCE, in virtnet_ack_link_announce()
1110 dev_warn(&vi->dev->dev, "Failed to ack link announce.\n"); in virtnet_ack_link_announce()
1114 static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs) in virtnet_set_queues() argument
1118 struct net_device *dev = vi->dev; in virtnet_set_queues()
1120 if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ)) in virtnet_set_queues()
1123 s.virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs); in virtnet_set_queues()
1126 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ, in virtnet_set_queues()
1132 vi->curr_queue_pairs = queue_pairs; in virtnet_set_queues()
1135 schedule_delayed_work(&vi->refill, 0); in virtnet_set_queues()
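virtnet_set_queues() is a representative caller of the control path: it packs a struct virtio_net_ctrl_mq payload, converts the count to virtio endianness (line 1123), and sends it on the control vq. Roughly:

    struct virtio_net_ctrl_mq s;
    struct scatterlist sg;

    s.virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs);
    sg_init_one(&sg, &s, sizeof(s));

    if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
                              VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg)) {
            dev_warn(&dev->dev, "Fail to set num of queue pairs to %d\n",
                     queue_pairs);
            return -EINVAL;
    }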
1143 struct virtnet_info *vi = netdev_priv(dev); in virtnet_close() local
1147 cancel_delayed_work_sync(&vi->refill); in virtnet_close()
1149 for (i = 0; i < vi->max_queue_pairs; i++) in virtnet_close()
1150 napi_disable(&vi->rq[i].napi); in virtnet_close()
1157 struct virtnet_info *vi = netdev_priv(dev); in virtnet_set_rx_mode() local
1167 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX)) in virtnet_set_rx_mode()
1170 vi->ctrl_promisc = ((dev->flags & IFF_PROMISC) != 0); in virtnet_set_rx_mode()
1171 vi->ctrl_allmulti = ((dev->flags & IFF_ALLMULTI) != 0); in virtnet_set_rx_mode()
1173 sg_init_one(sg, &vi->ctrl_promisc, sizeof(vi->ctrl_promisc)); in virtnet_set_rx_mode()
1175 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX, in virtnet_set_rx_mode()
1178 vi->ctrl_promisc ? "en" : "dis"); in virtnet_set_rx_mode()
1180 sg_init_one(sg, &vi->ctrl_allmulti, sizeof(vi->ctrl_allmulti)); in virtnet_set_rx_mode()
1182 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX, in virtnet_set_rx_mode()
1185 vi->ctrl_allmulti ? "en" : "dis"); in virtnet_set_rx_mode()
1199 mac_data->entries = cpu_to_virtio32(vi->vdev, uc_count); in virtnet_set_rx_mode()
1210 mac_data->entries = cpu_to_virtio32(vi->vdev, mc_count); in virtnet_set_rx_mode()
1218 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC, in virtnet_set_rx_mode()
1228 struct virtnet_info *vi = netdev_priv(dev); in virtnet_vlan_rx_add_vid() local
1233 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN, in virtnet_vlan_rx_add_vid()
1242 struct virtnet_info *vi = netdev_priv(dev); in virtnet_vlan_rx_kill_vid() local
1247 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN, in virtnet_vlan_rx_kill_vid()
1253 static void virtnet_clean_affinity(struct virtnet_info *vi, long hcpu) in virtnet_clean_affinity() argument
1257 if (vi->affinity_hint_set) { in virtnet_clean_affinity()
1258 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_clean_affinity()
1259 virtqueue_set_affinity(vi->rq[i].vq, -1); in virtnet_clean_affinity()
1260 virtqueue_set_affinity(vi->sq[i].vq, -1); in virtnet_clean_affinity()
1263 vi->affinity_hint_set = false; in virtnet_clean_affinity()
1267 static void virtnet_set_affinity(struct virtnet_info *vi) in virtnet_set_affinity() argument
1276 if (vi->curr_queue_pairs == 1 || in virtnet_set_affinity()
1277 vi->max_queue_pairs != num_online_cpus()) { in virtnet_set_affinity()
1278 virtnet_clean_affinity(vi, -1); in virtnet_set_affinity()
1284 virtqueue_set_affinity(vi->rq[i].vq, cpu); in virtnet_set_affinity()
1285 virtqueue_set_affinity(vi->sq[i].vq, cpu); in virtnet_set_affinity()
1286 netif_set_xps_queue(vi->dev, cpumask_of(cpu), i); in virtnet_set_affinity()
1290 vi->affinity_hint_set = true; in virtnet_set_affinity()
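The policy in virtnet_set_affinity() is all-or-nothing: only when every online CPU can own exactly one queue pair (lines 1276-1277) does the driver pin each pair's rx/tx vq interrupts and the matching XPS transmit map to one CPU; otherwise it clears any stale hints and leaves placement to the scheduler. In outline (locals elided):

    if (vi->curr_queue_pairs == 1 ||
        vi->max_queue_pairs != num_online_cpus()) {
            virtnet_clean_affinity(vi, -1);     /* drop stale hints */
            return;
    }

    i = 0;
    for_each_online_cpu(cpu) {
            virtqueue_set_affinity(vi->rq[i].vq, cpu);
            virtqueue_set_affinity(vi->sq[i].vq, cpu);
            netif_set_xps_queue(vi->dev, cpumask_of(cpu), i);
            i++;
    }
    vi->affinity_hint_set = true;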
1296 struct virtnet_info *vi = container_of(nfb, struct virtnet_info, nb); in virtnet_cpu_callback() local
1302 virtnet_set_affinity(vi); in virtnet_cpu_callback()
1305 virtnet_clean_affinity(vi, (long)hcpu); in virtnet_cpu_callback()
1317 struct virtnet_info *vi = netdev_priv(dev); in virtnet_get_ringparam() local
1319 ring->rx_max_pending = virtqueue_get_vring_size(vi->rq[0].vq); in virtnet_get_ringparam()
1320 ring->tx_max_pending = virtqueue_get_vring_size(vi->sq[0].vq); in virtnet_get_ringparam()
1329 struct virtnet_info *vi = netdev_priv(dev); in virtnet_get_drvinfo() local
1330 struct virtio_device *vdev = vi->vdev; in virtnet_get_drvinfo()
1342 struct virtnet_info *vi = netdev_priv(dev); in virtnet_set_channels() local
1352 if (queue_pairs > vi->max_queue_pairs || queue_pairs == 0) in virtnet_set_channels()
1356 err = virtnet_set_queues(vi, queue_pairs); in virtnet_set_channels()
1361 virtnet_set_affinity(vi); in virtnet_set_channels()
1371 struct virtnet_info *vi = netdev_priv(dev); in virtnet_get_channels() local
1373 channels->combined_count = vi->curr_queue_pairs; in virtnet_get_channels()
1374 channels->max_combined = vi->max_queue_pairs; in virtnet_get_channels()
1422 struct virtnet_info *vi = in virtnet_config_changed_work() local
1426 if (virtio_cread_feature(vi->vdev, VIRTIO_NET_F_STATUS, in virtnet_config_changed_work()
1431 netdev_notify_peers(vi->dev); in virtnet_config_changed_work()
1432 virtnet_ack_link_announce(vi); in virtnet_config_changed_work()
1438 if (vi->status == v) in virtnet_config_changed_work()
1441 vi->status = v; in virtnet_config_changed_work()
1443 if (vi->status & VIRTIO_NET_S_LINK_UP) { in virtnet_config_changed_work()
1444 netif_carrier_on(vi->dev); in virtnet_config_changed_work()
1445 netif_tx_wake_all_queues(vi->dev); in virtnet_config_changed_work()
1447 netif_carrier_off(vi->dev); in virtnet_config_changed_work()
1448 netif_tx_stop_all_queues(vi->dev); in virtnet_config_changed_work()
1454 struct virtnet_info *vi = vdev->priv; in virtnet_config_changed() local
1456 schedule_work(&vi->config_work); in virtnet_config_changed()
1459 static void virtnet_free_queues(struct virtnet_info *vi) in virtnet_free_queues() argument
1463 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_free_queues()
1464 napi_hash_del(&vi->rq[i].napi); in virtnet_free_queues()
1465 netif_napi_del(&vi->rq[i].napi); in virtnet_free_queues()
1468 kfree(vi->rq); in virtnet_free_queues()
1469 kfree(vi->sq); in virtnet_free_queues()
1472 static void free_receive_bufs(struct virtnet_info *vi) in free_receive_bufs() argument
1476 for (i = 0; i < vi->max_queue_pairs; i++) { in free_receive_bufs()
1477 while (vi->rq[i].pages) in free_receive_bufs()
1478 __free_pages(get_a_page(&vi->rq[i], GFP_KERNEL), 0); in free_receive_bufs()
1482 static void free_receive_page_frags(struct virtnet_info *vi) in free_receive_page_frags() argument
1485 for (i = 0; i < vi->max_queue_pairs; i++) in free_receive_page_frags()
1486 if (vi->rq[i].alloc_frag.page) in free_receive_page_frags()
1487 put_page(vi->rq[i].alloc_frag.page); in free_receive_page_frags()
1490 static void free_unused_bufs(struct virtnet_info *vi) in free_unused_bufs() argument
1495 for (i = 0; i < vi->max_queue_pairs; i++) { in free_unused_bufs()
1496 struct virtqueue *vq = vi->sq[i].vq; in free_unused_bufs()
1501 for (i = 0; i < vi->max_queue_pairs; i++) { in free_unused_bufs()
1502 struct virtqueue *vq = vi->rq[i].vq; in free_unused_bufs()
1505 if (vi->mergeable_rx_bufs) { in free_unused_bufs()
1509 } else if (vi->big_packets) { in free_unused_bufs()
1510 give_pages(&vi->rq[i], buf); in free_unused_bufs()
1518 static void virtnet_del_vqs(struct virtnet_info *vi) in virtnet_del_vqs() argument
1520 struct virtio_device *vdev = vi->vdev; in virtnet_del_vqs()
1522 virtnet_clean_affinity(vi, -1); in virtnet_del_vqs()
1526 virtnet_free_queues(vi); in virtnet_del_vqs()
1529 static int virtnet_find_vqs(struct virtnet_info *vi) in virtnet_find_vqs() argument
1541 total_vqs = vi->max_queue_pairs * 2 + in virtnet_find_vqs()
1542 virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ); in virtnet_find_vqs()
1556 if (vi->has_cvq) { in virtnet_find_vqs()
1562 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_find_vqs()
1565 sprintf(vi->rq[i].name, "input.%d", i); in virtnet_find_vqs()
1566 sprintf(vi->sq[i].name, "output.%d", i); in virtnet_find_vqs()
1567 names[rxq2vq(i)] = vi->rq[i].name; in virtnet_find_vqs()
1568 names[txq2vq(i)] = vi->sq[i].name; in virtnet_find_vqs()
1571 ret = vi->vdev->config->find_vqs(vi->vdev, total_vqs, vqs, callbacks, in virtnet_find_vqs()
1576 if (vi->has_cvq) { in virtnet_find_vqs()
1577 vi->cvq = vqs[total_vqs - 1]; in virtnet_find_vqs()
1578 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN)) in virtnet_find_vqs()
1579 vi->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; in virtnet_find_vqs()
1582 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_find_vqs()
1583 vi->rq[i].vq = vqs[rxq2vq(i)]; in virtnet_find_vqs()
1584 vi->sq[i].vq = vqs[txq2vq(i)]; in virtnet_find_vqs()
1603 static int virtnet_alloc_queues(struct virtnet_info *vi) in virtnet_alloc_queues() argument
1607 vi->sq = kzalloc(sizeof(*vi->sq) * vi->max_queue_pairs, GFP_KERNEL); in virtnet_alloc_queues()
1608 if (!vi->sq) in virtnet_alloc_queues()
1610 vi->rq = kzalloc(sizeof(*vi->rq) * vi->max_queue_pairs, GFP_KERNEL); in virtnet_alloc_queues()
1611 if (!vi->rq) in virtnet_alloc_queues()
1614 INIT_DELAYED_WORK(&vi->refill, refill_work); in virtnet_alloc_queues()
1615 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_alloc_queues()
1616 vi->rq[i].pages = NULL; in virtnet_alloc_queues()
1617 netif_napi_add(vi->dev, &vi->rq[i].napi, virtnet_poll, in virtnet_alloc_queues()
1619 napi_hash_add(&vi->rq[i].napi); in virtnet_alloc_queues()
1621 sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg)); in virtnet_alloc_queues()
1622 ewma_pkt_len_init(&vi->rq[i].mrg_avg_pkt_len); in virtnet_alloc_queues()
1623 sg_init_table(vi->sq[i].sg, ARRAY_SIZE(vi->sq[i].sg)); in virtnet_alloc_queues()
1629 kfree(vi->sq); in virtnet_alloc_queues()
1634 static int init_vqs(struct virtnet_info *vi) in init_vqs() argument
1639 ret = virtnet_alloc_queues(vi); in init_vqs()
1643 ret = virtnet_find_vqs(vi); in init_vqs()
1648 virtnet_set_affinity(vi); in init_vqs()
1654 virtnet_free_queues(vi); in init_vqs()
1663 struct virtnet_info *vi = netdev_priv(queue->dev); in mergeable_rx_buffer_size_show() local
1667 BUG_ON(queue_index >= vi->max_queue_pairs); in mergeable_rx_buffer_size_show()
1668 avg = &vi->rq[queue_index].mrg_avg_pkt_len; in mergeable_rx_buffer_size_show()
1724 struct virtnet_info *vi; in virtnet_probe() local
1801 vi = netdev_priv(dev); in virtnet_probe()
1802 vi->dev = dev; in virtnet_probe()
1803 vi->vdev = vdev; in virtnet_probe()
1804 vdev->priv = vi; in virtnet_probe()
1805 vi->stats = alloc_percpu(struct virtnet_stats); in virtnet_probe()
1807 if (vi->stats == NULL) in virtnet_probe()
1812 virtnet_stats = per_cpu_ptr(vi->stats, i); in virtnet_probe()
1817 INIT_WORK(&vi->config_work, virtnet_config_changed_work); in virtnet_probe()
1824 vi->big_packets = true; in virtnet_probe()
1827 vi->mergeable_rx_bufs = true; in virtnet_probe()
1831 vi->hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf); in virtnet_probe()
1833 vi->hdr_len = sizeof(struct virtio_net_hdr); in virtnet_probe()
1837 vi->any_header_sg = true; in virtnet_probe()
1840 vi->has_cvq = true; in virtnet_probe()
1842 if (vi->any_header_sg) in virtnet_probe()
1843 dev->needed_headroom = vi->hdr_len; in virtnet_probe()
1846 vi->curr_queue_pairs = 1; in virtnet_probe()
1847 vi->max_queue_pairs = max_queue_pairs; in virtnet_probe()
1850 err = init_vqs(vi); in virtnet_probe()
1855 if (vi->mergeable_rx_bufs) in virtnet_probe()
1858 netif_set_real_num_tx_queues(dev, vi->curr_queue_pairs); in virtnet_probe()
1859 netif_set_real_num_rx_queues(dev, vi->curr_queue_pairs); in virtnet_probe()
1870 for (i = 0; i < vi->curr_queue_pairs; i++) { in virtnet_probe()
1871 try_fill_recv(vi, &vi->rq[i], GFP_KERNEL); in virtnet_probe()
1874 if (vi->rq[i].vq->num_free == in virtnet_probe()
1875 virtqueue_get_vring_size(vi->rq[i].vq)) { in virtnet_probe()
1876 free_unused_bufs(vi); in virtnet_probe()
1882 vi->nb.notifier_call = &virtnet_cpu_callback; in virtnet_probe()
1883 err = register_hotcpu_notifier(&vi->nb); in virtnet_probe()
1891 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) { in virtnet_probe()
1893 schedule_work(&vi->config_work); in virtnet_probe()
1895 vi->status = VIRTIO_NET_S_LINK_UP; in virtnet_probe()
1905 vi->vdev->config->reset(vdev); in virtnet_probe()
1907 free_receive_bufs(vi); in virtnet_probe()
1910 cancel_delayed_work_sync(&vi->refill); in virtnet_probe()
1911 free_receive_page_frags(vi); in virtnet_probe()
1912 virtnet_del_vqs(vi); in virtnet_probe()
1914 free_percpu(vi->stats); in virtnet_probe()
1920 static void remove_vq_common(struct virtnet_info *vi) in remove_vq_common() argument
1922 vi->vdev->config->reset(vi->vdev); in remove_vq_common()
1925 free_unused_bufs(vi); in remove_vq_common()
1927 free_receive_bufs(vi); in remove_vq_common()
1929 free_receive_page_frags(vi); in remove_vq_common()
1931 virtnet_del_vqs(vi); in remove_vq_common()
1936 struct virtnet_info *vi = vdev->priv; in virtnet_remove() local
1938 unregister_hotcpu_notifier(&vi->nb); in virtnet_remove()
1941 flush_work(&vi->config_work); in virtnet_remove()
1943 unregister_netdev(vi->dev); in virtnet_remove()
1945 remove_vq_common(vi); in virtnet_remove()
1947 free_percpu(vi->stats); in virtnet_remove()
1948 free_netdev(vi->dev); in virtnet_remove()
1954 struct virtnet_info *vi = vdev->priv; in virtnet_freeze() local
1957 unregister_hotcpu_notifier(&vi->nb); in virtnet_freeze()
1960 flush_work(&vi->config_work); in virtnet_freeze()
1962 netif_device_detach(vi->dev); in virtnet_freeze()
1963 cancel_delayed_work_sync(&vi->refill); in virtnet_freeze()
1965 if (netif_running(vi->dev)) { in virtnet_freeze()
1966 for (i = 0; i < vi->max_queue_pairs; i++) in virtnet_freeze()
1967 napi_disable(&vi->rq[i].napi); in virtnet_freeze()
1970 remove_vq_common(vi); in virtnet_freeze()
1977 struct virtnet_info *vi = vdev->priv; in virtnet_restore() local
1980 err = init_vqs(vi); in virtnet_restore()
1986 if (netif_running(vi->dev)) { in virtnet_restore()
1987 for (i = 0; i < vi->curr_queue_pairs; i++) in virtnet_restore()
1988 if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL)) in virtnet_restore()
1989 schedule_delayed_work(&vi->refill, 0); in virtnet_restore()
1991 for (i = 0; i < vi->max_queue_pairs; i++) in virtnet_restore()
1992 virtnet_napi_enable(&vi->rq[i]); in virtnet_restore()
1995 netif_device_attach(vi->dev); in virtnet_restore()
1998 virtnet_set_queues(vi, vi->curr_queue_pairs); in virtnet_restore()
2001 err = register_hotcpu_notifier(&vi->nb); in virtnet_restore()
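Taken together, the virtnet_freeze()/virtnet_restore() hits describe the suspend contract: freeze unregisters the CPU notifier, flushes pending config work, detaches the netdev, cancels the refill worker, quiesces NAPI, and tears the vqs down; restore rebuilds everything in roughly reverse order before re-attaching. A skeleton of the resume side, assembled from the hits (error unwinding and locking elided):

    static int virtnet_restore(struct virtio_device *vdev)
    {
            struct virtnet_info *vi = vdev->priv;
            int err, i;

            err = init_vqs(vi);             /* rebuilt after remove_vq_common() */
            if (err)
                    return err;

            if (netif_running(vi->dev)) {
                    for (i = 0; i < vi->curr_queue_pairs; i++)
                            if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL))
                                    schedule_delayed_work(&vi->refill, 0);
                    for (i = 0; i < vi->max_queue_pairs; i++)
                            virtnet_napi_enable(&vi->rq[i]);
            }

            netif_device_attach(vi->dev);
            virtnet_set_queues(vi, vi->curr_queue_pairs);
            return register_hotcpu_notifier(&vi->nb);
    }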