Lines Matching refs:vif
122 #define callback_param(vif, pending_idx) \ argument
123 (vif->pending_tx_info[pending_idx].callback_struct)
152 static int xenvif_rx_ring_slots_needed(struct xenvif *vif) in xenvif_rx_ring_slots_needed() argument
154 if (vif->gso_mask) in xenvif_rx_ring_slots_needed()
155 return DIV_ROUND_UP(vif->dev->gso_max_size, XEN_PAGE_SIZE) + 1; in xenvif_rx_ring_slots_needed()
157 return DIV_ROUND_UP(vif->dev->mtu, XEN_PAGE_SIZE); in xenvif_rx_ring_slots_needed()
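
Lines 152-157 above compute the worst-case number of RX ring slots one packet can consume: with GSO enabled it is the page-count of gso_max_size plus one slot, otherwise the page-count of the MTU. A minimal userspace sketch of that arithmetic, assuming the 4 KiB XEN_PAGE_SIZE used on x86:

    #include <stdio.h>

    #define XEN_PAGE_SIZE 4096u  /* assumption: 4 KiB Xen page, as on x86 */
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    /* Worst-case RX slots per packet, mirroring xenvif_rx_ring_slots_needed():
     * a GSO packet may span gso_max_size bytes plus one extra slot for the
     * segment carrying the GSO parameters; otherwise the MTU bounds it.
     */
    static unsigned int rx_slots_needed(unsigned int gso_mask,
                                        unsigned int gso_max_size,
                                        unsigned int mtu)
    {
        if (gso_mask)
            return DIV_ROUND_UP(gso_max_size, XEN_PAGE_SIZE) + 1;
        return DIV_ROUND_UP(mtu, XEN_PAGE_SIZE);
    }

    int main(void)
    {
        /* hypothetical values: TSO with a 64 KiB gso_max_size, 1500-byte MTU */
        printf("GSO on:  %u slots\n", rx_slots_needed(1, 65536, 1500)); /* 17 */
        printf("GSO off: %u slots\n", rx_slots_needed(0, 65536, 1500)); /* 1 */
        return 0;
    }

This result is what xenvif_rx_ring_slots_available() (line 165) compares against the free ring space before deciding whether to wake the transmit queue.
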
165 needed = xenvif_rx_ring_slots_needed(queue->vif); in xenvif_rx_ring_slots_available()
195 netif_tx_stop_queue(netdev_get_tx_queue(queue->vif->dev, queue->id)); in xenvif_rx_queue_tail()
220 netif_tx_wake_queue(netdev_get_tx_queue(queue->vif->dev, queue->id)); in xenvif_rx_queue_maybe_wake()
322 copy_gop->dest.domid = queue->vif->domid; in xenvif_setup_copy_gop()
330 if (info->head && ((1 << info->gso_type) & queue->vif->gso_mask)) in xenvif_setup_copy_gop()
424 struct xenvif *vif = netdev_priv(skb->dev); in xenvif_gop_skb() local
445 if ((1 << gso_type) & vif->gso_prefix_mask) { in xenvif_gop_skb()
457 if ((1 << gso_type) & vif->gso_mask) { in xenvif_gop_skb()
500 static int xenvif_check_gop(struct xenvif *vif, int nr_meta_slots, in xenvif_check_gop() argument
510 netdev_dbg(vif->dev, in xenvif_check_gop()
512 copy_op->status, vif->domid); in xenvif_check_gop()
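
Lines 500-512 are the per-packet verdict on the RX grant copies: every copy op used for the packet is checked, and any non-okay status marks the whole packet as errored toward the frontend. A sketch of that scan, assuming GNTST_okay == 0 as in Xen's grant-table ABI (the real function reports the result as a netif response status rather than a plain int):

    #include <stdio.h>

    #define GNTST_okay 0    /* success status in Xen's grant-table ABI */

    struct copy_op { int status; };

    /* Mirrors the loop in xenvif_check_gop(): scan the grant-copy ops used
     * for one packet and flag the packet as errored if any of them failed.
     */
    static int check_gop(const struct copy_op *ops, int nr_meta_slots)
    {
        int i, err = 0;

        for (i = 0; i < nr_meta_slots; i++) {
            if (ops[i].status != GNTST_okay) {
                fprintf(stderr, "Bad status %d on copy op %d\n",
                        ops[i].status, i);
                err = 1;    /* whole packet reported as errored */
            }
        }
        return err;
    }

    int main(void)
    {
        struct copy_op ops[3] = { { 0 }, { -1 }, { 0 } };

        printf("err = %d\n", check_gop(ops, 3));    /* err = 1 */
        return 0;
    }
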
590 queue->vif->gso_prefix_mask) { in xenvif_rx_action()
608 status = xenvif_check_gop(queue->vif, in xenvif_rx_action()
630 queue->vif->gso_mask) { in xenvif_rx_action()
717 static void xenvif_fatal_tx_err(struct xenvif *vif) in xenvif_fatal_tx_err() argument
719 netdev_err(vif->dev, "fatal error; disabling device\n"); in xenvif_fatal_tx_err()
720 vif->disabled = true; in xenvif_fatal_tx_err()
722 if (vif->queues) in xenvif_fatal_tx_err()
723 xenvif_kick_thread(&vif->queues[0]); in xenvif_fatal_tx_err()
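
Lines 717-723 show the fatal-error pattern: the error path itself only records vif->disabled and kicks the first queue's kthread; the thread then notices the flag (see lines 2081-2082 below) and takes the carrier down in its own context. A hedged pthread model of that hand-off; the mutex, condition variable, and guest_rx_thread name are illustrative stand-ins, not the kernel API:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Illustrative stand-ins for vif->lock and the kthread wakeup. */
    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t kick = PTHREAD_COND_INITIALIZER;
    static bool disabled;

    static void fatal_tx_err(void)
    {
        pthread_mutex_lock(&lock);
        disabled = true;                /* vif->disabled = true */
        pthread_cond_signal(&kick);     /* xenvif_kick_thread(&vif->queues[0]) */
        pthread_mutex_unlock(&lock);
    }

    /* Stand-in for the queue-0 kthread that performs the actual teardown. */
    static void *guest_rx_thread(void *arg)
    {
        (void)arg;
        pthread_mutex_lock(&lock);
        while (!disabled)
            pthread_cond_wait(&kick, &lock);
        pthread_mutex_unlock(&lock);
        printf("thread: fatal error; disabling device\n");
        return NULL;
    }

    int main(void)
    {
        pthread_t t;

        pthread_create(&t, NULL, guest_rx_thread, NULL);
        fatal_tx_err();
        pthread_join(&t, NULL);
        return 0;
    }
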
743 netdev_err(queue->vif->dev, in xenvif_count_requests()
746 xenvif_fatal_tx_err(queue->vif); in xenvif_count_requests()
754 netdev_err(queue->vif->dev, in xenvif_count_requests()
757 xenvif_fatal_tx_err(queue->vif); in xenvif_count_requests()
770 netdev_dbg(queue->vif->dev, in xenvif_count_requests()
792 netdev_dbg(queue->vif->dev, in xenvif_count_requests()
802 netdev_err(queue->vif->dev, "Cross page boundary, txp->offset: %u, size: %u\n", in xenvif_count_requests()
804 xenvif_fatal_tx_err(queue->vif); in xenvif_count_requests()
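
Line 802 is one of the per-slot sanity checks in xenvif_count_requests(): the region named by a TX request must fit inside a single Xen page, so offset + size crossing the page boundary is a fatal frontend error. A sketch, again assuming a 4 KiB XEN_PAGE_SIZE:

    #include <stdio.h>

    #define XEN_PAGE_SIZE 4096u     /* assumption: 4 KiB Xen page */

    /* The check behind the "Cross page boundary" error at line 802: a TX
     * request's granted region must lie within one Xen page.
     */
    static int slot_crosses_page(unsigned int offset, unsigned int size)
    {
        return offset + size > XEN_PAGE_SIZE;
    }

    int main(void)
    {
        printf("%d\n", slot_crosses_page(4000, 200));   /* 1: fatal tx error */
        printf("%d\n", slot_crosses_page(0, 1500));     /* 0: fine */
        return 0;
    }
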
838 txp->gref, queue->vif->domid); in xenvif_tx_create_map_op()
914 netdev_err(queue->vif->dev, in xenvif_grant_handle_set()
927 netdev_err(queue->vif->dev, in xenvif_grant_handle_reset()
959 netdev_dbg(queue->vif->dev, in xenvif_tx_check_gop()
1003 netdev_dbg(queue->vif->dev, in xenvif_tx_check_gop()
1107 netdev_err(queue->vif->dev, "Missing extra info\n"); in xenvif_get_extras()
1108 xenvif_fatal_tx_err(queue->vif); in xenvif_get_extras()
1116 netdev_err(queue->vif->dev, in xenvif_get_extras()
1118 xenvif_fatal_tx_err(queue->vif); in xenvif_get_extras()
1129 static int xenvif_set_skb_gso(struct xenvif *vif, in xenvif_set_skb_gso() argument
1134 netdev_err(vif->dev, "GSO size must not be zero.\n"); in xenvif_set_skb_gso()
1135 xenvif_fatal_tx_err(vif); in xenvif_set_skb_gso()
1147 netdev_err(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type); in xenvif_set_skb_gso()
1148 xenvif_fatal_tx_err(vif); in xenvif_set_skb_gso()
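
Lines 1129-1148 validate the GSO extra-info sent by the frontend: a zero GSO size or an unknown type is treated as a frontend bug and shuts the vif down via xenvif_fatal_tx_err(). A sketch of those checks; the type values follow Xen's netif.h (TCPV4 = 1, TCPV6 = 2), which is an assumption here:

    #include <stdio.h>

    /* Assumed type values, following Xen's netif.h */
    enum { GSO_TYPE_NONE = 0, GSO_TYPE_TCPV4 = 1, GSO_TYPE_TCPV6 = 2 };

    /* Mirrors the checks in xenvif_set_skb_gso(): reject a zero size or an
     * unrecognized type before the values reach the network stack.
     */
    static int check_gso(unsigned int type, unsigned int size)
    {
        if (size == 0) {
            fprintf(stderr, "GSO size must not be zero.\n");
            return -1;      /* caller would xenvif_fatal_tx_err() */
        }
        switch (type) {
        case GSO_TYPE_TCPV4:
        case GSO_TYPE_TCPV6:
            return 0;
        default:
            fprintf(stderr, "Bad GSO type %u.\n", type);
            return -1;
        }
    }

    int main(void)
    {
        printf("tcpv4/1448: %d\n", check_gso(GSO_TYPE_TCPV4, 1448)); /* 0 */
        printf("none/0:     %d\n", check_gso(GSO_TYPE_NONE, 0));     /* -1 */
        return 0;
    }
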
1215 static int xenvif_mcast_add(struct xenvif *vif, const u8 *addr) in xenvif_mcast_add() argument
1219 if (vif->fe_mcast_count == XEN_NETBK_MCAST_MAX) { in xenvif_mcast_add()
1221 netdev_err(vif->dev, in xenvif_mcast_add()
1231 list_add_tail_rcu(&mcast->entry, &vif->fe_mcast_addr); in xenvif_mcast_add()
1232 vif->fe_mcast_count++; in xenvif_mcast_add()
1237 static void xenvif_mcast_del(struct xenvif *vif, const u8 *addr) in xenvif_mcast_del() argument
1241 list_for_each_entry_rcu(mcast, &vif->fe_mcast_addr, entry) { in xenvif_mcast_del()
1243 --vif->fe_mcast_count; in xenvif_mcast_del()
1251 bool xenvif_mcast_match(struct xenvif *vif, const u8 *addr) in xenvif_mcast_match() argument
1256 list_for_each_entry_rcu(mcast, &vif->fe_mcast_addr, entry) { in xenvif_mcast_match()
1267 void xenvif_mcast_addr_list_free(struct xenvif *vif) in xenvif_mcast_addr_list_free() argument
1272 while (!list_empty(&vif->fe_mcast_addr)) { in xenvif_mcast_addr_list_free()
1275 mcast = list_first_entry(&vif->fe_mcast_addr, in xenvif_mcast_addr_list_free()
1278 --vif->fe_mcast_count; in xenvif_mcast_addr_list_free()
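
Lines 1215-1278 maintain the frontend-requested multicast filter list: additions are capped at XEN_NETBK_MCAST_MAX, removals and lookups walk the list, and teardown drains it. The kernel walks this list under RCU because xenvif_mcast_match() runs in the fast path; the single-threaded sketch below swaps RCU for a plain singly linked list, and MCAST_MAX = 64 is an assumed stand-in for XEN_NETBK_MCAST_MAX:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define ETH_ALEN 6
    #define MCAST_MAX 64    /* assumed stand-in for XEN_NETBK_MCAST_MAX */

    struct mcast_entry {
        struct mcast_entry *next;
        unsigned char addr[ETH_ALEN];
    };

    static struct mcast_entry *mcast_list;
    static unsigned int mcast_count;

    /* Cap additions like xenvif_mcast_add(); exceeding the cap is treated
     * as a frontend error in the real driver.
     */
    static int mcast_add(const unsigned char *addr)
    {
        struct mcast_entry *m;

        if (mcast_count == MCAST_MAX)
            return -1;
        m = malloc(sizeof(*m));
        if (!m)
            return -1;
        memcpy(m->addr, addr, ETH_ALEN);
        m->next = mcast_list;
        mcast_list = m;
        mcast_count++;
        return 0;
    }

    /* Lookup as in xenvif_mcast_match(), minus the RCU read lock. */
    static int mcast_match(const unsigned char *addr)
    {
        const struct mcast_entry *m;

        for (m = mcast_list; m; m = m->next)
            if (memcmp(m->addr, addr, ETH_ALEN) == 0)
                return 1;
        return 0;
    }

    int main(void)
    {
        const unsigned char a[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };

        mcast_add(a);
        printf("match: %d\n", mcast_match(a));  /* 1 */
        return 0;
    }
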
1306 netdev_err(queue->vif->dev, in xenvif_tx_build_gops()
1311 xenvif_fatal_tx_err(queue->vif); in xenvif_tx_build_gops()
1346 ret = xenvif_mcast_add(queue->vif, extra->u.mcast.addr); in xenvif_tx_build_gops()
1360 xenvif_mcast_del(queue->vif, extra->u.mcast.addr); in xenvif_tx_build_gops()
1374 netdev_dbg(queue->vif->dev, in xenvif_tx_build_gops()
1382 netdev_err(queue->vif->dev, in xenvif_tx_build_gops()
1386 xenvif_fatal_tx_err(queue->vif); in xenvif_tx_build_gops()
1399 netdev_dbg(queue->vif->dev, in xenvif_tx_build_gops()
1422 netdev_err(queue->vif->dev, in xenvif_tx_build_gops()
1432 if (xenvif_set_skb_gso(queue->vif, skb, gso)) { in xenvif_tx_build_gops()
1444 queue->tx_copy_ops[*copy_ops].source.domid = queue->vif->domid; in xenvif_tx_build_gops()
1611 netdev_err(queue->vif->dev, in xenvif_tx_submit()
1619 skb->dev = queue->vif->dev; in xenvif_tx_submit()
1624 netdev_dbg(queue->vif->dev, in xenvif_tx_submit()
1749 netdev_err(queue->vif->dev, "Unmap fail: nr_ops %tu ret %d\n", in xenvif_tx_dealloc_action()
1753 netdev_err(queue->vif->dev, in xenvif_tx_dealloc_action()
1886 netdev_err(queue->vif->dev, in xenvif_idx_unmap()
1913 xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(queue->vif), in xenvif_unmap_frontend_rings()
1916 xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(queue->vif), in xenvif_unmap_frontend_rings()
1930 err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(queue->vif), in xenvif_map_frontend_rings()
1938 err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(queue->vif), in xenvif_map_frontend_rings()
1955 struct xenvif *vif = queue->vif; in xenvif_queue_carrier_off() local
1960 spin_lock(&vif->lock); in xenvif_queue_carrier_off()
1961 if (vif->stalled_queues++ == 0) { in xenvif_queue_carrier_off()
1962 netdev_info(vif->dev, "Guest Rx stalled"); in xenvif_queue_carrier_off()
1963 netif_carrier_off(vif->dev); in xenvif_queue_carrier_off()
1965 spin_unlock(&vif->lock); in xenvif_queue_carrier_off()
1970 struct xenvif *vif = queue->vif; in xenvif_queue_carrier_on() local
1976 spin_lock(&vif->lock); in xenvif_queue_carrier_on()
1977 if (--vif->stalled_queues == 0) { in xenvif_queue_carrier_on()
1978 netdev_info(vif->dev, "Guest Rx ready"); in xenvif_queue_carrier_on()
1979 netif_carrier_on(vif->dev); in xenvif_queue_carrier_on()
1981 spin_unlock(&vif->lock); in xenvif_queue_carrier_on()
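
Lines 1955-1981 implement vif-wide stall hysteresis: the carrier is turned off when the first queue stalls and back on only when the last stalled queue recovers, with vif->stalled_queues guarded by a single per-vif lock. A runnable model of that counter, using a pthread mutex in place of vif->lock:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t vif_lock = PTHREAD_MUTEX_INITIALIZER; /* vif->lock */
    static unsigned int stalled_queues;             /* vif->stalled_queues */

    static void queue_carrier_off(void)
    {
        pthread_mutex_lock(&vif_lock);
        if (stalled_queues++ == 0)              /* first stalled queue */
            printf("Guest Rx stalled -> carrier off\n");
        pthread_mutex_unlock(&vif_lock);
    }

    static void queue_carrier_on(void)
    {
        pthread_mutex_lock(&vif_lock);
        if (--stalled_queues == 0)              /* last queue recovered */
            printf("Guest Rx ready -> carrier on\n");
        pthread_mutex_unlock(&vif_lock);
    }

    int main(void)
    {
        queue_carrier_off();    /* queue 0 stalls: carrier off */
        queue_carrier_off();    /* queue 1 stalls: no state change */
        queue_carrier_on();     /* queue 1 recovers: still off */
        queue_carrier_on();     /* queue 0 recovers: carrier on */
        return 0;
    }

The real driver additionally keeps a per-queue stalled flag so each queue can increment or decrement the counter at most once.
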
1993 queue->last_rx_time + queue->vif->stall_timeout); in xenvif_rx_queue_stalled()
2010 || (queue->vif->stall_timeout && in xenvif_have_rx_work()
2014 || queue->vif->disabled; in xenvif_have_rx_work()
2063 struct xenvif *vif = queue->vif; in xenvif_kthread_guest_rx() local
2065 if (!vif->stall_timeout) in xenvif_kthread_guest_rx()
2081 if (unlikely(vif->disabled && queue->id == 0)) { in xenvif_kthread_guest_rx()
2082 xenvif_carrier_off(vif); in xenvif_kthread_guest_rx()
2093 if (vif->stall_timeout) { in xenvif_kthread_guest_rx()
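
Lines 1993-2093 tie the stall machinery to the per-queue kthread: a queue counts as stalled when the frontend has stopped posting RX buffers and nothing has arrived for stall_timeout, and the thread re-evaluates this (plus the vif->disabled flag) each time it wakes. A sketch of the timeout test, assuming jiffies-style tick arithmetic and ring producer/consumer indices; the empty-ring condition is modelled from context, since only the time_after() comparison (line 1993) appears above:

    #include <stdbool.h>
    #include <stdio.h>

    /* Wraparound-safe tick comparison, as the kernel's time_after() does it. */
    static bool time_after(unsigned long a, unsigned long b)
    {
        return (long)(a - b) > 0;
    }

    /* A queue is considered stalled when the guest has no RX requests
     * outstanding (assumed condition) and nothing has been received since
     * last_rx_time + stall_timeout (line 1993).
     */
    static bool rx_queue_stalled(unsigned int req_prod, unsigned int req_cons,
                                 unsigned long now, unsigned long last_rx_time,
                                 unsigned long stall_timeout)
    {
        return req_prod - req_cons < 1 &&
               time_after(now, last_rx_time + stall_timeout);
    }

    int main(void)
    {
        /* hypothetical tick values */
        printf("%d\n", rx_queue_stalled(10, 10, 1000, 400, 500)); /* 1: stalled */
        printf("%d\n", rx_queue_stalled(12, 10, 1000, 400, 500)); /* 0: buffers posted */
        return 0;
    }
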