Lines matching refs:port (each match carries its line number in the source file and is tagged with the enclosing function and the symbol's role there)

52 static int __vnet_tx_trigger(struct vnet_port *port, u32 start);
53 static void vnet_port_reset(struct vnet_port *port);
68 static int vnet_handle_unknown(struct vnet_port *port, void *arg) in vnet_handle_unknown() argument
76 ldc_disconnect(port->vio.lp); in vnet_handle_unknown()
81 static int vnet_port_alloc_tx_ring(struct vnet_port *port);
85 struct vnet_port *port = to_vnet_port(vio); in vnet_send_attr() local
86 struct net_device *dev = port->vp->dev; in vnet_send_attr()
109 if (port->rmtu) { in vnet_send_attr()
110 port->rmtu = min(VNET_MAXPACKET, port->rmtu); in vnet_send_attr()
111 pkt.mtu = port->rmtu; in vnet_send_attr()
113 port->rmtu = VNET_MAXPACKET; in vnet_send_attr()
114 pkt.mtu = port->rmtu; in vnet_send_attr()
125 if (vio_version_after_eq(vio, 1, 7) && port->tso) { in vnet_send_attr()
127 if (!port->tsolen) in vnet_send_attr()
128 port->tsolen = VNET_MAXTSO; in vnet_send_attr()
129 pkt.ipv4_lso_maxlen = port->tsolen; in vnet_send_attr()
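
The matches at lines 109-129 show the whole of the attribute advertisement: a previously negotiated remote MTU is re-offered, clamped to the local maximum, otherwise the maximum itself opens the negotiation, and from VIO 1.7 on a TSO ceiling rides along as ipv4_lso_maxlen. A minimal userspace model; the constants are placeholders, not the driver's values:

    #include <stdbool.h>
    #include <stdint.h>

    #define VNET_MAXPACKET 65535u   /* placeholder, not the driver's value */
    #define VNET_MAXTSO    65535u   /* placeholder TSO ceiling */

    struct attr_pkt   { uint64_t mtu; uint16_t ipv4_lso_maxlen; };
    struct port_state { uint64_t rmtu; bool tso; uint16_t tsolen; };

    static void advertise_attrs(struct port_state *port, struct attr_pkt *pkt,
                                bool vio_1_7_or_later)
    {
        if (port->rmtu) {
            /* re-offer the remembered peer MTU, clamped to ours */
            if (port->rmtu > VNET_MAXPACKET)
                port->rmtu = VNET_MAXPACKET;
            pkt->mtu = port->rmtu;
        } else {
            /* first negotiation: open at our maximum */
            port->rmtu = VNET_MAXPACKET;
            pkt->mtu = port->rmtu;
        }

        /* TSO is only advertised from VIO 1.7 on */
        if (vio_1_7_or_later && port->tso) {
            if (!port->tsolen)
                port->tsolen = VNET_MAXTSO;
            pkt->ipv4_lso_maxlen = port->tsolen;
        }
    }
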
149 struct vnet_port *port = to_vnet_port(vio); in handle_attr_info() local
159 (unsigned long long)pkt->mtu, port->rmtu, pkt->cflags, in handle_attr_info()
178 localmtu = port->rmtu ? port->rmtu : VNET_MAXPACKET; in handle_attr_info()
184 port->rmtu = localmtu; in handle_attr_info()
188 port->tso &= !!(pkt->cflags & VNET_LSO_IPV4_CAPAB); in handle_attr_info()
190 port->tso = false; in handle_attr_info()
191 if (port->tso) { in handle_attr_info()
192 if (!port->tsolen) in handle_attr_info()
193 port->tsolen = VNET_MAXTSO; in handle_attr_info()
194 port->tsolen = min(port->tsolen, pkt->ipv4_lso_maxlen); in handle_attr_info()
195 if (port->tsolen < VNET_MINTSO) { in handle_attr_info()
196 port->tso = false; in handle_attr_info()
197 port->tsolen = 0; in handle_attr_info()
200 pkt->ipv4_lso_maxlen = port->tsolen; in handle_attr_info()
229 (unsigned long long)pkt->mtu, port->rmtu, pkt->cflags, in handle_attr_info()
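
On the receiving side (lines 178-200), the same fields are intersected with the peer's: the local MTU defaults to VNET_MAXPACKET when nothing was negotiated yet, TSO survives only if the peer sets VNET_LSO_IPV4_CAPAB (the bare assignment at line 190 appears to disable it for pre-1.7 sessions), the TSO length is clamped to the peer's ipv4_lso_maxlen, and anything below VNET_MINTSO turns the feature off. A sketch of the intersection; the flag bit and floor are assumed values:

    #include <stdbool.h>
    #include <stdint.h>

    #define VNET_LSO_IPV4_CAPAB 0x02u  /* assumed flag bit */
    #define VNET_MINTSO         2048u  /* assumed usable floor */
    #define VNET_MAXTSO         65535u /* placeholder, as above */

    struct tso_state { bool tso; uint16_t tsolen; };

    static void negotiate_tso(struct tso_state *port, uint8_t peer_cflags,
                              uint16_t peer_lso_maxlen)
    {
        /* keep TSO only if the peer is IPv4-LSO capable too */
        port->tso = port->tso && (peer_cflags & VNET_LSO_IPV4_CAPAB);
        if (!port->tso)
            return;

        if (!port->tsolen)
            port->tsolen = VNET_MAXTSO;
        /* settle on the smaller of the two maxima */
        if (port->tsolen > peer_lso_maxlen)
            port->tsolen = peer_lso_maxlen;
        /* below the usable minimum, give the feature up entirely */
        if (port->tsolen < VNET_MINTSO) {
            port->tso = false;
            port->tsolen = 0;
        }
    }
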
349 static int vnet_rx_one(struct vnet_port *port, struct vio_net_desc *desc) in vnet_rx_one() argument
351 struct net_device *dev = port->vp->dev; in vnet_rx_one()
359 if (port->tso && port->tsolen > port->rmtu) in vnet_rx_one()
360 maxlen = port->tsolen; in vnet_rx_one()
362 maxlen = port->rmtu; in vnet_rx_one()
377 err = ldc_copy(port->vio.lp, LDC_COPY_IN, in vnet_rx_one()
389 if (vio_version_after_eq(&port->vio, 1, 8)) { in vnet_rx_one()
411 skb->ip_summed = port->switch_port ? CHECKSUM_NONE : CHECKSUM_PARTIAL; in vnet_rx_one()
415 napi_gro_receive(&port->napi, skb); in vnet_rx_one()
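
vnet_rx_one() (lines 349-415) sizes its receive bound against the larger of the negotiated TSO length and the MTU, copies the frame in over LDC, and hands it to GRO; note the checksum split at line 411 between switch ports and intra-host peers. The two small decisions, modelled standalone with stub types:

    #include <stdbool.h>
    #include <stdint.h>

    enum csum_hint { CSUM_NONE, CSUM_PARTIAL };  /* stand-ins for skb->ip_summed */

    struct rx_port { bool tso, switch_port; uint16_t tsolen; uint64_t rmtu; };

    /* A TSO-capable peer may hand us a super-frame up to tsolen bytes;
     * otherwise the negotiated MTU bounds the descriptor. */
    static uint64_t rx_maxlen(const struct rx_port *p)
    {
        return (p->tso && p->tsolen > p->rmtu) ? p->tsolen : p->rmtu;
    }

    /* Frames from the external switch port get no checksum claim, while
     * intra-host traffic is marked so the stack can skip validation. */
    static enum csum_hint rx_csum(const struct rx_port *p)
    {
        return p->switch_port ? CSUM_NONE : CSUM_PARTIAL;
    }
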
426 static int vnet_send_ack(struct vnet_port *port, struct vio_dring_state *dr, in vnet_send_ack() argument
434 .sid = vio_send_sid(&port->vio), in vnet_send_ack()
447 err = vio_ldc_send(&port->vio, &hdr, sizeof(hdr)); in vnet_send_ack()
457 port->raddr[0], port->raddr[1], in vnet_send_ack()
458 port->raddr[2], port->raddr[3], in vnet_send_ack()
459 port->raddr[4], port->raddr[5]); in vnet_send_ack()
465 port->stop_rx_idx = end; in vnet_send_ack()
466 port->stop_rx = true; in vnet_send_ack()
468 port->stop_rx_idx = 0; in vnet_send_ack()
469 port->stop_rx = false; in vnet_send_ack()
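
The tail of vnet_send_ack() (lines 465-469) records whether a DRING_STOPPED ack actually reached the peer; when it did not, stop_rx and stop_rx_idx park the retry, which the TX trigger path at lines 878-884 flushes before its next data trigger. The bookkeeping, modelled:

    #include <stdbool.h>
    #include <stdint.h>

    struct ack_state { bool stop_rx; uint32_t stop_rx_idx; };

    /* send_err stands in for the vio_ldc_send() result; <= 0 means the
     * ack never went out. */
    static void note_stopped_ack(struct ack_state *st, int send_err,
                                 uint32_t end)
    {
        if (send_err <= 0) {
            st->stop_rx_idx = end;   /* remember where to resume */
            st->stop_rx = true;      /* retry before the next TX trigger */
        } else {
            st->stop_rx_idx = 0;
            st->stop_rx = false;
        }
    }
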
475 static struct vio_net_desc *get_rx_desc(struct vnet_port *port, in get_rx_desc() argument
479 struct vio_net_desc *desc = port->vio.desc_buf; in get_rx_desc()
482 err = ldc_get_dring_entry(port->vio.lp, desc, dr->entry_size, in get_rx_desc()
491 static int put_rx_desc(struct vnet_port *port, in put_rx_desc() argument
498 err = ldc_put_dring_entry(port->vio.lp, desc, dr->entry_size, in put_rx_desc()
507 static int vnet_walk_rx_one(struct vnet_port *port, in vnet_walk_rx_one() argument
511 struct vio_net_desc *desc = get_rx_desc(port, dr, index); in vnet_walk_rx_one()
512 struct vio_driver_state *vio = &port->vio; in vnet_walk_rx_one()
530 err = vnet_rx_one(port, desc); in vnet_walk_rx_one()
534 err = put_rx_desc(port, dr, desc, index); in vnet_walk_rx_one()
541 static int vnet_walk_rx(struct vnet_port *port, struct vio_dring_state *dr, in vnet_walk_rx() argument
544 struct vio_driver_state *vio = &port->vio; in vnet_walk_rx()
554 int ack = 0, err = vnet_walk_rx_one(port, dr, start, &ack); in vnet_walk_rx()
565 err = vnet_send_ack(port, dr, ack_start, ack_end, in vnet_walk_rx()
579 port->napi_resume = false; in vnet_walk_rx()
580 return vnet_send_ack(port, dr, ack_start, ack_end, in vnet_walk_rx()
583 port->napi_resume = true; in vnet_walk_rx()
584 port->napi_stop_idx = ack_end; in vnet_walk_rx()
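
vnet_walk_rx() (lines 541-584) consumes descriptors between start and end, acking ranges as it goes; when the NAPI budget runs out mid-ring, napi_resume and napi_stop_idx (lines 583-584) record where the next poll should pick up, and the resume path at line 775 restarts from vio_dring_next() of that index. A compressed model of the budgeted walk, with the ack plumbing and ring arithmetic simplified:

    #include <stdbool.h>
    #include <stdint.h>

    struct walk_state { bool napi_resume; uint32_t napi_stop_idx; };

    static uint32_t ring_next(uint32_t idx, uint32_t num_entries)
    {
        return (idx + 1) % num_entries;
    }

    /* rx_one() stands in for vnet_walk_rx_one(); < 0 stops the walk. */
    static int walk_rx(struct walk_state *st, uint32_t start, uint32_t end,
                       uint32_t num_entries, int budget,
                       int (*rx_one)(uint32_t idx))
    {
        int npkts = 0;

        while (start != end && npkts < budget) {
            if (rx_one(start) < 0)
                break;
            start = ring_next(start, num_entries);
            npkts++;
        }

        if (npkts >= budget && start != end) {
            st->napi_resume = true;      /* more work: poll again */
            st->napi_stop_idx = start;   /* resume point for next poll */
        } else {
            st->napi_resume = false;     /* drained: final STOPPED ack */
        }
        return npkts;
    }
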
589 static int vnet_rx(struct vnet_port *port, void *msgbuf, int *npkts, in vnet_rx() argument
593 struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_RX_RING]; in vnet_rx()
594 struct vio_driver_state *vio = &port->vio; in vnet_rx()
607 if (!port->napi_resume) in vnet_rx()
612 return vnet_walk_rx(port, dr, pkt->start_idx, pkt->end_idx, in vnet_rx()
631 static int vnet_ack(struct vnet_port *port, void *msgbuf) in vnet_ack() argument
633 struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING]; in vnet_ack()
645 vp = port->vp; in vnet_ack()
658 if (desc->hdr.state == VIO_DESC_READY && !port->start_cons) { in vnet_ack()
663 if (__vnet_tx_trigger(port, dr->cons) > 0) in vnet_ack()
664 port->start_cons = false; in vnet_ack()
666 port->start_cons = true; in vnet_ack()
668 port->start_cons = true; in vnet_ack()
672 txq = netdev_get_tx_queue(dev, port->q_index); in vnet_ack()
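
vnet_ack() (lines 631-672) drives the missed-trigger protocol: when the peer acknowledges and the descriptor at the consumer index is already READY with no trigger pending (start_cons false), the peer is kicked again immediately; otherwise the next transmit owes an initial trigger. The decision, isolated with stand-in types:

    #include <stdbool.h>

    enum desc_state { DESC_FREE, DESC_READY, DESC_DONE };  /* stand-ins */

    struct tx_state { bool start_cons; };

    /* trigger() stands in for __vnet_tx_trigger(); > 0 means delivered. */
    static void ack_advance(struct tx_state *st, enum desc_state next_desc,
                            int (*trigger)(void))
    {
        if (next_desc == DESC_READY && !st->start_cons) {
            /* more READY work behind the ack: kick the peer again */
            if (trigger() > 0)
                st->start_cons = false;
            else
                st->start_cons = true;   /* kick failed, re-arm */
        } else {
            /* ring drained: the next xmit owes an initial trigger */
            st->start_cons = true;
        }
    }
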
680 static int vnet_nack(struct vnet_port *port, void *msgbuf) in vnet_nack() argument
686 static int handle_mcast(struct vnet_port *port, void *msgbuf) in handle_mcast() argument
692 port->vp->dev->name, in handle_mcast()
705 static void maybe_tx_wakeup(struct vnet_port *port) in maybe_tx_wakeup() argument
709 txq = netdev_get_tx_queue(port->vp->dev, port->q_index); in maybe_tx_wakeup()
714 dr = &port->vio.drings[VIO_DRIVER_TX_RING]; in maybe_tx_wakeup()
727 static int vnet_event_napi(struct vnet_port *port, int budget) in vnet_event_napi() argument
729 struct vio_driver_state *vio = &port->vio; in vnet_event_napi()
732 int event = (port->rx_event & LDC_EVENT_RESET); in vnet_event_napi()
740 vnet_port_reset(port); in vnet_event_napi()
743 port->rx_event = 0; in vnet_event_napi()
747 event = (port->rx_event & LDC_EVENT_UP); in vnet_event_napi()
748 port->rx_event &= ~(LDC_EVENT_RESET|LDC_EVENT_UP); in vnet_event_napi()
751 event = port->rx_event; in vnet_event_napi()
765 if (port->napi_resume) { in vnet_event_napi()
769 &port->vio.drings[VIO_DRIVER_RX_RING]; in vnet_event_napi()
775 pkt->start_idx = vio_dring_next(dr, port->napi_stop_idx); in vnet_event_napi()
798 if (!port_is_up(port)) { in vnet_event_napi()
806 err = vnet_rx(port, &msgbuf, &npkts, budget); in vnet_event_napi()
812 err = vnet_ack(port, &msgbuf); in vnet_event_napi()
816 err = vnet_nack(port, &msgbuf); in vnet_event_napi()
820 err = handle_mcast(port, &msgbuf); in vnet_event_napi()
826 err = vnet_handle_unknown(port, &msgbuf); in vnet_event_napi()
832 maybe_tx_wakeup(port); in vnet_event_napi()
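
vnet_event_napi() (lines 727-832) triages the accumulated rx_event mask in strict priority before entering its message loop: a RESET tears the port down and discards everything else, an UP clears both handshake bits, and only the remainder counts as data-ready dring work (dispatched at lines 806-826, with a TX wakeup on the way out). A skeleton of the triage; the flag values are stand-ins for the LDC_EVENT_* bits:

    #include <stdint.h>

    #define EV_RESET      0x1u
    #define EV_UP         0x2u
    #define EV_DATA_READY 0x4u

    static int triage_events(uint32_t *rx_event,
                             void (*reset_port)(void), void (*port_up)(void))
    {
        if (*rx_event & EV_RESET) {
            reset_port();
            *rx_event = 0;           /* reset supersedes everything else */
            return 0;
        }
        if (*rx_event & EV_UP) {
            port_up();
            *rx_event &= ~(EV_RESET | EV_UP);
        }
        /* whatever is left is data-ready work for the dring loop */
        return (*rx_event & EV_DATA_READY) != 0;
    }
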
838 struct vnet_port *port = container_of(napi, struct vnet_port, napi); in vnet_poll() local
839 struct vio_driver_state *vio = &port->vio; in vnet_poll()
840 int processed = vnet_event_napi(port, budget); in vnet_poll()
844 port->rx_event &= ~LDC_EVENT_DATA_READY; in vnet_poll()
852 struct vnet_port *port = arg; in vnet_event() local
853 struct vio_driver_state *vio = &port->vio; in vnet_event()
855 port->rx_event |= event; in vnet_event()
857 napi_schedule(&port->napi); in vnet_event()
861 static int __vnet_tx_trigger(struct vnet_port *port, u32 start) in __vnet_tx_trigger() argument
863 struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING]; in __vnet_tx_trigger()
869 .sid = vio_send_sid(&port->vio), in __vnet_tx_trigger()
878 if (port->stop_rx) { in __vnet_tx_trigger()
879 err = vnet_send_ack(port, in __vnet_tx_trigger()
880 &port->vio.drings[VIO_DRIVER_RX_RING], in __vnet_tx_trigger()
881 port->stop_rx_idx, -1, in __vnet_tx_trigger()
890 err = vio_ldc_send(&port->vio, &hdr, sizeof(hdr)); in __vnet_tx_trigger()
909 struct vnet_port *port; in __tx_port_find() local
911 hlist_for_each_entry_rcu(port, hp, hash) { in __tx_port_find()
912 if (!port_is_up(port)) in __tx_port_find()
914 if (ether_addr_equal(port->raddr, skb->data)) in __tx_port_find()
915 return port; in __tx_port_find()
917 list_for_each_entry_rcu(port, &vp->port_list, list) { in __tx_port_find()
918 if (!port->switch_port) in __tx_port_find()
920 if (!port_is_up(port)) in __tx_port_find()
922 return port; in __tx_port_find()
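
__tx_port_find() (lines 909-922) resolves the egress port in two passes: an exact destination-MAC match in the port hash wins, and failing that the first live switch port carries the frame off-host. With the hash buckets and RCU plumbing flattened into a plain array for the sketch:

    #include <stdbool.h>
    #include <stddef.h>
    #include <string.h>

    struct sport { unsigned char raddr[6]; bool up; bool switch_port; };

    static struct sport *tx_port_find(struct sport *ports, size_t n,
                                      const unsigned char *dmac)
    {
        size_t i;

        for (i = 0; i < n; i++)      /* hash-bucket walk in the driver */
            if (ports[i].up && !memcmp(ports[i].raddr, dmac, 6))
                return &ports[i];
        for (i = 0; i < n; i++)      /* fallback: any live switch port */
            if (ports[i].up && ports[i].switch_port)
                return &ports[i];
        return NULL;
    }
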
927 static struct sk_buff *vnet_clean_tx_ring(struct vnet_port *port, in vnet_clean_tx_ring() argument
930 struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING]; in vnet_clean_tx_ring()
950 if (port->tx_bufs[txi].skb) { in vnet_clean_tx_ring()
954 BUG_ON(port->tx_bufs[txi].skb->next); in vnet_clean_tx_ring()
956 port->tx_bufs[txi].skb->next = skb; in vnet_clean_tx_ring()
957 skb = port->tx_bufs[txi].skb; in vnet_clean_tx_ring()
958 port->tx_bufs[txi].skb = NULL; in vnet_clean_tx_ring()
960 ldc_unmap(port->vio.lp, in vnet_clean_tx_ring()
961 port->tx_bufs[txi].cookies, in vnet_clean_tx_ring()
962 port->tx_bufs[txi].ncookies); in vnet_clean_tx_ring()
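
vnet_clean_tx_ring() (lines 927-962) unmaps each completed slot's LDC cookies and strings the skbs onto a singly linked list through skb->next (the BUG_ON at line 954 guards that the field is unused), so the caller can free them outside the TX lock; the clean timer at lines 984-998 re-arms while work remains and disarms once the ring is empty. The chaining, reduced to its shape:

    #include <stddef.h>

    struct fake_skb { struct fake_skb *next; };  /* stand-in for sk_buff */
    struct tx_buf   { struct fake_skb *skb; };

    static struct fake_skb *clean_ring(struct tx_buf *bufs, size_t n)
    {
        struct fake_skb *freelist = NULL;
        size_t i;

        for (i = 0; i < n; i++) {
            if (!bufs[i].skb)
                continue;
            /* ldc_unmap(...) would release the DMA cookies here */
            bufs[i].skb->next = freelist;   /* push onto the free list */
            freelist = bufs[i].skb;
            bufs[i].skb = NULL;
        }
        return freelist;   /* caller frees these outside the TX lock */
    }
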
984 struct vnet_port *port = (struct vnet_port *)port0; in vnet_clean_timer_expire() local
988 netif_tx_lock(port->vp->dev); in vnet_clean_timer_expire()
989 freeskbs = vnet_clean_tx_ring(port, &pending); in vnet_clean_timer_expire()
990 netif_tx_unlock(port->vp->dev); in vnet_clean_timer_expire()
995 (void)mod_timer(&port->clean_timer, in vnet_clean_timer_expire()
998 del_timer(&port->clean_timer); in vnet_clean_timer_expire()
1138 struct vnet_port *port = __tx_port_find(vp, skb); in vnet_select_queue() local
1140 if (port == NULL) in vnet_select_queue()
1142 return port->q_index; in vnet_select_queue()
1147 static int vnet_handle_offloads(struct vnet_port *port, struct sk_buff *skb) in vnet_handle_offloads() argument
1149 struct net_device *dev = port->vp->dev; in vnet_handle_offloads()
1150 struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING]; in vnet_handle_offloads()
1172 datalen = port->tsolen - hlen; in vnet_handle_offloads()
1178 if (port->tso && gso_size < datalen) in vnet_handle_offloads()
1184 txq = netdev_get_tx_queue(dev, port->q_index); in vnet_handle_offloads()
1194 if (port->tso && gso_size < datalen) { in vnet_handle_offloads()
1215 if (port->tso && curr->len > dev->mtu) { in vnet_handle_offloads()
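
vnet_handle_offloads() (lines 1147-1215) computes the usable LSO payload as tsolen minus the header length (line 1172) and falls back to software segmentation whenever the skb's own gso_size underfills that window, re-checking each resulting segment against the device MTU (line 1215). The core decision as two helpers; the names are mine, not the driver's:

    #include <stdbool.h>
    #include <stdint.h>

    /* payload budget per LSO super-frame once headers are accounted for */
    static inline uint32_t lso_datalen(uint32_t tsolen, uint32_t hlen)
    {
        return tsolen - hlen;
    }

    /* true when the skb's gso_size underfills the negotiated window,
     * i.e. the frame is worth re-segmenting toward the peer's size */
    static inline bool needs_resegment(bool tso, uint32_t gso_size,
                                       uint32_t datalen)
    {
        return tso && gso_size < datalen;
    }
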
1251 struct vnet_port *port = NULL; in vnet_start_xmit() local
1261 port = __tx_port_find(vp, skb); in vnet_start_xmit()
1262 if (unlikely(!port)) { in vnet_start_xmit()
1267 if (skb_is_gso(skb) && skb->len > port->tsolen) { in vnet_start_xmit()
1268 err = vnet_handle_offloads(port, skb); in vnet_start_xmit()
1273 if (!skb_is_gso(skb) && skb->len > port->rmtu) { in vnet_start_xmit()
1274 unsigned long localmtu = port->rmtu - ETH_HLEN; in vnet_start_xmit()
1276 if (vio_version_after_eq(&port->vio, 1, 3)) in vnet_start_xmit()
1313 dr = &port->vio.drings[VIO_DRIVER_TX_RING]; in vnet_start_xmit()
1332 freeskbs = vnet_clean_tx_ring(port, &pending); in vnet_start_xmit()
1334 BUG_ON(port->tx_bufs[txi].skb); in vnet_start_xmit()
1340 err = vnet_skb_map(port->vio.lp, skb, port->tx_bufs[txi].cookies, 2, in vnet_start_xmit()
1347 port->tx_bufs[txi].skb = skb; in vnet_start_xmit()
1349 port->tx_bufs[txi].ncookies = err; in vnet_start_xmit()
1361 d->ncookies = port->tx_bufs[txi].ncookies; in vnet_start_xmit()
1363 d->cookies[i] = port->tx_bufs[txi].cookies[i]; in vnet_start_xmit()
1364 if (vio_version_after_eq(&port->vio, 1, 7)) { in vnet_start_xmit()
1368 if (skb_is_gso(port->tx_bufs[txi].skb)) { in vnet_start_xmit()
1369 dext->ipv4_lso_mss = skb_shinfo(port->tx_bufs[txi].skb) in vnet_start_xmit()
1373 if (vio_version_after_eq(&port->vio, 1, 8) && in vnet_start_xmit()
1374 !port->switch_port) { in vnet_start_xmit()
1407 if (!port->start_cons) in vnet_start_xmit()
1410 err = __vnet_tx_trigger(port, dr->cons); in vnet_start_xmit()
1414 skb = port->tx_bufs[txi].skb; in vnet_start_xmit()
1415 port->tx_bufs[txi].skb = NULL; in vnet_start_xmit()
1421 port->start_cons = false; in vnet_start_xmit()
1424 dev->stats.tx_bytes += port->tx_bufs[txi].skb->len; in vnet_start_xmit()
1433 (void)mod_timer(&port->clean_timer, jiffies + VNET_CLEAN_TIMEOUT); in vnet_start_xmit()
1442 (void)mod_timer(&port->clean_timer, in vnet_start_xmit()
1444 else if (port) in vnet_start_xmit()
1445 del_timer(&port->clean_timer); in vnet_start_xmit()
1446 if (port) in vnet_start_xmit()
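
The transmit path (lines 1251-1446) strings the earlier pieces together: look up the port, divert oversized GSO frames to vnet_handle_offloads(), bounce over-MTU non-GSO frames (with an ICMP hint from VIO 1.3 on), reclaim finished slots, map the skb into LDC cookies, publish the descriptor (plus the v1.7 extension block and v1.8 fields where negotiated), and kick the peer only when start_cons says a fresh trigger is owed. A schematic of the publish-and-kick tail; everything here is a stand-in:

    #include <stdbool.h>
    #include <stdint.h>

    struct txq_model {
        bool start_cons;               /* peer needs an initial trigger */
        uint32_t prod, cons, num_entries;
    };

    /* publish() stands in for filling the vio_net_desc; kick() for
     * __vnet_tx_trigger(). */
    static int xmit_one(struct txq_model *q,
                        void (*publish)(uint32_t slot),
                        int (*kick)(uint32_t cons))
    {
        uint32_t slot = q->prod;

        publish(slot);                           /* descriptor now READY */
        q->prod = (q->prod + 1) % q->num_entries;

        /* Only the first descriptor after the peer went idle needs a
         * trigger; later ones are found by the peer's own ring walk. */
        if (q->start_cons) {
            if (kick(q->cons) <= 0)
                return -1;            /* the driver unwinds the slot here */
            q->start_cons = false;
        }
        return 0;
    }
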
1513 static void __send_mc_list(struct vnet *vp, struct vnet_port *port) in __send_mc_list() argument
1524 info.tag.sid = vio_send_sid(&port->vio); in __send_mc_list()
1537 (void) vio_ldc_send(&port->vio, &info, in __send_mc_list()
1544 (void) vio_ldc_send(&port->vio, &info, sizeof(info)); in __send_mc_list()
1562 (void) vio_ldc_send(&port->vio, &info, in __send_mc_list()
1572 (void) vio_ldc_send(&port->vio, &info, sizeof(info)); in __send_mc_list()
1579 struct vnet_port *port; in vnet_set_rx_mode() local
1582 list_for_each_entry_rcu(port, &vp->port_list, list) { in vnet_set_rx_mode()
1584 if (port->switch_port) { in vnet_set_rx_mode()
1586 __send_mc_list(vp, port); in vnet_set_rx_mode()
1633 static void vnet_port_free_tx_bufs(struct vnet_port *port) in vnet_port_free_tx_bufs() argument
1638 dr = &port->vio.drings[VIO_DRIVER_TX_RING]; in vnet_port_free_tx_bufs()
1645 void *skb = port->tx_bufs[i].skb; in vnet_port_free_tx_bufs()
1652 ldc_unmap(port->vio.lp, in vnet_port_free_tx_bufs()
1653 port->tx_bufs[i].cookies, in vnet_port_free_tx_bufs()
1654 port->tx_bufs[i].ncookies); in vnet_port_free_tx_bufs()
1656 port->tx_bufs[i].skb = NULL; in vnet_port_free_tx_bufs()
1659 ldc_free_exp_dring(port->vio.lp, dr->base, in vnet_port_free_tx_bufs()
1669 static void vnet_port_reset(struct vnet_port *port) in vnet_port_reset() argument
1671 del_timer(&port->clean_timer); in vnet_port_reset()
1672 vnet_port_free_tx_bufs(port); in vnet_port_reset()
1673 port->rmtu = 0; in vnet_port_reset()
1674 port->tso = true; in vnet_port_reset()
1675 port->tsolen = 0; in vnet_port_reset()
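
vnet_port_reset() (lines 1669-1675) quiesces the clean timer, drops the TX buffers, and forgets everything attribute negotiation learned, so the next VIO handshake starts from scratch. Mirrored minimally:

    #include <stdbool.h>
    #include <stdint.h>

    struct neg_state { uint64_t rmtu; bool tso; uint16_t tsolen; };

    static void reset_negotiated(struct neg_state *s)
    {
        s->rmtu = 0;      /* re-learn the peer MTU */
        s->tso = true;    /* optimistically re-offer TSO */
        s->tsolen = 0;    /* re-derive from the next attr exchange */
    }
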
1678 static int vnet_port_alloc_tx_ring(struct vnet_port *port) in vnet_port_alloc_tx_ring() argument
1685 dr = &port->vio.drings[VIO_DRIVER_TX_RING]; in vnet_port_alloc_tx_ring()
1689 if (vio_version_after_eq(&port->vio, 1, 7)) in vnet_port_alloc_tx_ring()
1694 dring = ldc_alloc_exp_dring(port->vio.lp, len, in vnet_port_alloc_tx_ring()
1708 port->start_cons = true; /* need an initial trigger */ in vnet_port_alloc_tx_ring()
1721 vnet_port_free_tx_bufs(port); in vnet_port_alloc_tx_ring()
1730 struct vnet_port *port; in vnet_poll_controller() local
1735 port = list_entry(vp->port_list.next, struct vnet_port, list); in vnet_poll_controller()
1736 napi_schedule(&port->napi); in vnet_poll_controller()
1895 vnet_port_add_txq(struct vnet_port *port) in vnet_port_add_txq() argument
1897 struct vnet *vp = port->vp; in vnet_port_add_txq()
1902 port->q_index = n; in vnet_port_add_txq()
1903 netif_tx_wake_queue(netdev_get_tx_queue(vp->dev, port->q_index)); in vnet_port_add_txq()
1907 vnet_port_rm_txq(struct vnet_port *port) in vnet_port_rm_txq() argument
1909 port->vp->nports--; in vnet_port_rm_txq()
1910 netif_tx_stop_queue(netdev_get_tx_queue(port->vp->dev, port->q_index)); in vnet_port_rm_txq()
1916 struct vnet_port *port; in vnet_port_probe() local
1940 port = kzalloc(sizeof(*port), GFP_KERNEL); in vnet_port_probe()
1942 if (!port) in vnet_port_probe()
1946 port->raddr[i] = (*rmac >> (5 - i) * 8) & 0xff; in vnet_port_probe()
1948 port->vp = vp; in vnet_port_probe()
1950 err = vio_driver_init(&port->vio, vdev, VDEV_NETWORK, in vnet_port_probe()
1956 err = vio_ldc_alloc(&port->vio, &vnet_ldc_cfg, port); in vnet_port_probe()
1960 netif_napi_add(port->vp->dev, &port->napi, vnet_poll, NAPI_POLL_WEIGHT); in vnet_port_probe()
1962 INIT_HLIST_NODE(&port->hash); in vnet_port_probe()
1963 INIT_LIST_HEAD(&port->list); in vnet_port_probe()
1968 port->switch_port = switch_port; in vnet_port_probe()
1969 port->tso = true; in vnet_port_probe()
1970 port->tsolen = 0; in vnet_port_probe()
1974 list_add_rcu(&port->list, &vp->port_list); in vnet_port_probe()
1976 list_add_tail_rcu(&port->list, &vp->port_list); in vnet_port_probe()
1977 hlist_add_head_rcu(&port->hash, in vnet_port_probe()
1978 &vp->port_hash[vnet_hashfn(port->raddr)]); in vnet_port_probe()
1979 vnet_port_add_txq(port); in vnet_port_probe()
1982 dev_set_drvdata(&vdev->dev, port); in vnet_port_probe()
1985 vp->dev->name, port->raddr, switch_port ? " switch-port" : ""); in vnet_port_probe()
1987 setup_timer(&port->clean_timer, vnet_clean_timer_expire, in vnet_port_probe()
1988 (unsigned long)port); in vnet_port_probe()
1990 napi_enable(&port->napi); in vnet_port_probe()
1991 vio_port_up(&port->vio); in vnet_port_probe()
1998 kfree(port); in vnet_port_probe()
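
Within vnet_port_probe() the remote MAC arrives as a single 64-bit machine-description property; line 1946 unpacks it most-significant byte first into raddr[], which later keys the hash insertion at lines 1977-1978. The unpacking, rendered standalone:

    #include <stdint.h>

    static void unpack_rmac(uint64_t rmac, unsigned char raddr[6])
    {
        int i;

        for (i = 0; i < 6; i++)
            raddr[i] = (rmac >> ((5 - i) * 8)) & 0xff;
        /* raddr[0] ends up as the most significant byte of the
         * 48-bit address within the property */
    }
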
2007 struct vnet_port *port = dev_get_drvdata(&vdev->dev); in vnet_port_remove() local
2009 if (port) { in vnet_port_remove()
2011 del_timer_sync(&port->vio.timer); in vnet_port_remove()
2013 napi_disable(&port->napi); in vnet_port_remove()
2015 list_del_rcu(&port->list); in vnet_port_remove()
2016 hlist_del_rcu(&port->hash); in vnet_port_remove()
2019 del_timer_sync(&port->clean_timer); in vnet_port_remove()
2020 vnet_port_rm_txq(port); in vnet_port_remove()
2021 netif_napi_del(&port->napi); in vnet_port_remove()
2022 vnet_port_free_tx_bufs(port); in vnet_port_remove()
2023 vio_ldc_free(&port->vio); in vnet_port_remove()
2027 kfree(port); in vnet_port_remove()