Lines Matching refs:h

589 static int is_valid_clean_head(struct hnae_ring *ring, int h)  in is_valid_clean_head()  argument
594 if (unlikely(h > ring->desc_num)) in is_valid_clean_head()
599 assert(u != c && h != c); /* must be checked before call this func */ in is_valid_clean_head()
601 return u > c ? (h > c && h <= u) : (h > c || h <= u); in is_valid_clean_head()
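
The is_valid_clean_head() lines above (589-601) implement the ring-buffer wraparound check: a hardware-reported clean head h is only acceptable if it lies in the half-open interval between next_to_clean (c) and next_to_use (u), which may wrap past the end of the descriptor ring. Below is a minimal stand-alone sketch of just that interval test; the function and variable names are stand-ins chosen for the demo, not the driver's own symbols, and the kernel-only bits (unlikely(), assert(), struct hnae_ring) are left out.

/*
 * Sketch of the clean-head validity check: u is next_to_use, c is
 * next_to_clean, h is the hardware-reported head, desc_num is the
 * ring size.  Caller is assumed to guarantee u != c and h != c,
 * as the driver's assert() notes.
 */
#include <stdbool.h>
#include <stdio.h>

static bool clean_head_is_valid(int h, int u, int c, int desc_num)
{
	if (h > desc_num)                  /* head beyond the ring is bogus */
		return false;

	return u > c ? (h > c && h <= u)   /* no wrap: valid range is (c, u]      */
		     : (h > c || h <= u);  /* wrapped: valid range is (c, end] U [0, u] */
}

int main(void)
{
	printf("%d\n", clean_head_is_valid(5, 8, 2, 16));   /* 1: inside (2, 8]   */
	printf("%d\n", clean_head_is_valid(1, 3, 14, 16));  /* 1: wrapped range   */
	printf("%d\n", clean_head_is_valid(10, 3, 14, 16)); /* 0: in the free gap */
	return 0;
}
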
742 struct hnae_handle *h = priv->ae_handle; in hns_nic_adjust_link() local
744 h->dev->ops->adjust_link(h, ndev->phydev->speed, ndev->phydev->duplex); in hns_nic_adjust_link()
753 int hns_nic_init_phy(struct net_device *ndev, struct hnae_handle *h) in hns_nic_init_phy() argument
758 if (!h->phy_node) in hns_nic_init_phy()
761 if (h->phy_if != PHY_INTERFACE_MODE_XGMII) in hns_nic_init_phy()
762 phy_dev = of_phy_connect(ndev, h->phy_node, in hns_nic_init_phy()
763 hns_nic_adjust_link, 0, h->phy_if); in hns_nic_init_phy()
765 phy_dev = of_phy_attach(ndev, h->phy_node, 0, h->phy_if); in hns_nic_init_phy()
770 phy_dev->supported &= h->if_support; in hns_nic_init_phy()
773 if (h->phy_if == PHY_INTERFACE_MODE_XGMII) in hns_nic_init_phy()
784 struct hnae_handle *h = priv->ae_handle; in hns_nic_ring_open() local
789 h->dev->ops->toggle_ring_irq(priv->ring_data[idx].ring, 0); in hns_nic_ring_open()
797 struct hnae_handle *h = priv->ae_handle; in hns_nic_net_set_mac_address() local
804 ret = h->dev->ops->set_mac_addr(h, mac_addr->sa_data); in hns_nic_net_set_mac_address()
818 struct hnae_handle *h = priv->ae_handle; in hns_nic_update_stats() local
820 h->dev->ops->update_stats(h, &netdev->stats); in hns_nic_update_stats()
843 struct hnae_handle *h = priv->ae_handle; in hns_nic_ring_close() local
845 h->dev->ops->toggle_ring_irq(priv->ring_data[idx].ring, 1); in hns_nic_ring_close()
853 struct hnae_handle *h = priv->ae_handle; in hns_nic_init_irq() local
860 for (i = 0; i < h->q_num * 2; i++) { in hns_nic_init_irq()
868 (i < h->q_num ? "tx" : "rx"), rd->queue_index); in hns_nic_init_irq()
897 struct hnae_handle *h = priv->ae_handle; in hns_nic_net_up() local
907 for (i = 0; i < h->q_num * 2; i++) { in hns_nic_net_up()
913 for (k = 0; k < h->q_num; k++) in hns_nic_net_up()
914 h->dev->ops->toggle_queue_status(h->qs[k], 1); in hns_nic_net_up()
916 ret = h->dev->ops->set_mac_addr(h, ndev->dev_addr); in hns_nic_net_up()
920 ret = h->dev->ops->start ? h->dev->ops->start(h) : 0; in hns_nic_net_up()
935 for (k = 0; k < h->q_num; k++) in hns_nic_net_up()
936 h->dev->ops->toggle_queue_status(h->qs[k], 0); in hns_nic_net_up()
1010 struct hnae_handle *h = priv->ae_handle; in hns_nic_net_open() local
1019 ret = netif_set_real_num_tx_queues(ndev, h->q_num); in hns_nic_net_open()
1026 ret = netif_set_real_num_rx_queues(ndev, h->q_num); in hns_nic_net_open()
1108 struct hnae_handle *h = priv->ae_handle; in hns_nic_change_mtu() local
1115 if (!h->dev->ops->set_mtu) in hns_nic_change_mtu()
1122 ret = h->dev->ops->set_mtu(h, new_mtu); in hns_nic_change_mtu()
1130 ret = h->dev->ops->set_mtu(h, new_mtu); in hns_nic_change_mtu()
1149 struct hnae_handle *h = priv->ae_handle; in hns_set_multicast_list() local
1152 if (!h) { in hns_set_multicast_list()
1157 if (h->dev->ops->set_mc_addr) { in hns_set_multicast_list()
1159 if (h->dev->ops->set_mc_addr(h, ha->addr)) in hns_set_multicast_list()
1167 struct hnae_handle *h = priv->ae_handle; in hns_nic_set_rx_mode() local
1169 if (h->dev->ops->set_promisc_mode) { in hns_nic_set_rx_mode()
1171 h->dev->ops->set_promisc_mode(h, 1); in hns_nic_set_rx_mode()
1173 h->dev->ops->set_promisc_mode(h, 0); in hns_nic_set_rx_mode()
1188 struct hnae_handle *h = priv->ae_handle; in hns_nic_get_stats64() local
1190 for (idx = 0; idx < h->q_num; idx++) { in hns_nic_get_stats64()
1191 tx_bytes += h->qs[idx]->tx_ring.stats.tx_bytes; in hns_nic_get_stats64()
1192 tx_pkts += h->qs[idx]->tx_ring.stats.tx_pkts; in hns_nic_get_stats64()
1193 rx_bytes += h->qs[idx]->rx_ring.stats.rx_bytes; in hns_nic_get_stats64()
1194 rx_pkts += h->qs[idx]->rx_ring.stats.rx_pkts; in hns_nic_get_stats64()
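
The hns_nic_get_stats64() lines (1190-1194) show how the 64-bit netdev totals are built: each of the handle's q_num queues keeps its own TX/RX byte and packet counters, and the totals are simply the sum over all queues. A small stand-alone sketch of that aggregation follows; the structs are simplified stand-ins, not the driver's hnae types.

/* Per-queue counters summed into device-wide totals. */
#include <stdint.h>
#include <stdio.h>

struct ring_stats_sketch { uint64_t bytes, pkts; };
struct queue_sketch { struct ring_stats_sketch tx, rx; };

int main(void)
{
	struct queue_sketch qs[2] = {
		{ .tx = { 1500, 10 }, .rx = { 3000, 20 } },
		{ .tx = {  500,  5 }, .rx = { 1000, 15 } },
	};
	uint64_t tx_bytes = 0, tx_pkts = 0, rx_bytes = 0, rx_pkts = 0;
	int idx;

	for (idx = 0; idx < 2; idx++) {
		tx_bytes += qs[idx].tx.bytes;
		tx_pkts  += qs[idx].tx.pkts;
		rx_bytes += qs[idx].rx.bytes;
		rx_pkts  += qs[idx].rx.pkts;
	}

	printf("tx: %llu bytes / %llu pkts, rx: %llu bytes / %llu pkts\n",
	       (unsigned long long)tx_bytes, (unsigned long long)tx_pkts,
	       (unsigned long long)rx_bytes, (unsigned long long)rx_pkts);
	return 0;
}
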
1245 struct hnae_handle *h = priv->ae_handle; in hns_nic_update_link_status() local
1254 state = state && h->dev->ops->get_status(h); in hns_nic_update_link_status()
1272 struct hnae_handle *h = priv->ae_handle; in hns_nic_dump() local
1273 struct hnae_ae_ops *ops = h->dev->ops; in hns_nic_dump()
1290 for (i = 0; i < h->q_num; i++) { in hns_nic_dump()
1292 i, h->qs[i]->tx_ring.next_to_clean); in hns_nic_dump()
1294 i, h->qs[i]->tx_ring.next_to_use); in hns_nic_dump()
1296 i, h->qs[i]->rx_ring.next_to_clean); in hns_nic_dump()
1298 i, h->qs[i]->rx_ring.next_to_use); in hns_nic_dump()
1343 struct hnae_handle *h = priv->ae_handle; in hns_nic_service_task() local
1346 h->dev->ops->update_led_status(h); in hns_nic_service_task()
1389 struct hnae_handle *h = priv->ae_handle; in hns_nic_init_ring_data() local
1393 if (h->q_num > NIC_MAX_Q_PER_VF) { in hns_nic_init_ring_data()
1394 netdev_err(priv->netdev, "too much queue (%d)\n", h->q_num); in hns_nic_init_ring_data()
1398 priv->ring_data = kzalloc(h->q_num * sizeof(*priv->ring_data) * 2, in hns_nic_init_ring_data()
1403 for (i = 0; i < h->q_num; i++) { in hns_nic_init_ring_data()
1406 rd->ring = &h->qs[i]->tx_ring; in hns_nic_init_ring_data()
1414 for (i = h->q_num; i < h->q_num * 2; i++) { in hns_nic_init_ring_data()
1416 rd->queue_index = i - h->q_num; in hns_nic_init_ring_data()
1417 rd->ring = &h->qs[i - h->q_num]->rx_ring; in hns_nic_init_ring_data()
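
The hns_nic_init_ring_data() lines (1393-1417) establish the indexing convention that the IRQ setup loop (860-868) and the per-vector open/close paths rely on: ring_data is one flat array of 2 * q_num entries, with TX rings in slots [0, q_num) and RX rings in slots [q_num, 2 * q_num), each RX slot mapping back to queue i - q_num. The sketch below reproduces only that layout; struct ring_data_sketch is a stand-in for the driver's ring data structure, and q_num is an assumed demo value.

/* TX rings first, RX rings second, in a single 2 * q_num array. */
#include <stdio.h>
#include <stdlib.h>

struct ring_data_sketch {
	int queue_index;           /* queue this entry belongs to   */
	const char *dir;           /* "tx" for i < q_num, else "rx" */
};

int main(void)
{
	int q_num = 4;             /* assumed queue count for the demo */
	struct ring_data_sketch *rd = calloc(2 * q_num, sizeof(*rd));
	int i;

	if (!rd)
		return 1;

	for (i = 0; i < q_num; i++) {          /* first half: TX rings */
		rd[i].queue_index = i;
		rd[i].dir = "tx";
	}
	for (i = q_num; i < q_num * 2; i++) {  /* second half: RX rings */
		rd[i].queue_index = i - q_num;
		rd[i].dir = "rx";
	}

	for (i = 0; i < q_num * 2; i++)
		printf("ring_data[%d]: %s queue %d\n",
		       i, rd[i].dir, rd[i].queue_index);

	free(rd);
	return 0;
}
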
1432 struct hnae_handle *h = priv->ae_handle; in hns_nic_uninit_ring_data() local
1435 for (i = 0; i < h->q_num * 2; i++) { in hns_nic_uninit_ring_data()
1452 struct hnae_handle *h; in hns_nic_try_get_ae() local
1455 h = hnae_get_handle(&priv->netdev->dev, in hns_nic_try_get_ae()
1457 if (IS_ERR_OR_NULL(h)) { in hns_nic_try_get_ae()
1458 ret = PTR_ERR(h); in hns_nic_try_get_ae()
1462 priv->ae_handle = h; in hns_nic_try_get_ae()
1464 ret = hns_nic_init_phy(ndev, h); in hns_nic_try_get_ae()