Lines Matching refs:pdata

133 static int xgbe_alloc_channels(struct xgbe_prv_data *pdata)  in xgbe_alloc_channels()  argument
140 count = max_t(unsigned int, pdata->tx_ring_count, pdata->rx_ring_count); in xgbe_alloc_channels()
146 tx_ring = kcalloc(pdata->tx_ring_count, sizeof(struct xgbe_ring), in xgbe_alloc_channels()
151 rx_ring = kcalloc(pdata->rx_ring_count, sizeof(struct xgbe_ring), in xgbe_alloc_channels()
158 channel->pdata = pdata; in xgbe_alloc_channels()
160 channel->dma_regs = pdata->xgmac_regs + DMA_CH_BASE + in xgbe_alloc_channels()
163 if (pdata->per_channel_irq) { in xgbe_alloc_channels()
165 ret = platform_get_irq(pdata->pdev, i + 1); in xgbe_alloc_channels()
167 netdev_err(pdata->netdev, in xgbe_alloc_channels()
176 if (i < pdata->tx_ring_count) { in xgbe_alloc_channels()
181 if (i < pdata->rx_ring_count) { in xgbe_alloc_channels()
186 netif_dbg(pdata, drv, pdata->netdev, in xgbe_alloc_channels()
192 pdata->channel = channel_mem; in xgbe_alloc_channels()
193 pdata->channel_count = count; in xgbe_alloc_channels()
210 static void xgbe_free_channels(struct xgbe_prv_data *pdata) in xgbe_free_channels() argument
212 if (!pdata->channel) in xgbe_free_channels()
215 kfree(pdata->channel->rx_ring); in xgbe_free_channels()
216 kfree(pdata->channel->tx_ring); in xgbe_free_channels()
217 kfree(pdata->channel); in xgbe_free_channels()
219 pdata->channel = NULL; in xgbe_free_channels()
220 pdata->channel_count = 0; in xgbe_free_channels()
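The xgbe_alloc_channels() and xgbe_free_channels() lines above show the driver sizing one channel array to the larger of the Tx and Rx ring counts, attaching a Tx ring only while i < tx_ring_count and an Rx ring only while i < rx_ring_count, and, when per_channel_irq is set, taking each channel's DMA interrupt from platform_get_irq() at index i + 1 (index 0 is the device interrupt). A condensed sketch of that allocation pattern follows; the queue_index and dma_irq fields, the DMA_CH_INC register stride, and the GFP_KERNEL flags come from the surrounding driver and are not part of the lines listed here.

        /* Condensed sketch of the allocation pattern in xgbe_alloc_channels();
         * error handling is trimmed and only the fields referenced above are set. */
        count = max_t(unsigned int, pdata->tx_ring_count, pdata->rx_ring_count);

        channel_mem = kcalloc(count, sizeof(struct xgbe_channel), GFP_KERNEL);
        tx_ring = kcalloc(pdata->tx_ring_count, sizeof(struct xgbe_ring), GFP_KERNEL);
        rx_ring = kcalloc(pdata->rx_ring_count, sizeof(struct xgbe_ring), GFP_KERNEL);

        for (i = 0, channel = channel_mem; i < count; i++, channel++) {
                channel->pdata = pdata;
                channel->queue_index = i;
                channel->dma_regs = pdata->xgmac_regs + DMA_CH_BASE +
                                    (DMA_CH_INC * i);

                if (pdata->per_channel_irq) {
                        /* Resource 0 is the device interrupt; per-channel
                         * DMA interrupts start at resource index 1. */
                        ret = platform_get_irq(pdata->pdev, i + 1);
                        if (ret < 0)
                                return ret;     /* unwind omitted in this sketch */
                        channel->dma_irq = ret;
                }

                if (i < pdata->tx_ring_count)
                        channel->tx_ring = tx_ring + i;
                if (i < pdata->rx_ring_count)
                        channel->rx_ring = rx_ring + i;
        }

        pdata->channel = channel_mem;
        pdata->channel_count = count;

xgbe_free_channels() releases the rings first and the channel array last, then clears pdata->channel and pdata->channel_count so a later free is a no-op.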
236 struct xgbe_prv_data *pdata = channel->pdata; in xgbe_maybe_stop_tx_queue() local
239 netif_info(pdata, drv, pdata->netdev, in xgbe_maybe_stop_tx_queue()
241 netif_stop_subqueue(pdata->netdev, channel->queue_index); in xgbe_maybe_stop_tx_queue()
248 pdata->hw_if.tx_start_xmit(channel, ring); in xgbe_maybe_stop_tx_queue()
274 static void xgbe_enable_rx_tx_ints(struct xgbe_prv_data *pdata) in xgbe_enable_rx_tx_ints() argument
276 struct xgbe_hw_if *hw_if = &pdata->hw_if; in xgbe_enable_rx_tx_ints()
281 channel = pdata->channel; in xgbe_enable_rx_tx_ints()
282 for (i = 0; i < pdata->channel_count; i++, channel++) { in xgbe_enable_rx_tx_ints()
296 static void xgbe_disable_rx_tx_ints(struct xgbe_prv_data *pdata) in xgbe_disable_rx_tx_ints() argument
298 struct xgbe_hw_if *hw_if = &pdata->hw_if; in xgbe_disable_rx_tx_ints()
303 channel = pdata->channel; in xgbe_disable_rx_tx_ints()
304 for (i = 0; i < pdata->channel_count; i++, channel++) { in xgbe_disable_rx_tx_ints()
320 struct xgbe_prv_data *pdata = data; in xgbe_isr() local
321 struct xgbe_hw_if *hw_if = &pdata->hw_if; in xgbe_isr()
331 dma_isr = XGMAC_IOREAD(pdata, DMA_ISR); in xgbe_isr()
335 netif_dbg(pdata, intr, pdata->netdev, "DMA_ISR=%#010x\n", dma_isr); in xgbe_isr()
337 for (i = 0; i < pdata->channel_count; i++) { in xgbe_isr()
341 channel = pdata->channel + i; in xgbe_isr()
344 netif_dbg(pdata, intr, pdata->netdev, "DMA_CH%u_ISR=%#010x\n", in xgbe_isr()
351 if (!pdata->per_channel_irq && in xgbe_isr()
354 if (napi_schedule_prep(&pdata->napi)) { in xgbe_isr()
356 xgbe_disable_rx_tx_ints(pdata); in xgbe_isr()
359 __napi_schedule(&pdata->napi); in xgbe_isr()
364 pdata->ext_stats.rx_buffer_unavailable++; in xgbe_isr()
368 schedule_work(&pdata->restart_work); in xgbe_isr()
375 mac_isr = XGMAC_IOREAD(pdata, MAC_ISR); in xgbe_isr()
378 hw_if->tx_mmc_int(pdata); in xgbe_isr()
381 hw_if->rx_mmc_int(pdata); in xgbe_isr()
384 mac_tssr = XGMAC_IOREAD(pdata, MAC_TSSR); in xgbe_isr()
388 pdata->tx_tstamp = in xgbe_isr()
389 hw_if->get_tx_tstamp(pdata); in xgbe_isr()
390 queue_work(pdata->dev_workqueue, in xgbe_isr()
391 &pdata->tx_tstamp_work); in xgbe_isr()
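The xgbe_isr() lines trace the shared-interrupt path: DMA_ISR is read once, each asserted channel's status register is inspected, and, when the device is not using per-channel interrupts, Rx/Tx completion work is deferred to the single NAPI instance with the DMA Rx/Tx interrupts masked; an Rx buffer-unavailable event bumps ext_stats, a fatal bus error schedules restart_work, and the MAC_ISR/MAC_TSSR checks hand MMC counters and the captured Tx timestamp off to their handlers. A condensed sketch of the per-channel scan follows; the DMA_CH_SR register, the XGMAC_DMA_IOREAD/IOWRITE and XGMAC_GET_BITS accessors, and the TI/RI/RBU/FBE bit names are taken from the driver's register macros rather than from the lines listed here.

        /* Condensed sketch of the channel scan inside xgbe_isr() */
        dma_isr = XGMAC_IOREAD(pdata, DMA_ISR);
        if (!dma_isr)
                return IRQ_HANDLED;

        for (i = 0; i < pdata->channel_count; i++) {
                if (!(dma_isr & (1 << i)))
                        continue;

                channel = pdata->channel + i;
                dma_ch_isr = XGMAC_DMA_IOREAD(channel, DMA_CH_SR);

                /* Shared IRQ: defer Rx/Tx processing to the one NAPI
                 * context with the DMA Rx/Tx interrupts masked. */
                if (!pdata->per_channel_irq &&
                    (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, TI) ||
                     XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, RI))) {
                        if (napi_schedule_prep(&pdata->napi)) {
                                xgbe_disable_rx_tx_ints(pdata);
                                __napi_schedule(&pdata->napi);
                        }
                }

                if (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, RBU))
                        pdata->ext_stats.rx_buffer_unavailable++;

                /* A fatal bus error forces a full device restart */
                if (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, FBE))
                        schedule_work(&pdata->restart_work);

                /* Acknowledge the serviced interrupt sources */
                XGMAC_DMA_IOWRITE(channel, DMA_CH_SR, dma_ch_isr);
        }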
421 struct xgbe_prv_data *pdata = channel->pdata; in xgbe_tx_timer() local
426 napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi; in xgbe_tx_timer()
430 if (pdata->per_channel_irq) in xgbe_tx_timer()
433 xgbe_disable_rx_tx_ints(pdata); in xgbe_tx_timer()
446 struct xgbe_prv_data *pdata = container_of(work, in xgbe_service() local
450 pdata->phy_if.phy_status(pdata); in xgbe_service()
455 struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)data; in xgbe_service_timer() local
457 queue_work(pdata->dev_workqueue, &pdata->service_work); in xgbe_service_timer()
459 mod_timer(&pdata->service_timer, jiffies + HZ); in xgbe_service_timer()
462 static void xgbe_init_timers(struct xgbe_prv_data *pdata) in xgbe_init_timers() argument
467 setup_timer(&pdata->service_timer, xgbe_service_timer, in xgbe_init_timers()
468 (unsigned long)pdata); in xgbe_init_timers()
470 channel = pdata->channel; in xgbe_init_timers()
471 for (i = 0; i < pdata->channel_count; i++, channel++) { in xgbe_init_timers()
480 static void xgbe_start_timers(struct xgbe_prv_data *pdata) in xgbe_start_timers() argument
482 mod_timer(&pdata->service_timer, jiffies + HZ); in xgbe_start_timers()
485 static void xgbe_stop_timers(struct xgbe_prv_data *pdata) in xgbe_stop_timers() argument
490 del_timer_sync(&pdata->service_timer); in xgbe_stop_timers()
492 channel = pdata->channel; in xgbe_stop_timers()
493 for (i = 0; i < pdata->channel_count; i++, channel++) { in xgbe_stop_timers()
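The service path above is a one-second heartbeat: xgbe_service_timer() queues service_work on the driver's workqueue and re-arms itself, and xgbe_service() simply asks the PHY layer for a status update. Note these lines use the pre-4.15 setup_timer()/unsigned long callback convention rather than timer_setup(). Condensed from the lines above:

        static void xgbe_service(struct work_struct *work)
        {
                struct xgbe_prv_data *pdata = container_of(work,
                                                           struct xgbe_prv_data,
                                                           service_work);

                pdata->phy_if.phy_status(pdata);
        }

        static void xgbe_service_timer(unsigned long data)
        {
                struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)data;

                queue_work(pdata->dev_workqueue, &pdata->service_work);

                /* Re-arm so the PHY status poll runs once per second */
                mod_timer(&pdata->service_timer, jiffies + HZ);
        }

xgbe_init_timers() also sets up a per-channel Tx timer (xgbe_tx_timer, shown earlier), and xgbe_stop_timers() deletes the service timer and each channel's timer synchronously.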
501 void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata) in xgbe_get_all_hw_features() argument
504 struct xgbe_hw_features *hw_feat = &pdata->hw_feat; in xgbe_get_all_hw_features()
508 mac_hfr0 = XGMAC_IOREAD(pdata, MAC_HWF0R); in xgbe_get_all_hw_features()
509 mac_hfr1 = XGMAC_IOREAD(pdata, MAC_HWF1R); in xgbe_get_all_hw_features()
510 mac_hfr2 = XGMAC_IOREAD(pdata, MAC_HWF2R); in xgbe_get_all_hw_features()
514 hw_feat->version = XGMAC_IOREAD(pdata, MAC_VR); in xgbe_get_all_hw_features()
601 static void xgbe_napi_enable(struct xgbe_prv_data *pdata, unsigned int add) in xgbe_napi_enable() argument
606 if (pdata->per_channel_irq) { in xgbe_napi_enable()
607 channel = pdata->channel; in xgbe_napi_enable()
608 for (i = 0; i < pdata->channel_count; i++, channel++) { in xgbe_napi_enable()
610 netif_napi_add(pdata->netdev, &channel->napi, in xgbe_napi_enable()
617 netif_napi_add(pdata->netdev, &pdata->napi, in xgbe_napi_enable()
620 napi_enable(&pdata->napi); in xgbe_napi_enable()
624 static void xgbe_napi_disable(struct xgbe_prv_data *pdata, unsigned int del) in xgbe_napi_disable() argument
629 if (pdata->per_channel_irq) { in xgbe_napi_disable()
630 channel = pdata->channel; in xgbe_napi_disable()
631 for (i = 0; i < pdata->channel_count; i++, channel++) { in xgbe_napi_disable()
638 napi_disable(&pdata->napi); in xgbe_napi_disable()
641 netif_napi_del(&pdata->napi); in xgbe_napi_disable()
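xgbe_napi_enable()/xgbe_napi_disable() switch between two NAPI layouts: with per_channel_irq each channel owns a NAPI context, otherwise a single instance on pdata->napi handles every ring (its poll function, xgbe_all_poll, appears near the end of this listing). The add/del flags let the powerdown path re-enable or disable NAPI without re-registering it. In the sketch below, xgbe_one_poll as the per-channel poll handler and the NAPI_POLL_WEIGHT weight are assumptions drawn from the surrounding driver, not from the lines listed here.

        /* Sketch of the per-channel vs. shared NAPI registration */
        if (pdata->per_channel_irq) {
                channel = pdata->channel;
                for (i = 0; i < pdata->channel_count; i++, channel++) {
                        if (add)
                                netif_napi_add(pdata->netdev, &channel->napi,
                                               xgbe_one_poll, NAPI_POLL_WEIGHT);
                        napi_enable(&channel->napi);
                }
        } else {
                if (add)
                        netif_napi_add(pdata->netdev, &pdata->napi,
                                       xgbe_all_poll, NAPI_POLL_WEIGHT);
                napi_enable(&pdata->napi);
        }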
645 static int xgbe_request_irqs(struct xgbe_prv_data *pdata) in xgbe_request_irqs() argument
648 struct net_device *netdev = pdata->netdev; in xgbe_request_irqs()
652 ret = devm_request_irq(pdata->dev, pdata->dev_irq, xgbe_isr, 0, in xgbe_request_irqs()
653 netdev->name, pdata); in xgbe_request_irqs()
656 pdata->dev_irq); in xgbe_request_irqs()
660 if (!pdata->per_channel_irq) in xgbe_request_irqs()
663 channel = pdata->channel; in xgbe_request_irqs()
664 for (i = 0; i < pdata->channel_count; i++, channel++) { in xgbe_request_irqs()
670 ret = devm_request_irq(pdata->dev, channel->dma_irq, in xgbe_request_irqs()
684 for (i--, channel--; i < pdata->channel_count; i--, channel--) in xgbe_request_irqs()
685 devm_free_irq(pdata->dev, channel->dma_irq, channel); in xgbe_request_irqs()
687 devm_free_irq(pdata->dev, pdata->dev_irq, pdata); in xgbe_request_irqs()
692 static void xgbe_free_irqs(struct xgbe_prv_data *pdata) in xgbe_free_irqs() argument
697 devm_free_irq(pdata->dev, pdata->dev_irq, pdata); in xgbe_free_irqs()
699 if (!pdata->per_channel_irq) in xgbe_free_irqs()
702 channel = pdata->channel; in xgbe_free_irqs()
703 for (i = 0; i < pdata->channel_count; i++, channel++) in xgbe_free_irqs()
704 devm_free_irq(pdata->dev, channel->dma_irq, channel); in xgbe_free_irqs()
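xgbe_request_irqs() requests the device interrupt first and, only when per_channel_irq is set, one DMA interrupt per channel; on failure the loop at line 684 walks back over the channel IRQs already requested (the unsigned index wraps past zero and fails the < channel_count test, which ends the walk) before releasing the device IRQ. A sketch of that request/rollback pattern follows; the xgbe_dma_isr handler, the dma_irq_name field, and the label name are taken from the driver but do not appear in the lines listed here.

        static int xgbe_request_irqs(struct xgbe_prv_data *pdata)
        {
                struct xgbe_channel *channel;
                struct net_device *netdev = pdata->netdev;
                unsigned int i;
                int ret;

                ret = devm_request_irq(pdata->dev, pdata->dev_irq, xgbe_isr, 0,
                                       netdev->name, pdata);
                if (ret)
                        return ret;

                if (!pdata->per_channel_irq)
                        return 0;

                channel = pdata->channel;
                for (i = 0; i < pdata->channel_count; i++, channel++) {
                        ret = devm_request_irq(pdata->dev, channel->dma_irq,
                                               xgbe_dma_isr, 0,
                                               channel->dma_irq_name, channel);
                        if (ret)
                                goto err_dma_irq;
                }

                return 0;

        err_dma_irq:
                /* Unwind: 'i' is unsigned, so decrementing it past zero wraps
                 * and the '< channel_count' test ends the walk-back. */
                for (i--, channel--; i < pdata->channel_count; i--, channel--)
                        devm_free_irq(pdata->dev, channel->dma_irq, channel);

                devm_free_irq(pdata->dev, pdata->dev_irq, pdata);

                return ret;
        }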
707 void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata) in xgbe_init_tx_coalesce() argument
709 struct xgbe_hw_if *hw_if = &pdata->hw_if; in xgbe_init_tx_coalesce()
713 pdata->tx_usecs = XGMAC_INIT_DMA_TX_USECS; in xgbe_init_tx_coalesce()
714 pdata->tx_frames = XGMAC_INIT_DMA_TX_FRAMES; in xgbe_init_tx_coalesce()
716 hw_if->config_tx_coalesce(pdata); in xgbe_init_tx_coalesce()
721 void xgbe_init_rx_coalesce(struct xgbe_prv_data *pdata) in xgbe_init_rx_coalesce() argument
723 struct xgbe_hw_if *hw_if = &pdata->hw_if; in xgbe_init_rx_coalesce()
727 pdata->rx_riwt = hw_if->usec_to_riwt(pdata, XGMAC_INIT_DMA_RX_USECS); in xgbe_init_rx_coalesce()
728 pdata->rx_usecs = XGMAC_INIT_DMA_RX_USECS; in xgbe_init_rx_coalesce()
729 pdata->rx_frames = XGMAC_INIT_DMA_RX_FRAMES; in xgbe_init_rx_coalesce()
731 hw_if->config_rx_coalesce(pdata); in xgbe_init_rx_coalesce()
736 static void xgbe_free_tx_data(struct xgbe_prv_data *pdata) in xgbe_free_tx_data() argument
738 struct xgbe_desc_if *desc_if = &pdata->desc_if; in xgbe_free_tx_data()
746 channel = pdata->channel; in xgbe_free_tx_data()
747 for (i = 0; i < pdata->channel_count; i++, channel++) { in xgbe_free_tx_data()
754 desc_if->unmap_rdata(pdata, rdata); in xgbe_free_tx_data()
761 static void xgbe_free_rx_data(struct xgbe_prv_data *pdata) in xgbe_free_rx_data() argument
763 struct xgbe_desc_if *desc_if = &pdata->desc_if; in xgbe_free_rx_data()
771 channel = pdata->channel; in xgbe_free_rx_data()
772 for (i = 0; i < pdata->channel_count; i++, channel++) { in xgbe_free_rx_data()
779 desc_if->unmap_rdata(pdata, rdata); in xgbe_free_rx_data()
786 static int xgbe_phy_init(struct xgbe_prv_data *pdata) in xgbe_phy_init() argument
788 pdata->phy_link = -1; in xgbe_phy_init()
789 pdata->phy_speed = SPEED_UNKNOWN; in xgbe_phy_init()
791 return pdata->phy_if.phy_reset(pdata); in xgbe_phy_init()
796 struct xgbe_prv_data *pdata = netdev_priv(netdev); in xgbe_powerdown() local
797 struct xgbe_hw_if *hw_if = &pdata->hw_if; in xgbe_powerdown()
803 (caller == XGMAC_IOCTL_CONTEXT && pdata->power_down)) { in xgbe_powerdown()
809 spin_lock_irqsave(&pdata->lock, flags); in xgbe_powerdown()
816 xgbe_stop_timers(pdata); in xgbe_powerdown()
817 flush_workqueue(pdata->dev_workqueue); in xgbe_powerdown()
819 hw_if->powerdown_tx(pdata); in xgbe_powerdown()
820 hw_if->powerdown_rx(pdata); in xgbe_powerdown()
822 xgbe_napi_disable(pdata, 0); in xgbe_powerdown()
824 pdata->power_down = 1; in xgbe_powerdown()
826 spin_unlock_irqrestore(&pdata->lock, flags); in xgbe_powerdown()
835 struct xgbe_prv_data *pdata = netdev_priv(netdev); in xgbe_powerup() local
836 struct xgbe_hw_if *hw_if = &pdata->hw_if; in xgbe_powerup()
842 (caller == XGMAC_IOCTL_CONTEXT && !pdata->power_down)) { in xgbe_powerup()
848 spin_lock_irqsave(&pdata->lock, flags); in xgbe_powerup()
850 pdata->power_down = 0; in xgbe_powerup()
852 xgbe_napi_enable(pdata, 0); in xgbe_powerup()
854 hw_if->powerup_tx(pdata); in xgbe_powerup()
855 hw_if->powerup_rx(pdata); in xgbe_powerup()
862 xgbe_start_timers(pdata); in xgbe_powerup()
864 spin_unlock_irqrestore(&pdata->lock, flags); in xgbe_powerup()
871 static int xgbe_start(struct xgbe_prv_data *pdata) in xgbe_start() argument
873 struct xgbe_hw_if *hw_if = &pdata->hw_if; in xgbe_start()
874 struct xgbe_phy_if *phy_if = &pdata->phy_if; in xgbe_start()
875 struct net_device *netdev = pdata->netdev; in xgbe_start()
880 hw_if->init(pdata); in xgbe_start()
882 ret = phy_if->phy_start(pdata); in xgbe_start()
886 xgbe_napi_enable(pdata, 1); in xgbe_start()
888 ret = xgbe_request_irqs(pdata); in xgbe_start()
892 hw_if->enable_tx(pdata); in xgbe_start()
893 hw_if->enable_rx(pdata); in xgbe_start()
897 xgbe_start_timers(pdata); in xgbe_start()
898 queue_work(pdata->dev_workqueue, &pdata->service_work); in xgbe_start()
905 xgbe_napi_disable(pdata, 1); in xgbe_start()
907 phy_if->phy_stop(pdata); in xgbe_start()
910 hw_if->exit(pdata); in xgbe_start()
915 static void xgbe_stop(struct xgbe_prv_data *pdata) in xgbe_stop() argument
917 struct xgbe_hw_if *hw_if = &pdata->hw_if; in xgbe_stop()
918 struct xgbe_phy_if *phy_if = &pdata->phy_if; in xgbe_stop()
920 struct net_device *netdev = pdata->netdev; in xgbe_stop()
928 xgbe_stop_timers(pdata); in xgbe_stop()
929 flush_workqueue(pdata->dev_workqueue); in xgbe_stop()
931 hw_if->disable_tx(pdata); in xgbe_stop()
932 hw_if->disable_rx(pdata); in xgbe_stop()
934 xgbe_free_irqs(pdata); in xgbe_stop()
936 xgbe_napi_disable(pdata, 1); in xgbe_stop()
938 phy_if->phy_stop(pdata); in xgbe_stop()
940 hw_if->exit(pdata); in xgbe_stop()
942 channel = pdata->channel; in xgbe_stop()
943 for (i = 0; i < pdata->channel_count; i++, channel++) { in xgbe_stop()
954 static void xgbe_restart_dev(struct xgbe_prv_data *pdata) in xgbe_restart_dev() argument
959 if (!netif_running(pdata->netdev)) in xgbe_restart_dev()
962 xgbe_stop(pdata); in xgbe_restart_dev()
964 xgbe_free_tx_data(pdata); in xgbe_restart_dev()
965 xgbe_free_rx_data(pdata); in xgbe_restart_dev()
967 xgbe_start(pdata); in xgbe_restart_dev()
974 struct xgbe_prv_data *pdata = container_of(work, in xgbe_restart() local
980 xgbe_restart_dev(pdata); in xgbe_restart()
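xgbe_start() and xgbe_stop() bracket the running state: start initializes the hardware, brings up the PHY, enables NAPI, requests the IRQs, enables Tx/Rx and kicks the timers and service work, unwinding only what succeeded when a step fails; stop reverses that order and finally deletes each channel's Tx timer. xgbe_restart_dev() (driven by restart_work via xgbe_restart()) stops the device, drops any stale Tx/Rx buffer state, and starts it again. A condensed sketch of the start sequence and its unwind path; netif_tx_start_all_queues() and the label names are assumptions not shown in the lines above.

        /* Condensed sketch of xgbe_start() */
        hw_if->init(pdata);

        ret = phy_if->phy_start(pdata);
        if (ret)
                goto err_phy;

        xgbe_napi_enable(pdata, 1);

        ret = xgbe_request_irqs(pdata);
        if (ret)
                goto err_napi;

        hw_if->enable_tx(pdata);
        hw_if->enable_rx(pdata);

        netif_tx_start_all_queues(netdev);

        xgbe_start_timers(pdata);
        queue_work(pdata->dev_workqueue, &pdata->service_work);

        return 0;

        err_napi:
                xgbe_napi_disable(pdata, 1);
                phy_if->phy_stop(pdata);

        err_phy:
                hw_if->exit(pdata);

                return ret;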
987 struct xgbe_prv_data *pdata = container_of(work, in xgbe_tx_tstamp() local
994 if (pdata->tx_tstamp) { in xgbe_tx_tstamp()
995 nsec = timecounter_cyc2time(&pdata->tstamp_tc, in xgbe_tx_tstamp()
996 pdata->tx_tstamp); in xgbe_tx_tstamp()
1000 skb_tstamp_tx(pdata->tx_tstamp_skb, &hwtstamps); in xgbe_tx_tstamp()
1003 dev_kfree_skb_any(pdata->tx_tstamp_skb); in xgbe_tx_tstamp()
1005 spin_lock_irqsave(&pdata->tstamp_lock, flags); in xgbe_tx_tstamp()
1006 pdata->tx_tstamp_skb = NULL; in xgbe_tx_tstamp()
1007 spin_unlock_irqrestore(&pdata->tstamp_lock, flags); in xgbe_tx_tstamp()
1010 static int xgbe_get_hwtstamp_settings(struct xgbe_prv_data *pdata, in xgbe_get_hwtstamp_settings() argument
1013 if (copy_to_user(ifreq->ifr_data, &pdata->tstamp_config, in xgbe_get_hwtstamp_settings()
1014 sizeof(pdata->tstamp_config))) in xgbe_get_hwtstamp_settings()
1020 static int xgbe_set_hwtstamp_settings(struct xgbe_prv_data *pdata, in xgbe_set_hwtstamp_settings() argument
1146 pdata->hw_if.config_tstamp(pdata, mac_tscr); in xgbe_set_hwtstamp_settings()
1148 memcpy(&pdata->tstamp_config, &config, sizeof(config)); in xgbe_set_hwtstamp_settings()
1153 static void xgbe_prep_tx_tstamp(struct xgbe_prv_data *pdata, in xgbe_prep_tx_tstamp() argument
1160 spin_lock_irqsave(&pdata->tstamp_lock, flags); in xgbe_prep_tx_tstamp()
1161 if (pdata->tx_tstamp_skb) { in xgbe_prep_tx_tstamp()
1166 pdata->tx_tstamp_skb = skb_get(skb); in xgbe_prep_tx_tstamp()
1169 spin_unlock_irqrestore(&pdata->tstamp_lock, flags); in xgbe_prep_tx_tstamp()
1225 static void xgbe_packet_info(struct xgbe_prv_data *pdata, in xgbe_packet_info() argument
1274 (pdata->tstamp_config.tx_type == HWTSTAMP_TX_ON)) in xgbe_packet_info()
1294 struct xgbe_prv_data *pdata = netdev_priv(netdev); in xgbe_open() local
1295 struct xgbe_desc_if *desc_if = &pdata->desc_if; in xgbe_open()
1301 ret = xgbe_phy_init(pdata); in xgbe_open()
1306 ret = clk_prepare_enable(pdata->sysclk); in xgbe_open()
1312 ret = clk_prepare_enable(pdata->ptpclk); in xgbe_open()
1322 pdata->rx_buf_size = ret; in xgbe_open()
1325 ret = xgbe_alloc_channels(pdata); in xgbe_open()
1330 ret = desc_if->alloc_ring_resources(pdata); in xgbe_open()
1334 INIT_WORK(&pdata->service_work, xgbe_service); in xgbe_open()
1335 INIT_WORK(&pdata->restart_work, xgbe_restart); in xgbe_open()
1336 INIT_WORK(&pdata->tx_tstamp_work, xgbe_tx_tstamp); in xgbe_open()
1337 xgbe_init_timers(pdata); in xgbe_open()
1339 ret = xgbe_start(pdata); in xgbe_open()
1343 clear_bit(XGBE_DOWN, &pdata->dev_state); in xgbe_open()
1350 desc_if->free_ring_resources(pdata); in xgbe_open()
1353 xgbe_free_channels(pdata); in xgbe_open()
1356 clk_disable_unprepare(pdata->ptpclk); in xgbe_open()
1359 clk_disable_unprepare(pdata->sysclk); in xgbe_open()
1366 struct xgbe_prv_data *pdata = netdev_priv(netdev); in xgbe_close() local
1367 struct xgbe_desc_if *desc_if = &pdata->desc_if; in xgbe_close()
1372 xgbe_stop(pdata); in xgbe_close()
1375 desc_if->free_ring_resources(pdata); in xgbe_close()
1378 xgbe_free_channels(pdata); in xgbe_close()
1381 clk_disable_unprepare(pdata->ptpclk); in xgbe_close()
1382 clk_disable_unprepare(pdata->sysclk); in xgbe_close()
1384 set_bit(XGBE_DOWN, &pdata->dev_state); in xgbe_close()
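xgbe_open() builds the device up in a fixed order: PHY reset, sysclk then ptpclk, channel allocation, descriptor ring resources, the three work items plus timers, and finally xgbe_start(); each error label releases exactly what was already acquired, and success clears XGBE_DOWN. xgbe_close() tears the same pieces down in reverse and sets XGBE_DOWN again. A condensed sketch of the open path; the rx_buf_size/MTU calculation listed at line 1322 is abbreviated here and the label names are illustrative.

        /* Condensed sketch of xgbe_open(); MTU/buffer sizing omitted */
        ret = xgbe_phy_init(pdata);
        if (ret)
                return ret;

        ret = clk_prepare_enable(pdata->sysclk);
        if (ret)
                return ret;

        ret = clk_prepare_enable(pdata->ptpclk);
        if (ret)
                goto err_sysclk;

        ret = xgbe_alloc_channels(pdata);
        if (ret)
                goto err_ptpclk;

        ret = desc_if->alloc_ring_resources(pdata);
        if (ret)
                goto err_channels;

        INIT_WORK(&pdata->service_work, xgbe_service);
        INIT_WORK(&pdata->restart_work, xgbe_restart);
        INIT_WORK(&pdata->tx_tstamp_work, xgbe_tx_tstamp);
        xgbe_init_timers(pdata);

        ret = xgbe_start(pdata);
        if (ret)
                goto err_rings;

        clear_bit(XGBE_DOWN, &pdata->dev_state);

        return 0;

        err_rings:
                desc_if->free_ring_resources(pdata);
        err_channels:
                xgbe_free_channels(pdata);
        err_ptpclk:
                clk_disable_unprepare(pdata->ptpclk);
        err_sysclk:
                clk_disable_unprepare(pdata->sysclk);

                return ret;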
1393 struct xgbe_prv_data *pdata = netdev_priv(netdev); in xgbe_xmit() local
1394 struct xgbe_hw_if *hw_if = &pdata->hw_if; in xgbe_xmit()
1395 struct xgbe_desc_if *desc_if = &pdata->desc_if; in xgbe_xmit()
1404 channel = pdata->channel + skb->queue_mapping; in xgbe_xmit()
1412 netif_err(pdata, tx_err, netdev, in xgbe_xmit()
1420 xgbe_packet_info(pdata, ring, skb, packet); in xgbe_xmit()
1429 netif_err(pdata, tx_err, netdev, in xgbe_xmit()
1441 xgbe_prep_tx_tstamp(pdata, skb, packet); in xgbe_xmit()
1449 if (netif_msg_pktdata(pdata)) in xgbe_xmit()
1463 struct xgbe_prv_data *pdata = netdev_priv(netdev); in xgbe_set_rx_mode() local
1464 struct xgbe_hw_if *hw_if = &pdata->hw_if; in xgbe_set_rx_mode()
1468 hw_if->config_rx_mode(pdata); in xgbe_set_rx_mode()
1475 struct xgbe_prv_data *pdata = netdev_priv(netdev); in xgbe_set_mac_address() local
1476 struct xgbe_hw_if *hw_if = &pdata->hw_if; in xgbe_set_mac_address()
1486 hw_if->set_mac_address(pdata, netdev->dev_addr); in xgbe_set_mac_address()
1495 struct xgbe_prv_data *pdata = netdev_priv(netdev); in xgbe_ioctl() local
1500 ret = xgbe_get_hwtstamp_settings(pdata, ifreq); in xgbe_ioctl()
1504 ret = xgbe_set_hwtstamp_settings(pdata, ifreq); in xgbe_ioctl()
1516 struct xgbe_prv_data *pdata = netdev_priv(netdev); in xgbe_change_mtu() local
1525 pdata->rx_buf_size = ret; in xgbe_change_mtu()
1528 xgbe_restart_dev(pdata); in xgbe_change_mtu()
1537 struct xgbe_prv_data *pdata = netdev_priv(netdev); in xgbe_tx_timeout() local
1540 schedule_work(&pdata->restart_work); in xgbe_tx_timeout()
1546 struct xgbe_prv_data *pdata = netdev_priv(netdev); in xgbe_get_stats64() local
1547 struct xgbe_mmc_stats *pstats = &pdata->mmc_stats; in xgbe_get_stats64()
1551 pdata->hw_if.read_mmc_stats(pdata); in xgbe_get_stats64()
1577 struct xgbe_prv_data *pdata = netdev_priv(netdev); in xgbe_vlan_rx_add_vid() local
1578 struct xgbe_hw_if *hw_if = &pdata->hw_if; in xgbe_vlan_rx_add_vid()
1582 set_bit(vid, pdata->active_vlans); in xgbe_vlan_rx_add_vid()
1583 hw_if->update_vlan_hash_table(pdata); in xgbe_vlan_rx_add_vid()
1593 struct xgbe_prv_data *pdata = netdev_priv(netdev); in xgbe_vlan_rx_kill_vid() local
1594 struct xgbe_hw_if *hw_if = &pdata->hw_if; in xgbe_vlan_rx_kill_vid()
1598 clear_bit(vid, pdata->active_vlans); in xgbe_vlan_rx_kill_vid()
1599 hw_if->update_vlan_hash_table(pdata); in xgbe_vlan_rx_kill_vid()
1609 struct xgbe_prv_data *pdata = netdev_priv(netdev); in xgbe_poll_controller() local
1615 if (pdata->per_channel_irq) { in xgbe_poll_controller()
1616 channel = pdata->channel; in xgbe_poll_controller()
1617 for (i = 0; i < pdata->channel_count; i++, channel++) in xgbe_poll_controller()
1620 disable_irq(pdata->dev_irq); in xgbe_poll_controller()
1621 xgbe_isr(pdata->dev_irq, pdata); in xgbe_poll_controller()
1622 enable_irq(pdata->dev_irq); in xgbe_poll_controller()
1631 struct xgbe_prv_data *pdata = netdev_priv(netdev); in xgbe_setup_tc() local
1635 if (tc && (tc != pdata->hw_feat.tc_cnt)) in xgbe_setup_tc()
1641 while ((queue < pdata->tx_q_count) && in xgbe_setup_tc()
1642 (pdata->q2tc_map[queue] == i)) in xgbe_setup_tc()
1645 netif_dbg(pdata, drv, netdev, "TC%u using TXq%u-%u\n", in xgbe_setup_tc()
1660 struct xgbe_prv_data *pdata = netdev_priv(netdev); in xgbe_set_features() local
1661 struct xgbe_hw_if *hw_if = &pdata->hw_if; in xgbe_set_features()
1665 rxhash = pdata->netdev_features & NETIF_F_RXHASH; in xgbe_set_features()
1666 rxcsum = pdata->netdev_features & NETIF_F_RXCSUM; in xgbe_set_features()
1667 rxvlan = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_RX; in xgbe_set_features()
1668 rxvlan_filter = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_FILTER; in xgbe_set_features()
1671 ret = hw_if->enable_rss(pdata); in xgbe_set_features()
1673 ret = hw_if->disable_rss(pdata); in xgbe_set_features()
1678 hw_if->enable_rx_csum(pdata); in xgbe_set_features()
1680 hw_if->disable_rx_csum(pdata); in xgbe_set_features()
1683 hw_if->enable_rx_vlan_stripping(pdata); in xgbe_set_features()
1685 hw_if->disable_rx_vlan_stripping(pdata); in xgbe_set_features()
1688 hw_if->enable_rx_vlan_filtering(pdata); in xgbe_set_features()
1690 hw_if->disable_rx_vlan_filtering(pdata); in xgbe_set_features()
1692 pdata->netdev_features = features; in xgbe_set_features()
1726 struct xgbe_prv_data *pdata = channel->pdata; in xgbe_rx_refresh() local
1727 struct xgbe_hw_if *hw_if = &pdata->hw_if; in xgbe_rx_refresh()
1728 struct xgbe_desc_if *desc_if = &pdata->desc_if; in xgbe_rx_refresh()
1736 desc_if->unmap_rdata(pdata, rdata); in xgbe_rx_refresh()
1738 if (desc_if->map_rx_buffer(pdata, ring, rdata)) in xgbe_rx_refresh()
1741 hw_if->rx_desc_reset(pdata, rdata, ring->dirty); in xgbe_rx_refresh()
1756 static struct sk_buff *xgbe_create_skb(struct xgbe_prv_data *pdata, in xgbe_create_skb() argument
1772 dma_sync_single_range_for_cpu(pdata->dev, rdata->rx.hdr.dma_base, in xgbe_create_skb()
1786 dma_sync_single_range_for_cpu(pdata->dev, in xgbe_create_skb()
1804 struct xgbe_prv_data *pdata = channel->pdata; in xgbe_tx_poll() local
1805 struct xgbe_hw_if *hw_if = &pdata->hw_if; in xgbe_tx_poll()
1806 struct xgbe_desc_if *desc_if = &pdata->desc_if; in xgbe_tx_poll()
1810 struct net_device *netdev = pdata->netdev; in xgbe_tx_poll()
1841 if (netif_msg_tx_done(pdata)) in xgbe_tx_poll()
1842 xgbe_dump_tx_desc(pdata, ring, ring->dirty, 1, 0); in xgbe_tx_poll()
1850 desc_if->unmap_rdata(pdata, rdata); in xgbe_tx_poll()
1875 struct xgbe_prv_data *pdata = channel->pdata; in xgbe_rx_poll() local
1876 struct xgbe_hw_if *hw_if = &pdata->hw_if; in xgbe_rx_poll()
1880 struct net_device *netdev = pdata->netdev; in xgbe_rx_poll()
1898 napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi; in xgbe_rx_poll()
1945 netif_err(pdata, rx_err, netdev, in xgbe_rx_poll()
1957 skb = xgbe_create_skb(pdata, napi, rdata, in xgbe_rx_poll()
1962 dma_sync_single_range_for_cpu(pdata->dev, in xgbe_rx_poll()
1990 netif_err(pdata, rx_err, netdev, in xgbe_rx_poll()
1996 if (netif_msg_pktdata(pdata)) in xgbe_rx_poll()
2013 nsec = timecounter_cyc2time(&pdata->tstamp_tc, in xgbe_rx_poll()
2079 struct xgbe_prv_data *pdata = container_of(napi, struct xgbe_prv_data, in xgbe_all_poll() local
2089 ring_budget = budget / pdata->rx_ring_count; in xgbe_all_poll()
2093 channel = pdata->channel; in xgbe_all_poll()
2094 for (i = 0; i < pdata->channel_count; i++, channel++) { in xgbe_all_poll()
2111 xgbe_enable_rx_tx_ints(pdata); in xgbe_all_poll()
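xgbe_all_poll() is the shared-NAPI poll used when there is no per-channel interrupt: the budget is split evenly across the Rx rings, every channel gets its Tx ring cleaned and its Rx ring polled, and only when all work completed within budget does the driver leave polling mode and re-enable the Rx/Tx DMA interrupts. In the sketch below, the last_processed loop guard and the napi_complete() call are assumptions from the surrounding driver; xgbe_tx_poll()/xgbe_rx_poll() are the helpers listed just above.

        /* Condensed sketch of the shared-NAPI poll loop in xgbe_all_poll() */
        processed = 0;
        ring_budget = budget / pdata->rx_ring_count;
        do {
                last_processed = processed;

                channel = pdata->channel;
                for (i = 0; i < pdata->channel_count; i++, channel++) {
                        /* Reclaim Tx completions first, then receive up to
                         * this ring's share of the overall budget. */
                        xgbe_tx_poll(channel);
                        processed += xgbe_rx_poll(channel, ring_budget);
                }
        } while ((processed < budget) && (processed != last_processed));

        if (processed < budget) {
                /* All work done: leave polling mode and unmask the
                 * Rx/Tx DMA interrupts again. */
                napi_complete(napi);
                xgbe_enable_rx_tx_ints(pdata);
        }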
2119 void xgbe_dump_tx_desc(struct xgbe_prv_data *pdata, struct xgbe_ring *ring, in xgbe_dump_tx_desc() argument
2128 netdev_dbg(pdata->netdev, in xgbe_dump_tx_desc()
2139 void xgbe_dump_rx_desc(struct xgbe_prv_data *pdata, struct xgbe_ring *ring, in xgbe_dump_rx_desc() argument
2147 netdev_dbg(pdata->netdev, in xgbe_dump_rx_desc()