Lines Matching refs: efx

200 static int efx_soft_enable_interrupts(struct efx_nic *efx);
201 static void efx_soft_disable_interrupts(struct efx_nic *efx);
203 static void efx_remove_channels(struct efx_nic *efx);
205 static void efx_remove_port(struct efx_nic *efx);
207 static void efx_fini_napi(struct efx_nic *efx);
209 static void efx_fini_struct(struct efx_nic *efx);
210 static void efx_start_all(struct efx_nic *efx);
211 static void efx_stop_all(struct efx_nic *efx);
213 #define EFX_ASSERT_RESET_SERIALISED(efx) \ argument
215 if ((efx->state == STATE_READY) || \
216 (efx->state == STATE_RECOVERY) || \
217 (efx->state == STATE_DISABLED)) \
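For orientation, the full macro probably reads as below; this is a sketch assembled from the fragments above, and the do/while wrapper plus the ASSERT_RTNL() body are assumptions that do not appear in this listing:

    /* Sketch: only the three state checks appear in the listing above;
     * the do/while wrapper and the ASSERT_RTNL() call are assumed.
     */
    #define EFX_ASSERT_RESET_SERIALISED(efx)                \
            do {                                            \
                    if ((efx->state == STATE_READY) ||      \
                        (efx->state == STATE_RECOVERY) ||   \
                        (efx->state == STATE_DISABLED))     \
                            ASSERT_RTNL();                  \
            } while (0)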
221 static int efx_check_disabled(struct efx_nic *efx) in efx_check_disabled() argument
223 if (efx->state == STATE_DISABLED || efx->state == STATE_RECOVERY) { in efx_check_disabled()
224 netif_err(efx, drv, efx->net_dev, in efx_check_disabled()
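A plausible reconstruction of the whole helper follows; the error message text and the -EIO/0 return values are assumptions, since only lines containing "efx" survive in this listing:

    static int efx_check_disabled(struct efx_nic *efx)
    {
            if (efx->state == STATE_DISABLED || efx->state == STATE_RECOVERY) {
                    netif_err(efx, drv, efx->net_dev,
                              "device is disabled due to earlier errors\n"); /* assumed text */
                    return -EIO;    /* assumed */
            }
            return 0;               /* assumed */
    }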
272 struct efx_nic *efx = channel->efx; in efx_poll() local
278 netif_vdbg(efx, intr, efx->net_dev, in efx_poll()
286 efx->irq_rx_adaptive && in efx_poll()
292 efx->type->push_irq_moderation(channel); in efx_poll()
297 efx->irq_rx_moderation) { in efx_poll()
299 efx->type->push_irq_moderation(channel); in efx_poll()
328 struct efx_nic *efx = channel->efx; in efx_probe_eventq() local
331 netif_dbg(efx, probe, efx->net_dev, in efx_probe_eventq()
336 entries = roundup_pow_of_two(efx->rxq_entries + efx->txq_entries + 128); in efx_probe_eventq()
346 struct efx_nic *efx = channel->efx; in efx_init_eventq() local
351 netif_dbg(efx, drv, efx->net_dev, in efx_init_eventq()
356 efx->type->push_irq_moderation(channel); in efx_init_eventq()
366 netif_dbg(channel->efx, ifup, channel->efx->net_dev, in efx_start_eventq()
395 netif_dbg(channel->efx, drv, channel->efx->net_dev, in efx_fini_eventq()
404 netif_dbg(channel->efx, drv, channel->efx->net_dev, in efx_remove_eventq()
418 efx_alloc_channel(struct efx_nic *efx, int i, struct efx_channel *old_channel) in efx_alloc_channel() argument
429 channel->efx = efx; in efx_alloc_channel()
435 tx_queue->efx = efx; in efx_alloc_channel()
441 rx_queue->efx = efx; in efx_alloc_channel()
491 netif_dbg(channel->efx, probe, channel->efx->net_dev, in efx_probe_channel()
524 struct efx_nic *efx = channel->efx; in efx_get_channel_name() local
529 if (efx->tx_channel_offset == 0) { in efx_get_channel_name()
531 } else if (channel->channel < efx->tx_channel_offset) { in efx_get_channel_name()
535 number -= efx->tx_channel_offset; in efx_get_channel_name()
537 snprintf(buf, len, "%s%s-%d", efx->name, type, number); in efx_get_channel_name()
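Pieced together, the channel-naming logic looks roughly like this; the local declarations and the "-rx"/"-tx" type strings are assumptions filled in around the fragments above:

    static void efx_get_channel_name(struct efx_channel *channel,
                                     char *buf, size_t len)
    {
            struct efx_nic *efx = channel->efx;
            const char *type;
            int number;

            number = channel->channel;
            if (efx->tx_channel_offset == 0) {
                    type = "";              /* combined RX/TX channel */
            } else if (channel->channel < efx->tx_channel_offset) {
                    type = "-rx";           /* RX-only channel (assumed suffix) */
            } else {
                    type = "-tx";           /* TX-only channel (assumed suffix) */
                    number -= efx->tx_channel_offset;
            }
            snprintf(buf, len, "%s%s-%d", efx->name, type, number);
    }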
540 static void efx_set_channel_names(struct efx_nic *efx) in efx_set_channel_names() argument
544 efx_for_each_channel(channel, efx) in efx_set_channel_names()
546 efx->msi_context[channel->channel].name, in efx_set_channel_names()
547 sizeof(efx->msi_context[0].name)); in efx_set_channel_names()
550 static int efx_probe_channels(struct efx_nic *efx) in efx_probe_channels() argument
556 efx->next_buffer_table = 0; in efx_probe_channels()
563 efx_for_each_channel_rev(channel, efx) { in efx_probe_channels()
566 netif_err(efx, probe, efx->net_dev, in efx_probe_channels()
572 efx_set_channel_names(efx); in efx_probe_channels()
577 efx_remove_channels(efx); in efx_probe_channels()
585 static void efx_start_datapath(struct efx_nic *efx) in efx_start_datapath() argument
587 bool old_rx_scatter = efx->rx_scatter; in efx_start_datapath()
597 efx->rx_dma_len = (efx->rx_prefix_size + in efx_start_datapath()
598 EFX_MAX_FRAME_LEN(efx->net_dev->mtu) + in efx_start_datapath()
599 efx->type->rx_buffer_padding); in efx_start_datapath()
601 efx->rx_ip_align + efx->rx_dma_len); in efx_start_datapath()
603 efx->rx_scatter = efx->type->always_rx_scatter; in efx_start_datapath()
604 efx->rx_buffer_order = 0; in efx_start_datapath()
605 } else if (efx->type->can_rx_scatter) { in efx_start_datapath()
611 efx->rx_scatter = true; in efx_start_datapath()
612 efx->rx_dma_len = EFX_RX_USR_BUF_SIZE; in efx_start_datapath()
613 efx->rx_buffer_order = 0; in efx_start_datapath()
615 efx->rx_scatter = false; in efx_start_datapath()
616 efx->rx_buffer_order = get_order(rx_buf_len); in efx_start_datapath()
619 efx_rx_config_page_split(efx); in efx_start_datapath()
620 if (efx->rx_buffer_order) in efx_start_datapath()
621 netif_dbg(efx, drv, efx->net_dev, in efx_start_datapath()
623 efx->rx_dma_len, efx->rx_buffer_order, in efx_start_datapath()
624 efx->rx_pages_per_batch); in efx_start_datapath()
626 netif_dbg(efx, drv, efx->net_dev, in efx_start_datapath()
628 efx->rx_dma_len, efx->rx_page_buf_step, in efx_start_datapath()
629 efx->rx_bufs_per_page, efx->rx_pages_per_batch); in efx_start_datapath()
632 if (efx->rx_scatter != old_rx_scatter) in efx_start_datapath()
633 efx->type->filter_update_rx_scatter(efx); in efx_start_datapath()
642 efx->txq_stop_thresh = efx->txq_entries - efx_tx_max_skb_descs(efx); in efx_start_datapath()
643 efx->txq_wake_thresh = efx->txq_stop_thresh / 2; in efx_start_datapath()
646 efx_for_each_channel(channel, efx) { in efx_start_datapath()
649 atomic_inc(&efx->active_queues); in efx_start_datapath()
654 atomic_inc(&efx->active_queues); in efx_start_datapath()
663 efx_ptp_start_datapath(efx); in efx_start_datapath()
665 if (netif_device_present(efx->net_dev)) in efx_start_datapath()
666 netif_tx_wake_all_queues(efx->net_dev); in efx_start_datapath()
669 static void efx_stop_datapath(struct efx_nic *efx) in efx_stop_datapath() argument
676 EFX_ASSERT_RESET_SERIALISED(efx); in efx_stop_datapath()
677 BUG_ON(efx->port_enabled); in efx_stop_datapath()
679 efx_ptp_stop_datapath(efx); in efx_stop_datapath()
682 efx_for_each_channel(channel, efx) { in efx_stop_datapath()
687 efx_for_each_channel(channel, efx) { in efx_stop_datapath()
700 rc = efx->type->fini_dmaq(efx); in efx_stop_datapath()
701 if (rc && EFX_WORKAROUND_7803(efx)) { in efx_stop_datapath()
707 netif_err(efx, drv, efx->net_dev, in efx_stop_datapath()
709 efx_schedule_reset(efx, RESET_TYPE_ALL); in efx_stop_datapath()
711 netif_err(efx, drv, efx->net_dev, "failed to flush queues\n"); in efx_stop_datapath()
713 netif_dbg(efx, drv, efx->net_dev, in efx_stop_datapath()
717 efx_for_each_channel(channel, efx) { in efx_stop_datapath()
730 netif_dbg(channel->efx, drv, channel->efx->net_dev, in efx_remove_channel()
741 static void efx_remove_channels(struct efx_nic *efx) in efx_remove_channels() argument
745 efx_for_each_channel(channel, efx) in efx_remove_channels()
750 efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries) in efx_realloc_channels() argument
757 rc = efx_check_disabled(efx); in efx_realloc_channels()
764 efx_for_each_channel(channel, efx) { in efx_realloc_channels()
783 efx_device_detach_sync(efx); in efx_realloc_channels()
784 efx_stop_all(efx); in efx_realloc_channels()
785 efx_soft_disable_interrupts(efx); in efx_realloc_channels()
789 for (i = 0; i < efx->n_channels; i++) { in efx_realloc_channels()
790 channel = efx->channel[i]; in efx_realloc_channels()
801 old_rxq_entries = efx->rxq_entries; in efx_realloc_channels()
802 old_txq_entries = efx->txq_entries; in efx_realloc_channels()
803 efx->rxq_entries = rxq_entries; in efx_realloc_channels()
804 efx->txq_entries = txq_entries; in efx_realloc_channels()
805 for (i = 0; i < efx->n_channels; i++) { in efx_realloc_channels()
806 channel = efx->channel[i]; in efx_realloc_channels()
807 efx->channel[i] = other_channel[i]; in efx_realloc_channels()
812 efx->next_buffer_table = next_buffer_table; in efx_realloc_channels()
814 for (i = 0; i < efx->n_channels; i++) { in efx_realloc_channels()
815 channel = efx->channel[i]; in efx_realloc_channels()
821 efx_init_napi_channel(efx->channel[i]); in efx_realloc_channels()
826 for (i = 0; i < efx->n_channels; i++) { in efx_realloc_channels()
835 rc2 = efx_soft_enable_interrupts(efx); in efx_realloc_channels()
838 netif_err(efx, drv, efx->net_dev, in efx_realloc_channels()
840 efx_schedule_reset(efx, RESET_TYPE_DISABLE); in efx_realloc_channels()
842 efx_start_all(efx); in efx_realloc_channels()
843 netif_device_attach(efx->net_dev); in efx_realloc_channels()
849 efx->rxq_entries = old_rxq_entries; in efx_realloc_channels()
850 efx->txq_entries = old_txq_entries; in efx_realloc_channels()
851 for (i = 0; i < efx->n_channels; i++) { in efx_realloc_channels()
852 channel = efx->channel[i]; in efx_realloc_channels()
853 efx->channel[i] = other_channel[i]; in efx_realloc_channels()
891 void efx_link_status_changed(struct efx_nic *efx) in efx_link_status_changed() argument
893 struct efx_link_state *link_state = &efx->link_state; in efx_link_status_changed()
899 if (!netif_running(efx->net_dev)) in efx_link_status_changed()
902 if (link_state->up != netif_carrier_ok(efx->net_dev)) { in efx_link_status_changed()
903 efx->n_link_state_changes++; in efx_link_status_changed()
906 netif_carrier_on(efx->net_dev); in efx_link_status_changed()
908 netif_carrier_off(efx->net_dev); in efx_link_status_changed()
913 netif_info(efx, link, efx->net_dev, in efx_link_status_changed()
916 efx->net_dev->mtu); in efx_link_status_changed()
918 netif_info(efx, link, efx->net_dev, "link down\n"); in efx_link_status_changed()
921 void efx_link_set_advertising(struct efx_nic *efx, u32 advertising) in efx_link_set_advertising() argument
923 efx->link_advertising = advertising; in efx_link_set_advertising()
926 efx->wanted_fc |= (EFX_FC_TX | EFX_FC_RX); in efx_link_set_advertising()
928 efx->wanted_fc &= ~(EFX_FC_TX | EFX_FC_RX); in efx_link_set_advertising()
930 efx->wanted_fc ^= EFX_FC_TX; in efx_link_set_advertising()
934 void efx_link_set_wanted_fc(struct efx_nic *efx, u8 wanted_fc) in efx_link_set_wanted_fc() argument
936 efx->wanted_fc = wanted_fc; in efx_link_set_wanted_fc()
937 if (efx->link_advertising) { in efx_link_set_wanted_fc()
939 efx->link_advertising |= (ADVERTISED_Pause | in efx_link_set_wanted_fc()
942 efx->link_advertising &= ~(ADVERTISED_Pause | in efx_link_set_wanted_fc()
945 efx->link_advertising ^= ADVERTISED_Asym_Pause; in efx_link_set_wanted_fc()
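Read together, the two flow-control helpers plausibly pair up as sketched below; the tests on advertising and wanted_fc are assumptions inferred from the |=, &= and ^= updates shown in the fragments:

    void efx_link_set_advertising(struct efx_nic *efx, u32 advertising)
    {
            efx->link_advertising = advertising;
            if (advertising) {
                    if (advertising & ADVERTISED_Pause)          /* assumed test */
                            efx->wanted_fc |= (EFX_FC_TX | EFX_FC_RX);
                    else
                            efx->wanted_fc &= ~(EFX_FC_TX | EFX_FC_RX);
                    if (advertising & ADVERTISED_Asym_Pause)     /* assumed test */
                            efx->wanted_fc ^= EFX_FC_TX;
            }
    }

    void efx_link_set_wanted_fc(struct efx_nic *efx, u8 wanted_fc)
    {
            efx->wanted_fc = wanted_fc;
            if (efx->link_advertising) {
                    if (wanted_fc & EFX_FC_RX)                   /* assumed test */
                            efx->link_advertising |= (ADVERTISED_Pause |
                                                      ADVERTISED_Asym_Pause);
                    else
                            efx->link_advertising &= ~(ADVERTISED_Pause |
                                                       ADVERTISED_Asym_Pause);
                    if (wanted_fc & EFX_FC_TX)                   /* assumed test */
                            efx->link_advertising ^= ADVERTISED_Asym_Pause;
            }
    }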
949 static void efx_fini_port(struct efx_nic *efx);
958 int __efx_reconfigure_port(struct efx_nic *efx) in __efx_reconfigure_port() argument
963 WARN_ON(!mutex_is_locked(&efx->mac_lock)); in __efx_reconfigure_port()
966 phy_mode = efx->phy_mode; in __efx_reconfigure_port()
967 if (LOOPBACK_INTERNAL(efx)) in __efx_reconfigure_port()
968 efx->phy_mode |= PHY_MODE_TX_DISABLED; in __efx_reconfigure_port()
970 efx->phy_mode &= ~PHY_MODE_TX_DISABLED; in __efx_reconfigure_port()
972 rc = efx->type->reconfigure_port(efx); in __efx_reconfigure_port()
975 efx->phy_mode = phy_mode; in __efx_reconfigure_port()
982 int efx_reconfigure_port(struct efx_nic *efx) in efx_reconfigure_port() argument
986 EFX_ASSERT_RESET_SERIALISED(efx); in efx_reconfigure_port()
988 mutex_lock(&efx->mac_lock); in efx_reconfigure_port()
989 rc = __efx_reconfigure_port(efx); in efx_reconfigure_port()
990 mutex_unlock(&efx->mac_lock); in efx_reconfigure_port()
1000 struct efx_nic *efx = container_of(data, struct efx_nic, mac_work); in efx_mac_work() local
1002 mutex_lock(&efx->mac_lock); in efx_mac_work()
1003 if (efx->port_enabled) in efx_mac_work()
1004 efx->type->reconfigure_mac(efx); in efx_mac_work()
1005 mutex_unlock(&efx->mac_lock); in efx_mac_work()
1008 static int efx_probe_port(struct efx_nic *efx) in efx_probe_port() argument
1012 netif_dbg(efx, probe, efx->net_dev, "create port\n"); in efx_probe_port()
1015 efx->phy_mode = PHY_MODE_SPECIAL; in efx_probe_port()
1018 rc = efx->type->probe_port(efx); in efx_probe_port()
1023 ether_addr_copy(efx->net_dev->dev_addr, efx->net_dev->perm_addr); in efx_probe_port()
1028 static int efx_init_port(struct efx_nic *efx) in efx_init_port() argument
1032 netif_dbg(efx, drv, efx->net_dev, "init port\n"); in efx_init_port()
1034 mutex_lock(&efx->mac_lock); in efx_init_port()
1036 rc = efx->phy_op->init(efx); in efx_init_port()
1040 efx->port_initialized = true; in efx_init_port()
1044 efx->type->reconfigure_mac(efx); in efx_init_port()
1047 rc = efx->phy_op->reconfigure(efx); in efx_init_port()
1051 mutex_unlock(&efx->mac_lock); in efx_init_port()
1055 efx->phy_op->fini(efx); in efx_init_port()
1057 mutex_unlock(&efx->mac_lock); in efx_init_port()
1061 static void efx_start_port(struct efx_nic *efx) in efx_start_port() argument
1063 netif_dbg(efx, ifup, efx->net_dev, "start port\n"); in efx_start_port()
1064 BUG_ON(efx->port_enabled); in efx_start_port()
1066 mutex_lock(&efx->mac_lock); in efx_start_port()
1067 efx->port_enabled = true; in efx_start_port()
1070 efx->type->reconfigure_mac(efx); in efx_start_port()
1072 mutex_unlock(&efx->mac_lock); in efx_start_port()
1080 static void efx_stop_port(struct efx_nic *efx) in efx_stop_port() argument
1082 netif_dbg(efx, ifdown, efx->net_dev, "stop port\n"); in efx_stop_port()
1084 EFX_ASSERT_RESET_SERIALISED(efx); in efx_stop_port()
1086 mutex_lock(&efx->mac_lock); in efx_stop_port()
1087 efx->port_enabled = false; in efx_stop_port()
1088 mutex_unlock(&efx->mac_lock); in efx_stop_port()
1091 netif_addr_lock_bh(efx->net_dev); in efx_stop_port()
1092 netif_addr_unlock_bh(efx->net_dev); in efx_stop_port()
1094 cancel_delayed_work_sync(&efx->monitor_work); in efx_stop_port()
1095 efx_selftest_async_cancel(efx); in efx_stop_port()
1096 cancel_work_sync(&efx->mac_work); in efx_stop_port()
1099 static void efx_fini_port(struct efx_nic *efx) in efx_fini_port() argument
1101 netif_dbg(efx, drv, efx->net_dev, "shut down port\n"); in efx_fini_port()
1103 if (!efx->port_initialized) in efx_fini_port()
1106 efx->phy_op->fini(efx); in efx_fini_port()
1107 efx->port_initialized = false; in efx_fini_port()
1109 efx->link_state.up = false; in efx_fini_port()
1110 efx_link_status_changed(efx); in efx_fini_port()
1113 static void efx_remove_port(struct efx_nic *efx) in efx_remove_port() argument
1115 netif_dbg(efx, drv, efx->net_dev, "destroying port\n"); in efx_remove_port()
1117 efx->type->remove_port(efx); in efx_remove_port()
1136 static void efx_associate(struct efx_nic *efx) in efx_associate() argument
1140 if (efx->primary == efx) { in efx_associate()
1143 netif_dbg(efx, probe, efx->net_dev, "adding to primary list\n"); in efx_associate()
1144 list_add_tail(&efx->node, &efx_primary_list); in efx_associate()
1148 if (efx_same_controller(efx, other)) { in efx_associate()
1152 pci_name(efx->pci_dev), in efx_associate()
1153 efx->net_dev->name); in efx_associate()
1155 &efx->secondary_list); in efx_associate()
1156 other->primary = efx; in efx_associate()
1163 if (efx_same_controller(efx, other)) { in efx_associate()
1164 netif_dbg(efx, probe, efx->net_dev, in efx_associate()
1168 list_add_tail(&efx->node, in efx_associate()
1170 efx->primary = other; in efx_associate()
1175 netif_dbg(efx, probe, efx->net_dev, in efx_associate()
1177 list_add_tail(&efx->node, &efx_unassociated_list); in efx_associate()
1181 static void efx_dissociate(struct efx_nic *efx) in efx_dissociate() argument
1185 list_del(&efx->node); in efx_dissociate()
1186 efx->primary = NULL; in efx_dissociate()
1188 list_for_each_entry_safe(other, next, &efx->secondary_list, node) { in efx_dissociate()
1198 static int efx_init_io(struct efx_nic *efx) in efx_init_io() argument
1200 struct pci_dev *pci_dev = efx->pci_dev; in efx_init_io()
1201 dma_addr_t dma_mask = efx->type->max_dma_mask; in efx_init_io()
1202 unsigned int mem_map_size = efx->type->mem_map_size(efx); in efx_init_io()
1205 netif_dbg(efx, probe, efx->net_dev, "initialising I/O\n"); in efx_init_io()
1209 netif_err(efx, probe, efx->net_dev, in efx_init_io()
1230 netif_err(efx, probe, efx->net_dev, in efx_init_io()
1234 netif_dbg(efx, probe, efx->net_dev, in efx_init_io()
1237 efx->membase_phys = pci_resource_start(efx->pci_dev, EFX_MEM_BAR); in efx_init_io()
1240 netif_err(efx, probe, efx->net_dev, in efx_init_io()
1245 efx->membase = ioremap_nocache(efx->membase_phys, mem_map_size); in efx_init_io()
1246 if (!efx->membase) { in efx_init_io()
1247 netif_err(efx, probe, efx->net_dev, in efx_init_io()
1249 (unsigned long long)efx->membase_phys, mem_map_size); in efx_init_io()
1253 netif_dbg(efx, probe, efx->net_dev, in efx_init_io()
1255 (unsigned long long)efx->membase_phys, mem_map_size, in efx_init_io()
1256 efx->membase); in efx_init_io()
1261 pci_release_region(efx->pci_dev, EFX_MEM_BAR); in efx_init_io()
1263 efx->membase_phys = 0; in efx_init_io()
1265 pci_disable_device(efx->pci_dev); in efx_init_io()
1270 static void efx_fini_io(struct efx_nic *efx) in efx_fini_io() argument
1272 netif_dbg(efx, drv, efx->net_dev, "shutting down I/O\n"); in efx_fini_io()
1274 if (efx->membase) { in efx_fini_io()
1275 iounmap(efx->membase); in efx_fini_io()
1276 efx->membase = NULL; in efx_fini_io()
1279 if (efx->membase_phys) { in efx_fini_io()
1280 pci_release_region(efx->pci_dev, EFX_MEM_BAR); in efx_fini_io()
1281 efx->membase_phys = 0; in efx_fini_io()
1284 pci_disable_device(efx->pci_dev); in efx_fini_io()
1287 static unsigned int efx_wanted_parallelism(struct efx_nic *efx) in efx_wanted_parallelism() argument
1297 netif_warn(efx, probe, efx->net_dev, in efx_wanted_parallelism()
1317 if (efx->type->sriov_wanted(efx) && efx_vf_size(efx) > 1 && in efx_wanted_parallelism()
1318 count > efx_vf_size(efx)) { in efx_wanted_parallelism()
1319 netif_warn(efx, probe, efx->net_dev, in efx_wanted_parallelism()
1323 count, efx_vf_size(efx)); in efx_wanted_parallelism()
1324 count = efx_vf_size(efx); in efx_wanted_parallelism()
1333 static int efx_probe_interrupts(struct efx_nic *efx) in efx_probe_interrupts() argument
1340 if (efx->extra_channel_type[i]) in efx_probe_interrupts()
1343 if (efx->interrupt_mode == EFX_INT_MODE_MSIX) { in efx_probe_interrupts()
1347 n_channels = efx_wanted_parallelism(efx); in efx_probe_interrupts()
1351 n_channels = min(n_channels, efx->max_channels); in efx_probe_interrupts()
1355 rc = pci_enable_msix_range(efx->pci_dev, in efx_probe_interrupts()
1359 efx->interrupt_mode = EFX_INT_MODE_MSI; in efx_probe_interrupts()
1360 netif_err(efx, drv, efx->net_dev, in efx_probe_interrupts()
1363 netif_err(efx, drv, efx->net_dev, in efx_probe_interrupts()
1366 netif_err(efx, drv, efx->net_dev, in efx_probe_interrupts()
1372 efx->n_channels = n_channels; in efx_probe_interrupts()
1376 efx->n_tx_channels = max(n_channels / 2, 1U); in efx_probe_interrupts()
1377 efx->n_rx_channels = max(n_channels - in efx_probe_interrupts()
1378 efx->n_tx_channels, in efx_probe_interrupts()
1381 efx->n_tx_channels = n_channels; in efx_probe_interrupts()
1382 efx->n_rx_channels = n_channels; in efx_probe_interrupts()
1384 for (i = 0; i < efx->n_channels; i++) in efx_probe_interrupts()
1385 efx_get_channel(efx, i)->irq = in efx_probe_interrupts()
1391 if (efx->interrupt_mode == EFX_INT_MODE_MSI) { in efx_probe_interrupts()
1392 efx->n_channels = 1; in efx_probe_interrupts()
1393 efx->n_rx_channels = 1; in efx_probe_interrupts()
1394 efx->n_tx_channels = 1; in efx_probe_interrupts()
1395 rc = pci_enable_msi(efx->pci_dev); in efx_probe_interrupts()
1397 efx_get_channel(efx, 0)->irq = efx->pci_dev->irq; in efx_probe_interrupts()
1399 netif_err(efx, drv, efx->net_dev, in efx_probe_interrupts()
1401 efx->interrupt_mode = EFX_INT_MODE_LEGACY; in efx_probe_interrupts()
1406 if (efx->interrupt_mode == EFX_INT_MODE_LEGACY) { in efx_probe_interrupts()
1407 efx->n_channels = 1 + (separate_tx_channels ? 1 : 0); in efx_probe_interrupts()
1408 efx->n_rx_channels = 1; in efx_probe_interrupts()
1409 efx->n_tx_channels = 1; in efx_probe_interrupts()
1410 efx->legacy_irq = efx->pci_dev->irq; in efx_probe_interrupts()
1414 j = efx->n_channels; in efx_probe_interrupts()
1416 if (!efx->extra_channel_type[i]) in efx_probe_interrupts()
1418 if (efx->interrupt_mode != EFX_INT_MODE_MSIX || in efx_probe_interrupts()
1419 efx->n_channels <= extra_channels) { in efx_probe_interrupts()
1420 efx->extra_channel_type[i]->handle_no_channel(efx); in efx_probe_interrupts()
1423 efx_get_channel(efx, j)->type = in efx_probe_interrupts()
1424 efx->extra_channel_type[i]; in efx_probe_interrupts()
1430 efx->rss_spread = ((efx->n_rx_channels > 1 || in efx_probe_interrupts()
1431 !efx->type->sriov_wanted(efx)) ? in efx_probe_interrupts()
1432 efx->n_rx_channels : efx_vf_size(efx)); in efx_probe_interrupts()
1437 static int efx_soft_enable_interrupts(struct efx_nic *efx) in efx_soft_enable_interrupts() argument
1442 BUG_ON(efx->state == STATE_DISABLED); in efx_soft_enable_interrupts()
1444 efx->irq_soft_enabled = true; in efx_soft_enable_interrupts()
1447 efx_for_each_channel(channel, efx) { in efx_soft_enable_interrupts()
1456 efx_mcdi_mode_event(efx); in efx_soft_enable_interrupts()
1461 efx_for_each_channel(channel, efx) { in efx_soft_enable_interrupts()
1472 static void efx_soft_disable_interrupts(struct efx_nic *efx) in efx_soft_disable_interrupts() argument
1476 if (efx->state == STATE_DISABLED) in efx_soft_disable_interrupts()
1479 efx_mcdi_mode_poll(efx); in efx_soft_disable_interrupts()
1481 efx->irq_soft_enabled = false; in efx_soft_disable_interrupts()
1484 if (efx->legacy_irq) in efx_soft_disable_interrupts()
1485 synchronize_irq(efx->legacy_irq); in efx_soft_disable_interrupts()
1487 efx_for_each_channel(channel, efx) { in efx_soft_disable_interrupts()
1497 efx_mcdi_flush_async(efx); in efx_soft_disable_interrupts()
1500 static int efx_enable_interrupts(struct efx_nic *efx) in efx_enable_interrupts() argument
1505 BUG_ON(efx->state == STATE_DISABLED); in efx_enable_interrupts()
1507 if (efx->eeh_disabled_legacy_irq) { in efx_enable_interrupts()
1508 enable_irq(efx->legacy_irq); in efx_enable_interrupts()
1509 efx->eeh_disabled_legacy_irq = false; in efx_enable_interrupts()
1512 efx->type->irq_enable_master(efx); in efx_enable_interrupts()
1514 efx_for_each_channel(channel, efx) { in efx_enable_interrupts()
1522 rc = efx_soft_enable_interrupts(efx); in efx_enable_interrupts()
1530 efx_for_each_channel(channel, efx) { in efx_enable_interrupts()
1537 efx->type->irq_disable_non_ev(efx); in efx_enable_interrupts()
1542 static void efx_disable_interrupts(struct efx_nic *efx) in efx_disable_interrupts() argument
1546 efx_soft_disable_interrupts(efx); in efx_disable_interrupts()
1548 efx_for_each_channel(channel, efx) { in efx_disable_interrupts()
1553 efx->type->irq_disable_non_ev(efx); in efx_disable_interrupts()
1556 static void efx_remove_interrupts(struct efx_nic *efx) in efx_remove_interrupts() argument
1561 efx_for_each_channel(channel, efx) in efx_remove_interrupts()
1563 pci_disable_msi(efx->pci_dev); in efx_remove_interrupts()
1564 pci_disable_msix(efx->pci_dev); in efx_remove_interrupts()
1567 efx->legacy_irq = 0; in efx_remove_interrupts()
1570 static void efx_set_channels(struct efx_nic *efx) in efx_set_channels() argument
1575 efx->tx_channel_offset = in efx_set_channels()
1576 separate_tx_channels ? efx->n_channels - efx->n_tx_channels : 0; in efx_set_channels()
1582 efx_for_each_channel(channel, efx) { in efx_set_channels()
1583 if (channel->channel < efx->n_rx_channels) in efx_set_channels()
1589 tx_queue->queue -= (efx->tx_channel_offset * in efx_set_channels()
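The channel/queue bookkeeping in efx_set_channels() probably fills out as follows; the rx_queue.core_index assignments and the EFX_TXQ_TYPES multiplier are assumptions added around the fragments above:

    static void efx_set_channels(struct efx_nic *efx)
    {
            struct efx_channel *channel;
            struct efx_tx_queue *tx_queue;

            /* TX-only channels, if configured, sit after the RX channels */
            efx->tx_channel_offset =
                    separate_tx_channels ? efx->n_channels - efx->n_tx_channels : 0;

            efx_for_each_channel(channel, efx) {
                    if (channel->channel < efx->n_rx_channels)
                            channel->rx_queue.core_index = channel->channel; /* assumed */
                    else
                            channel->rx_queue.core_index = -1;               /* assumed */

                    /* renumber TX queues so TX-only channels start from 0 */
                    efx_for_each_channel_tx_queue(tx_queue, channel)
                            tx_queue->queue -= (efx->tx_channel_offset *
                                                EFX_TXQ_TYPES);              /* assumed macro */
            }
    }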
1594 static int efx_probe_nic(struct efx_nic *efx) in efx_probe_nic() argument
1599 netif_dbg(efx, probe, efx->net_dev, "creating NIC\n"); in efx_probe_nic()
1602 rc = efx->type->probe(efx); in efx_probe_nic()
1608 rc = efx_probe_interrupts(efx); in efx_probe_nic()
1612 efx_set_channels(efx); in efx_probe_nic()
1614 rc = efx->type->dimension_resources(efx); in efx_probe_nic()
1618 if (efx->n_channels > 1) in efx_probe_nic()
1619 netdev_rss_key_fill(&efx->rx_hash_key, sizeof(efx->rx_hash_key)); in efx_probe_nic()
1620 for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); i++) in efx_probe_nic()
1621 efx->rx_indir_table[i] = in efx_probe_nic()
1622 ethtool_rxfh_indir_default(i, efx->rss_spread); in efx_probe_nic()
1624 netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels); in efx_probe_nic()
1625 netif_set_real_num_rx_queues(efx->net_dev, efx->n_rx_channels); in efx_probe_nic()
1628 efx_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec, true, in efx_probe_nic()
1634 efx_remove_interrupts(efx); in efx_probe_nic()
1636 efx->type->remove(efx); in efx_probe_nic()
1640 static void efx_remove_nic(struct efx_nic *efx) in efx_remove_nic() argument
1642 netif_dbg(efx, drv, efx->net_dev, "destroying NIC\n"); in efx_remove_nic()
1644 efx_remove_interrupts(efx); in efx_remove_nic()
1645 efx->type->remove(efx); in efx_remove_nic()
1648 static int efx_probe_filters(struct efx_nic *efx) in efx_probe_filters() argument
1652 spin_lock_init(&efx->filter_lock); in efx_probe_filters()
1654 rc = efx->type->filter_table_probe(efx); in efx_probe_filters()
1659 if (efx->type->offload_features & NETIF_F_NTUPLE) { in efx_probe_filters()
1660 efx->rps_flow_id = kcalloc(efx->type->max_rx_ip_filters, in efx_probe_filters()
1661 sizeof(*efx->rps_flow_id), in efx_probe_filters()
1663 if (!efx->rps_flow_id) { in efx_probe_filters()
1664 efx->type->filter_table_remove(efx); in efx_probe_filters()
1673 static void efx_remove_filters(struct efx_nic *efx) in efx_remove_filters() argument
1676 kfree(efx->rps_flow_id); in efx_remove_filters()
1678 efx->type->filter_table_remove(efx); in efx_remove_filters()
1681 static void efx_restore_filters(struct efx_nic *efx) in efx_restore_filters() argument
1683 efx->type->filter_table_restore(efx); in efx_restore_filters()
1692 static int efx_probe_all(struct efx_nic *efx) in efx_probe_all() argument
1696 rc = efx_probe_nic(efx); in efx_probe_all()
1698 netif_err(efx, probe, efx->net_dev, "failed to create NIC\n"); in efx_probe_all()
1702 rc = efx_probe_port(efx); in efx_probe_all()
1704 netif_err(efx, probe, efx->net_dev, "failed to create port\n"); in efx_probe_all()
1709 if (WARN_ON(EFX_DEFAULT_DMAQ_SIZE < EFX_TXQ_MIN_ENT(efx))) { in efx_probe_all()
1713 efx->rxq_entries = efx->txq_entries = EFX_DEFAULT_DMAQ_SIZE; in efx_probe_all()
1715 rc = efx_probe_filters(efx); in efx_probe_all()
1717 netif_err(efx, probe, efx->net_dev, in efx_probe_all()
1722 rc = efx_probe_channels(efx); in efx_probe_all()
1729 efx_remove_filters(efx); in efx_probe_all()
1731 efx_remove_port(efx); in efx_probe_all()
1733 efx_remove_nic(efx); in efx_probe_all()
1745 static void efx_start_all(struct efx_nic *efx) in efx_start_all() argument
1747 EFX_ASSERT_RESET_SERIALISED(efx); in efx_start_all()
1748 BUG_ON(efx->state == STATE_DISABLED); in efx_start_all()
1752 if (efx->port_enabled || !netif_running(efx->net_dev) || in efx_start_all()
1753 efx->reset_pending) in efx_start_all()
1756 efx_start_port(efx); in efx_start_all()
1757 efx_start_datapath(efx); in efx_start_all()
1760 if (efx->type->monitor != NULL) in efx_start_all()
1761 queue_delayed_work(efx->workqueue, &efx->monitor_work, in efx_start_all()
1767 if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) { in efx_start_all()
1768 mutex_lock(&efx->mac_lock); in efx_start_all()
1769 if (efx->phy_op->poll(efx)) in efx_start_all()
1770 efx_link_status_changed(efx); in efx_start_all()
1771 mutex_unlock(&efx->mac_lock); in efx_start_all()
1774 efx->type->start_stats(efx); in efx_start_all()
1775 efx->type->pull_stats(efx); in efx_start_all()
1776 spin_lock_bh(&efx->stats_lock); in efx_start_all()
1777 efx->type->update_stats(efx, NULL, NULL); in efx_start_all()
1778 spin_unlock_bh(&efx->stats_lock); in efx_start_all()
1786 static void efx_stop_all(struct efx_nic *efx) in efx_stop_all() argument
1788 EFX_ASSERT_RESET_SERIALISED(efx); in efx_stop_all()
1791 if (!efx->port_enabled) in efx_stop_all()
1797 efx->type->pull_stats(efx); in efx_stop_all()
1798 spin_lock_bh(&efx->stats_lock); in efx_stop_all()
1799 efx->type->update_stats(efx, NULL, NULL); in efx_stop_all()
1800 spin_unlock_bh(&efx->stats_lock); in efx_stop_all()
1801 efx->type->stop_stats(efx); in efx_stop_all()
1802 efx_stop_port(efx); in efx_stop_all()
1808 WARN_ON(netif_running(efx->net_dev) && in efx_stop_all()
1809 netif_device_present(efx->net_dev)); in efx_stop_all()
1810 netif_tx_disable(efx->net_dev); in efx_stop_all()
1812 efx_stop_datapath(efx); in efx_stop_all()
1815 static void efx_remove_all(struct efx_nic *efx) in efx_remove_all() argument
1817 efx_remove_channels(efx); in efx_remove_all()
1818 efx_remove_filters(efx); in efx_remove_all()
1819 efx_remove_port(efx); in efx_remove_all()
1820 efx_remove_nic(efx); in efx_remove_all()
1839 int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs, in efx_init_irq_moderation() argument
1844 unsigned int irq_mod_max = DIV_ROUND_UP(efx->type->timer_period_max * in efx_init_irq_moderation()
1845 efx->timer_quantum_ns, in efx_init_irq_moderation()
1850 EFX_ASSERT_RESET_SERIALISED(efx); in efx_init_irq_moderation()
1855 tx_ticks = irq_mod_ticks(tx_usecs, efx->timer_quantum_ns); in efx_init_irq_moderation()
1856 rx_ticks = irq_mod_ticks(rx_usecs, efx->timer_quantum_ns); in efx_init_irq_moderation()
1858 if (tx_ticks != rx_ticks && efx->tx_channel_offset == 0 && in efx_init_irq_moderation()
1860 netif_err(efx, drv, efx->net_dev, "Channels are shared. " in efx_init_irq_moderation()
1865 efx->irq_rx_adaptive = rx_adaptive; in efx_init_irq_moderation()
1866 efx->irq_rx_moderation = rx_ticks; in efx_init_irq_moderation()
1867 efx_for_each_channel(channel, efx) { in efx_init_irq_moderation()
1877 void efx_get_irq_moderation(struct efx_nic *efx, unsigned int *tx_usecs, in efx_get_irq_moderation() argument
1884 *rx_adaptive = efx->irq_rx_adaptive; in efx_get_irq_moderation()
1885 *rx_usecs = DIV_ROUND_UP(efx->irq_rx_moderation * in efx_get_irq_moderation()
1886 efx->timer_quantum_ns, in efx_get_irq_moderation()
1893 if (efx->tx_channel_offset == 0) in efx_get_irq_moderation()
1897 efx->channel[efx->tx_channel_offset]->irq_moderation * in efx_get_irq_moderation()
1898 efx->timer_quantum_ns, in efx_get_irq_moderation()
1911 struct efx_nic *efx = container_of(data, struct efx_nic, in efx_monitor() local
1914 netif_vdbg(efx, timer, efx->net_dev, in efx_monitor()
1917 BUG_ON(efx->type->monitor == NULL); in efx_monitor()
1922 if (mutex_trylock(&efx->mac_lock)) { in efx_monitor()
1923 if (efx->port_enabled) in efx_monitor()
1924 efx->type->monitor(efx); in efx_monitor()
1925 mutex_unlock(&efx->mac_lock); in efx_monitor()
1928 queue_delayed_work(efx->workqueue, &efx->monitor_work, in efx_monitor()
1943 struct efx_nic *efx = netdev_priv(net_dev); in efx_ioctl() local
1947 return efx_ptp_set_ts_config(efx, ifr); in efx_ioctl()
1949 return efx_ptp_get_ts_config(efx, ifr); in efx_ioctl()
1956 return mdio_mii_ioctl(&efx->mdio, data, cmd); in efx_ioctl()
1967 struct efx_nic *efx = channel->efx; in efx_init_napi_channel() local
1969 channel->napi_dev = efx->net_dev; in efx_init_napi_channel()
1976 static void efx_init_napi(struct efx_nic *efx) in efx_init_napi() argument
1980 efx_for_each_channel(channel, efx) in efx_init_napi()
1993 static void efx_fini_napi(struct efx_nic *efx) in efx_fini_napi() argument
1997 efx_for_each_channel(channel, efx) in efx_fini_napi()
2015 struct efx_nic *efx = netdev_priv(net_dev); in efx_netpoll() local
2018 efx_for_each_channel(channel, efx) in efx_netpoll()
2029 struct efx_nic *efx = channel->efx; in efx_busy_poll() local
2033 if (!netif_running(efx->net_dev)) in efx_busy_poll()
2064 struct efx_nic *efx = netdev_priv(net_dev); in efx_net_open() local
2067 netif_dbg(efx, ifup, efx->net_dev, "opening device on CPU %d\n", in efx_net_open()
2070 rc = efx_check_disabled(efx); in efx_net_open()
2073 if (efx->phy_mode & PHY_MODE_SPECIAL) in efx_net_open()
2075 if (efx_mcdi_poll_reboot(efx) && efx_reset(efx, RESET_TYPE_ALL)) in efx_net_open()
2080 efx_link_status_changed(efx); in efx_net_open()
2082 efx_start_all(efx); in efx_net_open()
2083 efx_selftest_async_start(efx); in efx_net_open()
2093 struct efx_nic *efx = netdev_priv(net_dev); in efx_net_stop() local
2095 netif_dbg(efx, ifdown, efx->net_dev, "closing on CPU %d\n", in efx_net_stop()
2099 efx_stop_all(efx); in efx_net_stop()
2108 struct efx_nic *efx = netdev_priv(net_dev); in efx_net_stats() local
2110 spin_lock_bh(&efx->stats_lock); in efx_net_stats()
2111 efx->type->update_stats(efx, NULL, stats); in efx_net_stats()
2112 spin_unlock_bh(&efx->stats_lock); in efx_net_stats()
2120 struct efx_nic *efx = netdev_priv(net_dev); in efx_watchdog() local
2122 netif_err(efx, tx_err, efx->net_dev, in efx_watchdog()
2124 efx->port_enabled); in efx_watchdog()
2126 efx_schedule_reset(efx, RESET_TYPE_TX_WATCHDOG); in efx_watchdog()
2133 struct efx_nic *efx = netdev_priv(net_dev); in efx_change_mtu() local
2136 rc = efx_check_disabled(efx); in efx_change_mtu()
2142 netif_dbg(efx, drv, efx->net_dev, "changing MTU to %d\n", new_mtu); in efx_change_mtu()
2144 efx_device_detach_sync(efx); in efx_change_mtu()
2145 efx_stop_all(efx); in efx_change_mtu()
2147 mutex_lock(&efx->mac_lock); in efx_change_mtu()
2149 efx->type->reconfigure_mac(efx); in efx_change_mtu()
2150 mutex_unlock(&efx->mac_lock); in efx_change_mtu()
2152 efx_start_all(efx); in efx_change_mtu()
2153 netif_device_attach(efx->net_dev); in efx_change_mtu()
2159 struct efx_nic *efx = netdev_priv(net_dev); in efx_set_mac_address() local
2164 netif_err(efx, drv, efx->net_dev, in efx_set_mac_address()
2171 efx->type->sriov_mac_address_changed(efx); in efx_set_mac_address()
2174 mutex_lock(&efx->mac_lock); in efx_set_mac_address()
2175 efx->type->reconfigure_mac(efx); in efx_set_mac_address()
2176 mutex_unlock(&efx->mac_lock); in efx_set_mac_address()
2184 struct efx_nic *efx = netdev_priv(net_dev); in efx_set_rx_mode() local
2186 if (efx->port_enabled) in efx_set_rx_mode()
2187 queue_work(efx->workqueue, &efx->mac_work); in efx_set_rx_mode()
2193 struct efx_nic *efx = netdev_priv(net_dev); in efx_set_features() local
2197 return efx->type->filter_clear_rx(efx, EFX_FILTER_PRI_MANUAL); in efx_set_features()
2255 static void efx_update_name(struct efx_nic *efx) in efx_update_name() argument
2257 strcpy(efx->name, efx->net_dev->name); in efx_update_name()
2258 efx_mtd_rename(efx); in efx_update_name()
2259 efx_set_channel_names(efx); in efx_update_name()
2282 struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev)); in show_phy_type() local
2283 return sprintf(buf, "%d\n", efx->phy_type); in show_phy_type()
2287 static int efx_register_netdev(struct efx_nic *efx) in efx_register_netdev() argument
2289 struct net_device *net_dev = efx->net_dev; in efx_register_netdev()
2294 net_dev->irq = efx->pci_dev->irq; in efx_register_netdev()
2295 if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0) { in efx_register_netdev()
2310 efx->state = STATE_READY; in efx_register_netdev()
2312 if (efx->reset_pending) { in efx_register_netdev()
2313 netif_err(efx, probe, efx->net_dev, in efx_register_netdev()
2322 efx_update_name(efx); in efx_register_netdev()
2331 efx_for_each_channel(channel, efx) { in efx_register_netdev()
2337 efx_associate(efx); in efx_register_netdev()
2341 rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_type); in efx_register_netdev()
2343 netif_err(efx, drv, efx->net_dev, in efx_register_netdev()
2352 efx_dissociate(efx); in efx_register_netdev()
2355 efx->state = STATE_UNINIT; in efx_register_netdev()
2357 netif_err(efx, drv, efx->net_dev, "could not register net dev\n"); in efx_register_netdev()
2361 static void efx_unregister_netdev(struct efx_nic *efx) in efx_unregister_netdev() argument
2363 if (!efx->net_dev) in efx_unregister_netdev()
2366 BUG_ON(netdev_priv(efx->net_dev) != efx); in efx_unregister_netdev()
2368 strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name)); in efx_unregister_netdev()
2369 device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type); in efx_unregister_netdev()
2372 unregister_netdevice(efx->net_dev); in efx_unregister_netdev()
2373 efx->state = STATE_UNINIT; in efx_unregister_netdev()
2385 void efx_reset_down(struct efx_nic *efx, enum reset_type method) in efx_reset_down() argument
2387 EFX_ASSERT_RESET_SERIALISED(efx); in efx_reset_down()
2390 efx->type->prepare_flr(efx); in efx_reset_down()
2392 efx_stop_all(efx); in efx_reset_down()
2393 efx_disable_interrupts(efx); in efx_reset_down()
2395 mutex_lock(&efx->mac_lock); in efx_reset_down()
2396 if (efx->port_initialized && method != RESET_TYPE_INVISIBLE) in efx_reset_down()
2397 efx->phy_op->fini(efx); in efx_reset_down()
2398 efx->type->fini(efx); in efx_reset_down()
2406 int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok) in efx_reset_up() argument
2410 EFX_ASSERT_RESET_SERIALISED(efx); in efx_reset_up()
2413 efx->type->finish_flr(efx); in efx_reset_up()
2416 rc = efx->type->init(efx); in efx_reset_up()
2418 netif_err(efx, drv, efx->net_dev, "failed to initialise NIC\n"); in efx_reset_up()
2425 if (efx->port_initialized && method != RESET_TYPE_INVISIBLE) { in efx_reset_up()
2426 rc = efx->phy_op->init(efx); in efx_reset_up()
2429 if (efx->phy_op->reconfigure(efx)) in efx_reset_up()
2430 netif_err(efx, drv, efx->net_dev, in efx_reset_up()
2434 rc = efx_enable_interrupts(efx); in efx_reset_up()
2437 efx_restore_filters(efx); in efx_reset_up()
2438 efx->type->sriov_reset(efx); in efx_reset_up()
2440 mutex_unlock(&efx->mac_lock); in efx_reset_up()
2442 efx_start_all(efx); in efx_reset_up()
2447 efx->port_initialized = false; in efx_reset_up()
2449 mutex_unlock(&efx->mac_lock); in efx_reset_up()
2459 int efx_reset(struct efx_nic *efx, enum reset_type method) in efx_reset() argument
2464 netif_info(efx, drv, efx->net_dev, "resetting (%s)\n", in efx_reset()
2467 efx_device_detach_sync(efx); in efx_reset()
2468 efx_reset_down(efx, method); in efx_reset()
2470 rc = efx->type->reset(efx, method); in efx_reset()
2472 netif_err(efx, drv, efx->net_dev, "failed to reset hardware\n"); in efx_reset()
2480 efx->reset_pending &= -(1 << (method + 1)); in efx_reset()
2482 __clear_bit(method, &efx->reset_pending); in efx_reset()
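The two reset_pending updates above most likely sit in an if/else on the reset scope; a sketch of that pairing (the RESET_TYPE_MAX_METHOD guard is an assumption):

    /* Clear the flags this reset covered: for an in-hierarchy method,
     * -(1 << (method + 1)) keeps only bits above `method`, so every
     * equal-or-lower-priority pending reset is dropped in one step.
     */
    if (method < RESET_TYPE_MAX_METHOD)                  /* assumed guard */
            efx->reset_pending &= -(1 << (method + 1));
    else
            /* out-of-hierarchy reset types are cleared one bit at a time */
            __clear_bit(method, &efx->reset_pending);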
2488 pci_set_master(efx->pci_dev); in efx_reset()
2495 rc2 = efx_reset_up(efx, method, !disabled); in efx_reset()
2503 dev_close(efx->net_dev); in efx_reset()
2504 netif_err(efx, drv, efx->net_dev, "has been disabled\n"); in efx_reset()
2505 efx->state = STATE_DISABLED; in efx_reset()
2507 netif_dbg(efx, drv, efx->net_dev, "reset complete\n"); in efx_reset()
2508 netif_device_attach(efx->net_dev); in efx_reset()
2518 int efx_try_recovery(struct efx_nic *efx) in efx_try_recovery() argument
2526 struct eeh_dev *eehdev = pci_dev_to_eeh_dev(efx->pci_dev); in efx_try_recovery()
2537 static void efx_wait_for_bist_end(struct efx_nic *efx) in efx_wait_for_bist_end() argument
2542 if (efx_mcdi_poll_reboot(efx)) in efx_wait_for_bist_end()
2547 netif_err(efx, drv, efx->net_dev, "Warning: No MC reboot after BIST mode\n"); in efx_wait_for_bist_end()
2552 efx->mc_bist_for_other_fn = false; in efx_wait_for_bist_end()
2560 struct efx_nic *efx = container_of(data, struct efx_nic, reset_work); in efx_reset_work() local
2564 pending = ACCESS_ONCE(efx->reset_pending); in efx_reset_work()
2568 efx_wait_for_bist_end(efx); in efx_reset_work()
2572 efx_try_recovery(efx)) in efx_reset_work()
2584 if (efx->state == STATE_READY) in efx_reset_work()
2585 (void)efx_reset(efx, method); in efx_reset_work()
2590 void efx_schedule_reset(struct efx_nic *efx, enum reset_type type) in efx_schedule_reset() argument
2594 if (efx->state == STATE_RECOVERY) { in efx_schedule_reset()
2595 netif_dbg(efx, drv, efx->net_dev, in efx_schedule_reset()
2611 netif_dbg(efx, drv, efx->net_dev, "scheduling %s reset\n", in efx_schedule_reset()
2615 method = efx->type->map_reset_reason(type); in efx_schedule_reset()
2616 netif_dbg(efx, drv, efx->net_dev, in efx_schedule_reset()
2622 set_bit(method, &efx->reset_pending); in efx_schedule_reset()
2628 if (ACCESS_ONCE(efx->state) != STATE_READY) in efx_schedule_reset()
2633 efx_mcdi_mode_poll(efx); in efx_schedule_reset()
2635 queue_work(reset_workqueue, &efx->reset_work); in efx_schedule_reset()
2672 int efx_port_dummy_op_int(struct efx_nic *efx) in efx_port_dummy_op_int() argument
2676 void efx_port_dummy_op_void(struct efx_nic *efx) {} in efx_port_dummy_op_void() argument
2678 static bool efx_port_dummy_op_poll(struct efx_nic *efx) in efx_port_dummy_op_poll() argument
2699 static int efx_init_struct(struct efx_nic *efx, in efx_init_struct() argument
2705 INIT_LIST_HEAD(&efx->node); in efx_init_struct()
2706 INIT_LIST_HEAD(&efx->secondary_list); in efx_init_struct()
2707 spin_lock_init(&efx->biu_lock); in efx_init_struct()
2709 INIT_LIST_HEAD(&efx->mtd_list); in efx_init_struct()
2711 INIT_WORK(&efx->reset_work, efx_reset_work); in efx_init_struct()
2712 INIT_DELAYED_WORK(&efx->monitor_work, efx_monitor); in efx_init_struct()
2713 INIT_DELAYED_WORK(&efx->selftest_work, efx_selftest_async_work); in efx_init_struct()
2714 efx->pci_dev = pci_dev; in efx_init_struct()
2715 efx->msg_enable = debug; in efx_init_struct()
2716 efx->state = STATE_UNINIT; in efx_init_struct()
2717 strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name)); in efx_init_struct()
2719 efx->net_dev = net_dev; in efx_init_struct()
2720 efx->rx_prefix_size = efx->type->rx_prefix_size; in efx_init_struct()
2721 efx->rx_ip_align = in efx_init_struct()
2722 NET_IP_ALIGN ? (efx->rx_prefix_size + NET_IP_ALIGN) % 4 : 0; in efx_init_struct()
2723 efx->rx_packet_hash_offset = in efx_init_struct()
2724 efx->type->rx_hash_offset - efx->type->rx_prefix_size; in efx_init_struct()
2725 efx->rx_packet_ts_offset = in efx_init_struct()
2726 efx->type->rx_ts_offset - efx->type->rx_prefix_size; in efx_init_struct()
2727 spin_lock_init(&efx->stats_lock); in efx_init_struct()
2728 mutex_init(&efx->mac_lock); in efx_init_struct()
2729 efx->phy_op = &efx_dummy_phy_operations; in efx_init_struct()
2730 efx->mdio.dev = net_dev; in efx_init_struct()
2731 INIT_WORK(&efx->mac_work, efx_mac_work); in efx_init_struct()
2732 init_waitqueue_head(&efx->flush_wq); in efx_init_struct()
2735 efx->channel[i] = efx_alloc_channel(efx, i, NULL); in efx_init_struct()
2736 if (!efx->channel[i]) in efx_init_struct()
2738 efx->msi_context[i].efx = efx; in efx_init_struct()
2739 efx->msi_context[i].index = i; in efx_init_struct()
2743 efx->interrupt_mode = max(efx->type->max_interrupt_mode, in efx_init_struct()
2747 snprintf(efx->workqueue_name, sizeof(efx->workqueue_name), "sfc%s", in efx_init_struct()
2749 efx->workqueue = create_singlethread_workqueue(efx->workqueue_name); in efx_init_struct()
2750 if (!efx->workqueue) in efx_init_struct()
2756 efx_fini_struct(efx); in efx_init_struct()
2760 static void efx_fini_struct(struct efx_nic *efx) in efx_fini_struct() argument
2765 kfree(efx->channel[i]); in efx_fini_struct()
2767 kfree(efx->vpd_sn); in efx_fini_struct()
2769 if (efx->workqueue) { in efx_fini_struct()
2770 destroy_workqueue(efx->workqueue); in efx_fini_struct()
2771 efx->workqueue = NULL; in efx_fini_struct()
2775 void efx_update_sw_stats(struct efx_nic *efx, u64 *stats) in efx_update_sw_stats() argument
2780 efx_for_each_channel(channel, efx) in efx_update_sw_stats()
2783 stats[GENERIC_STAT_rx_noskb_drops] = atomic_read(&efx->n_rx_noskb_drops); in efx_update_sw_stats()
2795 static void efx_pci_remove_main(struct efx_nic *efx) in efx_pci_remove_main() argument
2800 BUG_ON(efx->state == STATE_READY); in efx_pci_remove_main()
2801 cancel_work_sync(&efx->reset_work); in efx_pci_remove_main()
2803 efx_disable_interrupts(efx); in efx_pci_remove_main()
2804 efx_nic_fini_interrupt(efx); in efx_pci_remove_main()
2805 efx_fini_port(efx); in efx_pci_remove_main()
2806 efx->type->fini(efx); in efx_pci_remove_main()
2807 efx_fini_napi(efx); in efx_pci_remove_main()
2808 efx_remove_all(efx); in efx_pci_remove_main()
2816 struct efx_nic *efx; in efx_pci_remove() local
2818 efx = pci_get_drvdata(pci_dev); in efx_pci_remove()
2819 if (!efx) in efx_pci_remove()
2824 efx_dissociate(efx); in efx_pci_remove()
2825 dev_close(efx->net_dev); in efx_pci_remove()
2826 efx_disable_interrupts(efx); in efx_pci_remove()
2829 efx->type->sriov_fini(efx); in efx_pci_remove()
2830 efx_unregister_netdev(efx); in efx_pci_remove()
2832 efx_mtd_remove(efx); in efx_pci_remove()
2834 efx_pci_remove_main(efx); in efx_pci_remove()
2836 efx_fini_io(efx); in efx_pci_remove()
2837 netif_dbg(efx, drv, efx->net_dev, "shutdown successful\n"); in efx_pci_remove()
2839 efx_fini_struct(efx); in efx_pci_remove()
2840 free_netdev(efx->net_dev); in efx_pci_remove()
2851 static void efx_probe_vpd_strings(struct efx_nic *efx) in efx_probe_vpd_strings() argument
2853 struct pci_dev *dev = efx->pci_dev; in efx_probe_vpd_strings()
2861 netif_err(efx, drv, efx->net_dev, "Unable to read VPD\n"); in efx_probe_vpd_strings()
2868 netif_err(efx, drv, efx->net_dev, "VPD Read-only not found\n"); in efx_probe_vpd_strings()
2881 netif_err(efx, drv, efx->net_dev, "Part number not found\n"); in efx_probe_vpd_strings()
2888 netif_err(efx, drv, efx->net_dev, "Incomplete part number\n"); in efx_probe_vpd_strings()
2892 netif_info(efx, drv, efx->net_dev, in efx_probe_vpd_strings()
2899 netif_err(efx, drv, efx->net_dev, "Serial number not found\n"); in efx_probe_vpd_strings()
2906 netif_err(efx, drv, efx->net_dev, "Incomplete serial number\n"); in efx_probe_vpd_strings()
2910 efx->vpd_sn = kmalloc(j + 1, GFP_KERNEL); in efx_probe_vpd_strings()
2911 if (!efx->vpd_sn) in efx_probe_vpd_strings()
2914 snprintf(efx->vpd_sn, j + 1, "%s", &vpd_data[i]); in efx_probe_vpd_strings()
2921 static int efx_pci_probe_main(struct efx_nic *efx) in efx_pci_probe_main() argument
2926 rc = efx_probe_all(efx); in efx_pci_probe_main()
2930 efx_init_napi(efx); in efx_pci_probe_main()
2932 rc = efx->type->init(efx); in efx_pci_probe_main()
2934 netif_err(efx, probe, efx->net_dev, in efx_pci_probe_main()
2939 rc = efx_init_port(efx); in efx_pci_probe_main()
2941 netif_err(efx, probe, efx->net_dev, in efx_pci_probe_main()
2946 rc = efx_nic_init_interrupt(efx); in efx_pci_probe_main()
2949 rc = efx_enable_interrupts(efx); in efx_pci_probe_main()
2956 efx_nic_fini_interrupt(efx); in efx_pci_probe_main()
2958 efx_fini_port(efx); in efx_pci_probe_main()
2960 efx->type->fini(efx); in efx_pci_probe_main()
2962 efx_fini_napi(efx); in efx_pci_probe_main()
2963 efx_remove_all(efx); in efx_pci_probe_main()
2981 struct efx_nic *efx; in efx_pci_probe() local
2985 net_dev = alloc_etherdev_mqs(sizeof(*efx), EFX_MAX_CORE_TX_QUEUES, in efx_pci_probe()
2989 efx = netdev_priv(net_dev); in efx_pci_probe()
2990 efx->type = (const struct efx_nic_type *) entry->driver_data; in efx_pci_probe()
2991 net_dev->features |= (efx->type->offload_features | NETIF_F_SG | in efx_pci_probe()
2994 if (efx->type->offload_features & NETIF_F_V6_CSUM) in efx_pci_probe()
3002 pci_set_drvdata(pci_dev, efx); in efx_pci_probe()
3004 rc = efx_init_struct(efx, pci_dev, net_dev); in efx_pci_probe()
3008 netif_info(efx, probe, efx->net_dev, in efx_pci_probe()
3011 efx_probe_vpd_strings(efx); in efx_pci_probe()
3014 rc = efx_init_io(efx); in efx_pci_probe()
3018 rc = efx_pci_probe_main(efx); in efx_pci_probe()
3022 rc = efx_register_netdev(efx); in efx_pci_probe()
3026 rc = efx->type->sriov_init(efx); in efx_pci_probe()
3028 netif_err(efx, probe, efx->net_dev, in efx_pci_probe()
3031 netif_dbg(efx, probe, efx->net_dev, "initialisation successful\n"); in efx_pci_probe()
3035 rc = efx_mtd_probe(efx); in efx_pci_probe()
3038 netif_warn(efx, probe, efx->net_dev, in efx_pci_probe()
3043 netif_warn(efx, probe, efx->net_dev, in efx_pci_probe()
3049 efx_pci_remove_main(efx); in efx_pci_probe()
3051 efx_fini_io(efx); in efx_pci_probe()
3053 efx_fini_struct(efx); in efx_pci_probe()
3056 netif_dbg(efx, drv, efx->net_dev, "initialisation failed. rc=%d\n", rc); in efx_pci_probe()
3063 struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev)); in efx_pm_freeze() local
3067 if (efx->state != STATE_DISABLED) { in efx_pm_freeze()
3068 efx->state = STATE_UNINIT; in efx_pm_freeze()
3070 efx_device_detach_sync(efx); in efx_pm_freeze()
3072 efx_stop_all(efx); in efx_pm_freeze()
3073 efx_disable_interrupts(efx); in efx_pm_freeze()
3084 struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev)); in efx_pm_thaw() local
3088 if (efx->state != STATE_DISABLED) { in efx_pm_thaw()
3089 rc = efx_enable_interrupts(efx); in efx_pm_thaw()
3093 mutex_lock(&efx->mac_lock); in efx_pm_thaw()
3094 efx->phy_op->reconfigure(efx); in efx_pm_thaw()
3095 mutex_unlock(&efx->mac_lock); in efx_pm_thaw()
3097 efx_start_all(efx); in efx_pm_thaw()
3099 netif_device_attach(efx->net_dev); in efx_pm_thaw()
3101 efx->state = STATE_READY; in efx_pm_thaw()
3103 efx->type->resume_wol(efx); in efx_pm_thaw()
3109 queue_work(reset_workqueue, &efx->reset_work); in efx_pm_thaw()
3122 struct efx_nic *efx = pci_get_drvdata(pci_dev); in efx_pm_poweroff() local
3124 efx->type->fini(efx); in efx_pm_poweroff()
3126 efx->reset_pending = 0; in efx_pm_poweroff()
3136 struct efx_nic *efx = pci_get_drvdata(pci_dev); in efx_pm_resume() local
3146 pci_set_master(efx->pci_dev); in efx_pm_resume()
3147 rc = efx->type->reset(efx, RESET_TYPE_ALL); in efx_pm_resume()
3150 rc = efx->type->init(efx); in efx_pm_resume()
3185 struct efx_nic *efx = pci_get_drvdata(pdev); in efx_io_error_detected() local
3192 if (efx->state != STATE_DISABLED) { in efx_io_error_detected()
3193 efx->state = STATE_RECOVERY; in efx_io_error_detected()
3194 efx->reset_pending = 0; in efx_io_error_detected()
3196 efx_device_detach_sync(efx); in efx_io_error_detected()
3198 efx_stop_all(efx); in efx_io_error_detected()
3199 efx_disable_interrupts(efx); in efx_io_error_detected()
3219 struct efx_nic *efx = pci_get_drvdata(pdev); in efx_io_slot_reset() local
3224 netif_err(efx, hw, efx->net_dev, in efx_io_slot_reset()
3231 netif_err(efx, hw, efx->net_dev, in efx_io_slot_reset()
3242 struct efx_nic *efx = pci_get_drvdata(pdev); in efx_io_resume() local
3247 if (efx->state == STATE_DISABLED) in efx_io_resume()
3250 rc = efx_reset(efx, RESET_TYPE_ALL); in efx_io_resume()
3252 netif_err(efx, hw, efx->net_dev, in efx_io_resume()
3255 efx->state = STATE_READY; in efx_io_resume()
3256 netif_dbg(efx, hw, efx->net_dev, in efx_io_resume()