This source file includes the following definitions:
- netif_is_ixgbe
- ixgbe_read_pci_cfg_word_parent
- ixgbe_get_parent_bus_info
- ixgbe_pcie_from_parent
- ixgbe_check_minimum_link
- ixgbe_service_event_schedule
- ixgbe_remove_adapter
- ixgbe_check_remove
- ixgbe_read_reg
- ixgbe_check_cfg_remove
- ixgbe_read_pci_cfg_word
- ixgbe_read_pci_cfg_dword
- ixgbe_write_pci_cfg_word
- ixgbe_service_event_complete
- ixgbe_regdump
- ixgbe_print_buffer
- ixgbe_dump
- ixgbe_release_hw_control
- ixgbe_get_hw_control
- ixgbe_set_ivar
- ixgbe_irq_rearm_queues
- ixgbe_update_xoff_rx_lfc
- ixgbe_update_xoff_received
- ixgbe_get_tx_completed
- ixgbe_get_tx_pending
- ixgbe_check_tx_hang
- ixgbe_tx_timeout_reset
- ixgbe_tx_maxrate
- ixgbe_clean_tx_irq
- ixgbe_update_tx_dca
- ixgbe_update_rx_dca
- ixgbe_update_dca
- ixgbe_setup_dca
- __ixgbe_notify_dca
- ixgbe_rx_hash
- ixgbe_rx_is_fcoe
- ixgbe_rx_checksum
- ixgbe_rx_offset
- ixgbe_alloc_mapped_page
- ixgbe_alloc_rx_buffers
- ixgbe_set_rsc_gso_size
- ixgbe_update_rsc_stats
- ixgbe_process_skb_fields
- ixgbe_rx_skb
- ixgbe_is_non_eop
- ixgbe_pull_tail
- ixgbe_dma_sync_frag
- ixgbe_cleanup_headers
- ixgbe_reuse_rx_page
- ixgbe_page_is_reserved
- ixgbe_can_reuse_rx_page
- ixgbe_add_rx_frag
- ixgbe_get_rx_buffer
- ixgbe_put_rx_buffer
- ixgbe_construct_skb
- ixgbe_build_skb
- ixgbe_run_xdp
- ixgbe_rx_buffer_flip
- ixgbe_clean_rx_irq
- ixgbe_configure_msix
- ixgbe_update_itr
- ixgbe_write_eitr
- ixgbe_set_itr
- ixgbe_check_overtemp_subtask
- ixgbe_check_fan_failure
- ixgbe_check_overtemp_event
- ixgbe_is_sfp
- ixgbe_check_sfp_event
- ixgbe_check_lsc
- ixgbe_irq_enable_queues
- ixgbe_irq_disable_queues
- ixgbe_irq_enable
- ixgbe_msix_other
- ixgbe_msix_clean_rings
- ixgbe_poll
- ixgbe_request_msix_irqs
- ixgbe_intr
- ixgbe_request_irq
- ixgbe_free_irq
- ixgbe_irq_disable
- ixgbe_configure_msi_and_legacy
- ixgbe_configure_tx_ring
- ixgbe_setup_mtqc
- ixgbe_configure_tx
- ixgbe_enable_rx_drop
- ixgbe_disable_rx_drop
- ixgbe_set_rx_drop_en
- ixgbe_configure_srrctl
- ixgbe_rss_indir_tbl_entries
- ixgbe_store_key
- ixgbe_init_rss_key
- ixgbe_store_reta
- ixgbe_store_vfreta
- ixgbe_setup_reta
- ixgbe_setup_vfreta
- ixgbe_setup_mrqc
- ixgbe_configure_rscctl
- ixgbe_rx_desc_queue_enable
- ixgbe_configure_rx_ring
- ixgbe_setup_psrtype
- ixgbe_configure_virtualization
- ixgbe_set_rx_buffer_len
- ixgbe_setup_rdrxctl
- ixgbe_configure_rx
- ixgbe_vlan_rx_add_vid
- ixgbe_find_vlvf_entry
- ixgbe_update_pf_promisc_vlvf
- ixgbe_vlan_rx_kill_vid
- ixgbe_vlan_strip_disable
- ixgbe_vlan_strip_enable
- ixgbe_vlan_promisc_enable
- ixgbe_scrub_vfta
- ixgbe_vlan_promisc_disable
- ixgbe_restore_vlan
- ixgbe_write_mc_addr_list
- ixgbe_full_sync_mac_table
- ixgbe_sync_mac_table
- ixgbe_flush_sw_mac_table
- ixgbe_available_rars
- ixgbe_mac_set_default_filter
- ixgbe_add_mac_filter
- ixgbe_del_mac_filter
- ixgbe_uc_sync
- ixgbe_uc_unsync
- ixgbe_set_rx_mode
- ixgbe_napi_enable_all
- ixgbe_napi_disable_all
- ixgbe_clear_udp_tunnel_port
- ixgbe_configure_dcb
- ixgbe_hpbthresh
- ixgbe_lpbthresh
- ixgbe_pbthresh_setup
- ixgbe_configure_pb
- ixgbe_fdir_filter_restore
- ixgbe_clean_rx_ring
- ixgbe_fwd_ring_up
- ixgbe_macvlan_up
- ixgbe_configure_dfwd
- ixgbe_configure
- ixgbe_sfp_link_config
- ixgbe_non_sfp_link_config
- ixgbe_setup_gpie
- ixgbe_up_complete
- ixgbe_reinit_locked
- ixgbe_up
- ixgbe_get_completion_timeout
- ixgbe_disable_rx
- ixgbe_disable_tx
- ixgbe_reset
- ixgbe_clean_tx_ring
- ixgbe_clean_all_rx_rings
- ixgbe_clean_all_tx_rings
- ixgbe_fdir_filter_exit
- ixgbe_down
- ixgbe_set_eee_capable
- ixgbe_tx_timeout
- ixgbe_init_dcb
- ixgbe_sw_init
- ixgbe_setup_tx_resources
- ixgbe_setup_all_tx_resources
- ixgbe_setup_rx_resources
- ixgbe_setup_all_rx_resources
- ixgbe_free_tx_resources
- ixgbe_free_all_tx_resources
- ixgbe_free_rx_resources
- ixgbe_free_all_rx_resources
- ixgbe_change_mtu
- ixgbe_open
- ixgbe_close_suspend
- ixgbe_close
- ixgbe_resume
- __ixgbe_shutdown
- ixgbe_suspend
- ixgbe_shutdown
- ixgbe_update_stats
- ixgbe_fdir_reinit_subtask
- ixgbe_check_hang_subtask
- ixgbe_watchdog_update_link
- ixgbe_update_default_up
- ixgbe_watchdog_link_is_up
- ixgbe_watchdog_link_is_down
- ixgbe_ring_tx_pending
- ixgbe_vf_tx_pending
- ixgbe_watchdog_flush_tx
- ixgbe_check_for_bad_vf
- ixgbe_spoof_check
- ixgbe_spoof_check
- ixgbe_check_for_bad_vf
- ixgbe_watchdog_subtask
- ixgbe_sfp_detection_subtask
- ixgbe_sfp_link_config_subtask
- ixgbe_service_timer
- ixgbe_phy_interrupt_subtask
- ixgbe_reset_subtask
- ixgbe_check_fw_error
- ixgbe_service_task
- ixgbe_tso
- ixgbe_ipv6_csum_is_sctp
- ixgbe_tx_csum
- ixgbe_tx_cmd_type
- ixgbe_tx_olinfo_status
- __ixgbe_maybe_stop_tx
- ixgbe_maybe_stop_tx
- ixgbe_tx_map
- ixgbe_atr
- ixgbe_select_queue
- ixgbe_xmit_xdp_ring
- ixgbe_xmit_frame_ring
- __ixgbe_xmit_frame
- ixgbe_xmit_frame
- ixgbe_set_mac
- ixgbe_mdio_read
- ixgbe_mdio_write
- ixgbe_ioctl
- ixgbe_add_sanmac_netdev
- ixgbe_del_sanmac_netdev
- ixgbe_get_ring_stats64
- ixgbe_get_stats64
- ixgbe_validate_rtr
- ixgbe_set_prio_tc_map
- ixgbe_reassign_macvlan_pool
- ixgbe_defrag_macvlan_pools
- ixgbe_setup_tc
- ixgbe_delete_clsu32
- ixgbe_configure_clsu32_add_hnode
- ixgbe_configure_clsu32_del_hnode
- get_macvlan_queue
- handle_redirect_action
- parse_tc_actions
- parse_tc_actions
- ixgbe_clsu32_build_input
- ixgbe_configure_clsu32
- ixgbe_setup_tc_cls_u32
- ixgbe_setup_tc_block_cb
- ixgbe_setup_tc_mqprio
- __ixgbe_setup_tc
- ixgbe_sriov_reinit
- ixgbe_do_reset
- ixgbe_fix_features
- ixgbe_reset_l2fw_offload
- ixgbe_set_features
- ixgbe_add_udp_tunnel_port
- ixgbe_del_udp_tunnel_port
- ixgbe_ndo_fdb_add
- ixgbe_configure_bridge_mode
- ixgbe_ndo_bridge_setlink
- ixgbe_ndo_bridge_getlink
- ixgbe_fwd_add
- ixgbe_fwd_del
- ixgbe_features_check
- ixgbe_xdp_setup
- ixgbe_xdp
- ixgbe_xdp_ring_update_tail
- ixgbe_xdp_xmit
- ixgbe_disable_txr_hw
- ixgbe_disable_txr
- ixgbe_disable_rxr_hw
- ixgbe_reset_txr_stats
- ixgbe_reset_rxr_stats
- ixgbe_txrx_ring_disable
- ixgbe_txrx_ring_enable
- ixgbe_enumerate_functions
- ixgbe_wol_supported
- ixgbe_set_fw_version
- ixgbe_probe
- ixgbe_remove
- ixgbe_io_error_detected
- ixgbe_io_slot_reset
- ixgbe_io_resume
- ixgbe_init_module
- ixgbe_exit_module
- ixgbe_notify_dca
1
2
3
4 #include <linux/types.h>
5 #include <linux/module.h>
6 #include <linux/pci.h>
7 #include <linux/netdevice.h>
8 #include <linux/vmalloc.h>
9 #include <linux/string.h>
10 #include <linux/in.h>
11 #include <linux/interrupt.h>
12 #include <linux/ip.h>
13 #include <linux/tcp.h>
14 #include <linux/sctp.h>
15 #include <linux/pkt_sched.h>
16 #include <linux/ipv6.h>
17 #include <linux/slab.h>
18 #include <net/checksum.h>
19 #include <net/ip6_checksum.h>
20 #include <linux/etherdevice.h>
21 #include <linux/ethtool.h>
22 #include <linux/if.h>
23 #include <linux/if_vlan.h>
24 #include <linux/if_macvlan.h>
25 #include <linux/if_bridge.h>
26 #include <linux/prefetch.h>
27 #include <linux/bpf.h>
28 #include <linux/bpf_trace.h>
29 #include <linux/atomic.h>
30 #include <linux/numa.h>
31 #include <scsi/fc/fc_fcoe.h>
32 #include <net/udp_tunnel.h>
33 #include <net/pkt_cls.h>
34 #include <net/tc_act/tc_gact.h>
35 #include <net/tc_act/tc_mirred.h>
36 #include <net/vxlan.h>
37 #include <net/mpls.h>
38 #include <net/xdp_sock.h>
39 #include <net/xfrm.h>
40
41 #include "ixgbe.h"
42 #include "ixgbe_common.h"
43 #include "ixgbe_dcb_82599.h"
44 #include "ixgbe_phy.h"
45 #include "ixgbe_sriov.h"
46 #include "ixgbe_model.h"
47 #include "ixgbe_txrx_common.h"
48
49 char ixgbe_driver_name[] = "ixgbe";
50 static const char ixgbe_driver_string[] =
51 "Intel(R) 10 Gigabit PCI Express Network Driver";
52 #ifdef IXGBE_FCOE
53 char ixgbe_default_device_descr[] =
54 "Intel(R) 10 Gigabit Network Connection";
55 #else
56 static char ixgbe_default_device_descr[] =
57 "Intel(R) 10 Gigabit Network Connection";
58 #endif
59 #define DRV_VERSION "5.1.0-k"
60 const char ixgbe_driver_version[] = DRV_VERSION;
61 static const char ixgbe_copyright[] =
62 "Copyright (c) 1999-2016 Intel Corporation.";
63
64 static const char ixgbe_overheat_msg[] = "Network adapter has been stopped because it has over heated. Restart the computer. If the problem persists, power off the system and replace the adapter";
65
66 static const struct ixgbe_info *ixgbe_info_tbl[] = {
67 [board_82598] = &ixgbe_82598_info,
68 [board_82599] = &ixgbe_82599_info,
69 [board_X540] = &ixgbe_X540_info,
70 [board_X550] = &ixgbe_X550_info,
71 [board_X550EM_x] = &ixgbe_X550EM_x_info,
72 [board_x550em_x_fw] = &ixgbe_x550em_x_fw_info,
73 [board_x550em_a] = &ixgbe_x550em_a_info,
74 [board_x550em_a_fw] = &ixgbe_x550em_a_fw_info,
75 };
76
77 /* ixgbe_pci_tbl - PCI Device ID Table
78  *
79  * Wildcard entries (PCI_ANY_ID) should come last
80  * Last entry must be all 0s
81  *
82  * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
83  *   Class, Class Mask, private data (not used) }
84  */
85 static const struct pci_device_id ixgbe_pci_tbl[] = {
86 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598), board_82598 },
87 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT), board_82598 },
88 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_SINGLE_PORT), board_82598 },
89 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT), board_82598 },
90 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT2), board_82598 },
91 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_CX4), board_82598 },
92 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_CX4_DUAL_PORT), board_82598 },
93 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_DA_DUAL_PORT), board_82598 },
94 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM), board_82598 },
95 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_XF_LR), board_82598 },
96 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_SFP_LOM), board_82598 },
97 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_BX), board_82598 },
98 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4), board_82599 },
99 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_XAUI_LOM), board_82599 },
100 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KR), board_82599 },
101 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP), board_82599 },
102 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_EM), board_82599 },
103 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4_MEZZ), board_82599 },
104 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_CX4), board_82599 },
105 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_BACKPLANE_FCOE), board_82599 },
106 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_FCOE), board_82599 },
107 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_T3_LOM), board_82599 },
108 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_COMBO_BACKPLANE), board_82599 },
109 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T), board_X540 },
110 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF2), board_82599 },
111 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_LS), board_82599 },
112 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_QSFP_SF_QP), board_82599 },
113 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599EN_SFP), board_82599 },
114 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF_QP), board_82599 },
115 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T1), board_X540 },
116 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550T), board_X550},
117 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550T1), board_X550},
118 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KX4), board_X550EM_x},
119 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_XFI), board_X550EM_x},
120 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KR), board_X550EM_x},
121 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_10G_T), board_X550EM_x},
122 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_SFP), board_X550EM_x},
123 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_1G_T), board_x550em_x_fw},
124 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_KR), board_x550em_a },
125 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_KR_L), board_x550em_a },
126 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SFP_N), board_x550em_a },
127 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SGMII), board_x550em_a },
128 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SGMII_L), board_x550em_a },
129 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_10G_T), board_x550em_a},
130 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SFP), board_x550em_a },
131 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_1G_T), board_x550em_a_fw },
132 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_1G_T_L), board_x550em_a_fw },
133 /* required last entry */
134 {0, }
135 };
136 MODULE_DEVICE_TABLE(pci, ixgbe_pci_tbl);
137
138 #ifdef CONFIG_IXGBE_DCA
139 static int ixgbe_notify_dca(struct notifier_block *, unsigned long event,
140 void *p);
141 static struct notifier_block dca_notifier = {
142 .notifier_call = ixgbe_notify_dca,
143 .next = NULL,
144 .priority = 0
145 };
146 #endif
147
148 #ifdef CONFIG_PCI_IOV
149 static unsigned int max_vfs;
150 module_param(max_vfs, uint, 0);
151 MODULE_PARM_DESC(max_vfs,
152 "Maximum number of virtual functions to allocate per physical function - default is zero and maximum value is 63. (Deprecated)");
153 #endif
154
155 static unsigned int allow_unsupported_sfp;
156 module_param(allow_unsupported_sfp, uint, 0);
157 MODULE_PARM_DESC(allow_unsupported_sfp,
158 "Allow unsupported and untested SFP+ modules on 82599-based adapters");
159
160 #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
161 static int debug = -1;
162 module_param(debug, int, 0);
163 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
164
165 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
166 MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver");
167 MODULE_LICENSE("GPL v2");
168 MODULE_VERSION(DRV_VERSION);
169
170 static struct workqueue_struct *ixgbe_wq;
171
172 static bool ixgbe_check_cfg_remove(struct ixgbe_hw *hw, struct pci_dev *pdev);
173 static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *);
174
175 static const struct net_device_ops ixgbe_netdev_ops;
176
177 static bool netif_is_ixgbe(struct net_device *dev)
178 {
179 return dev && (dev->netdev_ops == &ixgbe_netdev_ops);
180 }
181
182 static int ixgbe_read_pci_cfg_word_parent(struct ixgbe_adapter *adapter,
183 u32 reg, u16 *value)
184 {
185 struct pci_dev *parent_dev;
186 struct pci_bus *parent_bus;
187
188 parent_bus = adapter->pdev->bus->parent;
189 if (!parent_bus)
190 return -1;
191
192 parent_dev = parent_bus->self;
193 if (!parent_dev)
194 return -1;
195
196 if (!pci_is_pcie(parent_dev))
197 return -1;
198
199 pcie_capability_read_word(parent_dev, reg, value);
200 if (*value == IXGBE_FAILED_READ_CFG_WORD &&
201 ixgbe_check_cfg_remove(&adapter->hw, parent_dev))
202 return -1;
203 return 0;
204 }
205
206 static s32 ixgbe_get_parent_bus_info(struct ixgbe_adapter *adapter)
207 {
208 struct ixgbe_hw *hw = &adapter->hw;
209 u16 link_status = 0;
210 int err;
211
212 hw->bus.type = ixgbe_bus_type_pci_express;
213
214 /* Get the negotiated link width and speed from PCI config space of the
215  * parent, as this device is behind a switch
216  */
217 err = ixgbe_read_pci_cfg_word_parent(adapter, 18, &link_status);
218
219 /* assume caller will handle error case */
220 if (err)
221 return err;
222
223 hw->bus.width = ixgbe_convert_bus_width(link_status);
224 hw->bus.speed = ixgbe_convert_bus_speed(link_status);
225
226 return 0;
227 }
228
229 /**
230  * ixgbe_pcie_from_parent - Determine whether PCIe info should come from parent
231  * @hw: hw specific details
232  *
233  * This function is used by probe to determine whether a device's PCI-Express
234  * info (speed, width, etc) should be obtained from the parent bus rather than
235  * from the device. Used to ensure that various locations all have the correct
236  * device ID checks.
237  */
238 static inline bool ixgbe_pcie_from_parent(struct ixgbe_hw *hw)
239 {
240 switch (hw->device_id) {
241 case IXGBE_DEV_ID_82599_SFP_SF_QP:
242 case IXGBE_DEV_ID_82599_QSFP_SF_QP:
243 return true;
244 default:
245 return false;
246 }
247 }
248
249 static void ixgbe_check_minimum_link(struct ixgbe_adapter *adapter,
250 int expected_gts)
251 {
252 struct ixgbe_hw *hw = &adapter->hw;
253 struct pci_dev *pdev;
254
255 /* Some devices are not connected over PCIe and thus do not negotiate
256  * speed. These devices do not have valid bus info, and thus any report
257  * of a PCIe bandwidth limitation is meaningless.
258  */
259 if (hw->bus.type == ixgbe_bus_type_internal)
260 return;
261
262 /* determine whether to use the parent device */
263 if (ixgbe_pcie_from_parent(&adapter->hw))
264 pdev = adapter->pdev->bus->parent->self;
265 else
266 pdev = adapter->pdev;
267
268 pcie_print_link_status(pdev);
269 }
270
271 static void ixgbe_service_event_schedule(struct ixgbe_adapter *adapter)
272 {
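/* queue the service task at most once; skip it entirely while the adapter is going down or has been removed */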
273 if (!test_bit(__IXGBE_DOWN, &adapter->state) &&
274 !test_bit(__IXGBE_REMOVING, &adapter->state) &&
275 !test_and_set_bit(__IXGBE_SERVICE_SCHED, &adapter->state))
276 queue_work(ixgbe_wq, &adapter->service_task);
277 }
278
279 static void ixgbe_remove_adapter(struct ixgbe_hw *hw)
280 {
281 struct ixgbe_adapter *adapter = hw->back;
282
283 if (!hw->hw_addr)
284 return;
285 hw->hw_addr = NULL;
286 e_dev_err("Adapter removed\n");
287 if (test_bit(__IXGBE_SERVICE_INITED, &adapter->state))
288 ixgbe_service_event_schedule(adapter);
289 }
290
291 static u32 ixgbe_check_remove(struct ixgbe_hw *hw, u32 reg)
292 {
293 u8 __iomem *reg_addr;
294 u32 value;
295 int i;
296
297 reg_addr = READ_ONCE(hw->hw_addr);
298 if (ixgbe_removed(reg_addr))
299 return IXGBE_FAILED_READ_REG;
300
301 /* Register read of 0xFFFFFFFF can indicate the adapter has been removed,
302  * so perform several status register reads to determine if the adapter
303  * has been removed.
304  */
305 for (i = 0; i < IXGBE_FAILED_READ_RETRIES; i++) {
306 value = readl(reg_addr + IXGBE_STATUS);
307 if (value != IXGBE_FAILED_READ_REG)
308 break;
309 mdelay(3);
310 }
311
312 if (value == IXGBE_FAILED_READ_REG)
313 ixgbe_remove_adapter(hw);
314 else
315 value = readl(reg_addr + reg);
316 return value;
317 }
318
319 /**
320  * ixgbe_read_reg - Read from device register
321  * @hw: hw specific details
322  * @reg: offset of register to read
323  *
324  * Returns : value read or IXGBE_FAILED_READ_REG if removed
325  *
326  * This function is used to read device registers. It checks for device
327  * removal by confirming any read that returns all ones by checking the
328  * status register value for all ones. This function avoids reading from
329  * the hardware if a removal was previously detected, in which case it
330  * returns IXGBE_FAILED_READ_REG.
331  */
332 u32 ixgbe_read_reg(struct ixgbe_hw *hw, u32 reg)
333 {
334 u8 __iomem *reg_addr = READ_ONCE(hw->hw_addr);
335 u32 value;
336
337 if (ixgbe_removed(reg_addr))
338 return IXGBE_FAILED_READ_REG;
339 if (unlikely(hw->phy.nw_mng_if_sel &
340 IXGBE_NW_MNG_IF_SEL_SGMII_ENABLE)) {
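/* on parts where firmware shares the SGMII interface, poll the SGMII busy flag so this read does not race outstanding register writes */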
341 struct ixgbe_adapter *adapter;
342 int i;
343
344 for (i = 0; i < 200; ++i) {
345 value = readl(reg_addr + IXGBE_MAC_SGMII_BUSY);
346 if (likely(!value))
347 goto writes_completed;
348 if (value == IXGBE_FAILED_READ_REG) {
349 ixgbe_remove_adapter(hw);
350 return IXGBE_FAILED_READ_REG;
351 }
352 udelay(5);
353 }
354
355 adapter = hw->back;
356 e_warn(hw, "register writes incomplete %08x\n", value);
357 }
358
359 writes_completed:
360 value = readl(reg_addr + reg);
361 if (unlikely(value == IXGBE_FAILED_READ_REG))
362 value = ixgbe_check_remove(hw, reg);
363 return value;
364 }
365
366 static bool ixgbe_check_cfg_remove(struct ixgbe_hw *hw, struct pci_dev *pdev)
367 {
368 u16 value;
369
370 pci_read_config_word(pdev, PCI_VENDOR_ID, &value);
371 if (value == IXGBE_FAILED_READ_CFG_WORD) {
372 ixgbe_remove_adapter(hw);
373 return true;
374 }
375 return false;
376 }
377
378 u16 ixgbe_read_pci_cfg_word(struct ixgbe_hw *hw, u32 reg)
379 {
380 struct ixgbe_adapter *adapter = hw->back;
381 u16 value;
382
383 if (ixgbe_removed(hw->hw_addr))
384 return IXGBE_FAILED_READ_CFG_WORD;
385 pci_read_config_word(adapter->pdev, reg, &value);
386 if (value == IXGBE_FAILED_READ_CFG_WORD &&
387 ixgbe_check_cfg_remove(hw, adapter->pdev))
388 return IXGBE_FAILED_READ_CFG_WORD;
389 return value;
390 }
391
392 #ifdef CONFIG_PCI_IOV
393 static u32 ixgbe_read_pci_cfg_dword(struct ixgbe_hw *hw, u32 reg)
394 {
395 struct ixgbe_adapter *adapter = hw->back;
396 u32 value;
397
398 if (ixgbe_removed(hw->hw_addr))
399 return IXGBE_FAILED_READ_CFG_DWORD;
400 pci_read_config_dword(adapter->pdev, reg, &value);
401 if (value == IXGBE_FAILED_READ_CFG_DWORD &&
402 ixgbe_check_cfg_remove(hw, adapter->pdev))
403 return IXGBE_FAILED_READ_CFG_DWORD;
404 return value;
405 }
406 #endif
407
408 void ixgbe_write_pci_cfg_word(struct ixgbe_hw *hw, u32 reg, u16 value)
409 {
410 struct ixgbe_adapter *adapter = hw->back;
411
412 if (ixgbe_removed(hw->hw_addr))
413 return;
414 pci_write_config_word(adapter->pdev, reg, value);
415 }
416
417 static void ixgbe_service_event_complete(struct ixgbe_adapter *adapter)
418 {
419 BUG_ON(!test_bit(__IXGBE_SERVICE_SCHED, &adapter->state));
420
421 /* flush memory to make sure state is correct before next watchdog */
422 smp_mb__before_atomic();
423 clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state);
424 }
425
426 struct ixgbe_reg_info {
427 u32 ofs;
428 char *name;
429 };
430
431 static const struct ixgbe_reg_info ixgbe_reg_info_tbl[] = {
432
433 /* General Registers */
434 {IXGBE_CTRL, "CTRL"},
435 {IXGBE_STATUS, "STATUS"},
436 {IXGBE_CTRL_EXT, "CTRL_EXT"},
437
438 /* Interrupt Registers */
439 {IXGBE_EICR, "EICR"},
440
441 /* Receive DMA Registers */
442 {IXGBE_SRRCTL(0), "SRRCTL"},
443 {IXGBE_DCA_RXCTRL(0), "DRXCTL"},
444 {IXGBE_RDLEN(0), "RDLEN"},
445 {IXGBE_RDH(0), "RDH"},
446 {IXGBE_RDT(0), "RDT"},
447 {IXGBE_RXDCTL(0), "RXDCTL"},
448 {IXGBE_RDBAL(0), "RDBAL"},
449 {IXGBE_RDBAH(0), "RDBAH"},
450
451 /* Transmit DMA Registers */
452 {IXGBE_TDBAL(0), "TDBAL"},
453 {IXGBE_TDBAH(0), "TDBAH"},
454 {IXGBE_TDLEN(0), "TDLEN"},
455 {IXGBE_TDH(0), "TDH"},
456 {IXGBE_TDT(0), "TDT"},
457 {IXGBE_TXDCTL(0), "TXDCTL"},
458
459 /* List Terminator */
460 { .name = NULL }
461 };
462
463 /*
464  * ixgbe_regdump - register printout routine
465  */
466
467 static void ixgbe_regdump(struct ixgbe_hw *hw, struct ixgbe_reg_info *reginfo)
468 {
469 int i;
470 char rname[16];
471 u32 regs[64];
472
473 switch (reginfo->ofs) {
474 case IXGBE_SRRCTL(0):
475 for (i = 0; i < 64; i++)
476 regs[i] = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
477 break;
478 case IXGBE_DCA_RXCTRL(0):
479 for (i = 0; i < 64; i++)
480 regs[i] = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
481 break;
482 case IXGBE_RDLEN(0):
483 for (i = 0; i < 64; i++)
484 regs[i] = IXGBE_READ_REG(hw, IXGBE_RDLEN(i));
485 break;
486 case IXGBE_RDH(0):
487 for (i = 0; i < 64; i++)
488 regs[i] = IXGBE_READ_REG(hw, IXGBE_RDH(i));
489 break;
490 case IXGBE_RDT(0):
491 for (i = 0; i < 64; i++)
492 regs[i] = IXGBE_READ_REG(hw, IXGBE_RDT(i));
493 break;
494 case IXGBE_RXDCTL(0):
495 for (i = 0; i < 64; i++)
496 regs[i] = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
497 break;
498 case IXGBE_RDBAL(0):
499 for (i = 0; i < 64; i++)
500 regs[i] = IXGBE_READ_REG(hw, IXGBE_RDBAL(i));
501 break;
502 case IXGBE_RDBAH(0):
503 for (i = 0; i < 64; i++)
504 regs[i] = IXGBE_READ_REG(hw, IXGBE_RDBAH(i));
505 break;
506 case IXGBE_TDBAL(0):
507 for (i = 0; i < 64; i++)
508 regs[i] = IXGBE_READ_REG(hw, IXGBE_TDBAL(i));
509 break;
510 case IXGBE_TDBAH(0):
511 for (i = 0; i < 64; i++)
512 regs[i] = IXGBE_READ_REG(hw, IXGBE_TDBAH(i));
513 break;
514 case IXGBE_TDLEN(0):
515 for (i = 0; i < 64; i++)
516 regs[i] = IXGBE_READ_REG(hw, IXGBE_TDLEN(i));
517 break;
518 case IXGBE_TDH(0):
519 for (i = 0; i < 64; i++)
520 regs[i] = IXGBE_READ_REG(hw, IXGBE_TDH(i));
521 break;
522 case IXGBE_TDT(0):
523 for (i = 0; i < 64; i++)
524 regs[i] = IXGBE_READ_REG(hw, IXGBE_TDT(i));
525 break;
526 case IXGBE_TXDCTL(0):
527 for (i = 0; i < 64; i++)
528 regs[i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
529 break;
530 default:
531 pr_info("%-15s %08x\n",
532 reginfo->name, IXGBE_READ_REG(hw, reginfo->ofs));
533 return;
534 }
535
536 i = 0;
537 while (i < 64) {
538 int j;
539 char buf[9 * 8 + 1];
540 char *p = buf;
541
542 snprintf(rname, 16, "%s[%d-%d]", reginfo->name, i, i + 7);
543 for (j = 0; j < 8; j++)
544 p += sprintf(p, " %08x", regs[i++]);
545 pr_err("%-15s%s\n", rname, buf);
546 }
547
548 }
549
550 static void ixgbe_print_buffer(struct ixgbe_ring *ring, int n)
551 {
552 struct ixgbe_tx_buffer *tx_buffer;
553
554 tx_buffer = &ring->tx_buffer_info[ring->next_to_clean];
555 pr_info(" %5d %5X %5X %016llX %08X %p %016llX\n",
556 n, ring->next_to_use, ring->next_to_clean,
557 (u64)dma_unmap_addr(tx_buffer, dma),
558 dma_unmap_len(tx_buffer, len),
559 tx_buffer->next_to_watch,
560 (u64)tx_buffer->time_stamp);
561 }
562
563 /*
564  * ixgbe_dump - Print registers, Tx rings and Rx rings
565  */
566 static void ixgbe_dump(struct ixgbe_adapter *adapter)
567 {
568 struct net_device *netdev = adapter->netdev;
569 struct ixgbe_hw *hw = &adapter->hw;
570 struct ixgbe_reg_info *reginfo;
571 int n = 0;
572 struct ixgbe_ring *ring;
573 struct ixgbe_tx_buffer *tx_buffer;
574 union ixgbe_adv_tx_desc *tx_desc;
575 struct my_u0 { u64 a; u64 b; } *u0;
576 struct ixgbe_ring *rx_ring;
577 union ixgbe_adv_rx_desc *rx_desc;
578 struct ixgbe_rx_buffer *rx_buffer_info;
579 int i = 0;
580
581 if (!netif_msg_hw(adapter))
582 return;
583
584 /* Print netdevice Info */
585 if (netdev) {
586 dev_info(&adapter->pdev->dev, "Net device Info\n");
587 pr_info("Device Name state "
588 "trans_start\n");
589 pr_info("%-15s %016lX %016lX\n",
590 netdev->name,
591 netdev->state,
592 dev_trans_start(netdev));
593 }
594
595 /* Print Registers */
596 dev_info(&adapter->pdev->dev, "Register Dump\n");
597 pr_info(" Register Name Value\n");
598 for (reginfo = (struct ixgbe_reg_info *)ixgbe_reg_info_tbl;
599 reginfo->name; reginfo++) {
600 ixgbe_regdump(hw, reginfo);
601 }
602
603 /* Print TX Ring Summary */
604 if (!netdev || !netif_running(netdev))
605 return;
606
607 dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
608 pr_info(" %s %s %s %s\n",
609 "Queue [NTU] [NTC] [bi(ntc)->dma ]",
610 "leng", "ntw", "timestamp");
611 for (n = 0; n < adapter->num_tx_queues; n++) {
612 ring = adapter->tx_ring[n];
613 ixgbe_print_buffer(ring, n);
614 }
615
616 for (n = 0; n < adapter->num_xdp_queues; n++) {
617 ring = adapter->xdp_ring[n];
618 ixgbe_print_buffer(ring, n);
619 }
620
621 /* Print TX Rings */
622 if (!netif_msg_tx_done(adapter))
623 goto rx_ring_summary;
624
625 dev_info(&adapter->pdev->dev, "TX Rings Dump\n");
626 /* Transmit Descriptor Dump
627  *
628  * Each "T [desc]" line below shows the two 64-bit words of the Advanced
629  * Transmit Descriptor (buffer address and cmd/type/len/status fields),
630  * followed by the driver's tx_buffer_info bookkeeping for that slot:
631  * DMA handle, mapped length, next_to_watch pointer, timestamp and skb.
632  * NTU/NTC markers flag the ring's next_to_use/next_to_clean positions.
633  */
662 for (n = 0; n < adapter->num_tx_queues; n++) {
663 ring = adapter->tx_ring[n];
664 pr_info("------------------------------------\n");
665 pr_info("TX QUEUE INDEX = %d\n", ring->queue_index);
666 pr_info("------------------------------------\n");
667 pr_info("%s%s %s %s %s %s\n",
668 "T [desc] [address 63:0 ] ",
669 "[PlPOIdStDDt Ln] [bi->dma ] ",
670 "leng", "ntw", "timestamp", "bi->skb");
671
672 for (i = 0; ring->desc && (i < ring->count); i++) {
673 tx_desc = IXGBE_TX_DESC(ring, i);
674 tx_buffer = &ring->tx_buffer_info[i];
675 u0 = (struct my_u0 *)tx_desc;
676 if (dma_unmap_len(tx_buffer, len) > 0) {
677 const char *ring_desc;
678
679 if (i == ring->next_to_use &&
680 i == ring->next_to_clean)
681 ring_desc = " NTC/U";
682 else if (i == ring->next_to_use)
683 ring_desc = " NTU";
684 else if (i == ring->next_to_clean)
685 ring_desc = " NTC";
686 else
687 ring_desc = "";
688 pr_info("T [0x%03X] %016llX %016llX %016llX %08X %p %016llX %p%s",
689 i,
690 le64_to_cpu((__force __le64)u0->a),
691 le64_to_cpu((__force __le64)u0->b),
692 (u64)dma_unmap_addr(tx_buffer, dma),
693 dma_unmap_len(tx_buffer, len),
694 tx_buffer->next_to_watch,
695 (u64)tx_buffer->time_stamp,
696 tx_buffer->skb,
697 ring_desc);
698
699 if (netif_msg_pktdata(adapter) &&
700 tx_buffer->skb)
701 print_hex_dump(KERN_INFO, "",
702 DUMP_PREFIX_ADDRESS, 16, 1,
703 tx_buffer->skb->data,
704 dma_unmap_len(tx_buffer, len),
705 true);
706 }
707 }
708 }
709
710 /* Print RX Rings Summary */
711 rx_ring_summary:
712 dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
713 pr_info("Queue [NTU] [NTC]\n");
714 for (n = 0; n < adapter->num_rx_queues; n++) {
715 rx_ring = adapter->rx_ring[n];
716 pr_info("%5d %5X %5X\n",
717 n, rx_ring->next_to_use, rx_ring->next_to_clean);
718 }
719
720 /* Print RX Rings */
721 if (!netif_msg_rx_status(adapter))
722 return;
723
724 dev_info(&adapter->pdev->dev, "RX Rings Dump\n");
725
726 /* Receive Descriptor Dump
727  *
728  * Descriptors still owned by hardware are printed in Advanced Receive
729  * "Read" format (packet buffer and header buffer addresses), while
730  * descriptors the hardware has written back ("RWB" lines) show the
731  * completed status words (RSS/checksum info, VLAN, errors, length).
732  * NTU/NTC markers flag the ring's next_to_use/next_to_clean positions.
733  */
771 for (n = 0; n < adapter->num_rx_queues; n++) {
772 rx_ring = adapter->rx_ring[n];
773 pr_info("------------------------------------\n");
774 pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index);
775 pr_info("------------------------------------\n");
776 pr_info("%s%s%s\n",
777 "R [desc] [ PktBuf A0] ",
778 "[ HeadBuf DD] [bi->dma ] [bi->skb ] ",
779 "<-- Adv Rx Read format");
780 pr_info("%s%s%s\n",
781 "RWB[desc] [PcsmIpSHl PtRs] ",
782 "[vl er S cks ln] ---------------- [bi->skb ] ",
783 "<-- Adv Rx Write-Back format");
784
785 for (i = 0; i < rx_ring->count; i++) {
786 const char *ring_desc;
787
788 if (i == rx_ring->next_to_use)
789 ring_desc = " NTU";
790 else if (i == rx_ring->next_to_clean)
791 ring_desc = " NTC";
792 else
793 ring_desc = "";
794
795 rx_buffer_info = &rx_ring->rx_buffer_info[i];
796 rx_desc = IXGBE_RX_DESC(rx_ring, i);
797 u0 = (struct my_u0 *)rx_desc;
798 if (rx_desc->wb.upper.length) {
799
800 pr_info("RWB[0x%03X] %016llX %016llX ---------------- %p%s\n",
801 i,
802 le64_to_cpu((__force __le64)u0->a),
803 le64_to_cpu((__force __le64)u0->b),
804 rx_buffer_info->skb,
805 ring_desc);
806 } else {
807 pr_info("R [0x%03X] %016llX %016llX %016llX %p%s\n",
808 i,
809 le64_to_cpu((__force __le64)u0->a),
810 le64_to_cpu((__force __le64)u0->b),
811 (u64)rx_buffer_info->dma,
812 rx_buffer_info->skb,
813 ring_desc);
814
815 if (netif_msg_pktdata(adapter) &&
816 rx_buffer_info->dma) {
817 print_hex_dump(KERN_INFO, "",
818 DUMP_PREFIX_ADDRESS, 16, 1,
819 page_address(rx_buffer_info->page) +
820 rx_buffer_info->page_offset,
821 ixgbe_rx_bufsz(rx_ring), true);
822 }
823 }
824 }
825 }
826 }
827
828 static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter)
829 {
830 u32 ctrl_ext;
831
832 /* Let firmware take over control of hardware */
833 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
834 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
835 ctrl_ext & ~IXGBE_CTRL_EXT_DRV_LOAD);
836 }
837
838 static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter)
839 {
840 u32 ctrl_ext;
841
842 /* Let firmware know the driver has taken over */
843 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
844 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
845 ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD);
846 }
847
848 /**
849  * ixgbe_set_ivar - set the IVAR registers, mapping interrupt causes to vectors
850  * @adapter: pointer to adapter struct
851  * @direction: 0 for Rx, 1 for Tx, -1 for other causes
852  * @queue: queue to map the corresponding interrupt to
853  * @msix_vector: the vector to map to the corresponding queue
854  *
855  */
856 static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction,
857 u8 queue, u8 msix_vector)
858 {
859 u32 ivar, index;
860 struct ixgbe_hw *hw = &adapter->hw;
861 switch (hw->mac.type) {
862 case ixgbe_mac_82598EB:
863 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
864 if (direction == -1)
865 direction = 0;
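/* on 82598 the IVAR table packs four 8-bit entries per register; Rx queues use entries 0-63 and Tx queues entries 64-127, hence the direction * 64 fold below */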
866 index = (((direction * 64) + queue) >> 2) & 0x1F;
867 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
868 ivar &= ~(0xFF << (8 * (queue & 0x3)));
869 ivar |= (msix_vector << (8 * (queue & 0x3)));
870 IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
871 break;
872 case ixgbe_mac_82599EB:
873 case ixgbe_mac_X540:
874 case ixgbe_mac_X550:
875 case ixgbe_mac_X550EM_x:
876 case ixgbe_mac_x550em_a:
877 if (direction == -1) {
878 /* other causes */
879 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
880 index = ((queue & 1) * 8);
881 ivar = IXGBE_READ_REG(&adapter->hw, IXGBE_IVAR_MISC);
882 ivar &= ~(0xFF << index);
883 ivar |= (msix_vector << index);
884 IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR_MISC, ivar);
885 break;
886 } else {
887 /* tx or rx causes */
888 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
889 index = ((16 * (queue & 1)) + (8 * direction));
890 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(queue >> 1));
891 ivar &= ~(0xFF << index);
892 ivar |= (msix_vector << index);
893 IXGBE_WRITE_REG(hw, IXGBE_IVAR(queue >> 1), ivar);
894 break;
895 }
896 default:
897 break;
898 }
899 }
900
901 void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter,
902 u64 qmask)
903 {
904 u32 mask;
905
906 switch (adapter->hw.mac.type) {
907 case ixgbe_mac_82598EB:
908 mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
909 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
910 break;
911 case ixgbe_mac_82599EB:
912 case ixgbe_mac_X540:
913 case ixgbe_mac_X550:
914 case ixgbe_mac_X550EM_x:
915 case ixgbe_mac_x550em_a:
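/* newer MACs split the 64 queue interrupt bits across EICS_EX(0) (low 32) and EICS_EX(1) (high 32) */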
916 mask = (qmask & 0xFFFFFFFF);
917 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
918 mask = (qmask >> 32);
919 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
920 break;
921 default:
922 break;
923 }
924 }
925
926 static void ixgbe_update_xoff_rx_lfc(struct ixgbe_adapter *adapter)
927 {
928 struct ixgbe_hw *hw = &adapter->hw;
929 struct ixgbe_hw_stats *hwstats = &adapter->stats;
930 int i;
931 u32 data;
932
933 if ((hw->fc.current_mode != ixgbe_fc_full) &&
934 (hw->fc.current_mode != ixgbe_fc_rx_pause))
935 return;
936
937 switch (hw->mac.type) {
938 case ixgbe_mac_82598EB:
939 data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
940 break;
941 default:
942 data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
943 }
944 hwstats->lxoffrxc += data;
945
946
947 if (!data)
948 return;
949
950 for (i = 0; i < adapter->num_tx_queues; i++)
951 clear_bit(__IXGBE_HANG_CHECK_ARMED,
952 &adapter->tx_ring[i]->state);
953
954 for (i = 0; i < adapter->num_xdp_queues; i++)
955 clear_bit(__IXGBE_HANG_CHECK_ARMED,
956 &adapter->xdp_ring[i]->state);
957 }
958
959 static void ixgbe_update_xoff_received(struct ixgbe_adapter *adapter)
960 {
961 struct ixgbe_hw *hw = &adapter->hw;
962 struct ixgbe_hw_stats *hwstats = &adapter->stats;
963 u32 xoff[8] = {0};
964 u8 tc;
965 int i;
966 bool pfc_en = adapter->dcb_cfg.pfc_mode_enable;
967
968 if (adapter->ixgbe_ieee_pfc)
969 pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en);
970
971 if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED) || !pfc_en) {
972 ixgbe_update_xoff_rx_lfc(adapter);
973 return;
974 }
975
976 /* update stats for each tc, only valid with PFC enabled */
977 for (i = 0; i < MAX_TX_PACKET_BUFFERS; i++) {
978 u32 pxoffrxc;
979
980 switch (hw->mac.type) {
981 case ixgbe_mac_82598EB:
982 pxoffrxc = IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
983 break;
984 default:
985 pxoffrxc = IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
986 }
987 hwstats->pxoffrxc[i] += pxoffrxc;
988
989 tc = netdev_get_prio_tc_map(adapter->netdev, i);
990 xoff[tc] += pxoffrxc;
991 }
992
993 /* disarm tx queues that have received xoff frames */
994 for (i = 0; i < adapter->num_tx_queues; i++) {
995 struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
996
997 tc = tx_ring->dcb_tc;
998 if (xoff[tc])
999 clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state);
1000 }
1001
1002 for (i = 0; i < adapter->num_xdp_queues; i++) {
1003 struct ixgbe_ring *xdp_ring = adapter->xdp_ring[i];
1004
1005 tc = xdp_ring->dcb_tc;
1006 if (xoff[tc])
1007 clear_bit(__IXGBE_HANG_CHECK_ARMED, &xdp_ring->state);
1008 }
1009 }
1010
1011 static u64 ixgbe_get_tx_completed(struct ixgbe_ring *ring)
1012 {
1013 return ring->stats.packets;
1014 }
1015
1016 static u64 ixgbe_get_tx_pending(struct ixgbe_ring *ring)
1017 {
1018 unsigned int head, tail;
1019
1020 head = ring->next_to_clean;
1021 tail = ring->next_to_use;
1022
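/* descriptors handed to hardware but not yet cleaned, accounting for ring wrap when next_to_use is behind next_to_clean */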
1023 return ((head <= tail) ? tail : tail + ring->count) - head;
1024 }
1025
1026 static inline bool ixgbe_check_tx_hang(struct ixgbe_ring *tx_ring)
1027 {
1028 u32 tx_done = ixgbe_get_tx_completed(tx_ring);
1029 u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
1030 u32 tx_pending = ixgbe_get_tx_pending(tx_ring);
1031
1032 clear_check_for_tx_hang(tx_ring);
1033
1034 /* Check for a hung queue, but be thorough. This verifies
1035  * that a transmit has been completed since the previous
1036  * check AND there is at least one packet pending. The
1037  * ARMED bit is set to indicate a potential hang. The
1038  * bit is cleared if a pause frame is received to remove
1039  * false hang detection due to PFC or 802.3x frames. By
1040  * requiring this to fail twice we avoid races with
1041  * pfc clearing the ARMED bit and conditions where we
1042  * run the check_tx_hang logic with a transmit completion
1043  * pending but without time to complete it yet.
1044  */
1046 if (tx_done_old == tx_done && tx_pending)
1047 /* make sure it is true for two checks in a row */
1048 return test_and_set_bit(__IXGBE_HANG_CHECK_ARMED,
1049 &tx_ring->state);
1050
1051 tx_ring->tx_stats.tx_done_old = tx_done;
1052
1053 clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state);
1054
1055 return false;
1056 }
1057
1058 /**
1059  * ixgbe_tx_timeout_reset - initiate reset due to Tx timeout
1060  * @adapter: driver private struct
1061  **/
1062 static void ixgbe_tx_timeout_reset(struct ixgbe_adapter *adapter)
1063 {
1064
1065 /* Do the reset outside of interrupt context */
1066 if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
1067 set_bit(__IXGBE_RESET_REQUESTED, &adapter->state);
1068 e_warn(drv, "initiating reset due to tx timeout\n");
1069 ixgbe_service_event_schedule(adapter);
1070 }
1071 }
1072
1073 /**
1074  * ixgbe_tx_maxrate - callback to set the maximum per-queue bitrate
1075  * @netdev: network interface device structure
1076  * @queue_index: Tx queue to set
1077  * @maxrate: desired maximum transmit bitrate
1078  **/
1079 static int ixgbe_tx_maxrate(struct net_device *netdev,
1080 int queue_index, u32 maxrate)
1081 {
1082 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1083 struct ixgbe_hw *hw = &adapter->hw;
1084 u32 bcnrc_val = ixgbe_link_mbps(adapter);
1085
1086 if (!maxrate)
1087 return 0;
1088
1089 /* rate factor = link speed / maxrate, in fixed point with IXGBE_RTTBCNRC_RF_INT_SHIFT fractional bits */
1090 bcnrc_val <<= IXGBE_RTTBCNRC_RF_INT_SHIFT;
1091 bcnrc_val /= maxrate;
1092
1093
1094 bcnrc_val &= IXGBE_RTTBCNRC_RF_INT_MASK |
1095 IXGBE_RTTBCNRC_RF_DEC_MASK;
1096
1097
1098 bcnrc_val |= IXGBE_RTTBCNRC_RS_ENA;
1099
1100 IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, queue_index);
1101 IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val);
1102
1103 return 0;
1104 }
1105
1106 /**
1107  * ixgbe_clean_tx_irq - Reclaim resources after transmit completes
1108  * @q_vector: structure containing interrupt and ring information
1109  * @tx_ring: tx ring to clean
1110  * @napi_budget: Used to determine if we are in netpoll
1111  **/
1112 static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
1113 struct ixgbe_ring *tx_ring, int napi_budget)
1114 {
1115 struct ixgbe_adapter *adapter = q_vector->adapter;
1116 struct ixgbe_tx_buffer *tx_buffer;
1117 union ixgbe_adv_tx_desc *tx_desc;
1118 unsigned int total_bytes = 0, total_packets = 0, total_ipsec = 0;
1119 unsigned int budget = q_vector->tx.work_limit;
1120 unsigned int i = tx_ring->next_to_clean;
1121
1122 if (test_bit(__IXGBE_DOWN, &adapter->state))
1123 return true;
1124
1125 tx_buffer = &tx_ring->tx_buffer_info[i];
1126 tx_desc = IXGBE_TX_DESC(tx_ring, i);
1127 i -= tx_ring->count;
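/* i is tracked as a negative offset from the end of the ring so that the wrap check below reduces to testing for zero */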
1128
1129 do {
1130 union ixgbe_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;
1131
1132 /* if next_to_watch is not set then there is no work pending */
1133 if (!eop_desc)
1134 break;
1135
1136 /* prevent any other reads prior to eop_desc */
1137 smp_rmb();
1138
1139 /* if DD is not set pending work has not been completed */
1140 if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
1141 break;
1142
1143
1144 tx_buffer->next_to_watch = NULL;
1145
1146
1147 total_bytes += tx_buffer->bytecount;
1148 total_packets += tx_buffer->gso_segs;
1149 if (tx_buffer->tx_flags & IXGBE_TX_FLAGS_IPSEC)
1150 total_ipsec++;
1151
1152 /* free the skb (or return the XDP frame) */
1153 if (ring_is_xdp(tx_ring))
1154 xdp_return_frame(tx_buffer->xdpf);
1155 else
1156 napi_consume_skb(tx_buffer->skb, napi_budget);
1157
1158 /* unmap skb header data */
1159 dma_unmap_single(tx_ring->dev,
1160 dma_unmap_addr(tx_buffer, dma),
1161 dma_unmap_len(tx_buffer, len),
1162 DMA_TO_DEVICE);
1163
1164
1165 dma_unmap_len_set(tx_buffer, len, 0);
1166
1167 /* unmap remaining buffers */
1168 while (tx_desc != eop_desc) {
1169 tx_buffer++;
1170 tx_desc++;
1171 i++;
1172 if (unlikely(!i)) {
1173 i -= tx_ring->count;
1174 tx_buffer = tx_ring->tx_buffer_info;
1175 tx_desc = IXGBE_TX_DESC(tx_ring, 0);
1176 }
1177
1178
1179 if (dma_unmap_len(tx_buffer, len)) {
1180 dma_unmap_page(tx_ring->dev,
1181 dma_unmap_addr(tx_buffer, dma),
1182 dma_unmap_len(tx_buffer, len),
1183 DMA_TO_DEVICE);
1184 dma_unmap_len_set(tx_buffer, len, 0);
1185 }
1186 }
1187
1188 /* move us one more past the eop_desc for start of next pkt */
1189 tx_buffer++;
1190 tx_desc++;
1191 i++;
1192 if (unlikely(!i)) {
1193 i -= tx_ring->count;
1194 tx_buffer = tx_ring->tx_buffer_info;
1195 tx_desc = IXGBE_TX_DESC(tx_ring, 0);
1196 }
1197
1198
1199 prefetch(tx_desc);
1200
1201
1202 budget--;
1203 } while (likely(budget));
1204
1205 i += tx_ring->count;
1206 tx_ring->next_to_clean = i;
1207 u64_stats_update_begin(&tx_ring->syncp);
1208 tx_ring->stats.bytes += total_bytes;
1209 tx_ring->stats.packets += total_packets;
1210 u64_stats_update_end(&tx_ring->syncp);
1211 q_vector->tx.total_bytes += total_bytes;
1212 q_vector->tx.total_packets += total_packets;
1213 adapter->tx_ipsec += total_ipsec;
1214
1215 if (check_for_tx_hang(tx_ring) && ixgbe_check_tx_hang(tx_ring)) {
1216
1217 struct ixgbe_hw *hw = &adapter->hw;
1218 e_err(drv, "Detected Tx Unit Hang %s\n"
1219 " Tx Queue <%d>\n"
1220 " TDH, TDT <%x>, <%x>\n"
1221 " next_to_use <%x>\n"
1222 " next_to_clean <%x>\n"
1223 "tx_buffer_info[next_to_clean]\n"
1224 " time_stamp <%lx>\n"
1225 " jiffies <%lx>\n",
1226 ring_is_xdp(tx_ring) ? "(XDP)" : "",
1227 tx_ring->queue_index,
1228 IXGBE_READ_REG(hw, IXGBE_TDH(tx_ring->reg_idx)),
1229 IXGBE_READ_REG(hw, IXGBE_TDT(tx_ring->reg_idx)),
1230 tx_ring->next_to_use, i,
1231 tx_ring->tx_buffer_info[i].time_stamp, jiffies);
1232
1233 if (!ring_is_xdp(tx_ring))
1234 netif_stop_subqueue(tx_ring->netdev,
1235 tx_ring->queue_index);
1236
1237 e_info(probe,
1238 "tx hang %d detected on queue %d, resetting adapter\n",
1239 adapter->tx_timeout_count + 1, tx_ring->queue_index);
1240
1241
1242 ixgbe_tx_timeout_reset(adapter);
1243
1244
1245 return true;
1246 }
1247
1248 if (ring_is_xdp(tx_ring))
1249 return !!budget;
1250
1251 netdev_tx_completed_queue(txring_txq(tx_ring),
1252 total_packets, total_bytes);
1253
1254 #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
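/* restart a stopped queue only once at least two maximally fragmented frames' worth of descriptors are free again */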
1255 if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
1256 (ixgbe_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
1257 /* Make sure that anybody stopping the queue after this
1258  * sees the new next_to_clean.
1259  */
1260 smp_mb();
1261 if (__netif_subqueue_stopped(tx_ring->netdev,
1262 tx_ring->queue_index)
1263 && !test_bit(__IXGBE_DOWN, &adapter->state)) {
1264 netif_wake_subqueue(tx_ring->netdev,
1265 tx_ring->queue_index);
1266 ++tx_ring->tx_stats.restart_queue;
1267 }
1268 }
1269
1270 return !!budget;
1271 }
1272
1273 #ifdef CONFIG_IXGBE_DCA
1274 static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter,
1275 struct ixgbe_ring *tx_ring,
1276 int cpu)
1277 {
1278 struct ixgbe_hw *hw = &adapter->hw;
1279 u32 txctrl = 0;
1280 u16 reg_offset;
1281
1282 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
1283 txctrl = dca3_get_tag(tx_ring->dev, cpu);
1284
1285 switch (hw->mac.type) {
1286 case ixgbe_mac_82598EB:
1287 reg_offset = IXGBE_DCA_TXCTRL(tx_ring->reg_idx);
1288 break;
1289 case ixgbe_mac_82599EB:
1290 case ixgbe_mac_X540:
1291 reg_offset = IXGBE_DCA_TXCTRL_82599(tx_ring->reg_idx);
1292 txctrl <<= IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599;
1293 break;
1294 default:
1295
1296 return;
1297 }
1298
1299 /* We can enable relaxed ordering for reads, but not writes when
1300  * DCA is enabled.  This is due to a known issue in some chipsets
1301  * which will cause the DCA tag to be cleared.
1302  */
1303
1304 txctrl |= IXGBE_DCA_TXCTRL_DESC_RRO_EN |
1305 IXGBE_DCA_TXCTRL_DATA_RRO_EN |
1306 IXGBE_DCA_TXCTRL_DESC_DCA_EN;
1307
1308 IXGBE_WRITE_REG(hw, reg_offset, txctrl);
1309 }
1310
1311 static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
1312 struct ixgbe_ring *rx_ring,
1313 int cpu)
1314 {
1315 struct ixgbe_hw *hw = &adapter->hw;
1316 u32 rxctrl = 0;
1317 u8 reg_idx = rx_ring->reg_idx;
1318
1319 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
1320 rxctrl = dca3_get_tag(rx_ring->dev, cpu);
1321
1322 switch (hw->mac.type) {
1323 case ixgbe_mac_82599EB:
1324 case ixgbe_mac_X540:
1325 rxctrl <<= IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599;
1326 break;
1327 default:
1328 break;
1329 }
1330
1331
1332
1333
1334
1335
1336 rxctrl |= IXGBE_DCA_RXCTRL_DESC_RRO_EN |
1337 IXGBE_DCA_RXCTRL_DATA_DCA_EN |
1338 IXGBE_DCA_RXCTRL_DESC_DCA_EN;
1339
1340 IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(reg_idx), rxctrl);
1341 }
1342
1343 static void ixgbe_update_dca(struct ixgbe_q_vector *q_vector)
1344 {
1345 struct ixgbe_adapter *adapter = q_vector->adapter;
1346 struct ixgbe_ring *ring;
1347 int cpu = get_cpu();
1348
1349 if (q_vector->cpu == cpu)
1350 goto out_no_update;
1351
1352 ixgbe_for_each_ring(ring, q_vector->tx)
1353 ixgbe_update_tx_dca(adapter, ring, cpu);
1354
1355 ixgbe_for_each_ring(ring, q_vector->rx)
1356 ixgbe_update_rx_dca(adapter, ring, cpu);
1357
1358 q_vector->cpu = cpu;
1359 out_no_update:
1360 put_cpu();
1361 }
1362
1363 static void ixgbe_setup_dca(struct ixgbe_adapter *adapter)
1364 {
1365 int i;
1366
1367
1368 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
1369 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL,
1370 IXGBE_DCA_CTRL_DCA_MODE_CB2);
1371 else
1372 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL,
1373 IXGBE_DCA_CTRL_DCA_DISABLE);
1374
1375 for (i = 0; i < adapter->num_q_vectors; i++) {
1376 adapter->q_vector[i]->cpu = -1;
1377 ixgbe_update_dca(adapter->q_vector[i]);
1378 }
1379 }
1380
1381 static int __ixgbe_notify_dca(struct device *dev, void *data)
1382 {
1383 struct ixgbe_adapter *adapter = dev_get_drvdata(dev);
1384 unsigned long event = *(unsigned long *)data;
1385
1386 if (!(adapter->flags & IXGBE_FLAG_DCA_CAPABLE))
1387 return 0;
1388
1389 switch (event) {
1390 case DCA_PROVIDER_ADD:
1391
1392 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
1393 break;
1394 if (dca_add_requester(dev) == 0) {
1395 adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
1396 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL,
1397 IXGBE_DCA_CTRL_DCA_MODE_CB2);
1398 break;
1399 }
1400 /* fall through - requester could not be added, DCA stays disabled */
1401 case DCA_PROVIDER_REMOVE:
1402 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
1403 dca_remove_requester(dev);
1404 adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
1405 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL,
1406 IXGBE_DCA_CTRL_DCA_DISABLE);
1407 }
1408 break;
1409 }
1410
1411 return 0;
1412 }
1413
1414 #endif
1415
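/* RSS hash types whose hash covers the L4 ports; anything else is reported to the stack as an L3 hash */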
1416 #define IXGBE_RSS_L4_TYPES_MASK \
1417 ((1ul << IXGBE_RXDADV_RSSTYPE_IPV4_TCP) | \
1418 (1ul << IXGBE_RXDADV_RSSTYPE_IPV4_UDP) | \
1419 (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_TCP) | \
1420 (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_UDP))
1421
1422 static inline void ixgbe_rx_hash(struct ixgbe_ring *ring,
1423 union ixgbe_adv_rx_desc *rx_desc,
1424 struct sk_buff *skb)
1425 {
1426 u16 rss_type;
1427
1428 if (!(ring->netdev->features & NETIF_F_RXHASH))
1429 return;
1430
1431 rss_type = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.pkt_info) &
1432 IXGBE_RXDADV_RSSTYPE_MASK;
1433
1434 if (!rss_type)
1435 return;
1436
1437 skb_set_hash(skb, le32_to_cpu(rx_desc->wb.lower.hi_dword.rss),
1438 (IXGBE_RSS_L4_TYPES_MASK & (1ul << rss_type)) ?
1439 PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
1440 }
1441
1442 #ifdef IXGBE_FCOE
1443 /**
1444  * ixgbe_rx_is_fcoe - check the rx desc for incoming pkt type
1445  * @ring: structure containing ring specific data
1446  * @rx_desc: advanced rx descriptor
1447  *
1448  * Returns : true if it is FCoE pkt
1449  */
1450 static inline bool ixgbe_rx_is_fcoe(struct ixgbe_ring *ring,
1451 union ixgbe_adv_rx_desc *rx_desc)
1452 {
1453 __le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
1454
1455 return test_bit(__IXGBE_RX_FCOE, &ring->state) &&
1456 ((pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_ETQF_MASK)) ==
1457 (cpu_to_le16(IXGBE_ETQF_FILTER_FCOE <<
1458 IXGBE_RXDADV_PKTTYPE_ETQF_SHIFT)));
1459 }
1460
1461 #endif
1462
1463 /**
1464  * ixgbe_rx_checksum - indicate in skb if hw indicated a good cksum
1465  * @ring: structure containing ring specific data
1466  * @rx_desc: current Rx descriptor; @skb: skb being received and modified
1467  */
1468 static inline void ixgbe_rx_checksum(struct ixgbe_ring *ring,
1469 union ixgbe_adv_rx_desc *rx_desc,
1470 struct sk_buff *skb)
1471 {
1472 __le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
1473 bool encap_pkt = false;
1474
1475 skb_checksum_none_assert(skb);
1476
1477
1478 if (!(ring->netdev->features & NETIF_F_RXCSUM))
1479 return;
1480
1481 /* check for VXLAN and Geneve packets */
1482 if (pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_VXLAN)) {
1483 encap_pkt = true;
1484 skb->encapsulation = 1;
1485 }
1486
1487
1488 if (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_IPCS) &&
1489 ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_IPE)) {
1490 ring->rx_stats.csum_err++;
1491 return;
1492 }
1493
1494 if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_L4CS))
1495 return;
1496
1497 if (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_TCPE)) {
1498 /*
1499  * 82599 errata, UDP frames with a 0 checksum can be marked as
1500  * checksum errors.
1501  */
1502 if ((pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_UDP)) &&
1503 test_bit(__IXGBE_RX_CSUM_UDP_ZERO_ERR, &ring->state))
1504 return;
1505
1506 ring->rx_stats.csum_err++;
1507 return;
1508 }
1509
1510
1511 skb->ip_summed = CHECKSUM_UNNECESSARY;
1512 if (encap_pkt) {
1513 if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_OUTERIPCS))
1514 return;
1515
1516 if (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_OUTERIPER)) {
1517 skb->ip_summed = CHECKSUM_NONE;
1518 return;
1519 }
1520
1521 skb->csum_level = 1;
1522 }
1523 }
1524
1525 static inline unsigned int ixgbe_rx_offset(struct ixgbe_ring *rx_ring)
1526 {
1527 return ring_uses_build_skb(rx_ring) ? IXGBE_SKB_PAD : 0;
1528 }
1529
1530 static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring,
1531 struct ixgbe_rx_buffer *bi)
1532 {
1533 struct page *page = bi->page;
1534 dma_addr_t dma;
1535
1536
1537 if (likely(page))
1538 return true;
1539
1540
1541 page = dev_alloc_pages(ixgbe_rx_pg_order(rx_ring));
1542 if (unlikely(!page)) {
1543 rx_ring->rx_stats.alloc_rx_page_failed++;
1544 return false;
1545 }
1546
1547
1548 dma = dma_map_page_attrs(rx_ring->dev, page, 0,
1549 ixgbe_rx_pg_size(rx_ring),
1550 DMA_FROM_DEVICE,
1551 IXGBE_RX_DMA_ATTR);
1552
1553 /*
1554  * if mapping failed free memory back to system since
1555  * there isn't much point in holding memory we can't use
1556  */
1557 if (dma_mapping_error(rx_ring->dev, dma)) {
1558 __free_pages(page, ixgbe_rx_pg_order(rx_ring));
1559
1560 rx_ring->rx_stats.alloc_rx_page_failed++;
1561 return false;
1562 }
1563
1564 bi->dma = dma;
1565 bi->page = page;
1566 bi->page_offset = ixgbe_rx_offset(rx_ring);
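/* take a large batch of page references up front and track them in pagecnt_bias, so recycling a buffer is a cheap local decrement rather than an atomic get/put per packet */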
1567 page_ref_add(page, USHRT_MAX - 1);
1568 bi->pagecnt_bias = USHRT_MAX;
1569 rx_ring->rx_stats.alloc_rx_page++;
1570
1571 return true;
1572 }
1573
1574
1575
1576
1577
1578
1579 void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count)
1580 {
1581 union ixgbe_adv_rx_desc *rx_desc;
1582 struct ixgbe_rx_buffer *bi;
1583 u16 i = rx_ring->next_to_use;
1584 u16 bufsz;
1585
1586
1587 if (!cleaned_count)
1588 return;
1589
1590 rx_desc = IXGBE_RX_DESC(rx_ring, i);
1591 bi = &rx_ring->rx_buffer_info[i];
1592 i -= rx_ring->count;
1593
1594 bufsz = ixgbe_rx_bufsz(rx_ring);
1595
1596 do {
1597 if (!ixgbe_alloc_mapped_page(rx_ring, bi))
1598 break;
1599
1600 /* sync the buffer for use by the device */
1601 dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
1602 bi->page_offset, bufsz,
1603 DMA_FROM_DEVICE);
1604
1605 /*
1606  * Refresh the desc even if buffer_addrs didn't change
1607  * because each write-back erases this info.
1608  */
1609 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
1610
1611 rx_desc++;
1612 bi++;
1613 i++;
1614 if (unlikely(!i)) {
1615 rx_desc = IXGBE_RX_DESC(rx_ring, 0);
1616 bi = rx_ring->rx_buffer_info;
1617 i -= rx_ring->count;
1618 }
1619
1620 /* clear the length for the next_to_use descriptor */
1621 rx_desc->wb.upper.length = 0;
1622
1623 cleaned_count--;
1624 } while (cleaned_count);
1625
1626 i += rx_ring->count;
1627
1628 if (rx_ring->next_to_use != i) {
1629 rx_ring->next_to_use = i;
1630
1631
1632 rx_ring->next_to_alloc = i;
1633
1634 /* Force memory writes to complete before letting h/w
1635  * know there are new descriptors to fetch.  (Only
1636  * applicable for weak-ordered memory model archs,
1637  * such as IA-64).
1638  */
1639 wmb();
1640 writel(i, rx_ring->tail);
1641 }
1642 }
1643
1644 static void ixgbe_set_rsc_gso_size(struct ixgbe_ring *ring,
1645 struct sk_buff *skb)
1646 {
1647 u16 hdr_len = skb_headlen(skb);
1648
1649
1650 skb_shinfo(skb)->gso_size = DIV_ROUND_UP((skb->len - hdr_len),
1651 IXGBE_CB(skb)->append_cnt);
1652 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
1653 }
1654
1655 static void ixgbe_update_rsc_stats(struct ixgbe_ring *rx_ring,
1656 struct sk_buff *skb)
1657 {
1658
1659 if (!IXGBE_CB(skb)->append_cnt)
1660 return;
1661
1662 rx_ring->rx_stats.rsc_count += IXGBE_CB(skb)->append_cnt;
1663 rx_ring->rx_stats.rsc_flush++;
1664
1665 ixgbe_set_rsc_gso_size(rx_ring, skb);
1666
1667
1668 IXGBE_CB(skb)->append_cnt = 0;
1669 }
1670
1671 /**
1672  * ixgbe_process_skb_fields - Populate skb header fields from Rx descriptor
1673  * @rx_ring: rx descriptor ring packet is being transacted on
1674  * @rx_desc: pointer to the EOP Rx descriptor
1675  * @skb: pointer to current skb being populated
1676  *
1677  * This function checks the ring, descriptor, and packet information in
1678  * order to populate the hash, checksum, VLAN, timestamp, protocol, and
1679  * other fields within the skb.
1680  **/
1681 void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
1682 union ixgbe_adv_rx_desc *rx_desc,
1683 struct sk_buff *skb)
1684 {
1685 struct net_device *dev = rx_ring->netdev;
1686 u32 flags = rx_ring->q_vector->adapter->flags;
1687
1688 ixgbe_update_rsc_stats(rx_ring, skb);
1689
1690 ixgbe_rx_hash(rx_ring, rx_desc, skb);
1691
1692 ixgbe_rx_checksum(rx_ring, rx_desc, skb);
1693
1694 if (unlikely(flags & IXGBE_FLAG_RX_HWTSTAMP_ENABLED))
1695 ixgbe_ptp_rx_hwtstamp(rx_ring, rx_desc, skb);
1696
1697 if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
1698 ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) {
1699 u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan);
1700 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
1701 }
1702
1703 if (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_SECP))
1704 ixgbe_ipsec_rx(rx_ring, rx_desc, skb);
1705
1706
1707 if (netif_is_ixgbe(dev))
1708 skb_record_rx_queue(skb, rx_ring->queue_index);
1709 else
1710 macvlan_count_rx(netdev_priv(dev), skb->len + ETH_HLEN, true,
1711 false);
1712
1713 skb->protocol = eth_type_trans(skb, dev);
1714 }
1715
1716 void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector,
1717 struct sk_buff *skb)
1718 {
1719 napi_gro_receive(&q_vector->napi, skb);
1720 }
1721
1722 /**
1723  * ixgbe_is_non_eop - process handling of non-EOP buffers
1724  * @rx_ring: Rx ring being processed
1725  * @rx_desc: Rx descriptor for current buffer
1726  * @skb: Current socket buffer containing buffer in progress
1727  *
1728  * This function updates next to clean.  If the buffer is an EOP buffer
1729  * this function exits returning false, otherwise it will place the
1730  * sk_buff in the next buffer to be chained and return true indicating
1731  * that this is in fact a non-EOP buffer.
1732  **/
1733 static bool ixgbe_is_non_eop(struct ixgbe_ring *rx_ring,
1734 union ixgbe_adv_rx_desc *rx_desc,
1735 struct sk_buff *skb)
1736 {
1737 u32 ntc = rx_ring->next_to_clean + 1;
1738
1739
1740 ntc = (ntc < rx_ring->count) ? ntc : 0;
1741 rx_ring->next_to_clean = ntc;
1742
1743 prefetch(IXGBE_RX_DESC(rx_ring, ntc));
1744
1745
1746 if (ring_is_rsc_enabled(rx_ring)) {
1747 __le32 rsc_enabled = rx_desc->wb.lower.lo_dword.data &
1748 cpu_to_le32(IXGBE_RXDADV_RSCCNT_MASK);
1749
1750 if (unlikely(rsc_enabled)) {
1751 u32 rsc_cnt = le32_to_cpu(rsc_enabled);
1752
1753 rsc_cnt >>= IXGBE_RXDADV_RSCCNT_SHIFT;
1754 IXGBE_CB(skb)->append_cnt += rsc_cnt - 1;
1755
1756
1757 ntc = le32_to_cpu(rx_desc->wb.upper.status_error);
1758 ntc &= IXGBE_RXDADV_NEXTP_MASK;
1759 ntc >>= IXGBE_RXDADV_NEXTP_SHIFT;
1760 }
1761 }
1762
1763
1764 if (likely(ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)))
1765 return false;
1766
1767
1768 rx_ring->rx_buffer_info[ntc].skb = skb;
1769 rx_ring->rx_stats.non_eop_descs++;
1770
1771 return true;
1772 }
1773
1774 /**
1775  * ixgbe_pull_tail - ixgbe specific version of skb_pull_tail
1776  * @rx_ring: rx descriptor ring packet is being transacted on
1777  * @skb: pointer to current skb being adjusted
1778  *
1779  * This function is an ixgbe specific version of __pskb_pull_tail.  The
1780  * main difference between this version and the original function is that
1781  * this function can make several assumptions about the state of things
1782  * that allow for significant optimizations versus the standard function.
1783  * As a result we can do things like drop a frag and maintain an accurate
1784  * truesize for the skb.
1785  */
1786 static void ixgbe_pull_tail(struct ixgbe_ring *rx_ring,
1787 struct sk_buff *skb)
1788 {
1789 skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
1790 unsigned char *va;
1791 unsigned int pull_len;
1792
1793
1794
1795
1796
1797
1798 va = skb_frag_address(frag);
1799
1800
1801
1802
1803
1804 pull_len = eth_get_headlen(skb->dev, va, IXGBE_RX_HDR_SIZE);
1805
1806
1807 skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
1808
1809
1810 skb_frag_size_sub(frag, pull_len);
1811 skb_frag_off_add(frag, pull_len);
1812 skb->data_len -= pull_len;
1813 skb->tail += pull_len;
1814 }
1815
1816
1817
1818
1819
1820
1821
1822
1823
1824
1825
1826 static void ixgbe_dma_sync_frag(struct ixgbe_ring *rx_ring,
1827 struct sk_buff *skb)
1828 {
1829 if (ring_uses_build_skb(rx_ring)) {
1830 unsigned long offset = (unsigned long)(skb->data) & ~PAGE_MASK;
1831
1832 dma_sync_single_range_for_cpu(rx_ring->dev,
1833 IXGBE_CB(skb)->dma,
1834 offset,
1835 skb_headlen(skb),
1836 DMA_FROM_DEVICE);
1837 } else {
1838 skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
1839
1840 dma_sync_single_range_for_cpu(rx_ring->dev,
1841 IXGBE_CB(skb)->dma,
1842 skb_frag_off(frag),
1843 skb_frag_size(frag),
1844 DMA_FROM_DEVICE);
1845 }
1846
1847
1848 if (unlikely(IXGBE_CB(skb)->page_released)) {
1849 dma_unmap_page_attrs(rx_ring->dev, IXGBE_CB(skb)->dma,
1850 ixgbe_rx_pg_size(rx_ring),
1851 DMA_FROM_DEVICE,
1852 IXGBE_RX_DMA_ATTR);
1853 }
1854 }
1855
1856 /**
1857  * ixgbe_cleanup_headers - Correct corrupted or empty headers
1858  * @rx_ring: rx descriptor ring packet is being transacted on
1859  * @rx_desc: pointer to the EOP Rx descriptor
1860  * @skb: pointer to current skb being fixed
1861  *
1862  * Check if the skb is valid: XDP packets come through as error pointers
1863  * and hardware-reported frame errors cause the buffer to be dropped
1864  * (unless RXALL is enabled). Frames whose data still lives entirely in
1865  * the page fragment get their headers pulled into the linear area, and
1866  * short non-FCoE frames are padded to the minimum Ethernet length.
1867  *
1868  * Returns true if an error was encountered and skb was freed.
1869  **/
1878 bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring,
1879 union ixgbe_adv_rx_desc *rx_desc,
1880 struct sk_buff *skb)
1881 {
1882 struct net_device *netdev = rx_ring->netdev;
1883
1884 /* XDP packets use error pointer so abort at this point */
1885 if (IS_ERR(skb))
1886 return true;
1887
1888
1889
1890
1891 if (!netdev ||
1892 (unlikely(ixgbe_test_staterr(rx_desc,
1893 IXGBE_RXDADV_ERR_FRAME_ERR_MASK) &&
1894 !(netdev->features & NETIF_F_RXALL)))) {
1895 dev_kfree_skb_any(skb);
1896 return true;
1897 }
1898
1899
1900 if (!skb_headlen(skb))
1901 ixgbe_pull_tail(rx_ring, skb);
1902
1903 #ifdef IXGBE_FCOE
1904
1905 if (ixgbe_rx_is_fcoe(rx_ring, rx_desc))
1906 return false;
1907
1908 #endif
1909 /* if eth_skb_pad returns an error the skb was freed */
1910 if (eth_skb_pad(skb))
1911 return true;
1912
1913 return false;
1914 }
1915
1916 /**
1917  * ixgbe_reuse_rx_page - page flip buffer and store it back on the ring
1918  * @rx_ring: rx descriptor ring to store buffers on
1919  * @old_buff: donor buffer to have page reused
1920  *
1921  * Synchronizes page for reuse by the adapter
1922  **/
1923 static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring,
1924 struct ixgbe_rx_buffer *old_buff)
1925 {
1926 struct ixgbe_rx_buffer *new_buff;
1927 u16 nta = rx_ring->next_to_alloc;
1928
1929 new_buff = &rx_ring->rx_buffer_info[nta];
1930
1931
1932 nta++;
1933 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
1934
1935 /* Transfer page from old buffer to new buffer.
1936  * Move each member individually to avoid possible store
1937  * forwarding stalls and unnecessary copy of skb.
1938  */
1939 new_buff->dma = old_buff->dma;
1940 new_buff->page = old_buff->page;
1941 new_buff->page_offset = old_buff->page_offset;
1942 new_buff->pagecnt_bias = old_buff->pagecnt_bias;
1943 }
1944
1945 static inline bool ixgbe_page_is_reserved(struct page *page)
1946 {
1947 return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
1948 }
1949
1950 static bool ixgbe_can_reuse_rx_page(struct ixgbe_rx_buffer *rx_buffer)
1951 {
1952 unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
1953 struct page *page = rx_buffer->page;
1954
1955 /* avoid re-using remote and pfmemalloc pages */
1956 if (unlikely(ixgbe_page_is_reserved(page)))
1957 return false;
1958
1959 #if (PAGE_SIZE < 8192)
1960 /* if we are only owner of page we can reuse it */
1961 if (unlikely((page_ref_count(page) - pagecnt_bias) > 1))
1962 return false;
1963 #else
1964 /* The last offset is a bit aggressive in that we assume the
1965  * worst case of FCoE being enabled and using a 3K buffer.
1966  * However this should have minimal impact as the 1K extra is
1967  * still less than one buffer in size.
1968  */
1969 #define IXGBE_LAST_OFFSET \
1970 (SKB_WITH_OVERHEAD(PAGE_SIZE) - IXGBE_RXBUFFER_3K)
1971 if (rx_buffer->page_offset > IXGBE_LAST_OFFSET)
1972 return false;
1973 #endif
1974
1975 /* If we have drained the page fragment pool we need to update
1976  * the pagecnt_bias and page count so that we fully restock the
1977  * number of references the driver holds.
1978  */
1979 if (unlikely(pagecnt_bias == 1)) {
1980 page_ref_add(page, USHRT_MAX - 1);
1981 rx_buffer->pagecnt_bias = USHRT_MAX;
1982 }
1983
1984 return true;
1985 }
1986
1987 /**
1988  * ixgbe_add_rx_frag - Add contents of Rx buffer to sk_buff
1989  * @rx_ring: rx descriptor ring to transact packets on
1990  * @rx_buffer: buffer containing page to add
1991  * @skb: sk_buff to place the data into
1992  * @size: size of data in rx_buffer
1993  *
1994  * This function attaches the data in rx_buffer->page to the skb as a
1995  * page fragment, then updates the buffer's page offset: on small pages
1996  * the offset flips between the two halves of the page, on larger pages
1997  * it advances past the buffer just consumed.
1998  **/
2002 static void ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring,
2003 struct ixgbe_rx_buffer *rx_buffer,
2004 struct sk_buff *skb,
2005 unsigned int size)
2006 {
2007 #if (PAGE_SIZE < 8192)
2008 unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2;
2009 #else
2010 unsigned int truesize = ring_uses_build_skb(rx_ring) ?
2011 SKB_DATA_ALIGN(IXGBE_SKB_PAD + size) :
2012 SKB_DATA_ALIGN(size);
2013 #endif
2014 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
2015 rx_buffer->page_offset, size, truesize);
2016 #if (PAGE_SIZE < 8192)
2017 rx_buffer->page_offset ^= truesize;
2018 #else
2019 rx_buffer->page_offset += truesize;
2020 #endif
2021 }
2022
2023 static struct ixgbe_rx_buffer *ixgbe_get_rx_buffer(struct ixgbe_ring *rx_ring,
2024 union ixgbe_adv_rx_desc *rx_desc,
2025 struct sk_buff **skb,
2026 const unsigned int size)
2027 {
2028 struct ixgbe_rx_buffer *rx_buffer;
2029
2030 rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
2031 prefetchw(rx_buffer->page);
2032 *skb = rx_buffer->skb;
2033
2034 /* Delay unmapping of the first packet. It carries the header
2035  * information, and HW may still be accessing the header after
2036  * the writeback. Only unmap it when EOP is reached.
2037  */
2038 if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)) {
2039 if (!*skb)
2040 goto skip_sync;
2041 } else {
2042 if (*skb)
2043 ixgbe_dma_sync_frag(rx_ring, *skb);
2044 }
2045
2046 /* we are reusing so sync this buffer for CPU use */
2047 dma_sync_single_range_for_cpu(rx_ring->dev,
2048 rx_buffer->dma,
2049 rx_buffer->page_offset,
2050 size,
2051 DMA_FROM_DEVICE);
2052 skip_sync:
2053 rx_buffer->pagecnt_bias--;
2054
2055 return rx_buffer;
2056 }
2057
2058 static void ixgbe_put_rx_buffer(struct ixgbe_ring *rx_ring,
2059 struct ixgbe_rx_buffer *rx_buffer,
2060 struct sk_buff *skb)
2061 {
2062 if (ixgbe_can_reuse_rx_page(rx_buffer)) {
2063
2064 ixgbe_reuse_rx_page(rx_ring, rx_buffer);
2065 } else {
2066 if (!IS_ERR(skb) && IXGBE_CB(skb)->dma == rx_buffer->dma) {
2067
2068 IXGBE_CB(skb)->page_released = true;
2069 } else {
2070
2071 dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
2072 ixgbe_rx_pg_size(rx_ring),
2073 DMA_FROM_DEVICE,
2074 IXGBE_RX_DMA_ATTR);
2075 }
2076 __page_frag_cache_drain(rx_buffer->page,
2077 rx_buffer->pagecnt_bias);
2078 }
2079
2080 /* clear contents of rx_buffer */
2081 rx_buffer->page = NULL;
2082 rx_buffer->skb = NULL;
2083 }
2084
2085 static struct sk_buff *ixgbe_construct_skb(struct ixgbe_ring *rx_ring,
2086 struct ixgbe_rx_buffer *rx_buffer,
2087 struct xdp_buff *xdp,
2088 union ixgbe_adv_rx_desc *rx_desc)
2089 {
2090 unsigned int size = xdp->data_end - xdp->data;
2091 #if (PAGE_SIZE < 8192)
2092 unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2;
2093 #else
2094 unsigned int truesize = SKB_DATA_ALIGN(xdp->data_end -
2095 xdp->data_hard_start);
2096 #endif
2097 struct sk_buff *skb;
2098
2099 /* prefetch first cache line of first page */
2100 prefetch(xdp->data);
2101 #if L1_CACHE_BYTES < 128
2102 prefetch(xdp->data + L1_CACHE_BYTES);
2103 #endif
2104
2105 /* Note, we get here by enabling legacy-rx via:
2106 *
2107 *    ethtool --set-priv-flags <dev> legacy-rx on
2108 *
2109 * In this mode the skb header is allocated from the NAPI cache
2110 * below, and the packet data is either attached to the skb as a
2111 * page fragment (frames larger than IXGBE_RX_HDR_SIZE) or copied
2112 * into the skb in full, in which case the buffer reference is
2113 * handed straight back to the ring.
2114 *
2115 * For fragmented skbs ixgbe_pull_tail() later moves the protocol
2116 * headers into the linear area so the stack can parse them
2117 * directly.
2118 */
2119
2120 /* allocate a skb to store the frags */
2121 skb = napi_alloc_skb(&rx_ring->q_vector->napi, IXGBE_RX_HDR_SIZE);
2122 if (unlikely(!skb))
2123 return NULL;
2124
2125 if (size > IXGBE_RX_HDR_SIZE) {
2126 if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP))
2127 IXGBE_CB(skb)->dma = rx_buffer->dma;
2128
2129 skb_add_rx_frag(skb, 0, rx_buffer->page,
2130 xdp->data - page_address(rx_buffer->page),
2131 size, truesize);
2132 #if (PAGE_SIZE < 8192)
2133 rx_buffer->page_offset ^= truesize;
2134 #else
2135 rx_buffer->page_offset += truesize;
2136 #endif
2137 } else {
2138 memcpy(__skb_put(skb, size),
2139 xdp->data, ALIGN(size, sizeof(long)));
2140 rx_buffer->pagecnt_bias++;
2141 }
2142
2143 return skb;
2144 }
2145
2146 static struct sk_buff *ixgbe_build_skb(struct ixgbe_ring *rx_ring,
2147 struct ixgbe_rx_buffer *rx_buffer,
2148 struct xdp_buff *xdp,
2149 union ixgbe_adv_rx_desc *rx_desc)
2150 {
2151 unsigned int metasize = xdp->data - xdp->data_meta;
2152 #if (PAGE_SIZE < 8192)
2153 unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2;
2154 #else
2155 unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
2156 SKB_DATA_ALIGN(xdp->data_end -
2157 xdp->data_hard_start);
2158 #endif
2159 struct sk_buff *skb;
2160
2161 /* Prefetch first cache line of first page. If xdp->data_meta
2162 * is unused, this points exactly at xdp->data, otherwise we
2163 * likely have a consumer accessing the first few bytes of meta
2164 * data, and then the actual data.
2165 */
2166 prefetch(xdp->data_meta);
2167 #if L1_CACHE_BYTES < 128
2168 prefetch(xdp->data_meta + L1_CACHE_BYTES);
2169 #endif
2170
2171 /* build an skb around the page buffer */
2172 skb = build_skb(xdp->data_hard_start, truesize);
2173 if (unlikely(!skb))
2174 return NULL;
2175
2176 /* update pointers within the skb to store the data */
2177 skb_reserve(skb, xdp->data - xdp->data_hard_start);
2178 __skb_put(skb, xdp->data_end - xdp->data);
2179 if (metasize)
2180 skb_metadata_set(skb, metasize);
2181
2182 /* record DMA address if this is the start of a chain of buffers */
2183 if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP))
2184 IXGBE_CB(skb)->dma = rx_buffer->dma;
2185
2186 /* update buffer offset */
2187 #if (PAGE_SIZE < 8192)
2188 rx_buffer->page_offset ^= truesize;
2189 #else
2190 rx_buffer->page_offset += truesize;
2191 #endif
2192
2193 return skb;
2194 }
2195
2196 static struct sk_buff *ixgbe_run_xdp(struct ixgbe_adapter *adapter,
2197 struct ixgbe_ring *rx_ring,
2198 struct xdp_buff *xdp)
2199 {
2200 int err, result = IXGBE_XDP_PASS;
2201 struct bpf_prog *xdp_prog;
2202 struct xdp_frame *xdpf;
2203 u32 act;
2204
2205 rcu_read_lock();
2206 xdp_prog = READ_ONCE(rx_ring->xdp_prog);
2207
2208 if (!xdp_prog)
2209 goto xdp_out;
2210
2211 prefetchw(xdp->data_hard_start); /* xdp_frame write */
2212
2213 act = bpf_prog_run_xdp(xdp_prog, xdp);
2214 switch (act) {
2215 case XDP_PASS:
2216 break;
2217 case XDP_TX:
2218 xdpf = convert_to_xdp_frame(xdp);
2219 if (unlikely(!xdpf)) {
2220 result = IXGBE_XDP_CONSUMED;
2221 break;
2222 }
2223 result = ixgbe_xmit_xdp_ring(adapter, xdpf);
2224 break;
2225 case XDP_REDIRECT:
2226 err = xdp_do_redirect(adapter->netdev, xdp, xdp_prog);
2227 if (!err)
2228 result = IXGBE_XDP_REDIR;
2229 else
2230 result = IXGBE_XDP_CONSUMED;
2231 break;
2232 default:
2233 bpf_warn_invalid_xdp_action(act);
2234 /* fallthrough */
2235 case XDP_ABORTED:
2236 trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
2237 /* fallthrough */
2238 case XDP_DROP:
2239 result = IXGBE_XDP_CONSUMED;
2240 break;
2241 }
2242 xdp_out:
2243 rcu_read_unlock();
2244 return ERR_PTR(-result);
2245 }
2246
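/* advance (or flip) page_offset past the data just handed to XDP_TX or
 * XDP_REDIRECT so the half of the page still in flight with the XDP
 * frame is not reused by hardware
 */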
2247 static void ixgbe_rx_buffer_flip(struct ixgbe_ring *rx_ring,
2248 struct ixgbe_rx_buffer *rx_buffer,
2249 unsigned int size)
2250 {
2251 #if (PAGE_SIZE < 8192)
2252 unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2;
2253
2254 rx_buffer->page_offset ^= truesize;
2255 #else
2256 unsigned int truesize = ring_uses_build_skb(rx_ring) ?
2257 SKB_DATA_ALIGN(IXGBE_SKB_PAD + size) :
2258 SKB_DATA_ALIGN(size);
2259
2260 rx_buffer->page_offset += truesize;
2261 #endif
2262 }
2263
2264 /**
2265 * ixgbe_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
2266 * @q_vector: structure containing interrupt and ring information
2267 * @rx_ring: rx descriptor ring to transact packets on
2268 * @budget: Total limit on number of packets to process
2269 *
2270 * This function provides a "bounce buffer" approach to Rx interrupt
2271 * processing.  Completed descriptors are walked, any attached XDP program
2272 * is run first, then packets destined for the stack are placed in sk_buffs
2273 * while the half-page Rx buffers are recycled or released back to the ring.
2274 *
2275 * Returns amount of work completed
2276 **/
2277 static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
2278 struct ixgbe_ring *rx_ring,
2279 const int budget)
2280 {
2281 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
2282 struct ixgbe_adapter *adapter = q_vector->adapter;
2283 #ifdef IXGBE_FCOE
2284 int ddp_bytes;
2285 unsigned int mss = 0;
2286 #endif
2287 u16 cleaned_count = ixgbe_desc_unused(rx_ring);
2288 unsigned int xdp_xmit = 0;
2289 struct xdp_buff xdp;
2290
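/* a single xdp_buff is reused for every descriptor processed in this
 * poll; only the data pointers set inside the loop change per packet
 */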
2291 xdp.rxq = &rx_ring->xdp_rxq;
2292
2293 while (likely(total_rx_packets < budget)) {
2294 union ixgbe_adv_rx_desc *rx_desc;
2295 struct ixgbe_rx_buffer *rx_buffer;
2296 struct sk_buff *skb;
2297 unsigned int size;
2298
2299 /* return some buffers to hardware, one at a time is too slow */
2300 if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
2301 ixgbe_alloc_rx_buffers(rx_ring, cleaned_count);
2302 cleaned_count = 0;
2303 }
2304
2305 rx_desc = IXGBE_RX_DESC(rx_ring, rx_ring->next_to_clean);
2306 size = le16_to_cpu(rx_desc->wb.upper.length);
2307 if (!size)
2308 break;
2309
2310 /* This memory barrier is needed to keep us from reading
2311 * any other fields out of the rx_desc until we know the
2312 * descriptor has been written back
2313 */
2314 dma_rmb();
2315
2316 rx_buffer = ixgbe_get_rx_buffer(rx_ring, rx_desc, &skb, size);
2317
2318 /* retrieve a buffer from the ring */
2319 if (!skb) {
2320 xdp.data = page_address(rx_buffer->page) +
2321 rx_buffer->page_offset;
2322 xdp.data_meta = xdp.data;
2323 xdp.data_hard_start = xdp.data -
2324 ixgbe_rx_offset(rx_ring);
2325 xdp.data_end = xdp.data + size;
2326
2327 skb = ixgbe_run_xdp(adapter, rx_ring, &xdp);
2328 }
2329
2330 if (IS_ERR(skb)) {
2331 unsigned int xdp_res = -PTR_ERR(skb);
2332
2333 if (xdp_res & (IXGBE_XDP_TX | IXGBE_XDP_REDIR)) {
2334 xdp_xmit |= xdp_res;
2335 ixgbe_rx_buffer_flip(rx_ring, rx_buffer, size);
2336 } else {
2337 rx_buffer->pagecnt_bias++;
2338 }
2339 total_rx_packets++;
2340 total_rx_bytes += size;
2341 } else if (skb) {
2342 ixgbe_add_rx_frag(rx_ring, rx_buffer, skb, size);
2343 } else if (ring_uses_build_skb(rx_ring)) {
2344 skb = ixgbe_build_skb(rx_ring, rx_buffer,
2345 &xdp, rx_desc);
2346 } else {
2347 skb = ixgbe_construct_skb(rx_ring, rx_buffer,
2348 &xdp, rx_desc);
2349 }
2350
2351 /* exit if we failed to retrieve a buffer */
2352 if (!skb) {
2353 rx_ring->rx_stats.alloc_rx_buff_failed++;
2354 rx_buffer->pagecnt_bias++;
2355 break;
2356 }
2357
2358 ixgbe_put_rx_buffer(rx_ring, rx_buffer, skb);
2359 cleaned_count++;
2360
2361 /* place incomplete frames back on ring for completion */
2362 if (ixgbe_is_non_eop(rx_ring, rx_desc, skb))
2363 continue;
2364
2365 /* verify the packet layout is correct */
2366 if (ixgbe_cleanup_headers(rx_ring, rx_desc, skb))
2367 continue;
2368
2369
2370 total_rx_bytes += skb->len;
2371
2372 /* populate checksum, timestamp, VLAN, and protocol */
2373 ixgbe_process_skb_fields(rx_ring, rx_desc, skb);
2374
2375 #ifdef IXGBE_FCOE
2376
2377 if (ixgbe_rx_is_fcoe(rx_ring, rx_desc)) {
2378 ddp_bytes = ixgbe_fcoe_ddp(adapter, rx_desc, skb);
2379
2380 if (ddp_bytes > 0) {
2381 if (!mss) {
2382 mss = rx_ring->netdev->mtu -
2383 sizeof(struct fcoe_hdr) -
2384 sizeof(struct fc_frame_header) -
2385 sizeof(struct fcoe_crc_eof);
2386 if (mss > 512)
2387 mss &= ~511;
2388 }
2389 total_rx_bytes += ddp_bytes;
2390 total_rx_packets += DIV_ROUND_UP(ddp_bytes,
2391 mss);
2392 }
2393 if (!ddp_bytes) {
2394 dev_kfree_skb_any(skb);
2395 continue;
2396 }
2397 }
2398
2399 #endif
2400 ixgbe_rx_skb(q_vector, skb);
2401
2402
2403 total_rx_packets++;
2404 }
2405
2406 if (xdp_xmit & IXGBE_XDP_REDIR)
2407 xdp_do_flush_map();
2408
2409 if (xdp_xmit & IXGBE_XDP_TX) {
2410 struct ixgbe_ring *ring = adapter->xdp_ring[smp_processor_id()];
2411
2412 /* Force memory writes to complete before letting h/w
2413 * know there are new descriptors to fetch.
2414 */
2415 wmb();
2416 writel(ring->next_to_use, ring->tail);
2417 }
2418
2419 u64_stats_update_begin(&rx_ring->syncp);
2420 rx_ring->stats.packets += total_rx_packets;
2421 rx_ring->stats.bytes += total_rx_bytes;
2422 u64_stats_update_end(&rx_ring->syncp);
2423 q_vector->rx.total_packets += total_rx_packets;
2424 q_vector->rx.total_bytes += total_rx_bytes;
2425
2426 return total_rx_packets;
2427 }
2428
2429 /**
2430 * ixgbe_configure_msix - Configure MSI-X hardware
2431 * @adapter: board private structure
2432 *
2433 * ixgbe_configure_msix sets up the hardware to properly generate MSI-X
2434 * interrupts.
2435 **/
2436 static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
2437 {
2438 struct ixgbe_q_vector *q_vector;
2439 int v_idx;
2440 u32 mask;
2441
2442
2443 if (adapter->num_vfs > 32) {
2444 u32 eitrsel = BIT(adapter->num_vfs - 32) - 1;
2445 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, eitrsel);
2446 }
2447
2448 /*
2449 * Populate the IVAR table and set the ITR values to the
2450 * corresponding register.
2451 */
2452 for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
2453 struct ixgbe_ring *ring;
2454 q_vector = adapter->q_vector[v_idx];
2455
2456 ixgbe_for_each_ring(ring, q_vector->rx)
2457 ixgbe_set_ivar(adapter, 0, ring->reg_idx, v_idx);
2458
2459 ixgbe_for_each_ring(ring, q_vector->tx)
2460 ixgbe_set_ivar(adapter, 1, ring->reg_idx, v_idx);
2461
2462 ixgbe_write_eitr(q_vector);
2463 }
2464
2465 switch (adapter->hw.mac.type) {
2466 case ixgbe_mac_82598EB:
2467 ixgbe_set_ivar(adapter, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX,
2468 v_idx);
2469 break;
2470 case ixgbe_mac_82599EB:
2471 case ixgbe_mac_X540:
2472 case ixgbe_mac_X550:
2473 case ixgbe_mac_X550EM_x:
2474 case ixgbe_mac_x550em_a:
2475 ixgbe_set_ivar(adapter, -1, 1, v_idx);
2476 break;
2477 default:
2478 break;
2479 }
2480 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), 1950);
2481
2482 /* set up to autoclear timer, and the vectors */
2483 mask = IXGBE_EIMS_ENABLE_MASK;
2484 mask &= ~(IXGBE_EIMS_OTHER |
2485 IXGBE_EIMS_MAILBOX |
2486 IXGBE_EIMS_LSC);
2487
2488 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask);
2489 }
2490
2491 /**
2492 * ixgbe_update_itr - update the dynamic ITR value based on statistics
2493 * @q_vector: structure containing interrupt and ring information
2494 * @ring_container: structure containing ring performance data
2495 *
2496 * Stores a new ITR value based on packets and byte
2497 * counts during the last interrupt.  The advantage of per interrupt
2498 * computation is faster updates and more accurate ITR for the current
2499 * traffic pattern.  Constants in this function were computed
2500 * based on theoretical maximum wire speed and thresholds were set based
2501 * on testing data as well as attempting to minimize response time
2502 * while increasing bulk throughput.
2503 **/
2504 static void ixgbe_update_itr(struct ixgbe_q_vector *q_vector,
2505 struct ixgbe_ring_container *ring_container)
2506 {
2507 unsigned int itr = IXGBE_ITR_ADAPTIVE_MIN_USECS |
2508 IXGBE_ITR_ADAPTIVE_LATENCY;
2509 unsigned int avg_wire_size, packets, bytes;
2510 unsigned long next_update = jiffies;
2511
2512 /* If we don't have any rings just leave ourselves set for maximum
2513 * possible latency so we take ourselves out of the equation.
2514 */
2515 if (!ring_container->ring)
2516 return;
2517
2518 /* If we didn't update within up to 1 - 2 jiffies we can assume
2519 * that either packets are coming in so slow there hasn't been
2520 * any work, or that there is so much work that NAPI is dealing
2521 * with interrupt moderation and we don't need to do anything.
2522 */
2523 if (time_after(next_update, ring_container->next_update))
2524 goto clear_counts;
2525
2526 packets = ring_container->total_packets;
2527
2528
2529
2530
2531
2532
2533
2534
2535
2536 if (!packets) {
2537 itr = (q_vector->itr >> 2) + IXGBE_ITR_ADAPTIVE_MIN_INC;
2538 if (itr > IXGBE_ITR_ADAPTIVE_MAX_USECS)
2539 itr = IXGBE_ITR_ADAPTIVE_MAX_USECS;
2540 itr += ring_container->itr & IXGBE_ITR_ADAPTIVE_LATENCY;
2541 goto clear_counts;
2542 }
2543
2544 bytes = ring_container->total_bytes;
2545
2546 /* If packets are less than 4 or bytes are less than 9000 assume
2547 * insufficient data to use bulk rate limiting approach. We are
2548 * likely latency driven.
2549 */
2550 if (packets < 4 && bytes < 9000) {
2551 itr = IXGBE_ITR_ADAPTIVE_LATENCY;
2552 goto adjust_by_size;
2553 }
2554
2555
2556
2557
2558
2559 if (packets < 48) {
2560 itr = (q_vector->itr >> 2) + IXGBE_ITR_ADAPTIVE_MIN_INC;
2561 if (itr > IXGBE_ITR_ADAPTIVE_MAX_USECS)
2562 itr = IXGBE_ITR_ADAPTIVE_MAX_USECS;
2563 goto clear_counts;
2564 }
2565
2566
2567
2568
2569 if (packets < 96) {
2570 itr = q_vector->itr >> 2;
2571 goto clear_counts;
2572 }
2573
2574
2575
2576
2577
2578 if (packets < 256) {
2579 itr = q_vector->itr >> 3;
2580 if (itr < IXGBE_ITR_ADAPTIVE_MIN_USECS)
2581 itr = IXGBE_ITR_ADAPTIVE_MIN_USECS;
2582 goto clear_counts;
2583 }
2584
2585
2586
2587
2588
2589
2590
2591 itr = IXGBE_ITR_ADAPTIVE_BULK;
2592
2593 adjust_by_size:
2594
2595
2596
2597
2598
2599 avg_wire_size = bytes / packets;
2600
2601 /* The following is a crude approximation of:
2602 *  wmem_default / (size + overhead) = desired_pkts_per_int
2603 *  rate / bits_per_byte / (size + ethernet overhead) = pkt_rate
2604 *  (desired_pkt_rate / pkt_rate) * usecs_per_sec = ITR value
2605 *
2606 * Assuming wmem_default is 212992 and overhead is 640 bytes per
2607 * packet, (256 skb, 64 headroom, 320 shared info), we can reduce the
2608 * formula down to
2609 *
2610 *  (170 * (size + 24)) / (size + 640) = ITR
2611 *
2612 * We first do some math on the packet size and then finally bitshift
2613 * by 8 after rounding up. We also have to account for PCIe link speed
2614 * difference as ITR scales based on this.
2615 */
2616 if (avg_wire_size <= 60) {
2617
2618 avg_wire_size = 5120;
2619 } else if (avg_wire_size <= 316) {
2620
2621 avg_wire_size *= 40;
2622 avg_wire_size += 2720;
2623 } else if (avg_wire_size <= 1084) {
2624
2625 avg_wire_size *= 15;
2626 avg_wire_size += 11452;
2627 } else if (avg_wire_size < 1968) {
2628
2629 avg_wire_size *= 5;
2630 avg_wire_size += 22420;
2631 } else {
2632
2633 avg_wire_size = 32256;
2634 }
2635
2636 /* If we are in low latency mode halve our delay which doubles the
2637 * rate to somewhere between 100K to 16K ints/sec
2638 */
2639 if (itr & IXGBE_ITR_ADAPTIVE_LATENCY)
2640 avg_wire_size >>= 1;
2641
2642 /* Resultant value is 256 times larger than it needs to be. This
2643 * gives us room to adjust the value as needed to either increase
2644 * or decrease the value based on link speeds of 10G, 2.5G, 1G, etc.
2645 *
2646 * Use addition as we have already recorded the new latency flag
2647 * for the ITR value.
2648 */
2649 switch (q_vector->adapter->link_speed) {
2650 case IXGBE_LINK_SPEED_10GB_FULL:
2651 case IXGBE_LINK_SPEED_100_FULL:
2652 default:
2653 itr += DIV_ROUND_UP(avg_wire_size,
2654 IXGBE_ITR_ADAPTIVE_MIN_INC * 256) *
2655 IXGBE_ITR_ADAPTIVE_MIN_INC;
2656 break;
2657 case IXGBE_LINK_SPEED_2_5GB_FULL:
2658 case IXGBE_LINK_SPEED_1GB_FULL:
2659 case IXGBE_LINK_SPEED_10_FULL:
2660 if (avg_wire_size > 8064)
2661 avg_wire_size = 8064;
2662 itr += DIV_ROUND_UP(avg_wire_size,
2663 IXGBE_ITR_ADAPTIVE_MIN_INC * 64) *
2664 IXGBE_ITR_ADAPTIVE_MIN_INC;
2665 break;
2666 }
2667
2668 clear_counts:
2669
2670 ring_container->itr = itr;
2671
2672
2673 ring_container->next_update = next_update + 1;
2674
2675 ring_container->total_bytes = 0;
2676 ring_container->total_packets = 0;
2677 }
2678
2679 /**
2680 * ixgbe_write_eitr - write EITR register in hardware specific way
2681 * @q_vector: structure containing interrupt and ring information
2682 *
2683 * This function is made to be called by ethtool and by the driver
2684 * when it needs to update EITR registers at runtime.  Hardware
2685 * specific quirks/differences are taken care of here.
2686 */
2687 void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector)
2688 {
2689 struct ixgbe_adapter *adapter = q_vector->adapter;
2690 struct ixgbe_hw *hw = &adapter->hw;
2691 int v_idx = q_vector->v_idx;
2692 u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR;
2693
2694 switch (adapter->hw.mac.type) {
2695 case ixgbe_mac_82598EB:
2696
2697 itr_reg |= (itr_reg << 16);
2698 break;
2699 case ixgbe_mac_82599EB:
2700 case ixgbe_mac_X540:
2701 case ixgbe_mac_X550:
2702 case ixgbe_mac_X550EM_x:
2703 case ixgbe_mac_x550em_a:
2704 /*
2705 * set the WDIS bit to not clear the timer bits and cause an
2706 * immediate assertion of the interrupt
2707 */
2708 itr_reg |= IXGBE_EITR_CNT_WDIS;
2709 break;
2710 default:
2711 break;
2712 }
2713 IXGBE_WRITE_REG(hw, IXGBE_EITR(v_idx), itr_reg);
2714 }
2715
2716 static void ixgbe_set_itr(struct ixgbe_q_vector *q_vector)
2717 {
2718 u32 new_itr;
2719
2720 ixgbe_update_itr(q_vector, &q_vector->tx);
2721 ixgbe_update_itr(q_vector, &q_vector->rx);
2722
2723
2724 new_itr = min(q_vector->rx.itr, q_vector->tx.itr);
2725
2726
2727 new_itr &= ~IXGBE_ITR_ADAPTIVE_LATENCY;
2728 new_itr <<= 2;
2729
2730 if (new_itr != q_vector->itr) {
2731
2732 q_vector->itr = new_itr;
2733
2734 ixgbe_write_eitr(q_vector);
2735 }
2736 }
2737
2738 /**
2739 * ixgbe_check_overtemp_subtask - check for over temperature
2740 * @adapter: pointer to adapter
2741 **/
2742 static void ixgbe_check_overtemp_subtask(struct ixgbe_adapter *adapter)
2743 {
2744 struct ixgbe_hw *hw = &adapter->hw;
2745 u32 eicr = adapter->interrupt_event;
2746 s32 rc;
2747
2748 if (test_bit(__IXGBE_DOWN, &adapter->state))
2749 return;
2750
2751 if (!(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_EVENT))
2752 return;
2753
2754 adapter->flags2 &= ~IXGBE_FLAG2_TEMP_SENSOR_EVENT;
2755
2756 switch (hw->device_id) {
2757 case IXGBE_DEV_ID_82599_T3_LOM:
2758 /*
2759 * Since the warning interrupt is for both ports
2760 * we don't have to check if:
2761 *  - This interrupt wasn't for our port.
2762 *  - We may have missed the interrupt so always have to
2763 *    check if we got a LSC
2764 */
2765 if (!(eicr & IXGBE_EICR_GPI_SDP0_8259X) &&
2766 !(eicr & IXGBE_EICR_LSC))
2767 return;
2768
2769 if (!(eicr & IXGBE_EICR_LSC) && hw->mac.ops.check_link) {
2770 u32 speed;
2771 bool link_up = false;
2772
2773 hw->mac.ops.check_link(hw, &speed, &link_up, false);
2774
2775 if (link_up)
2776 return;
2777 }
2778
2779 /* Check if this is not due to overtemp */
2780 if (hw->phy.ops.check_overtemp(hw) != IXGBE_ERR_OVERTEMP)
2781 return;
2782
2783 break;
2784 case IXGBE_DEV_ID_X550EM_A_1G_T:
2785 case IXGBE_DEV_ID_X550EM_A_1G_T_L:
2786 rc = hw->phy.ops.check_overtemp(hw);
2787 if (rc != IXGBE_ERR_OVERTEMP)
2788 return;
2789 break;
2790 default:
2791 if (adapter->hw.mac.type >= ixgbe_mac_X540)
2792 return;
2793 if (!(eicr & IXGBE_EICR_GPI_SDP0(hw)))
2794 return;
2795 break;
2796 }
2797 e_crit(drv, "%s\n", ixgbe_overheat_msg);
2798
2799 adapter->interrupt_event = 0;
2800 }
2801
2802 static void ixgbe_check_fan_failure(struct ixgbe_adapter *adapter, u32 eicr)
2803 {
2804 struct ixgbe_hw *hw = &adapter->hw;
2805
2806 if ((adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) &&
2807 (eicr & IXGBE_EICR_GPI_SDP1(hw))) {
2808 e_crit(probe, "Fan has stopped, replace the adapter\n");
2809 /* write to clear the interrupt */
2810 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1(hw));
2811 }
2812 }
2813
2814 static void ixgbe_check_overtemp_event(struct ixgbe_adapter *adapter, u32 eicr)
2815 {
2816 struct ixgbe_hw *hw = &adapter->hw;
2817
2818 if (!(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE))
2819 return;
2820
2821 switch (adapter->hw.mac.type) {
2822 case ixgbe_mac_82599EB:
2823 /*
2824 * Need to check link state so complete overtemp check
2825 * on service task
2826 */
2827 if (((eicr & IXGBE_EICR_GPI_SDP0(hw)) ||
2828 (eicr & IXGBE_EICR_LSC)) &&
2829 (!test_bit(__IXGBE_DOWN, &adapter->state))) {
2830 adapter->interrupt_event = eicr;
2831 adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_EVENT;
2832 ixgbe_service_event_schedule(adapter);
2833 return;
2834 }
2835 return;
2836 case ixgbe_mac_x550em_a:
2837 if (eicr & IXGBE_EICR_GPI_SDP0_X550EM_a) {
2838 adapter->interrupt_event = eicr;
2839 adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_EVENT;
2840 ixgbe_service_event_schedule(adapter);
2841 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC,
2842 IXGBE_EICR_GPI_SDP0_X550EM_a);
2843 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICR,
2844 IXGBE_EICR_GPI_SDP0_X550EM_a);
2845 }
2846 return;
2847 case ixgbe_mac_X550:
2848 case ixgbe_mac_X540:
2849 if (!(eicr & IXGBE_EICR_TS))
2850 return;
2851 break;
2852 default:
2853 return;
2854 }
2855
2856 e_crit(drv, "%s\n", ixgbe_overheat_msg);
2857 }
2858
2859 static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw)
2860 {
2861 switch (hw->mac.type) {
2862 case ixgbe_mac_82598EB:
2863 if (hw->phy.type == ixgbe_phy_nl)
2864 return true;
2865 return false;
2866 case ixgbe_mac_82599EB:
2867 case ixgbe_mac_X550EM_x:
2868 case ixgbe_mac_x550em_a:
2869 switch (hw->mac.ops.get_media_type(hw)) {
2870 case ixgbe_media_type_fiber:
2871 case ixgbe_media_type_fiber_qsfp:
2872 return true;
2873 default:
2874 return false;
2875 }
2876 default:
2877 return false;
2878 }
2879 }
2880
2881 static void ixgbe_check_sfp_event(struct ixgbe_adapter *adapter, u32 eicr)
2882 {
2883 struct ixgbe_hw *hw = &adapter->hw;
2884 u32 eicr_mask = IXGBE_EICR_GPI_SDP2(hw);
2885
2886 if (!ixgbe_is_sfp(hw))
2887 return;
2888
2889 /* Later MAC's use different SDP */
2890 if (hw->mac.type >= ixgbe_mac_X540)
2891 eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
2892
2893 if (eicr & eicr_mask) {
2894
2895 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
2896 if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
2897 adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET;
2898 adapter->sfp_poll_time = 0;
2899 ixgbe_service_event_schedule(adapter);
2900 }
2901 }
2902
2903 if (adapter->hw.mac.type == ixgbe_mac_82599EB &&
2904 (eicr & IXGBE_EICR_GPI_SDP1(hw))) {
2905
2906 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1(hw));
2907 if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
2908 adapter->flags |= IXGBE_FLAG_NEED_LINK_CONFIG;
2909 ixgbe_service_event_schedule(adapter);
2910 }
2911 }
2912 }
2913
2914 static void ixgbe_check_lsc(struct ixgbe_adapter *adapter)
2915 {
2916 struct ixgbe_hw *hw = &adapter->hw;
2917
2918 adapter->lsc_int++;
2919 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
2920 adapter->link_check_timeout = jiffies;
2921 if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
2922 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
2923 IXGBE_WRITE_FLUSH(hw);
2924 ixgbe_service_event_schedule(adapter);
2925 }
2926 }
2927
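/* unmask the EIMS bits for the Rx/Tx queues selected by qmask */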
2928 static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter,
2929 u64 qmask)
2930 {
2931 u32 mask;
2932 struct ixgbe_hw *hw = &adapter->hw;
2933
2934 switch (hw->mac.type) {
2935 case ixgbe_mac_82598EB:
2936 mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
2937 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
2938 break;
2939 case ixgbe_mac_82599EB:
2940 case ixgbe_mac_X540:
2941 case ixgbe_mac_X550:
2942 case ixgbe_mac_X550EM_x:
2943 case ixgbe_mac_x550em_a:
2944 mask = (qmask & 0xFFFFFFFF);
2945 if (mask)
2946 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
2947 mask = (qmask >> 32);
2948 if (mask)
2949 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
2950 break;
2951 default:
2952 break;
2953 }
2954
2955 }
2956
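/* write EIMC to mask the interrupt causes for the queues selected by qmask */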
2957 static inline void ixgbe_irq_disable_queues(struct ixgbe_adapter *adapter,
2958 u64 qmask)
2959 {
2960 u32 mask;
2961 struct ixgbe_hw *hw = &adapter->hw;
2962
2963 switch (hw->mac.type) {
2964 case ixgbe_mac_82598EB:
2965 mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
2966 IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
2967 break;
2968 case ixgbe_mac_82599EB:
2969 case ixgbe_mac_X540:
2970 case ixgbe_mac_X550:
2971 case ixgbe_mac_X550EM_x:
2972 case ixgbe_mac_x550em_a:
2973 mask = (qmask & 0xFFFFFFFF);
2974 if (mask)
2975 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
2976 mask = (qmask >> 32);
2977 if (mask)
2978 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
2979 break;
2980 default:
2981 break;
2982 }
2983
2984 }
2985
2986 /**
2987 * ixgbe_irq_enable - Enable default interrupt generation settings
2988 * @adapter: board private structure
2989 * @queues: enable irqs for queues
2990 * @flush: flush register write
2991 **/
2992 static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues,
2993 bool flush)
2994 {
2995 struct ixgbe_hw *hw = &adapter->hw;
2996 u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
2997
2998 /* don't reenable LSC while waiting for link */
2999 if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE)
3000 mask &= ~IXGBE_EIMS_LSC;
3001
3002 if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE)
3003 switch (adapter->hw.mac.type) {
3004 case ixgbe_mac_82599EB:
3005 mask |= IXGBE_EIMS_GPI_SDP0(hw);
3006 break;
3007 case ixgbe_mac_X540:
3008 case ixgbe_mac_X550:
3009 case ixgbe_mac_X550EM_x:
3010 case ixgbe_mac_x550em_a:
3011 mask |= IXGBE_EIMS_TS;
3012 break;
3013 default:
3014 break;
3015 }
3016 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
3017 mask |= IXGBE_EIMS_GPI_SDP1(hw);
3018 switch (adapter->hw.mac.type) {
3019 case ixgbe_mac_82599EB:
3020 mask |= IXGBE_EIMS_GPI_SDP1(hw);
3021 mask |= IXGBE_EIMS_GPI_SDP2(hw);
3022 /* fall through */
3023 case ixgbe_mac_X540:
3024 case ixgbe_mac_X550:
3025 case ixgbe_mac_X550EM_x:
3026 case ixgbe_mac_x550em_a:
3027 if (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
3028 adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
3029 adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_SFP_N)
3030 mask |= IXGBE_EIMS_GPI_SDP0(&adapter->hw);
3031 if (adapter->hw.phy.type == ixgbe_phy_x550em_ext_t)
3032 mask |= IXGBE_EICR_GPI_SDP0_X540;
3033 mask |= IXGBE_EIMS_ECC;
3034 mask |= IXGBE_EIMS_MAILBOX;
3035 break;
3036 default:
3037 break;
3038 }
3039
3040 if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) &&
3041 !(adapter->flags2 & IXGBE_FLAG2_FDIR_REQUIRES_REINIT))
3042 mask |= IXGBE_EIMS_FLOW_DIR;
3043
3044 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
3045 if (queues)
3046 ixgbe_irq_enable_queues(adapter, ~0);
3047 if (flush)
3048 IXGBE_WRITE_FLUSH(&adapter->hw);
3049 }
3050
3051 static irqreturn_t ixgbe_msix_other(int irq, void *data)
3052 {
3053 struct ixgbe_adapter *adapter = data;
3054 struct ixgbe_hw *hw = &adapter->hw;
3055 u32 eicr;
3056
3057 /*
3058 * Workaround for silicon errata: use clear-by-write instead
3059 * of clear-by-read.  Reading EICS returns the interrupt causes
3060 * without clearing them; the clearing is done afterwards with
3061 * the explicit write to EICR below.
3062 */
3063 eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
3064
3065
3066
3067
3068
3069
3070
3071
3072 eicr &= 0xFFFF0000;
3073
3074 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
3075
3076 if (eicr & IXGBE_EICR_LSC)
3077 ixgbe_check_lsc(adapter);
3078
3079 if (eicr & IXGBE_EICR_MAILBOX)
3080 ixgbe_msg_task(adapter);
3081
3082 switch (hw->mac.type) {
3083 case ixgbe_mac_82599EB:
3084 case ixgbe_mac_X540:
3085 case ixgbe_mac_X550:
3086 case ixgbe_mac_X550EM_x:
3087 case ixgbe_mac_x550em_a:
3088 if (hw->phy.type == ixgbe_phy_x550em_ext_t &&
3089 (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
3090 adapter->flags2 |= IXGBE_FLAG2_PHY_INTERRUPT;
3091 ixgbe_service_event_schedule(adapter);
3092 IXGBE_WRITE_REG(hw, IXGBE_EICR,
3093 IXGBE_EICR_GPI_SDP0_X540);
3094 }
3095 if (eicr & IXGBE_EICR_ECC) {
3096 e_info(link, "Received ECC Err, initiating reset\n");
3097 set_bit(__IXGBE_RESET_REQUESTED, &adapter->state);
3098 ixgbe_service_event_schedule(adapter);
3099 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
3100 }
3101 /* Handle Flow Director Full threshold interrupt */
3102 if (eicr & IXGBE_EICR_FLOW_DIR) {
3103 int reinit_count = 0;
3104 int i;
3105 for (i = 0; i < adapter->num_tx_queues; i++) {
3106 struct ixgbe_ring *ring = adapter->tx_ring[i];
3107 if (test_and_clear_bit(__IXGBE_TX_FDIR_INIT_DONE,
3108 &ring->state))
3109 reinit_count++;
3110 }
3111 if (reinit_count) {
3112 /* no more flow director interrupts until after init */
3113 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_FLOW_DIR);
3114 adapter->flags2 |= IXGBE_FLAG2_FDIR_REQUIRES_REINIT;
3115 ixgbe_service_event_schedule(adapter);
3116 }
3117 }
3118 ixgbe_check_sfp_event(adapter, eicr);
3119 ixgbe_check_overtemp_event(adapter, eicr);
3120 break;
3121 default:
3122 break;
3123 }
3124
3125 ixgbe_check_fan_failure(adapter, eicr);
3126
3127 if (unlikely(eicr & IXGBE_EICR_TIMESYNC))
3128 ixgbe_ptp_check_pps_event(adapter);
3129
3130 /* re-enable the original interrupt state, no lsc, no queues */
3131 if (!test_bit(__IXGBE_DOWN, &adapter->state))
3132 ixgbe_irq_enable(adapter, false, false);
3133
3134 return IRQ_HANDLED;
3135 }
3136
3137 static irqreturn_t ixgbe_msix_clean_rings(int irq, void *data)
3138 {
3139 struct ixgbe_q_vector *q_vector = data;
3140
3141 /* EIAM disabled interrupts (on this vector) for us */
3142
3143 if (q_vector->rx.ring || q_vector->tx.ring)
3144 napi_schedule_irqoff(&q_vector->napi);
3145
3146 return IRQ_HANDLED;
3147 }
3148
3149 /**
3150 * ixgbe_poll - NAPI Rx polling callback
3151 * @napi: structure for representing this polling device
3152 * @budget: how many packets driver is allowed to clean
3153 *
3154 * This function is used for legacy and MSI, NAPI mode
3155 **/
3156 int ixgbe_poll(struct napi_struct *napi, int budget)
3157 {
3158 struct ixgbe_q_vector *q_vector =
3159 container_of(napi, struct ixgbe_q_vector, napi);
3160 struct ixgbe_adapter *adapter = q_vector->adapter;
3161 struct ixgbe_ring *ring;
3162 int per_ring_budget, work_done = 0;
3163 bool clean_complete = true;
3164
3165 #ifdef CONFIG_IXGBE_DCA
3166 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
3167 ixgbe_update_dca(q_vector);
3168 #endif
3169
3170 ixgbe_for_each_ring(ring, q_vector->tx) {
3171 bool wd = ring->xsk_umem ?
3172 ixgbe_clean_xdp_tx_irq(q_vector, ring, budget) :
3173 ixgbe_clean_tx_irq(q_vector, ring, budget);
3174
3175 if (!wd)
3176 clean_complete = false;
3177 }
3178
3179 /* Exit if we are called by netpoll */
3180 if (budget <= 0)
3181 return budget;
3182
3183 /* attempt to distribute budget to each queue fairly, but don't allow
3184 * the budget to go below 1 because we'll exit polling */
3185 if (q_vector->rx.count > 1)
3186 per_ring_budget = max(budget/q_vector->rx.count, 1);
3187 else
3188 per_ring_budget = budget;
3189
3190 ixgbe_for_each_ring(ring, q_vector->rx) {
3191 int cleaned = ring->xsk_umem ?
3192 ixgbe_clean_rx_irq_zc(q_vector, ring,
3193 per_ring_budget) :
3194 ixgbe_clean_rx_irq(q_vector, ring,
3195 per_ring_budget);
3196
3197 work_done += cleaned;
3198 if (cleaned >= per_ring_budget)
3199 clean_complete = false;
3200 }
3201
3202
3203 if (!clean_complete)
3204 return budget;
3205
3206
3207 if (likely(napi_complete_done(napi, work_done))) {
3208 if (adapter->rx_itr_setting & 1)
3209 ixgbe_set_itr(q_vector);
3210 if (!test_bit(__IXGBE_DOWN, &adapter->state))
3211 ixgbe_irq_enable_queues(adapter,
3212 BIT_ULL(q_vector->v_idx));
3213 }
3214
3215 return min(work_done, budget - 1);
3216 }
3217
3218 /**
3219 * ixgbe_request_msix_irqs - Request MSI-X interrupts from the kernel
3220 * @adapter: board private structure
3221 *
3222 * ixgbe_request_msix_irqs allocates MSI-X vectors and requests
3223 * interrupts from the kernel.
3224 **/
3225 static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
3226 {
3227 struct net_device *netdev = adapter->netdev;
3228 unsigned int ri = 0, ti = 0;
3229 int vector, err;
3230
3231 for (vector = 0; vector < adapter->num_q_vectors; vector++) {
3232 struct ixgbe_q_vector *q_vector = adapter->q_vector[vector];
3233 struct msix_entry *entry = &adapter->msix_entries[vector];
3234
3235 if (q_vector->tx.ring && q_vector->rx.ring) {
3236 snprintf(q_vector->name, sizeof(q_vector->name),
3237 "%s-TxRx-%u", netdev->name, ri++);
3238 ti++;
3239 } else if (q_vector->rx.ring) {
3240 snprintf(q_vector->name, sizeof(q_vector->name),
3241 "%s-rx-%u", netdev->name, ri++);
3242 } else if (q_vector->tx.ring) {
3243 snprintf(q_vector->name, sizeof(q_vector->name),
3244 "%s-tx-%u", netdev->name, ti++);
3245 } else {
3246
3247 continue;
3248 }
3249 err = request_irq(entry->vector, &ixgbe_msix_clean_rings, 0,
3250 q_vector->name, q_vector);
3251 if (err) {
3252 e_err(probe, "request_irq failed for MSIX interrupt "
3253 "Error: %d\n", err);
3254 goto free_queue_irqs;
3255 }
3256
3257 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
3258
3259 irq_set_affinity_hint(entry->vector,
3260 &q_vector->affinity_mask);
3261 }
3262 }
3263
3264 err = request_irq(adapter->msix_entries[vector].vector,
3265 ixgbe_msix_other, 0, netdev->name, adapter);
3266 if (err) {
3267 e_err(probe, "request_irq for msix_other failed: %d\n", err);
3268 goto free_queue_irqs;
3269 }
3270
3271 return 0;
3272
3273 free_queue_irqs:
3274 while (vector) {
3275 vector--;
3276 irq_set_affinity_hint(adapter->msix_entries[vector].vector,
3277 NULL);
3278 free_irq(adapter->msix_entries[vector].vector,
3279 adapter->q_vector[vector]);
3280 }
3281 adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
3282 pci_disable_msix(adapter->pdev);
3283 kfree(adapter->msix_entries);
3284 adapter->msix_entries = NULL;
3285 return err;
3286 }
3287
3288 /**
3289 * ixgbe_intr - legacy mode Interrupt Handler
3290 * @irq: interrupt number
3291 * @data: pointer to a network interface device structure
3292 **/
3293 static irqreturn_t ixgbe_intr(int irq, void *data)
3294 {
3295 struct ixgbe_adapter *adapter = data;
3296 struct ixgbe_hw *hw = &adapter->hw;
3297 struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
3298 u32 eicr;
3299
3300 /*
3301 * Workaround for silicon errata #26 on 82598.  Mask the interrupt
3302 * before the read of EICR.
3303 */
3304 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
3305
3306
3307
3308 eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
3309 if (!eicr) {
3310 /*
3311 * shared interrupt alert!
3312 * make sure interrupts are enabled because the read will
3313 * have disabled interrupts due to EIAM
3314 * finish the workaround of silicon errata on 82598.  Unmask
3315 * the interrupt that we masked before the EICR read.
3316 */
3317 if (!test_bit(__IXGBE_DOWN, &adapter->state))
3318 ixgbe_irq_enable(adapter, true, true);
3319 return IRQ_NONE;
3320 }
3321
3322 if (eicr & IXGBE_EICR_LSC)
3323 ixgbe_check_lsc(adapter);
3324
3325 switch (hw->mac.type) {
3326 case ixgbe_mac_82599EB:
3327 ixgbe_check_sfp_event(adapter, eicr);
3328 /* Fall through */
3329 case ixgbe_mac_X540:
3330 case ixgbe_mac_X550:
3331 case ixgbe_mac_X550EM_x:
3332 case ixgbe_mac_x550em_a:
3333 if (eicr & IXGBE_EICR_ECC) {
3334 e_info(link, "Received ECC Err, initiating reset\n");
3335 set_bit(__IXGBE_RESET_REQUESTED, &adapter->state);
3336 ixgbe_service_event_schedule(adapter);
3337 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
3338 }
3339 ixgbe_check_overtemp_event(adapter, eicr);
3340 break;
3341 default:
3342 break;
3343 }
3344
3345 ixgbe_check_fan_failure(adapter, eicr);
3346 if (unlikely(eicr & IXGBE_EICR_TIMESYNC))
3347 ixgbe_ptp_check_pps_event(adapter);
3348
3349
3350 napi_schedule_irqoff(&q_vector->napi);
3351
3352 /*
3353 * re-enable link(maybe) and non-queue interrupts, no flush.
3354 * ixgbe_poll will re-enable the queue interrupts
3355 */
3356 if (!test_bit(__IXGBE_DOWN, &adapter->state))
3357 ixgbe_irq_enable(adapter, false, false);
3358
3359 return IRQ_HANDLED;
3360 }
3361
3362 /**
3363 * ixgbe_request_irq - initialize interrupts
3364 * @adapter: board private structure
3365 *
3366 * Attempts to configure interrupts using the best available
3367 * capabilities of the hardware and kernel.
3368 **/
3369 static int ixgbe_request_irq(struct ixgbe_adapter *adapter)
3370 {
3371 struct net_device *netdev = adapter->netdev;
3372 int err;
3373
3374 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
3375 err = ixgbe_request_msix_irqs(adapter);
3376 else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED)
3377 err = request_irq(adapter->pdev->irq, ixgbe_intr, 0,
3378 netdev->name, adapter);
3379 else
3380 err = request_irq(adapter->pdev->irq, ixgbe_intr, IRQF_SHARED,
3381 netdev->name, adapter);
3382
3383 if (err)
3384 e_err(probe, "request_irq failed, Error %d\n", err);
3385
3386 return err;
3387 }
3388
3389 static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
3390 {
3391 int vector;
3392
3393 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
3394 free_irq(adapter->pdev->irq, adapter);
3395 return;
3396 }
3397
3398 if (!adapter->msix_entries)
3399 return;
3400
3401 for (vector = 0; vector < adapter->num_q_vectors; vector++) {
3402 struct ixgbe_q_vector *q_vector = adapter->q_vector[vector];
3403 struct msix_entry *entry = &adapter->msix_entries[vector];
3404
3405
3406 if (!q_vector->rx.ring && !q_vector->tx.ring)
3407 continue;
3408
3409
3410 irq_set_affinity_hint(entry->vector, NULL);
3411
3412 free_irq(entry->vector, q_vector);
3413 }
3414
3415 free_irq(adapter->msix_entries[vector].vector, adapter);
3416 }
3417
3418 /**
3419 * ixgbe_irq_disable - Mask off interrupt generation on the NIC
3420 * @adapter: board private structure
3421 **/
3422 static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
3423 {
3424 switch (adapter->hw.mac.type) {
3425 case ixgbe_mac_82598EB:
3426 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
3427 break;
3428 case ixgbe_mac_82599EB:
3429 case ixgbe_mac_X540:
3430 case ixgbe_mac_X550:
3431 case ixgbe_mac_X550EM_x:
3432 case ixgbe_mac_x550em_a:
3433 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
3434 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
3435 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
3436 break;
3437 default:
3438 break;
3439 }
3440 IXGBE_WRITE_FLUSH(&adapter->hw);
3441 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
3442 int vector;
3443
3444 for (vector = 0; vector < adapter->num_q_vectors; vector++)
3445 synchronize_irq(adapter->msix_entries[vector].vector);
3446
3447 synchronize_irq(adapter->msix_entries[vector++].vector);
3448 } else {
3449 synchronize_irq(adapter->pdev->irq);
3450 }
3451 }
3452
3453
3454
3455
3456
3457
3458 static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter)
3459 {
3460 struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
3461
3462 ixgbe_write_eitr(q_vector);
3463
3464 ixgbe_set_ivar(adapter, 0, 0, 0);
3465 ixgbe_set_ivar(adapter, 1, 0, 0);
3466
3467 e_info(hw, "Legacy interrupt IVAR setup done\n");
3468 }
3469
3470 /**
3471 * ixgbe_configure_tx_ring - Configure 8259x Tx ring after Reset
3472 * @adapter: board private structure
3473 * @ring: structure containing ring specific data
3474 *
3475 * Configure the Tx descriptor ring after a reset.
3476 **/
3477 void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
3478 struct ixgbe_ring *ring)
3479 {
3480 struct ixgbe_hw *hw = &adapter->hw;
3481 u64 tdba = ring->dma;
3482 int wait_loop = 10;
3483 u32 txdctl = IXGBE_TXDCTL_ENABLE;
3484 u8 reg_idx = ring->reg_idx;
3485
3486 ring->xsk_umem = NULL;
3487 if (ring_is_xdp(ring))
3488 ring->xsk_umem = ixgbe_xsk_umem(adapter, ring);
3489
3490
3491 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), 0);
3492 IXGBE_WRITE_FLUSH(hw);
3493
3494 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(reg_idx),
3495 (tdba & DMA_BIT_MASK(32)));
3496 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(reg_idx), (tdba >> 32));
3497 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(reg_idx),
3498 ring->count * sizeof(union ixgbe_adv_tx_desc));
3499 IXGBE_WRITE_REG(hw, IXGBE_TDH(reg_idx), 0);
3500 IXGBE_WRITE_REG(hw, IXGBE_TDT(reg_idx), 0);
3501 ring->tail = adapter->io_addr + IXGBE_TDT(reg_idx);
3502
3503 /*
3504 * set WTHRESH to encourage burst writeback, it should not be set
3505 * higher than 1 when:
3506 * - ITR is 0 as it could cause false TX hangs
3507 * - ITR is set to > 100k int/sec and BQL is enabled
3508 *
3509 * In order to avoid issues WTHRESH + PTHRESH should always be equal
3510 * to or less than the number of on chip descriptors, which is
3511 * currently 40.
3512 */
3513 if (!ring->q_vector || (ring->q_vector->itr < IXGBE_100K_ITR))
3514 txdctl |= 1u << 16;
3515 else
3516 txdctl |= 8u << 16;
3517
3518 /*
3519 * Setting PTHRESH to 32 both improves performance
3520 * and avoids a TX hang with DFP enabled
3521 */
3522 txdctl |= (1u << 8) |
3523 32;
3524
3525
3526 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
3527 ring->atr_sample_rate = adapter->atr_sample_rate;
3528 ring->atr_count = 0;
3529 set_bit(__IXGBE_TX_FDIR_INIT_DONE, &ring->state);
3530 } else {
3531 ring->atr_sample_rate = 0;
3532 }
3533
3534
3535 if (!test_and_set_bit(__IXGBE_TX_XPS_INIT_DONE, &ring->state)) {
3536 struct ixgbe_q_vector *q_vector = ring->q_vector;
3537
3538 if (q_vector)
3539 netif_set_xps_queue(ring->netdev,
3540 &q_vector->affinity_mask,
3541 ring->queue_index);
3542 }
3543
3544 clear_bit(__IXGBE_HANG_CHECK_ARMED, &ring->state);
3545
3546
3547 memset(ring->tx_buffer_info, 0,
3548 sizeof(struct ixgbe_tx_buffer) * ring->count);
3549
3550
3551 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), txdctl);
3552
3553 /* TXDCTL.ENABLE will return 0 on 82598 if link is down, so skip it */
3554 if (hw->mac.type == ixgbe_mac_82598EB &&
3555 !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
3556 return;
3557
3558 /* poll to verify queue is enabled */
3559 do {
3560 usleep_range(1000, 2000);
3561 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
3562 } while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE));
3563 if (!wait_loop)
3564 hw_dbg(hw, "Could not enable Tx Queue %d\n", reg_idx);
3565 }
3566
3567 static void ixgbe_setup_mtqc(struct ixgbe_adapter *adapter)
3568 {
3569 struct ixgbe_hw *hw = &adapter->hw;
3570 u32 rttdcs, mtqc;
3571 u8 tcs = adapter->hw_tcs;
3572
3573 if (hw->mac.type == ixgbe_mac_82598EB)
3574 return;
3575
3576
3577 rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
3578 rttdcs |= IXGBE_RTTDCS_ARBDIS;
3579 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
3580
3581
3582 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
3583 mtqc = IXGBE_MTQC_VT_ENA;
3584 if (tcs > 4)
3585 mtqc |= IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
3586 else if (tcs > 1)
3587 mtqc |= IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
3588 else if (adapter->ring_feature[RING_F_VMDQ].mask ==
3589 IXGBE_82599_VMDQ_4Q_MASK)
3590 mtqc |= IXGBE_MTQC_32VF;
3591 else
3592 mtqc |= IXGBE_MTQC_64VF;
3593 } else {
3594 if (tcs > 4) {
3595 mtqc = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
3596 } else if (tcs > 1) {
3597 mtqc = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
3598 } else {
3599 u8 max_txq = adapter->num_tx_queues +
3600 adapter->num_xdp_queues;
3601 if (max_txq > 63)
3602 mtqc = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
3603 else
3604 mtqc = IXGBE_MTQC_64Q_1PB;
3605 }
3606 }
3607
3608 IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);
3609
3610
3611 if (tcs) {
3612 u32 sectx = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
3613 sectx |= IXGBE_SECTX_DCB;
3614 IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, sectx);
3615 }
3616
3617
3618 rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
3619 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
3620 }
3621
3622 /**
3623 * ixgbe_configure_tx - Configure 8259x Transmit Unit after Reset
3624 * @adapter: board private structure
3625 *
3626 * Configure the Tx unit of the MAC after a reset.
3627 **/
3628 static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
3629 {
3630 struct ixgbe_hw *hw = &adapter->hw;
3631 u32 dmatxctl;
3632 u32 i;
3633
3634 ixgbe_setup_mtqc(adapter);
3635
3636 if (hw->mac.type != ixgbe_mac_82598EB) {
3637
3638 dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
3639 dmatxctl |= IXGBE_DMATXCTL_TE;
3640 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
3641 }
3642
3643
3644 for (i = 0; i < adapter->num_tx_queues; i++)
3645 ixgbe_configure_tx_ring(adapter, adapter->tx_ring[i]);
3646 for (i = 0; i < adapter->num_xdp_queues; i++)
3647 ixgbe_configure_tx_ring(adapter, adapter->xdp_ring[i]);
3648 }
3649
3650 static void ixgbe_enable_rx_drop(struct ixgbe_adapter *adapter,
3651 struct ixgbe_ring *ring)
3652 {
3653 struct ixgbe_hw *hw = &adapter->hw;
3654 u8 reg_idx = ring->reg_idx;
3655 u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(reg_idx));
3656
3657 srrctl |= IXGBE_SRRCTL_DROP_EN;
3658
3659 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl);
3660 }
3661
3662 static void ixgbe_disable_rx_drop(struct ixgbe_adapter *adapter,
3663 struct ixgbe_ring *ring)
3664 {
3665 struct ixgbe_hw *hw = &adapter->hw;
3666 u8 reg_idx = ring->reg_idx;
3667 u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(reg_idx));
3668
3669 srrctl &= ~IXGBE_SRRCTL_DROP_EN;
3670
3671 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl);
3672 }
3673
3674 #ifdef CONFIG_IXGBE_DCB
3675 void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter)
3676 #else
3677 static void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter)
3678 #endif
3679 {
3680 int i;
3681 bool pfc_en = adapter->dcb_cfg.pfc_mode_enable;
3682
3683 if (adapter->ixgbe_ieee_pfc)
3684 pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en);
3685
3686 /*
3687 * We should set the drop enable bit if:
3688 *  SR-IOV is enabled
3689 *   or
3690 *  Number of Rx queues > 1 and flow control is disabled
3691 *
3692 *  This allows us to avoid head of line blocking for security
3693 *  and performance reasons.
3694 */
3695 if (adapter->num_vfs || (adapter->num_rx_queues > 1 &&
3696 !(adapter->hw.fc.current_mode & ixgbe_fc_tx_pause) && !pfc_en)) {
3697 for (i = 0; i < adapter->num_rx_queues; i++)
3698 ixgbe_enable_rx_drop(adapter, adapter->rx_ring[i]);
3699 } else {
3700 for (i = 0; i < adapter->num_rx_queues; i++)
3701 ixgbe_disable_rx_drop(adapter, adapter->rx_ring[i]);
3702 }
3703 }
3704
3705 #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
3706
3707 static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
3708 struct ixgbe_ring *rx_ring)
3709 {
3710 struct ixgbe_hw *hw = &adapter->hw;
3711 u32 srrctl;
3712 u8 reg_idx = rx_ring->reg_idx;
3713
3714 if (hw->mac.type == ixgbe_mac_82598EB) {
3715 u16 mask = adapter->ring_feature[RING_F_RSS].mask;
3716
3717 /*
3718 * if VMDq is not active we must program one srrctl register
3719 * per RSS queue since we have enabled RDRXCTL.MVMEN
3720 */
3721 reg_idx &= mask;
3722 }
3723
3724
3725 srrctl = IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT;
3726
3727
3728 if (rx_ring->xsk_umem) {
3729 u32 xsk_buf_len = rx_ring->xsk_umem->chunk_size_nohr -
3730 XDP_PACKET_HEADROOM;
3731
3732 /* If the MAC supports setting RXDCTL.RLPML, SRRCTL[n].BSIZEPKT
3733 * is set to PAGE_SIZE and RXDCTL.RLPML is set to the actual
3734 * UMEM buffer size.
3735 *
3736 * If not, then we are stuck with a 1k buffer size resolution,
3737 * and frames larger than the UMEM buffer size viewed in a 1k
3738 * resolution will be dropped.
3739 */
3740 if (hw->mac.type != ixgbe_mac_82599EB)
3741 srrctl |= PAGE_SIZE >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
3742 else
3743 srrctl |= xsk_buf_len >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
3744 } else if (test_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state)) {
3745 srrctl |= IXGBE_RXBUFFER_3K >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
3746 } else {
3747 srrctl |= IXGBE_RXBUFFER_2K >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
3748 }
3749
3750
3751 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
3752
3753 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl);
3754 }
3755
3756 /**
3757 * ixgbe_rss_indir_tbl_entries - Return RSS indirection table entries
3758 * @adapter: device handle
3759 *
3760 *  - 82598/82599/X540:     128
3761 *  - X550(non-SRIOV mode): 512
3762 *  - X550(SRIOV mode):     64
3763 */
3764 u32 ixgbe_rss_indir_tbl_entries(struct ixgbe_adapter *adapter)
3765 {
3766 if (adapter->hw.mac.type < ixgbe_mac_X550)
3767 return 128;
3768 else if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
3769 return 64;
3770 else
3771 return 512;
3772 }
3773
3774 /**
3775 * ixgbe_store_key - Write the RSS key to HW
3776 * @adapter: device handle
3777 *
3778 * Write the RSS key stored in adapter.rss_key to HW.
3779 */
3780 void ixgbe_store_key(struct ixgbe_adapter *adapter)
3781 {
3782 struct ixgbe_hw *hw = &adapter->hw;
3783 int i;
3784
3785 for (i = 0; i < 10; i++)
3786 IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), adapter->rss_key[i]);
3787 }
3788
3789 /**
3790 * ixgbe_init_rss_key - Initialize adapter RSS key
3791 * @adapter: device handle
3792 *
3793 * Allocates and initializes the RSS key if it is not allocated.
3794 **/
3795 static inline int ixgbe_init_rss_key(struct ixgbe_adapter *adapter)
3796 {
3797 u32 *rss_key;
3798
3799 if (!adapter->rss_key) {
3800 rss_key = kzalloc(IXGBE_RSS_KEY_SIZE, GFP_KERNEL);
3801 if (unlikely(!rss_key))
3802 return -ENOMEM;
3803
3804 netdev_rss_key_fill(rss_key, IXGBE_RSS_KEY_SIZE);
3805 adapter->rss_key = rss_key;
3806 }
3807
3808 return 0;
3809 }
3810
3811 /**
3812 * ixgbe_store_reta - Write the RETA table to HW
3813 * @adapter: device handle
3814 *
3815 * Write the RSS redirection table stored in adapter.rss_indir_tbl[] to HW.
3816 */
3817 void ixgbe_store_reta(struct ixgbe_adapter *adapter)
3818 {
3819 u32 i, reta_entries = ixgbe_rss_indir_tbl_entries(adapter);
3820 struct ixgbe_hw *hw = &adapter->hw;
3821 u32 reta = 0;
3822 u32 indices_multi;
3823 u8 *indir_tbl = adapter->rss_indir_tbl;
3824
3825 /* Fill out the redirection table as follows:
3826 *  - 82598:      8 bit wide entries containing pair of 4 bit RSS
3827 *    indices.
3828 *  - 82599/X540: 8 bit wide entries containing 4 bit RSS index
3829 *  - X550:       8 bit wide entries containing 6 bit RSS index
3830 */
3831 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
3832 indices_multi = 0x11;
3833 else
3834 indices_multi = 0x1;
3835
3836
3837 for (i = 0; i < reta_entries; i++) {
3838 reta |= indices_multi * indir_tbl[i] << (i & 0x3) * 8;
3839 if ((i & 3) == 3) {
3840 if (i < 128)
3841 IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
3842 else
3843 IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
3844 reta);
3845 reta = 0;
3846 }
3847 }
3848 }
3849
3850 /**
3851 * ixgbe_store_vfreta - Write the RETA table to HW (x550 devices in SRIOV mode)
3852 * @adapter: device handle
3853 *
3854 * Write the RSS redirection table stored in adapter.rss_indir_tbl[] to HW.
3855 */
3856 static void ixgbe_store_vfreta(struct ixgbe_adapter *adapter)
3857 {
3858 u32 i, reta_entries = ixgbe_rss_indir_tbl_entries(adapter);
3859 struct ixgbe_hw *hw = &adapter->hw;
3860 u32 vfreta = 0;
3861
3862
3863 for (i = 0; i < reta_entries; i++) {
3864 u16 pool = adapter->num_rx_pools;
3865
3866 vfreta |= (u32)adapter->rss_indir_tbl[i] << (i & 0x3) * 8;
3867 if ((i & 3) != 3)
3868 continue;
3869
3870 while (pool--)
3871 IXGBE_WRITE_REG(hw,
3872 IXGBE_PFVFRETA(i >> 2, VMDQ_P(pool)),
3873 vfreta);
3874 vfreta = 0;
3875 }
3876 }
3877
3878 static void ixgbe_setup_reta(struct ixgbe_adapter *adapter)
3879 {
3880 u32 i, j;
3881 u32 reta_entries = ixgbe_rss_indir_tbl_entries(adapter);
3882 u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;
3883
3884 /* Program table for at least 4 queues w/ SR-IOV so that VFs can
3885 * make full use of any rings they may have.  We will use the
3886 * PSRTYPE register to control how many rings we use within the PF.
3887 */
3888 if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) && (rss_i < 4))
3889 rss_i = 4;
3890
3891
3892 ixgbe_store_key(adapter);
3893
3894
3895 memset(adapter->rss_indir_tbl, 0, sizeof(adapter->rss_indir_tbl));
3896
3897 for (i = 0, j = 0; i < reta_entries; i++, j++) {
3898 if (j == rss_i)
3899 j = 0;
3900
3901 adapter->rss_indir_tbl[i] = j;
3902 }
3903
3904 ixgbe_store_reta(adapter);
3905 }
3906
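/* X550 with SR-IOV: program the per-pool RSS key registers and a
 * 64-entry per-pool indirection table instead of the global RETA
 */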
3907 static void ixgbe_setup_vfreta(struct ixgbe_adapter *adapter)
3908 {
3909 struct ixgbe_hw *hw = &adapter->hw;
3910 u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;
3911 int i, j;
3912
3913
3914 for (i = 0; i < 10; i++) {
3915 u16 pool = adapter->num_rx_pools;
3916
3917 while (pool--)
3918 IXGBE_WRITE_REG(hw,
3919 IXGBE_PFVFRSSRK(i, VMDQ_P(pool)),
3920 *(adapter->rss_key + i));
3921 }
3922
3923
3924 for (i = 0, j = 0; i < 64; i++, j++) {
3925 if (j == rss_i)
3926 j = 0;
3927
3928 adapter->rss_indir_tbl[i] = j;
3929 }
3930
3931 ixgbe_store_vfreta(adapter);
3932 }
3933
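/* pick the multiple receive queue mode (RSS, VMDq and DCB combinations)
 * and program the RSS hash field selection for the device
 */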
3934 static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
3935 {
3936 struct ixgbe_hw *hw = &adapter->hw;
3937 u32 mrqc = 0, rss_field = 0, vfmrqc = 0;
3938 u32 rxcsum;
3939
3940
3941 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
3942 rxcsum |= IXGBE_RXCSUM_PCSD;
3943 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
3944
3945 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
3946 if (adapter->ring_feature[RING_F_RSS].mask)
3947 mrqc = IXGBE_MRQC_RSSEN;
3948 } else {
3949 u8 tcs = adapter->hw_tcs;
3950
3951 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
3952 if (tcs > 4)
3953 mrqc = IXGBE_MRQC_VMDQRT8TCEN;
3954 else if (tcs > 1)
3955 mrqc = IXGBE_MRQC_VMDQRT4TCEN;
3956 else if (adapter->ring_feature[RING_F_VMDQ].mask ==
3957 IXGBE_82599_VMDQ_4Q_MASK)
3958 mrqc = IXGBE_MRQC_VMDQRSS32EN;
3959 else
3960 mrqc = IXGBE_MRQC_VMDQRSS64EN;
3961
3962
3963
3964
3965 if (hw->mac.type >= ixgbe_mac_X550)
3966 mrqc |= IXGBE_MRQC_L3L4TXSWEN;
3967 } else {
3968 if (tcs > 4)
3969 mrqc = IXGBE_MRQC_RTRSS8TCEN;
3970 else if (tcs > 1)
3971 mrqc = IXGBE_MRQC_RTRSS4TCEN;
3972 else
3973 mrqc = IXGBE_MRQC_RSSEN;
3974 }
3975 }
3976
3977
3978 rss_field |= IXGBE_MRQC_RSS_FIELD_IPV4 |
3979 IXGBE_MRQC_RSS_FIELD_IPV4_TCP |
3980 IXGBE_MRQC_RSS_FIELD_IPV6 |
3981 IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
3982
3983 if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP)
3984 rss_field |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
3985 if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
3986 rss_field |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
3987
3988 if ((hw->mac.type >= ixgbe_mac_X550) &&
3989 (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) {
3990 u16 pool = adapter->num_rx_pools;
3991
3992
3993 mrqc |= IXGBE_MRQC_MULTIPLE_RSS;
3994 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
3995
3996
3997 ixgbe_setup_vfreta(adapter);
3998 vfmrqc = IXGBE_MRQC_RSSEN;
3999 vfmrqc |= rss_field;
4000
4001 while (pool--)
4002 IXGBE_WRITE_REG(hw,
4003 IXGBE_PFVFMRQC(VMDQ_P(pool)),
4004 vfmrqc);
4005 } else {
4006 ixgbe_setup_reta(adapter);
4007 mrqc |= rss_field;
4008 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
4009 }
4010 }
4011
4012 /**
4013 * ixgbe_configure_rscctl - enable RSC for the indicated ring
4014 * @adapter: address of board private structure
4015 * @ring: structure containing ring specific data
4016 **/
4017 static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
4018 struct ixgbe_ring *ring)
4019 {
4020 struct ixgbe_hw *hw = &adapter->hw;
4021 u32 rscctrl;
4022 u8 reg_idx = ring->reg_idx;
4023
4024 if (!ring_is_rsc_enabled(ring))
4025 return;
4026
4027 rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(reg_idx));
4028 rscctrl |= IXGBE_RSCCTL_RSCEN;
4029
4030 /*
4031 * we must limit the number of descriptors so that the
4032 * total size of max desc * buf_len is not greater
4033 * than 65536
4034 */
4034 rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
4035 IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(reg_idx), rscctrl);
4036 }
4037
4038 #define IXGBE_MAX_RX_DESC_POLL 10
4039 static void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter,
4040 struct ixgbe_ring *ring)
4041 {
4042 struct ixgbe_hw *hw = &adapter->hw;
4043 int wait_loop = IXGBE_MAX_RX_DESC_POLL;
4044 u32 rxdctl;
4045 u8 reg_idx = ring->reg_idx;
4046
4047 if (ixgbe_removed(hw->hw_addr))
4048 return;
4049
4050 if (hw->mac.type == ixgbe_mac_82598EB &&
4051 !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
4052 return;
4053
4054 do {
4055 usleep_range(1000, 2000);
4056 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
4057 } while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE));
4058
4059 if (!wait_loop) {
4060 e_err(drv, "RXDCTL.ENABLE on Rx queue %d not set within "
4061 "the polling period\n", reg_idx);
4062 }
4063 }
4064
4065 void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
4066 struct ixgbe_ring *ring)
4067 {
4068 struct ixgbe_hw *hw = &adapter->hw;
4069 union ixgbe_adv_rx_desc *rx_desc;
4070 u64 rdba = ring->dma;
4071 u32 rxdctl;
4072 u8 reg_idx = ring->reg_idx;
4073
4074 xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
4075 ring->xsk_umem = ixgbe_xsk_umem(adapter, ring);
4076 if (ring->xsk_umem) {
4077 ring->zca.free = ixgbe_zca_free;
4078 WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
4079 MEM_TYPE_ZERO_COPY,
4080 &ring->zca));
4081
4082 } else {
4083 WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
4084 MEM_TYPE_PAGE_SHARED, NULL));
4085 }
4086
4087
4088 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
4089 rxdctl &= ~IXGBE_RXDCTL_ENABLE;
4090
4091
4092 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
4093 IXGBE_WRITE_FLUSH(hw);
4094
4095 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(reg_idx), (rdba & DMA_BIT_MASK(32)));
4096 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(reg_idx), (rdba >> 32));
4097 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(reg_idx),
4098 ring->count * sizeof(union ixgbe_adv_rx_desc));
4099
4100 IXGBE_WRITE_FLUSH(hw);
4101
4102 IXGBE_WRITE_REG(hw, IXGBE_RDH(reg_idx), 0);
4103 IXGBE_WRITE_REG(hw, IXGBE_RDT(reg_idx), 0);
4104 ring->tail = adapter->io_addr + IXGBE_RDT(reg_idx);
4105
4106 ixgbe_configure_srrctl(adapter, ring);
4107 ixgbe_configure_rscctl(adapter, ring);
4108
4109 if (hw->mac.type == ixgbe_mac_82598EB) {
4110 /*
4111 * enable cache line friendly hardware writes:
4112 * PTHRESH=32 descriptors (half the internal cache),
4113 * this also removes ugly rx_no_buffer_count increment
4114 * HTHRESH=4 descriptors (to minimize latency on fetch)
4115 * WTHRESH=8 descriptors (to minimize latency on fetch)
4116 */
4117 rxdctl &= ~0x3FFFFF;
4118 rxdctl |= 0x080420;
4119 #if (PAGE_SIZE < 8192)
4120
4121 } else if (hw->mac.type != ixgbe_mac_82599EB) {
4122 rxdctl &= ~(IXGBE_RXDCTL_RLPMLMASK |
4123 IXGBE_RXDCTL_RLPML_EN);
4124
4125 /* Limit the maximum frame size so we don't overrun the skb.
4126 * This can happen in SRIOV mode when the MTU of the VF is
4127 * higher than the MTU of the PF.
4128 */
4129 if (ring_uses_build_skb(ring) &&
4130 !test_bit(__IXGBE_RX_3K_BUFFER, &ring->state))
4131 rxdctl |= IXGBE_MAX_2K_FRAME_BUILD_SKB |
4132 IXGBE_RXDCTL_RLPML_EN;
4133 #endif
4134 }
4135
4136 if (ring->xsk_umem && hw->mac.type != ixgbe_mac_82599EB) {
4137 u32 xsk_buf_len = ring->xsk_umem->chunk_size_nohr -
4138 XDP_PACKET_HEADROOM;
4139
4140 rxdctl &= ~(IXGBE_RXDCTL_RLPMLMASK |
4141 IXGBE_RXDCTL_RLPML_EN);
4142 rxdctl |= xsk_buf_len | IXGBE_RXDCTL_RLPML_EN;
4143
4144 ring->rx_buf_len = xsk_buf_len;
4145 }
4146
4147
4148 memset(ring->rx_buffer_info, 0,
4149 sizeof(struct ixgbe_rx_buffer) * ring->count);
4150
4151
4152 rx_desc = IXGBE_RX_DESC(ring, 0);
4153 rx_desc->wb.upper.length = 0;
4154
4155
4156 rxdctl |= IXGBE_RXDCTL_ENABLE;
4157 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
4158
4159 ixgbe_rx_desc_queue_enable(adapter, ring);
4160 if (ring->xsk_umem)
4161 ixgbe_alloc_rx_buffers_zc(ring, ixgbe_desc_unused(ring));
4162 else
4163 ixgbe_alloc_rx_buffers(ring, ixgbe_desc_unused(ring));
4164 }
4165
4166 static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter)
4167 {
4168 struct ixgbe_hw *hw = &adapter->hw;
4169 int rss_i = adapter->ring_feature[RING_F_RSS].indices;
4170 u16 pool = adapter->num_rx_pools;
4171
4172
4173 u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
4174 IXGBE_PSRTYPE_UDPHDR |
4175 IXGBE_PSRTYPE_IPV4HDR |
4176 IXGBE_PSRTYPE_L2HDR |
4177 IXGBE_PSRTYPE_IPV6HDR;
4178
4179 if (hw->mac.type == ixgbe_mac_82598EB)
4180 return;
4181
4182 if (rss_i > 3)
4183 psrtype |= 2u << 29;
4184 else if (rss_i > 1)
4185 psrtype |= 1u << 29;
4186
4187 while (pool--)
4188 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(pool)), psrtype);
4189 }
4190
4191 static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter)
4192 {
4193 struct ixgbe_hw *hw = &adapter->hw;
4194 u16 pool = adapter->num_rx_pools;
4195 u32 reg_offset, vf_shift, vmolr;
4196 u32 gcr_ext, vmdctl;
4197 int i;
4198
4199 if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
4200 return;
4201
4202 vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
4203 vmdctl |= IXGBE_VMD_CTL_VMDQ_EN;
4204 vmdctl &= ~IXGBE_VT_CTL_POOL_MASK;
4205 vmdctl |= VMDQ_P(0) << IXGBE_VT_CTL_POOL_SHIFT;
4206 vmdctl |= IXGBE_VT_CTL_REPLEN;
4207 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl);
4208
4209
4210
4211
4212 vmolr = IXGBE_VMOLR_AUPE;
4213 while (pool--)
4214 IXGBE_WRITE_REG(hw, IXGBE_VMOLR(VMDQ_P(pool)), vmolr);
4215
4216 vf_shift = VMDQ_P(0) % 32;
4217 reg_offset = (VMDQ_P(0) >= 32) ? 1 : 0;
4218
4219
4220 IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), GENMASK(31, vf_shift));
4221 IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset ^ 1), reg_offset - 1);
4222 IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), GENMASK(31, vf_shift));
4223 IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset ^ 1), reg_offset - 1);
4224 if (adapter->bridge_mode == BRIDGE_MODE_VEB)
4225 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
4226
4227
4228 hw->mac.ops.set_vmdq(hw, 0, VMDQ_P(0));
4229
4230
4231 adapter->flags2 &= ~IXGBE_FLAG2_VLAN_PROMISC;
4232
4233 /*
4234 * Set up VF register offsets for selected VT Mode,
4235 * i.e. 32 or 64 VFs for SR-IOV
4236 */
4237 switch (adapter->ring_feature[RING_F_VMDQ].mask) {
4238 case IXGBE_82599_VMDQ_8Q_MASK:
4239 gcr_ext = IXGBE_GCR_EXT_VT_MODE_16;
4240 break;
4241 case IXGBE_82599_VMDQ_4Q_MASK:
4242 gcr_ext = IXGBE_GCR_EXT_VT_MODE_32;
4243 break;
4244 default:
4245 gcr_ext = IXGBE_GCR_EXT_VT_MODE_64;
4246 break;
4247 }
4248
4249 IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
4250
4251 for (i = 0; i < adapter->num_vfs; i++) {
4252
4253 ixgbe_ndo_set_vf_spoofchk(adapter->netdev, i,
4254 adapter->vfinfo[i].spoofchk_enabled);
4255
4256
4257 ixgbe_ndo_set_vf_rss_query_en(adapter->netdev, i,
4258 adapter->vfinfo[i].rss_query_enabled);
4259 }
4260 }
4261
4262 static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter)
4263 {
4264 struct ixgbe_hw *hw = &adapter->hw;
4265 struct net_device *netdev = adapter->netdev;
4266 int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
4267 struct ixgbe_ring *rx_ring;
4268 int i;
4269 u32 mhadd, hlreg0;
4270
4271 #ifdef IXGBE_FCOE
4272
4273 if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
4274 (max_frame < IXGBE_FCOE_JUMBO_FRAME_SIZE))
4275 max_frame = IXGBE_FCOE_JUMBO_FRAME_SIZE;
4276
4277 #endif
4278
4279
4280 if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN))
4281 max_frame = (ETH_FRAME_LEN + ETH_FCS_LEN);
4282
4283 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
4284 if (max_frame != (mhadd >> IXGBE_MHADD_MFS_SHIFT)) {
4285 mhadd &= ~IXGBE_MHADD_MFS_MASK;
4286 mhadd |= max_frame << IXGBE_MHADD_MFS_SHIFT;
4287
4288 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
4289 }
4290
4291 hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
4292
4293 hlreg0 |= IXGBE_HLREG0_JUMBOEN;
4294 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
4295
4296
4297
4298
4299
4300 for (i = 0; i < adapter->num_rx_queues; i++) {
4301 rx_ring = adapter->rx_ring[i];
4302
4303 clear_ring_rsc_enabled(rx_ring);
4304 clear_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state);
4305 clear_bit(__IXGBE_RX_BUILD_SKB_ENABLED, &rx_ring->state);
4306
4307 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
4308 set_ring_rsc_enabled(rx_ring);
4309
4310 if (test_bit(__IXGBE_RX_FCOE, &rx_ring->state))
4311 set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state);
4312
4313 if (adapter->flags2 & IXGBE_FLAG2_RX_LEGACY)
4314 continue;
4315
4316 set_bit(__IXGBE_RX_BUILD_SKB_ENABLED, &rx_ring->state);
4317
4318 #if (PAGE_SIZE < 8192)
4319 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
4320 set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state);
4321
4322 if (IXGBE_2K_TOO_SMALL_WITH_PADDING ||
4323 (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)))
4324 set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state);
4325 #endif
4326 }
4327 }
4328
4329 static void ixgbe_setup_rdrxctl(struct ixgbe_adapter *adapter)
4330 {
4331 struct ixgbe_hw *hw = &adapter->hw;
4332 u32 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
4333
4334 switch (hw->mac.type) {
4335 case ixgbe_mac_82598EB:
4336 /*
4337 * For VMDq support of different descriptor types or
4338 * buffer sizes through the use of multiple SRRCTL
4339 * registers, RDRXCTL.MVMEN must be set to 1
4340 *
4341 * also, the manual doesn't mention it clearly but DCA hints
4342 * will only use queue 0's tags unless this bit is set.  Side
4343 * effects of setting this bit are only that SRRCTL must be
4344 * fully programmed [0..15]
4345 */
4346 rdrxctl |= IXGBE_RDRXCTL_MVMEN;
4347 break;
4348 case ixgbe_mac_X550:
4349 case ixgbe_mac_X550EM_x:
4350 case ixgbe_mac_x550em_a:
4351 if (adapter->num_vfs)
4352 rdrxctl |= IXGBE_RDRXCTL_PSP;
4353 /* fall through */
4354 case ixgbe_mac_82599EB:
4355 case ixgbe_mac_X540:
4356
4357 IXGBE_WRITE_REG(hw, IXGBE_RSCDBU,
4358 (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU)));
4359 rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
4360
4361 rdrxctl |= (IXGBE_RDRXCTL_RSCACKC | IXGBE_RDRXCTL_FCOE_WRFIX);
4362 rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
4363 break;
4364 default:
4365
4366 return;
4367 }
4368
4369 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
4370 }
4371
4372 /**
4373 * ixgbe_configure_rx - Configure 8259x Receive Unit after Reset
4374 * @adapter: board private structure
4375 *
4376 * Configure the Rx unit of the MAC after a reset.
4377 **/
4378 static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
4379 {
4380 struct ixgbe_hw *hw = &adapter->hw;
4381 int i;
4382 u32 rxctrl, rfctl;
4383
4384
4385 hw->mac.ops.disable_rx(hw);
4386
4387 ixgbe_setup_psrtype(adapter);
4388 ixgbe_setup_rdrxctl(adapter);
4389
4390
4391 rfctl = IXGBE_READ_REG(hw, IXGBE_RFCTL);
4392 rfctl &= ~IXGBE_RFCTL_RSC_DIS;
4393 if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))
4394 rfctl |= IXGBE_RFCTL_RSC_DIS;
4395
4396
4397 rfctl |= (IXGBE_RFCTL_NFSW_DIS | IXGBE_RFCTL_NFSR_DIS);
4398 IXGBE_WRITE_REG(hw, IXGBE_RFCTL, rfctl);
4399
4400
4401 ixgbe_setup_mrqc(adapter);
4402
4403
4404 ixgbe_set_rx_buffer_len(adapter);
4405
4406
4407
4408
4409
4410 for (i = 0; i < adapter->num_rx_queues; i++)
4411 ixgbe_configure_rx_ring(adapter, adapter->rx_ring[i]);
4412
4413 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
4414
4415 if (hw->mac.type == ixgbe_mac_82598EB)
4416 rxctrl |= IXGBE_RXCTRL_DMBYPS;
4417
4418
4419 rxctrl |= IXGBE_RXCTRL_RXEN;
4420 hw->mac.ops.enable_rx_dma(hw, rxctrl);
4421 }
4422
4423 static int ixgbe_vlan_rx_add_vid(struct net_device *netdev,
4424 __be16 proto, u16 vid)
4425 {
4426 struct ixgbe_adapter *adapter = netdev_priv(netdev);
4427 struct ixgbe_hw *hw = &adapter->hw;
4428
4429
4430 if (!vid || !(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC))
4431 hw->mac.ops.set_vfta(&adapter->hw, vid, VMDQ_P(0), true, !!vid);
4432
4433 set_bit(vid, adapter->active_vlans);
4434
4435 return 0;
4436 }
4437
4438 static int ixgbe_find_vlvf_entry(struct ixgbe_hw *hw, u32 vlan)
4439 {
4440 u32 vlvf;
4441 int idx;
4442
4443
4444 if (vlan == 0)
4445 return 0;
4446
4447
4448 for (idx = IXGBE_VLVF_ENTRIES; --idx;) {
4449 vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(idx));
4450 if ((vlvf & VLAN_VID_MASK) == vlan)
4451 break;
4452 }
4453
4454 return idx;
4455 }
4456
4457 void ixgbe_update_pf_promisc_vlvf(struct ixgbe_adapter *adapter, u32 vid)
4458 {
4459 struct ixgbe_hw *hw = &adapter->hw;
4460 u32 bits, word;
4461 int idx;
4462
4463 idx = ixgbe_find_vlvf_entry(hw, vid);
4464 if (!idx)
4465 return;
4466
4467 /* See if any other pools are set for this VLAN filter
4468 * entry other than the PF.
4469 */
4470 word = idx * 2 + (VMDQ_P(0) / 32);
4471 bits = ~BIT(VMDQ_P(0) % 32);
4472 bits &= IXGBE_READ_REG(hw, IXGBE_VLVFB(word));
4473
4474
4475 if (!bits && !IXGBE_READ_REG(hw, IXGBE_VLVFB(word ^ 1))) {
4476 if (!(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC))
4477 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(word), 0);
4478 IXGBE_WRITE_REG(hw, IXGBE_VLVF(idx), 0);
4479 }
4480 }
4481
4482 static int ixgbe_vlan_rx_kill_vid(struct net_device *netdev,
4483 __be16 proto, u16 vid)
4484 {
4485 struct ixgbe_adapter *adapter = netdev_priv(netdev);
4486 struct ixgbe_hw *hw = &adapter->hw;
4487
4488
4489 if (vid && !(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC))
4490 hw->mac.ops.set_vfta(hw, vid, VMDQ_P(0), false, true);
4491
4492 clear_bit(vid, adapter->active_vlans);
4493
4494 return 0;
4495 }
4496
4497 /**
4498 * ixgbe_vlan_strip_disable - helper to disable hw vlan stripping
4499 * @adapter: driver data
4500 */
4501 static void ixgbe_vlan_strip_disable(struct ixgbe_adapter *adapter)
4502 {
4503 struct ixgbe_hw *hw = &adapter->hw;
4504 u32 vlnctrl;
4505 int i, j;
4506
4507 switch (hw->mac.type) {
4508 case ixgbe_mac_82598EB:
4509 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
4510 vlnctrl &= ~IXGBE_VLNCTRL_VME;
4511 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
4512 break;
4513 case ixgbe_mac_82599EB:
4514 case ixgbe_mac_X540:
4515 case ixgbe_mac_X550:
4516 case ixgbe_mac_X550EM_x:
4517 case ixgbe_mac_x550em_a:
4518 for (i = 0; i < adapter->num_rx_queues; i++) {
4519 struct ixgbe_ring *ring = adapter->rx_ring[i];
4520
4521 if (!netif_is_ixgbe(ring->netdev))
4522 continue;
4523
4524 j = ring->reg_idx;
4525 vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
4526 vlnctrl &= ~IXGBE_RXDCTL_VME;
4527 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
4528 }
4529 break;
4530 default:
4531 break;
4532 }
4533 }
4534
4535 /**
4536  * ixgbe_vlan_strip_enable - helper to enable hw vlan stripping
4537  * @adapter: driver data
4538  */
4539 static void ixgbe_vlan_strip_enable(struct ixgbe_adapter *adapter)
4540 {
4541 struct ixgbe_hw *hw = &adapter->hw;
4542 u32 vlnctrl;
4543 int i, j;
4544
4545 switch (hw->mac.type) {
4546 case ixgbe_mac_82598EB:
4547 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
4548 vlnctrl |= IXGBE_VLNCTRL_VME;
4549 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
4550 break;
4551 case ixgbe_mac_82599EB:
4552 case ixgbe_mac_X540:
4553 case ixgbe_mac_X550:
4554 case ixgbe_mac_X550EM_x:
4555 case ixgbe_mac_x550em_a:
4556 for (i = 0; i < adapter->num_rx_queues; i++) {
4557 struct ixgbe_ring *ring = adapter->rx_ring[i];
4558
4559 if (!netif_is_ixgbe(ring->netdev))
4560 continue;
4561
4562 j = ring->reg_idx;
4563 vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
4564 vlnctrl |= IXGBE_RXDCTL_VME;
4565 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
4566 }
4567 break;
4568 default:
4569 break;
4570 }
4571 }
4572
4573 static void ixgbe_vlan_promisc_enable(struct ixgbe_adapter *adapter)
4574 {
4575 struct ixgbe_hw *hw = &adapter->hw;
4576 u32 vlnctrl, i;
4577
4578 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
4579
4580 if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) {
4581
4582 vlnctrl |= IXGBE_VLNCTRL_VFE;
4583 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
4584 } else {
4585 vlnctrl &= ~IXGBE_VLNCTRL_VFE;
4586 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
4587 return;
4588 }
4589
4590
4591 if (hw->mac.type == ixgbe_mac_82598EB)
4592 return;
4593
4594 /* Already in VLAN promiscuous mode, nothing to do */
4595 if (adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC)
4596 return;
4597
4598
4599 adapter->flags2 |= IXGBE_FLAG2_VLAN_PROMISC;
4600
4601 /* Add PF to all active pools */
4602 for (i = IXGBE_VLVF_ENTRIES; --i;) {
4603 u32 reg_offset = IXGBE_VLVFB(i * 2 + VMDQ_P(0) / 32);
4604 u32 vlvfb = IXGBE_READ_REG(hw, reg_offset);
4605
4606 vlvfb |= BIT(VMDQ_P(0) % 32);
4607 IXGBE_WRITE_REG(hw, reg_offset, vlvfb);
4608 }
4609
4610 /* Set all bits in the VLAN filter table array */
4611 for (i = hw->mac.vft_size; i--;)
4612 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), ~0U);
4613 }
4614
4615 #define VFTA_BLOCK_SIZE 8
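/* Rescan one 8-register block of the VFTA: drop the PF from VLVF pools for
 * VLANs it no longer carries in active_vlans, then rebuild the VFTA
 * registers in this block from the active_vlans bitmap.
 */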
4616 static void ixgbe_scrub_vfta(struct ixgbe_adapter *adapter, u32 vfta_offset)
4617 {
4618 struct ixgbe_hw *hw = &adapter->hw;
4619 u32 vfta[VFTA_BLOCK_SIZE] = { 0 };
4620 u32 vid_start = vfta_offset * 32;
4621 u32 vid_end = vid_start + (VFTA_BLOCK_SIZE * 32);
4622 u32 i, vid, word, bits;
4623
4624 for (i = IXGBE_VLVF_ENTRIES; --i;) {
4625 u32 vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(i));
4626
4627
4628 vid = vlvf & VLAN_VID_MASK;
4629
4630
4631 if (vid < vid_start || vid >= vid_end)
4632 continue;
4633
4634 if (vlvf) {
4635
4636 vfta[(vid - vid_start) / 32] |= BIT(vid % 32);
4637
4638
4639 if (test_bit(vid, adapter->active_vlans))
4640 continue;
4641 }
4642
4643 /* remove PF from the pool */
4644 word = i * 2 + VMDQ_P(0) / 32;
4645 bits = ~BIT(VMDQ_P(0) % 32);
4646 bits &= IXGBE_READ_REG(hw, IXGBE_VLVFB(word));
4647 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(word), bits);
4648 }
4649
4650 /* extract values from active_vlans and write back to VFTA */
4651 for (i = VFTA_BLOCK_SIZE; i--;) {
4652 vid = (vfta_offset + i) * 32;
4653 word = vid / BITS_PER_LONG;
4654 bits = vid % BITS_PER_LONG;
4655
4656 vfta[i] |= adapter->active_vlans[word] >> bits;
4657
4658 IXGBE_WRITE_REG(hw, IXGBE_VFTA(vfta_offset + i), vfta[i]);
4659 }
4660 }
4661
4662 static void ixgbe_vlan_promisc_disable(struct ixgbe_adapter *adapter)
4663 {
4664 struct ixgbe_hw *hw = &adapter->hw;
4665 u32 vlnctrl, i;
4666
4667
4668 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
4669 vlnctrl |= IXGBE_VLNCTRL_VFE;
4670 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
4671
4672 if (!(adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) ||
4673 hw->mac.type == ixgbe_mac_82598EB)
4674 return;
4675
4676
4677 if (!(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC))
4678 return;
4679
4680
4681 adapter->flags2 &= ~IXGBE_FLAG2_VLAN_PROMISC;
4682
4683 for (i = 0; i < hw->mac.vft_size; i += VFTA_BLOCK_SIZE)
4684 ixgbe_scrub_vfta(adapter, i);
4685 }
4686
4687 static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
4688 {
4689 u16 vid = 1;
4690
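/* VLAN 0 (untagged/priority-tagged traffic) is always programmed first;
 * the loop below then re-adds every VLAN ID recorded in active_vlans.
 */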
4691 ixgbe_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), 0);
4692
4693 for_each_set_bit_from(vid, adapter->active_vlans, VLAN_N_VID)
4694 ixgbe_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
4695 }
4696
4697 /**
4698  * ixgbe_write_mc_addr_list - write multicast addresses to MTA
4699  * @netdev: network interface device structure
4700  *
4701  * Writes the multicast address list to the MTA hash table.
4702  * Returns: -ENOMEM on failure
4703  *          0 if no addresses were written
4704  *          X when X addresses were written to the MTA
4705  **/
4706 static int ixgbe_write_mc_addr_list(struct net_device *netdev)
4707 {
4708 struct ixgbe_adapter *adapter = netdev_priv(netdev);
4709 struct ixgbe_hw *hw = &adapter->hw;
4710
4711 if (!netif_running(netdev))
4712 return 0;
4713
4714 if (hw->mac.ops.update_mc_addr_list)
4715 hw->mac.ops.update_mc_addr_list(hw, netdev);
4716 else
4717 return -ENOMEM;
4718
4719 #ifdef CONFIG_PCI_IOV
4720 ixgbe_restore_vf_multicasts(adapter);
4721 #endif
4722
4723 return netdev_mc_count(netdev);
4724 }
4725
4726 #ifdef CONFIG_PCI_IOV
4727 void ixgbe_full_sync_mac_table(struct ixgbe_adapter *adapter)
4728 {
4729 struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0];
4730 struct ixgbe_hw *hw = &adapter->hw;
4731 int i;
4732
4733 for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) {
4734 mac_table->state &= ~IXGBE_MAC_STATE_MODIFIED;
4735
4736 if (mac_table->state & IXGBE_MAC_STATE_IN_USE)
4737 hw->mac.ops.set_rar(hw, i,
4738 mac_table->addr,
4739 mac_table->pool,
4740 IXGBE_RAH_AV);
4741 else
4742 hw->mac.ops.clear_rar(hw, i);
4743 }
4744 }
4745
4746 #endif
4747 static void ixgbe_sync_mac_table(struct ixgbe_adapter *adapter)
4748 {
4749 struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0];
4750 struct ixgbe_hw *hw = &adapter->hw;
4751 int i;
4752
4753 for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) {
4754 if (!(mac_table->state & IXGBE_MAC_STATE_MODIFIED))
4755 continue;
4756
4757 mac_table->state &= ~IXGBE_MAC_STATE_MODIFIED;
4758
4759 if (mac_table->state & IXGBE_MAC_STATE_IN_USE)
4760 hw->mac.ops.set_rar(hw, i,
4761 mac_table->addr,
4762 mac_table->pool,
4763 IXGBE_RAH_AV);
4764 else
4765 hw->mac.ops.clear_rar(hw, i);
4766 }
4767 }
4768
4769 static void ixgbe_flush_sw_mac_table(struct ixgbe_adapter *adapter)
4770 {
4771 struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0];
4772 struct ixgbe_hw *hw = &adapter->hw;
4773 int i;
4774
4775 for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) {
4776 mac_table->state |= IXGBE_MAC_STATE_MODIFIED;
4777 mac_table->state &= ~IXGBE_MAC_STATE_IN_USE;
4778 }
4779
4780 ixgbe_sync_mac_table(adapter);
4781 }
4782
4783 static int ixgbe_available_rars(struct ixgbe_adapter *adapter, u16 pool)
4784 {
4785 struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0];
4786 struct ixgbe_hw *hw = &adapter->hw;
4787 int i, count = 0;
4788
4789 for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) {
4790
4791 if (mac_table->state & IXGBE_MAC_STATE_DEFAULT)
4792 continue;
4793
4794
4795 if (mac_table->state & IXGBE_MAC_STATE_IN_USE) {
4796 if (mac_table->pool != pool)
4797 continue;
4798 }
4799
4800 count++;
4801 }
4802
4803 return count;
4804 }
4805
4806
4807 static void ixgbe_mac_set_default_filter(struct ixgbe_adapter *adapter)
4808 {
4809 struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0];
4810 struct ixgbe_hw *hw = &adapter->hw;
4811
4812 memcpy(&mac_table->addr, hw->mac.addr, ETH_ALEN);
4813 mac_table->pool = VMDQ_P(0);
4814
4815 mac_table->state = IXGBE_MAC_STATE_DEFAULT | IXGBE_MAC_STATE_IN_USE;
4816
4817 hw->mac.ops.set_rar(hw, 0, mac_table->addr, mac_table->pool,
4818 IXGBE_RAH_AV);
4819 }
4820
4821 int ixgbe_add_mac_filter(struct ixgbe_adapter *adapter,
4822 const u8 *addr, u16 pool)
4823 {
4824 struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0];
4825 struct ixgbe_hw *hw = &adapter->hw;
4826 int i;
4827
4828 if (is_zero_ether_addr(addr))
4829 return -EINVAL;
4830
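/* Claim the first RAR slot that is not already in use; the slot index is
 * returned to the caller on success.
 */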
4831 for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) {
4832 if (mac_table->state & IXGBE_MAC_STATE_IN_USE)
4833 continue;
4834
4835 ether_addr_copy(mac_table->addr, addr);
4836 mac_table->pool = pool;
4837
4838 mac_table->state |= IXGBE_MAC_STATE_MODIFIED |
4839 IXGBE_MAC_STATE_IN_USE;
4840
4841 ixgbe_sync_mac_table(adapter);
4842
4843 return i;
4844 }
4845
4846 return -ENOMEM;
4847 }
4848
4849 int ixgbe_del_mac_filter(struct ixgbe_adapter *adapter,
4850 const u8 *addr, u16 pool)
4851 {
4852 struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0];
4853 struct ixgbe_hw *hw = &adapter->hw;
4854 int i;
4855
4856 if (is_zero_ether_addr(addr))
4857 return -EINVAL;
4858
4859
4860 for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) {
4861
4862 if (!(mac_table->state & IXGBE_MAC_STATE_IN_USE))
4863 continue;
4864
4865 if (mac_table->pool != pool)
4866 continue;
4867
4868 if (!ether_addr_equal(addr, mac_table->addr))
4869 continue;
4870
4871 mac_table->state |= IXGBE_MAC_STATE_MODIFIED;
4872 mac_table->state &= ~IXGBE_MAC_STATE_IN_USE;
4873
4874 ixgbe_sync_mac_table(adapter);
4875
4876 return 0;
4877 }
4878
4879 return -ENOMEM;
4880 }
4881
4882 static int ixgbe_uc_sync(struct net_device *netdev, const unsigned char *addr)
4883 {
4884 struct ixgbe_adapter *adapter = netdev_priv(netdev);
4885 int ret;
4886
4887 ret = ixgbe_add_mac_filter(adapter, addr, VMDQ_P(0));
4888
4889 return min_t(int, ret, 0);
4890 }
4891
4892 static int ixgbe_uc_unsync(struct net_device *netdev, const unsigned char *addr)
4893 {
4894 struct ixgbe_adapter *adapter = netdev_priv(netdev);
4895
4896 ixgbe_del_mac_filter(adapter, addr, VMDQ_P(0));
4897
4898 return 0;
4899 }
4900
4901 /**
4902  * ixgbe_set_rx_mode - Unicast, Multicast and Promiscuous mode set
4903  * @netdev: network interface device structure
4904  *
4905  * The set_rx_mode entry point is called whenever the unicast/multicast
4906  * address list or the network interface flags are updated.  This routine
4907  * is responsible for configuring the hardware for proper unicast,
4908  * multicast and promiscuous mode.
4909  **/
4910 void ixgbe_set_rx_mode(struct net_device *netdev)
4911 {
4912 struct ixgbe_adapter *adapter = netdev_priv(netdev);
4913 struct ixgbe_hw *hw = &adapter->hw;
4914 u32 fctrl, vmolr = IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE;
4915 netdev_features_t features = netdev->features;
4916 int count;
4917
4918
4919 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
4920
4921
4922 fctrl &= ~IXGBE_FCTRL_SBP;
4923 fctrl |= IXGBE_FCTRL_BAM;
4924 fctrl |= IXGBE_FCTRL_DPF;
4925 fctrl |= IXGBE_FCTRL_PMCF;
4926
4927
4928 fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
4929 if (netdev->flags & IFF_PROMISC) {
4930 hw->addr_ctrl.user_set_promisc = true;
4931 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
4932 vmolr |= IXGBE_VMOLR_MPE;
4933 features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
4934 } else {
4935 if (netdev->flags & IFF_ALLMULTI) {
4936 fctrl |= IXGBE_FCTRL_MPE;
4937 vmolr |= IXGBE_VMOLR_MPE;
4938 }
4939 hw->addr_ctrl.user_set_promisc = false;
4940 }
4941
4942 /*
4943  * Write addresses to the available RAR registers; if there is not
4944  * sufficient space to store all the addresses then enable
4945  * unicast promiscuous mode.
4946  */
4947 if (__dev_uc_sync(netdev, ixgbe_uc_sync, ixgbe_uc_unsync)) {
4948 fctrl |= IXGBE_FCTRL_UPE;
4949 vmolr |= IXGBE_VMOLR_ROPE;
4950 }
4951
4952 /* Write addresses to the MTA; if the attempt fails
4953  * then just turn on promiscuous mode so that we can
4954  * at least receive multicast traffic.
4955  */
4956 count = ixgbe_write_mc_addr_list(netdev);
4957 if (count < 0) {
4958 fctrl |= IXGBE_FCTRL_MPE;
4959 vmolr |= IXGBE_VMOLR_MPE;
4960 } else if (count) {
4961 vmolr |= IXGBE_VMOLR_ROMPE;
4962 }
4963
4964 if (hw->mac.type != ixgbe_mac_82598EB) {
4965 vmolr |= IXGBE_READ_REG(hw, IXGBE_VMOLR(VMDQ_P(0))) &
4966 ~(IXGBE_VMOLR_MPE | IXGBE_VMOLR_ROMPE |
4967 IXGBE_VMOLR_ROPE);
4968 IXGBE_WRITE_REG(hw, IXGBE_VMOLR(VMDQ_P(0)), vmolr);
4969 }
4970
4971
4972 if (features & NETIF_F_RXALL) {
4973
4974
4975 fctrl |= (IXGBE_FCTRL_SBP |
4976 IXGBE_FCTRL_BAM |
4977 IXGBE_FCTRL_PMCF);
4978
4979 fctrl &= ~(IXGBE_FCTRL_DPF);
4980
4981 }
4982
4983 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
4984
4985 if (features & NETIF_F_HW_VLAN_CTAG_RX)
4986 ixgbe_vlan_strip_enable(adapter);
4987 else
4988 ixgbe_vlan_strip_disable(adapter);
4989
4990 if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
4991 ixgbe_vlan_promisc_disable(adapter);
4992 else
4993 ixgbe_vlan_promisc_enable(adapter);
4994 }
4995
4996 static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
4997 {
4998 int q_idx;
4999
5000 for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++)
5001 napi_enable(&adapter->q_vector[q_idx]->napi);
5002 }
5003
5004 static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter)
5005 {
5006 int q_idx;
5007
5008 for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++)
5009 napi_disable(&adapter->q_vector[q_idx]->napi);
5010 }
5011
5012 static void ixgbe_clear_udp_tunnel_port(struct ixgbe_adapter *adapter, u32 mask)
5013 {
5014 struct ixgbe_hw *hw = &adapter->hw;
5015 u32 vxlanctrl;
5016
5017 if (!(adapter->flags & (IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE |
5018 IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE)))
5019 return;
5020
5021 vxlanctrl = IXGBE_READ_REG(hw, IXGBE_VXLANCTRL) & ~mask;
5022 IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, vxlanctrl);
5023
5024 if (mask & IXGBE_VXLANCTRL_VXLAN_UDPPORT_MASK)
5025 adapter->vxlan_port = 0;
5026
5027 if (mask & IXGBE_VXLANCTRL_GENEVE_UDPPORT_MASK)
5028 adapter->geneve_port = 0;
5029 }
5030
5031 #ifdef CONFIG_IXGBE_DCB
5032 /**
5033  * ixgbe_configure_dcb - Configure DCB hardware support
5034  * @adapter: board private structure
5035  *
5036  * Programs the DCB credit and priority settings when DCB is enabled;
5037  * otherwise only restores the default GSO size on 82598 parts and
5038  * returns.
5039  **/
5040 static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
5041 {
5042 struct ixgbe_hw *hw = &adapter->hw;
5043 int max_frame = adapter->netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
5044
5045 if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) {
5046 if (hw->mac.type == ixgbe_mac_82598EB)
5047 netif_set_gso_max_size(adapter->netdev, 65536);
5048 return;
5049 }
5050
5051 if (hw->mac.type == ixgbe_mac_82598EB)
5052 netif_set_gso_max_size(adapter->netdev, 32768);
5053
5054 #ifdef IXGBE_FCOE
5055 if (adapter->netdev->features & NETIF_F_FCOE_MTU)
5056 max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE);
5057 #endif
5058
5059
5060 if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE) {
5061 ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame,
5062 DCB_TX_CONFIG);
5063 ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame,
5064 DCB_RX_CONFIG);
5065 ixgbe_dcb_hw_config(hw, &adapter->dcb_cfg);
5066 } else if (adapter->ixgbe_ieee_ets && adapter->ixgbe_ieee_pfc) {
5067 ixgbe_dcb_hw_ets(&adapter->hw,
5068 adapter->ixgbe_ieee_ets,
5069 max_frame);
5070 ixgbe_dcb_hw_pfc_config(&adapter->hw,
5071 adapter->ixgbe_ieee_pfc->pfc_en,
5072 adapter->ixgbe_ieee_ets->prio_tc);
5073 }
5074
5075
5076 if (hw->mac.type != ixgbe_mac_82598EB) {
5077 u32 msb = 0;
5078 u16 rss_i = adapter->ring_feature[RING_F_RSS].indices - 1;
5079
5080 while (rss_i) {
5081 msb++;
5082 rss_i >>= 1;
5083 }
5084
5085
5086 IXGBE_WRITE_REG(hw, IXGBE_RQTC, msb * 0x11111111);
5087 }
5088 }
5089 #endif
5090
5091 /* Additional bittime to account for IXGBE framing */
5092 #define IXGBE_ETH_FRAMING 20
5093 /**
5094  * ixgbe_hpbthresh - calculate high water mark for flow control
5095  * @adapter: board private structure to calculate for
5096  * @pb: packet buffer to calculate
5097  *
5098  * Returns the high water mark, in KB, for the given packet buffer.
5099  **/
5100 static int ixgbe_hpbthresh(struct ixgbe_adapter *adapter, int pb)
5101 {
5102 struct ixgbe_hw *hw = &adapter->hw;
5103 struct net_device *dev = adapter->netdev;
5104 int link, tc, kb, marker;
5105 u32 dv_id, rx_pba;
5106
5107
5108 tc = link = dev->mtu + ETH_HLEN + ETH_FCS_LEN + IXGBE_ETH_FRAMING;
5109
5110 #ifdef IXGBE_FCOE
5111
5112 if ((dev->features & NETIF_F_FCOE_MTU) &&
5113 (tc < IXGBE_FCOE_JUMBO_FRAME_SIZE) &&
5114 (pb == ixgbe_fcoe_get_tc(adapter)))
5115 tc = IXGBE_FCOE_JUMBO_FRAME_SIZE;
5116 #endif
5117
5118
5119 switch (hw->mac.type) {
5120 case ixgbe_mac_X540:
5121 case ixgbe_mac_X550:
5122 case ixgbe_mac_X550EM_x:
5123 case ixgbe_mac_x550em_a:
5124 dv_id = IXGBE_DV_X540(link, tc);
5125 break;
5126 default:
5127 dv_id = IXGBE_DV(link, tc);
5128 break;
5129 }
5130
5131
5132 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
5133 dv_id += IXGBE_B2BT(tc);
5134
5135
5136 kb = IXGBE_BT2KB(dv_id);
5137 rx_pba = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(pb)) >> 10;
5138
5139 marker = rx_pba - kb;
5140
5141
5142
5143
5144
5145 if (marker < 0) {
5146 e_warn(drv, "Packet Buffer(%i) cannot provide enough "
5147 "headroom to support flow control. "
5148 "Decrease MTU or number of traffic classes\n", pb);
5149 marker = tc + 1;
5150 }
5151
5152 return marker;
5153 }
5154
5155 /**
5156  * ixgbe_lpbthresh - calculate low water mark for flow control
5157  * @adapter: board private structure to calculate for
5158  * @pb: packet buffer to calculate
5159  *
5160  **/
5161 static int ixgbe_lpbthresh(struct ixgbe_adapter *adapter, int pb)
5162 {
5163 struct ixgbe_hw *hw = &adapter->hw;
5164 struct net_device *dev = adapter->netdev;
5165 int tc;
5166 u32 dv_id;
5167
5168
5169 tc = dev->mtu + ETH_HLEN + ETH_FCS_LEN;
5170
5171 #ifdef IXGBE_FCOE
5172
5173 if ((dev->features & NETIF_F_FCOE_MTU) &&
5174 (tc < IXGBE_FCOE_JUMBO_FRAME_SIZE) &&
5175 (pb == netdev_get_prio_tc_map(dev, adapter->fcoe.up)))
5176 tc = IXGBE_FCOE_JUMBO_FRAME_SIZE;
5177 #endif
5178
5179
5180 switch (hw->mac.type) {
5181 case ixgbe_mac_X540:
5182 case ixgbe_mac_X550:
5183 case ixgbe_mac_X550EM_x:
5184 case ixgbe_mac_x550em_a:
5185 dv_id = IXGBE_LOW_DV_X540(tc);
5186 break;
5187 default:
5188 dv_id = IXGBE_LOW_DV(tc);
5189 break;
5190 }
5191
5192
5193 return IXGBE_BT2KB(dv_id);
5194 }
5195
5196 /*
5197  * ixgbe_pbthresh_setup - calculate and set up the high and low water marks
5198  */
5199 static void ixgbe_pbthresh_setup(struct ixgbe_adapter *adapter)
5200 {
5201 struct ixgbe_hw *hw = &adapter->hw;
5202 int num_tc = adapter->hw_tcs;
5203 int i;
5204
5205 if (!num_tc)
5206 num_tc = 1;
5207
5208 for (i = 0; i < num_tc; i++) {
5209 hw->fc.high_water[i] = ixgbe_hpbthresh(adapter, i);
5210 hw->fc.low_water[i] = ixgbe_lpbthresh(adapter, i);
5211
5212 /* Low water marks must not be larger than high water marks */
5213 if (hw->fc.low_water[i] > hw->fc.high_water[i])
5214 hw->fc.low_water[i] = 0;
5215 }
5216
5217 for (; i < MAX_TRAFFIC_CLASS; i++)
5218 hw->fc.high_water[i] = 0;
5219 }
5220
5221 static void ixgbe_configure_pb(struct ixgbe_adapter *adapter)
5222 {
5223 struct ixgbe_hw *hw = &adapter->hw;
5224 int hdrm;
5225 u8 tc = adapter->hw_tcs;
5226
5227 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
5228 adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
5229 hdrm = 32 << adapter->fdir_pballoc;
5230 else
5231 hdrm = 0;
5232
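/* Split the Rx packet buffer equally among the enabled traffic classes,
 * reserving 'hdrm' KB of headroom for Flow Director filters, then
 * recompute the flow control watermarks to match.
 */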
5233 hw->mac.ops.set_rxpba(hw, tc, hdrm, PBA_STRATEGY_EQUAL);
5234 ixgbe_pbthresh_setup(adapter);
5235 }
5236
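/* Replay the software list of Flow Director perfect filters into the
 * hardware after a reset, validating each filter's ring/VF target first.
 */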
5237 static void ixgbe_fdir_filter_restore(struct ixgbe_adapter *adapter)
5238 {
5239 struct ixgbe_hw *hw = &adapter->hw;
5240 struct hlist_node *node2;
5241 struct ixgbe_fdir_filter *filter;
5242 u8 queue;
5243
5244 spin_lock(&adapter->fdir_perfect_lock);
5245
5246 if (!hlist_empty(&adapter->fdir_filter_list))
5247 ixgbe_fdir_set_input_mask_82599(hw, &adapter->fdir_mask);
5248
5249 hlist_for_each_entry_safe(filter, node2,
5250 &adapter->fdir_filter_list, fdir_node) {
5251 if (filter->action == IXGBE_FDIR_DROP_QUEUE) {
5252 queue = IXGBE_FDIR_DROP_QUEUE;
5253 } else {
5254 u32 ring = ethtool_get_flow_spec_ring(filter->action);
5255 u8 vf = ethtool_get_flow_spec_ring_vf(filter->action);
5256
5257 if (!vf && (ring >= adapter->num_rx_queues)) {
5258 e_err(drv, "FDIR restore failed without VF, ring: %u\n",
5259 ring);
5260 continue;
5261 } else if (vf &&
5262 ((vf > adapter->num_vfs) ||
5263 ring >= adapter->num_rx_queues_per_pool)) {
5264 e_err(drv, "FDIR restore failed with VF, vf: %hhu, ring: %u\n",
5265 vf, ring);
5266 continue;
5267 }
5268
5269
5270 if (!vf)
5271 queue = adapter->rx_ring[ring]->reg_idx;
5272 else
5273 queue = ((vf - 1) *
5274 adapter->num_rx_queues_per_pool) + ring;
5275 }
5276
5277 ixgbe_fdir_write_perfect_filter_82599(hw,
5278 &filter->filter, filter->sw_idx, queue);
5279 }
5280
5281 spin_unlock(&adapter->fdir_perfect_lock);
5282 }
5283
5284 /**
5285  * ixgbe_clean_rx_ring - Free Rx Buffers per Queue
5286  * @rx_ring: ring to free buffers from
5287  **/
5288 static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
5289 {
5290 u16 i = rx_ring->next_to_clean;
5291 struct ixgbe_rx_buffer *rx_buffer = &rx_ring->rx_buffer_info[i];
5292
5293 if (rx_ring->xsk_umem) {
5294 ixgbe_xsk_clean_rx_ring(rx_ring);
5295 goto skip_free;
5296 }
5297
5298
5299 while (i != rx_ring->next_to_alloc) {
5300 if (rx_buffer->skb) {
5301 struct sk_buff *skb = rx_buffer->skb;
5302 if (IXGBE_CB(skb)->page_released)
5303 dma_unmap_page_attrs(rx_ring->dev,
5304 IXGBE_CB(skb)->dma,
5305 ixgbe_rx_pg_size(rx_ring),
5306 DMA_FROM_DEVICE,
5307 IXGBE_RX_DMA_ATTR);
5308 dev_kfree_skb(skb);
5309 }
5310
5311 /* Invalidate cache lines that may have been written to by
5312  * the device so that we avoid corrupting memory.
5313  */
5314 dma_sync_single_range_for_cpu(rx_ring->dev,
5315 rx_buffer->dma,
5316 rx_buffer->page_offset,
5317 ixgbe_rx_bufsz(rx_ring),
5318 DMA_FROM_DEVICE);
5319
5320
5321 dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
5322 ixgbe_rx_pg_size(rx_ring),
5323 DMA_FROM_DEVICE,
5324 IXGBE_RX_DMA_ATTR);
5325 __page_frag_cache_drain(rx_buffer->page,
5326 rx_buffer->pagecnt_bias);
5327
5328 i++;
5329 rx_buffer++;
5330 if (i == rx_ring->count) {
5331 i = 0;
5332 rx_buffer = rx_ring->rx_buffer_info;
5333 }
5334 }
5335
5336 skip_free:
5337 rx_ring->next_to_alloc = 0;
5338 rx_ring->next_to_clean = 0;
5339 rx_ring->next_to_use = 0;
5340 }
5341
5342 static int ixgbe_fwd_ring_up(struct ixgbe_adapter *adapter,
5343 struct ixgbe_fwd_adapter *accel)
5344 {
5345 u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;
5346 int num_tc = netdev_get_num_tc(adapter->netdev);
5347 struct net_device *vdev = accel->netdev;
5348 int i, baseq, err;
5349
5350 baseq = accel->pool * adapter->num_rx_queues_per_pool;
5351 netdev_dbg(vdev, "pool %i:%i queues %i:%i\n",
5352 accel->pool, adapter->num_rx_pools,
5353 baseq, baseq + adapter->num_rx_queues_per_pool);
5354
5355 accel->rx_base_queue = baseq;
5356 accel->tx_base_queue = baseq;
5357
5358
5359 for (i = 0; i < num_tc; i++)
5360 netdev_bind_sb_channel_queue(adapter->netdev, vdev,
5361 i, rss_i, baseq + (rss_i * i));
5362
5363 for (i = 0; i < adapter->num_rx_queues_per_pool; i++)
5364 adapter->rx_ring[baseq + i]->netdev = vdev;
5365
5366 /* Ensure the updated rx_ring->netdev pointers are visible to the
5367  * datapath before the MAC filter below starts steering traffic.
5368  */
5369 wmb();
5370
5371 /* ixgbe_add_mac_filter() returns a RAR index on success, so only a
5372  * negative return value is treated as an error.
5373  */
5374 err = ixgbe_add_mac_filter(adapter, vdev->dev_addr,
5375 VMDQ_P(accel->pool));
5376 if (err >= 0)
5377 return 0;
5378
5379
5380 macvlan_release_l2fw_offload(vdev);
5381
5382 for (i = 0; i < adapter->num_rx_queues_per_pool; i++)
5383 adapter->rx_ring[baseq + i]->netdev = NULL;
5384
5385 netdev_err(vdev, "L2FW offload disabled due to L2 filter error\n");
5386
5387
5388 netdev_unbind_sb_channel(adapter->netdev, vdev);
5389 netdev_set_sb_channel(vdev, 0);
5390
5391 clear_bit(accel->pool, adapter->fwd_bitmask);
5392 kfree(accel);
5393
5394 return err;
5395 }
5396
5397 static int ixgbe_macvlan_up(struct net_device *vdev, void *data)
5398 {
5399 struct ixgbe_adapter *adapter = data;
5400 struct ixgbe_fwd_adapter *accel;
5401
5402 if (!netif_is_macvlan(vdev))
5403 return 0;
5404
5405 accel = macvlan_accel_priv(vdev);
5406 if (!accel)
5407 return 0;
5408
5409 ixgbe_fwd_ring_up(adapter, accel);
5410
5411 return 0;
5412 }
5413
5414 static void ixgbe_configure_dfwd(struct ixgbe_adapter *adapter)
5415 {
5416 netdev_walk_all_upper_dev_rcu(adapter->netdev,
5417 ixgbe_macvlan_up, adapter);
5418 }
5419
5420 static void ixgbe_configure(struct ixgbe_adapter *adapter)
5421 {
5422 struct ixgbe_hw *hw = &adapter->hw;
5423
5424 ixgbe_configure_pb(adapter);
5425 #ifdef CONFIG_IXGBE_DCB
5426 ixgbe_configure_dcb(adapter);
5427 #endif
5428
5429 /* We must restore virtualization before VLANs or else the VLVF
5430  * registers will not be populated correctly.
5431  */
5432 ixgbe_configure_virtualization(adapter);
5433
5434 ixgbe_set_rx_mode(adapter->netdev);
5435 ixgbe_restore_vlan(adapter);
5436 ixgbe_ipsec_restore(adapter);
5437
5438 switch (hw->mac.type) {
5439 case ixgbe_mac_82599EB:
5440 case ixgbe_mac_X540:
5441 hw->mac.ops.disable_rx_buff(hw);
5442 break;
5443 default:
5444 break;
5445 }
5446
5447 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
5448 ixgbe_init_fdir_signature_82599(&adapter->hw,
5449 adapter->fdir_pballoc);
5450 } else if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) {
5451 ixgbe_init_fdir_perfect_82599(&adapter->hw,
5452 adapter->fdir_pballoc);
5453 ixgbe_fdir_filter_restore(adapter);
5454 }
5455
5456 switch (hw->mac.type) {
5457 case ixgbe_mac_82599EB:
5458 case ixgbe_mac_X540:
5459 hw->mac.ops.enable_rx_buff(hw);
5460 break;
5461 default:
5462 break;
5463 }
5464
5465 #ifdef CONFIG_IXGBE_DCA
5466
5467 if (adapter->flags & IXGBE_FLAG_DCA_CAPABLE)
5468 ixgbe_setup_dca(adapter);
5469 #endif
5470
5471 #ifdef IXGBE_FCOE
5472
5473 ixgbe_configure_fcoe(adapter);
5474
5475 #endif
5476 ixgbe_configure_tx(adapter);
5477 ixgbe_configure_rx(adapter);
5478 ixgbe_configure_dfwd(adapter);
5479 }
5480
5481 /**
5482  * ixgbe_sfp_link_config - set up SFP+ link
5483  * @adapter: pointer to private adapter struct
5484  **/
5485 static void ixgbe_sfp_link_config(struct ixgbe_adapter *adapter)
5486 {
5487 /* An SFP+ module may have been inserted or removed after the reset
5488  * but before SFP detection was enabled.  Assume the worst case and
5489  * schedule a module search (82598) plus an SFP-driven reset of the
5490  * link configuration.
5491  */
5492
5493 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
5494 adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP;
5495
5496 adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET;
5497 adapter->sfp_poll_time = 0;
5498 }
5499
5500 /**
5501  * ixgbe_non_sfp_link_config - set up non-SFP+ link
5502  * @hw: pointer to private hardware struct
5503  *
5504  * Returns 0 on success, negative on failure
5505  **/
5506 static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw)
5507 {
5508 u32 speed;
5509 bool autoneg, link_up = false;
5510 int ret = IXGBE_ERR_LINK_SETUP;
5511
5512 if (hw->mac.ops.check_link)
5513 ret = hw->mac.ops.check_link(hw, &speed, &link_up, false);
5514
5515 if (ret)
5516 return ret;
5517
5518 speed = hw->phy.autoneg_advertised;
5519 if ((!speed) && (hw->mac.ops.get_link_capabilities))
5520 ret = hw->mac.ops.get_link_capabilities(hw, &speed,
5521 &autoneg);
5522 if (ret)
5523 return ret;
5524
5525 if (hw->mac.ops.setup_link)
5526 ret = hw->mac.ops.setup_link(hw, speed, link_up);
5527
5528 return ret;
5529 }
5530
5531 static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter)
5532 {
5533 struct ixgbe_hw *hw = &adapter->hw;
5534 u32 gpie = 0;
5535
5536 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
5537 gpie = IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT |
5538 IXGBE_GPIE_OCD;
5539 gpie |= IXGBE_GPIE_EIAME;
5540
5541
5542
5543
5544 switch (hw->mac.type) {
5545 case ixgbe_mac_82598EB:
5546 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
5547 break;
5548 case ixgbe_mac_82599EB:
5549 case ixgbe_mac_X540:
5550 case ixgbe_mac_X550:
5551 case ixgbe_mac_X550EM_x:
5552 case ixgbe_mac_x550em_a:
5553 default:
5554 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
5555 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
5556 break;
5557 }
5558 } else {
5559
5560
5561 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
5562 }
5563
5564
5565
5566
5567 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
5568 gpie &= ~IXGBE_GPIE_VTMODE_MASK;
5569
5570 switch (adapter->ring_feature[RING_F_VMDQ].mask) {
5571 case IXGBE_82599_VMDQ_8Q_MASK:
5572 gpie |= IXGBE_GPIE_VTMODE_16;
5573 break;
5574 case IXGBE_82599_VMDQ_4Q_MASK:
5575 gpie |= IXGBE_GPIE_VTMODE_32;
5576 break;
5577 default:
5578 gpie |= IXGBE_GPIE_VTMODE_64;
5579 break;
5580 }
5581 }
5582
5583
5584 if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) {
5585 switch (adapter->hw.mac.type) {
5586 case ixgbe_mac_82599EB:
5587 gpie |= IXGBE_SDP0_GPIEN_8259X;
5588 break;
5589 default:
5590 break;
5591 }
5592 }
5593
5594
5595 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
5596 gpie |= IXGBE_SDP1_GPIEN(hw);
5597
5598 switch (hw->mac.type) {
5599 case ixgbe_mac_82599EB:
5600 gpie |= IXGBE_SDP1_GPIEN_8259X | IXGBE_SDP2_GPIEN_8259X;
5601 break;
5602 case ixgbe_mac_X550EM_x:
5603 case ixgbe_mac_x550em_a:
5604 gpie |= IXGBE_SDP0_GPIEN_X540;
5605 break;
5606 default:
5607 break;
5608 }
5609
5610 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
5611 }
5612
5613 static void ixgbe_up_complete(struct ixgbe_adapter *adapter)
5614 {
5615 struct ixgbe_hw *hw = &adapter->hw;
5616 int err;
5617 u32 ctrl_ext;
5618
5619 ixgbe_get_hw_control(adapter);
5620 ixgbe_setup_gpie(adapter);
5621
5622 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
5623 ixgbe_configure_msix(adapter);
5624 else
5625 ixgbe_configure_msi_and_legacy(adapter);
5626
5627
5628 if (hw->mac.ops.enable_tx_laser)
5629 hw->mac.ops.enable_tx_laser(hw);
5630
5631 if (hw->phy.ops.set_phy_power)
5632 hw->phy.ops.set_phy_power(hw, true);
5633
5634 smp_mb__before_atomic();
5635 clear_bit(__IXGBE_DOWN, &adapter->state);
5636 ixgbe_napi_enable_all(adapter);
5637
5638 if (ixgbe_is_sfp(hw)) {
5639 ixgbe_sfp_link_config(adapter);
5640 } else {
5641 err = ixgbe_non_sfp_link_config(hw);
5642 if (err)
5643 e_err(probe, "link_config FAILED %d\n", err);
5644 }
5645
5646
5647 IXGBE_READ_REG(hw, IXGBE_EICR);
5648 ixgbe_irq_enable(adapter, true, true);
5649
5650 /* If this adapter has a fan, check to see if we had a failure
5651  * before we enabled the interrupt.
5652  */
5653
5654 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
5655 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
5656 if (esdp & IXGBE_ESDP_SDP1)
5657 e_crit(drv, "Fan has stopped, replace the adapter\n");
5658 }
5659
5660
5661
5662 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
5663 adapter->link_check_timeout = jiffies;
5664 mod_timer(&adapter->service_timer, jiffies);
5665
5666
5667 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
5668 ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
5669 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
5670 }
5671
5672 void ixgbe_reinit_locked(struct ixgbe_adapter *adapter)
5673 {
5674 WARN_ON(in_interrupt());
5675
5676 netif_trans_update(adapter->netdev);
5677
5678 while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
5679 usleep_range(1000, 2000);
5680 if (adapter->hw.phy.type == ixgbe_phy_fw)
5681 ixgbe_watchdog_link_is_down(adapter);
5682 ixgbe_down(adapter);
5683
5684 /* If SR-IOV is enabled, wait a while before bringing the adapter
5685  * back up so the VFs have time to notice the reset before the PF
5686  * starts passing traffic again.
5687  */
5688
5689 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
5690 msleep(2000);
5691 ixgbe_up(adapter);
5692 clear_bit(__IXGBE_RESETTING, &adapter->state);
5693 }
5694
5695 void ixgbe_up(struct ixgbe_adapter *adapter)
5696 {
5697
5698 ixgbe_configure(adapter);
5699
5700 ixgbe_up_complete(adapter);
5701 }
5702
5703 static unsigned long ixgbe_get_completion_timeout(struct ixgbe_adapter *adapter)
5704 {
5705 u16 devctl2;
5706
5707 pcie_capability_read_word(adapter->pdev, PCI_EXP_DEVCTL2, &devctl2);
5708
5709 switch (devctl2 & IXGBE_PCIDEVCTRL2_TIMEO_MASK) {
5710 case IXGBE_PCIDEVCTRL2_17_34s:
5711 case IXGBE_PCIDEVCTRL2_4_8s:
5712 /* Cap the delay at 2 seconds; the worst-case encodings above would
5713  * otherwise allow waits of up to 34 seconds.
5714  */
5715
5716 case IXGBE_PCIDEVCTRL2_1_2s:
5717 return 2000000ul;
5718 case IXGBE_PCIDEVCTRL2_260_520ms:
5719 return 520000ul;
5720 case IXGBE_PCIDEVCTRL2_65_130ms:
5721 return 130000ul;
5722 case IXGBE_PCIDEVCTRL2_16_32ms:
5723 return 32000ul;
5724 case IXGBE_PCIDEVCTRL2_1_2ms:
5725 return 2000ul;
5726 case IXGBE_PCIDEVCTRL2_50_100us:
5727 return 100ul;
5728 case IXGBE_PCIDEVCTRL2_16_32ms_def:
5729 return 32000ul;
5730 default:
5731 break;
5732 }
5733
5734
5735
5736
5737 return 32000ul;
5738 }
5739
5740 void ixgbe_disable_rx(struct ixgbe_adapter *adapter)
5741 {
5742 unsigned long wait_delay, delay_interval;
5743 struct ixgbe_hw *hw = &adapter->hw;
5744 int i, wait_loop;
5745 u32 rxdctl;
5746
5747
5748 hw->mac.ops.disable_rx(hw);
5749
5750 if (ixgbe_removed(hw->hw_addr))
5751 return;
5752
5753
5754 for (i = 0; i < adapter->num_rx_queues; i++) {
5755 struct ixgbe_ring *ring = adapter->rx_ring[i];
5756 u8 reg_idx = ring->reg_idx;
5757
5758 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
5759 rxdctl &= ~IXGBE_RXDCTL_ENABLE;
5760 rxdctl |= IXGBE_RXDCTL_SWFLSH;
5761
5762
5763 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
5764 }
5765
5766
5767 if (hw->mac.type == ixgbe_mac_82598EB &&
5768 !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
5769 return;
5770
5771 /* Poll for all of the queues to report RXDCTL.ENABLE cleared.  The
5772  * delay between polls grows on each pass so that a device that
5773  * responds quickly is not penalized, while a slow device is still
5774  * given up to roughly the PCIe completion timeout to finish.
5775  */
5776
5777
5778
5779
5780
5781
5782 delay_interval = ixgbe_get_completion_timeout(adapter) / 100;
5783
5784 wait_loop = IXGBE_MAX_RX_DESC_POLL;
5785 wait_delay = delay_interval;
5786
5787 while (wait_loop--) {
5788 usleep_range(wait_delay, wait_delay + 10);
5789 wait_delay += delay_interval * 2;
5790 rxdctl = 0;
5791
5792
5793
5794
5795
5796
5797 for (i = 0; i < adapter->num_rx_queues; i++) {
5798 struct ixgbe_ring *ring = adapter->rx_ring[i];
5799 u8 reg_idx = ring->reg_idx;
5800
5801 rxdctl |= IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
5802 }
5803
5804 if (!(rxdctl & IXGBE_RXDCTL_ENABLE))
5805 return;
5806 }
5807
5808 e_err(drv,
5809 "RXDCTL.ENABLE for one or more queues not cleared within the polling period\n");
5810 }
5811
5812 void ixgbe_disable_tx(struct ixgbe_adapter *adapter)
5813 {
5814 unsigned long wait_delay, delay_interval;
5815 struct ixgbe_hw *hw = &adapter->hw;
5816 int i, wait_loop;
5817 u32 txdctl;
5818
5819 if (ixgbe_removed(hw->hw_addr))
5820 return;
5821
5822
5823 for (i = 0; i < adapter->num_tx_queues; i++) {
5824 struct ixgbe_ring *ring = adapter->tx_ring[i];
5825 u8 reg_idx = ring->reg_idx;
5826
5827 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
5828 }
5829
5830
5831 for (i = 0; i < adapter->num_xdp_queues; i++) {
5832 struct ixgbe_ring *ring = adapter->xdp_ring[i];
5833 u8 reg_idx = ring->reg_idx;
5834
5835 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
5836 }
5837
5838
5839
5840
5841
5842
5843 if (!(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
5844 goto dma_engine_disable;
5845
5846
5847
5848
5849
5850
5851
5852
5853
5854
5855
5856
5857 delay_interval = ixgbe_get_completion_timeout(adapter) / 100;
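/* Poll with an escalating delay: each pass waits 2 * delay_interval longer
 * than the last, so the passes together cover roughly 100 * delay_interval,
 * i.e. about the PCIe completion timeout computed above.
 */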
5858
5859 wait_loop = IXGBE_MAX_RX_DESC_POLL;
5860 wait_delay = delay_interval;
5861
5862 while (wait_loop--) {
5863 usleep_range(wait_delay, wait_delay + 10);
5864 wait_delay += delay_interval * 2;
5865 txdctl = 0;
5866
5867
5868
5869
5870
5871
5872 for (i = 0; i < adapter->num_tx_queues; i++) {
5873 struct ixgbe_ring *ring = adapter->tx_ring[i];
5874 u8 reg_idx = ring->reg_idx;
5875
5876 txdctl |= IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
5877 }
5878 for (i = 0; i < adapter->num_xdp_queues; i++) {
5879 struct ixgbe_ring *ring = adapter->xdp_ring[i];
5880 u8 reg_idx = ring->reg_idx;
5881
5882 txdctl |= IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
5883 }
5884
5885 if (!(txdctl & IXGBE_TXDCTL_ENABLE))
5886 goto dma_engine_disable;
5887 }
5888
5889 e_err(drv,
5890 "TXDCTL.ENABLE for one or more queues not cleared within the polling period\n");
5891
5892 dma_engine_disable:
5893
5894 switch (hw->mac.type) {
5895 case ixgbe_mac_82599EB:
5896 case ixgbe_mac_X540:
5897 case ixgbe_mac_X550:
5898 case ixgbe_mac_X550EM_x:
5899 case ixgbe_mac_x550em_a:
5900 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL,
5901 (IXGBE_READ_REG(hw, IXGBE_DMATXCTL) &
5902 ~IXGBE_DMATXCTL_TE));
5903 /* fall through */
5904 default:
5905 break;
5906 }
5907 }
5908
5909 void ixgbe_reset(struct ixgbe_adapter *adapter)
5910 {
5911 struct ixgbe_hw *hw = &adapter->hw;
5912 struct net_device *netdev = adapter->netdev;
5913 int err;
5914
5915 if (ixgbe_removed(hw->hw_addr))
5916 return;
5917
5918 while (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
5919 usleep_range(1000, 2000);
5920
5921
5922 adapter->flags2 &= ~(IXGBE_FLAG2_SEARCH_FOR_SFP |
5923 IXGBE_FLAG2_SFP_NEEDS_RESET);
5924 adapter->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG;
5925
5926 err = hw->mac.ops.init_hw(hw);
5927 switch (err) {
5928 case 0:
5929 case IXGBE_ERR_SFP_NOT_PRESENT:
5930 case IXGBE_ERR_SFP_NOT_SUPPORTED:
5931 break;
5932 case IXGBE_ERR_MASTER_REQUESTS_PENDING:
5933 e_dev_err("master disable timed out\n");
5934 break;
5935 case IXGBE_ERR_EEPROM_VERSION:
5936
5937 e_dev_warn("This device is a pre-production adapter/LOM. "
5938 "Please be aware there may be issues associated with "
5939 "your hardware. If you are experiencing problems "
5940 "please contact your Intel or hardware "
5941 "representative who provided you with this "
5942 "hardware.\n");
5943 break;
5944 default:
5945 e_dev_err("Hardware Error: %d\n", err);
5946 }
5947
5948 clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
5949
5950
5951 ixgbe_flush_sw_mac_table(adapter);
5952 __dev_uc_unsync(netdev, NULL);
5953
5954
5955 ixgbe_mac_set_default_filter(adapter);
5956
5957
5958 if (hw->mac.san_mac_rar_index)
5959 hw->mac.ops.set_vmdq_san_mac(hw, VMDQ_P(0));
5960
5961 if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state))
5962 ixgbe_ptp_reset(adapter);
5963
5964 if (hw->phy.ops.set_phy_power) {
5965 if (!netif_running(adapter->netdev) && !adapter->wol)
5966 hw->phy.ops.set_phy_power(hw, false);
5967 else
5968 hw->phy.ops.set_phy_power(hw, true);
5969 }
5970 }
5971
5972 /**
5973  * ixgbe_clean_tx_ring - Free Tx Buffers
5974  * @tx_ring: ring to be cleaned
5975  **/
5976 static void ixgbe_clean_tx_ring(struct ixgbe_ring *tx_ring)
5977 {
5978 u16 i = tx_ring->next_to_clean;
5979 struct ixgbe_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i];
5980
5981 if (tx_ring->xsk_umem) {
5982 ixgbe_xsk_clean_tx_ring(tx_ring);
5983 goto out;
5984 }
5985
5986 while (i != tx_ring->next_to_use) {
5987 union ixgbe_adv_tx_desc *eop_desc, *tx_desc;
5988
5989
5990 if (ring_is_xdp(tx_ring))
5991 xdp_return_frame(tx_buffer->xdpf);
5992 else
5993 dev_kfree_skb_any(tx_buffer->skb);
5994
5995
5996 dma_unmap_single(tx_ring->dev,
5997 dma_unmap_addr(tx_buffer, dma),
5998 dma_unmap_len(tx_buffer, len),
5999 DMA_TO_DEVICE);
6000
6001
6002 eop_desc = tx_buffer->next_to_watch;
6003 tx_desc = IXGBE_TX_DESC(tx_ring, i);
6004
6005
6006 while (tx_desc != eop_desc) {
6007 tx_buffer++;
6008 tx_desc++;
6009 i++;
6010 if (unlikely(i == tx_ring->count)) {
6011 i = 0;
6012 tx_buffer = tx_ring->tx_buffer_info;
6013 tx_desc = IXGBE_TX_DESC(tx_ring, 0);
6014 }
6015
6016
6017 if (dma_unmap_len(tx_buffer, len))
6018 dma_unmap_page(tx_ring->dev,
6019 dma_unmap_addr(tx_buffer, dma),
6020 dma_unmap_len(tx_buffer, len),
6021 DMA_TO_DEVICE);
6022 }
6023
6024
6025 tx_buffer++;
6026 i++;
6027 if (unlikely(i == tx_ring->count)) {
6028 i = 0;
6029 tx_buffer = tx_ring->tx_buffer_info;
6030 }
6031 }
6032
6033
6034 if (!ring_is_xdp(tx_ring))
6035 netdev_tx_reset_queue(txring_txq(tx_ring));
6036
6037 out:
6038
6039 tx_ring->next_to_use = 0;
6040 tx_ring->next_to_clean = 0;
6041 }
6042
6043 /**
6044  * ixgbe_clean_all_rx_rings - Free Rx Buffers for all queues
6045  * @adapter: board private structure
6046  **/
6047 static void ixgbe_clean_all_rx_rings(struct ixgbe_adapter *adapter)
6048 {
6049 int i;
6050
6051 for (i = 0; i < adapter->num_rx_queues; i++)
6052 ixgbe_clean_rx_ring(adapter->rx_ring[i]);
6053 }
6054
6055 /**
6056  * ixgbe_clean_all_tx_rings - Free Tx Buffers for all queues
6057  * @adapter: board private structure
6058  **/
6059 static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter)
6060 {
6061 int i;
6062
6063 for (i = 0; i < adapter->num_tx_queues; i++)
6064 ixgbe_clean_tx_ring(adapter->tx_ring[i]);
6065 for (i = 0; i < adapter->num_xdp_queues; i++)
6066 ixgbe_clean_tx_ring(adapter->xdp_ring[i]);
6067 }
6068
6069 static void ixgbe_fdir_filter_exit(struct ixgbe_adapter *adapter)
6070 {
6071 struct hlist_node *node2;
6072 struct ixgbe_fdir_filter *filter;
6073
6074 spin_lock(&adapter->fdir_perfect_lock);
6075
6076 hlist_for_each_entry_safe(filter, node2,
6077 &adapter->fdir_filter_list, fdir_node) {
6078 hlist_del(&filter->fdir_node);
6079 kfree(filter);
6080 }
6081 adapter->fdir_filter_count = 0;
6082
6083 spin_unlock(&adapter->fdir_perfect_lock);
6084 }
6085
6086 void ixgbe_down(struct ixgbe_adapter *adapter)
6087 {
6088 struct net_device *netdev = adapter->netdev;
6089 struct ixgbe_hw *hw = &adapter->hw;
6090 int i;
6091
6092 /* signal that we are down to the interrupt handler */
6093 if (test_and_set_bit(__IXGBE_DOWN, &adapter->state))
6094 return;
6095
6096
6097 netif_tx_stop_all_queues(netdev);
6098
6099
6100 netif_carrier_off(netdev);
6101 netif_tx_disable(netdev);
6102
6103
6104 ixgbe_disable_rx(adapter);
6105
6106 /* synchronize_rcu() needed for pending XDP buffers to drain */
6107 if (adapter->xdp_ring[0])
6108 synchronize_rcu();
6109
6110 ixgbe_irq_disable(adapter);
6111
6112 ixgbe_napi_disable_all(adapter);
6113
6114 clear_bit(__IXGBE_RESET_REQUESTED, &adapter->state);
6115 adapter->flags2 &= ~IXGBE_FLAG2_FDIR_REQUIRES_REINIT;
6116 adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
6117
6118 del_timer_sync(&adapter->service_timer);
6119
6120 if (adapter->num_vfs) {
6121
6122 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, 0);
6123
6124
6125 for (i = 0 ; i < adapter->num_vfs; i++)
6126 adapter->vfinfo[i].clear_to_send = false;
6127
6128
6129 ixgbe_ping_all_vfs(adapter);
6130
6131
6132 ixgbe_disable_tx_rx(adapter);
6133 }
6134
6135
6136 ixgbe_disable_tx(adapter);
6137
6138 if (!pci_channel_offline(adapter->pdev))
6139 ixgbe_reset(adapter);
6140
6141
6142 if (hw->mac.ops.disable_tx_laser)
6143 hw->mac.ops.disable_tx_laser(hw);
6144
6145 ixgbe_clean_all_tx_rings(adapter);
6146 ixgbe_clean_all_rx_rings(adapter);
6147 }
6148
6149 /**
6150  * ixgbe_set_eee_capable - helper function to determine EEE support on X550
6151  * @adapter: board private structure
6152  **/
6153 static void ixgbe_set_eee_capable(struct ixgbe_adapter *adapter)
6154 {
6155 struct ixgbe_hw *hw = &adapter->hw;
6156
6157 switch (hw->device_id) {
6158 case IXGBE_DEV_ID_X550EM_A_1G_T:
6159 case IXGBE_DEV_ID_X550EM_A_1G_T_L:
6160 if (!hw->phy.eee_speeds_supported)
6161 break;
6162 adapter->flags2 |= IXGBE_FLAG2_EEE_CAPABLE;
6163 if (!hw->phy.eee_speeds_advertised)
6164 break;
6165 adapter->flags2 |= IXGBE_FLAG2_EEE_ENABLED;
6166 break;
6167 default:
6168 adapter->flags2 &= ~IXGBE_FLAG2_EEE_CAPABLE;
6169 adapter->flags2 &= ~IXGBE_FLAG2_EEE_ENABLED;
6170 break;
6171 }
6172 }
6173
6174 /**
6175  * ixgbe_tx_timeout - Respond to a Tx Hang
6176  * @netdev: network interface device structure
6177  **/
6178 static void ixgbe_tx_timeout(struct net_device *netdev)
6179 {
6180 struct ixgbe_adapter *adapter = netdev_priv(netdev);
6181
6182
6183 ixgbe_tx_timeout_reset(adapter);
6184 }
6185
6186 #ifdef CONFIG_IXGBE_DCB
6187 static void ixgbe_init_dcb(struct ixgbe_adapter *adapter)
6188 {
6189 struct ixgbe_hw *hw = &adapter->hw;
6190 struct tc_configuration *tc;
6191 int j;
6192
6193 switch (hw->mac.type) {
6194 case ixgbe_mac_82598EB:
6195 case ixgbe_mac_82599EB:
6196 adapter->dcb_cfg.num_tcs.pg_tcs = MAX_TRAFFIC_CLASS;
6197 adapter->dcb_cfg.num_tcs.pfc_tcs = MAX_TRAFFIC_CLASS;
6198 break;
6199 case ixgbe_mac_X540:
6200 case ixgbe_mac_X550:
6201 adapter->dcb_cfg.num_tcs.pg_tcs = X540_TRAFFIC_CLASS;
6202 adapter->dcb_cfg.num_tcs.pfc_tcs = X540_TRAFFIC_CLASS;
6203 break;
6204 case ixgbe_mac_X550EM_x:
6205 case ixgbe_mac_x550em_a:
6206 default:
6207 adapter->dcb_cfg.num_tcs.pg_tcs = DEF_TRAFFIC_CLASS;
6208 adapter->dcb_cfg.num_tcs.pfc_tcs = DEF_TRAFFIC_CLASS;
6209 break;
6210 }
6211
6212
6213 for (j = 0; j < MAX_TRAFFIC_CLASS; j++) {
6214 tc = &adapter->dcb_cfg.tc_config[j];
6215 tc->path[DCB_TX_CONFIG].bwg_id = 0;
6216 tc->path[DCB_TX_CONFIG].bwg_percent = 12 + (j & 1);
6217 tc->path[DCB_RX_CONFIG].bwg_id = 0;
6218 tc->path[DCB_RX_CONFIG].bwg_percent = 12 + (j & 1);
6219 tc->dcb_pfc = pfc_disabled;
6220 }
6221
6222
6223 tc = &adapter->dcb_cfg.tc_config[0];
6224 tc->path[DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF;
6225 tc->path[DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF;
6226
6227 adapter->dcb_cfg.bw_percentage[DCB_TX_CONFIG][0] = 100;
6228 adapter->dcb_cfg.bw_percentage[DCB_RX_CONFIG][0] = 100;
6229 adapter->dcb_cfg.pfc_mode_enable = false;
6230 adapter->dcb_set_bitmap = 0x00;
6231 if (adapter->flags & IXGBE_FLAG_DCB_CAPABLE)
6232 adapter->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_CEE;
6233 memcpy(&adapter->temp_dcb_cfg, &adapter->dcb_cfg,
6234 sizeof(adapter->temp_dcb_cfg));
6235 }
6236 #endif
6237 /**
6238  * ixgbe_sw_init - Initialize general software structures
6239  * @adapter: board private structure to initialize
6240  * @ii: pointer to ixgbe_info for device
6241  *
6242  * ixgbe_sw_init initializes the adapter private data structure.
6243  * Fields are initialized based on PCI device information and
6244  * OS network device settings (MTU size).
6245  **/
6246
6247 static int ixgbe_sw_init(struct ixgbe_adapter *adapter,
6248 const struct ixgbe_info *ii)
6249 {
6250 struct ixgbe_hw *hw = &adapter->hw;
6251 struct pci_dev *pdev = adapter->pdev;
6252 unsigned int rss, fdir;
6253 u32 fwsm;
6254 int i;
6255
6256
6257
6258 hw->vendor_id = pdev->vendor;
6259 hw->device_id = pdev->device;
6260 hw->revision_id = pdev->revision;
6261 hw->subsystem_vendor_id = pdev->subsystem_vendor;
6262 hw->subsystem_device_id = pdev->subsystem_device;
6263
6264
6265 ii->get_invariants(hw);
6266
6267
6268 rss = min_t(int, ixgbe_max_rss_indices(adapter), num_online_cpus());
6269 adapter->ring_feature[RING_F_RSS].limit = rss;
6270 adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE;
6271 adapter->max_q_vectors = MAX_Q_VECTORS_82599;
6272 adapter->atr_sample_rate = 20;
6273 fdir = min_t(int, IXGBE_MAX_FDIR_INDICES, num_online_cpus());
6274 adapter->ring_feature[RING_F_FDIR].limit = fdir;
6275 adapter->fdir_pballoc = IXGBE_FDIR_PBALLOC_64K;
6276 adapter->ring_feature[RING_F_VMDQ].limit = 1;
6277 #ifdef CONFIG_IXGBE_DCA
6278 adapter->flags |= IXGBE_FLAG_DCA_CAPABLE;
6279 #endif
6280 #ifdef CONFIG_IXGBE_DCB
6281 adapter->flags |= IXGBE_FLAG_DCB_CAPABLE;
6282 adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
6283 #endif
6284 #ifdef IXGBE_FCOE
6285 adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE;
6286 adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
6287 #ifdef CONFIG_IXGBE_DCB
6288
6289 adapter->fcoe.up = IXGBE_FCOE_DEFTC;
6290 #endif
6291 #endif
6292
6293
6294 adapter->jump_tables[0] = kzalloc(sizeof(*adapter->jump_tables[0]),
6295 GFP_KERNEL);
6296 if (!adapter->jump_tables[0])
6297 return -ENOMEM;
6298 adapter->jump_tables[0]->mat = ixgbe_ipv4_fields;
6299
6300 for (i = 1; i < IXGBE_MAX_LINK_HANDLE; i++)
6301 adapter->jump_tables[i] = NULL;
6302
6303 adapter->mac_table = kcalloc(hw->mac.num_rar_entries,
6304 sizeof(struct ixgbe_mac_addr),
6305 GFP_KERNEL);
6306 if (!adapter->mac_table)
6307 return -ENOMEM;
6308
6309 if (ixgbe_init_rss_key(adapter))
6310 return -ENOMEM;
6311
6312 adapter->af_xdp_zc_qps = bitmap_zalloc(MAX_XDP_QUEUES, GFP_KERNEL);
6313 if (!adapter->af_xdp_zc_qps)
6314 return -ENOMEM;
6315
6316
6317 switch (hw->mac.type) {
6318 case ixgbe_mac_82598EB:
6319 adapter->flags2 &= ~IXGBE_FLAG2_RSC_CAPABLE;
6320
6321 if (hw->device_id == IXGBE_DEV_ID_82598AT)
6322 adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE;
6323
6324 adapter->max_q_vectors = MAX_Q_VECTORS_82598;
6325 adapter->ring_feature[RING_F_FDIR].limit = 0;
6326 adapter->atr_sample_rate = 0;
6327 adapter->fdir_pballoc = 0;
6328 #ifdef IXGBE_FCOE
6329 adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE;
6330 adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
6331 #ifdef CONFIG_IXGBE_DCB
6332 adapter->fcoe.up = 0;
6333 #endif
6334 #endif
6335 break;
6336 case ixgbe_mac_82599EB:
6337 if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM)
6338 adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
6339 break;
6340 case ixgbe_mac_X540:
6341 fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM(hw));
6342 if (fwsm & IXGBE_FWSM_TS_ENABLED)
6343 adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
6344 break;
6345 case ixgbe_mac_x550em_a:
6346 adapter->flags |= IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE;
6347 switch (hw->device_id) {
6348 case IXGBE_DEV_ID_X550EM_A_1G_T:
6349 case IXGBE_DEV_ID_X550EM_A_1G_T_L:
6350 adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
6351 break;
6352 default:
6353 break;
6354 }
6355 /* fall through */
6356 case ixgbe_mac_X550EM_x:
6357 #ifdef CONFIG_IXGBE_DCB
6358 adapter->flags &= ~IXGBE_FLAG_DCB_CAPABLE;
6359 #endif
6360 #ifdef IXGBE_FCOE
6361 adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE;
6362 #ifdef CONFIG_IXGBE_DCB
6363 adapter->fcoe.up = 0;
6364 #endif
6365 #endif
6366 /* fall through */
6367 case ixgbe_mac_X550:
6368 if (hw->mac.type == ixgbe_mac_X550)
6369 adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
6370 #ifdef CONFIG_IXGBE_DCA
6371 adapter->flags &= ~IXGBE_FLAG_DCA_CAPABLE;
6372 #endif
6373 adapter->flags |= IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE;
6374 break;
6375 default:
6376 break;
6377 }
6378
6379 #ifdef IXGBE_FCOE
6380
6381 spin_lock_init(&adapter->fcoe.lock);
6382
6383 #endif
6384
6385 spin_lock_init(&adapter->fdir_perfect_lock);
6386
6387 #ifdef CONFIG_IXGBE_DCB
6388 ixgbe_init_dcb(adapter);
6389 #endif
6390 ixgbe_init_ipsec_offload(adapter);
6391
6392
6393 hw->fc.requested_mode = ixgbe_fc_full;
6394 hw->fc.current_mode = ixgbe_fc_full;
6395 ixgbe_pbthresh_setup(adapter);
6396 hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE;
6397 hw->fc.send_xon = true;
6398 hw->fc.disable_fc_autoneg = ixgbe_device_supports_autoneg_fc(hw);
6399
6400 #ifdef CONFIG_PCI_IOV
6401 if (max_vfs > 0)
6402 e_dev_warn("Enabling SR-IOV VFs using the max_vfs module parameter is deprecated - please use the pci sysfs interface instead.\n");
6403
6404
6405 if (hw->mac.type != ixgbe_mac_82598EB) {
6406 if (max_vfs > IXGBE_MAX_VFS_DRV_LIMIT) {
6407 max_vfs = 0;
6408 e_dev_warn("max_vfs parameter out of range. Not assigning any SR-IOV VFs\n");
6409 }
6410 }
6411 #endif
6412
6413
6414 adapter->rx_itr_setting = 1;
6415 adapter->tx_itr_setting = 1;
6416
6417
6418 adapter->tx_ring_count = IXGBE_DEFAULT_TXD;
6419 adapter->rx_ring_count = IXGBE_DEFAULT_RXD;
6420
6421
6422 adapter->tx_work_limit = IXGBE_DEFAULT_TX_WORK;
6423
6424
6425 if (ixgbe_init_eeprom_params_generic(hw)) {
6426 e_dev_err("EEPROM initialization failed\n");
6427 return -EIO;
6428 }
6429
6430
6431 set_bit(0, adapter->fwd_bitmask);
6432 set_bit(__IXGBE_DOWN, &adapter->state);
6433
6434 return 0;
6435 }
6436
6437 /**
6438  * ixgbe_setup_tx_resources - allocate Tx resources (Descriptors)
6439  * @tx_ring: tx descriptor ring (for a specific queue) to setup
6440  *
6441  * Return 0 on success, negative on failure
6442  **/
6443 int ixgbe_setup_tx_resources(struct ixgbe_ring *tx_ring)
6444 {
6445 struct device *dev = tx_ring->dev;
6446 int orig_node = dev_to_node(dev);
6447 int ring_node = NUMA_NO_NODE;
6448 int size;
6449
6450 size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
6451
6452 if (tx_ring->q_vector)
6453 ring_node = tx_ring->q_vector->numa_node;
6454
6455 tx_ring->tx_buffer_info = vmalloc_node(size, ring_node);
6456 if (!tx_ring->tx_buffer_info)
6457 tx_ring->tx_buffer_info = vmalloc(size);
6458 if (!tx_ring->tx_buffer_info)
6459 goto err;
6460
6461
6462 tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
6463 tx_ring->size = ALIGN(tx_ring->size, 4096);
6464
6465 set_dev_node(dev, ring_node);
6466 tx_ring->desc = dma_alloc_coherent(dev,
6467 tx_ring->size,
6468 &tx_ring->dma,
6469 GFP_KERNEL);
6470 set_dev_node(dev, orig_node);
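/* If the node-local allocation above failed, retry without a preferred
 * NUMA node before giving up.
 */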
6471 if (!tx_ring->desc)
6472 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
6473 &tx_ring->dma, GFP_KERNEL);
6474 if (!tx_ring->desc)
6475 goto err;
6476
6477 tx_ring->next_to_use = 0;
6478 tx_ring->next_to_clean = 0;
6479 return 0;
6480
6481 err:
6482 vfree(tx_ring->tx_buffer_info);
6483 tx_ring->tx_buffer_info = NULL;
6484 dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n");
6485 return -ENOMEM;
6486 }
6487
6488 /**
6489  * ixgbe_setup_all_tx_resources - allocate all queues Tx resources
6490  * @adapter: board private structure
6491  *
6492  * Allocates descriptor rings for every Tx and XDP queue.  On failure the
6493  * rings allocated so far are freed before the error is returned.
6494  *
6495  * Return 0 on success, negative on failure
6496  **/
6497
6498 static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
6499 {
6500 int i, j = 0, err = 0;
6501
6502 for (i = 0; i < adapter->num_tx_queues; i++) {
6503 err = ixgbe_setup_tx_resources(adapter->tx_ring[i]);
6504 if (!err)
6505 continue;
6506
6507 e_err(probe, "Allocation for Tx Queue %u failed\n", i);
6508 goto err_setup_tx;
6509 }
6510 for (j = 0; j < adapter->num_xdp_queues; j++) {
6511 err = ixgbe_setup_tx_resources(adapter->xdp_ring[j]);
6512 if (!err)
6513 continue;
6514
6515 e_err(probe, "Allocation for Tx Queue %u failed\n", j);
6516 goto err_setup_tx;
6517 }
6518
6519 return 0;
6520 err_setup_tx:
6521
6522 while (j--)
6523 ixgbe_free_tx_resources(adapter->xdp_ring[j]);
6524 while (i--)
6525 ixgbe_free_tx_resources(adapter->tx_ring[i]);
6526 return err;
6527 }
6528
6529 /**
6530  * ixgbe_setup_rx_resources - allocate Rx resources (Descriptors)
6531  * @adapter: pointer to ixgbe_adapter
6532  * @rx_ring: rx descriptor ring (for a specific queue) to setup
6533  *
6534  * Returns 0 on success, negative on failure
6535  **/
6536 int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
6537 struct ixgbe_ring *rx_ring)
6538 {
6539 struct device *dev = rx_ring->dev;
6540 int orig_node = dev_to_node(dev);
6541 int ring_node = NUMA_NO_NODE;
6542 int size;
6543
6544 size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
6545
6546 if (rx_ring->q_vector)
6547 ring_node = rx_ring->q_vector->numa_node;
6548
6549 rx_ring->rx_buffer_info = vmalloc_node(size, ring_node);
6550 if (!rx_ring->rx_buffer_info)
6551 rx_ring->rx_buffer_info = vmalloc(size);
6552 if (!rx_ring->rx_buffer_info)
6553 goto err;
6554
6555
6556 rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
6557 rx_ring->size = ALIGN(rx_ring->size, 4096);
6558
6559 set_dev_node(dev, ring_node);
6560 rx_ring->desc = dma_alloc_coherent(dev,
6561 rx_ring->size,
6562 &rx_ring->dma,
6563 GFP_KERNEL);
6564 set_dev_node(dev, orig_node);
6565 if (!rx_ring->desc)
6566 rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
6567 &rx_ring->dma, GFP_KERNEL);
6568 if (!rx_ring->desc)
6569 goto err;
6570
6571 rx_ring->next_to_clean = 0;
6572 rx_ring->next_to_use = 0;
6573
6574 /* XDP RX-queue info */
6575 if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, adapter->netdev,
6576 rx_ring->queue_index) < 0)
6577 goto err;
6578
6579 rx_ring->xdp_prog = adapter->xdp_prog;
6580
6581 return 0;
6582 err:
6583 vfree(rx_ring->rx_buffer_info);
6584 rx_ring->rx_buffer_info = NULL;
6585 dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n");
6586 return -ENOMEM;
6587 }
6588
6589 /**
6590  * ixgbe_setup_all_rx_resources - allocate all queues Rx resources
6591  * @adapter: board private structure
6592  *
6593  * Allocates descriptor rings for every Rx queue and the FCoE DDP
6594  * resources when FCoE is enabled.  On failure the rings allocated so
6595  * far are freed before the error is returned.
6596  *
6597  * Return 0 on success, negative on failure
6598  **/
6599 static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
6600 {
6601 int i, err = 0;
6602
6603 for (i = 0; i < adapter->num_rx_queues; i++) {
6604 err = ixgbe_setup_rx_resources(adapter, adapter->rx_ring[i]);
6605 if (!err)
6606 continue;
6607
6608 e_err(probe, "Allocation for Rx Queue %u failed\n", i);
6609 goto err_setup_rx;
6610 }
6611
6612 #ifdef IXGBE_FCOE
6613 err = ixgbe_setup_fcoe_ddp_resources(adapter);
6614 if (!err)
6615 #endif
6616 return 0;
6617 err_setup_rx:
6618
6619 while (i--)
6620 ixgbe_free_rx_resources(adapter->rx_ring[i]);
6621 return err;
6622 }
6623
6624 /**
6625  * ixgbe_free_tx_resources - Free Tx Resources per Queue
6626  * @tx_ring: Tx descriptor ring for a specific queue
6627  *
6628  * Free all transmit software resources
6629  **/
6630 void ixgbe_free_tx_resources(struct ixgbe_ring *tx_ring)
6631 {
6632 ixgbe_clean_tx_ring(tx_ring);
6633
6634 vfree(tx_ring->tx_buffer_info);
6635 tx_ring->tx_buffer_info = NULL;
6636
6637
6638 if (!tx_ring->desc)
6639 return;
6640
6641 dma_free_coherent(tx_ring->dev, tx_ring->size,
6642 tx_ring->desc, tx_ring->dma);
6643
6644 tx_ring->desc = NULL;
6645 }
6646
6647 /**
6648  * ixgbe_free_all_tx_resources - Free Tx Resources for All Queues
6649  * @adapter: board private structure
6650  *
6651  * Free all transmit software resources
6652  **/
6653 static void ixgbe_free_all_tx_resources(struct ixgbe_adapter *adapter)
6654 {
6655 int i;
6656
6657 for (i = 0; i < adapter->num_tx_queues; i++)
6658 if (adapter->tx_ring[i]->desc)
6659 ixgbe_free_tx_resources(adapter->tx_ring[i]);
6660 for (i = 0; i < adapter->num_xdp_queues; i++)
6661 if (adapter->xdp_ring[i]->desc)
6662 ixgbe_free_tx_resources(adapter->xdp_ring[i]);
6663 }
6664
6665 /**
6666  * ixgbe_free_rx_resources - Free Rx Resources
6667  * @rx_ring: ring to clean the resources from
6668  *
6669  * Free all receive software resources
6670  **/
6671 void ixgbe_free_rx_resources(struct ixgbe_ring *rx_ring)
6672 {
6673 ixgbe_clean_rx_ring(rx_ring);
6674
6675 rx_ring->xdp_prog = NULL;
6676 xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
6677 vfree(rx_ring->rx_buffer_info);
6678 rx_ring->rx_buffer_info = NULL;
6679
6680
6681 if (!rx_ring->desc)
6682 return;
6683
6684 dma_free_coherent(rx_ring->dev, rx_ring->size,
6685 rx_ring->desc, rx_ring->dma);
6686
6687 rx_ring->desc = NULL;
6688 }
6689
6690 /**
6691  * ixgbe_free_all_rx_resources - Free Rx Resources for All Queues
6692  * @adapter: board private structure
6693  *
6694  * Free all receive software resources
6695  **/
6696 static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter)
6697 {
6698 int i;
6699
6700 #ifdef IXGBE_FCOE
6701 ixgbe_free_fcoe_ddp_resources(adapter);
6702
6703 #endif
6704 for (i = 0; i < adapter->num_rx_queues; i++)
6705 if (adapter->rx_ring[i]->desc)
6706 ixgbe_free_rx_resources(adapter->rx_ring[i]);
6707 }
6708
6709 /**
6710  * ixgbe_change_mtu - Change the Maximum Transfer Unit
6711  * @netdev: network interface device structure
6712  * @new_mtu: new value for maximum frame size
6713  *
6714  * Returns 0 on success, negative on failure
6715  **/
6716 static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
6717 {
6718 struct ixgbe_adapter *adapter = netdev_priv(netdev);
6719
6720 if (adapter->xdp_prog) {
6721 int new_frame_size = new_mtu + ETH_HLEN + ETH_FCS_LEN +
6722 VLAN_HLEN;
6723 int i;
6724
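/* With an XDP program attached every frame must fit in a single Rx
 * buffer, so reject an MTU whose frame size exceeds any ring's buffer.
 */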
6725 for (i = 0; i < adapter->num_rx_queues; i++) {
6726 struct ixgbe_ring *ring = adapter->rx_ring[i];
6727
6728 if (new_frame_size > ixgbe_rx_bufsz(ring)) {
6729 e_warn(probe, "Requested MTU size is not supported with XDP\n");
6730 return -EINVAL;
6731 }
6732 }
6733 }
6734
6735 /*
6736  * For 82599EB with SR-IOV enabled we cannot allow legacy VFs to
6737  * enable their receive paths once the MTU is raised above 1500,
6738  * so warn the user that legacy VFs will be disabled.
6739  */
6740 if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) &&
6741 (adapter->hw.mac.type == ixgbe_mac_82599EB) &&
6742 (new_mtu > ETH_DATA_LEN))
6743 e_warn(probe, "Setting MTU > 1500 will disable legacy VFs\n");
6744
6745 e_info(probe, "changing MTU from %d to %d\n", netdev->mtu, new_mtu);
6746
6747
6748 netdev->mtu = new_mtu;
6749
6750 if (netif_running(netdev))
6751 ixgbe_reinit_locked(adapter);
6752
6753 return 0;
6754 }
6755
6756 /**
6757  * ixgbe_open - Called when a network interface is made active
6758  * @netdev: network interface device structure
6759  *
6760  * Returns 0 on success, negative value on failure
6761  *
6762  * The open entry point is called when a network interface is made
6763  * active by the system (IFF_UP).  At this point all resources needed
6764  * for transmit and receive operations are allocated, the interrupt
6765  * handler is registered with the OS, the watchdog timer is started,
6766  * and the stack is notified that the interface is ready.
6767  **/
6768 int ixgbe_open(struct net_device *netdev)
6769 {
6770 struct ixgbe_adapter *adapter = netdev_priv(netdev);
6771 struct ixgbe_hw *hw = &adapter->hw;
6772 int err, queues;
6773
6774
6775 if (test_bit(__IXGBE_TESTING, &adapter->state))
6776 return -EBUSY;
6777
6778 netif_carrier_off(netdev);
6779
6780
6781 err = ixgbe_setup_all_tx_resources(adapter);
6782 if (err)
6783 goto err_setup_tx;
6784
6785
6786 err = ixgbe_setup_all_rx_resources(adapter);
6787 if (err)
6788 goto err_setup_rx;
6789
6790 ixgbe_configure(adapter);
6791
6792 err = ixgbe_request_irq(adapter);
6793 if (err)
6794 goto err_req_irq;
6795
6796
6797 queues = adapter->num_tx_queues;
6798 err = netif_set_real_num_tx_queues(netdev, queues);
6799 if (err)
6800 goto err_set_queues;
6801
6802 queues = adapter->num_rx_queues;
6803 err = netif_set_real_num_rx_queues(netdev, queues);
6804 if (err)
6805 goto err_set_queues;
6806
6807 ixgbe_ptp_init(adapter);
6808
6809 ixgbe_up_complete(adapter);
6810
6811 ixgbe_clear_udp_tunnel_port(adapter, IXGBE_VXLANCTRL_ALL_UDPPORT_MASK);
6812 udp_tunnel_get_rx_info(netdev);
6813
6814 return 0;
6815
6816 err_set_queues:
6817 ixgbe_free_irq(adapter);
6818 err_req_irq:
6819 ixgbe_free_all_rx_resources(adapter);
6820 if (hw->phy.ops.set_phy_power && !adapter->wol)
6821 hw->phy.ops.set_phy_power(&adapter->hw, false);
6822 err_setup_rx:
6823 ixgbe_free_all_tx_resources(adapter);
6824 err_setup_tx:
6825 ixgbe_reset(adapter);
6826
6827 return err;
6828 }
6829
6830 static void ixgbe_close_suspend(struct ixgbe_adapter *adapter)
6831 {
6832 ixgbe_ptp_suspend(adapter);
6833
6834 if (adapter->hw.phy.ops.enter_lplu) {
6835 adapter->hw.phy.reset_disable = true;
6836 ixgbe_down(adapter);
6837 adapter->hw.phy.ops.enter_lplu(&adapter->hw);
6838 adapter->hw.phy.reset_disable = false;
6839 } else {
6840 ixgbe_down(adapter);
6841 }
6842
6843 ixgbe_free_irq(adapter);
6844
6845 ixgbe_free_all_tx_resources(adapter);
6846 ixgbe_free_all_rx_resources(adapter);
6847 }
6848
6849 /**
6850  * ixgbe_close - Disables a network interface
6851  * @netdev: network interface device structure
6852  *
6853  * Returns 0, this is not allowed to fail
6854  *
6855  * The close entry point is called when an interface is de-activated
6856  * by the OS.  The hardware is still under the driver's control, but
6857  * needs to be disabled.  A global MAC reset is issued to stop the
6858  * hardware, and all transmit and receive resources are freed.
6859  **/
6860 int ixgbe_close(struct net_device *netdev)
6861 {
6862 struct ixgbe_adapter *adapter = netdev_priv(netdev);
6863
6864 ixgbe_ptp_stop(adapter);
6865
6866 if (netif_device_present(netdev))
6867 ixgbe_close_suspend(adapter);
6868
6869 ixgbe_fdir_filter_exit(adapter);
6870
6871 ixgbe_release_hw_control(adapter);
6872
6873 return 0;
6874 }
6875
6876 #ifdef CONFIG_PM
6877 static int ixgbe_resume(struct pci_dev *pdev)
6878 {
6879 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
6880 struct net_device *netdev = adapter->netdev;
6881 u32 err;
6882
6883 adapter->hw.hw_addr = adapter->io_addr;
6884 pci_set_power_state(pdev, PCI_D0);
6885 pci_restore_state(pdev);
6886
6887
6888
6889
6890 pci_save_state(pdev);
6891
6892 err = pci_enable_device_mem(pdev);
6893 if (err) {
6894 e_dev_err("Cannot enable PCI device from suspend\n");
6895 return err;
6896 }
6897 smp_mb__before_atomic();
6898 clear_bit(__IXGBE_DISABLED, &adapter->state);
6899 pci_set_master(pdev);
6900
6901 pci_wake_from_d3(pdev, false);
6902
6903 ixgbe_reset(adapter);
6904
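/* Wake Up Status bits are write-1-to-clear; writing ~0 acknowledges any
 * wake event recorded while the device was suspended
 */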
6905 IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
6906
6907 rtnl_lock();
6908 err = ixgbe_init_interrupt_scheme(adapter);
6909 if (!err && netif_running(netdev))
6910 err = ixgbe_open(netdev);
6911
6912
6913 if (!err)
6914 netif_device_attach(netdev);
6915 rtnl_unlock();
6916
6917 return err;
6918 }
6919 #endif
6920
6921 static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
6922 {
6923 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
6924 struct net_device *netdev = adapter->netdev;
6925 struct ixgbe_hw *hw = &adapter->hw;
6926 u32 ctrl;
6927 u32 wufc = adapter->wol;
6928 #ifdef CONFIG_PM
6929 int retval = 0;
6930 #endif
6931
6932 rtnl_lock();
6933 netif_device_detach(netdev);
6934
6935 if (netif_running(netdev))
6936 ixgbe_close_suspend(adapter);
6937
6938 ixgbe_clear_interrupt_scheme(adapter);
6939 rtnl_unlock();
6940
6941 #ifdef CONFIG_PM
6942 retval = pci_save_state(pdev);
6943 if (retval)
6944 return retval;
6945
6946 #endif
6947 if (hw->mac.ops.stop_link_on_d3)
6948 hw->mac.ops.stop_link_on_d3(hw);
6949
6950 if (wufc) {
6951 u32 fctrl;
6952
6953 ixgbe_set_rx_mode(netdev);
6954
6955
6956 if (hw->mac.ops.enable_tx_laser)
6957 hw->mac.ops.enable_tx_laser(hw);
6958
6959
6960 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
6961 fctrl |= IXGBE_FCTRL_MPE;
6962 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
6963
6964 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
6965 ctrl |= IXGBE_CTRL_GIO_DIS;
6966 IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
6967
6968 IXGBE_WRITE_REG(hw, IXGBE_WUFC, wufc);
6969 } else {
6970 IXGBE_WRITE_REG(hw, IXGBE_WUC, 0);
6971 IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
6972 }
6973
6974 switch (hw->mac.type) {
6975 case ixgbe_mac_82598EB:
6976 pci_wake_from_d3(pdev, false);
6977 break;
6978 case ixgbe_mac_82599EB:
6979 case ixgbe_mac_X540:
6980 case ixgbe_mac_X550:
6981 case ixgbe_mac_X550EM_x:
6982 case ixgbe_mac_x550em_a:
6983 pci_wake_from_d3(pdev, !!wufc);
6984 break;
6985 default:
6986 break;
6987 }
6988
6989 *enable_wake = !!wufc;
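/* if no wake filters are armed, power the external PHY down before D3 */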
6990 if (hw->phy.ops.set_phy_power && !*enable_wake)
6991 hw->phy.ops.set_phy_power(hw, false);
6992
6993 ixgbe_release_hw_control(adapter);
6994
6995 if (!test_and_set_bit(__IXGBE_DISABLED, &adapter->state))
6996 pci_disable_device(pdev);
6997
6998 return 0;
6999 }
7000
7001 #ifdef CONFIG_PM
7002 static int ixgbe_suspend(struct pci_dev *pdev, pm_message_t state)
7003 {
7004 int retval;
7005 bool wake;
7006
7007 retval = __ixgbe_shutdown(pdev, &wake);
7008 if (retval)
7009 return retval;
7010
7011 if (wake) {
7012 pci_prepare_to_sleep(pdev);
7013 } else {
7014 pci_wake_from_d3(pdev, false);
7015 pci_set_power_state(pdev, PCI_D3hot);
7016 }
7017
7018 return 0;
7019 }
7020 #endif
7021
7022 static void ixgbe_shutdown(struct pci_dev *pdev)
7023 {
7024 bool wake;
7025
7026 __ixgbe_shutdown(pdev, &wake);
7027
7028 if (system_state == SYSTEM_POWER_OFF) {
7029 pci_wake_from_d3(pdev, wake);
7030 pci_set_power_state(pdev, PCI_D3hot);
7031 }
7032 }
7033
7034 /**
7035  * ixgbe_update_stats - Update the board statistics counters.
7036  * @adapter: board private structure
7037  **/
7038 void ixgbe_update_stats(struct ixgbe_adapter *adapter)
7039 {
7040 struct net_device *netdev = adapter->netdev;
7041 struct ixgbe_hw *hw = &adapter->hw;
7042 struct ixgbe_hw_stats *hwstats = &adapter->stats;
7043 u64 total_mpc = 0;
7044 u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot;
7045 u64 non_eop_descs = 0, restart_queue = 0, tx_busy = 0;
7046 u64 alloc_rx_page_failed = 0, alloc_rx_buff_failed = 0;
7047 u64 alloc_rx_page = 0;
7048 u64 bytes = 0, packets = 0, hw_csum_rx_error = 0;
7049
7050 if (test_bit(__IXGBE_DOWN, &adapter->state) ||
7051 test_bit(__IXGBE_RESETTING, &adapter->state))
7052 return;
7053
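/* with hardware receive-side coalescing (RSC) enabled, fold the per-ring
 * RSC counters into adapter-wide totals
 */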
7054 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
7055 u64 rsc_count = 0;
7056 u64 rsc_flush = 0;
7057 for (i = 0; i < adapter->num_rx_queues; i++) {
7058 rsc_count += adapter->rx_ring[i]->rx_stats.rsc_count;
7059 rsc_flush += adapter->rx_ring[i]->rx_stats.rsc_flush;
7060 }
7061 adapter->rsc_total_count = rsc_count;
7062 adapter->rsc_total_flush = rsc_flush;
7063 }
7064
7065 for (i = 0; i < adapter->num_rx_queues; i++) {
7066 struct ixgbe_ring *rx_ring = adapter->rx_ring[i];
7067 non_eop_descs += rx_ring->rx_stats.non_eop_descs;
7068 alloc_rx_page += rx_ring->rx_stats.alloc_rx_page;
7069 alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed;
7070 alloc_rx_buff_failed += rx_ring->rx_stats.alloc_rx_buff_failed;
7071 hw_csum_rx_error += rx_ring->rx_stats.csum_err;
7072 bytes += rx_ring->stats.bytes;
7073 packets += rx_ring->stats.packets;
7074 }
7075 adapter->non_eop_descs = non_eop_descs;
7076 adapter->alloc_rx_page = alloc_rx_page;
7077 adapter->alloc_rx_page_failed = alloc_rx_page_failed;
7078 adapter->alloc_rx_buff_failed = alloc_rx_buff_failed;
7079 adapter->hw_csum_rx_error = hw_csum_rx_error;
7080 netdev->stats.rx_bytes = bytes;
7081 netdev->stats.rx_packets = packets;
7082
7083 bytes = 0;
7084 packets = 0;
7085
7086 for (i = 0; i < adapter->num_tx_queues; i++) {
7087 struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
7088 restart_queue += tx_ring->tx_stats.restart_queue;
7089 tx_busy += tx_ring->tx_stats.tx_busy;
7090 bytes += tx_ring->stats.bytes;
7091 packets += tx_ring->stats.packets;
7092 }
7093 for (i = 0; i < adapter->num_xdp_queues; i++) {
7094 struct ixgbe_ring *xdp_ring = adapter->xdp_ring[i];
7095
7096 restart_queue += xdp_ring->tx_stats.restart_queue;
7097 tx_busy += xdp_ring->tx_stats.tx_busy;
7098 bytes += xdp_ring->stats.bytes;
7099 packets += xdp_ring->stats.packets;
7100 }
7101 adapter->restart_queue = restart_queue;
7102 adapter->tx_busy = tx_busy;
7103 netdev->stats.tx_bytes = bytes;
7104 netdev->stats.tx_packets = packets;
7105
7106 hwstats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
7107
7108
7109 for (i = 0; i < 8; i++) {
7110
7111 mpc = IXGBE_READ_REG(hw, IXGBE_MPC(i));
7112 missed_rx += mpc;
7113 hwstats->mpc[i] += mpc;
7114 total_mpc += hwstats->mpc[i];
7115 hwstats->pxontxc[i] += IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
7116 hwstats->pxofftxc[i] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
7117 switch (hw->mac.type) {
7118 case ixgbe_mac_82598EB:
7119 hwstats->rnbc[i] += IXGBE_READ_REG(hw, IXGBE_RNBC(i));
7120 hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC(i));
7121 hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC(i));
7122 hwstats->pxonrxc[i] +=
7123 IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
7124 break;
7125 case ixgbe_mac_82599EB:
7126 case ixgbe_mac_X540:
7127 case ixgbe_mac_X550:
7128 case ixgbe_mac_X550EM_x:
7129 case ixgbe_mac_x550em_a:
7130 hwstats->pxonrxc[i] +=
7131 IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
7132 break;
7133 default:
7134 break;
7135 }
7136 }
7137
7138
7139 for (i = 0; i < 16; i++) {
7140 hwstats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
7141 hwstats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
7142 if ((hw->mac.type == ixgbe_mac_82599EB) ||
7143 (hw->mac.type == ixgbe_mac_X540) ||
7144 (hw->mac.type == ixgbe_mac_X550) ||
7145 (hw->mac.type == ixgbe_mac_X550EM_x) ||
7146 (hw->mac.type == ixgbe_mac_x550em_a)) {
7147 hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
7148 IXGBE_READ_REG(hw, IXGBE_QBTC_H(i));
7149 hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
7150 IXGBE_READ_REG(hw, IXGBE_QBRC_H(i));
7151 }
7152 }
7153
7154 hwstats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
7155
7156 hwstats->gprc -= missed_rx;
7157
7158 ixgbe_update_xoff_received(adapter);
7159
7160
7161 switch (hw->mac.type) {
7162 case ixgbe_mac_82598EB:
7163 hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
7164 hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
7165 hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
7166 hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
7167 break;
7168 case ixgbe_mac_X540:
7169 case ixgbe_mac_X550:
7170 case ixgbe_mac_X550EM_x:
7171 case ixgbe_mac_x550em_a:
7172
7173 hwstats->o2bgptc += IXGBE_READ_REG(hw, IXGBE_O2BGPTC);
7174 hwstats->o2bspc += IXGBE_READ_REG(hw, IXGBE_O2BSPC);
7175 hwstats->b2ospc += IXGBE_READ_REG(hw, IXGBE_B2OSPC);
7176 hwstats->b2ogprc += IXGBE_READ_REG(hw, IXGBE_B2OGPRC);
7177 /* fall through */
7178 case ixgbe_mac_82599EB:
7179 for (i = 0; i < 16; i++)
7180 adapter->hw_rx_no_dma_resources +=
7181 IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
7182 hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL);
7183 IXGBE_READ_REG(hw, IXGBE_GORCH);
7184 hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL);
7185 IXGBE_READ_REG(hw, IXGBE_GOTCH);
7186 hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORL);
7187 IXGBE_READ_REG(hw, IXGBE_TORH);
7188 hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
7189 hwstats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
7190 hwstats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
7191 #ifdef IXGBE_FCOE
7192 hwstats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
7193 hwstats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
7194 hwstats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
7195 hwstats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
7196 hwstats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
7197 hwstats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
7198
7199 if (adapter->fcoe.ddp_pool) {
7200 struct ixgbe_fcoe *fcoe = &adapter->fcoe;
7201 struct ixgbe_fcoe_ddp_pool *ddp_pool;
7202 unsigned int cpu;
7203 u64 noddp = 0, noddp_ext_buff = 0;
7204 for_each_possible_cpu(cpu) {
7205 ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu);
7206 noddp += ddp_pool->noddp;
7207 noddp_ext_buff += ddp_pool->noddp_ext_buff;
7208 }
7209 hwstats->fcoe_noddp = noddp;
7210 hwstats->fcoe_noddp_ext_buff = noddp_ext_buff;
7211 }
7212 #endif
7213 break;
7214 default:
7215 break;
7216 }
7217 bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
7218 hwstats->bprc += bprc;
7219 hwstats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
7220 if (hw->mac.type == ixgbe_mac_82598EB)
7221 hwstats->mprc -= bprc;
7222 hwstats->roc += IXGBE_READ_REG(hw, IXGBE_ROC);
7223 hwstats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
7224 hwstats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
7225 hwstats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
7226 hwstats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
7227 hwstats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
7228 hwstats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
7229 hwstats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
7230 lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
7231 hwstats->lxontxc += lxon;
7232 lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
7233 hwstats->lxofftxc += lxoff;
7234 hwstats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
7235 hwstats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
7236
7237
7238
7239 xon_off_tot = lxon + lxoff;
7240 hwstats->gptc -= xon_off_tot;
7241 hwstats->mptc -= xon_off_tot;
7242 hwstats->gotc -= (xon_off_tot * (ETH_ZLEN + ETH_FCS_LEN));
7243 hwstats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
7244 hwstats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
7245 hwstats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
7246 hwstats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
7247 hwstats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
7248 hwstats->ptc64 -= xon_off_tot;
7249 hwstats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
7250 hwstats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
7251 hwstats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
7252 hwstats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
7253 hwstats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
7254 hwstats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
7255
7256
7257 netdev->stats.multicast = hwstats->mprc;
7258
7259
7260 netdev->stats.rx_errors = hwstats->crcerrs + hwstats->rlec;
7261 netdev->stats.rx_dropped = 0;
7262 netdev->stats.rx_length_errors = hwstats->rlec;
7263 netdev->stats.rx_crc_errors = hwstats->crcerrs;
7264 netdev->stats.rx_missed_errors = total_mpc;
7265 }
7266
7267 /**
7268  * ixgbe_fdir_reinit_subtask - worker thread to reinit FDIR filter table
7269  * @adapter: pointer to the device adapter structure
7270  **/
7271 static void ixgbe_fdir_reinit_subtask(struct ixgbe_adapter *adapter)
7272 {
7273 struct ixgbe_hw *hw = &adapter->hw;
7274 int i;
7275
7276 if (!(adapter->flags2 & IXGBE_FLAG2_FDIR_REQUIRES_REINIT))
7277 return;
7278
7279 adapter->flags2 &= ~IXGBE_FLAG2_FDIR_REQUIRES_REINIT;
7280
7281
7282 if (test_bit(__IXGBE_DOWN, &adapter->state))
7283 return;
7284
7285
7286 if (!(adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE))
7287 return;
7288
7289 adapter->fdir_overflow++;
7290
7291 if (ixgbe_reinit_fdir_tables_82599(hw) == 0) {
7292 for (i = 0; i < adapter->num_tx_queues; i++)
7293 set_bit(__IXGBE_TX_FDIR_INIT_DONE,
7294 &(adapter->tx_ring[i]->state));
7295 for (i = 0; i < adapter->num_xdp_queues; i++)
7296 set_bit(__IXGBE_TX_FDIR_INIT_DONE,
7297 &adapter->xdp_ring[i]->state);
7298
7299 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_FLOW_DIR);
7300 } else {
7301 e_err(probe, "failed to finish FDIR re-initialization, "
7302 "ignored adding FDIR ATR filters\n");
7303 }
7304 }
7305
7306 /**
7307  * ixgbe_check_hang_subtask - check for hung queues and dropped interrupts
7308  * @adapter: pointer to the device adapter structure
7309  *
7310  * This function serves two purposes.  First it strobes the interrupt lines
7311  * in order to make certain interrupts are occurring.  Secondly it sets the
7312  * bits needed to check for TX hangs.  As a result we should immediately
7313  * determine if a hang has occurred.
7314  */
7315 static void ixgbe_check_hang_subtask(struct ixgbe_adapter *adapter)
7316 {
7317 struct ixgbe_hw *hw = &adapter->hw;
7318 u64 eics = 0;
7319 int i;
7320
7321
7322 if (test_bit(__IXGBE_DOWN, &adapter->state) ||
7323 test_bit(__IXGBE_REMOVING, &adapter->state) ||
7324 test_bit(__IXGBE_RESETTING, &adapter->state))
7325 return;
7326
7327
7328 if (netif_carrier_ok(adapter->netdev)) {
7329 for (i = 0; i < adapter->num_tx_queues; i++)
7330 set_check_for_tx_hang(adapter->tx_ring[i]);
7331 for (i = 0; i < adapter->num_xdp_queues; i++)
7332 set_check_for_tx_hang(adapter->xdp_ring[i]);
7333 }
7334
7335 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
7336 /*
7337  * for legacy and MSI interrupts don't set any bits
7338  * that are enabled for EIAM, because this operation
7339  * would set *both* EIMS and EICS for any bit in EIAM
7340  */
7341 IXGBE_WRITE_REG(hw, IXGBE_EICS,
7342 (IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER));
7343 } else {
7344
7345 for (i = 0; i < adapter->num_q_vectors; i++) {
7346 struct ixgbe_q_vector *qv = adapter->q_vector[i];
7347 if (qv->rx.ring || qv->tx.ring)
7348 eics |= BIT_ULL(i);
7349 }
7350 }
7351
7352
7353 ixgbe_irq_rearm_queues(adapter, eics);
7354 }
7355
7356 /**
7357  * ixgbe_watchdog_update_link - update the link status
7358  * @adapter: pointer to the device adapter structure
7359  **/
7360 static void ixgbe_watchdog_update_link(struct ixgbe_adapter *adapter)
7361 {
7362 struct ixgbe_hw *hw = &adapter->hw;
7363 u32 link_speed = adapter->link_speed;
7364 bool link_up = adapter->link_up;
7365 bool pfc_en = adapter->dcb_cfg.pfc_mode_enable;
7366
7367 if (!(adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE))
7368 return;
7369
7370 if (hw->mac.ops.check_link) {
7371 hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
7372 } else {
7373
7374 link_speed = IXGBE_LINK_SPEED_10GB_FULL;
7375 link_up = true;
7376 }
7377
7378 if (adapter->ixgbe_ieee_pfc)
7379 pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en);
7380
7381 if (link_up && !((adapter->flags & IXGBE_FLAG_DCB_ENABLED) && pfc_en)) {
7382 hw->mac.ops.fc_enable(hw);
7383 ixgbe_set_rx_drop_en(adapter);
7384 }
7385
7386 if (link_up ||
7387 time_after(jiffies, (adapter->link_check_timeout +
7388 IXGBE_TRY_LINK_TIMEOUT))) {
7389 adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
7390 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMC_LSC);
7391 IXGBE_WRITE_FLUSH(hw);
7392 }
7393
7394 adapter->link_up = link_up;
7395 adapter->link_speed = link_speed;
7396 }
7397
7398 static void ixgbe_update_default_up(struct ixgbe_adapter *adapter)
7399 {
7400 #ifdef CONFIG_IXGBE_DCB
7401 struct net_device *netdev = adapter->netdev;
7402 struct dcb_app app = {
7403 .selector = IEEE_8021QAZ_APP_SEL_ETHERTYPE,
7404 .protocol = 0,
7405 };
7406 u8 up = 0;
7407
7408 if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)
7409 up = dcb_ieee_getapp_mask(netdev, &app);
7410
7411 adapter->default_up = (up > 1) ? (ffs(up) - 1) : 0;
7412 #endif
7413 }
7414
7415 /**
7416  * ixgbe_watchdog_link_is_up - update netif_carrier status and
7417  *                             print link up message
7418  * @adapter: pointer to the device adapter structure
7419  **/
7420 static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
7421 {
7422 struct net_device *netdev = adapter->netdev;
7423 struct ixgbe_hw *hw = &adapter->hw;
7424 u32 link_speed = adapter->link_speed;
7425 const char *speed_str;
7426 bool flow_rx, flow_tx;
7427
7428
7429 if (netif_carrier_ok(netdev))
7430 return;
7431
7432 adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP;
7433
7434 switch (hw->mac.type) {
7435 case ixgbe_mac_82598EB: {
7436 u32 frctl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
7437 u32 rmcs = IXGBE_READ_REG(hw, IXGBE_RMCS);
7438 flow_rx = !!(frctl & IXGBE_FCTRL_RFCE);
7439 flow_tx = !!(rmcs & IXGBE_RMCS_TFCE_802_3X);
7440 }
7441 break;
7442 case ixgbe_mac_X540:
7443 case ixgbe_mac_X550:
7444 case ixgbe_mac_X550EM_x:
7445 case ixgbe_mac_x550em_a:
7446 case ixgbe_mac_82599EB: {
7447 u32 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN);
7448 u32 fccfg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
7449 flow_rx = !!(mflcn & IXGBE_MFLCN_RFCE);
7450 flow_tx = !!(fccfg & IXGBE_FCCFG_TFCE_802_3X);
7451 }
7452 break;
7453 default:
7454 flow_tx = false;
7455 flow_rx = false;
7456 break;
7457 }
7458
7459 adapter->last_rx_ptp_check = jiffies;
7460
7461 if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state))
7462 ixgbe_ptp_start_cyclecounter(adapter);
7463
7464 switch (link_speed) {
7465 case IXGBE_LINK_SPEED_10GB_FULL:
7466 speed_str = "10 Gbps";
7467 break;
7468 case IXGBE_LINK_SPEED_5GB_FULL:
7469 speed_str = "5 Gbps";
7470 break;
7471 case IXGBE_LINK_SPEED_2_5GB_FULL:
7472 speed_str = "2.5 Gbps";
7473 break;
7474 case IXGBE_LINK_SPEED_1GB_FULL:
7475 speed_str = "1 Gbps";
7476 break;
7477 case IXGBE_LINK_SPEED_100_FULL:
7478 speed_str = "100 Mbps";
7479 break;
7480 case IXGBE_LINK_SPEED_10_FULL:
7481 speed_str = "10 Mbps";
7482 break;
7483 default:
7484 speed_str = "unknown speed";
7485 break;
7486 }
7487 e_info(drv, "NIC Link is Up %s, Flow Control: %s\n", speed_str,
7488 ((flow_rx && flow_tx) ? "RX/TX" :
7489 (flow_rx ? "RX" :
7490 (flow_tx ? "TX" : "None"))));
7491
7492 netif_carrier_on(netdev);
7493 ixgbe_check_vf_rate_limit(adapter);
7494
7495
7496 netif_tx_wake_all_queues(adapter->netdev);
7497
7498
7499 ixgbe_update_default_up(adapter);
7500
7501
7502 ixgbe_ping_all_vfs(adapter);
7503 }
7504
7505 /**
7506  * ixgbe_watchdog_link_is_down - update netif_carrier status and
7507  *                               print link down message
7508  * @adapter: pointer to the adapter structure
7509  **/
7510 static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *adapter)
7511 {
7512 struct net_device *netdev = adapter->netdev;
7513 struct ixgbe_hw *hw = &adapter->hw;
7514
7515 adapter->link_up = false;
7516 adapter->link_speed = 0;
7517
7518
7519 if (!netif_carrier_ok(netdev))
7520 return;
7521
7522
7523 if (ixgbe_is_sfp(hw) && hw->mac.type == ixgbe_mac_82598EB)
7524 adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP;
7525
7526 if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state))
7527 ixgbe_ptp_start_cyclecounter(adapter);
7528
7529 e_info(drv, "NIC Link is Down\n");
7530 netif_carrier_off(netdev);
7531
7532
7533 ixgbe_ping_all_vfs(adapter);
7534 }
7535
7536 static bool ixgbe_ring_tx_pending(struct ixgbe_adapter *adapter)
7537 {
7538 int i;
7539
7540 for (i = 0; i < adapter->num_tx_queues; i++) {
7541 struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
7542
7543 if (tx_ring->next_to_use != tx_ring->next_to_clean)
7544 return true;
7545 }
7546
7547 for (i = 0; i < adapter->num_xdp_queues; i++) {
7548 struct ixgbe_ring *ring = adapter->xdp_ring[i];
7549
7550 if (ring->next_to_use != ring->next_to_clean)
7551 return true;
7552 }
7553
7554 return false;
7555 }
7556
7557 static bool ixgbe_vf_tx_pending(struct ixgbe_adapter *adapter)
7558 {
7559 struct ixgbe_hw *hw = &adapter->hw;
7560 struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
7561 u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask);
7562
7563 int i, j;
7564
7565 if (!adapter->num_vfs)
7566 return false;
7567
7568
7569 if (hw->mac.type >= ixgbe_mac_X550)
7570 return false;
7571
7572 for (i = 0; i < adapter->num_vfs; i++) {
7573 for (j = 0; j < q_per_pool; j++) {
7574 u32 h, t;
7575
7576 h = IXGBE_READ_REG(hw, IXGBE_PVFTDHN(q_per_pool, i, j));
7577 t = IXGBE_READ_REG(hw, IXGBE_PVFTDTN(q_per_pool, i, j));
7578
7579 if (h != t)
7580 return true;
7581 }
7582 }
7583
7584 return false;
7585 }
7586
7587 /**
7588  * ixgbe_watchdog_flush_tx - flush queues on link down
7589  * @adapter: pointer to the device adapter structure
7590  **/
7591 static void ixgbe_watchdog_flush_tx(struct ixgbe_adapter *adapter)
7592 {
7593 if (!netif_carrier_ok(adapter->netdev)) {
7594 if (ixgbe_ring_tx_pending(adapter) ||
7595 ixgbe_vf_tx_pending(adapter)) {
7596 /* We've lost link, so the controller stops DMA,
7597  * but we've got queued Tx work that's never going
7598  * to get done, so reset controller to flush Tx.
7599  * (Do the reset outside of interrupt context).
7600  */
7601 e_warn(drv, "initiating reset to clear Tx work after link loss\n");
7602 set_bit(__IXGBE_RESET_REQUESTED, &adapter->state);
7603 }
7604 }
7605 }
7606
7607 #ifdef CONFIG_PCI_IOV
7608 static void ixgbe_check_for_bad_vf(struct ixgbe_adapter *adapter)
7609 {
7610 struct ixgbe_hw *hw = &adapter->hw;
7611 struct pci_dev *pdev = adapter->pdev;
7612 unsigned int vf;
7613 u32 gpc;
7614
7615 if (!(netif_carrier_ok(adapter->netdev)))
7616 return;
7617
7618 gpc = IXGBE_READ_REG(hw, IXGBE_TXDGPC);
7619 if (gpc)
7620 return;
7621
7622 /* Check to see if a bad DMA write target from an errant or
7623  * malicious VF has caused a PCIe error.  If so then we can
7624  * issue a VFLR to the offending VF(s) and then resume without
7625  * requesting a full slot reset.
7626  */
7627 if (!pdev)
7628 return;
7629
7630
7631 for (vf = 0; vf < adapter->num_vfs; ++vf) {
7632 struct pci_dev *vfdev = adapter->vfinfo[vf].vfdev;
7633 u16 status_reg;
7634
7635 if (!vfdev)
7636 continue;
7637 pci_read_config_word(vfdev, PCI_STATUS, &status_reg);
7638 if (status_reg != IXGBE_FAILED_READ_CFG_WORD &&
7639 status_reg & PCI_STATUS_REC_MASTER_ABORT)
7640 pcie_flr(vfdev);
7641 }
7642 }
7643
7644 static void ixgbe_spoof_check(struct ixgbe_adapter *adapter)
7645 {
7646 u32 ssvpc;
7647
7648
7649 if (adapter->hw.mac.type == ixgbe_mac_82598EB ||
7650 adapter->num_vfs == 0)
7651 return;
7652
7653 ssvpc = IXGBE_READ_REG(&adapter->hw, IXGBE_SSVPC);
7654
7655 /*
7656  * ssvpc register is cleared on read, if zero then no
7657  * spoofed packets were seen in the last interval
7658  */
7659 if (!ssvpc)
7660 return;
7661
7662 e_warn(drv, "%u Spoofed packets detected\n", ssvpc);
7663 }
7664 #else
7665 static void ixgbe_spoof_check(struct ixgbe_adapter __always_unused *adapter)
7666 {
7667 }
7668
7669 static void
7670 ixgbe_check_for_bad_vf(struct ixgbe_adapter __always_unused *adapter)
7671 {
7672 }
7673 #endif
7674
7675
7676 /**
7677  * ixgbe_watchdog_subtask - check and bring link up
7678  * @adapter: pointer to the device adapter structure
7679  **/
7680 static void ixgbe_watchdog_subtask(struct ixgbe_adapter *adapter)
7681 {
7682
7683 if (test_bit(__IXGBE_DOWN, &adapter->state) ||
7684 test_bit(__IXGBE_REMOVING, &adapter->state) ||
7685 test_bit(__IXGBE_RESETTING, &adapter->state))
7686 return;
7687
7688 ixgbe_watchdog_update_link(adapter);
7689
7690 if (adapter->link_up)
7691 ixgbe_watchdog_link_is_up(adapter);
7692 else
7693 ixgbe_watchdog_link_is_down(adapter);
7694
7695 ixgbe_check_for_bad_vf(adapter);
7696 ixgbe_spoof_check(adapter);
7697 ixgbe_update_stats(adapter);
7698
7699 ixgbe_watchdog_flush_tx(adapter);
7700 }
7701
7702 /**
7703  * ixgbe_sfp_detection_subtask - poll for SFP+ cable
7704  * @adapter: the ixgbe adapter structure
7705  **/
7706 static void ixgbe_sfp_detection_subtask(struct ixgbe_adapter *adapter)
7707 {
7708 struct ixgbe_hw *hw = &adapter->hw;
7709 s32 err;
7710
7711
7712 if (!(adapter->flags2 & IXGBE_FLAG2_SEARCH_FOR_SFP) &&
7713 !(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET))
7714 return;
7715
7716 if (adapter->sfp_poll_time &&
7717 time_after(adapter->sfp_poll_time, jiffies))
7718 return;
7719
7720
7721 if (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
7722 return;
7723
7724 adapter->sfp_poll_time = jiffies + IXGBE_SFP_POLL_JIFFIES - 1;
7725
7726 err = hw->phy.ops.identify_sfp(hw);
7727 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED)
7728 goto sfp_out;
7729
7730 if (err == IXGBE_ERR_SFP_NOT_PRESENT) {
7731 /* If no cable is present, then we need to reset
7732  * the next time we find a good cable. */
7733 adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET;
7734 }
7735
7736
7737 if (err)
7738 goto sfp_out;
7739
7740
7741 if (!(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET))
7742 goto sfp_out;
7743
7744 adapter->flags2 &= ~IXGBE_FLAG2_SFP_NEEDS_RESET;
7745
7746
7747
7748
7749
7750
7751 if (hw->mac.type == ixgbe_mac_82598EB)
7752 err = hw->phy.ops.reset(hw);
7753 else
7754 err = hw->mac.ops.setup_sfp(hw);
7755
7756 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED)
7757 goto sfp_out;
7758
7759 adapter->flags |= IXGBE_FLAG_NEED_LINK_CONFIG;
7760 e_info(probe, "detected SFP+: %d\n", hw->phy.sfp_type);
7761
7762 sfp_out:
7763 clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
7764
7765 if ((err == IXGBE_ERR_SFP_NOT_SUPPORTED) &&
7766 (adapter->netdev->reg_state == NETREG_REGISTERED)) {
7767 e_dev_err("failed to initialize because an unsupported "
7768 "SFP+ module type was detected.\n");
7769 e_dev_err("Reload the driver after installing a "
7770 "supported module.\n");
7771 unregister_netdev(adapter->netdev);
7772 }
7773 }
7774
7775 /**
7776  * ixgbe_sfp_link_config_subtask - set up link SFP if not already done
7777  * @adapter: the ixgbe adapter structure
7778  **/
7779 static void ixgbe_sfp_link_config_subtask(struct ixgbe_adapter *adapter)
7780 {
7781 struct ixgbe_hw *hw = &adapter->hw;
7782 u32 cap_speed;
7783 u32 speed;
7784 bool autoneg = false;
7785
7786 if (!(adapter->flags & IXGBE_FLAG_NEED_LINK_CONFIG))
7787 return;
7788
7789
7790 if (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
7791 return;
7792
7793 adapter->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG;
7794
7795 hw->mac.ops.get_link_capabilities(hw, &cap_speed, &autoneg);
7796
7797
7798 if (!autoneg && (cap_speed & IXGBE_LINK_SPEED_10GB_FULL))
7799 speed = IXGBE_LINK_SPEED_10GB_FULL;
7800 else
7801 speed = cap_speed & (IXGBE_LINK_SPEED_10GB_FULL |
7802 IXGBE_LINK_SPEED_1GB_FULL);
7803
7804 if (hw->mac.ops.setup_link)
7805 hw->mac.ops.setup_link(hw, speed, true);
7806
7807 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
7808 adapter->link_check_timeout = jiffies;
7809 clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
7810 }
7811
7812 /**
7813  * ixgbe_service_timer - Timer Call-back
7814  * @t: pointer to timer_list structure
7815  **/
7816 static void ixgbe_service_timer(struct timer_list *t)
7817 {
7818 struct ixgbe_adapter *adapter = from_timer(adapter, t, service_timer);
7819 unsigned long next_event_offset;
7820
7821
7822 if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE)
7823 next_event_offset = HZ / 10;
7824 else
7825 next_event_offset = HZ * 2;
7826
7827
7828 mod_timer(&adapter->service_timer, next_event_offset + jiffies);
7829
7830 ixgbe_service_event_schedule(adapter);
7831 }
7832
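/* ixgbe_phy_interrupt_subtask - service an external PHY (LASI) interrupt;
 * the only event acted on here is an over-temperature report from the PHY
 */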
7833 static void ixgbe_phy_interrupt_subtask(struct ixgbe_adapter *adapter)
7834 {
7835 struct ixgbe_hw *hw = &adapter->hw;
7836 u32 status;
7837
7838 if (!(adapter->flags2 & IXGBE_FLAG2_PHY_INTERRUPT))
7839 return;
7840
7841 adapter->flags2 &= ~IXGBE_FLAG2_PHY_INTERRUPT;
7842
7843 if (!hw->phy.ops.handle_lasi)
7844 return;
7845
7846 status = hw->phy.ops.handle_lasi(&adapter->hw);
7847 if (status != IXGBE_ERR_OVERTEMP)
7848 return;
7849
7850 e_crit(drv, "%s\n", ixgbe_overheat_msg);
7851 }
7852
7853 static void ixgbe_reset_subtask(struct ixgbe_adapter *adapter)
7854 {
7855 if (!test_and_clear_bit(__IXGBE_RESET_REQUESTED, &adapter->state))
7856 return;
7857
7858 rtnl_lock();
7859
7860 if (test_bit(__IXGBE_DOWN, &adapter->state) ||
7861 test_bit(__IXGBE_REMOVING, &adapter->state) ||
7862 test_bit(__IXGBE_RESETTING, &adapter->state)) {
7863 rtnl_unlock();
7864 return;
7865 }
7866
7867 ixgbe_dump(adapter);
7868 netdev_err(adapter->netdev, "Reset adapter\n");
7869 adapter->tx_timeout_count++;
7870
7871 ixgbe_reinit_locked(adapter);
7872 rtnl_unlock();
7873 }
7874
7875 /**
7876  * ixgbe_check_fw_error - Check firmware for errors
7877  * @adapter: the adapter private structure
7878  *
7879  * Check firmware errors in register FWSM
7880  */
7881 static bool ixgbe_check_fw_error(struct ixgbe_adapter *adapter)
7882 {
7883 struct ixgbe_hw *hw = &adapter->hw;
7884 u32 fwsm;
7885
7886
7887 fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM(hw));
7888
7889 if (fwsm & IXGBE_FWSM_EXT_ERR_IND_MASK ||
7890 !(fwsm & IXGBE_FWSM_FW_VAL_BIT))
7891 e_dev_warn("Warning firmware error detected FWSM: 0x%08X\n",
7892 fwsm);
7893
7894 if (hw->mac.ops.fw_recovery_mode && hw->mac.ops.fw_recovery_mode(hw)) {
7895 e_dev_err("Firmware recovery mode detected. Limiting functionality. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n");
7896 return true;
7897 }
7898
7899 return false;
7900 }
7901
7902 /**
7903  * ixgbe_service_task - manages and runs subtasks
7904  * @work: pointer to work_struct containing our data
7905  **/
7906 static void ixgbe_service_task(struct work_struct *work)
7907 {
7908 struct ixgbe_adapter *adapter = container_of(work,
7909 struct ixgbe_adapter,
7910 service_task);
7911 if (ixgbe_removed(adapter->hw.hw_addr)) {
7912 if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
7913 rtnl_lock();
7914 ixgbe_down(adapter);
7915 rtnl_unlock();
7916 }
7917 ixgbe_service_event_complete(adapter);
7918 return;
7919 }
7920 if (ixgbe_check_fw_error(adapter)) {
7921 if (!test_bit(__IXGBE_DOWN, &adapter->state))
7922 unregister_netdev(adapter->netdev);
7923 ixgbe_service_event_complete(adapter);
7924 return;
7925 }
7926 if (adapter->flags2 & IXGBE_FLAG2_UDP_TUN_REREG_NEEDED) {
7927 rtnl_lock();
7928 adapter->flags2 &= ~IXGBE_FLAG2_UDP_TUN_REREG_NEEDED;
7929 udp_tunnel_get_rx_info(adapter->netdev);
7930 rtnl_unlock();
7931 }
7932 ixgbe_reset_subtask(adapter);
7933 ixgbe_phy_interrupt_subtask(adapter);
7934 ixgbe_sfp_detection_subtask(adapter);
7935 ixgbe_sfp_link_config_subtask(adapter);
7936 ixgbe_check_overtemp_subtask(adapter);
7937 ixgbe_watchdog_subtask(adapter);
7938 ixgbe_fdir_reinit_subtask(adapter);
7939 ixgbe_check_hang_subtask(adapter);
7940
7941 if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state)) {
7942 ixgbe_ptp_overflow_check(adapter);
7943 if (adapter->flags & IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER)
7944 ixgbe_ptp_rx_hang(adapter);
7945 ixgbe_ptp_tx_hang(adapter);
7946 }
7947
7948 ixgbe_service_event_complete(adapter);
7949 }
7950
7951 static int ixgbe_tso(struct ixgbe_ring *tx_ring,
7952 struct ixgbe_tx_buffer *first,
7953 u8 *hdr_len,
7954 struct ixgbe_ipsec_tx_data *itd)
7955 {
7956 u32 vlan_macip_lens, type_tucmd, mss_l4len_idx;
7957 struct sk_buff *skb = first->skb;
7958 union {
7959 struct iphdr *v4;
7960 struct ipv6hdr *v6;
7961 unsigned char *hdr;
7962 } ip;
7963 union {
7964 struct tcphdr *tcp;
7965 unsigned char *hdr;
7966 } l4;
7967 u32 paylen, l4_offset;
7968 u32 fceof_saidx = 0;
7969 int err;
7970
7971 if (skb->ip_summed != CHECKSUM_PARTIAL)
7972 return 0;
7973
7974 if (!skb_is_gso(skb))
7975 return 0;
7976
7977 err = skb_cow_head(skb, 0);
7978 if (err < 0)
7979 return err;
7980
7981 if (eth_p_mpls(first->protocol))
7982 ip.hdr = skb_inner_network_header(skb);
7983 else
7984 ip.hdr = skb_network_header(skb);
7985 l4.hdr = skb_checksum_start(skb);
7986
7987
7988 type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
7989
7990
7991 if (ip.v4->version == 4) {
7992 unsigned char *csum_start = skb_checksum_start(skb);
7993 unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4);
7994 int len = csum_start - trans_start;
7995
7996 /* IP header will have to cancel out any data that
7997  * is not a part of the outer IP header, so set to
7998  * a reverse csum if needed, else init check to 0.
7999  */
8000 ip.v4->check = (skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) ?
8001 csum_fold(csum_partial(trans_start,
8002 len, 0)) : 0;
8003 type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
8004
8005 ip.v4->tot_len = 0;
8006 first->tx_flags |= IXGBE_TX_FLAGS_TSO |
8007 IXGBE_TX_FLAGS_CSUM |
8008 IXGBE_TX_FLAGS_IPV4;
8009 } else {
8010 ip.v6->payload_len = 0;
8011 first->tx_flags |= IXGBE_TX_FLAGS_TSO |
8012 IXGBE_TX_FLAGS_CSUM;
8013 }
8014
8015
8016 l4_offset = l4.hdr - skb->data;
8017
8018
8019 *hdr_len = (l4.tcp->doff * 4) + l4_offset;
8020
8021
8022 paylen = skb->len - l4_offset;
8023 csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen));
8024
8025
8026 first->gso_segs = skb_shinfo(skb)->gso_segs;
8027 first->bytecount += (first->gso_segs - 1) * *hdr_len;
8028
8029
8030 mss_l4len_idx = (*hdr_len - l4_offset) << IXGBE_ADVTXD_L4LEN_SHIFT;
8031 mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
8032
8033 fceof_saidx |= itd->sa_idx;
8034 type_tucmd |= itd->flags | itd->trailer_len;
8035
8036
8037 vlan_macip_lens = l4.hdr - ip.hdr;
8038 vlan_macip_lens |= (ip.hdr - skb->data) << IXGBE_ADVTXD_MACLEN_SHIFT;
8039 vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
8040
8041 ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, fceof_saidx, type_tucmd,
8042 mss_l4len_idx);
8043
8044 return 1;
8045 }
8046
8047 static inline bool ixgbe_ipv6_csum_is_sctp(struct sk_buff *skb)
8048 {
8049 unsigned int offset = 0;
8050
8051 ipv6_find_hdr(skb, &offset, IPPROTO_SCTP, NULL, NULL);
8052
8053 return offset == skb_checksum_start_offset(skb);
8054 }
8055
8056 static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring,
8057 struct ixgbe_tx_buffer *first,
8058 struct ixgbe_ipsec_tx_data *itd)
8059 {
8060 struct sk_buff *skb = first->skb;
8061 u32 vlan_macip_lens = 0;
8062 u32 fceof_saidx = 0;
8063 u32 type_tucmd = 0;
8064
8065 if (skb->ip_summed != CHECKSUM_PARTIAL) {
8066 csum_failed:
8067 if (!(first->tx_flags & (IXGBE_TX_FLAGS_HW_VLAN |
8068 IXGBE_TX_FLAGS_CC)))
8069 return;
8070 goto no_csum;
8071 }
8072
8073 switch (skb->csum_offset) {
8074 case offsetof(struct tcphdr, check):
8075 type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
8076 /* fall through */
8077 case offsetof(struct udphdr, check):
8078 break;
8079 case offsetof(struct sctphdr, checksum):
8080
8081 if (((first->protocol == htons(ETH_P_IP)) &&
8082 (ip_hdr(skb)->protocol == IPPROTO_SCTP)) ||
8083 ((first->protocol == htons(ETH_P_IPV6)) &&
8084 ixgbe_ipv6_csum_is_sctp(skb))) {
8085 type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_SCTP;
8086 break;
8087 }
8088 /* fall through */
8089 default:
8090 skb_checksum_help(skb);
8091 goto csum_failed;
8092 }
8093
8094
8095 first->tx_flags |= IXGBE_TX_FLAGS_CSUM;
8096 vlan_macip_lens = skb_checksum_start_offset(skb) -
8097 skb_network_offset(skb);
8098 no_csum:
8099
8100 vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
8101 vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
8102
8103 fceof_saidx |= itd->sa_idx;
8104 type_tucmd |= itd->flags | itd->trailer_len;
8105
8106 ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, fceof_saidx, type_tucmd, 0);
8107 }
8108
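/* IXGBE_SET_FLAG translates a single-bit flag in _input into the
 * corresponding descriptor bit _result; the constant multiply/divide
 * lets the compiler reduce it to a simple shift
 */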
8109 #define IXGBE_SET_FLAG(_input, _flag, _result) \
8110 ((_flag <= _result) ? \
8111 ((u32)(_input & _flag) * (_result / _flag)) : \
8112 ((u32)(_input & _flag) / (_flag / _result)))
8113
8114 static u32 ixgbe_tx_cmd_type(struct sk_buff *skb, u32 tx_flags)
8115 {
8116
8117 u32 cmd_type = IXGBE_ADVTXD_DTYP_DATA |
8118 IXGBE_ADVTXD_DCMD_DEXT |
8119 IXGBE_ADVTXD_DCMD_IFCS;
8120
8121
8122 cmd_type |= IXGBE_SET_FLAG(tx_flags, IXGBE_TX_FLAGS_HW_VLAN,
8123 IXGBE_ADVTXD_DCMD_VLE);
8124
8125
8126 cmd_type |= IXGBE_SET_FLAG(tx_flags, IXGBE_TX_FLAGS_TSO,
8127 IXGBE_ADVTXD_DCMD_TSE);
8128
8129
8130 cmd_type |= IXGBE_SET_FLAG(tx_flags, IXGBE_TX_FLAGS_TSTAMP,
8131 IXGBE_ADVTXD_MAC_TSTAMP);
8132
8133
8134 cmd_type ^= IXGBE_SET_FLAG(skb->no_fcs, 1, IXGBE_ADVTXD_DCMD_IFCS);
8135
8136 return cmd_type;
8137 }
8138
8139 static void ixgbe_tx_olinfo_status(union ixgbe_adv_tx_desc *tx_desc,
8140 u32 tx_flags, unsigned int paylen)
8141 {
8142 u32 olinfo_status = paylen << IXGBE_ADVTXD_PAYLEN_SHIFT;
8143
8144
8145 olinfo_status |= IXGBE_SET_FLAG(tx_flags,
8146 IXGBE_TX_FLAGS_CSUM,
8147 IXGBE_ADVTXD_POPTS_TXSM);
8148
8149
8150 olinfo_status |= IXGBE_SET_FLAG(tx_flags,
8151 IXGBE_TX_FLAGS_IPV4,
8152 IXGBE_ADVTXD_POPTS_IXSM);
8153
8154
8155 olinfo_status |= IXGBE_SET_FLAG(tx_flags,
8156 IXGBE_TX_FLAGS_IPSEC,
8157 IXGBE_ADVTXD_POPTS_IPSEC);
8158
8159
8160
8161
8162
8163 olinfo_status |= IXGBE_SET_FLAG(tx_flags,
8164 IXGBE_TX_FLAGS_CC,
8165 IXGBE_ADVTXD_CC);
8166
8167 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
8168 }
8169
8170 static int __ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size)
8171 {
8172 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
8173
8174 /* memory barrier: make the stopped state of the queue visible
8175  * before re-reading the free descriptor count, so a concurrent
8176  * ixgbe_clean_tx_irq cannot miss waking the queue back up
8177  */
8178 smp_mb();
8179
8180 /* We need to check again in case another CPU has just
8181  * made room available.
8182  */
8183 if (likely(ixgbe_desc_unused(tx_ring) < size))
8184 return -EBUSY;
8185
8186 /* A reprieve! - use start_queue because it doesn't call schedule */
8187 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
8188 ++tx_ring->tx_stats.restart_queue;
8189 return 0;
8190 }
8191
8192 static inline int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size)
8193 {
8194 if (likely(ixgbe_desc_unused(tx_ring) >= size))
8195 return 0;
8196
8197 return __ixgbe_maybe_stop_tx(tx_ring, size);
8198 }
8199
8200 static int ixgbe_tx_map(struct ixgbe_ring *tx_ring,
8201 struct ixgbe_tx_buffer *first,
8202 const u8 hdr_len)
8203 {
8204 struct sk_buff *skb = first->skb;
8205 struct ixgbe_tx_buffer *tx_buffer;
8206 union ixgbe_adv_tx_desc *tx_desc;
8207 skb_frag_t *frag;
8208 dma_addr_t dma;
8209 unsigned int data_len, size;
8210 u32 tx_flags = first->tx_flags;
8211 u32 cmd_type = ixgbe_tx_cmd_type(skb, tx_flags);
8212 u16 i = tx_ring->next_to_use;
8213
8214 tx_desc = IXGBE_TX_DESC(tx_ring, i);
8215
8216 ixgbe_tx_olinfo_status(tx_desc, tx_flags, skb->len - hdr_len);
8217
8218 size = skb_headlen(skb);
8219 data_len = skb->data_len;
8220
8221 #ifdef IXGBE_FCOE
8222 if (tx_flags & IXGBE_TX_FLAGS_FCOE) {
8223 if (data_len < sizeof(struct fcoe_crc_eof)) {
8224 size -= sizeof(struct fcoe_crc_eof) - data_len;
8225 data_len = 0;
8226 } else {
8227 data_len -= sizeof(struct fcoe_crc_eof);
8228 }
8229 }
8230
8231 #endif
8232 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
8233
8234 tx_buffer = first;
8235
8236 for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
8237 if (dma_mapping_error(tx_ring->dev, dma))
8238 goto dma_error;
8239
8240
8241 dma_unmap_len_set(tx_buffer, len, size);
8242 dma_unmap_addr_set(tx_buffer, dma, dma);
8243
8244 tx_desc->read.buffer_addr = cpu_to_le64(dma);
8245
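/* fragments larger than IXGBE_MAX_DATA_PER_TXD are split across
 * multiple data descriptors
 */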
8246 while (unlikely(size > IXGBE_MAX_DATA_PER_TXD)) {
8247 tx_desc->read.cmd_type_len =
8248 cpu_to_le32(cmd_type ^ IXGBE_MAX_DATA_PER_TXD);
8249
8250 i++;
8251 tx_desc++;
8252 if (i == tx_ring->count) {
8253 tx_desc = IXGBE_TX_DESC(tx_ring, 0);
8254 i = 0;
8255 }
8256 tx_desc->read.olinfo_status = 0;
8257
8258 dma += IXGBE_MAX_DATA_PER_TXD;
8259 size -= IXGBE_MAX_DATA_PER_TXD;
8260
8261 tx_desc->read.buffer_addr = cpu_to_le64(dma);
8262 }
8263
8264 if (likely(!data_len))
8265 break;
8266
8267 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size);
8268
8269 i++;
8270 tx_desc++;
8271 if (i == tx_ring->count) {
8272 tx_desc = IXGBE_TX_DESC(tx_ring, 0);
8273 i = 0;
8274 }
8275 tx_desc->read.olinfo_status = 0;
8276
8277 #ifdef IXGBE_FCOE
8278 size = min_t(unsigned int, data_len, skb_frag_size(frag));
8279 #else
8280 size = skb_frag_size(frag);
8281 #endif
8282 data_len -= size;
8283
8284 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
8285 DMA_TO_DEVICE);
8286
8287 tx_buffer = &tx_ring->tx_buffer_info[i];
8288 }
8289
8290
8291 cmd_type |= size | IXGBE_TXD_CMD;
8292 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
8293
8294 netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
8295
8296
8297 first->time_stamp = jiffies;
8298
8299 skb_tx_timestamp(skb);
8300
8301 /*
8302  * Force memory writes to complete before letting h/w know there
8303  * are new descriptors to fetch.  (Only applicable for weak-ordered
8304  * memory model archs, such as IA-64).
8305  *
8306  * We also need this memory barrier to make certain all of the
8307  * status bits have been updated before next_to_watch is written.
8308  */
8309 wmb();
8310
8311
8312 first->next_to_watch = tx_desc;
8313
8314 i++;
8315 if (i == tx_ring->count)
8316 i = 0;
8317
8318 tx_ring->next_to_use = i;
8319
8320 ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED);
8321
8322 if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {
8323 writel(i, tx_ring->tail);
8324 }
8325
8326 return 0;
8327 dma_error:
8328 dev_err(tx_ring->dev, "TX DMA map failed\n");
8329
8330 /* clear dma mappings for failed tx_buffer_info map */
8331 for (;;) {
8332 tx_buffer = &tx_ring->tx_buffer_info[i];
8333 if (dma_unmap_len(tx_buffer, len))
8334 dma_unmap_page(tx_ring->dev,
8335 dma_unmap_addr(tx_buffer, dma),
8336 dma_unmap_len(tx_buffer, len),
8337 DMA_TO_DEVICE);
8338 dma_unmap_len_set(tx_buffer, len, 0);
8339 if (tx_buffer == first)
8340 break;
8341 if (i == 0)
8342 i += tx_ring->count;
8343 i--;
8344 }
8345
8346 dev_kfree_skb_any(first->skb);
8347 first->skb = NULL;
8348
8349 tx_ring->next_to_use = i;
8350
8351 return -1;
8352 }
8353
8354 static void ixgbe_atr(struct ixgbe_ring *ring,
8355 struct ixgbe_tx_buffer *first)
8356 {
8357 struct ixgbe_q_vector *q_vector = ring->q_vector;
8358 union ixgbe_atr_hash_dword input = { .dword = 0 };
8359 union ixgbe_atr_hash_dword common = { .dword = 0 };
8360 union {
8361 unsigned char *network;
8362 struct iphdr *ipv4;
8363 struct ipv6hdr *ipv6;
8364 } hdr;
8365 struct tcphdr *th;
8366 unsigned int hlen;
8367 struct sk_buff *skb;
8368 __be16 vlan_id;
8369 int l4_proto;
8370
8371
8372 if (!q_vector)
8373 return;
8374
8375
8376 if (!ring->atr_sample_rate)
8377 return;
8378
8379 ring->atr_count++;
8380
8381
8382 if ((first->protocol != htons(ETH_P_IP)) &&
8383 (first->protocol != htons(ETH_P_IPV6)))
8384 return;
8385
8386
8387 skb = first->skb;
8388 hdr.network = skb_network_header(skb);
8389 if (unlikely(hdr.network <= skb->data))
8390 return;
8391 if (skb->encapsulation &&
8392 first->protocol == htons(ETH_P_IP) &&
8393 hdr.ipv4->protocol == IPPROTO_UDP) {
8394 struct ixgbe_adapter *adapter = q_vector->adapter;
8395
8396 if (unlikely(skb_tail_pointer(skb) < hdr.network +
8397 VXLAN_HEADROOM))
8398 return;
8399
8400
8401 if (adapter->vxlan_port &&
8402 udp_hdr(skb)->dest == adapter->vxlan_port)
8403 hdr.network = skb_inner_network_header(skb);
8404
8405 if (adapter->geneve_port &&
8406 udp_hdr(skb)->dest == adapter->geneve_port)
8407 hdr.network = skb_inner_network_header(skb);
8408 }
8409
8410
8411
8412
8413 if (unlikely(skb_tail_pointer(skb) < hdr.network + 40))
8414 return;
8415
8416
8417 switch (hdr.ipv4->version) {
8418 case IPVERSION:
8419
8420 hlen = (hdr.network[0] & 0x0F) << 2;
8421 l4_proto = hdr.ipv4->protocol;
8422 break;
8423 case 6:
8424 hlen = hdr.network - skb->data;
8425 l4_proto = ipv6_find_hdr(skb, &hlen, IPPROTO_TCP, NULL, NULL);
8426 hlen -= hdr.network - skb->data;
8427 break;
8428 default:
8429 return;
8430 }
8431
8432 if (l4_proto != IPPROTO_TCP)
8433 return;
8434
8435 if (unlikely(skb_tail_pointer(skb) < hdr.network +
8436 hlen + sizeof(struct tcphdr)))
8437 return;
8438
8439 th = (struct tcphdr *)(hdr.network + hlen);
8440
8441
8442 if (th->fin)
8443 return;
8444
8445
8446 if (!th->syn && (ring->atr_count < ring->atr_sample_rate))
8447 return;
8448
8449
8450 ring->atr_count = 0;
8451
8452 vlan_id = htons(first->tx_flags >> IXGBE_TX_FLAGS_VLAN_SHIFT);
8453 /*
8454  * Build the ATR signature filter inputs.  Note that source and
8455  * destination are deliberately swapped below: the filter is
8456  * programmed from the transmit path but has to match the same
8457  * flow on receive, where addresses and ports appear reversed.
8458  * The remaining fields are XOR-folded into the compressed
8459  * signature dwords.
8460  */
8461 input.formatted.vlan_id = vlan_id;
8462
8463
8464
8465
8466
8467 if (first->tx_flags & (IXGBE_TX_FLAGS_SW_VLAN | IXGBE_TX_FLAGS_HW_VLAN))
8468 common.port.src ^= th->dest ^ htons(ETH_P_8021Q);
8469 else
8470 common.port.src ^= th->dest ^ first->protocol;
8471 common.port.dst ^= th->source;
8472
8473 switch (hdr.ipv4->version) {
8474 case IPVERSION:
8475 input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
8476 common.ip ^= hdr.ipv4->saddr ^ hdr.ipv4->daddr;
8477 break;
8478 case 6:
8479 input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV6;
8480 common.ip ^= hdr.ipv6->saddr.s6_addr32[0] ^
8481 hdr.ipv6->saddr.s6_addr32[1] ^
8482 hdr.ipv6->saddr.s6_addr32[2] ^
8483 hdr.ipv6->saddr.s6_addr32[3] ^
8484 hdr.ipv6->daddr.s6_addr32[0] ^
8485 hdr.ipv6->daddr.s6_addr32[1] ^
8486 hdr.ipv6->daddr.s6_addr32[2] ^
8487 hdr.ipv6->daddr.s6_addr32[3];
8488 break;
8489 default:
8490 break;
8491 }
8492
8493 if (hdr.network != skb_network_header(skb))
8494 input.formatted.flow_type |= IXGBE_ATR_L4TYPE_TUNNEL_MASK;
8495
8496
8497 ixgbe_fdir_add_signature_filter_82599(&q_vector->adapter->hw,
8498 input, common, ring->queue_index);
8499 }
8500
8501 #ifdef IXGBE_FCOE
8502 static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
8503 struct net_device *sb_dev)
8504 {
8505 struct ixgbe_adapter *adapter;
8506 struct ixgbe_ring_feature *f;
8507 int txq;
8508
8509 if (sb_dev) {
8510 u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
8511 struct net_device *vdev = sb_dev;
8512
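/* for a subordinate (macvlan offload) device, pick a queue from that
 * device's range for the skb's traffic class, spread by flow hash
 */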
8513 txq = vdev->tc_to_txq[tc].offset;
8514 txq += reciprocal_scale(skb_get_hash(skb),
8515 vdev->tc_to_txq[tc].count);
8516
8517 return txq;
8518 }
8519
8520 /*
8521  * only execute the code below if protocol is FCoE
8522  * or FIP and we have FCoE enabled on the adapter
8523  */
8524 switch (vlan_get_protocol(skb)) {
8525 case htons(ETH_P_FCOE):
8526 case htons(ETH_P_FIP):
8527 adapter = netdev_priv(dev);
8528
8529 if (!sb_dev && (adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
8530 break;
8531
8532 default:
8533 return netdev_pick_tx(dev, skb, sb_dev);
8534 }
8535
8536 f = &adapter->ring_feature[RING_F_FCOE];
8537
8538 txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) :
8539 smp_processor_id();
8540
8541 while (txq >= f->indices)
8542 txq -= f->indices;
8543
8544 return txq + f->offset;
8545 }
8546
8547 #endif
8548 int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter,
8549 struct xdp_frame *xdpf)
8550 {
8551 struct ixgbe_ring *ring = adapter->xdp_ring[smp_processor_id()];
8552 struct ixgbe_tx_buffer *tx_buffer;
8553 union ixgbe_adv_tx_desc *tx_desc;
8554 u32 len, cmd_type;
8555 dma_addr_t dma;
8556 u16 i;
8557
8558 len = xdpf->len;
8559
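/* XDP transmit uses the per-CPU XDP ring; if it has no free descriptor
 * the frame is reported as consumed (dropped)
 */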
8560 if (unlikely(!ixgbe_desc_unused(ring)))
8561 return IXGBE_XDP_CONSUMED;
8562
8563 dma = dma_map_single(ring->dev, xdpf->data, len, DMA_TO_DEVICE);
8564 if (dma_mapping_error(ring->dev, dma))
8565 return IXGBE_XDP_CONSUMED;
8566
8567
8568 tx_buffer = &ring->tx_buffer_info[ring->next_to_use];
8569 tx_buffer->bytecount = len;
8570 tx_buffer->gso_segs = 1;
8571 tx_buffer->protocol = 0;
8572
8573 i = ring->next_to_use;
8574 tx_desc = IXGBE_TX_DESC(ring, i);
8575
8576 dma_unmap_len_set(tx_buffer, len, len);
8577 dma_unmap_addr_set(tx_buffer, dma, dma);
8578 tx_buffer->xdpf = xdpf;
8579
8580 tx_desc->read.buffer_addr = cpu_to_le64(dma);
8581
8582
8583 cmd_type = IXGBE_ADVTXD_DTYP_DATA |
8584 IXGBE_ADVTXD_DCMD_DEXT |
8585 IXGBE_ADVTXD_DCMD_IFCS;
8586 cmd_type |= len | IXGBE_TXD_CMD;
8587 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
8588 tx_desc->read.olinfo_status =
8589 cpu_to_le32(len << IXGBE_ADVTXD_PAYLEN_SHIFT);
8590
8591 /* ensure the descriptor writes are visible before next_to_watch and next_to_use are updated */
8592 smp_wmb();
8593
8594
8595 i++;
8596 if (i == ring->count)
8597 i = 0;
8598
8599 tx_buffer->next_to_watch = tx_desc;
8600 ring->next_to_use = i;
8601
8602 return IXGBE_XDP_TX;
8603 }
8604
8605 netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
8606 struct ixgbe_adapter *adapter,
8607 struct ixgbe_ring *tx_ring)
8608 {
8609 struct ixgbe_tx_buffer *first;
8610 int tso;
8611 u32 tx_flags = 0;
8612 unsigned short f;
8613 u16 count = TXD_USE_COUNT(skb_headlen(skb));
8614 struct ixgbe_ipsec_tx_data ipsec_tx = { 0 };
8615 __be16 protocol = skb->protocol;
8616 u8 hdr_len = 0;
8617
8618 /*
8619  * need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD,
8620  *       + 1 desc for skb_headlen/IXGBE_MAX_DATA_PER_TXD,
8621  *       + 2 desc gap to keep tail from touching head,
8622  *       + 1 desc for context descriptor,
8623  * otherwise try next time
8624  */
8625 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
8626 count += TXD_USE_COUNT(skb_frag_size(
8627 &skb_shinfo(skb)->frags[f]));
8628
8629 if (ixgbe_maybe_stop_tx(tx_ring, count + 3)) {
8630 tx_ring->tx_stats.tx_busy++;
8631 return NETDEV_TX_BUSY;
8632 }
8633
8634
8635 first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
8636 first->skb = skb;
8637 first->bytecount = skb->len;
8638 first->gso_segs = 1;
8639
8640
8641 if (skb_vlan_tag_present(skb)) {
8642 tx_flags |= skb_vlan_tag_get(skb) << IXGBE_TX_FLAGS_VLAN_SHIFT;
8643 tx_flags |= IXGBE_TX_FLAGS_HW_VLAN;
8644
8645 } else if (protocol == htons(ETH_P_8021Q)) {
8646 struct vlan_hdr *vhdr, _vhdr;
8647 vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
8648 if (!vhdr)
8649 goto out_drop;
8650
8651 tx_flags |= ntohs(vhdr->h_vlan_TCI) <<
8652 IXGBE_TX_FLAGS_VLAN_SHIFT;
8653 tx_flags |= IXGBE_TX_FLAGS_SW_VLAN;
8654 }
8655 protocol = vlan_get_protocol(skb);
8656
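/* request a hardware Tx timestamp only when PTP is active and no other
 * timestamp request is already in flight; otherwise count it as skipped
 */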
8657 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
8658 adapter->ptp_clock) {
8659 if (adapter->tstamp_config.tx_type == HWTSTAMP_TX_ON &&
8660 !test_and_set_bit_lock(__IXGBE_PTP_TX_IN_PROGRESS,
8661 &adapter->state)) {
8662 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
8663 tx_flags |= IXGBE_TX_FLAGS_TSTAMP;
8664
8665
8666 adapter->ptp_tx_skb = skb_get(skb);
8667 adapter->ptp_tx_start = jiffies;
8668 schedule_work(&adapter->ptp_tx_work);
8669 } else {
8670 adapter->tx_hwtstamp_skipped++;
8671 }
8672 }
8673
8674 #ifdef CONFIG_PCI_IOV
8675
8676
8677
8678
8679 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
8680 tx_flags |= IXGBE_TX_FLAGS_CC;
8681
8682 #endif
8683
8684 if ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) &&
8685 ((tx_flags & (IXGBE_TX_FLAGS_HW_VLAN | IXGBE_TX_FLAGS_SW_VLAN)) ||
8686 (skb->priority != TC_PRIO_CONTROL))) {
8687 tx_flags &= ~IXGBE_TX_FLAGS_VLAN_PRIO_MASK;
8688 tx_flags |= (skb->priority & 0x7) <<
8689 IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT;
8690 if (tx_flags & IXGBE_TX_FLAGS_SW_VLAN) {
8691 struct vlan_ethhdr *vhdr;
8692
8693 if (skb_cow_head(skb, 0))
8694 goto out_drop;
8695 vhdr = (struct vlan_ethhdr *)skb->data;
8696 vhdr->h_vlan_TCI = htons(tx_flags >>
8697 IXGBE_TX_FLAGS_VLAN_SHIFT);
8698 } else {
8699 tx_flags |= IXGBE_TX_FLAGS_HW_VLAN;
8700 }
8701 }
8702
8703
8704 first->tx_flags = tx_flags;
8705 first->protocol = protocol;
8706
8707 #ifdef IXGBE_FCOE
8708
8709 if ((protocol == htons(ETH_P_FCOE)) &&
8710 (tx_ring->netdev->features & (NETIF_F_FSO | NETIF_F_FCOE_CRC))) {
8711 tso = ixgbe_fso(tx_ring, first, &hdr_len);
8712 if (tso < 0)
8713 goto out_drop;
8714
8715 goto xmit_fcoe;
8716 }
8717
8718 #endif
8719
8720 #ifdef CONFIG_IXGBE_IPSEC
8721 if (xfrm_offload(skb) &&
8722 !ixgbe_ipsec_tx(tx_ring, first, &ipsec_tx))
8723 goto out_drop;
8724 #endif
8725 tso = ixgbe_tso(tx_ring, first, &hdr_len, &ipsec_tx);
8726 if (tso < 0)
8727 goto out_drop;
8728 else if (!tso)
8729 ixgbe_tx_csum(tx_ring, first, &ipsec_tx);
8730
8731
8732 if (test_bit(__IXGBE_TX_FDIR_INIT_DONE, &tx_ring->state))
8733 ixgbe_atr(tx_ring, first);
8734
8735 #ifdef IXGBE_FCOE
8736 xmit_fcoe:
8737 #endif
8738 if (ixgbe_tx_map(tx_ring, first, hdr_len))
8739 goto cleanup_tx_timestamp;
8740
8741 return NETDEV_TX_OK;
8742
8743 out_drop:
8744 dev_kfree_skb_any(first->skb);
8745 first->skb = NULL;
8746 cleanup_tx_timestamp:
8747 if (unlikely(tx_flags & IXGBE_TX_FLAGS_TSTAMP)) {
8748 dev_kfree_skb_any(adapter->ptp_tx_skb);
8749 adapter->ptp_tx_skb = NULL;
8750 cancel_work_sync(&adapter->ptp_tx_work);
8751 clear_bit_unlock(__IXGBE_PTP_TX_IN_PROGRESS, &adapter->state);
8752 }
8753
8754 return NETDEV_TX_OK;
8755 }
8756
8757 static netdev_tx_t __ixgbe_xmit_frame(struct sk_buff *skb,
8758 struct net_device *netdev,
8759 struct ixgbe_ring *ring)
8760 {
8761 struct ixgbe_adapter *adapter = netdev_priv(netdev);
8762 struct ixgbe_ring *tx_ring;
8763
8764 /*
8765  * The minimum packet size for olinfo paylen is 17 so pad the skb
8766  * in order to meet this minimum size requirement.
8767  */
8768 if (skb_put_padto(skb, 17))
8769 return NETDEV_TX_OK;
8770
8771 tx_ring = ring ? ring : adapter->tx_ring[skb_get_queue_mapping(skb)];
8772 if (unlikely(test_bit(__IXGBE_TX_DISABLED, &tx_ring->state)))
8773 return NETDEV_TX_BUSY;
8774
8775 return ixgbe_xmit_frame_ring(skb, adapter, tx_ring);
8776 }
8777
8778 static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
8779 struct net_device *netdev)
8780 {
8781 return __ixgbe_xmit_frame(skb, netdev, NULL);
8782 }
8783
8784 /**
8785  * ixgbe_set_mac - Change the Ethernet Address of the NIC
8786  * @netdev: network interface device structure
8787  * @p: pointer to an address structure
8788  *
8789  * Returns 0 on success, negative on failure
8790  **/
8791 static int ixgbe_set_mac(struct net_device *netdev, void *p)
8792 {
8793 struct ixgbe_adapter *adapter = netdev_priv(netdev);
8794 struct ixgbe_hw *hw = &adapter->hw;
8795 struct sockaddr *addr = p;
8796
8797 if (!is_valid_ether_addr(addr->sa_data))
8798 return -EADDRNOTAVAIL;
8799
8800 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
8801 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
8802
8803 ixgbe_mac_set_default_filter(adapter);
8804
8805 return 0;
8806 }
8807
8808 static int
8809 ixgbe_mdio_read(struct net_device *netdev, int prtad, int devad, u16 addr)
8810 {
8811 struct ixgbe_adapter *adapter = netdev_priv(netdev);
8812 struct ixgbe_hw *hw = &adapter->hw;
8813 u16 value;
8814 int rc;
8815
8816 if (adapter->mii_bus) {
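/* a registered MDIO bus takes precedence; clause 45 accesses encode
 * the device address via MII_ADDR_C45
 */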
8817 int regnum = addr;
8818
8819 if (devad != MDIO_DEVAD_NONE)
8820 regnum |= (devad << 16) | MII_ADDR_C45;
8821
8822 return mdiobus_read(adapter->mii_bus, prtad, regnum);
8823 }
8824
8825 if (prtad != hw->phy.mdio.prtad)
8826 return -EINVAL;
8827 rc = hw->phy.ops.read_reg(hw, addr, devad, &value);
8828 if (!rc)
8829 rc = value;
8830 return rc;
8831 }
8832
8833 static int ixgbe_mdio_write(struct net_device *netdev, int prtad, int devad,
8834 u16 addr, u16 value)
8835 {
8836 struct ixgbe_adapter *adapter = netdev_priv(netdev);
8837 struct ixgbe_hw *hw = &adapter->hw;
8838
8839 if (adapter->mii_bus) {
8840 int regnum = addr;
8841
8842 if (devad != MDIO_DEVAD_NONE)
8843 regnum |= (devad << 16) | MII_ADDR_C45;
8844
8845 return mdiobus_write(adapter->mii_bus, prtad, regnum, value);
8846 }
8847
8848 if (prtad != hw->phy.mdio.prtad)
8849 return -EINVAL;
8850 return hw->phy.ops.write_reg(hw, addr, devad, value);
8851 }
8852
8853 static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
8854 {
8855 struct ixgbe_adapter *adapter = netdev_priv(netdev);
8856
8857 switch (cmd) {
8858 case SIOCSHWTSTAMP:
8859 return ixgbe_ptp_set_ts_config(adapter, req);
8860 case SIOCGHWTSTAMP:
8861 return ixgbe_ptp_get_ts_config(adapter, req);
8862 case SIOCGMIIPHY:
8863 if (!adapter->hw.phy.ops.read_reg)
8864 return -EOPNOTSUPP;
8865 /* fall through */
8866 default:
8867 return mdio_mii_ioctl(&adapter->hw.phy.mdio, if_mii(req), cmd);
8868 }
8869 }
8870
8871 /**
8872  * ixgbe_add_sanmac_netdev - Add the SAN MAC address to the corresponding
8873  * netdev->dev_addrs
8874  * @dev: network interface device structure
8875  *
8876  * Returns non-zero on failure
8877  **/
8878 static int ixgbe_add_sanmac_netdev(struct net_device *dev)
8879 {
8880 int err = 0;
8881 struct ixgbe_adapter *adapter = netdev_priv(dev);
8882 struct ixgbe_hw *hw = &adapter->hw;
8883
8884 if (is_valid_ether_addr(hw->mac.san_addr)) {
8885 rtnl_lock();
8886 err = dev_addr_add(dev, hw->mac.san_addr, NETDEV_HW_ADDR_T_SAN);
8887 rtnl_unlock();
8888
8889
8890 hw->mac.ops.set_vmdq_san_mac(hw, VMDQ_P(0));
8891 }
8892 return err;
8893 }
8894
8895 /**
8896  * ixgbe_del_sanmac_netdev - Removes the SAN MAC address from the corresponding
8897  * netdev->dev_addrs
8898  * @dev: network interface device structure
8899  *
8900  * Returns non-zero on failure
8901  **/
8902 static int ixgbe_del_sanmac_netdev(struct net_device *dev)
8903 {
8904 int err = 0;
8905 struct ixgbe_adapter *adapter = netdev_priv(dev);
8906 struct ixgbe_mac_info *mac = &adapter->hw.mac;
8907
8908 if (is_valid_ether_addr(mac->san_addr)) {
8909 rtnl_lock();
8910 err = dev_addr_del(dev, mac->san_addr, NETDEV_HW_ADDR_T_SAN);
8911 rtnl_unlock();
8912 }
8913 return err;
8914 }
8915
8916 static void ixgbe_get_ring_stats64(struct rtnl_link_stats64 *stats,
8917 struct ixgbe_ring *ring)
8918 {
8919 u64 bytes, packets;
8920 unsigned int start;
8921
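/* read the ring counters under the u64_stats seqcount so 32-bit readers
 * get a consistent 64-bit snapshot
 */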
8922 if (ring) {
8923 do {
8924 start = u64_stats_fetch_begin_irq(&ring->syncp);
8925 packets = ring->stats.packets;
8926 bytes = ring->stats.bytes;
8927 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
8928 stats->tx_packets += packets;
8929 stats->tx_bytes += bytes;
8930 }
8931 }
8932
8933 static void ixgbe_get_stats64(struct net_device *netdev,
8934 struct rtnl_link_stats64 *stats)
8935 {
8936 struct ixgbe_adapter *adapter = netdev_priv(netdev);
8937 int i;
8938
8939 rcu_read_lock();
8940 for (i = 0; i < adapter->num_rx_queues; i++) {
8941 struct ixgbe_ring *ring = READ_ONCE(adapter->rx_ring[i]);
8942 u64 bytes, packets;
8943 unsigned int start;
8944
8945 if (ring) {
8946 do {
8947 start = u64_stats_fetch_begin_irq(&ring->syncp);
8948 packets = ring->stats.packets;
8949 bytes = ring->stats.bytes;
8950 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
8951 stats->rx_packets += packets;
8952 stats->rx_bytes += bytes;
8953 }
8954 }
8955
8956 for (i = 0; i < adapter->num_tx_queues; i++) {
8957 struct ixgbe_ring *ring = READ_ONCE(adapter->tx_ring[i]);
8958
8959 ixgbe_get_ring_stats64(stats, ring);
8960 }
8961 for (i = 0; i < adapter->num_xdp_queues; i++) {
8962 struct ixgbe_ring *ring = READ_ONCE(adapter->xdp_ring[i]);
8963
8964 ixgbe_get_ring_stats64(stats, ring);
8965 }
8966 rcu_read_unlock();
8967
8968 /* following stats updated by ixgbe_watchdog_task() */
8969 stats->multicast = netdev->stats.multicast;
8970 stats->rx_errors = netdev->stats.rx_errors;
8971 stats->rx_length_errors = netdev->stats.rx_length_errors;
8972 stats->rx_crc_errors = netdev->stats.rx_crc_errors;
8973 stats->rx_missed_errors = netdev->stats.rx_missed_errors;
8974 }
8975
8976 #ifdef CONFIG_IXGBE_DCB
8977 /**
8978  * ixgbe_validate_rtr - verify 802.1Qp to Rx packet buffer mapping is valid
8979  * @adapter: Pointer to adapter struct
8980  * @tc: number of traffic classes currently enabled
8981  *
8982  * Configure a valid 802.1Qp to Rx packet buffer mapping, i.e. confirm
8983  * that each 802.1Q priority maps to a packet buffer that exists.
8984  */
8985 static void ixgbe_validate_rtr(struct ixgbe_adapter *adapter, u8 tc)
8986 {
8987 struct ixgbe_hw *hw = &adapter->hw;
8988 u32 reg, rsave;
8989 int i;
8990
8991 /* 82598 have a static priority to TC mapping that can not
8992  * be changed so no validation is needed.
8993  */
8994 if (hw->mac.type == ixgbe_mac_82598EB)
8995 return;
8996
8997 reg = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC);
8998 rsave = reg;
8999
9000 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
9001 u8 up2tc = reg >> (i * IXGBE_RTRUP2TC_UP_SHIFT);
9002
9003
9004 if (up2tc > tc)
9005 reg &= ~(0x7 << IXGBE_RTRUP2TC_UP_SHIFT);
9006 }
9007
9008 if (reg != rsave)
9009 IXGBE_WRITE_REG(hw, IXGBE_RTRUP2TC, reg);
9010
9011 return;
9012 }
9013
9014 /**
9015  * ixgbe_set_prio_tc_map - Configure netdev prio tc map
9016  * @adapter: Pointer to adapter struct
9017  *
9018  * Populate the netdev user priority to tc map
9019  */
9020 static void ixgbe_set_prio_tc_map(struct ixgbe_adapter *adapter)
9021 {
9022 struct net_device *dev = adapter->netdev;
9023 struct ixgbe_dcb_config *dcb_cfg = &adapter->dcb_cfg;
9024 struct ieee_ets *ets = adapter->ixgbe_ieee_ets;
9025 u8 prio;
9026
9027 for (prio = 0; prio < MAX_USER_PRIORITY; prio++) {
9028 u8 tc = 0;
9029
9030 if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE)
9031 tc = ixgbe_dcb_get_tc_from_up(dcb_cfg, 0, prio);
9032 else if (ets)
9033 tc = ets->prio_tc[prio];
9034
9035 netdev_set_prio_tc_map(dev, prio, tc);
9036 }
9037 }
9038
9039 #endif
9040 static int ixgbe_reassign_macvlan_pool(struct net_device *vdev, void *data)
9041 {
9042 struct ixgbe_adapter *adapter = data;
9043 struct ixgbe_fwd_adapter *accel;
9044 int pool;
9045
9046
9047 if (!netif_is_macvlan(vdev))
9048 return 0;
9049
9050
9051 accel = macvlan_accel_priv(vdev);
9052 if (!accel)
9053 return 0;
9054
9055
9056 pool = find_first_zero_bit(adapter->fwd_bitmask, adapter->num_rx_pools);
9057 if (pool < adapter->num_rx_pools) {
9058 set_bit(pool, adapter->fwd_bitmask);
9059 accel->pool = pool;
9060 return 0;
9061 }
9062
9063
9064 netdev_err(vdev, "L2FW offload disabled due to lack of queue resources\n");
9065 macvlan_release_l2fw_offload(vdev);
9066
9067
9068 netdev_unbind_sb_channel(adapter->netdev, vdev);
9069 netdev_set_sb_channel(vdev, 0);
9070
9071 kfree(accel);
9072
9073 return 0;
9074 }
9075
9076 static void ixgbe_defrag_macvlan_pools(struct net_device *dev)
9077 {
9078 struct ixgbe_adapter *adapter = netdev_priv(dev);
9079
9080
9081 bitmap_clear(adapter->fwd_bitmask, 1, 63);
9082
9083
9084 netdev_walk_all_upper_dev_rcu(dev, ixgbe_reassign_macvlan_pool,
9085 adapter);
9086 }
9087
9088 /**
9089  * ixgbe_setup_tc - configure net_device for multiple traffic classes
9090  *
9091  * @dev: net device to configure
9092  * @tc: number of traffic classes to enable
9093  */
9094 int ixgbe_setup_tc(struct net_device *dev, u8 tc)
9095 {
9096 struct ixgbe_adapter *adapter = netdev_priv(dev);
9097 struct ixgbe_hw *hw = &adapter->hw;
9098
9099 /* Hardware supports up to 8 traffic classes */
9100 if (tc > adapter->dcb_cfg.num_tcs.pg_tcs)
9101 return -EINVAL;
9102
9103 if (hw->mac.type == ixgbe_mac_82598EB && tc && tc < MAX_TRAFFIC_CLASS)
9104 return -EINVAL;
9105
9106 /* Hardware has to reinitialize queues and interrupts to
9107  * match packet buffer alignment. Unfortunately, the
9108  * hardware is not flexible enough to do this dynamically.
9109  */
9110 if (netif_running(dev))
9111 ixgbe_close(dev);
9112 else
9113 ixgbe_reset(adapter);
9114
9115 ixgbe_clear_interrupt_scheme(adapter);
9116
9117 #ifdef CONFIG_IXGBE_DCB
9118 if (tc) {
9119 if (adapter->xdp_prog) {
9120 e_warn(probe, "DCB is not supported with XDP\n");
9121
9122 ixgbe_init_interrupt_scheme(adapter);
9123 if (netif_running(dev))
9124 ixgbe_open(dev);
9125 return -EINVAL;
9126 }
9127
9128 netdev_set_num_tc(dev, tc);
9129 ixgbe_set_prio_tc_map(adapter);
9130
9131 adapter->hw_tcs = tc;
9132 adapter->flags |= IXGBE_FLAG_DCB_ENABLED;
9133
9134 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
9135 adapter->last_lfc_mode = adapter->hw.fc.requested_mode;
9136 adapter->hw.fc.requested_mode = ixgbe_fc_none;
9137 }
9138 } else {
9139 netdev_reset_tc(dev);
9140
9141 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
9142 adapter->hw.fc.requested_mode = adapter->last_lfc_mode;
9143
9144 adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
9145 adapter->hw_tcs = tc;
9146
9147 adapter->temp_dcb_cfg.pfc_mode_enable = false;
9148 adapter->dcb_cfg.pfc_mode_enable = false;
9149 }
9150
9151 ixgbe_validate_rtr(adapter, tc);
9152
9153 #endif
9154 ixgbe_init_interrupt_scheme(adapter);
9155
9156 ixgbe_defrag_macvlan_pools(dev);
9157
9158 if (netif_running(dev))
9159 return ixgbe_open(dev);
9160
9161 return 0;
9162 }
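/* ixgbe_setup_tc() is reached from the mqprio qdisc as well as the DCB and
 * L2FW-offload paths.  An illustrative (not driver-specific) way to request
 * four hardware traffic classes from user space is:
 *
 *	tc qdisc add dev eth0 root mqprio num_tc 4 \
 *		map 0 0 1 1 2 2 3 3 queues 4@0 4@4 4@8 4@12 hw 1
 *
 * which lands here via __ixgbe_setup_tc()/ixgbe_setup_tc_mqprio() with
 * mqprio->num_tc == 4.  The interface name and the queue counts/offsets
 * shown are only an example.
 */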
9163
9164 static int ixgbe_delete_clsu32(struct ixgbe_adapter *adapter,
9165 struct tc_cls_u32_offload *cls)
9166 {
9167 u32 hdl = cls->knode.handle;
9168 u32 uhtid = TC_U32_USERHTID(cls->knode.handle);
9169 u32 loc = cls->knode.handle & 0xfffff;
9170 int err = 0, i, j;
9171 struct ixgbe_jump_table *jump = NULL;
9172
9173 if (loc > IXGBE_MAX_HW_ENTRIES)
9174 return -EINVAL;
9175
9176 if ((uhtid != 0x800) && (uhtid >= IXGBE_MAX_LINK_HANDLE))
9177 return -EINVAL;
9178
9179 /* Clear this filter in the link data it is associated with */
9180 if (uhtid != 0x800) {
9181 jump = adapter->jump_tables[uhtid];
9182 if (!jump)
9183 return -EINVAL;
9184 if (!test_bit(loc - 1, jump->child_loc_map))
9185 return -EINVAL;
9186 clear_bit(loc - 1, jump->child_loc_map);
9187 }
9188
9189 /* Check if the filter being deleted is a link to another table */
9190 for (i = 1; i < IXGBE_MAX_LINK_HANDLE; i++) {
9191 jump = adapter->jump_tables[i];
9192 if (jump && jump->link_hdl == hdl) {
9193 /* Delete the filters in the hardware in the child hash
9194  * table associated with this link
9195  */
9196 for (j = 0; j < IXGBE_MAX_HW_ENTRIES; j++) {
9197 if (!test_bit(j, jump->child_loc_map))
9198 continue;
9199 spin_lock(&adapter->fdir_perfect_lock);
9200 err = ixgbe_update_ethtool_fdir_entry(adapter,
9201 NULL,
9202 j + 1);
9203 spin_unlock(&adapter->fdir_perfect_lock);
9204 clear_bit(j, jump->child_loc_map);
9205 }
9206 /* Remove the software resources for this link */
9207 kfree(jump->input);
9208 kfree(jump->mask);
9209 kfree(jump);
9210 adapter->jump_tables[i] = NULL;
9211 return err;
9212 }
9213 }
9214
9215 spin_lock(&adapter->fdir_perfect_lock);
9216 err = ixgbe_update_ethtool_fdir_entry(adapter, NULL, loc);
9217 spin_unlock(&adapter->fdir_perfect_lock);
9218 return err;
9219 }
9220
9221 static int ixgbe_configure_clsu32_add_hnode(struct ixgbe_adapter *adapter,
9222 struct tc_cls_u32_offload *cls)
9223 {
9224 u32 uhtid = TC_U32_USERHTID(cls->hnode.handle);
9225
9226 if (uhtid >= IXGBE_MAX_LINK_HANDLE)
9227 return -EINVAL;
9228
9229 /* ixgbe does not support cls_u32 hash tables at the moment,
9230  * so abort when a non-zero divisor (a real hash table) is requested.
9231  */
9232 if (cls->hnode.divisor > 0)
9233 return -EINVAL;
9234
9235 set_bit(uhtid - 1, &adapter->tables);
9236 return 0;
9237 }
9238
9239 static int ixgbe_configure_clsu32_del_hnode(struct ixgbe_adapter *adapter,
9240 struct tc_cls_u32_offload *cls)
9241 {
9242 u32 uhtid = TC_U32_USERHTID(cls->hnode.handle);
9243
9244 if (uhtid >= IXGBE_MAX_LINK_HANDLE)
9245 return -EINVAL;
9246
9247 clear_bit(uhtid - 1, &adapter->tables);
9248 return 0;
9249 }
9250
9251 #ifdef CONFIG_NET_CLS_ACT
9252 struct upper_walk_data {
9253 struct ixgbe_adapter *adapter;
9254 u64 action;
9255 int ifindex;
9256 u8 queue;
9257 };
9258
9259 static int get_macvlan_queue(struct net_device *upper, void *_data)
9260 {
9261 if (netif_is_macvlan(upper)) {
9262 struct ixgbe_fwd_adapter *vadapter = macvlan_accel_priv(upper);
9263 struct upper_walk_data *data = _data;
9264 struct ixgbe_adapter *adapter = data->adapter;
9265 int ifindex = data->ifindex;
9266
9267 if (vadapter && upper->ifindex == ifindex) {
9268 data->queue = adapter->rx_ring[vadapter->rx_base_queue]->reg_idx;
9269 data->action = data->queue;
9270 return 1;
9271 }
9272 }
9273
9274 return 0;
9275 }
9276
9277 static int handle_redirect_action(struct ixgbe_adapter *adapter, int ifindex,
9278 u8 *queue, u64 *action)
9279 {
9280 struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
9281 unsigned int num_vfs = adapter->num_vfs, vf;
9282 struct upper_walk_data data;
9283 struct net_device *upper;
9284
9285 /* redirect to a SR-IOV VF if the target is a VF netdev */
9286 for (vf = 0; vf < num_vfs; ++vf) {
9287 upper = pci_get_drvdata(adapter->vfinfo[vf].vfdev);
9288 if (upper->ifindex == ifindex) {
9289 *queue = vf * __ALIGN_MASK(1, ~vmdq->mask);
9290 *action = vf + 1;
9291 *action <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
9292 return 0;
9293 }
9294 }
9295
9296 /* otherwise redirect to an offloaded macvlan netdev */
9297 data.adapter = adapter;
9298 data.ifindex = ifindex;
9299 data.action = 0;
9300 data.queue = 0;
9301 if (netdev_walk_all_upper_dev_rcu(adapter->netdev,
9302 get_macvlan_queue, &data)) {
9303 *action = data.action;
9304 *queue = data.queue;
9305
9306 return 0;
9307 }
9308
9309 return -EINVAL;
9310 }
9311
9312 static int parse_tc_actions(struct ixgbe_adapter *adapter,
9313 struct tcf_exts *exts, u64 *action, u8 *queue)
9314 {
9315 const struct tc_action *a;
9316 int i;
9317
9318 if (!tcf_exts_has_actions(exts))
9319 return -EINVAL;
9320
9321 tcf_exts_for_each_action(i, a, exts) {
9322 /* Drop action */
9323 if (is_tcf_gact_shot(a)) {
9324 *action = IXGBE_FDIR_DROP_QUEUE;
9325 *queue = IXGBE_FDIR_DROP_QUEUE;
9326 return 0;
9327 }
9328
9329 /* Redirect to a VF or an offloaded macvlan */
9330 if (is_tcf_mirred_egress_redirect(a)) {
9331 struct net_device *dev = tcf_mirred_dev(a);
9332
9333 if (!dev)
9334 return -EINVAL;
9335 return handle_redirect_action(adapter, dev->ifindex,
9336 queue, action);
9337 }
9338
9339 return -EINVAL;
9340 }
9341
9342 return -EINVAL;
9343 }
9344 #else
9345 static int parse_tc_actions(struct ixgbe_adapter *adapter,
9346 struct tcf_exts *exts, u64 *action, u8 *queue)
9347 {
9348 return -EINVAL;
9349 }
9350 #endif
9351
9352 static int ixgbe_clsu32_build_input(struct ixgbe_fdir_filter *input,
9353 union ixgbe_atr_input *mask,
9354 struct tc_cls_u32_offload *cls,
9355 struct ixgbe_mat_field *field_ptr,
9356 struct ixgbe_nexthdr *nexthdr)
9357 {
9358 int i, j, off;
9359 __be32 val, m;
9360 bool found_entry = false, found_jump_field = false;
9361
9362 for (i = 0; i < cls->knode.sel->nkeys; i++) {
9363 off = cls->knode.sel->keys[i].off;
9364 val = cls->knode.sel->keys[i].val;
9365 m = cls->knode.sel->keys[i].mask;
9366
9367 for (j = 0; field_ptr[j].val; j++) {
9368 if (field_ptr[j].off == off) {
9369 field_ptr[j].val(input, mask, (__force u32)val,
9370 (__force u32)m);
9371 input->filter.formatted.flow_type |=
9372 field_ptr[j].type;
9373 found_entry = true;
9374 break;
9375 }
9376 }
9377 if (nexthdr) {
9378 if (nexthdr->off == cls->knode.sel->keys[i].off &&
9379 nexthdr->val ==
9380 (__force u32)cls->knode.sel->keys[i].val &&
9381 nexthdr->mask ==
9382 (__force u32)cls->knode.sel->keys[i].mask)
9383 found_jump_field = true;
9384 else
9385 continue;
9386 }
9387 }
9388
9389 if (nexthdr && !found_jump_field)
9390 return -EINVAL;
9391
9392 if (!found_entry)
9393 return 0;
9394
9395 mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK |
9396 IXGBE_ATR_L4TYPE_MASK;
9397
9398 if (input->filter.formatted.flow_type == IXGBE_ATR_FLOW_TYPE_IPV4)
9399 mask->formatted.flow_type &= IXGBE_ATR_L4TYPE_IPV6_MASK;
9400
9401 return 0;
9402 }
9403
9404 static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter,
9405 struct tc_cls_u32_offload *cls)
9406 {
9407 __be16 protocol = cls->common.protocol;
9408 u32 loc = cls->knode.handle & 0xfffff;
9409 struct ixgbe_hw *hw = &adapter->hw;
9410 struct ixgbe_mat_field *field_ptr;
9411 struct ixgbe_fdir_filter *input = NULL;
9412 union ixgbe_atr_input *mask = NULL;
9413 struct ixgbe_jump_table *jump = NULL;
9414 int i, err = -EINVAL;
9415 u8 queue;
9416 u32 uhtid, link_uhtid;
9417
9418 uhtid = TC_U32_USERHTID(cls->knode.handle);
9419 link_uhtid = TC_U32_USERHTID(cls->knode.link_handle);
9420
9421 /* cls_u32 offloading in this driver is restricted to IPv4: the
9422  * hardware parse graph described by ixgbe_model.h only covers
9423  * IPv4/L4 headers, so any other protocol is rejected here.  The
9424  * requested filter location is additionally bounded by the size of
9425  * the Flow Director table that was allocated (fdir_pballoc), less
9426  * the reserved entries.
9427  */
9428 if (protocol != htons(ETH_P_IP))
9429 return err;
9430
9431 if (loc >= ((1024 << adapter->fdir_pballoc) - 2)) {
9432 e_err(drv, "Location out of range\n");
9433 return err;
9434 }
9435
9436 /* cls_u32 is a graph starting at root node 0x800. The driver tracks
9437  * link handles as well as the fields used to advance the parser
9438  * across each link (the nexthdr parameters from 'tc'). This way the
9439  * u32 graph can be mapped onto the hardware parse graph described in
9440  * ixgbe_model.h.  To support new nodes, update the ixgbe_model.h
9441  * parse structures rather than hardcoding values here.
9442  */
9443 if (uhtid == 0x800) {
9444 field_ptr = (adapter->jump_tables[0])->mat;
9445 } else {
9446 if (uhtid >= IXGBE_MAX_LINK_HANDLE)
9447 return err;
9448 if (!adapter->jump_tables[uhtid])
9449 return err;
9450 field_ptr = (adapter->jump_tables[uhtid])->mat;
9451 }
9452
9453 if (!field_ptr)
9454 return err;
9455
9456 /* At this point field_ptr is known to be valid and we either add a
9457  * link to a child hash table (link_uhtid set) or build and program a
9458  * filter in the current table.  Adding a link to a handle that was
9459  * never created, or adding rules to such a handle, is invalid and is
9460  * rejected below.
9461  */
9462 if (link_uhtid) {
9463 struct ixgbe_nexthdr *nexthdr = ixgbe_ipv4_jumps;
9464
9465 if (link_uhtid >= IXGBE_MAX_LINK_HANDLE)
9466 return err;
9467
9468 if (!test_bit(link_uhtid - 1, &adapter->tables))
9469 return err;
9470
9471 /* Only one filter may act as the link to a given child hash
9472  * table.  If a link for this uhtid has already been recorded,
9473  * reject the new one instead of silently replacing it and
9474  * orphaning the filters attached to the old link.
9475  */
9476 if (adapter->jump_tables[link_uhtid] &&
9477 (adapter->jump_tables[link_uhtid])->link_hdl) {
9478 e_err(drv, "Link filter exists for link: %x\n",
9479 link_uhtid);
9480 return err;
9481 }
9482
9483 for (i = 0; nexthdr[i].jump; i++) {
9484 if (nexthdr[i].o != cls->knode.sel->offoff ||
9485 nexthdr[i].s != cls->knode.sel->offshift ||
9486 nexthdr[i].m !=
9487 (__force u32)cls->knode.sel->offmask)
9488 return err;
9489
9490 jump = kzalloc(sizeof(*jump), GFP_KERNEL);
9491 if (!jump)
9492 return -ENOMEM;
9493 input = kzalloc(sizeof(*input), GFP_KERNEL);
9494 if (!input) {
9495 err = -ENOMEM;
9496 goto free_jump;
9497 }
9498 mask = kzalloc(sizeof(*mask), GFP_KERNEL);
9499 if (!mask) {
9500 err = -ENOMEM;
9501 goto free_input;
9502 }
9503 jump->input = input;
9504 jump->mask = mask;
9505 jump->link_hdl = cls->knode.handle;
9506
9507 err = ixgbe_clsu32_build_input(input, mask, cls,
9508 field_ptr, &nexthdr[i]);
9509 if (!err) {
9510 jump->mat = nexthdr[i].jump;
9511 adapter->jump_tables[link_uhtid] = jump;
9512 break;
9513 } else {
9514 kfree(mask);
9515 kfree(input);
9516 kfree(jump);
9517 }
9518 }
9519 return 0;
9520 }
9521
9522 input = kzalloc(sizeof(*input), GFP_KERNEL);
9523 if (!input)
9524 return -ENOMEM;
9525 mask = kzalloc(sizeof(*mask), GFP_KERNEL);
9526 if (!mask) {
9527 err = -ENOMEM;
9528 goto free_input;
9529 }
9530
9531 if ((uhtid != 0x800) && (adapter->jump_tables[uhtid])) {
9532 if ((adapter->jump_tables[uhtid])->input)
9533 memcpy(input, (adapter->jump_tables[uhtid])->input,
9534 sizeof(*input));
9535 if ((adapter->jump_tables[uhtid])->mask)
9536 memcpy(mask, (adapter->jump_tables[uhtid])->mask,
9537 sizeof(*mask));
9538 /* Before reusing this location, look through all of the child
9539  * hash tables to make sure it is not already claimed by a
9540  * filter linked from another table.
9541  */
9542 for (i = 1; i < IXGBE_MAX_LINK_HANDLE; i++) {
9543 struct ixgbe_jump_table *link = adapter->jump_tables[i];
9544
9545 if (link && (test_bit(loc - 1, link->child_loc_map))) {
9546 e_err(drv, "Filter exists in location: %x\n",
9547 loc);
9548 err = -EINVAL;
9549 goto err_out;
9550 }
9551 }
9552 }
9553 err = ixgbe_clsu32_build_input(input, mask, cls, field_ptr, NULL);
9554 if (err)
9555 goto err_out;
9556
9557 err = parse_tc_actions(adapter, cls->knode.exts, &input->action,
9558 &queue);
9559 if (err < 0)
9560 goto err_out;
9561
9562 input->sw_idx = loc;
9563
9564 spin_lock(&adapter->fdir_perfect_lock);
9565
9566 if (hlist_empty(&adapter->fdir_filter_list)) {
9567 memcpy(&adapter->fdir_mask, mask, sizeof(*mask));
9568 err = ixgbe_fdir_set_input_mask_82599(hw, mask);
9569 if (err)
9570 goto err_out_w_lock;
9571 } else if (memcmp(&adapter->fdir_mask, mask, sizeof(*mask))) {
9572 err = -EINVAL;
9573 goto err_out_w_lock;
9574 }
9575
9576 ixgbe_atr_compute_perfect_hash_82599(&input->filter, mask);
9577 err = ixgbe_fdir_write_perfect_filter_82599(hw, &input->filter,
9578 input->sw_idx, queue);
9579 if (!err)
9580 ixgbe_update_ethtool_fdir_entry(adapter, input, input->sw_idx);
9581 spin_unlock(&adapter->fdir_perfect_lock);
9582
9583 if ((uhtid != 0x800) && (adapter->jump_tables[uhtid]))
9584 set_bit(loc - 1, (adapter->jump_tables[uhtid])->child_loc_map);
9585
9586 kfree(mask);
9587 return err;
9588 err_out_w_lock:
9589 spin_unlock(&adapter->fdir_perfect_lock);
9590 err_out:
9591 kfree(mask);
9592 free_input:
9593 kfree(input);
9594 free_jump:
9595 kfree(jump);
9596 return err;
9597 }
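/* The cls_u32 offload above is normally exercised through the ingress
 * qdisc.  A rough example (assumed command form; IPv4 only, as required by
 * the protocol check above) that would be programmed as a drop rule:
 *
 *	tc qdisc add dev eth0 ingress
 *	tc filter add dev eth0 parent ffff: protocol ip u32 \
 *		match ip dst 192.168.1.1/32 action drop
 *
 * Matches are translated by ixgbe_clsu32_build_input() into a Flow Director
 * perfect filter and the action is resolved by parse_tc_actions().
 */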
9598
9599 static int ixgbe_setup_tc_cls_u32(struct ixgbe_adapter *adapter,
9600 struct tc_cls_u32_offload *cls_u32)
9601 {
9602 switch (cls_u32->command) {
9603 case TC_CLSU32_NEW_KNODE:
9604 case TC_CLSU32_REPLACE_KNODE:
9605 return ixgbe_configure_clsu32(adapter, cls_u32);
9606 case TC_CLSU32_DELETE_KNODE:
9607 return ixgbe_delete_clsu32(adapter, cls_u32);
9608 case TC_CLSU32_NEW_HNODE:
9609 case TC_CLSU32_REPLACE_HNODE:
9610 return ixgbe_configure_clsu32_add_hnode(adapter, cls_u32);
9611 case TC_CLSU32_DELETE_HNODE:
9612 return ixgbe_configure_clsu32_del_hnode(adapter, cls_u32);
9613 default:
9614 return -EOPNOTSUPP;
9615 }
9616 }
9617
9618 static int ixgbe_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
9619 void *cb_priv)
9620 {
9621 struct ixgbe_adapter *adapter = cb_priv;
9622
9623 if (!tc_cls_can_offload_and_chain0(adapter->netdev, type_data))
9624 return -EOPNOTSUPP;
9625
9626 switch (type) {
9627 case TC_SETUP_CLSU32:
9628 return ixgbe_setup_tc_cls_u32(adapter, type_data);
9629 default:
9630 return -EOPNOTSUPP;
9631 }
9632 }
9633
9634 static int ixgbe_setup_tc_mqprio(struct net_device *dev,
9635 struct tc_mqprio_qopt *mqprio)
9636 {
9637 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
9638 return ixgbe_setup_tc(dev, mqprio->num_tc);
9639 }
9640
9641 static LIST_HEAD(ixgbe_block_cb_list);
9642
9643 static int __ixgbe_setup_tc(struct net_device *dev, enum tc_setup_type type,
9644 void *type_data)
9645 {
9646 struct ixgbe_adapter *adapter = netdev_priv(dev);
9647
9648 switch (type) {
9649 case TC_SETUP_BLOCK:
9650 return flow_block_cb_setup_simple(type_data,
9651 &ixgbe_block_cb_list,
9652 ixgbe_setup_tc_block_cb,
9653 adapter, adapter, true);
9654 case TC_SETUP_QDISC_MQPRIO:
9655 return ixgbe_setup_tc_mqprio(dev, type_data);
9656 default:
9657 return -EOPNOTSUPP;
9658 }
9659 }
9660
9661 #ifdef CONFIG_PCI_IOV
9662 void ixgbe_sriov_reinit(struct ixgbe_adapter *adapter)
9663 {
9664 struct net_device *netdev = adapter->netdev;
9665
9666 rtnl_lock();
9667 ixgbe_setup_tc(netdev, adapter->hw_tcs);
9668 rtnl_unlock();
9669 }
9670
9671 #endif
9672 void ixgbe_do_reset(struct net_device *netdev)
9673 {
9674 struct ixgbe_adapter *adapter = netdev_priv(netdev);
9675
9676 if (netif_running(netdev))
9677 ixgbe_reinit_locked(adapter);
9678 else
9679 ixgbe_reset(adapter);
9680 }
9681
9682 static netdev_features_t ixgbe_fix_features(struct net_device *netdev,
9683 netdev_features_t features)
9684 {
9685 struct ixgbe_adapter *adapter = netdev_priv(netdev);
9686
9687 /* If Rx checksum is disabled, then RSC/LRO should also be disabled */
9688 if (!(features & NETIF_F_RXCSUM))
9689 features &= ~NETIF_F_LRO;
9690
9691 /* Turn off LRO if not RSC capable */
9692 if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE))
9693 features &= ~NETIF_F_LRO;
9694
9695 if (adapter->xdp_prog && (features & NETIF_F_LRO)) {
9696 e_dev_err("LRO is not supported with XDP\n");
9697 features &= ~NETIF_F_LRO;
9698 }
9699
9700 return features;
9701 }
9702
9703 static void ixgbe_reset_l2fw_offload(struct ixgbe_adapter *adapter)
9704 {
9705 int rss = min_t(int, ixgbe_max_rss_indices(adapter),
9706 num_online_cpus());
9707
9708 /* go back to full RSS if we're not running SR-IOV */
9709 if (!adapter->ring_feature[RING_F_VMDQ].offset)
9710 adapter->flags &= ~(IXGBE_FLAG_VMDQ_ENABLED |
9711 IXGBE_FLAG_SRIOV_ENABLED);
9712
9713 adapter->ring_feature[RING_F_RSS].limit = rss;
9714 adapter->ring_feature[RING_F_VMDQ].limit = 1;
9715
9716 ixgbe_setup_tc(adapter->netdev, adapter->hw_tcs);
9717 }
9718
9719 static int ixgbe_set_features(struct net_device *netdev,
9720 netdev_features_t features)
9721 {
9722 struct ixgbe_adapter *adapter = netdev_priv(netdev);
9723 netdev_features_t changed = netdev->features ^ features;
9724 bool need_reset = false;
9725
9726 /* Make sure RSC matches LRO, reset if change */
9727 if (!(features & NETIF_F_LRO)) {
9728 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
9729 need_reset = true;
9730 adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
9731 } else if ((adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) &&
9732 !(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) {
9733 if (adapter->rx_itr_setting == 1 ||
9734 adapter->rx_itr_setting > IXGBE_MIN_RSC_ITR) {
9735 adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
9736 need_reset = true;
9737 } else if ((changed ^ features) & NETIF_F_LRO) {
9738 e_info(probe, "rx-usecs set too low, "
9739 "disabling RSC\n");
9740 }
9741 }
9742
9743 /*
9744  * Check if Flow Director n-tuple support or hw_tc support was
9745  * enabled or disabled.  If the state changed, we need to reset.
9746  */
9747 if ((features & NETIF_F_NTUPLE) || (features & NETIF_F_HW_TC)) {
9748 /* turn off ATR, enable perfect filters and reset */
9749 if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
9750 need_reset = true;
9751
9752 adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
9753 adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
9754 } else {
9755 /* turn off perfect filters, enable ATR and reset */
9756 if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
9757 need_reset = true;
9758
9759 adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
9760
9761 /* We cannot enable ATR if SR-IOV is enabled */
9762 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED ||
9763 /* We cannot enable ATR if we have 2 or more tcs */
9764 (adapter->hw_tcs > 1) ||
9765 /* We cannot enable ATR if RSS is disabled */
9766 (adapter->ring_feature[RING_F_RSS].limit <= 1) ||
9767 /* A sample rate of 0 indicates ATR disabled */
9768 (!adapter->atr_sample_rate))
9769 ; /* do nothing, ATR not supported in this configuration */
9770 else /* otherwise ATR is supported again, set the flag */
9771 adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
9772 }
9773
9774 if (changed & NETIF_F_RXALL)
9775 need_reset = true;
9776
9777 netdev->features = features;
9778
9779 if ((adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE)) {
9780 if (features & NETIF_F_RXCSUM) {
9781 adapter->flags2 |= IXGBE_FLAG2_UDP_TUN_REREG_NEEDED;
9782 } else {
9783 u32 port_mask = IXGBE_VXLANCTRL_VXLAN_UDPPORT_MASK;
9784
9785 ixgbe_clear_udp_tunnel_port(adapter, port_mask);
9786 }
9787 }
9788
9789 if ((adapter->flags & IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE)) {
9790 if (features & NETIF_F_RXCSUM) {
9791 adapter->flags2 |= IXGBE_FLAG2_UDP_TUN_REREG_NEEDED;
9792 } else {
9793 u32 port_mask = IXGBE_VXLANCTRL_GENEVE_UDPPORT_MASK;
9794
9795 ixgbe_clear_udp_tunnel_port(adapter, port_mask);
9796 }
9797 }
9798
9799 if ((changed & NETIF_F_HW_L2FW_DOFFLOAD) && adapter->num_rx_pools > 1)
9800 ixgbe_reset_l2fw_offload(adapter);
9801 else if (need_reset)
9802 ixgbe_do_reset(netdev);
9803 else if (changed & (NETIF_F_HW_VLAN_CTAG_RX |
9804 NETIF_F_HW_VLAN_CTAG_FILTER))
9805 ixgbe_set_rx_mode(netdev);
9806
9807 return 1;
9808 }
9809
9810 /**
9811  * ixgbe_add_udp_tunnel_port - Get notifications about adding UDP tunnel ports
9812  * @dev: The port's netdev
9813  * @ti: Tunnel endpoint information
9814  **/
9815 static void ixgbe_add_udp_tunnel_port(struct net_device *dev,
9816 struct udp_tunnel_info *ti)
9817 {
9818 struct ixgbe_adapter *adapter = netdev_priv(dev);
9819 struct ixgbe_hw *hw = &adapter->hw;
9820 __be16 port = ti->port;
9821 u32 port_shift = 0;
9822 u32 reg;
9823
9824 if (ti->sa_family != AF_INET)
9825 return;
9826
9827 switch (ti->type) {
9828 case UDP_TUNNEL_TYPE_VXLAN:
9829 if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE))
9830 return;
9831
9832 if (adapter->vxlan_port == port)
9833 return;
9834
9835 if (adapter->vxlan_port) {
9836 netdev_info(dev,
9837 "VXLAN port %d set, not adding port %d\n",
9838 ntohs(adapter->vxlan_port),
9839 ntohs(port));
9840 return;
9841 }
9842
9843 adapter->vxlan_port = port;
9844 break;
9845 case UDP_TUNNEL_TYPE_GENEVE:
9846 if (!(adapter->flags & IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE))
9847 return;
9848
9849 if (adapter->geneve_port == port)
9850 return;
9851
9852 if (adapter->geneve_port) {
9853 netdev_info(dev,
9854 "GENEVE port %d set, not adding port %d\n",
9855 ntohs(adapter->geneve_port),
9856 ntohs(port));
9857 return;
9858 }
9859
9860 port_shift = IXGBE_VXLANCTRL_GENEVE_UDPPORT_SHIFT;
9861 adapter->geneve_port = port;
9862 break;
9863 default:
9864 return;
9865 }
9866
9867 reg = IXGBE_READ_REG(hw, IXGBE_VXLANCTRL) | ntohs(port) << port_shift;
9868 IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, reg);
9869 }
9870
9871 /**
9872  * ixgbe_del_udp_tunnel_port - Get notifications about removing UDP tunnel ports
9873  * @dev: The port's netdev
9874  * @ti: Tunnel endpoint information
9875  **/
9876 static void ixgbe_del_udp_tunnel_port(struct net_device *dev,
9877 struct udp_tunnel_info *ti)
9878 {
9879 struct ixgbe_adapter *adapter = netdev_priv(dev);
9880 u32 port_mask;
9881
9882 if (ti->type != UDP_TUNNEL_TYPE_VXLAN &&
9883 ti->type != UDP_TUNNEL_TYPE_GENEVE)
9884 return;
9885
9886 if (ti->sa_family != AF_INET)
9887 return;
9888
9889 switch (ti->type) {
9890 case UDP_TUNNEL_TYPE_VXLAN:
9891 if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE))
9892 return;
9893
9894 if (adapter->vxlan_port != ti->port) {
9895 netdev_info(dev, "VXLAN port %d not found\n",
9896 ntohs(ti->port));
9897 return;
9898 }
9899
9900 port_mask = IXGBE_VXLANCTRL_VXLAN_UDPPORT_MASK;
9901 break;
9902 case UDP_TUNNEL_TYPE_GENEVE:
9903 if (!(adapter->flags & IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE))
9904 return;
9905
9906 if (adapter->geneve_port != ti->port) {
9907 netdev_info(dev, "GENEVE port %d not found\n",
9908 ntohs(ti->port));
9909 return;
9910 }
9911
9912 port_mask = IXGBE_VXLANCTRL_GENEVE_UDPPORT_MASK;
9913 break;
9914 default:
9915 return;
9916 }
9917
9918 ixgbe_clear_udp_tunnel_port(adapter, port_mask);
9919 adapter->flags2 |= IXGBE_FLAG2_UDP_TUN_REREG_NEEDED;
9920 }
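/* Both tunnel-port handlers operate on the single VXLANCTRL register: the
 * VXLAN UDP port occupies the low 16 bits and the GENEVE UDP port sits at
 * IXGBE_VXLANCTRL_GENEVE_UDPPORT_SHIFT, which is why only one offloaded
 * port of each type can exist at a time.  Roughly (illustrative sketch, not
 * the exact code above):
 *
 *	reg = IXGBE_READ_REG(hw, IXGBE_VXLANCTRL);
 *	reg &= ~port_mask;			  -- clear the stale port
 *	reg |= ntohs(port) << port_shift;	  -- program the new one
 *	IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, reg);
 *
 * The add path programs the register directly; the delete path defers the
 * clear to ixgbe_clear_udp_tunnel_port().
 */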
9921
9922 static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
9923 struct net_device *dev,
9924 const unsigned char *addr, u16 vid,
9925 u16 flags,
9926 struct netlink_ext_ack *extack)
9927 {
9928 /* guarantee we can provide a unique filter for the unicast address */
9929 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) {
9930 struct ixgbe_adapter *adapter = netdev_priv(dev);
9931 u16 pool = VMDQ_P(0);
9932
9933 if (netdev_uc_count(dev) >= ixgbe_available_rars(adapter, pool))
9934 return -ENOMEM;
9935 }
9936
9937 return ndo_dflt_fdb_add(ndm, tb, dev, addr, vid, flags);
9938 }
9939
9940 /**
9941  * ixgbe_configure_bridge_mode - set various bridge modes
9942  * @adapter: the private structure
9943  * @mode: requested bridge mode
9944  *
9945  * Configure the settings required for the various bridge modes.
9946  **/
9947 static int ixgbe_configure_bridge_mode(struct ixgbe_adapter *adapter,
9948 __u16 mode)
9949 {
9950 struct ixgbe_hw *hw = &adapter->hw;
9951 unsigned int p, num_pools;
9952 u32 vmdctl;
9953
9954 switch (mode) {
9955 case BRIDGE_MODE_VEPA:
9956 /* disable Tx loopback, rely on switch hairpin mode */
9957 IXGBE_WRITE_REG(&adapter->hw, IXGBE_PFDTXGSWC, 0);
9958 /* VEPA relies on the external switch to hairpin traffic back to
9959  * the adapter, so keep Rx replication (VT_CTL.REPLEN) enabled so
9960  * that reflected broadcast/multicast frames are still delivered
9961  * to every pool.
9962  */
9963 vmdctl = IXGBE_READ_REG(hw, IXGBE_VMD_CTL);
9964 vmdctl |= IXGBE_VT_CTL_REPLEN;
9965 IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl);
9966
9967 /* enable source address pruning on every active pool so a pool
9968  * does not receive frames it transmitted itself
9969  */
9970 num_pools = adapter->num_vfs + adapter->num_rx_pools;
9971 for (p = 0; p < num_pools; p++) {
9972 if (hw->mac.ops.set_source_address_pruning)
9973 hw->mac.ops.set_source_address_pruning(hw,
9974 true,
9975 p);
9976 }
9977 break;
9978 case BRIDGE_MODE_VEB:
9979 /* enable the internal Tx switch loopback so pool-to-pool traffic stays on the adapter */
9980 IXGBE_WRITE_REG(&adapter->hw, IXGBE_PFDTXGSWC,
9981 IXGBE_PFDTXGSWC_VT_LBEN);
9982
9983 /* disable Rx switching replication unless SR-IOV virtual
9984  * functions still need it
9985  */
9986 vmdctl = IXGBE_READ_REG(hw, IXGBE_VMD_CTL);
9987 if (!adapter->num_vfs)
9988 vmdctl &= ~IXGBE_VT_CTL_REPLEN;
9989 IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl);
9990
9991 /* mirror the VEPA path above, but turn source address pruning
9992  * back off for every active pool
9993  */
9994 num_pools = adapter->num_vfs + adapter->num_rx_pools;
9995 for (p = 0; p < num_pools; p++) {
9996 if (hw->mac.ops.set_source_address_pruning)
9997 hw->mac.ops.set_source_address_pruning(hw,
9998 false,
9999 p);
10000 }
10001 break;
10002 default:
10003 return -EINVAL;
10004 }
10005
10006 adapter->bridge_mode = mode;
10007
10008 e_info(drv, "enabling bridge mode: %s\n",
10009 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
10010
10011 return 0;
10012 }
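/* Bridge mode is normally switched from user space with iproute2, e.g.
 * (illustrative, any interface name):
 *
 *	bridge link set dev eth0 hwmode vepa
 *	bridge link set dev eth0 hwmode veb
 *
 * VEB keeps VF-to-VF/PF traffic looped back inside the adapter, while VEPA
 * disables the internal loopback and relies on the external switch to
 * hairpin the frames, as configured above via PFDTXGSWC and VMD_CTL.
 */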
10013
10014 static int ixgbe_ndo_bridge_setlink(struct net_device *dev,
10015 struct nlmsghdr *nlh, u16 flags,
10016 struct netlink_ext_ack *extack)
10017 {
10018 struct ixgbe_adapter *adapter = netdev_priv(dev);
10019 struct nlattr *attr, *br_spec;
10020 int rem;
10021
10022 if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
10023 return -EOPNOTSUPP;
10024
10025 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
10026 if (!br_spec)
10027 return -EINVAL;
10028
10029 nla_for_each_nested(attr, br_spec, rem) {
10030 int status;
10031 __u16 mode;
10032
10033 if (nla_type(attr) != IFLA_BRIDGE_MODE)
10034 continue;
10035
10036 if (nla_len(attr) < sizeof(mode))
10037 return -EINVAL;
10038
10039 mode = nla_get_u16(attr);
10040 status = ixgbe_configure_bridge_mode(adapter, mode);
10041 if (status)
10042 return status;
10043
10044 break;
10045 }
10046
10047 return 0;
10048 }
10049
10050 static int ixgbe_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
10051 struct net_device *dev,
10052 u32 filter_mask, int nlflags)
10053 {
10054 struct ixgbe_adapter *adapter = netdev_priv(dev);
10055
10056 if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
10057 return 0;
10058
10059 return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
10060 adapter->bridge_mode, 0, 0, nlflags,
10061 filter_mask, NULL);
10062 }
10063
10064 static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev)
10065 {
10066 struct ixgbe_adapter *adapter = netdev_priv(pdev);
10067 struct ixgbe_fwd_adapter *accel;
10068 int tcs = adapter->hw_tcs ? : 1;
10069 int pool, err;
10070
10071 if (adapter->xdp_prog) {
10072 e_warn(probe, "L2FW offload is not supported with XDP\n");
10073 return ERR_PTR(-EINVAL);
10074 }
10075
10076 /* The hardware only filters on the destination MAC address, so
10077  * the offload is limited to macvlan modes whose behaviour that
10078  * kind of filtering can actually reproduce.
10079  */
10080 if (!macvlan_supports_dest_filter(vdev))
10081 return ERR_PTR(-EMEDIUMTYPE);
10082
10083 /* The macvlan must be locked down to a single queue so that its
10084  * tc_to_txq mapping can be reused to describe the queue mapping
10085  * onto the lower device.
10086  */
10087 if (netif_is_multiqueue(vdev))
10088 return ERR_PTR(-ERANGE);
10089
10090 pool = find_first_zero_bit(adapter->fwd_bitmask, adapter->num_rx_pools);
10091 if (pool == adapter->num_rx_pools) {
10092 u16 used_pools = adapter->num_vfs + adapter->num_rx_pools;
10093 u16 reserved_pools;
10094
10095 if (((adapter->flags & IXGBE_FLAG_DCB_ENABLED) &&
10096 adapter->num_rx_pools >= (MAX_TX_QUEUES / tcs)) ||
10097 adapter->num_rx_pools > IXGBE_MAX_MACVLANS)
10098 return ERR_PTR(-EBUSY);
10099
10100 /* The PF, SR-IOV VFs and L2FW-offloaded macvlans all draw from
10101  * the same limited set of hardware pools, so give up if they
10102  * have all been handed out already.
10103  */
10104 if (used_pools >= IXGBE_MAX_VF_FUNCTIONS)
10105 return ERR_PTR(-EBUSY);
10106
10107 /* Enable VMDq flag so device will be set in VM mode */
10108 adapter->flags |= IXGBE_FLAG_VMDQ_ENABLED |
10109 IXGBE_FLAG_SRIOV_ENABLED;
10110
10111 /* Try to reserve as many queues per pool as possible,
10112  * we start with the configurations that support 4 queues
10113  * per pool, followed by 2, and then by just 1 per pool.
10114  */
10115 if (used_pools < 32 && adapter->num_rx_pools < 16)
10116 reserved_pools = min_t(u16,
10117 32 - used_pools,
10118 16 - adapter->num_rx_pools);
10119 else if (adapter->num_rx_pools < 32)
10120 reserved_pools = min_t(u16,
10121 64 - used_pools,
10122 32 - adapter->num_rx_pools);
10123 else
10124 reserved_pools = 64 - used_pools;
10125
10126
10127 if (!reserved_pools)
10128 return ERR_PTR(-EBUSY);
10129
10130 adapter->ring_feature[RING_F_VMDQ].limit += reserved_pools;
10131 /* Force reinit of the ring allocation now that the VMDq
10132  * limit has been raised */
10133 err = ixgbe_setup_tc(pdev, adapter->hw_tcs);
10134 if (err)
10135 return ERR_PTR(err);
10136
10137 if (pool >= adapter->num_rx_pools)
10138 return ERR_PTR(-ENOMEM);
10139 }
10140
10141 accel = kzalloc(sizeof(*accel), GFP_KERNEL);
10142 if (!accel)
10143 return ERR_PTR(-ENOMEM);
10144
10145 set_bit(pool, adapter->fwd_bitmask);
10146 netdev_set_sb_channel(vdev, pool);
10147 accel->pool = pool;
10148 accel->netdev = vdev;
10149
10150 if (!netif_running(pdev))
10151 return accel;
10152
10153 err = ixgbe_fwd_ring_up(adapter, accel);
10154 if (err)
10155 return ERR_PTR(err);
10156
10157 return accel;
10158 }
10159
10160 static void ixgbe_fwd_del(struct net_device *pdev, void *priv)
10161 {
10162 struct ixgbe_fwd_adapter *accel = priv;
10163 struct ixgbe_adapter *adapter = netdev_priv(pdev);
10164 unsigned int rxbase = accel->rx_base_queue;
10165 unsigned int i;
10166
10167 /* delete the unicast filter associated with the offloaded macvlan */
10168 ixgbe_del_mac_filter(adapter, accel->netdev->dev_addr,
10169 VMDQ_P(accel->pool));
10170
10171 /* Allow remaining Rx packets to get flushed out of the
10172  * Rx FIFO before we drop the netdev for the ring.
10173  */
10174 usleep_range(10000, 20000);
10175
10176 for (i = 0; i < adapter->num_rx_queues_per_pool; i++) {
10177 struct ixgbe_ring *ring = adapter->rx_ring[rxbase + i];
10178 struct ixgbe_q_vector *qv = ring->q_vector;
10179
10180 /* Make sure no packets are still being processed on this
10181  * ring, then clear the netdev to shut it down.
10182  */
10183 if (netif_running(adapter->netdev))
10184 napi_synchronize(&qv->napi);
10185 ring->netdev = NULL;
10186 }
10187
10188 /* unbind the queues and drop the subordinate channel config */
10189 netdev_unbind_sb_channel(pdev, accel->netdev);
10190 netdev_set_sb_channel(accel->netdev, 0);
10191
10192 clear_bit(accel->pool, adapter->fwd_bitmask);
10193 kfree(accel);
10194 }
10195
10196 #define IXGBE_MAX_MAC_HDR_LEN 127
10197 #define IXGBE_MAX_NETWORK_HDR_LEN 511
10198
10199 static netdev_features_t
10200 ixgbe_features_check(struct sk_buff *skb, struct net_device *dev,
10201 netdev_features_t features)
10202 {
10203 unsigned int network_hdr_len, mac_hdr_len;
10204
10205 /* Make certain the headers can be described by a context descriptor */
10206 mac_hdr_len = skb_network_header(skb) - skb->data;
10207 if (unlikely(mac_hdr_len > IXGBE_MAX_MAC_HDR_LEN))
10208 return features & ~(NETIF_F_HW_CSUM |
10209 NETIF_F_SCTP_CRC |
10210 NETIF_F_HW_VLAN_CTAG_TX |
10211 NETIF_F_TSO |
10212 NETIF_F_TSO6);
10213
10214 network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb);
10215 if (unlikely(network_hdr_len > IXGBE_MAX_NETWORK_HDR_LEN))
10216 return features & ~(NETIF_F_HW_CSUM |
10217 NETIF_F_SCTP_CRC |
10218 NETIF_F_TSO |
10219 NETIF_F_TSO6);
10220
10221 /* We can only support IPv4 TSO in tunnels if we can mangle the
10222  * inner IP ID field, so strip TSO if MANGLEID is not supported.
10223  * IPsec offload sets skb->encapsulation but can still handle
10224  * the TSO, so it is the exception.
10225  */
10226 if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID)) {
10227 #ifdef CONFIG_IXGBE_IPSEC
10228 if (!secpath_exists(skb))
10229 #endif
10230 features &= ~NETIF_F_TSO;
10231 }
10232
10233 return features;
10234 }
10235
10236 static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
10237 {
10238 int i, frame_size = dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
10239 struct ixgbe_adapter *adapter = netdev_priv(dev);
10240 struct bpf_prog *old_prog;
10241 bool need_reset;
10242
10243 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
10244 return -EINVAL;
10245
10246 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
10247 return -EINVAL;
10248
10249 /* verify ixgbe ring attributes are sufficient for XDP */
10250 for (i = 0; i < adapter->num_rx_queues; i++) {
10251 struct ixgbe_ring *ring = adapter->rx_ring[i];
10252
10253 if (ring_is_rsc_enabled(ring))
10254 return -EINVAL;
10255
10256 if (frame_size > ixgbe_rx_bufsz(ring))
10257 return -EINVAL;
10258 }
10259
10260 if (nr_cpu_ids > MAX_XDP_QUEUES)
10261 return -ENOMEM;
10262
10263 old_prog = xchg(&adapter->xdp_prog, prog);
10264 need_reset = (!!prog != !!old_prog);
10265
10266 /* If transitioning XDP modes reconfigure rings */
10267 if (need_reset) {
10268 int err;
10269
10270 if (!prog)
10271 /* Wait until any in-flight ndo_xsk_wakeup calls complete. */
10272 synchronize_rcu();
10273 err = ixgbe_setup_tc(dev, adapter->hw_tcs);
10274
10275 if (err) {
10276 rcu_assign_pointer(adapter->xdp_prog, old_prog);
10277 return -EINVAL;
10278 }
10279 } else {
10280 for (i = 0; i < adapter->num_rx_queues; i++)
10281 (void)xchg(&adapter->rx_ring[i]->xdp_prog,
10282 adapter->xdp_prog);
10283 }
10284
10285 if (old_prog)
10286 bpf_prog_put(old_prog);
10287
10288 /* Kick start the NAPI context if there is an AF_XDP socket open
10289  * on that queue id, so that receiving will start.
10290  */
10291 if (need_reset && prog)
10292 for (i = 0; i < adapter->num_rx_queues; i++)
10293 if (adapter->xdp_ring[i]->xsk_umem)
10294 (void)ixgbe_xsk_wakeup(adapter->netdev, i,
10295 XDP_WAKEUP_RX);
10296
10297 return 0;
10298 }
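/* The per-ring frame_size check above must fit a single Rx buffer because
 * the XDP path never chains buffers.  With the default 1500-byte MTU the
 * worst case is 1500 + ETH_HLEN(14) + ETH_FCS_LEN(4) + VLAN_HLEN(4) = 1522
 * bytes, which fits the buffer sizes ixgbe_rx_bufsz() reports; jumbo MTUs
 * that exceed the Rx buffer size cause this setup call to fail with -EINVAL.
 */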
10299
10300 static int ixgbe_xdp(struct net_device *dev, struct netdev_bpf *xdp)
10301 {
10302 struct ixgbe_adapter *adapter = netdev_priv(dev);
10303
10304 switch (xdp->command) {
10305 case XDP_SETUP_PROG:
10306 return ixgbe_xdp_setup(dev, xdp->prog);
10307 case XDP_QUERY_PROG:
10308 xdp->prog_id = adapter->xdp_prog ?
10309 adapter->xdp_prog->aux->id : 0;
10310 return 0;
10311 case XDP_SETUP_XSK_UMEM:
10312 return ixgbe_xsk_umem_setup(adapter, xdp->xsk.umem,
10313 xdp->xsk.queue_id);
10314
10315 default:
10316 return -EINVAL;
10317 }
10318 }
10319
10320 void ixgbe_xdp_ring_update_tail(struct ixgbe_ring *ring)
10321 {
10322 /* Force memory writes to complete before letting h/w
10323  * know there are new descriptors to fetch.
10324  */
10325 wmb();
10326 writel(ring->next_to_use, ring->tail);
10327 }
10328
10329 static int ixgbe_xdp_xmit(struct net_device *dev, int n,
10330 struct xdp_frame **frames, u32 flags)
10331 {
10332 struct ixgbe_adapter *adapter = netdev_priv(dev);
10333 struct ixgbe_ring *ring;
10334 int drops = 0;
10335 int i;
10336
10337 if (unlikely(test_bit(__IXGBE_DOWN, &adapter->state)))
10338 return -ENETDOWN;
10339
10340 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
10341 return -EINVAL;
10342
10343 /* During program transitions it is possible adapter->xdp_prog is
10344  * assigned but the ring has not been configured yet.  In that case
10345  * simply abort the transmit.
10346  */
10346 ring = adapter->xdp_prog ? adapter->xdp_ring[smp_processor_id()] : NULL;
10347 if (unlikely(!ring))
10348 return -ENXIO;
10349
10350 if (unlikely(test_bit(__IXGBE_TX_DISABLED, &ring->state)))
10351 return -ENXIO;
10352
10353 for (i = 0; i < n; i++) {
10354 struct xdp_frame *xdpf = frames[i];
10355 int err;
10356
10357 err = ixgbe_xmit_xdp_ring(adapter, xdpf);
10358 if (err != IXGBE_XDP_TX) {
10359 xdp_return_frame_rx_napi(xdpf);
10360 drops++;
10361 }
10362 }
10363
10364 if (unlikely(flags & XDP_XMIT_FLUSH))
10365 ixgbe_xdp_ring_update_tail(ring);
10366
10367 return n - drops;
10368 }
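/* ixgbe_xdp_xmit() follows the ndo_xdp_xmit contract: it returns the number
 * of frames actually queued (n - drops); frames that could not be placed on
 * the per-CPU XDP Tx ring are handed back with xdp_return_frame_rx_napi().
 * XDP_XMIT_FLUSH asks for the tail bump to happen here instead of being
 * deferred to the end of the NAPI poll.
 */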
10369
10370 static const struct net_device_ops ixgbe_netdev_ops = {
10371 .ndo_open = ixgbe_open,
10372 .ndo_stop = ixgbe_close,
10373 .ndo_start_xmit = ixgbe_xmit_frame,
10374 .ndo_set_rx_mode = ixgbe_set_rx_mode,
10375 .ndo_validate_addr = eth_validate_addr,
10376 .ndo_set_mac_address = ixgbe_set_mac,
10377 .ndo_change_mtu = ixgbe_change_mtu,
10378 .ndo_tx_timeout = ixgbe_tx_timeout,
10379 .ndo_set_tx_maxrate = ixgbe_tx_maxrate,
10380 .ndo_vlan_rx_add_vid = ixgbe_vlan_rx_add_vid,
10381 .ndo_vlan_rx_kill_vid = ixgbe_vlan_rx_kill_vid,
10382 .ndo_do_ioctl = ixgbe_ioctl,
10383 .ndo_set_vf_mac = ixgbe_ndo_set_vf_mac,
10384 .ndo_set_vf_vlan = ixgbe_ndo_set_vf_vlan,
10385 .ndo_set_vf_rate = ixgbe_ndo_set_vf_bw,
10386 .ndo_set_vf_spoofchk = ixgbe_ndo_set_vf_spoofchk,
10387 .ndo_set_vf_rss_query_en = ixgbe_ndo_set_vf_rss_query_en,
10388 .ndo_set_vf_trust = ixgbe_ndo_set_vf_trust,
10389 .ndo_get_vf_config = ixgbe_ndo_get_vf_config,
10390 .ndo_get_stats64 = ixgbe_get_stats64,
10391 .ndo_setup_tc = __ixgbe_setup_tc,
10392 #ifdef IXGBE_FCOE
10393 .ndo_select_queue = ixgbe_select_queue,
10394 .ndo_fcoe_ddp_setup = ixgbe_fcoe_ddp_get,
10395 .ndo_fcoe_ddp_target = ixgbe_fcoe_ddp_target,
10396 .ndo_fcoe_ddp_done = ixgbe_fcoe_ddp_put,
10397 .ndo_fcoe_enable = ixgbe_fcoe_enable,
10398 .ndo_fcoe_disable = ixgbe_fcoe_disable,
10399 .ndo_fcoe_get_wwn = ixgbe_fcoe_get_wwn,
10400 .ndo_fcoe_get_hbainfo = ixgbe_fcoe_get_hbainfo,
10401 #endif
10402 .ndo_set_features = ixgbe_set_features,
10403 .ndo_fix_features = ixgbe_fix_features,
10404 .ndo_fdb_add = ixgbe_ndo_fdb_add,
10405 .ndo_bridge_setlink = ixgbe_ndo_bridge_setlink,
10406 .ndo_bridge_getlink = ixgbe_ndo_bridge_getlink,
10407 .ndo_dfwd_add_station = ixgbe_fwd_add,
10408 .ndo_dfwd_del_station = ixgbe_fwd_del,
10409 .ndo_udp_tunnel_add = ixgbe_add_udp_tunnel_port,
10410 .ndo_udp_tunnel_del = ixgbe_del_udp_tunnel_port,
10411 .ndo_features_check = ixgbe_features_check,
10412 .ndo_bpf = ixgbe_xdp,
10413 .ndo_xdp_xmit = ixgbe_xdp_xmit,
10414 .ndo_xsk_wakeup = ixgbe_xsk_wakeup,
10415 };
10416
10417 static void ixgbe_disable_txr_hw(struct ixgbe_adapter *adapter,
10418 struct ixgbe_ring *tx_ring)
10419 {
10420 unsigned long wait_delay, delay_interval;
10421 struct ixgbe_hw *hw = &adapter->hw;
10422 u8 reg_idx = tx_ring->reg_idx;
10423 int wait_loop;
10424 u32 txdctl;
10425
10426 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
10427
10428
10429 delay_interval = ixgbe_get_completion_timeout(adapter) / 100;
10430
10431 wait_loop = IXGBE_MAX_RX_DESC_POLL;
10432 wait_delay = delay_interval;
10433
10434 while (wait_loop--) {
10435 usleep_range(wait_delay, wait_delay + 10);
10436 wait_delay += delay_interval * 2;
10437 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
10438
10439 if (!(txdctl & IXGBE_TXDCTL_ENABLE))
10440 return;
10441 }
10442
10443 e_err(drv, "TXDCTL.ENABLE not cleared within the polling period\n");
10444 }
10445
10446 static void ixgbe_disable_txr(struct ixgbe_adapter *adapter,
10447 struct ixgbe_ring *tx_ring)
10448 {
10449 set_bit(__IXGBE_TX_DISABLED, &tx_ring->state);
10450 ixgbe_disable_txr_hw(adapter, tx_ring);
10451 }
10452
10453 static void ixgbe_disable_rxr_hw(struct ixgbe_adapter *adapter,
10454 struct ixgbe_ring *rx_ring)
10455 {
10456 unsigned long wait_delay, delay_interval;
10457 struct ixgbe_hw *hw = &adapter->hw;
10458 u8 reg_idx = rx_ring->reg_idx;
10459 int wait_loop;
10460 u32 rxdctl;
10461
10462 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
10463 rxdctl &= ~IXGBE_RXDCTL_ENABLE;
10464 rxdctl |= IXGBE_RXDCTL_SWFLSH;
10465
10466
10467 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
10468
10469 /* RXDCTL.EN may not change on 82598 if link is down, so skip the poll */
10470 if (hw->mac.type == ixgbe_mac_82598EB &&
10471 !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
10472 return;
10473
10474
10475 delay_interval = ixgbe_get_completion_timeout(adapter) / 100;
10476
10477 wait_loop = IXGBE_MAX_RX_DESC_POLL;
10478 wait_delay = delay_interval;
10479
10480 while (wait_loop--) {
10481 usleep_range(wait_delay, wait_delay + 10);
10482 wait_delay += delay_interval * 2;
10483 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
10484
10485 if (!(rxdctl & IXGBE_RXDCTL_ENABLE))
10486 return;
10487 }
10488
10489 e_err(drv, "RXDCTL.ENABLE not cleared within the polling period\n");
10490 }
10491
10492 static void ixgbe_reset_txr_stats(struct ixgbe_ring *tx_ring)
10493 {
10494 memset(&tx_ring->stats, 0, sizeof(tx_ring->stats));
10495 memset(&tx_ring->tx_stats, 0, sizeof(tx_ring->tx_stats));
10496 }
10497
10498 static void ixgbe_reset_rxr_stats(struct ixgbe_ring *rx_ring)
10499 {
10500 memset(&rx_ring->stats, 0, sizeof(rx_ring->stats));
10501 memset(&rx_ring->rx_stats, 0, sizeof(rx_ring->rx_stats));
10502 }
10503
10504 /**
10505  * ixgbe_txrx_ring_disable - Disable Rx/Tx/XDP Tx rings
10506  * @adapter: adapter structure
10507  * @ring: ring index
10508  *
10509  * This function disables a certain Rx/Tx/XDP Tx ring. The function
10510  * assumes that the netdev is running.
10511  **/
10512 void ixgbe_txrx_ring_disable(struct ixgbe_adapter *adapter, int ring)
10513 {
10514 struct ixgbe_ring *rx_ring, *tx_ring, *xdp_ring;
10515
10516 rx_ring = adapter->rx_ring[ring];
10517 tx_ring = adapter->tx_ring[ring];
10518 xdp_ring = adapter->xdp_ring[ring];
10519
10520 ixgbe_disable_txr(adapter, tx_ring);
10521 if (xdp_ring)
10522 ixgbe_disable_txr(adapter, xdp_ring);
10523 ixgbe_disable_rxr_hw(adapter, rx_ring);
10524
10525 if (xdp_ring)
10526 synchronize_rcu();
10527
10528 /* Rx/Tx/XDP Tx share the same napi context */
10529 napi_disable(&rx_ring->q_vector->napi);
10530
10531 ixgbe_clean_tx_ring(tx_ring);
10532 if (xdp_ring)
10533 ixgbe_clean_tx_ring(xdp_ring);
10534 ixgbe_clean_rx_ring(rx_ring);
10535
10536 ixgbe_reset_txr_stats(tx_ring);
10537 if (xdp_ring)
10538 ixgbe_reset_txr_stats(xdp_ring);
10539 ixgbe_reset_rxr_stats(rx_ring);
10540 }
10541
10542 /**
10543  * ixgbe_txrx_ring_enable - Enable Rx/Tx/XDP Tx rings
10544  * @adapter: adapter structure
10545  * @ring: ring index
10546  *
10547  * This function enables a certain Rx/Tx/XDP Tx ring. The function
10548  * assumes that the netdev is running.
10549  **/
10550 void ixgbe_txrx_ring_enable(struct ixgbe_adapter *adapter, int ring)
10551 {
10552 struct ixgbe_ring *rx_ring, *tx_ring, *xdp_ring;
10553
10554 rx_ring = adapter->rx_ring[ring];
10555 tx_ring = adapter->tx_ring[ring];
10556 xdp_ring = adapter->xdp_ring[ring];
10557
10558 /* Rx/Tx/XDP Tx share the same napi context */
10559 napi_enable(&rx_ring->q_vector->napi);
10560
10561 ixgbe_configure_tx_ring(adapter, tx_ring);
10562 if (xdp_ring)
10563 ixgbe_configure_tx_ring(adapter, xdp_ring);
10564 ixgbe_configure_rx_ring(adapter, rx_ring);
10565
10566 clear_bit(__IXGBE_TX_DISABLED, &tx_ring->state);
10567 if (xdp_ring)
10568 clear_bit(__IXGBE_TX_DISABLED, &xdp_ring->state);
10569 }
10570
10571 /**
10572  * ixgbe_enumerate_functions - Get the number of ports this device has
10573  * @adapter: adapter structure
10574  *
10575  * This function enumerates the physical functions co-located on a single
10576  * slot, in order to determine how many ports a device has. This is most
10577  * useful in determining the required GT/s of PCIe bandwidth necessary for
10578  * optimal performance.
10579  **/
10580 static inline int ixgbe_enumerate_functions(struct ixgbe_adapter *adapter)
10581 {
10582 struct pci_dev *entry, *pdev = adapter->pdev;
10583 int physfns = 0;
10584
10585 /* Some cards can not use the generic count-PCIe-functions method
10586  * because they sit behind a parent switch; for those the port
10587  * count is assumed to be four.
10588  */
10589 if (ixgbe_pcie_from_parent(&adapter->hw))
10590 physfns = 4;
10591
10592 list_for_each_entry(entry, &adapter->pdev->bus->devices, bus_list) {
10593
10594 if (entry->is_virtfn)
10595 continue;
10596
10597 /* When the devices on the bus don't all match our device ID,
10598  * we can't reliably determine the correct number of
10599  * functions. This can occur if a function has been direct
10600  * attached to a virtual machine using VT-d, for example. In
10601  * this case, simply return -1 to indicate this.
10602  */
10603 if ((entry->vendor != pdev->vendor) ||
10604 (entry->device != pdev->device))
10605 return -1;
10606
10607 physfns++;
10608 }
10609
10610 return physfns;
10611 }
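/* The port count returned here feeds the PCIe bandwidth sanity check in
 * probe: each 10GbE port is assumed to need roughly 10 GT/s of slot
 * bandwidth, so a 4-port adapter expects 4 * 10 = 40 GT/s (capped at 16 for
 * 82598).  ixgbe_check_minimum_link() then warns if the slot the device is
 * plugged into cannot supply that much.
 */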
10612
10613 /**
10614  * ixgbe_wol_supported - Check whether device supports WoL
10615  * @adapter: the adapter private structure
10616  * @device_id: the device ID
10617  * @subdevice_id: the subsystem device ID
10618  *
10619  * This function is used by probe and ethtool to determine
10620  * which devices have WoL support
10621  *
10622  **/
10623 bool ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
10624 u16 subdevice_id)
10625 {
10626 struct ixgbe_hw *hw = &adapter->hw;
10627 u16 wol_cap = adapter->eeprom_cap & IXGBE_DEVICE_CAPS_WOL_MASK;
10628
10629 /* WOL not supported on 82598 */
10630 if (hw->mac.type == ixgbe_mac_82598EB)
10631 return false;
10632
10633 /* check eeprom to see if WOL is enabled for X540 and newer */
10634 if (hw->mac.type >= ixgbe_mac_X540) {
10635 if ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
10636 ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0) &&
10637 (hw->bus.func == 0)))
10638 return true;
10639 }
10640
10641 /* WOL support is determined by device/subdevice ID for 82599 parts */
10642 switch (device_id) {
10643 case IXGBE_DEV_ID_82599_SFP:
10644
10645 switch (subdevice_id) {
10646 case IXGBE_SUBDEV_ID_82599_560FLR:
10647 case IXGBE_SUBDEV_ID_82599_LOM_SNAP6:
10648 case IXGBE_SUBDEV_ID_82599_SFP_WOL0:
10649 case IXGBE_SUBDEV_ID_82599_SFP_2OCP:
10650
10651 if (hw->bus.func != 0)
10652 break;
10653 /* fall through */
10654 case IXGBE_SUBDEV_ID_82599_SP_560FLR:
10655 case IXGBE_SUBDEV_ID_82599_SFP:
10656 case IXGBE_SUBDEV_ID_82599_RNDC:
10657 case IXGBE_SUBDEV_ID_82599_ECNA_DP:
10658 case IXGBE_SUBDEV_ID_82599_SFP_1OCP:
10659 case IXGBE_SUBDEV_ID_82599_SFP_LOM_OEM1:
10660 case IXGBE_SUBDEV_ID_82599_SFP_LOM_OEM2:
10661 return true;
10662 }
10663 break;
10664 case IXGBE_DEV_ID_82599EN_SFP:
10665
10666 switch (subdevice_id) {
10667 case IXGBE_SUBDEV_ID_82599EN_SFP_OCP1:
10668 return true;
10669 }
10670 break;
10671 case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
10672
10673 if (subdevice_id != IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ)
10674 return true;
10675 break;
10676 case IXGBE_DEV_ID_82599_KX4:
10677 return true;
10678 default:
10679 break;
10680 }
10681
10682 return false;
10683 }
10684
10685 /**
10686  * ixgbe_set_fw_version - Set FW version
10687  * @adapter: the adapter private structure
10688  *
10689  * This function is used by probe and ethtool to determine the FW version to
10690  * format to display. The FW version is taken from the EEPROM/NVM.
10691  */
10692 static void ixgbe_set_fw_version(struct ixgbe_adapter *adapter)
10693 {
10694 struct ixgbe_hw *hw = &adapter->hw;
10695 struct ixgbe_nvm_version nvm_ver;
10696
10697 ixgbe_get_oem_prod_version(hw, &nvm_ver);
10698 if (nvm_ver.oem_valid) {
10699 snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id),
10700 "%x.%x.%x", nvm_ver.oem_major, nvm_ver.oem_minor,
10701 nvm_ver.oem_release);
10702 return;
10703 }
10704
10705 ixgbe_get_etk_id(hw, &nvm_ver);
10706 ixgbe_get_orom_version(hw, &nvm_ver);
10707
10708 if (nvm_ver.or_valid) {
10709 snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id),
10710 "0x%08x, %d.%d.%d", nvm_ver.etk_id, nvm_ver.or_major,
10711 nvm_ver.or_build, nvm_ver.or_patch);
10712 return;
10713 }
10714
10715
10716 snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id),
10717 "0x%08x", nvm_ver.etk_id);
10718 }
10719
10720 /**
10721  * ixgbe_probe - Device Initialization Routine
10722  * @pdev: PCI device information struct
10723  * @ent: entry in ixgbe_pci_tbl
10724  *
10725  * Returns 0 on success, negative on failure
10726  *
10727  * ixgbe_probe initializes an adapter identified by a pci_dev structure.
10728  * The OS initialization, configuring of the adapter private structure,
10729  * and a hardware reset occur.
10730  **/
10731 static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
10732 {
10733 struct net_device *netdev;
10734 struct ixgbe_adapter *adapter = NULL;
10735 struct ixgbe_hw *hw;
10736 const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data];
10737 int i, err, pci_using_dac, expected_gts;
10738 unsigned int indices = MAX_TX_QUEUES;
10739 u8 part_str[IXGBE_PBANUM_LENGTH];
10740 bool disable_dev = false;
10741 #ifdef IXGBE_FCOE
10742 u16 device_caps;
10743 #endif
10744 u32 eec;
10745
10746 /* Catch broken hardware that put the wrong VF device ID in
10747  * the PCIe SR-IOV capability.
10748  */
10749 if (pdev->is_virtfn) {
10750 WARN(1, KERN_ERR "%s (%hx:%hx) should not be a VF!\n",
10751 pci_name(pdev), pdev->vendor, pdev->device);
10752 return -EINVAL;
10753 }
10754
10755 err = pci_enable_device_mem(pdev);
10756 if (err)
10757 return err;
10758
10759 if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
10760 pci_using_dac = 1;
10761 } else {
10762 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
10763 if (err) {
10764 dev_err(&pdev->dev,
10765 "No usable DMA configuration, aborting\n");
10766 goto err_dma;
10767 }
10768 pci_using_dac = 0;
10769 }
10770
10771 err = pci_request_mem_regions(pdev, ixgbe_driver_name);
10772 if (err) {
10773 dev_err(&pdev->dev,
10774 "pci_request_selected_regions failed 0x%x\n", err);
10775 goto err_pci_reg;
10776 }
10777
10778 pci_enable_pcie_error_reporting(pdev);
10779
10780 pci_set_master(pdev);
10781 pci_save_state(pdev);
10782
10783 if (ii->mac == ixgbe_mac_82598EB) {
10784 #ifdef CONFIG_IXGBE_DCB
10785 /* 8 TC w/ 4 queues per TC */
10786 indices = 4 * MAX_TRAFFIC_CLASS;
10787 #else
10788 indices = IXGBE_MAX_RSS_INDICES;
10789 #endif
10790 }
10791
10792 netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), indices);
10793 if (!netdev) {
10794 err = -ENOMEM;
10795 goto err_alloc_etherdev;
10796 }
10797
10798 SET_NETDEV_DEV(netdev, &pdev->dev);
10799
10800 adapter = netdev_priv(netdev);
10801
10802 adapter->netdev = netdev;
10803 adapter->pdev = pdev;
10804 hw = &adapter->hw;
10805 hw->back = adapter;
10806 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
10807
10808 hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
10809 pci_resource_len(pdev, 0));
10810 adapter->io_addr = hw->hw_addr;
10811 if (!hw->hw_addr) {
10812 err = -EIO;
10813 goto err_ioremap;
10814 }
10815
10816 netdev->netdev_ops = &ixgbe_netdev_ops;
10817 ixgbe_set_ethtool_ops(netdev);
10818 netdev->watchdog_timeo = 5 * HZ;
10819 strlcpy(netdev->name, pci_name(pdev), sizeof(netdev->name));
10820
10821
10822 hw->mac.ops = *ii->mac_ops;
10823 hw->mac.type = ii->mac;
10824 hw->mvals = ii->mvals;
10825 if (ii->link_ops)
10826 hw->link.ops = *ii->link_ops;
10827
10828
10829 hw->eeprom.ops = *ii->eeprom_ops;
10830 eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
10831 if (ixgbe_removed(hw->hw_addr)) {
10832 err = -EIO;
10833 goto err_ioremap;
10834 }
10835
10836 /* If EEPROM is valid (bit 8 = 1), use the default ops, otherwise use bit bang */
10837 hw->eeprom.ops.read = &ixgbe_read_eeprom_bit_bang_generic;
10838
10839
10840 hw->phy.ops = *ii->phy_ops;
10841 hw->phy.sfp_type = ixgbe_sfp_type_unknown;
10842
10843 hw->phy.mdio.prtad = MDIO_PRTAD_NONE;
10844 hw->phy.mdio.mmds = 0;
10845 hw->phy.mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
10846 hw->phy.mdio.dev = netdev;
10847 hw->phy.mdio.mdio_read = ixgbe_mdio_read;
10848 hw->phy.mdio.mdio_write = ixgbe_mdio_write;
10849
10850
10851 err = ixgbe_sw_init(adapter, ii);
10852 if (err)
10853 goto err_sw_init;
10854
10855
10856 if (hw->mac.ops.init_swfw_sync)
10857 hw->mac.ops.init_swfw_sync(hw);
10858
10859
10860 switch (adapter->hw.mac.type) {
10861 case ixgbe_mac_82599EB:
10862 case ixgbe_mac_X540:
10863 case ixgbe_mac_X550:
10864 case ixgbe_mac_X550EM_x:
10865 case ixgbe_mac_x550em_a:
10866 IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
10867 break;
10868 default:
10869 break;
10870 }
10871
10872 /*
10873  * If there is a fan on this device and it has failed, log the
10874  * failure.
10875  */
10876 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
10877 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
10878 if (esdp & IXGBE_ESDP_SDP1)
10879 e_crit(probe, "Fan has stopped, replace the adapter\n");
10880 }
10881
10882 if (allow_unsupported_sfp)
10883 hw->allow_unsupported_sfp = allow_unsupported_sfp;
10884
10885
10886 hw->phy.reset_if_overtemp = true;
10887 err = hw->mac.ops.reset_hw(hw);
10888 hw->phy.reset_if_overtemp = false;
10889 ixgbe_set_eee_capable(adapter);
10890 if (err == IXGBE_ERR_SFP_NOT_PRESENT) {
10891 err = 0;
10892 } else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
10893 e_dev_err("failed to load because an unsupported SFP+ or QSFP module type was detected.\n");
10894 e_dev_err("Reload the driver after installing a supported module.\n");
10895 goto err_sw_init;
10896 } else if (err) {
10897 e_dev_err("HW Init failed: %d\n", err);
10898 goto err_sw_init;
10899 }
10900
10901 #ifdef CONFIG_PCI_IOV
10902
10903 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
10904 goto skip_sriov;
10905
10906 ixgbe_init_mbx_params_pf(hw);
10907 hw->mbx.ops = ii->mbx_ops;
10908 pci_sriov_set_totalvfs(pdev, IXGBE_MAX_VFS_DRV_LIMIT);
10909 ixgbe_enable_sriov(adapter, max_vfs);
10910 skip_sriov:
10911
10912 #endif
10913 netdev->features = NETIF_F_SG |
10914 NETIF_F_TSO |
10915 NETIF_F_TSO6 |
10916 NETIF_F_RXHASH |
10917 NETIF_F_RXCSUM |
10918 NETIF_F_HW_CSUM;
10919
10920 #define IXGBE_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \
10921 NETIF_F_GSO_GRE_CSUM | \
10922 NETIF_F_GSO_IPXIP4 | \
10923 NETIF_F_GSO_IPXIP6 | \
10924 NETIF_F_GSO_UDP_TUNNEL | \
10925 NETIF_F_GSO_UDP_TUNNEL_CSUM)
10926
10927 netdev->gso_partial_features = IXGBE_GSO_PARTIAL_FEATURES;
10928 netdev->features |= NETIF_F_GSO_PARTIAL |
10929 IXGBE_GSO_PARTIAL_FEATURES;
10930
10931 if (hw->mac.type >= ixgbe_mac_82599EB)
10932 netdev->features |= NETIF_F_SCTP_CRC;
10933
10934 #ifdef CONFIG_IXGBE_IPSEC
10935 #define IXGBE_ESP_FEATURES (NETIF_F_HW_ESP | \
10936 NETIF_F_HW_ESP_TX_CSUM | \
10937 NETIF_F_GSO_ESP)
10938
10939 if (adapter->ipsec)
10940 netdev->features |= IXGBE_ESP_FEATURES;
10941 #endif
10942
10943 netdev->hw_features |= netdev->features |
10944 NETIF_F_HW_VLAN_CTAG_FILTER |
10945 NETIF_F_HW_VLAN_CTAG_RX |
10946 NETIF_F_HW_VLAN_CTAG_TX |
10947 NETIF_F_RXALL |
10948 NETIF_F_HW_L2FW_DOFFLOAD;
10949
10950 if (hw->mac.type >= ixgbe_mac_82599EB)
10951 netdev->hw_features |= NETIF_F_NTUPLE |
10952 NETIF_F_HW_TC;
10953
10954 if (pci_using_dac)
10955 netdev->features |= NETIF_F_HIGHDMA;
10956
10957 netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
10958 netdev->hw_enc_features |= netdev->vlan_features;
10959 netdev->mpls_features |= NETIF_F_SG |
10960 NETIF_F_TSO |
10961 NETIF_F_TSO6 |
10962 NETIF_F_HW_CSUM;
10963 netdev->mpls_features |= IXGBE_GSO_PARTIAL_FEATURES;
10964
10965 /* set this bit last since it cannot be part of vlan_features */
10966 netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
10967 NETIF_F_HW_VLAN_CTAG_RX |
10968 NETIF_F_HW_VLAN_CTAG_TX;
10969
10970 netdev->priv_flags |= IFF_UNICAST_FLT;
10971 netdev->priv_flags |= IFF_SUPP_NOFCS;
10972
10973 /* MTU range: 68 - 9710 */
10974 netdev->min_mtu = ETH_MIN_MTU;
10975 netdev->max_mtu = IXGBE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN);
10976
10977 #ifdef CONFIG_IXGBE_DCB
10978 if (adapter->flags & IXGBE_FLAG_DCB_CAPABLE)
10979 netdev->dcbnl_ops = &ixgbe_dcbnl_ops;
10980 #endif
10981
10982 #ifdef IXGBE_FCOE
10983 if (adapter->flags & IXGBE_FLAG_FCOE_CAPABLE) {
10984 unsigned int fcoe_l;
10985
10986 if (hw->mac.ops.get_device_caps) {
10987 hw->mac.ops.get_device_caps(hw, &device_caps);
10988 if (device_caps & IXGBE_DEVICE_CAPS_FCOE_OFFLOADS)
10989 adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE;
10990 }
10991
10992
10993 fcoe_l = min_t(int, IXGBE_FCRETA_SIZE, num_online_cpus());
10994 adapter->ring_feature[RING_F_FCOE].limit = fcoe_l;
10995
10996 netdev->features |= NETIF_F_FSO |
10997 NETIF_F_FCOE_CRC;
10998
10999 netdev->vlan_features |= NETIF_F_FSO |
11000 NETIF_F_FCOE_CRC |
11001 NETIF_F_FCOE_MTU;
11002 }
11003 #endif
11004 if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)
11005 netdev->hw_features |= NETIF_F_LRO;
11006 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
11007 netdev->features |= NETIF_F_LRO;
11008
11009 if (ixgbe_check_fw_error(adapter)) {
11010 err = -EIO;
11011 goto err_sw_init;
11012 }
11013
11014 /* make sure the EEPROM is good */
11015 if (hw->eeprom.ops.validate_checksum(hw, NULL) < 0) {
11016 e_dev_err("The EEPROM Checksum Is Not Valid\n");
11017 err = -EIO;
11018 goto err_sw_init;
11019 }
11020
11021 eth_platform_get_mac_address(&adapter->pdev->dev,
11022 adapter->hw.mac.perm_addr);
11023
11024 memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len);
11025
11026 if (!is_valid_ether_addr(netdev->dev_addr)) {
11027 e_dev_err("invalid MAC address\n");
11028 err = -EIO;
11029 goto err_sw_init;
11030 }
11031
11032
11033 ether_addr_copy(hw->mac.addr, hw->mac.perm_addr);
11034 ixgbe_mac_set_default_filter(adapter);
11035
11036 timer_setup(&adapter->service_timer, ixgbe_service_timer, 0);
11037
11038 if (ixgbe_removed(hw->hw_addr)) {
11039 err = -EIO;
11040 goto err_sw_init;
11041 }
11042 INIT_WORK(&adapter->service_task, ixgbe_service_task);
11043 set_bit(__IXGBE_SERVICE_INITED, &adapter->state);
11044 clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state);
11045
11046 err = ixgbe_init_interrupt_scheme(adapter);
11047 if (err)
11048 goto err_sw_init;
11049
11050 for (i = 0; i < adapter->num_rx_queues; i++)
11051 u64_stats_init(&adapter->rx_ring[i]->syncp);
11052 for (i = 0; i < adapter->num_tx_queues; i++)
11053 u64_stats_init(&adapter->tx_ring[i]->syncp);
11054 for (i = 0; i < adapter->num_xdp_queues; i++)
11055 u64_stats_init(&adapter->xdp_ring[i]->syncp);
11056
11057
11058 adapter->wol = 0;
11059 hw->eeprom.ops.read(hw, 0x2c, &adapter->eeprom_cap);
11060 hw->wol_enabled = ixgbe_wol_supported(adapter, pdev->device,
11061 pdev->subsystem_device);
11062 if (hw->wol_enabled)
11063 adapter->wol = IXGBE_WUFC_MAG;
11064
11065 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
11066
11067
11068 ixgbe_set_fw_version(adapter);
11069
11070
11071 if (ixgbe_pcie_from_parent(hw))
11072 ixgbe_get_parent_bus_info(adapter);
11073 else
11074 hw->mac.ops.get_bus_info(hw);
11075
11076 /* calculate the expected PCIe bandwidth required for optimal
11077  * performance. Note that some older parts will never have enough
11078  * bandwidth due to being older generation PCIe parts. We clamp these
11079  * parts to ensure no warning is displayed if it can't be fixed.
11080  */
11081 switch (hw->mac.type) {
11082 case ixgbe_mac_82598EB:
11083 expected_gts = min(ixgbe_enumerate_functions(adapter) * 10, 16);
11084 break;
11085 default:
11086 expected_gts = ixgbe_enumerate_functions(adapter) * 10;
11087 break;
11088 }
11089
11090 /* don't check link if we failed to enumerate functions */
11091 if (expected_gts > 0)
11092 ixgbe_check_minimum_link(adapter, expected_gts);
11093
11094 err = ixgbe_read_pba_string_generic(hw, part_str, sizeof(part_str));
11095 if (err)
11096 strlcpy(part_str, "Unknown", sizeof(part_str));
11097 if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
11098 e_dev_info("MAC: %d, PHY: %d, SFP+: %d, PBA No: %s\n",
11099 hw->mac.type, hw->phy.type, hw->phy.sfp_type,
11100 part_str);
11101 else
11102 e_dev_info("MAC: %d, PHY: %d, PBA No: %s\n",
11103 hw->mac.type, hw->phy.type, part_str);
11104
11105 e_dev_info("%pM\n", netdev->dev_addr);
11106
11107
11108 err = hw->mac.ops.start_hw(hw);
11109 if (err == IXGBE_ERR_EEPROM_VERSION) {
11110
11111 e_dev_warn("This device is a pre-production adapter/LOM. "
11112 "Please be aware there may be issues associated "
11113 "with your hardware. If you are experiencing "
11114 "problems please contact your Intel or hardware "
11115 "representative who provided you with this "
11116 "hardware.\n");
11117 }
11118 strcpy(netdev->name, "eth%d");
11119 pci_set_drvdata(pdev, adapter);
11120 err = register_netdev(netdev);
11121 if (err)
11122 goto err_register;
11123
11124
11125 /* power down the optics for 82599 SFP+ fiber */
11126 if (hw->mac.ops.disable_tx_laser)
11127 hw->mac.ops.disable_tx_laser(hw);
11128
11129 /* carrier off reporting is important to ethtool even BEFORE open */
11130 netif_carrier_off(netdev);
11131
11132 #ifdef CONFIG_IXGBE_DCA
11133 if (dca_add_requester(&pdev->dev) == 0) {
11134 adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
11135 ixgbe_setup_dca(adapter);
11136 }
11137 #endif
11138 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
11139 e_info(probe, "IOV is enabled with %d VFs\n", adapter->num_vfs);
11140 for (i = 0; i < adapter->num_vfs; i++)
11141 ixgbe_vf_configuration(pdev, (i | 0x10000000));
11142 }
11143
11144 /* firmware requires the driver version to be reported as
11145  * 0xFFFFFFFF since the OS does not support that feature
11146  */
11147 if (hw->mac.ops.set_fw_drv_ver)
11148 hw->mac.ops.set_fw_drv_ver(hw, 0xFF, 0xFF, 0xFF, 0xFF,
11149 sizeof(ixgbe_driver_version) - 1,
11150 ixgbe_driver_version);
11151
11152
11153 ixgbe_add_sanmac_netdev(netdev);
11154
11155 e_dev_info("%s\n", ixgbe_default_device_descr);
11156
11157 #ifdef CONFIG_IXGBE_HWMON
11158 if (ixgbe_sysfs_init(adapter))
11159 e_err(probe, "failed to allocate sysfs resources\n");
11160 #endif
11161
11162 ixgbe_dbg_adapter_init(adapter);
11163
11164 /* setup link for SFP devices with MNG FW, else wait for IXGBE_UP */
11165 if (ixgbe_mng_enabled(hw) && ixgbe_is_sfp(hw) && hw->mac.ops.setup_link)
11166 hw->mac.ops.setup_link(hw,
11167 IXGBE_LINK_SPEED_10GB_FULL | IXGBE_LINK_SPEED_1GB_FULL,
11168 true);
11169
11170 ixgbe_mii_bus_init(hw);
11171
11172 return 0;
11173
11174 err_register:
11175 ixgbe_release_hw_control(adapter);
11176 ixgbe_clear_interrupt_scheme(adapter);
11177 err_sw_init:
11178 ixgbe_disable_sriov(adapter);
11179 adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP;
11180 iounmap(adapter->io_addr);
11181 kfree(adapter->jump_tables[0]);
11182 kfree(adapter->mac_table);
11183 kfree(adapter->rss_key);
11184 bitmap_free(adapter->af_xdp_zc_qps);
11185 err_ioremap:
11186 disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state);
11187 free_netdev(netdev);
11188 err_alloc_etherdev:
11189 pci_release_mem_regions(pdev);
11190 err_pci_reg:
11191 err_dma:
11192 if (!adapter || disable_dev)
11193 pci_disable_device(pdev);
11194 return err;
11195 }
11196
11197 /**
11198  * ixgbe_remove - Device Removal Routine
11199  * @pdev: PCI device information struct
11200  *
11201  * ixgbe_remove is called by the PCI subsystem to alert the driver
11202  * that it should release a PCI device.  This could be caused by a
11203  * Hot-Plug event, or because the driver is going to be removed from
11204  * memory.
11205  **/
11206 static void ixgbe_remove(struct pci_dev *pdev)
11207 {
11208 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
11209 struct net_device *netdev;
11210 bool disable_dev;
11211 int i;
11212
11213 /* if !adapter then we already cleaned up in probe */
11214 if (!adapter)
11215 return;
11216
11217 netdev = adapter->netdev;
11218 ixgbe_dbg_adapter_exit(adapter);
11219
11220 set_bit(__IXGBE_REMOVING, &adapter->state);
11221 cancel_work_sync(&adapter->service_task);
11222
11223 if (adapter->mii_bus)
11224 mdiobus_unregister(adapter->mii_bus);
11225
11226 #ifdef CONFIG_IXGBE_DCA
11227 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
11228 adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
11229 dca_remove_requester(&pdev->dev);
11230 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL,
11231 IXGBE_DCA_CTRL_DCA_DISABLE);
11232 }
11233
11234 #endif
11235 #ifdef CONFIG_IXGBE_HWMON
11236 ixgbe_sysfs_exit(adapter);
11237 #endif
11238
11239
11240 ixgbe_del_sanmac_netdev(netdev);
11241
11242 #ifdef CONFIG_PCI_IOV
11243 ixgbe_disable_sriov(adapter);
11244 #endif
11245 if (netdev->reg_state == NETREG_REGISTERED)
11246 unregister_netdev(netdev);
11247
11248 ixgbe_stop_ipsec_offload(adapter);
11249 ixgbe_clear_interrupt_scheme(adapter);
11250
11251 ixgbe_release_hw_control(adapter);
11252
11253 #ifdef CONFIG_DCB
11254 kfree(adapter->ixgbe_ieee_pfc);
11255 kfree(adapter->ixgbe_ieee_ets);
11256
11257 #endif
11258 iounmap(adapter->io_addr);
11259 pci_release_mem_regions(pdev);
11260
11261 e_dev_info("complete\n");
11262
11263 for (i = 0; i < IXGBE_MAX_LINK_HANDLE; i++) {
11264 if (adapter->jump_tables[i]) {
11265 kfree(adapter->jump_tables[i]->input);
11266 kfree(adapter->jump_tables[i]->mask);
11267 }
11268 kfree(adapter->jump_tables[i]);
11269 }
11270
11271 kfree(adapter->mac_table);
11272 kfree(adapter->rss_key);
11273 bitmap_free(adapter->af_xdp_zc_qps);
11274 disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state);
11275 free_netdev(netdev);
11276
11277 pci_disable_pcie_error_reporting(pdev);
11278
11279 if (disable_dev)
11280 pci_disable_device(pdev);
11281 }
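
/**
 * ixgbe_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */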
static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

#ifdef CONFIG_PCI_IOV
	struct ixgbe_hw *hw = &adapter->hw;
	struct pci_dev *bdev, *vfdev;
	u32 dw0, dw1, dw2, dw3;
	int vf, pos;
	u16 req_id, pf_func;

	if (adapter->hw.mac.type == ixgbe_mac_82598EB ||
	    adapter->num_vfs == 0)
		goto skip_bad_vf_detection;

	bdev = pdev->bus->self;
	while (bdev && (pci_pcie_type(bdev) != PCI_EXP_TYPE_ROOT_PORT))
		bdev = bdev->bus->self;

	if (!bdev)
		goto skip_bad_vf_detection;

	pos = pci_find_ext_capability(bdev, PCI_EXT_CAP_ID_ERR);
	if (!pos)
		goto skip_bad_vf_detection;

	dw0 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG);
	dw1 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG + 4);
	dw2 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG + 8);
	dw3 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG + 12);
	if (ixgbe_removed(hw->hw_addr))
		goto skip_bad_vf_detection;

	req_id = dw1 >> 16;
	/* if bit 7 of the requester ID is set then it is a VF */
	if (!(req_id & 0x0080))
		goto skip_bad_vf_detection;

	pf_func = req_id & 0x01;
	if ((pf_func & 1) == (pdev->devfn & 1)) {
		unsigned int device_id;

		vf = (req_id & 0x7F) >> 1;
		e_dev_err("VF %d has caused a PCIe error\n", vf);
		e_dev_err("TLP: dw0: %8.8x\tdw1: %8.8x\tdw2: "
			  "%8.8x\tdw3: %8.8x\n",
			  dw0, dw1, dw2, dw3);
		switch (adapter->hw.mac.type) {
		case ixgbe_mac_82599EB:
			device_id = IXGBE_82599_VF_DEVICE_ID;
			break;
		case ixgbe_mac_X540:
			device_id = IXGBE_X540_VF_DEVICE_ID;
			break;
		case ixgbe_mac_X550:
			device_id = IXGBE_DEV_ID_X550_VF;
			break;
		case ixgbe_mac_X550EM_x:
			device_id = IXGBE_DEV_ID_X550EM_X_VF;
			break;
		case ixgbe_mac_x550em_a:
			device_id = IXGBE_DEV_ID_X550EM_A_VF;
			break;
		default:
			device_id = 0;
			break;
		}

		/* Find the pci device of the offending VF */
		vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, device_id, NULL);
		while (vfdev) {
			if (vfdev->devfn == (req_id & 0xFF))
				break;
			vfdev = pci_get_device(PCI_VENDOR_ID_INTEL,
					       device_id, vfdev);
		}
		/* There's a slim chance the VF could have been hot
		 * plugged, so if it is no longer present we don't need
		 * to issue the VFLR.
		 */
		if (vfdev) {
			pcie_flr(vfdev);
			/* Free device reference count */
			pci_dev_put(vfdev);
		}
	}

	/* Even though the error may have occurred on the other port
	 * we still need to increment the VF error reference count for
	 * both ports because the I/O resume function will be called
	 * for both of them.
	 */
	adapter->vferr_refcount++;

	return PCI_ERS_RESULT_RECOVERED;

skip_bad_vf_detection:
#endif /* CONFIG_PCI_IOV */
	if (!test_bit(__IXGBE_SERVICE_INITED, &adapter->state))
		return PCI_ERS_RESULT_DISCONNECT;

	if (!netif_device_present(netdev))
		return PCI_ERS_RESULT_DISCONNECT;

	rtnl_lock();
	netif_device_detach(netdev);

	if (netif_running(netdev))
		ixgbe_close_suspend(adapter);

	if (state == pci_channel_io_perm_failure) {
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (!test_and_set_bit(__IXGBE_DISABLED, &adapter->state))
		pci_disable_device(pdev);
	rtnl_unlock();

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
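
/**
 * ixgbe_io_slot_reset - called after the pci bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */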
static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
{
	struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
	pci_ers_result_t result;

	if (pci_enable_device_mem(pdev)) {
		e_err(probe, "Cannot re-enable PCI device after reset.\n");
		result = PCI_ERS_RESULT_DISCONNECT;
	} else {
		smp_mb__before_atomic();
		clear_bit(__IXGBE_DISABLED, &adapter->state);
		adapter->hw.hw_addr = adapter->io_addr;
		pci_set_master(pdev);
		pci_restore_state(pdev);
		pci_save_state(pdev);

		pci_wake_from_d3(pdev, false);

		ixgbe_reset(adapter);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
		result = PCI_ERS_RESULT_RECOVERED;
	}

	return result;
}
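
/**
 * ixgbe_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it is OK to resume normal operation.
 */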
static void ixgbe_io_resume(struct pci_dev *pdev)
{
	struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

#ifdef CONFIG_PCI_IOV
	if (adapter->vferr_refcount) {
		e_info(drv, "Resuming after VF err\n");
		adapter->vferr_refcount--;
		return;
	}

#endif
	rtnl_lock();
	if (netif_running(netdev))
		ixgbe_open(netdev);

	netif_device_attach(netdev);
	rtnl_unlock();
}

static const struct pci_error_handlers ixgbe_err_handler = {
	.error_detected = ixgbe_io_error_detected,
	.slot_reset = ixgbe_io_slot_reset,
	.resume = ixgbe_io_resume,
};

static struct pci_driver ixgbe_driver = {
	.name = ixgbe_driver_name,
	.id_table = ixgbe_pci_tbl,
	.probe = ixgbe_probe,
	.remove = ixgbe_remove,
#ifdef CONFIG_PM
	.suspend = ixgbe_suspend,
	.resume = ixgbe_resume,
#endif
	.shutdown = ixgbe_shutdown,
	.sriov_configure = ixgbe_pci_sriov_configure,
	.err_handler = &ixgbe_err_handler
};
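
/**
 * ixgbe_init_module - Driver Registration Routine
 *
 * ixgbe_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/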
static int __init ixgbe_init_module(void)
{
	int ret;

	pr_info("%s - version %s\n", ixgbe_driver_string, ixgbe_driver_version);
	pr_info("%s\n", ixgbe_copyright);

	ixgbe_wq = create_singlethread_workqueue(ixgbe_driver_name);
	if (!ixgbe_wq) {
		pr_err("%s: Failed to create workqueue\n", ixgbe_driver_name);
		return -ENOMEM;
	}

	ixgbe_dbg_init();

	ret = pci_register_driver(&ixgbe_driver);
	if (ret) {
		destroy_workqueue(ixgbe_wq);
		ixgbe_dbg_exit();
		return ret;
	}

#ifdef CONFIG_IXGBE_DCA
	dca_register_notify(&dca_notifier);
#endif

	return 0;
}

module_init(ixgbe_init_module);
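
/**
 * ixgbe_exit_module - Driver Exit Cleanup Routine
 *
 * ixgbe_exit_module is called just before the driver is removed
 * from memory.
 **/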
static void __exit ixgbe_exit_module(void)
{
#ifdef CONFIG_IXGBE_DCA
	dca_unregister_notify(&dca_notifier);
#endif
	pci_unregister_driver(&ixgbe_driver);

	ixgbe_dbg_exit();
	if (ixgbe_wq) {
		destroy_workqueue(ixgbe_wq);
		ixgbe_wq = NULL;
	}
}

#ifdef CONFIG_IXGBE_DCA
static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event,
			    void *p)
{
	int ret_val;

	ret_val = driver_for_each_device(&ixgbe_driver.driver, NULL, &event,
					 __ixgbe_notify_dca);

	return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
}

#endif /* CONFIG_IXGBE_DCA */

module_exit(ixgbe_exit_module);