This source file includes the following definitions:
- igb_regdump
- igb_dump
- igb_get_i2c_data
- igb_set_i2c_data
- igb_set_i2c_clk
- igb_get_i2c_clk
- igb_get_hw_dev
- igb_init_module
- igb_exit_module
- igb_cache_ring_register
- igb_rd32
- igb_write_ivar
- igb_assign_vector
- igb_configure_msix
- igb_request_msix
- igb_free_q_vector
- igb_reset_q_vector
- igb_reset_interrupt_capability
- igb_free_q_vectors
- igb_clear_interrupt_scheme
- igb_set_interrupt_capability
- igb_add_ring
- igb_alloc_q_vector
- igb_alloc_q_vectors
- igb_init_interrupt_scheme
- igb_request_irq
- igb_free_irq
- igb_irq_disable
- igb_irq_enable
- igb_update_mng_vlan
- igb_release_hw_control
- igb_get_hw_control
- enable_fqtss
- is_fqtss_enabled
- set_tx_desc_fetch_prio
- set_queue_mode
- is_any_cbs_enabled
- is_any_txtime_enabled
- igb_config_tx_modes
- igb_save_txtime_params
- igb_save_cbs_params
- igb_setup_tx_mode
- igb_configure
- igb_power_up_link
- igb_power_down_link
- igb_check_swap_media
- igb_up
- igb_down
- igb_reinit_locked
- igb_enable_mas
- igb_reset
- igb_fix_features
- igb_set_features
- igb_ndo_fdb_add
- igb_features_check
- igb_offload_apply
- igb_offload_cbs
- igb_parse_cls_flower
- igb_configure_clsflower
- igb_delete_clsflower
- igb_setup_tc_cls_flower
- igb_setup_tc_block_cb
- igb_offload_txtime
- igb_setup_tc
- igb_set_fw_version
- igb_init_mas
- igb_init_i2c
- igb_probe
- igb_disable_sriov
- igb_enable_sriov
- igb_remove_i2c
- igb_remove
- igb_probe_vfs
- igb_get_max_rss_queues
- igb_init_queue_configuration
- igb_set_flag_queue_pairs
- igb_sw_init
- __igb_open
- igb_open
- __igb_close
- igb_close
- igb_setup_tx_resources
- igb_setup_all_tx_resources
- igb_setup_tctl
- igb_configure_tx_ring
- igb_configure_tx
- igb_setup_rx_resources
- igb_setup_all_rx_resources
- igb_setup_mrqc
- igb_setup_rctl
- igb_set_vf_rlpml
- igb_set_vf_vlan_strip
- igb_set_vmolr
- igb_configure_rx_ring
- igb_set_rx_buffer_len
- igb_configure_rx
- igb_free_tx_resources
- igb_free_all_tx_resources
- igb_clean_tx_ring
- igb_clean_all_tx_rings
- igb_free_rx_resources
- igb_free_all_rx_resources
- igb_clean_rx_ring
- igb_clean_all_rx_rings
- igb_set_mac
- igb_write_mc_addr_list
- igb_vlan_promisc_enable
- igb_scrub_vfta
- igb_vlan_promisc_disable
- igb_set_rx_mode
- igb_check_wvbr
- igb_spoof_check
- igb_update_phy_info
- igb_has_link
- igb_thermal_sensor_event
- igb_check_lvmmc
- igb_watchdog
- igb_watchdog_task
- igb_update_ring_itr
- igb_update_itr
- igb_set_itr
- igb_tx_ctxtdesc
- igb_tso
- igb_ipv6_csum_is_sctp
- igb_tx_csum
- igb_tx_cmd_type
- igb_tx_olinfo_status
- __igb_maybe_stop_tx
- igb_maybe_stop_tx
- igb_tx_map
- igb_xmit_frame_ring
- igb_tx_queue_mapping
- igb_xmit_frame
- igb_tx_timeout
- igb_reset_task
- igb_get_stats64
- igb_change_mtu
- igb_update_stats
- igb_tsync_interrupt
- igb_msix_other
- igb_write_itr
- igb_msix_ring
- igb_update_tx_dca
- igb_update_rx_dca
- igb_update_dca
- igb_setup_dca
- __igb_notify_dca
- igb_notify_dca
- igb_vf_configure
- igb_ping_all_vfs
- igb_set_vf_promisc
- igb_set_vf_multicasts
- igb_restore_vf_multicasts
- igb_clear_vf_vfta
- igb_find_vlvf_entry
- igb_update_pf_vlvf
- igb_set_vf_vlan
- igb_set_vmvir
- igb_enable_port_vlan
- igb_disable_port_vlan
- igb_ndo_set_vf_vlan
- igb_set_vf_vlan_msg
- igb_vf_reset
- igb_vf_reset_event
- igb_vf_reset_msg
- igb_flush_mac_table
- igb_available_rars
- igb_set_default_mac_filter
- igb_mac_entry_can_be_used
- igb_add_mac_filter_flags
- igb_add_mac_filter
- igb_del_mac_filter_flags
- igb_del_mac_filter
- igb_add_mac_steering_filter
- igb_del_mac_steering_filter
- igb_uc_sync
- igb_uc_unsync
- igb_set_vf_mac_filter
- igb_set_vf_mac_addr
- igb_rcv_ack_from_vf
- igb_rcv_msg_from_vf
- igb_msg_task
- igb_set_uta
- igb_intr_msi
- igb_intr
- igb_ring_irq_enable
- igb_poll
- igb_clean_tx_irq
- igb_reuse_rx_page
- igb_page_is_reserved
- igb_can_reuse_rx_page
- igb_add_rx_frag
- igb_construct_skb
- igb_build_skb
- igb_rx_checksum
- igb_rx_hash
- igb_is_non_eop
- igb_cleanup_headers
- igb_process_skb_fields
- igb_get_rx_buffer
- igb_put_rx_buffer
- igb_clean_rx_irq
- igb_rx_offset
- igb_alloc_mapped_page
- igb_alloc_rx_buffers
- igb_mii_ioctl
- igb_ioctl
- igb_read_pci_cfg
- igb_write_pci_cfg
- igb_read_pcie_cap_reg
- igb_write_pcie_cap_reg
- igb_vlan_mode
- igb_vlan_rx_add_vid
- igb_vlan_rx_kill_vid
- igb_restore_vlan
- igb_set_spd_dplx
- __igb_shutdown
- igb_deliver_wake_packet
- igb_suspend
- igb_resume
- igb_runtime_idle
- igb_runtime_suspend
- igb_runtime_resume
- igb_shutdown
- igb_sriov_reinit
- igb_pci_disable_sriov
- igb_pci_enable_sriov
- igb_pci_sriov_configure
- igb_io_error_detected
- igb_io_slot_reset
- igb_io_resume
- igb_rar_set_index
- igb_set_vf_mac
- igb_ndo_set_vf_mac
- igb_link_mbps
- igb_set_vf_rate_limit
- igb_check_vf_rate_limit
- igb_ndo_set_vf_bw
- igb_ndo_set_vf_spoofchk
- igb_ndo_set_vf_trust
- igb_ndo_get_vf_config
- igb_vmm_control
- igb_init_dmac
- igb_read_i2c_byte
- igb_write_i2c_byte
- igb_reinit_queues
- igb_nfc_filter_exit
- igb_nfc_filter_restore
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 2007 - 2018 Intel Corporation. */
3
4 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
5
6 #include <linux/module.h>
7 #include <linux/types.h>
8 #include <linux/init.h>
9 #include <linux/bitops.h>
10 #include <linux/vmalloc.h>
11 #include <linux/pagemap.h>
12 #include <linux/netdevice.h>
13 #include <linux/ipv6.h>
14 #include <linux/slab.h>
15 #include <net/checksum.h>
16 #include <net/ip6_checksum.h>
17 #include <net/pkt_sched.h>
18 #include <net/pkt_cls.h>
19 #include <linux/net_tstamp.h>
20 #include <linux/mii.h>
21 #include <linux/ethtool.h>
22 #include <linux/if.h>
23 #include <linux/if_vlan.h>
24 #include <linux/pci.h>
25 #include <linux/delay.h>
26 #include <linux/interrupt.h>
27 #include <linux/ip.h>
28 #include <linux/tcp.h>
29 #include <linux/sctp.h>
30 #include <linux/if_ether.h>
31 #include <linux/aer.h>
32 #include <linux/prefetch.h>
33 #include <linux/pm_runtime.h>
34 #include <linux/etherdevice.h>
35 #ifdef CONFIG_IGB_DCA
36 #include <linux/dca.h>
37 #endif
38 #include <linux/i2c.h>
39 #include "igb.h"
40
41 #define MAJ 5
42 #define MIN 6
43 #define BUILD 0
44 #define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
45 __stringify(BUILD) "-k"
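/* For clarity (not part of the original source): with the values above,
 * DRV_VERSION expands to the string "5.6.0-k".
 */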
46
47 enum queue_mode {
48 QUEUE_MODE_STRICT_PRIORITY,
49 QUEUE_MODE_STREAM_RESERVATION,
50 };
51
52 enum tx_queue_prio {
53 TX_QUEUE_PRIO_HIGH,
54 TX_QUEUE_PRIO_LOW,
55 };
56
57 char igb_driver_name[] = "igb";
58 char igb_driver_version[] = DRV_VERSION;
59 static const char igb_driver_string[] =
60 "Intel(R) Gigabit Ethernet Network Driver";
61 static const char igb_copyright[] =
62 "Copyright (c) 2007-2014 Intel Corporation.";
63
64 static const struct e1000_info *igb_info_tbl[] = {
65 [board_82575] = &e1000_82575_info,
66 };
67
68 static const struct pci_device_id igb_pci_tbl[] = {
69 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_BACKPLANE_1GBPS) },
70 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_SGMII) },
71 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_BACKPLANE_2_5GBPS) },
72 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I211_COPPER), board_82575 },
73 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_COPPER), board_82575 },
74 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_FIBER), board_82575 },
75 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SERDES), board_82575 },
76 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SGMII), board_82575 },
77 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_COPPER_FLASHLESS), board_82575 },
78 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SERDES_FLASHLESS), board_82575 },
79 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_COPPER), board_82575 },
80 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_FIBER), board_82575 },
81 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SERDES), board_82575 },
82 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SGMII), board_82575 },
83 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER), board_82575 },
84 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_FIBER), board_82575 },
85 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_QUAD_FIBER), board_82575 },
86 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES), board_82575 },
87 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SGMII), board_82575 },
88 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER_DUAL), board_82575 },
89 { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SGMII), board_82575 },
90 { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SERDES), board_82575 },
91 { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_BACKPLANE), board_82575 },
92 { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SFP), board_82575 },
93 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576), board_82575 },
94 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS), board_82575 },
95 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS_SERDES), board_82575 },
96 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER), board_82575 },
97 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES), board_82575 },
98 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES_QUAD), board_82575 },
99 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER_ET2), board_82575 },
100 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER), board_82575 },
101 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER), board_82575 },
102 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES), board_82575 },
103 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575GB_QUAD_COPPER), board_82575 },
104
105 {0, }
106 };
107
108 MODULE_DEVICE_TABLE(pci, igb_pci_tbl);
109
110 static int igb_setup_all_tx_resources(struct igb_adapter *);
111 static int igb_setup_all_rx_resources(struct igb_adapter *);
112 static void igb_free_all_tx_resources(struct igb_adapter *);
113 static void igb_free_all_rx_resources(struct igb_adapter *);
114 static void igb_setup_mrqc(struct igb_adapter *);
115 static int igb_probe(struct pci_dev *, const struct pci_device_id *);
116 static void igb_remove(struct pci_dev *pdev);
117 static int igb_sw_init(struct igb_adapter *);
118 int igb_open(struct net_device *);
119 int igb_close(struct net_device *);
120 static void igb_configure(struct igb_adapter *);
121 static void igb_configure_tx(struct igb_adapter *);
122 static void igb_configure_rx(struct igb_adapter *);
123 static void igb_clean_all_tx_rings(struct igb_adapter *);
124 static void igb_clean_all_rx_rings(struct igb_adapter *);
125 static void igb_clean_tx_ring(struct igb_ring *);
126 static void igb_clean_rx_ring(struct igb_ring *);
127 static void igb_set_rx_mode(struct net_device *);
128 static void igb_update_phy_info(struct timer_list *);
129 static void igb_watchdog(struct timer_list *);
130 static void igb_watchdog_task(struct work_struct *);
131 static netdev_tx_t igb_xmit_frame(struct sk_buff *skb, struct net_device *);
132 static void igb_get_stats64(struct net_device *dev,
133 struct rtnl_link_stats64 *stats);
134 static int igb_change_mtu(struct net_device *, int);
135 static int igb_set_mac(struct net_device *, void *);
136 static void igb_set_uta(struct igb_adapter *adapter, bool set);
137 static irqreturn_t igb_intr(int irq, void *);
138 static irqreturn_t igb_intr_msi(int irq, void *);
139 static irqreturn_t igb_msix_other(int irq, void *);
140 static irqreturn_t igb_msix_ring(int irq, void *);
141 #ifdef CONFIG_IGB_DCA
142 static void igb_update_dca(struct igb_q_vector *);
143 static void igb_setup_dca(struct igb_adapter *);
144 #endif
145 static int igb_poll(struct napi_struct *, int);
146 static bool igb_clean_tx_irq(struct igb_q_vector *, int);
147 static int igb_clean_rx_irq(struct igb_q_vector *, int);
148 static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
149 static void igb_tx_timeout(struct net_device *);
150 static void igb_reset_task(struct work_struct *);
151 static void igb_vlan_mode(struct net_device *netdev,
152 netdev_features_t features);
153 static int igb_vlan_rx_add_vid(struct net_device *, __be16, u16);
154 static int igb_vlan_rx_kill_vid(struct net_device *, __be16, u16);
155 static void igb_restore_vlan(struct igb_adapter *);
156 static void igb_rar_set_index(struct igb_adapter *, u32);
157 static void igb_ping_all_vfs(struct igb_adapter *);
158 static void igb_msg_task(struct igb_adapter *);
159 static void igb_vmm_control(struct igb_adapter *);
160 static int igb_set_vf_mac(struct igb_adapter *, int, unsigned char *);
161 static void igb_flush_mac_table(struct igb_adapter *);
162 static int igb_available_rars(struct igb_adapter *, u8);
163 static void igb_set_default_mac_filter(struct igb_adapter *);
164 static int igb_uc_sync(struct net_device *, const unsigned char *);
165 static int igb_uc_unsync(struct net_device *, const unsigned char *);
166 static void igb_restore_vf_multicasts(struct igb_adapter *adapter);
167 static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac);
168 static int igb_ndo_set_vf_vlan(struct net_device *netdev,
169 int vf, u16 vlan, u8 qos, __be16 vlan_proto);
170 static int igb_ndo_set_vf_bw(struct net_device *, int, int, int);
171 static int igb_ndo_set_vf_spoofchk(struct net_device *netdev, int vf,
172 bool setting);
173 static int igb_ndo_set_vf_trust(struct net_device *netdev, int vf,
174 bool setting);
175 static int igb_ndo_get_vf_config(struct net_device *netdev, int vf,
176 struct ifla_vf_info *ivi);
177 static void igb_check_vf_rate_limit(struct igb_adapter *);
178 static void igb_nfc_filter_exit(struct igb_adapter *adapter);
179 static void igb_nfc_filter_restore(struct igb_adapter *adapter);
180
181 #ifdef CONFIG_PCI_IOV
182 static int igb_vf_configure(struct igb_adapter *adapter, int vf);
183 static int igb_pci_enable_sriov(struct pci_dev *dev, int num_vfs);
184 static int igb_disable_sriov(struct pci_dev *dev);
185 static int igb_pci_disable_sriov(struct pci_dev *dev);
186 #endif
187
188 static int igb_suspend(struct device *);
189 static int igb_resume(struct device *);
190 static int igb_runtime_suspend(struct device *dev);
191 static int igb_runtime_resume(struct device *dev);
192 static int igb_runtime_idle(struct device *dev);
193 static const struct dev_pm_ops igb_pm_ops = {
194 SET_SYSTEM_SLEEP_PM_OPS(igb_suspend, igb_resume)
195 SET_RUNTIME_PM_OPS(igb_runtime_suspend, igb_runtime_resume,
196 igb_runtime_idle)
197 };
198 static void igb_shutdown(struct pci_dev *);
199 static int igb_pci_sriov_configure(struct pci_dev *dev, int num_vfs);
200 #ifdef CONFIG_IGB_DCA
201 static int igb_notify_dca(struct notifier_block *, unsigned long, void *);
202 static struct notifier_block dca_notifier = {
203 .notifier_call = igb_notify_dca,
204 .next = NULL,
205 .priority = 0
206 };
207 #endif
208 #ifdef CONFIG_PCI_IOV
209 static unsigned int max_vfs;
210 module_param(max_vfs, uint, 0);
211 MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate per physical function");
212 #endif
213
214 static pci_ers_result_t igb_io_error_detected(struct pci_dev *,
215 pci_channel_state_t);
216 static pci_ers_result_t igb_io_slot_reset(struct pci_dev *);
217 static void igb_io_resume(struct pci_dev *);
218
219 static const struct pci_error_handlers igb_err_handler = {
220 .error_detected = igb_io_error_detected,
221 .slot_reset = igb_io_slot_reset,
222 .resume = igb_io_resume,
223 };
224
225 static void igb_init_dmac(struct igb_adapter *adapter, u32 pba);
226
227 static struct pci_driver igb_driver = {
228 .name = igb_driver_name,
229 .id_table = igb_pci_tbl,
230 .probe = igb_probe,
231 .remove = igb_remove,
232 #ifdef CONFIG_PM
233 .driver.pm = &igb_pm_ops,
234 #endif
235 .shutdown = igb_shutdown,
236 .sriov_configure = igb_pci_sriov_configure,
237 .err_handler = &igb_err_handler
238 };
239
240 MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
241 MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver");
242 MODULE_LICENSE("GPL v2");
243 MODULE_VERSION(DRV_VERSION);
244
245 #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
246 static int debug = -1;
247 module_param(debug, int, 0);
248 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
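/* Note (added for clarity, describing code outside this section): the default
 * of -1 means "use the driver default"; during probe the parameter is passed
 * through netif_msg_init(debug, DEFAULT_MSG_ENABLE), so -1 selects the
 * DRV/PROBE/LINK message categories defined above.
 */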
249
250 struct igb_reg_info {
251 u32 ofs;
252 char *name;
253 };
254
255 static const struct igb_reg_info igb_reg_info_tbl[] = {
256
257 /* General Registers */
258 {E1000_CTRL, "CTRL"},
259 {E1000_STATUS, "STATUS"},
260 {E1000_CTRL_EXT, "CTRL_EXT"},
261
262 /* Interrupt Registers */
263 {E1000_ICR, "ICR"},
264
265 /* RX Registers */
266 {E1000_RCTL, "RCTL"},
267 {E1000_RDLEN(0), "RDLEN"},
268 {E1000_RDH(0), "RDH"},
269 {E1000_RDT(0), "RDT"},
270 {E1000_RXDCTL(0), "RXDCTL"},
271 {E1000_RDBAL(0), "RDBAL"},
272 {E1000_RDBAH(0), "RDBAH"},
273
274 /* TX Registers */
275 {E1000_TCTL, "TCTL"},
276 {E1000_TDBAL(0), "TDBAL"},
277 {E1000_TDBAH(0), "TDBAH"},
278 {E1000_TDLEN(0), "TDLEN"},
279 {E1000_TDH(0), "TDH"},
280 {E1000_TDT(0), "TDT"},
281 {E1000_TXDCTL(0), "TXDCTL"},
282 {E1000_TDFH, "TDFH"},
283 {E1000_TDFT, "TDFT"},
284 {E1000_TDFHS, "TDFHS"},
285 {E1000_TDFPC, "TDFPC"},
286
287 /* List Terminator */
288 {}
289 };
290
291 /* igb_regdump - register printout routine */
292 static void igb_regdump(struct e1000_hw *hw, struct igb_reg_info *reginfo)
293 {
294 int n = 0;
295 char rname[16];
296 u32 regs[8];
297
298 switch (reginfo->ofs) {
299 case E1000_RDLEN(0):
300 for (n = 0; n < 4; n++)
301 regs[n] = rd32(E1000_RDLEN(n));
302 break;
303 case E1000_RDH(0):
304 for (n = 0; n < 4; n++)
305 regs[n] = rd32(E1000_RDH(n));
306 break;
307 case E1000_RDT(0):
308 for (n = 0; n < 4; n++)
309 regs[n] = rd32(E1000_RDT(n));
310 break;
311 case E1000_RXDCTL(0):
312 for (n = 0; n < 4; n++)
313 regs[n] = rd32(E1000_RXDCTL(n));
314 break;
315 case E1000_RDBAL(0):
316 for (n = 0; n < 4; n++)
317 regs[n] = rd32(E1000_RDBAL(n));
318 break;
319 case E1000_RDBAH(0):
320 for (n = 0; n < 4; n++)
321 regs[n] = rd32(E1000_RDBAH(n));
322 break;
323 case E1000_TDBAL(0):
324 for (n = 0; n < 4; n++)
325 regs[n] = rd32(E1000_TDBAL(n));
326 break;
327 case E1000_TDBAH(0):
328 for (n = 0; n < 4; n++)
329 regs[n] = rd32(E1000_TDBAH(n));
330 break;
331 case E1000_TDLEN(0):
332 for (n = 0; n < 4; n++)
333 regs[n] = rd32(E1000_TDLEN(n));
334 break;
335 case E1000_TDH(0):
336 for (n = 0; n < 4; n++)
337 regs[n] = rd32(E1000_TDH(n));
338 break;
339 case E1000_TDT(0):
340 for (n = 0; n < 4; n++)
341 regs[n] = rd32(E1000_TDT(n));
342 break;
343 case E1000_TXDCTL(0):
344 for (n = 0; n < 4; n++)
345 regs[n] = rd32(E1000_TXDCTL(n));
346 break;
347 default:
348 pr_info("%-15s %08x\n", reginfo->name, rd32(reginfo->ofs));
349 return;
350 }
351
352 snprintf(rname, 16, "%s%s", reginfo->name, "[0-3]");
353 pr_info("%-15s %08x %08x %08x %08x\n", rname, regs[0], regs[1],
354 regs[2], regs[3]);
355 }
356
357
358 static void igb_dump(struct igb_adapter *adapter)
359 {
360 struct net_device *netdev = adapter->netdev;
361 struct e1000_hw *hw = &adapter->hw;
362 struct igb_reg_info *reginfo;
363 struct igb_ring *tx_ring;
364 union e1000_adv_tx_desc *tx_desc;
365 struct my_u0 { u64 a; u64 b; } *u0;
366 struct igb_ring *rx_ring;
367 union e1000_adv_rx_desc *rx_desc;
368 u32 staterr;
369 u16 i, n;
370
371 if (!netif_msg_hw(adapter))
372 return;
373
374
375 if (netdev) {
376 dev_info(&adapter->pdev->dev, "Net device Info\n");
377 pr_info("Device Name state trans_start\n");
378 pr_info("%-15s %016lX %016lX\n", netdev->name,
379 netdev->state, dev_trans_start(netdev));
380 }
381
382
383 dev_info(&adapter->pdev->dev, "Register Dump\n");
384 pr_info(" Register Name Value\n");
385 for (reginfo = (struct igb_reg_info *)igb_reg_info_tbl;
386 reginfo->name; reginfo++) {
387 igb_regdump(hw, reginfo);
388 }
389
390
391 if (!netdev || !netif_running(netdev))
392 goto exit;
393
394 dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
395 pr_info("Queue [NTU] [NTC] [bi(ntc)->dma ] leng ntw timestamp\n");
396 for (n = 0; n < adapter->num_tx_queues; n++) {
397 struct igb_tx_buffer *buffer_info;
398 tx_ring = adapter->tx_ring[n];
399 buffer_info = &tx_ring->tx_buffer_info[tx_ring->next_to_clean];
400 pr_info(" %5d %5X %5X %016llX %04X %p %016llX\n",
401 n, tx_ring->next_to_use, tx_ring->next_to_clean,
402 (u64)dma_unmap_addr(buffer_info, dma),
403 dma_unmap_len(buffer_info, len),
404 buffer_info->next_to_watch,
405 (u64)buffer_info->time_stamp);
406 }
407
408
409 if (!netif_msg_tx_done(adapter))
410 goto rx_ring_summary;
411
412 dev_info(&adapter->pdev->dev, "TX Rings Dump\n");
413
414 /* Transmit Descriptor Formats
415  *
416  * Advanced Transmit Descriptor
417  *   +--------------------------------------------------------------+
418  * 0 |         Buffer Address [63:0]                                |
419  *   +--------------------------------------------------------------+
420  * 8 | PAYLEN  | PORTS  |CC|IDX | STA | DCMD  |DTYP|MAC|RSV| DTALEN |
421  *   +--------------------------------------------------------------+
422  *   63      46 45    40 39 38 36 35 32 31   24             15       0
423  */
424
425 for (n = 0; n < adapter->num_tx_queues; n++) {
426 tx_ring = adapter->tx_ring[n];
427 pr_info("------------------------------------\n");
428 pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index);
429 pr_info("------------------------------------\n");
430 pr_info("T [desc] [address 63:0 ] [PlPOCIStDDM Ln] [bi->dma ] leng ntw timestamp bi->skb\n");
431
432 for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
433 const char *next_desc;
434 struct igb_tx_buffer *buffer_info;
435 tx_desc = IGB_TX_DESC(tx_ring, i);
436 buffer_info = &tx_ring->tx_buffer_info[i];
437 u0 = (struct my_u0 *)tx_desc;
438 if (i == tx_ring->next_to_use &&
439 i == tx_ring->next_to_clean)
440 next_desc = " NTC/U";
441 else if (i == tx_ring->next_to_use)
442 next_desc = " NTU";
443 else if (i == tx_ring->next_to_clean)
444 next_desc = " NTC";
445 else
446 next_desc = "";
447
448 pr_info("T [0x%03X] %016llX %016llX %016llX %04X %p %016llX %p%s\n",
449 i, le64_to_cpu(u0->a),
450 le64_to_cpu(u0->b),
451 (u64)dma_unmap_addr(buffer_info, dma),
452 dma_unmap_len(buffer_info, len),
453 buffer_info->next_to_watch,
454 (u64)buffer_info->time_stamp,
455 buffer_info->skb, next_desc);
456
457 if (netif_msg_pktdata(adapter) && buffer_info->skb)
458 print_hex_dump(KERN_INFO, "",
459 DUMP_PREFIX_ADDRESS,
460 16, 1, buffer_info->skb->data,
461 dma_unmap_len(buffer_info, len),
462 true);
463 }
464 }
465
466
467 rx_ring_summary:
468 dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
469 pr_info("Queue [NTU] [NTC]\n");
470 for (n = 0; n < adapter->num_rx_queues; n++) {
471 rx_ring = adapter->rx_ring[n];
472 pr_info(" %5d %5X %5X\n",
473 n, rx_ring->next_to_use, rx_ring->next_to_clean);
474 }
475
476
477 if (!netif_msg_rx_status(adapter))
478 goto exit;
479
480 dev_info(&adapter->pdev->dev, "RX Rings Dump\n");
481
482 /* Advanced Receive Descriptor (Read) Format
483  *  0: Packet Buffer Address [63:1], bit 0 = A0/NSE
484  *  8: Header Buffer Address [63:1], bit 0 = DD
485  *
486  * Advanced Receive Descriptor (Write-Back) Format
487  *  0: RSS hash/Packet Checksum, IP ident, SPH, HDR_LEN, Packet Type
488  *  8: VLAN Tag, Length, Extended Error, Extended Status
489  */
490
491
492
493
494
495
496
497
498
499
500
501
502
503 for (n = 0; n < adapter->num_rx_queues; n++) {
504 rx_ring = adapter->rx_ring[n];
505 pr_info("------------------------------------\n");
506 pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index);
507 pr_info("------------------------------------\n");
508 pr_info("R [desc] [ PktBuf A0] [ HeadBuf DD] [bi->dma ] [bi->skb] <-- Adv Rx Read format\n");
509 pr_info("RWB[desc] [PcsmIpSHl PtRs] [vl er S cks ln] ---------------- [bi->skb] <-- Adv Rx Write-Back format\n");
510
511 for (i = 0; i < rx_ring->count; i++) {
512 const char *next_desc;
513 struct igb_rx_buffer *buffer_info;
514 buffer_info = &rx_ring->rx_buffer_info[i];
515 rx_desc = IGB_RX_DESC(rx_ring, i);
516 u0 = (struct my_u0 *)rx_desc;
517 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
518
519 if (i == rx_ring->next_to_use)
520 next_desc = " NTU";
521 else if (i == rx_ring->next_to_clean)
522 next_desc = " NTC";
523 else
524 next_desc = "";
525
526 if (staterr & E1000_RXD_STAT_DD) {
527
528 pr_info("%s[0x%03X] %016llX %016llX ---------------- %s\n",
529 "RWB", i,
530 le64_to_cpu(u0->a),
531 le64_to_cpu(u0->b),
532 next_desc);
533 } else {
534 pr_info("%s[0x%03X] %016llX %016llX %016llX %s\n",
535 "R ", i,
536 le64_to_cpu(u0->a),
537 le64_to_cpu(u0->b),
538 (u64)buffer_info->dma,
539 next_desc);
540
541 if (netif_msg_pktdata(adapter) &&
542 buffer_info->dma && buffer_info->page) {
543 print_hex_dump(KERN_INFO, "",
544 DUMP_PREFIX_ADDRESS,
545 16, 1,
546 page_address(buffer_info->page) +
547 buffer_info->page_offset,
548 igb_rx_bufsz(rx_ring), true);
549 }
550 }
551 }
552 }
553
554 exit:
555 return;
556 }
557
558 /**
559  *  igb_get_i2c_data - Reads the I2C SDA data bit
560  *  @data: pointer to the igb adapter (opaque cookie from the I2C core)
561  *
562  *  Returns the current value of the I2C data (SDA) line, as read from
563  *  the I2CPARAMS register.
564  **/
565 static int igb_get_i2c_data(void *data)
566 {
567 struct igb_adapter *adapter = (struct igb_adapter *)data;
568 struct e1000_hw *hw = &adapter->hw;
569 s32 i2cctl = rd32(E1000_I2CPARAMS);
570
571 return !!(i2cctl & E1000_I2C_DATA_IN);
572 }
573
574 /**
575  *  igb_set_i2c_data - Sets the I2C data bit
576  *  @data: pointer to the igb adapter (opaque cookie from the I2C core)
577  *  @state: I2C data value (0 or 1) to set
578  *
579  *  Drives the I2C data (SDA) line to the requested state.
580  **/
581 static void igb_set_i2c_data(void *data, int state)
582 {
583 struct igb_adapter *adapter = (struct igb_adapter *)data;
584 struct e1000_hw *hw = &adapter->hw;
585 s32 i2cctl = rd32(E1000_I2CPARAMS);
586
587 if (state)
588 i2cctl |= E1000_I2C_DATA_OUT;
589 else
590 i2cctl &= ~E1000_I2C_DATA_OUT;
591
592 i2cctl &= ~E1000_I2C_DATA_OE_N;
593 i2cctl |= E1000_I2C_CLK_OE_N;
594 wr32(E1000_I2CPARAMS, i2cctl);
595 wrfl();
596
597 }
598
599 /**
600  *  igb_set_i2c_clk - Sets the I2C SCL clock
601  *  @data: pointer to the igb adapter (opaque cookie from the I2C core)
602  *  @state: state to set the I2C clock line to
603  *
604  *  Drives the I2C clock (SCL) line to the requested state.
605  **/
606 static void igb_set_i2c_clk(void *data, int state)
607 {
608 struct igb_adapter *adapter = (struct igb_adapter *)data;
609 struct e1000_hw *hw = &adapter->hw;
610 s32 i2cctl = rd32(E1000_I2CPARAMS);
611
612 if (state) {
613 i2cctl |= E1000_I2C_CLK_OUT;
614 i2cctl &= ~E1000_I2C_CLK_OE_N;
615 } else {
616 i2cctl &= ~E1000_I2C_CLK_OUT;
617 i2cctl &= ~E1000_I2C_CLK_OE_N;
618 }
619 wr32(E1000_I2CPARAMS, i2cctl);
620 wrfl();
621 }
622
623 /**
624  *  igb_get_i2c_clk - Gets the I2C SCL clock state
625  *  @data: pointer to the igb adapter (opaque cookie from the I2C core)
626  *
627  *  Returns the current state of the I2C clock (SCL) line.
628  **/
629 static int igb_get_i2c_clk(void *data)
630 {
631 struct igb_adapter *adapter = (struct igb_adapter *)data;
632 struct e1000_hw *hw = &adapter->hw;
633 s32 i2cctl = rd32(E1000_I2CPARAMS);
634
635 return !!(i2cctl & E1000_I2C_CLK_IN);
636 }
637
638 static const struct i2c_algo_bit_data igb_i2c_algo = {
639 .setsda = igb_set_i2c_data,
640 .setscl = igb_set_i2c_clk,
641 .getsda = igb_get_i2c_data,
642 .getscl = igb_get_i2c_clk,
643 .udelay = 5,
644 .timeout = 20,
645 };
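/* Timing note (added for clarity, not from the original source): in the
 * i2c-algo-bit framework .udelay is the SCL half-period in microseconds, so
 * udelay = 5 gives roughly a 100 kHz bit-banged bus, and .timeout bounds how
 * long the algorithm waits out slave clock stretching.
 */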
646
647 /**
648  *  igb_get_hw_dev - return device
649  *  @hw: pointer to hardware structure
650  *
651  *  Used by hardware layer to print debugging information.
652  **/
653 struct net_device *igb_get_hw_dev(struct e1000_hw *hw)
654 {
655 struct igb_adapter *adapter = hw->back;
656 return adapter->netdev;
657 }
658
659 /**
660  *  igb_init_module - Driver Registration Routine
661  *
662  *  igb_init_module is the first routine called when the driver is
663  *  loaded.  All it does is register with the PCI subsystem.
664  **/
665 static int __init igb_init_module(void)
666 {
667 int ret;
668
669 pr_info("%s - version %s\n",
670 igb_driver_string, igb_driver_version);
671 pr_info("%s\n", igb_copyright);
672
673 #ifdef CONFIG_IGB_DCA
674 dca_register_notify(&dca_notifier);
675 #endif
676 ret = pci_register_driver(&igb_driver);
677 return ret;
678 }
679
680 module_init(igb_init_module);
681
682 /**
683  *  igb_exit_module - Driver Exit Cleanup Routine
684  *
685  *  igb_exit_module is called just before the driver is removed
686  *  from memory.
687  **/
688 static void __exit igb_exit_module(void)
689 {
690 #ifdef CONFIG_IGB_DCA
691 dca_unregister_notify(&dca_notifier);
692 #endif
693 pci_unregister_driver(&igb_driver);
694 }
695
696 module_exit(igb_exit_module);
697
698 #define Q_IDX_82576(i) (((i & 0x1) << 3) + (i >> 1))
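/* Worked example (illustrative, not from the original source):
 * Q_IDX_82576(0) = 0, Q_IDX_82576(1) = 8, Q_IDX_82576(2) = 1,
 * Q_IDX_82576(3) = 9, ... i.e. consecutive PF queue indices are interleaved
 * across the low (0-7) and high (8-15) queue register ranges, matching the
 * VF layout where VF 0 owns queues 0 and 8, VF 1 owns 1 and 9, and so on.
 */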
699
700 /**
701  *  igb_cache_ring_register - Descriptor ring to register mapping
702  *  @adapter: board private structure
703  *
704  *  Caches the register offset each descriptor ring is assigned to.
705  **/
706 static void igb_cache_ring_register(struct igb_adapter *adapter)
707 {
708 int i = 0, j = 0;
709 u32 rbase_offset = adapter->vfs_allocated_count;
710
711 switch (adapter->hw.mac.type) {
712 case e1000_82576:
713 /* The queues are allocated for virtualization such that VF 0
714  * is allocated queues 0 and 8, VF 1 queues 1 and 9, etc.
715  * In order to avoid collision we start at the first free queue
716  * and continue consuming queues in the same sequence
717  */
718 if (adapter->vfs_allocated_count) {
719 for (; i < adapter->rss_queues; i++)
720 adapter->rx_ring[i]->reg_idx = rbase_offset +
721 Q_IDX_82576(i);
722 }
723 /* Fall through */
724 case e1000_82575:
725 case e1000_82580:
726 case e1000_i350:
727 case e1000_i354:
728 case e1000_i210:
729 case e1000_i211:
730
731 default:
732 for (; i < adapter->num_rx_queues; i++)
733 adapter->rx_ring[i]->reg_idx = rbase_offset + i;
734 for (; j < adapter->num_tx_queues; j++)
735 adapter->tx_ring[j]->reg_idx = rbase_offset + j;
736 break;
737 }
738 }
739
740 u32 igb_rd32(struct e1000_hw *hw, u32 reg)
741 {
742 struct igb_adapter *igb = container_of(hw, struct igb_adapter, hw);
743 u8 __iomem *hw_addr = READ_ONCE(hw->hw_addr);
744 u32 value = 0;
745
746 if (E1000_REMOVED(hw_addr))
747 return ~value;
748
749 value = readl(&hw_addr[reg]);
750
751 /* reads should not return all F's */
752 if (!(~value) && (!reg || !(~readl(hw_addr)))) {
753 struct net_device *netdev = igb->netdev;
754 hw->hw_addr = NULL;
755 netdev_err(netdev, "PCIe link lost\n");
756 WARN(pci_device_is_present(igb->pdev),
757 "igb: Failed to read reg 0x%x!\n", reg);
758 }
759
760 return value;
761 }
762
763 /**
764  *  igb_write_ivar - configure ivar for given MSI-X vector
765  *  @hw: pointer to the HW structure
766  *  @msix_vector: vector number we are allocating to a given ring
767  *  @index: row index of IVAR register to write within IVAR table
768  *  @offset: column offset in IVAR, should be a multiple of 8
769  *
770  *  This function handles the writing of the IVAR register for
771  *  adapters 82576 and newer.  The IVAR table consists of 2 columns,
772  *  each containing a cause allocation for an rx and tx ring, and a
773  *  variable number of rows depending on the number of queues supported.
774  **/
775 static void igb_write_ivar(struct e1000_hw *hw, int msix_vector,
776 int index, int offset)
777 {
778 u32 ivar = array_rd32(E1000_IVAR0, index);
779
780
781 ivar &= ~((u32)0xFF << offset);
782
783
784 ivar |= (msix_vector | E1000_IVAR_VALID) << offset;
785
786 array_wr32(E1000_IVAR0, index, ivar);
787 }
788
789 #define IGB_N0_QUEUE -1
790 static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
791 {
792 struct igb_adapter *adapter = q_vector->adapter;
793 struct e1000_hw *hw = &adapter->hw;
794 int rx_queue = IGB_N0_QUEUE;
795 int tx_queue = IGB_N0_QUEUE;
796 u32 msixbm = 0;
797
798 if (q_vector->rx.ring)
799 rx_queue = q_vector->rx.ring->reg_idx;
800 if (q_vector->tx.ring)
801 tx_queue = q_vector->tx.ring->reg_idx;
802
803 switch (hw->mac.type) {
804 case e1000_82575:
805 /* The 82575 assigns vectors using a bitmask, which matches the
806  * bitmask for the EICR/EIMS/EIMC registers.  To assign one
807  * or more queues to a vector, we write the appropriate bits
808  * into the MSIXBM register for that vector.
809  */
810 if (rx_queue > IGB_N0_QUEUE)
811 msixbm = E1000_EICR_RX_QUEUE0 << rx_queue;
812 if (tx_queue > IGB_N0_QUEUE)
813 msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue;
814 if (!(adapter->flags & IGB_FLAG_HAS_MSIX) && msix_vector == 0)
815 msixbm |= E1000_EIMS_OTHER;
816 array_wr32(E1000_MSIXBM(0), msix_vector, msixbm);
817 q_vector->eims_value = msixbm;
818 break;
819 case e1000_82576:
820 /* 82576 uses a table that essentially consists of 2 columns
821  * with 8 rows.  The ordering is column-major so we use the
822  * lower 3 bits of the queue index as the row index, and the
823  * 4th bit as the column offset.
824  */
825 if (rx_queue > IGB_N0_QUEUE)
826 igb_write_ivar(hw, msix_vector,
827 rx_queue & 0x7,
828 (rx_queue & 0x8) << 1);
829 if (tx_queue > IGB_N0_QUEUE)
830 igb_write_ivar(hw, msix_vector,
831 tx_queue & 0x7,
832 ((tx_queue & 0x8) << 1) + 8);
833 q_vector->eims_value = BIT(msix_vector);
834 break;
835 case e1000_82580:
836 case e1000_i350:
837 case e1000_i354:
838 case e1000_i210:
839 case e1000_i211:
840 /* On 82580 and newer adapters the scheme is similar to 82576
841  * however instead of ordering column-major we have things
842  * ordered row-major.  So we traverse the table by using
843  * bit 0 as the column offset, and the remaining bits as the
844  * row index.
845  */
846 if (rx_queue > IGB_N0_QUEUE)
847 igb_write_ivar(hw, msix_vector,
848 rx_queue >> 1,
849 (rx_queue & 0x1) << 4);
850 if (tx_queue > IGB_N0_QUEUE)
851 igb_write_ivar(hw, msix_vector,
852 tx_queue >> 1,
853 ((tx_queue & 0x1) << 4) + 8);
854 q_vector->eims_value = BIT(msix_vector);
855 break;
856 default:
857 BUG();
858 break;
859 }
860
861
862 adapter->eims_enable_mask |= q_vector->eims_value;
863
864
865 q_vector->set_itr = 1;
866 }
867
868
869
870
871
872
873
874
875 static void igb_configure_msix(struct igb_adapter *adapter)
876 {
877 u32 tmp;
878 int i, vector = 0;
879 struct e1000_hw *hw = &adapter->hw;
880
881 adapter->eims_enable_mask = 0;
882
883
884 switch (hw->mac.type) {
885 case e1000_82575:
886 tmp = rd32(E1000_CTRL_EXT);
887
888 tmp |= E1000_CTRL_EXT_PBA_CLR;
889
890
891 tmp |= E1000_CTRL_EXT_EIAME;
892 tmp |= E1000_CTRL_EXT_IRCA;
893
894 wr32(E1000_CTRL_EXT, tmp);
895
896
897 array_wr32(E1000_MSIXBM(0), vector++, E1000_EIMS_OTHER);
898 adapter->eims_other = E1000_EIMS_OTHER;
899
900 break;
901
902 case e1000_82576:
903 case e1000_82580:
904 case e1000_i350:
905 case e1000_i354:
906 case e1000_i210:
907 case e1000_i211:
908
909
910
911 wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE |
912 E1000_GPIE_PBA | E1000_GPIE_EIAME |
913 E1000_GPIE_NSICR);
914
915
916 adapter->eims_other = BIT(vector);
917 tmp = (vector++ | E1000_IVAR_VALID) << 8;
918
919 wr32(E1000_IVAR_MISC, tmp);
920 break;
921 default:
922
923 break;
924 }
925
926 adapter->eims_enable_mask |= adapter->eims_other;
927
928 for (i = 0; i < adapter->num_q_vectors; i++)
929 igb_assign_vector(adapter->q_vector[i], vector++);
930
931 wrfl();
932 }
933
934
935
936
937
938
939
940
941 static int igb_request_msix(struct igb_adapter *adapter)
942 {
943 struct net_device *netdev = adapter->netdev;
944 int i, err = 0, vector = 0, free_vector = 0;
945
946 err = request_irq(adapter->msix_entries[vector].vector,
947 igb_msix_other, 0, netdev->name, adapter);
948 if (err)
949 goto err_out;
950
951 for (i = 0; i < adapter->num_q_vectors; i++) {
952 struct igb_q_vector *q_vector = adapter->q_vector[i];
953
954 vector++;
955
956 q_vector->itr_register = adapter->io_addr + E1000_EITR(vector);
957
958 if (q_vector->rx.ring && q_vector->tx.ring)
959 sprintf(q_vector->name, "%s-TxRx-%u", netdev->name,
960 q_vector->rx.ring->queue_index);
961 else if (q_vector->tx.ring)
962 sprintf(q_vector->name, "%s-tx-%u", netdev->name,
963 q_vector->tx.ring->queue_index);
964 else if (q_vector->rx.ring)
965 sprintf(q_vector->name, "%s-rx-%u", netdev->name,
966 q_vector->rx.ring->queue_index);
967 else
968 sprintf(q_vector->name, "%s-unused", netdev->name);
969
970 err = request_irq(adapter->msix_entries[vector].vector,
971 igb_msix_ring, 0, q_vector->name,
972 q_vector);
973 if (err)
974 goto err_free;
975 }
976
977 igb_configure_msix(adapter);
978 return 0;
979
980 err_free:
981
982 free_irq(adapter->msix_entries[free_vector++].vector, adapter);
983
984 vector--;
985 for (i = 0; i < vector; i++) {
986 free_irq(adapter->msix_entries[free_vector++].vector,
987 adapter->q_vector[i]);
988 }
989 err_out:
990 return err;
991 }
992
993
994
995
996
997
998
999
1000 static void igb_free_q_vector(struct igb_adapter *adapter, int v_idx)
1001 {
1002 struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
1003
1004 adapter->q_vector[v_idx] = NULL;
1005
1006
1007
1008
1009 if (q_vector)
1010 kfree_rcu(q_vector, rcu);
1011 }
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021 static void igb_reset_q_vector(struct igb_adapter *adapter, int v_idx)
1022 {
1023 struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
1024
1025
1026
1027
1028 if (!q_vector)
1029 return;
1030
1031 if (q_vector->tx.ring)
1032 adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL;
1033
1034 if (q_vector->rx.ring)
1035 adapter->rx_ring[q_vector->rx.ring->queue_index] = NULL;
1036
1037 netif_napi_del(&q_vector->napi);
1038
1039 }
1040
1041 static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
1042 {
1043 int v_idx = adapter->num_q_vectors;
1044
1045 if (adapter->flags & IGB_FLAG_HAS_MSIX)
1046 pci_disable_msix(adapter->pdev);
1047 else if (adapter->flags & IGB_FLAG_HAS_MSI)
1048 pci_disable_msi(adapter->pdev);
1049
1050 while (v_idx--)
1051 igb_reset_q_vector(adapter, v_idx);
1052 }
1053
1054
1055
1056
1057
1058
1059
1060
1061
1062 static void igb_free_q_vectors(struct igb_adapter *adapter)
1063 {
1064 int v_idx = adapter->num_q_vectors;
1065
1066 adapter->num_tx_queues = 0;
1067 adapter->num_rx_queues = 0;
1068 adapter->num_q_vectors = 0;
1069
1070 while (v_idx--) {
1071 igb_reset_q_vector(adapter, v_idx);
1072 igb_free_q_vector(adapter, v_idx);
1073 }
1074 }
1075
1076
1077
1078
1079
1080
1081
1082
1083 static void igb_clear_interrupt_scheme(struct igb_adapter *adapter)
1084 {
1085 igb_free_q_vectors(adapter);
1086 igb_reset_interrupt_capability(adapter);
1087 }
1088
1089
1090
1091
1092
1093
1094
1095
1096
1097 static void igb_set_interrupt_capability(struct igb_adapter *adapter, bool msix)
1098 {
1099 int err;
1100 int numvecs, i;
1101
1102 if (!msix)
1103 goto msi_only;
1104 adapter->flags |= IGB_FLAG_HAS_MSIX;
1105
1106
1107 adapter->num_rx_queues = adapter->rss_queues;
1108 if (adapter->vfs_allocated_count)
1109 adapter->num_tx_queues = 1;
1110 else
1111 adapter->num_tx_queues = adapter->rss_queues;
1112
1113 /* start with one vector for every Rx queue */
1114 numvecs = adapter->num_rx_queues;
1115
1116 /* if Tx handler is separate add 1 for every Tx queue */
1117 if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS))
1118 numvecs += adapter->num_tx_queues;
1119
1120 /* store the number of vectors reserved for queues */
1121 adapter->num_q_vectors = numvecs;
1122
1123 /* add 1 vector for link status interrupts */
1124 numvecs++;
1125 for (i = 0; i < numvecs; i++)
1126 adapter->msix_entries[i].entry = i;
1127
1128 err = pci_enable_msix_range(adapter->pdev,
1129 adapter->msix_entries,
1130 numvecs,
1131 numvecs);
1132 if (err > 0)
1133 return;
1134
1135 igb_reset_interrupt_capability(adapter);
1136
1137
1138 msi_only:
1139 adapter->flags &= ~IGB_FLAG_HAS_MSIX;
1140 #ifdef CONFIG_PCI_IOV
1141
1142 if (adapter->vf_data) {
1143 struct e1000_hw *hw = &adapter->hw;
1144
1145 pci_disable_sriov(adapter->pdev);
1146 msleep(500);
1147
1148 kfree(adapter->vf_mac_list);
1149 adapter->vf_mac_list = NULL;
1150 kfree(adapter->vf_data);
1151 adapter->vf_data = NULL;
1152 wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
1153 wrfl();
1154 msleep(100);
1155 dev_info(&adapter->pdev->dev, "IOV Disabled\n");
1156 }
1157 #endif
1158 adapter->vfs_allocated_count = 0;
1159 adapter->rss_queues = 1;
1160 adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
1161 adapter->num_rx_queues = 1;
1162 adapter->num_tx_queues = 1;
1163 adapter->num_q_vectors = 1;
1164 if (!pci_enable_msi(adapter->pdev))
1165 adapter->flags |= IGB_FLAG_HAS_MSI;
1166 }
1167
1168 static void igb_add_ring(struct igb_ring *ring,
1169 struct igb_ring_container *head)
1170 {
1171 head->ring = ring;
1172 head->count++;
1173 }
1174
1175
1176
1177
1178
1179
1180
1181
1182
1183
1184
1185
1186
1187 static int igb_alloc_q_vector(struct igb_adapter *adapter,
1188 int v_count, int v_idx,
1189 int txr_count, int txr_idx,
1190 int rxr_count, int rxr_idx)
1191 {
1192 struct igb_q_vector *q_vector;
1193 struct igb_ring *ring;
1194 int ring_count;
1195 size_t size;
1196
1197 /* igb only supports 1 Tx and/or 1 Rx queue per vector */
1198 if (txr_count > 1 || rxr_count > 1)
1199 return -ENOMEM;
1200
1201 ring_count = txr_count + rxr_count;
1202 size = struct_size(q_vector, ring, ring_count);
1203
1204
1205 q_vector = adapter->q_vector[v_idx];
1206 if (!q_vector) {
1207 q_vector = kzalloc(size, GFP_KERNEL);
1208 } else if (size > ksize(q_vector)) {
1209 kfree_rcu(q_vector, rcu);
1210 q_vector = kzalloc(size, GFP_KERNEL);
1211 } else {
1212 memset(q_vector, 0, size);
1213 }
1214 if (!q_vector)
1215 return -ENOMEM;
1216
1217
1218 netif_napi_add(adapter->netdev, &q_vector->napi,
1219 igb_poll, 64);
1220
1221
1222 adapter->q_vector[v_idx] = q_vector;
1223 q_vector->adapter = adapter;
1224
1225
1226 q_vector->tx.work_limit = adapter->tx_work_limit;
1227
1228
1229 q_vector->itr_register = adapter->io_addr + E1000_EITR(0);
1230 q_vector->itr_val = IGB_START_ITR;
1231
1232
1233 ring = q_vector->ring;
1234
1235
1236 if (rxr_count) {
1237
1238 if (!adapter->rx_itr_setting || adapter->rx_itr_setting > 3)
1239 q_vector->itr_val = adapter->rx_itr_setting;
1240 } else {
1241
1242 if (!adapter->tx_itr_setting || adapter->tx_itr_setting > 3)
1243 q_vector->itr_val = adapter->tx_itr_setting;
1244 }
1245
1246 if (txr_count) {
1247
1248 ring->dev = &adapter->pdev->dev;
1249 ring->netdev = adapter->netdev;
1250
1251
1252 ring->q_vector = q_vector;
1253
1254
1255 igb_add_ring(ring, &q_vector->tx);
1256
1257
1258 if (adapter->hw.mac.type == e1000_82575)
1259 set_bit(IGB_RING_FLAG_TX_CTX_IDX, &ring->flags);
1260
1261
1262 ring->count = adapter->tx_ring_count;
1263 ring->queue_index = txr_idx;
1264
1265 ring->cbs_enable = false;
1266 ring->idleslope = 0;
1267 ring->sendslope = 0;
1268 ring->hicredit = 0;
1269 ring->locredit = 0;
1270
1271 u64_stats_init(&ring->tx_syncp);
1272 u64_stats_init(&ring->tx_syncp2);
1273
1274
1275 adapter->tx_ring[txr_idx] = ring;
1276
1277
1278 ring++;
1279 }
1280
1281 if (rxr_count) {
1282
1283 ring->dev = &adapter->pdev->dev;
1284 ring->netdev = adapter->netdev;
1285
1286
1287 ring->q_vector = q_vector;
1288
1289
1290 igb_add_ring(ring, &q_vector->rx);
1291
1292
1293 if (adapter->hw.mac.type >= e1000_82576)
1294 set_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags);
1295
1296
1297
1298
1299 if (adapter->hw.mac.type >= e1000_i350)
1300 set_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &ring->flags);
1301
1302
1303 ring->count = adapter->rx_ring_count;
1304 ring->queue_index = rxr_idx;
1305
1306 u64_stats_init(&ring->rx_syncp);
1307
1308
1309 adapter->rx_ring[rxr_idx] = ring;
1310 }
1311
1312 return 0;
1313 }
1314
1315
1316
1317
1318
1319
1320
1321
1322
1323 static int igb_alloc_q_vectors(struct igb_adapter *adapter)
1324 {
1325 int q_vectors = adapter->num_q_vectors;
1326 int rxr_remaining = adapter->num_rx_queues;
1327 int txr_remaining = adapter->num_tx_queues;
1328 int rxr_idx = 0, txr_idx = 0, v_idx = 0;
1329 int err;
1330
1331 if (q_vectors >= (rxr_remaining + txr_remaining)) {
1332 for (; rxr_remaining; v_idx++) {
1333 err = igb_alloc_q_vector(adapter, q_vectors, v_idx,
1334 0, 0, 1, rxr_idx);
1335
1336 if (err)
1337 goto err_out;
1338
1339
1340 rxr_remaining--;
1341 rxr_idx++;
1342 }
1343 }
1344
1345 for (; v_idx < q_vectors; v_idx++) {
1346 int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
1347 int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);
1348
1349 err = igb_alloc_q_vector(adapter, q_vectors, v_idx,
1350 tqpv, txr_idx, rqpv, rxr_idx);
1351
1352 if (err)
1353 goto err_out;
1354
1355
1356 rxr_remaining -= rqpv;
1357 txr_remaining -= tqpv;
1358 rxr_idx++;
1359 txr_idx++;
1360 }
1361
1362 return 0;
1363
1364 err_out:
1365 adapter->num_tx_queues = 0;
1366 adapter->num_rx_queues = 0;
1367 adapter->num_q_vectors = 0;
1368
1369 while (v_idx--)
1370 igb_free_q_vector(adapter, v_idx);
1371
1372 return -ENOMEM;
1373 }
1374
1375
1376
1377
1378
1379
1380
1381
1382 static int igb_init_interrupt_scheme(struct igb_adapter *adapter, bool msix)
1383 {
1384 struct pci_dev *pdev = adapter->pdev;
1385 int err;
1386
1387 igb_set_interrupt_capability(adapter, msix);
1388
1389 err = igb_alloc_q_vectors(adapter);
1390 if (err) {
1391 dev_err(&pdev->dev, "Unable to allocate memory for vectors\n");
1392 goto err_alloc_q_vectors;
1393 }
1394
1395 igb_cache_ring_register(adapter);
1396
1397 return 0;
1398
1399 err_alloc_q_vectors:
1400 igb_reset_interrupt_capability(adapter);
1401 return err;
1402 }
1403
1404
1405
1406
1407
1408
1409
1410
1411 static int igb_request_irq(struct igb_adapter *adapter)
1412 {
1413 struct net_device *netdev = adapter->netdev;
1414 struct pci_dev *pdev = adapter->pdev;
1415 int err = 0;
1416
1417 if (adapter->flags & IGB_FLAG_HAS_MSIX) {
1418 err = igb_request_msix(adapter);
1419 if (!err)
1420 goto request_done;
1421
1422 igb_free_all_tx_resources(adapter);
1423 igb_free_all_rx_resources(adapter);
1424
1425 igb_clear_interrupt_scheme(adapter);
1426 err = igb_init_interrupt_scheme(adapter, false);
1427 if (err)
1428 goto request_done;
1429
1430 igb_setup_all_tx_resources(adapter);
1431 igb_setup_all_rx_resources(adapter);
1432 igb_configure(adapter);
1433 }
1434
1435 igb_assign_vector(adapter->q_vector[0], 0);
1436
1437 if (adapter->flags & IGB_FLAG_HAS_MSI) {
1438 err = request_irq(pdev->irq, igb_intr_msi, 0,
1439 netdev->name, adapter);
1440 if (!err)
1441 goto request_done;
1442
1443
1444 igb_reset_interrupt_capability(adapter);
1445 adapter->flags &= ~IGB_FLAG_HAS_MSI;
1446 }
1447
1448 err = request_irq(pdev->irq, igb_intr, IRQF_SHARED,
1449 netdev->name, adapter);
1450
1451 if (err)
1452 dev_err(&pdev->dev, "Error %d getting interrupt\n",
1453 err);
1454
1455 request_done:
1456 return err;
1457 }
1458
1459 static void igb_free_irq(struct igb_adapter *adapter)
1460 {
1461 if (adapter->flags & IGB_FLAG_HAS_MSIX) {
1462 int vector = 0, i;
1463
1464 free_irq(adapter->msix_entries[vector++].vector, adapter);
1465
1466 for (i = 0; i < adapter->num_q_vectors; i++)
1467 free_irq(adapter->msix_entries[vector++].vector,
1468 adapter->q_vector[i]);
1469 } else {
1470 free_irq(adapter->pdev->irq, adapter);
1471 }
1472 }
1473
1474
1475
1476
1477
1478 static void igb_irq_disable(struct igb_adapter *adapter)
1479 {
1480 struct e1000_hw *hw = &adapter->hw;
1481
1482 /* we need to be careful when disabling interrupts.  The VFs are also
1483  * mapped into these registers and so clearing the bits can cause
1484  * issues on the VF drivers so we only need to clear what we set
1485  */
1486 if (adapter->flags & IGB_FLAG_HAS_MSIX) {
1487 u32 regval = rd32(E1000_EIAM);
1488
1489 wr32(E1000_EIAM, regval & ~adapter->eims_enable_mask);
1490 wr32(E1000_EIMC, adapter->eims_enable_mask);
1491 regval = rd32(E1000_EIAC);
1492 wr32(E1000_EIAC, regval & ~adapter->eims_enable_mask);
1493 }
1494
1495 wr32(E1000_IAM, 0);
1496 wr32(E1000_IMC, ~0);
1497 wrfl();
1498 if (adapter->flags & IGB_FLAG_HAS_MSIX) {
1499 int i;
1500
1501 for (i = 0; i < adapter->num_q_vectors; i++)
1502 synchronize_irq(adapter->msix_entries[i].vector);
1503 } else {
1504 synchronize_irq(adapter->pdev->irq);
1505 }
1506 }
1507
1508
1509
1510
1511
1512 static void igb_irq_enable(struct igb_adapter *adapter)
1513 {
1514 struct e1000_hw *hw = &adapter->hw;
1515
1516 if (adapter->flags & IGB_FLAG_HAS_MSIX) {
1517 u32 ims = E1000_IMS_LSC | E1000_IMS_DOUTSYNC | E1000_IMS_DRSTA;
1518 u32 regval = rd32(E1000_EIAC);
1519
1520 wr32(E1000_EIAC, regval | adapter->eims_enable_mask);
1521 regval = rd32(E1000_EIAM);
1522 wr32(E1000_EIAM, regval | adapter->eims_enable_mask);
1523 wr32(E1000_EIMS, adapter->eims_enable_mask);
1524 if (adapter->vfs_allocated_count) {
1525 wr32(E1000_MBVFIMR, 0xFF);
1526 ims |= E1000_IMS_VMMB;
1527 }
1528 wr32(E1000_IMS, ims);
1529 } else {
1530 wr32(E1000_IMS, IMS_ENABLE_MASK |
1531 E1000_IMS_DRSTA);
1532 wr32(E1000_IAM, IMS_ENABLE_MASK |
1533 E1000_IMS_DRSTA);
1534 }
1535 }
1536
1537 static void igb_update_mng_vlan(struct igb_adapter *adapter)
1538 {
1539 struct e1000_hw *hw = &adapter->hw;
1540 u16 pf_id = adapter->vfs_allocated_count;
1541 u16 vid = adapter->hw.mng_cookie.vlan_id;
1542 u16 old_vid = adapter->mng_vlan_id;
1543
1544 if (hw->mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
1545
1546 igb_vfta_set(hw, vid, pf_id, true, true);
1547 adapter->mng_vlan_id = vid;
1548 } else {
1549 adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
1550 }
1551
1552 if ((old_vid != (u16)IGB_MNG_VLAN_NONE) &&
1553 (vid != old_vid) &&
1554 !test_bit(old_vid, adapter->active_vlans)) {
1555
1556 igb_vfta_set(hw, vid, pf_id, false, true);
1557 }
1558 }
1559
1560
1561
1562
1563
1564
1565
1566
1567
1568 static void igb_release_hw_control(struct igb_adapter *adapter)
1569 {
1570 struct e1000_hw *hw = &adapter->hw;
1571 u32 ctrl_ext;
1572
1573
1574 ctrl_ext = rd32(E1000_CTRL_EXT);
1575 wr32(E1000_CTRL_EXT,
1576 ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
1577 }
1578
1579
1580
1581
1582
1583
1584
1585
1586
1587 static void igb_get_hw_control(struct igb_adapter *adapter)
1588 {
1589 struct e1000_hw *hw = &adapter->hw;
1590 u32 ctrl_ext;
1591
1592
1593 ctrl_ext = rd32(E1000_CTRL_EXT);
1594 wr32(E1000_CTRL_EXT,
1595 ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
1596 }
1597
1598 static void enable_fqtss(struct igb_adapter *adapter, bool enable)
1599 {
1600 struct net_device *netdev = adapter->netdev;
1601 struct e1000_hw *hw = &adapter->hw;
1602
1603 WARN_ON(hw->mac.type != e1000_i210);
1604
1605 if (enable)
1606 adapter->flags |= IGB_FLAG_FQTSS;
1607 else
1608 adapter->flags &= ~IGB_FLAG_FQTSS;
1609
1610 if (netif_running(netdev))
1611 schedule_work(&adapter->reset_task);
1612 }
1613
1614 static bool is_fqtss_enabled(struct igb_adapter *adapter)
1615 {
1616 return (adapter->flags & IGB_FLAG_FQTSS) ? true : false;
1617 }
1618
1619 static void set_tx_desc_fetch_prio(struct e1000_hw *hw, int queue,
1620 enum tx_queue_prio prio)
1621 {
1622 u32 val;
1623
1624 WARN_ON(hw->mac.type != e1000_i210);
1625 WARN_ON(queue < 0 || queue > 4);
1626
1627 val = rd32(E1000_I210_TXDCTL(queue));
1628
1629 if (prio == TX_QUEUE_PRIO_HIGH)
1630 val |= E1000_TXDCTL_PRIORITY;
1631 else
1632 val &= ~E1000_TXDCTL_PRIORITY;
1633
1634 wr32(E1000_I210_TXDCTL(queue), val);
1635 }
1636
1637 static void set_queue_mode(struct e1000_hw *hw, int queue, enum queue_mode mode)
1638 {
1639 u32 val;
1640
1641 WARN_ON(hw->mac.type != e1000_i210);
1642 WARN_ON(queue < 0 || queue > 1);
1643
1644 val = rd32(E1000_I210_TQAVCC(queue));
1645
1646 if (mode == QUEUE_MODE_STREAM_RESERVATION)
1647 val |= E1000_TQAVCC_QUEUEMODE;
1648 else
1649 val &= ~E1000_TQAVCC_QUEUEMODE;
1650
1651 wr32(E1000_I210_TQAVCC(queue), val);
1652 }
1653
1654 static bool is_any_cbs_enabled(struct igb_adapter *adapter)
1655 {
1656 int i;
1657
1658 for (i = 0; i < adapter->num_tx_queues; i++) {
1659 if (adapter->tx_ring[i]->cbs_enable)
1660 return true;
1661 }
1662
1663 return false;
1664 }
1665
1666 static bool is_any_txtime_enabled(struct igb_adapter *adapter)
1667 {
1668 int i;
1669
1670 for (i = 0; i < adapter->num_tx_queues; i++) {
1671 if (adapter->tx_ring[i]->launchtime_enable)
1672 return true;
1673 }
1674
1675 return false;
1676 }
1677
1678
1679
1680
1681
1682
1683
1684
1685
1686
1687
1688 static void igb_config_tx_modes(struct igb_adapter *adapter, int queue)
1689 {
1690 struct igb_ring *ring = adapter->tx_ring[queue];
1691 struct net_device *netdev = adapter->netdev;
1692 struct e1000_hw *hw = &adapter->hw;
1693 u32 tqavcc, tqavctrl;
1694 u16 value;
1695
1696 WARN_ON(hw->mac.type != e1000_i210);
1697 WARN_ON(queue < 0 || queue > 1);
1698
1699
1700
1701
1702
1703 if (ring->cbs_enable || ring->launchtime_enable) {
1704 set_tx_desc_fetch_prio(hw, queue, TX_QUEUE_PRIO_HIGH);
1705 set_queue_mode(hw, queue, QUEUE_MODE_STREAM_RESERVATION);
1706 } else {
1707 set_tx_desc_fetch_prio(hw, queue, TX_QUEUE_PRIO_LOW);
1708 set_queue_mode(hw, queue, QUEUE_MODE_STRICT_PRIORITY);
1709 }
1710
1711
1712 if (ring->cbs_enable || queue == 0) {
1713
1714
1715
1716
1717
1718
1719
1720
1721
1722 if (queue == 0 && !ring->cbs_enable) {
1723
1724 ring->idleslope = 1000000;
1725 ring->hicredit = ETH_FRAME_LEN;
1726 }
1727
1728
1729
1730
1731
1732 tqavctrl = rd32(E1000_I210_TQAVCTRL);
1733 tqavctrl |= E1000_TQAVCTRL_DATATRANARB;
1734 wr32(E1000_I210_TQAVCTRL, tqavctrl);
1735
1736
1737
1738
1739
1740
1741
1742
1743
1744
1745
1746
1747
1748
1749
1750
1751
1752
1753
1754
1755
1756
1757
1758
1759
1760
1761
1762
1763
1764
1765
1766
1767
1768
1769
1770
1771
1772
1773
1774
1775
1776
1777
1778
1779
1780
1781
1782 /* Program the idleSlope for this queue.  The ring->idleslope
1783  * parameter is expressed in kbit/s, and the TQAVCC credit-increment
1784  * value is computed as 61034 units per 1,000,000 kbit/s of reserved
1785  * bandwidth (0x7735 * 2 = 61034 at the full 1 Gbit/s rate), i.e.:
1786  *
1787  *     value = idleslope * 61034 / 1000000
1788  *
1789  * rounded up so that any non-zero reservation programs a non-zero
1790  * idleSlope.
1791  */
1792
1793 value = DIV_ROUND_UP_ULL(ring->idleslope * 61034ULL, 1000000);
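/* Worked example (illustrative, not from the original source): a 20 Mbit/s
 * (20000 kbit/s) CBS idleslope gives
 * value = DIV_ROUND_UP(20000 * 61034, 1000000) = 1221,
 * which is the credit increment written into TQAVCC below.
 */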
1794
1795 tqavcc = rd32(E1000_I210_TQAVCC(queue));
1796 tqavcc &= ~E1000_TQAVCC_IDLESLOPE_MASK;
1797 tqavcc |= value;
1798 wr32(E1000_I210_TQAVCC(queue), tqavcc);
1799
1800 wr32(E1000_I210_TQAVHC(queue),
1801 0x80000000 + ring->hicredit * 0x7735);
1802 } else {
1803
1804
1805 tqavcc = rd32(E1000_I210_TQAVCC(queue));
1806 tqavcc &= ~E1000_TQAVCC_IDLESLOPE_MASK;
1807 wr32(E1000_I210_TQAVCC(queue), tqavcc);
1808
1809
1810 wr32(E1000_I210_TQAVHC(queue), 0);
1811
1812
1813
1814
1815
1816 if (!is_any_cbs_enabled(adapter)) {
1817 tqavctrl = rd32(E1000_I210_TQAVCTRL);
1818 tqavctrl &= ~E1000_TQAVCTRL_DATATRANARB;
1819 wr32(E1000_I210_TQAVCTRL, tqavctrl);
1820 }
1821 }
1822
1823
1824 if (ring->launchtime_enable) {
1825
1826
1827
1828
1829
1830
1831
1832
1833 tqavctrl = rd32(E1000_I210_TQAVCTRL);
1834 tqavctrl |= E1000_TQAVCTRL_DATATRANTIM |
1835 E1000_TQAVCTRL_FETCHTIME_DELTA;
1836 wr32(E1000_I210_TQAVCTRL, tqavctrl);
1837 } else {
1838
1839
1840
1841
1842 if (!is_any_txtime_enabled(adapter)) {
1843 tqavctrl = rd32(E1000_I210_TQAVCTRL);
1844 tqavctrl &= ~E1000_TQAVCTRL_DATATRANTIM;
1845 tqavctrl &= ~E1000_TQAVCTRL_FETCHTIME_DELTA;
1846 wr32(E1000_I210_TQAVCTRL, tqavctrl);
1847 }
1848 }
1849
1850
1851
1852
1853
1854
1855 netdev_dbg(netdev, "Qav Tx mode: cbs %s, launchtime %s, queue %d idleslope %d sendslope %d hiCredit %d locredit %d\n",
1856 ring->cbs_enable ? "enabled" : "disabled",
1857 ring->launchtime_enable ? "enabled" : "disabled",
1858 queue,
1859 ring->idleslope, ring->sendslope,
1860 ring->hicredit, ring->locredit);
1861 }
1862
1863 static int igb_save_txtime_params(struct igb_adapter *adapter, int queue,
1864 bool enable)
1865 {
1866 struct igb_ring *ring;
1867
1868 if (queue < 0 || queue > adapter->num_tx_queues)
1869 return -EINVAL;
1870
1871 ring = adapter->tx_ring[queue];
1872 ring->launchtime_enable = enable;
1873
1874 return 0;
1875 }
1876
1877 static int igb_save_cbs_params(struct igb_adapter *adapter, int queue,
1878 bool enable, int idleslope, int sendslope,
1879 int hicredit, int locredit)
1880 {
1881 struct igb_ring *ring;
1882
1883 if (queue < 0 || queue > adapter->num_tx_queues)
1884 return -EINVAL;
1885
1886 ring = adapter->tx_ring[queue];
1887
1888 ring->cbs_enable = enable;
1889 ring->idleslope = idleslope;
1890 ring->sendslope = sendslope;
1891 ring->hicredit = hicredit;
1892 ring->locredit = locredit;
1893
1894 return 0;
1895 }
1896
1897
1898
1899
1900
1901
1902
1903
1904
1905
1906 static void igb_setup_tx_mode(struct igb_adapter *adapter)
1907 {
1908 struct net_device *netdev = adapter->netdev;
1909 struct e1000_hw *hw = &adapter->hw;
1910 u32 val;
1911
1912
1913 if (hw->mac.type != e1000_i210)
1914 return;
1915
1916 if (is_fqtss_enabled(adapter)) {
1917 int i, max_queue;
1918
1919
1920
1921
1922
1923 val = rd32(E1000_I210_TQAVCTRL);
1924 val |= E1000_TQAVCTRL_XMIT_MODE | E1000_TQAVCTRL_SP_WAIT_SR;
1925 val &= ~E1000_TQAVCTRL_DATAFETCHARB;
1926 wr32(E1000_I210_TQAVCTRL, val);
1927
1928
1929
1930
1931 val = rd32(E1000_TXPBS);
1932 val &= ~I210_TXPBSIZE_MASK;
1933 val |= I210_TXPBSIZE_PB0_8KB | I210_TXPBSIZE_PB1_8KB |
1934 I210_TXPBSIZE_PB2_4KB | I210_TXPBSIZE_PB3_4KB;
1935 wr32(E1000_TXPBS, val);
1936
1937 val = rd32(E1000_RXPBS);
1938 val &= ~I210_RXPBSIZE_MASK;
1939 val |= I210_RXPBSIZE_PB_30KB;
1940 wr32(E1000_RXPBS, val);
1941
1942
1943
1944
1945
1946
1947
1948 /* DTXMXPKTSZ limits the maximum packet size the Tx queues may
1949  * fetch; it must not exceed the smallest packet buffer programmed
1950  * in TXPBS above (4 KB), so cap it at (4096 - 1) / 64, i.e. the
1951  * 4 KB limit expressed in the register's 64-byte units.
1952  */
1953 val = (4096 - 1) / 64;
1954 wr32(E1000_I210_DTXMXPKTSZ, val);
1955
1956
1957
1958
1959
1960
1961 max_queue = (adapter->num_tx_queues < I210_SR_QUEUES_NUM) ?
1962 adapter->num_tx_queues : I210_SR_QUEUES_NUM;
1963
1964 for (i = 0; i < max_queue; i++) {
1965 igb_config_tx_modes(adapter, i);
1966 }
1967 } else {
1968 wr32(E1000_RXPBS, I210_RXPBSIZE_DEFAULT);
1969 wr32(E1000_TXPBS, I210_TXPBSIZE_DEFAULT);
1970 wr32(E1000_I210_DTXMXPKTSZ, I210_DTXMXPKTSZ_DEFAULT);
1971
1972 val = rd32(E1000_I210_TQAVCTRL);
1973
1974
1975
1976
1977 val &= ~E1000_TQAVCTRL_XMIT_MODE;
1978 wr32(E1000_I210_TQAVCTRL, val);
1979 }
1980
1981 netdev_dbg(netdev, "FQTSS %s\n", (is_fqtss_enabled(adapter)) ?
1982 "enabled" : "disabled");
1983 }
1984
1985
1986
1987
1988
1989 static void igb_configure(struct igb_adapter *adapter)
1990 {
1991 struct net_device *netdev = adapter->netdev;
1992 int i;
1993
1994 igb_get_hw_control(adapter);
1995 igb_set_rx_mode(netdev);
1996 igb_setup_tx_mode(adapter);
1997
1998 igb_restore_vlan(adapter);
1999
2000 igb_setup_tctl(adapter);
2001 igb_setup_mrqc(adapter);
2002 igb_setup_rctl(adapter);
2003
2004 igb_nfc_filter_restore(adapter);
2005 igb_configure_tx(adapter);
2006 igb_configure_rx(adapter);
2007
2008 igb_rx_fifo_flush_82575(&adapter->hw);
2009
2010
2011
2012
2013
2014 for (i = 0; i < adapter->num_rx_queues; i++) {
2015 struct igb_ring *ring = adapter->rx_ring[i];
2016 igb_alloc_rx_buffers(ring, igb_desc_unused(ring));
2017 }
2018 }
2019
2020
2021
2022
2023
2024 void igb_power_up_link(struct igb_adapter *adapter)
2025 {
2026 igb_reset_phy(&adapter->hw);
2027
2028 if (adapter->hw.phy.media_type == e1000_media_type_copper)
2029 igb_power_up_phy_copper(&adapter->hw);
2030 else
2031 igb_power_up_serdes_link_82575(&adapter->hw);
2032
2033 igb_setup_link(&adapter->hw);
2034 }
2035
2036
2037
2038
2039
2040 static void igb_power_down_link(struct igb_adapter *adapter)
2041 {
2042 if (adapter->hw.phy.media_type == e1000_media_type_copper)
2043 igb_power_down_phy_copper_82575(&adapter->hw);
2044 else
2045 igb_shutdown_serdes_link_82575(&adapter->hw);
2046 }
2047
2048
2049
2050
2051
2052 static void igb_check_swap_media(struct igb_adapter *adapter)
2053 {
2054 struct e1000_hw *hw = &adapter->hw;
2055 u32 ctrl_ext, connsw;
2056 bool swap_now = false;
2057
2058 ctrl_ext = rd32(E1000_CTRL_EXT);
2059 connsw = rd32(E1000_CONNSW);
2060
2061
2062
2063
2064
2065 if ((hw->phy.media_type == e1000_media_type_copper) &&
2066 (!(connsw & E1000_CONNSW_AUTOSENSE_EN))) {
2067 swap_now = true;
2068 } else if ((hw->phy.media_type != e1000_media_type_copper) &&
2069 !(connsw & E1000_CONNSW_SERDESD)) {
2070
2071 if (adapter->copper_tries < 4) {
2072 adapter->copper_tries++;
2073 connsw |= E1000_CONNSW_AUTOSENSE_CONF;
2074 wr32(E1000_CONNSW, connsw);
2075 return;
2076 } else {
2077 adapter->copper_tries = 0;
2078 if ((connsw & E1000_CONNSW_PHYSD) &&
2079 (!(connsw & E1000_CONNSW_PHY_PDN))) {
2080 swap_now = true;
2081 connsw &= ~E1000_CONNSW_AUTOSENSE_CONF;
2082 wr32(E1000_CONNSW, connsw);
2083 }
2084 }
2085 }
2086
2087 if (!swap_now)
2088 return;
2089
2090 switch (hw->phy.media_type) {
2091 case e1000_media_type_copper:
2092 netdev_info(adapter->netdev,
2093 "MAS: changing media to fiber/serdes\n");
2094 ctrl_ext |=
2095 E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES;
2096 adapter->flags |= IGB_FLAG_MEDIA_RESET;
2097 adapter->copper_tries = 0;
2098 break;
2099 case e1000_media_type_internal_serdes:
2100 case e1000_media_type_fiber:
2101 netdev_info(adapter->netdev,
2102 "MAS: changing media to copper\n");
2103 ctrl_ext &=
2104 ~E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES;
2105 adapter->flags |= IGB_FLAG_MEDIA_RESET;
2106 break;
2107 default:
2108
2109 netdev_err(adapter->netdev,
2110 "AMS: Invalid media type found, returning\n");
2111 break;
2112 }
2113 wr32(E1000_CTRL_EXT, ctrl_ext);
2114 }
2115
2116
2117
2118
2119
2120 int igb_up(struct igb_adapter *adapter)
2121 {
2122 struct e1000_hw *hw = &adapter->hw;
2123 int i;
2124
2125
2126 igb_configure(adapter);
2127
2128 clear_bit(__IGB_DOWN, &adapter->state);
2129
2130 for (i = 0; i < adapter->num_q_vectors; i++)
2131 napi_enable(&(adapter->q_vector[i]->napi));
2132
2133 if (adapter->flags & IGB_FLAG_HAS_MSIX)
2134 igb_configure_msix(adapter);
2135 else
2136 igb_assign_vector(adapter->q_vector[0], 0);
2137
2138
2139 rd32(E1000_TSICR);
2140 rd32(E1000_ICR);
2141 igb_irq_enable(adapter);
2142
2143
2144 if (adapter->vfs_allocated_count) {
2145 u32 reg_data = rd32(E1000_CTRL_EXT);
2146
2147 reg_data |= E1000_CTRL_EXT_PFRSTD;
2148 wr32(E1000_CTRL_EXT, reg_data);
2149 }
2150
2151 netif_tx_start_all_queues(adapter->netdev);
2152
2153
2154 hw->mac.get_link_status = 1;
2155 schedule_work(&adapter->watchdog_task);
2156
2157 if ((adapter->flags & IGB_FLAG_EEE) &&
2158 (!hw->dev_spec._82575.eee_disable))
2159 adapter->eee_advert = MDIO_EEE_100TX | MDIO_EEE_1000T;
2160
2161 return 0;
2162 }
2163
2164 void igb_down(struct igb_adapter *adapter)
2165 {
2166 struct net_device *netdev = adapter->netdev;
2167 struct e1000_hw *hw = &adapter->hw;
2168 u32 tctl, rctl;
2169 int i;
2170
2171
2172
2173
2174 set_bit(__IGB_DOWN, &adapter->state);
2175
2176
2177 rctl = rd32(E1000_RCTL);
2178 wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN);
2179
2180
2181 igb_nfc_filter_exit(adapter);
2182
2183 netif_carrier_off(netdev);
2184 netif_tx_stop_all_queues(netdev);
2185
2186
2187 tctl = rd32(E1000_TCTL);
2188 tctl &= ~E1000_TCTL_EN;
2189 wr32(E1000_TCTL, tctl);
2190
2191 wrfl();
2192 usleep_range(10000, 11000);
2193
2194 igb_irq_disable(adapter);
2195
2196 adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE;
2197
2198 for (i = 0; i < adapter->num_q_vectors; i++) {
2199 if (adapter->q_vector[i]) {
2200 napi_synchronize(&adapter->q_vector[i]->napi);
2201 napi_disable(&adapter->q_vector[i]->napi);
2202 }
2203 }
2204
2205 del_timer_sync(&adapter->watchdog_timer);
2206 del_timer_sync(&adapter->phy_info_timer);
2207
2208
2209 spin_lock(&adapter->stats64_lock);
2210 igb_update_stats(adapter);
2211 spin_unlock(&adapter->stats64_lock);
2212
2213 adapter->link_speed = 0;
2214 adapter->link_duplex = 0;
2215
2216 if (!pci_channel_offline(adapter->pdev))
2217 igb_reset(adapter);
2218
2219
2220 adapter->flags &= ~IGB_FLAG_VLAN_PROMISC;
2221
2222 igb_clean_all_tx_rings(adapter);
2223 igb_clean_all_rx_rings(adapter);
2224 #ifdef CONFIG_IGB_DCA
2225
2226
2227 igb_setup_dca(adapter);
2228 #endif
2229 }
2230
2231 void igb_reinit_locked(struct igb_adapter *adapter)
2232 {
2233 WARN_ON(in_interrupt());
2234 while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
2235 usleep_range(1000, 2000);
2236 igb_down(adapter);
2237 igb_up(adapter);
2238 clear_bit(__IGB_RESETTING, &adapter->state);
2239 }
2240
2241
2242
2243
2244
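/**
 *  igb_enable_mas - enable Media Autosense in hardware
 *  @adapter: address of board private structure
 *
 *  When the current media is copper and no serdes signal is detected,
 *  sets the energy-detect source and autosense enable bits in CONNSW.
 **/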
2245 static void igb_enable_mas(struct igb_adapter *adapter)
2246 {
2247 struct e1000_hw *hw = &adapter->hw;
2248 u32 connsw = rd32(E1000_CONNSW);
2249
2250
2251 if ((hw->phy.media_type == e1000_media_type_copper) &&
2252 (!(connsw & E1000_CONNSW_SERDESD))) {
2253 connsw |= E1000_CONNSW_ENRGSRC;
2254 connsw |= E1000_CONNSW_AUTOSENSE_EN;
2255 wr32(E1000_CONNSW, connsw);
2256 wrfl();
2257 }
2258 }
2259
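/**
 *  igb_reset - reset and reinitialize the hardware
 *  @adapter: address of board private structure
 *
 *  Recomputes the packet buffer allocation and flow control watermarks,
 *  resets and reinitializes the MAC, and restores the MAC filter table,
 *  EEE, management VLAN and PTP state.
 **/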
2260 void igb_reset(struct igb_adapter *adapter)
2261 {
2262 struct pci_dev *pdev = adapter->pdev;
2263 struct e1000_hw *hw = &adapter->hw;
2264 struct e1000_mac_info *mac = &hw->mac;
2265 struct e1000_fc_info *fc = &hw->fc;
2266 u32 pba, hwm;
2267
2268
2269
2270
2271 switch (mac->type) {
2272 case e1000_i350:
2273 case e1000_i354:
2274 case e1000_82580:
2275 pba = rd32(E1000_RXPBS);
2276 pba = igb_rxpbs_adjust_82580(pba);
2277 break;
2278 case e1000_82576:
2279 pba = rd32(E1000_RXPBS);
2280 pba &= E1000_RXPBS_SIZE_MASK_82576;
2281 break;
2282 case e1000_82575:
2283 case e1000_i210:
2284 case e1000_i211:
2285 default:
2286 pba = E1000_PBA_34K;
2287 break;
2288 }
2289
2290 if (mac->type == e1000_82575) {
2291 u32 min_rx_space, min_tx_space, needed_tx_space;
2292
2293
2294 wr32(E1000_PBA, pba);
2295
2296
2297
2298
2299
2300
2301
2302
2303 min_rx_space = DIV_ROUND_UP(MAX_JUMBO_FRAME_SIZE, 1024);
2304
2305
2306
2307
2308
2309
2310 min_tx_space = adapter->max_frame_size;
2311 min_tx_space += sizeof(union e1000_adv_tx_desc) - ETH_FCS_LEN;
2312 min_tx_space = DIV_ROUND_UP(min_tx_space, 512);
2313
2314
2315 needed_tx_space = min_tx_space - (rd32(E1000_PBA) >> 16);
2316
2317
2318
2319
2320
2321 if (needed_tx_space < pba) {
2322 pba -= needed_tx_space;
2323
2324
2325
2326
2327 if (pba < min_rx_space)
2328 pba = min_rx_space;
2329 }
2330
2331
2332 wr32(E1000_PBA, pba);
2333 }
2334
2335
2336
2337
2338
2339
2340
2341
2342
2343 hwm = (pba << 10) - (adapter->max_frame_size + MAX_JUMBO_FRAME_SIZE);
2344
2345 fc->high_water = hwm & 0xFFFFFFF0;
2346 fc->low_water = fc->high_water - 16;
2347 fc->pause_time = 0xFFFF;
2348 fc->send_xon = 1;
2349 fc->current_mode = fc->requested_mode;
2350
2351
2352 if (adapter->vfs_allocated_count) {
2353 int i;
2354
2355 for (i = 0 ; i < adapter->vfs_allocated_count; i++)
2356 adapter->vf_data[i].flags &= IGB_VF_FLAG_PF_SET_MAC;
2357
2358
2359 igb_ping_all_vfs(adapter);
2360
2361
2362 wr32(E1000_VFRE, 0);
2363 wr32(E1000_VFTE, 0);
2364 }
2365
2366
2367 hw->mac.ops.reset_hw(hw);
2368 wr32(E1000_WUC, 0);
2369
2370 if (adapter->flags & IGB_FLAG_MEDIA_RESET) {
2371
2372 adapter->ei.get_invariants(hw);
2373 adapter->flags &= ~IGB_FLAG_MEDIA_RESET;
2374 }
2375 if ((mac->type == e1000_82575 || mac->type == e1000_i350) &&
2376 (adapter->flags & IGB_FLAG_MAS_ENABLE)) {
2377 igb_enable_mas(adapter);
2378 }
2379 if (hw->mac.ops.init_hw(hw))
2380 dev_err(&pdev->dev, "Hardware Error\n");
2381
2382
2383 igb_flush_mac_table(adapter);
2384 __dev_uc_unsync(adapter->netdev, NULL);
2385
2386
2387 igb_set_default_mac_filter(adapter);
2388
2389
2390
2391
2392 if (!hw->mac.autoneg)
2393 igb_force_mac_fc(hw);
2394
2395 igb_init_dmac(adapter, pba);
2396 #ifdef CONFIG_IGB_HWMON
2397
2398 if (!test_bit(__IGB_DOWN, &adapter->state)) {
2399 if (mac->type == e1000_i350 && hw->bus.func == 0) {
2400
2401
2402
2403 if (adapter->ets)
2404 mac->ops.init_thermal_sensor_thresh(hw);
2405 }
2406 }
2407 #endif
2408
2409 if (hw->phy.media_type == e1000_media_type_copper) {
2410 switch (mac->type) {
2411 case e1000_i350:
2412 case e1000_i210:
2413 case e1000_i211:
2414 igb_set_eee_i350(hw, true, true);
2415 break;
2416 case e1000_i354:
2417 igb_set_eee_i354(hw, true, true);
2418 break;
2419 default:
2420 break;
2421 }
2422 }
2423 if (!netif_running(adapter->netdev))
2424 igb_power_down_link(adapter);
2425
2426 igb_update_mng_vlan(adapter);
2427
2428
2429 wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE);
2430
2431
2432 if (adapter->ptp_flags & IGB_PTP_ENABLED)
2433 igb_ptp_reset(adapter);
2434
2435 igb_get_phy_info(hw);
2436 }
2437
2438 static netdev_features_t igb_fix_features(struct net_device *netdev,
2439 netdev_features_t features)
2440 {
2441
2442
2443
2444 if (features & NETIF_F_HW_VLAN_CTAG_RX)
2445 features |= NETIF_F_HW_VLAN_CTAG_TX;
2446 else
2447 features &= ~NETIF_F_HW_VLAN_CTAG_TX;
2448
2449 return features;
2450 }
2451
2452 static int igb_set_features(struct net_device *netdev,
2453 netdev_features_t features)
2454 {
2455 netdev_features_t changed = netdev->features ^ features;
2456 struct igb_adapter *adapter = netdev_priv(netdev);
2457
2458 if (changed & NETIF_F_HW_VLAN_CTAG_RX)
2459 igb_vlan_mode(netdev, features);
2460
2461 if (!(changed & (NETIF_F_RXALL | NETIF_F_NTUPLE)))
2462 return 0;
2463
2464 if (!(features & NETIF_F_NTUPLE)) {
2465 struct hlist_node *node2;
2466 struct igb_nfc_filter *rule;
2467
2468 spin_lock(&adapter->nfc_lock);
2469 hlist_for_each_entry_safe(rule, node2,
2470 &adapter->nfc_filter_list, nfc_node) {
2471 igb_erase_filter(adapter, rule);
2472 hlist_del(&rule->nfc_node);
2473 kfree(rule);
2474 }
2475 spin_unlock(&adapter->nfc_lock);
2476 adapter->nfc_filter_count = 0;
2477 }
2478
2479 netdev->features = features;
2480
2481 if (netif_running(netdev))
2482 igb_reinit_locked(adapter);
2483 else
2484 igb_reset(adapter);
2485
2486 return 1;
2487 }
2488
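/**
 *  igb_ndo_fdb_add - add a static entry to the hardware forwarding database
 *  @ndm: netlink neighbour message
 *  @tb: netlink attributes
 *  @dev: network interface device structure
 *  @addr: MAC address to add
 *  @vid: VLAN id
 *  @flags: netlink request flags
 *  @extack: netlink extended ack
 *
 *  Fails unicast and link-local additions when no receive address registers
 *  are left; otherwise defers to the default fdb add handler.
 **/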
2489 static int igb_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
2490 struct net_device *dev,
2491 const unsigned char *addr, u16 vid,
2492 u16 flags,
2493 struct netlink_ext_ack *extack)
2494 {
/* guarantee we can provide a unique filter for the unicast address */
2496 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) {
2497 struct igb_adapter *adapter = netdev_priv(dev);
2498 int vfn = adapter->vfs_allocated_count;
2499
2500 if (netdev_uc_count(dev) >= igb_available_rars(adapter, vfn))
2501 return -ENOMEM;
2502 }
2503
2504 return ndo_dflt_fdb_add(ndm, tb, dev, addr, vid, flags);
2505 }
2506
2507 #define IGB_MAX_MAC_HDR_LEN 127
2508 #define IGB_MAX_NETWORK_HDR_LEN 511
2509
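/**
 *  igb_features_check - disable offloads the hardware cannot apply to a frame
 *  @skb: buffer being transmitted
 *  @dev: network interface device structure
 *  @features: currently advertised features
 *
 *  Clears checksum, SCTP CRC, VLAN insertion and TSO offloads when the MAC
 *  or network headers are longer than the hardware can parse, and disables
 *  TSO for encapsulated frames when the outer IP ID may not be mangled.
 **/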
2510 static netdev_features_t
2511 igb_features_check(struct sk_buff *skb, struct net_device *dev,
2512 netdev_features_t features)
2513 {
2514 unsigned int network_hdr_len, mac_hdr_len;
2515
2516
2517 mac_hdr_len = skb_network_header(skb) - skb->data;
2518 if (unlikely(mac_hdr_len > IGB_MAX_MAC_HDR_LEN))
2519 return features & ~(NETIF_F_HW_CSUM |
2520 NETIF_F_SCTP_CRC |
2521 NETIF_F_HW_VLAN_CTAG_TX |
2522 NETIF_F_TSO |
2523 NETIF_F_TSO6);
2524
2525 network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb);
2526 if (unlikely(network_hdr_len > IGB_MAX_NETWORK_HDR_LEN))
2527 return features & ~(NETIF_F_HW_CSUM |
2528 NETIF_F_SCTP_CRC |
2529 NETIF_F_TSO |
2530 NETIF_F_TSO6);
2531
2532
2533
2534
2535 if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID))
2536 features &= ~NETIF_F_TSO;
2537
2538 return features;
2539 }
2540
2541 static void igb_offload_apply(struct igb_adapter *adapter, s32 queue)
2542 {
2543 if (!is_fqtss_enabled(adapter)) {
2544 enable_fqtss(adapter, true);
2545 return;
2546 }
2547
2548 igb_config_tx_modes(adapter, queue);
2549
2550 if (!is_any_cbs_enabled(adapter) && !is_any_txtime_enabled(adapter))
2551 enable_fqtss(adapter, false);
2552 }
2553
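/**
 *  igb_offload_cbs - offload a CBS (credit-based shaper) qdisc to the hardware
 *  @adapter: address of board private structure
 *  @qopt: shaper parameters passed down from the tc layer
 *
 *  CBS offload is only supported on i210 and only on the two SR queues
 *  (0 and 1).  The parameters are saved and the Tx mode is reapplied.
 **/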
2554 static int igb_offload_cbs(struct igb_adapter *adapter,
2555 struct tc_cbs_qopt_offload *qopt)
2556 {
2557 struct e1000_hw *hw = &adapter->hw;
2558 int err;
2559
2560
2561 if (hw->mac.type != e1000_i210)
2562 return -EOPNOTSUPP;
2563
2564
2565 if (qopt->queue < 0 || qopt->queue > 1)
2566 return -EINVAL;
2567
2568 err = igb_save_cbs_params(adapter, qopt->queue, qopt->enable,
2569 qopt->idleslope, qopt->sendslope,
2570 qopt->hicredit, qopt->locredit);
2571 if (err)
2572 return err;
2573
2574 igb_offload_apply(adapter, qopt->queue);
2575
2576 return 0;
2577 }
2578
2579 #define ETHER_TYPE_FULL_MASK ((__force __be16)~0)
2580 #define VLAN_PRIO_FULL_MASK (0x07)
2581
2582 static int igb_parse_cls_flower(struct igb_adapter *adapter,
2583 struct flow_cls_offload *f,
2584 int traffic_class,
2585 struct igb_nfc_filter *input)
2586 {
2587 struct flow_rule *rule = flow_cls_offload_flow_rule(f);
2588 struct flow_dissector *dissector = rule->match.dissector;
2589 struct netlink_ext_ack *extack = f->common.extack;
2590
2591 if (dissector->used_keys &
2592 ~(BIT(FLOW_DISSECTOR_KEY_BASIC) |
2593 BIT(FLOW_DISSECTOR_KEY_CONTROL) |
2594 BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
2595 BIT(FLOW_DISSECTOR_KEY_VLAN))) {
2596 NL_SET_ERR_MSG_MOD(extack,
2597 "Unsupported key used, only BASIC, CONTROL, ETH_ADDRS and VLAN are supported");
2598 return -EOPNOTSUPP;
2599 }
2600
2601 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
2602 struct flow_match_eth_addrs match;
2603
2604 flow_rule_match_eth_addrs(rule, &match);
2605 if (!is_zero_ether_addr(match.mask->dst)) {
2606 if (!is_broadcast_ether_addr(match.mask->dst)) {
2607 NL_SET_ERR_MSG_MOD(extack, "Only full masks are supported for destination MAC address");
2608 return -EINVAL;
2609 }
2610
2611 input->filter.match_flags |=
2612 IGB_FILTER_FLAG_DST_MAC_ADDR;
2613 ether_addr_copy(input->filter.dst_addr, match.key->dst);
2614 }
2615
2616 if (!is_zero_ether_addr(match.mask->src)) {
2617 if (!is_broadcast_ether_addr(match.mask->src)) {
2618 NL_SET_ERR_MSG_MOD(extack, "Only full masks are supported for source MAC address");
2619 return -EINVAL;
2620 }
2621
2622 input->filter.match_flags |=
2623 IGB_FILTER_FLAG_SRC_MAC_ADDR;
2624 ether_addr_copy(input->filter.src_addr, match.key->src);
2625 }
2626 }
2627
2628 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
2629 struct flow_match_basic match;
2630
2631 flow_rule_match_basic(rule, &match);
2632 if (match.mask->n_proto) {
2633 if (match.mask->n_proto != ETHER_TYPE_FULL_MASK) {
2634 NL_SET_ERR_MSG_MOD(extack, "Only full mask is supported for EtherType filter");
2635 return -EINVAL;
2636 }
2637
2638 input->filter.match_flags |= IGB_FILTER_FLAG_ETHER_TYPE;
2639 input->filter.etype = match.key->n_proto;
2640 }
2641 }
2642
2643 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
2644 struct flow_match_vlan match;
2645
2646 flow_rule_match_vlan(rule, &match);
2647 if (match.mask->vlan_priority) {
2648 if (match.mask->vlan_priority != VLAN_PRIO_FULL_MASK) {
2649 NL_SET_ERR_MSG_MOD(extack, "Only full mask is supported for VLAN priority");
2650 return -EINVAL;
2651 }
2652
2653 input->filter.match_flags |= IGB_FILTER_FLAG_VLAN_TCI;
2654 input->filter.vlan_tci = match.key->vlan_priority;
2655 }
2656 }
2657
2658 input->action = traffic_class;
2659 input->cookie = f->cookie;
2660
2661 return 0;
2662 }
2663
2664 static int igb_configure_clsflower(struct igb_adapter *adapter,
2665 struct flow_cls_offload *cls_flower)
2666 {
2667 struct netlink_ext_ack *extack = cls_flower->common.extack;
2668 struct igb_nfc_filter *filter, *f;
2669 int err, tc;
2670
2671 tc = tc_classid_to_hwtc(adapter->netdev, cls_flower->classid);
2672 if (tc < 0) {
2673 NL_SET_ERR_MSG_MOD(extack, "Invalid traffic class");
2674 return -EINVAL;
2675 }
2676
2677 filter = kzalloc(sizeof(*filter), GFP_KERNEL);
2678 if (!filter)
2679 return -ENOMEM;
2680
2681 err = igb_parse_cls_flower(adapter, cls_flower, tc, filter);
2682 if (err < 0)
2683 goto err_parse;
2684
2685 spin_lock(&adapter->nfc_lock);
2686
2687 hlist_for_each_entry(f, &adapter->nfc_filter_list, nfc_node) {
2688 if (!memcmp(&f->filter, &filter->filter, sizeof(f->filter))) {
2689 err = -EEXIST;
2690 NL_SET_ERR_MSG_MOD(extack,
2691 "This filter is already set in ethtool");
2692 goto err_locked;
2693 }
2694 }
2695
2696 hlist_for_each_entry(f, &adapter->cls_flower_list, nfc_node) {
2697 if (!memcmp(&f->filter, &filter->filter, sizeof(f->filter))) {
2698 err = -EEXIST;
2699 NL_SET_ERR_MSG_MOD(extack,
2700 "This filter is already set in cls_flower");
2701 goto err_locked;
2702 }
2703 }
2704
2705 err = igb_add_filter(adapter, filter);
2706 if (err < 0) {
2707 NL_SET_ERR_MSG_MOD(extack, "Could not add filter to the adapter");
2708 goto err_locked;
2709 }
2710
2711 hlist_add_head(&filter->nfc_node, &adapter->cls_flower_list);
2712
2713 spin_unlock(&adapter->nfc_lock);
2714
2715 return 0;
2716
2717 err_locked:
2718 spin_unlock(&adapter->nfc_lock);
2719
2720 err_parse:
2721 kfree(filter);
2722
2723 return err;
2724 }
2725
2726 static int igb_delete_clsflower(struct igb_adapter *adapter,
2727 struct flow_cls_offload *cls_flower)
2728 {
2729 struct igb_nfc_filter *filter;
2730 int err;
2731
2732 spin_lock(&adapter->nfc_lock);
2733
2734 hlist_for_each_entry(filter, &adapter->cls_flower_list, nfc_node)
2735 if (filter->cookie == cls_flower->cookie)
2736 break;
2737
2738 if (!filter) {
2739 err = -ENOENT;
2740 goto out;
2741 }
2742
2743 err = igb_erase_filter(adapter, filter);
2744 if (err < 0)
2745 goto out;
2746
2747 hlist_del(&filter->nfc_node);
2748 kfree(filter);
2749
2750 out:
2751 spin_unlock(&adapter->nfc_lock);
2752
2753 return err;
2754 }
2755
2756 static int igb_setup_tc_cls_flower(struct igb_adapter *adapter,
2757 struct flow_cls_offload *cls_flower)
2758 {
2759 switch (cls_flower->command) {
2760 case FLOW_CLS_REPLACE:
2761 return igb_configure_clsflower(adapter, cls_flower);
2762 case FLOW_CLS_DESTROY:
2763 return igb_delete_clsflower(adapter, cls_flower);
2764 case FLOW_CLS_STATS:
2765 return -EOPNOTSUPP;
2766 default:
2767 return -EOPNOTSUPP;
2768 }
2769 }
2770
2771 static int igb_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
2772 void *cb_priv)
2773 {
2774 struct igb_adapter *adapter = cb_priv;
2775
2776 if (!tc_cls_can_offload_and_chain0(adapter->netdev, type_data))
2777 return -EOPNOTSUPP;
2778
2779 switch (type) {
2780 case TC_SETUP_CLSFLOWER:
2781 return igb_setup_tc_cls_flower(adapter, type_data);
2782
2783 default:
2784 return -EOPNOTSUPP;
2785 }
2786 }
2787
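/**
 *  igb_offload_txtime - offload an ETF (launch time) qdisc to the hardware
 *  @adapter: address of board private structure
 *  @qopt: ETF offload parameters passed down from the tc layer
 *
 *  Launch time offload is only supported on i210 and only on queues 0
 *  and 1.  The enable state is saved and the Tx mode is reapplied.
 **/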
2788 static int igb_offload_txtime(struct igb_adapter *adapter,
2789 struct tc_etf_qopt_offload *qopt)
2790 {
2791 struct e1000_hw *hw = &adapter->hw;
2792 int err;
2793
2794
2795 if (hw->mac.type != e1000_i210)
2796 return -EOPNOTSUPP;
2797
2798
2799 if (qopt->queue < 0 || qopt->queue > 1)
2800 return -EINVAL;
2801
2802 err = igb_save_txtime_params(adapter, qopt->queue, qopt->enable);
2803 if (err)
2804 return err;
2805
2806 igb_offload_apply(adapter, qopt->queue);
2807
2808 return 0;
2809 }
2810
2811 static LIST_HEAD(igb_block_cb_list);
2812
2813 static int igb_setup_tc(struct net_device *dev, enum tc_setup_type type,
2814 void *type_data)
2815 {
2816 struct igb_adapter *adapter = netdev_priv(dev);
2817
2818 switch (type) {
2819 case TC_SETUP_QDISC_CBS:
2820 return igb_offload_cbs(adapter, type_data);
2821 case TC_SETUP_BLOCK:
2822 return flow_block_cb_setup_simple(type_data,
2823 &igb_block_cb_list,
2824 igb_setup_tc_block_cb,
2825 adapter, adapter, true);
2826
2827 case TC_SETUP_QDISC_ETF:
2828 return igb_offload_txtime(adapter, type_data);
2829
2830 default:
2831 return -EOPNOTSUPP;
2832 }
2833 }
2834
2835 static const struct net_device_ops igb_netdev_ops = {
2836 .ndo_open = igb_open,
2837 .ndo_stop = igb_close,
2838 .ndo_start_xmit = igb_xmit_frame,
2839 .ndo_get_stats64 = igb_get_stats64,
2840 .ndo_set_rx_mode = igb_set_rx_mode,
2841 .ndo_set_mac_address = igb_set_mac,
2842 .ndo_change_mtu = igb_change_mtu,
2843 .ndo_do_ioctl = igb_ioctl,
2844 .ndo_tx_timeout = igb_tx_timeout,
2845 .ndo_validate_addr = eth_validate_addr,
2846 .ndo_vlan_rx_add_vid = igb_vlan_rx_add_vid,
2847 .ndo_vlan_rx_kill_vid = igb_vlan_rx_kill_vid,
2848 .ndo_set_vf_mac = igb_ndo_set_vf_mac,
2849 .ndo_set_vf_vlan = igb_ndo_set_vf_vlan,
2850 .ndo_set_vf_rate = igb_ndo_set_vf_bw,
2851 .ndo_set_vf_spoofchk = igb_ndo_set_vf_spoofchk,
2852 .ndo_set_vf_trust = igb_ndo_set_vf_trust,
2853 .ndo_get_vf_config = igb_ndo_get_vf_config,
2854 .ndo_fix_features = igb_fix_features,
2855 .ndo_set_features = igb_set_features,
2856 .ndo_fdb_add = igb_ndo_fdb_add,
2857 .ndo_features_check = igb_features_check,
2858 .ndo_setup_tc = igb_setup_tc,
2859 };
2860
2861
2862
2863
2864
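/**
 *  igb_set_fw_version - Configure version string for ethtool
 *  @adapter: adapter struct
 **/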
2865 void igb_set_fw_version(struct igb_adapter *adapter)
2866 {
2867 struct e1000_hw *hw = &adapter->hw;
2868 struct e1000_fw_version fw;
2869
2870 igb_get_fw_version(hw, &fw);
2871
2872 switch (hw->mac.type) {
2873 case e1000_i210:
2874 case e1000_i211:
2875 if (!(igb_get_flash_presence_i210(hw))) {
2876 snprintf(adapter->fw_version,
2877 sizeof(adapter->fw_version),
2878 "%2d.%2d-%d",
2879 fw.invm_major, fw.invm_minor,
2880 fw.invm_img_type);
2881 break;
2882 }
/* fall through */
2884 default:
2885
2886 if (fw.or_valid) {
2887 snprintf(adapter->fw_version,
2888 sizeof(adapter->fw_version),
2889 "%d.%d, 0x%08x, %d.%d.%d",
2890 fw.eep_major, fw.eep_minor, fw.etrack_id,
2891 fw.or_major, fw.or_build, fw.or_patch);
2892
2893 } else if (fw.etrack_id != 0X0000) {
2894 snprintf(adapter->fw_version,
2895 sizeof(adapter->fw_version),
2896 "%d.%d, 0x%08x",
2897 fw.eep_major, fw.eep_minor, fw.etrack_id);
2898 } else {
2899 snprintf(adapter->fw_version,
2900 sizeof(adapter->fw_version),
2901 "%d.%d.%d",
2902 fw.eep_major, fw.eep_minor, fw.eep_build);
2903 }
2904 break;
2905 }
2906 }
2907
2908
2909
2910
2911
2912
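/**
 *  igb_init_mas - init Media Autosense feature if enabled in the NVM
 *  @adapter: adapter struct
 **/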
2913 static void igb_init_mas(struct igb_adapter *adapter)
2914 {
2915 struct e1000_hw *hw = &adapter->hw;
2916 u16 eeprom_data;
2917
2918 hw->nvm.ops.read(hw, NVM_COMPAT, 1, &eeprom_data);
2919 switch (hw->bus.func) {
2920 case E1000_FUNC_0:
2921 if (eeprom_data & IGB_MAS_ENABLE_0) {
2922 adapter->flags |= IGB_FLAG_MAS_ENABLE;
2923 netdev_info(adapter->netdev,
2924 "MAS: Enabling Media Autosense for port %d\n",
2925 hw->bus.func);
2926 }
2927 break;
2928 case E1000_FUNC_1:
2929 if (eeprom_data & IGB_MAS_ENABLE_1) {
2930 adapter->flags |= IGB_FLAG_MAS_ENABLE;
2931 netdev_info(adapter->netdev,
2932 "MAS: Enabling Media Autosense for port %d\n",
2933 hw->bus.func);
2934 }
2935 break;
2936 case E1000_FUNC_2:
2937 if (eeprom_data & IGB_MAS_ENABLE_2) {
2938 adapter->flags |= IGB_FLAG_MAS_ENABLE;
2939 netdev_info(adapter->netdev,
2940 "MAS: Enabling Media Autosense for port %d\n",
2941 hw->bus.func);
2942 }
2943 break;
2944 case E1000_FUNC_3:
2945 if (eeprom_data & IGB_MAS_ENABLE_3) {
2946 adapter->flags |= IGB_FLAG_MAS_ENABLE;
2947 netdev_info(adapter->netdev,
2948 "MAS: Enabling Media Autosense for port %d\n",
2949 hw->bus.func);
2950 }
2951 break;
2952 default:
2953
2954 netdev_err(adapter->netdev,
2955 "MAS: Invalid port configuration, returning\n");
2956 break;
2957 }
2958 }
2959
2960
2961
2962
2963
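/**
 *  igb_init_i2c - Init I2C interface
 *  @adapter: pointer to adapter structure
 **/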
2964 static s32 igb_init_i2c(struct igb_adapter *adapter)
2965 {
2966 s32 status = 0;
2967
2968
2969 if (adapter->hw.mac.type != e1000_i350)
2970 return 0;
2971
2972
2973
2974
2975
2976 adapter->i2c_adap.owner = THIS_MODULE;
2977 adapter->i2c_algo = igb_i2c_algo;
2978 adapter->i2c_algo.data = adapter;
2979 adapter->i2c_adap.algo_data = &adapter->i2c_algo;
2980 adapter->i2c_adap.dev.parent = &adapter->pdev->dev;
2981 strlcpy(adapter->i2c_adap.name, "igb BB",
2982 sizeof(adapter->i2c_adap.name));
2983 status = i2c_bit_add_bus(&adapter->i2c_adap);
2984 return status;
2985 }
2986
2987
2988
2989
2990
2991
2992
2993
2994
2995
2996
2997
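/**
 *  igb_probe - Device Initialization Routine
 *  @pdev: PCI device information struct
 *  @ent: entry in igb_pci_tbl
 *
 *  Returns 0 on success, negative on failure
 *
 *  igb_probe initializes an adapter identified by a pci_dev structure.
 *  The OS initialization, configuring of the adapter private structure,
 *  and a hardware reset occur.
 **/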
2998 static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2999 {
3000 struct net_device *netdev;
3001 struct igb_adapter *adapter;
3002 struct e1000_hw *hw;
3003 u16 eeprom_data = 0;
3004 s32 ret_val;
3005 static int global_quad_port_a;
3006 const struct e1000_info *ei = igb_info_tbl[ent->driver_data];
3007 int err, pci_using_dac;
3008 u8 part_str[E1000_PBANUM_LENGTH];
3009
3010
3011
3012
3013 if (pdev->is_virtfn) {
3014 WARN(1, KERN_ERR "%s (%hx:%hx) should not be a VF!\n",
3015 pci_name(pdev), pdev->vendor, pdev->device);
3016 return -EINVAL;
3017 }
3018
3019 err = pci_enable_device_mem(pdev);
3020 if (err)
3021 return err;
3022
3023 pci_using_dac = 0;
3024 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
3025 if (!err) {
3026 pci_using_dac = 1;
3027 } else {
3028 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
3029 if (err) {
3030 dev_err(&pdev->dev,
3031 "No usable DMA configuration, aborting\n");
3032 goto err_dma;
3033 }
3034 }
3035
3036 err = pci_request_mem_regions(pdev, igb_driver_name);
3037 if (err)
3038 goto err_pci_reg;
3039
3040 pci_enable_pcie_error_reporting(pdev);
3041
3042 pci_set_master(pdev);
3043 pci_save_state(pdev);
3044
3045 err = -ENOMEM;
3046 netdev = alloc_etherdev_mq(sizeof(struct igb_adapter),
3047 IGB_MAX_TX_QUEUES);
3048 if (!netdev)
3049 goto err_alloc_etherdev;
3050
3051 SET_NETDEV_DEV(netdev, &pdev->dev);
3052
3053 pci_set_drvdata(pdev, netdev);
3054 adapter = netdev_priv(netdev);
3055 adapter->netdev = netdev;
3056 adapter->pdev = pdev;
3057 hw = &adapter->hw;
3058 hw->back = adapter;
3059 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
3060
3061 err = -EIO;
3062 adapter->io_addr = pci_iomap(pdev, 0, 0);
3063 if (!adapter->io_addr)
3064 goto err_ioremap;
3065
3066 hw->hw_addr = adapter->io_addr;
3067
3068 netdev->netdev_ops = &igb_netdev_ops;
3069 igb_set_ethtool_ops(netdev);
3070 netdev->watchdog_timeo = 5 * HZ;
3071
3072 strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
3073
3074 netdev->mem_start = pci_resource_start(pdev, 0);
3075 netdev->mem_end = pci_resource_end(pdev, 0);
3076
3077
3078 hw->vendor_id = pdev->vendor;
3079 hw->device_id = pdev->device;
3080 hw->revision_id = pdev->revision;
3081 hw->subsystem_vendor_id = pdev->subsystem_vendor;
3082 hw->subsystem_device_id = pdev->subsystem_device;
3083
3084
3085 memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
3086 memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
3087 memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
3088
3089 err = ei->get_invariants(hw);
3090 if (err)
3091 goto err_sw_init;
3092
3093
3094 err = igb_sw_init(adapter);
3095 if (err)
3096 goto err_sw_init;
3097
3098 igb_get_bus_info_pcie(hw);
3099
3100 hw->phy.autoneg_wait_to_complete = false;
3101
3102
3103 if (hw->phy.media_type == e1000_media_type_copper) {
3104 hw->phy.mdix = AUTO_ALL_MODES;
3105 hw->phy.disable_polarity_correction = false;
3106 hw->phy.ms_type = e1000_ms_hw_default;
3107 }
3108
3109 if (igb_check_reset_block(hw))
3110 dev_info(&pdev->dev,
3111 "PHY reset is blocked due to SOL/IDER session.\n");
3112
3113
3114
3115
3116
3117 netdev->features |= NETIF_F_SG |
3118 NETIF_F_TSO |
3119 NETIF_F_TSO6 |
3120 NETIF_F_RXHASH |
3121 NETIF_F_RXCSUM |
3122 NETIF_F_HW_CSUM;
3123
3124 if (hw->mac.type >= e1000_82576)
3125 netdev->features |= NETIF_F_SCTP_CRC;
3126
3127 if (hw->mac.type >= e1000_i350)
3128 netdev->features |= NETIF_F_HW_TC;
3129
3130 #define IGB_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \
3131 NETIF_F_GSO_GRE_CSUM | \
3132 NETIF_F_GSO_IPXIP4 | \
3133 NETIF_F_GSO_IPXIP6 | \
3134 NETIF_F_GSO_UDP_TUNNEL | \
3135 NETIF_F_GSO_UDP_TUNNEL_CSUM)
3136
3137 netdev->gso_partial_features = IGB_GSO_PARTIAL_FEATURES;
3138 netdev->features |= NETIF_F_GSO_PARTIAL | IGB_GSO_PARTIAL_FEATURES;
3139
3140
3141 netdev->hw_features |= netdev->features |
3142 NETIF_F_HW_VLAN_CTAG_RX |
3143 NETIF_F_HW_VLAN_CTAG_TX |
3144 NETIF_F_RXALL;
3145
3146 if (hw->mac.type >= e1000_i350)
3147 netdev->hw_features |= NETIF_F_NTUPLE;
3148
3149 if (pci_using_dac)
3150 netdev->features |= NETIF_F_HIGHDMA;
3151
3152 netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
3153 netdev->mpls_features |= NETIF_F_HW_CSUM;
3154 netdev->hw_enc_features |= netdev->vlan_features;
3155
3156
3157 netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
3158 NETIF_F_HW_VLAN_CTAG_RX |
3159 NETIF_F_HW_VLAN_CTAG_TX;
3160
3161 netdev->priv_flags |= IFF_SUPP_NOFCS;
3162
3163 netdev->priv_flags |= IFF_UNICAST_FLT;
3164
3165
3166 netdev->min_mtu = ETH_MIN_MTU;
3167 netdev->max_mtu = MAX_STD_JUMBO_FRAME_SIZE;
3168
3169 adapter->en_mng_pt = igb_enable_mng_pass_thru(hw);
3170
3171
3172
3173
3174 hw->mac.ops.reset_hw(hw);
3175
3176
3177
3178
3179 switch (hw->mac.type) {
3180 case e1000_i210:
3181 case e1000_i211:
3182 if (igb_get_flash_presence_i210(hw)) {
3183 if (hw->nvm.ops.validate(hw) < 0) {
3184 dev_err(&pdev->dev,
3185 "The NVM Checksum Is Not Valid\n");
3186 err = -EIO;
3187 goto err_eeprom;
3188 }
3189 }
3190 break;
3191 default:
3192 if (hw->nvm.ops.validate(hw) < 0) {
3193 dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
3194 err = -EIO;
3195 goto err_eeprom;
3196 }
3197 break;
3198 }
3199
3200 if (eth_platform_get_mac_address(&pdev->dev, hw->mac.addr)) {
3201
3202 if (hw->mac.ops.read_mac_addr(hw))
3203 dev_err(&pdev->dev, "NVM Read Error\n");
3204 }
3205
3206 memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
3207
3208 if (!is_valid_ether_addr(netdev->dev_addr)) {
3209 dev_err(&pdev->dev, "Invalid MAC Address\n");
3210 err = -EIO;
3211 goto err_eeprom;
3212 }
3213
3214 igb_set_default_mac_filter(adapter);
3215
3216
3217 igb_set_fw_version(adapter);
3218
3219
3220 if (hw->mac.type == e1000_i210) {
3221 wr32(E1000_RXPBS, I210_RXPBSIZE_DEFAULT);
3222 wr32(E1000_TXPBS, I210_TXPBSIZE_DEFAULT);
3223 }
3224
3225 timer_setup(&adapter->watchdog_timer, igb_watchdog, 0);
3226 timer_setup(&adapter->phy_info_timer, igb_update_phy_info, 0);
3227
3228 INIT_WORK(&adapter->reset_task, igb_reset_task);
3229 INIT_WORK(&adapter->watchdog_task, igb_watchdog_task);
3230
3231
3232 adapter->fc_autoneg = true;
3233 hw->mac.autoneg = true;
3234 hw->phy.autoneg_advertised = 0x2f;
3235
3236 hw->fc.requested_mode = e1000_fc_default;
3237 hw->fc.current_mode = e1000_fc_default;
3238
3239 igb_validate_mdi_setting(hw);
3240
3241
3242 if (hw->bus.func == 0)
3243 adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
3244
3245
3246 if (hw->mac.type >= e1000_82580)
3247 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
3248 NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
3249 &eeprom_data);
3250 else if (hw->bus.func == 1)
3251 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
3252
3253 if (eeprom_data & IGB_EEPROM_APME)
3254 adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
3255
3256
3257
3258
3259
3260 switch (pdev->device) {
3261 case E1000_DEV_ID_82575GB_QUAD_COPPER:
3262 adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
3263 break;
3264 case E1000_DEV_ID_82575EB_FIBER_SERDES:
3265 case E1000_DEV_ID_82576_FIBER:
3266 case E1000_DEV_ID_82576_SERDES:
/* Wake events only supported on port A for dual fiber
 * regardless of eeprom setting
 */
3270 if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1)
3271 adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
3272 break;
3273 case E1000_DEV_ID_82576_QUAD_COPPER:
3274 case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
3275
3276 if (global_quad_port_a != 0)
3277 adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
3278 else
3279 adapter->flags |= IGB_FLAG_QUAD_PORT_A;
3280
3281 if (++global_quad_port_a == 4)
3282 global_quad_port_a = 0;
3283 break;
3284 default:
3285
3286 if (!device_can_wakeup(&adapter->pdev->dev))
3287 adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
3288 }
3289
3290
3291 if (adapter->flags & IGB_FLAG_WOL_SUPPORTED)
3292 adapter->wol |= E1000_WUFC_MAG;
3293
3294
3295 if ((hw->mac.type == e1000_i350) &&
3296 (pdev->subsystem_vendor == PCI_VENDOR_ID_HP)) {
3297 adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
3298 adapter->wol = 0;
3299 }
3300
3301
3302
3303
3304 if (((hw->mac.type == e1000_i350) ||
3305 (hw->mac.type == e1000_i354)) &&
3306 (pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)) {
3307 adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
3308 adapter->wol = 0;
3309 }
3310 if (hw->mac.type == e1000_i350) {
3311 if (((pdev->subsystem_device == 0x5001) ||
3312 (pdev->subsystem_device == 0x5002)) &&
3313 (hw->bus.func == 0)) {
3314 adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
3315 adapter->wol = 0;
3316 }
3317 if (pdev->subsystem_device == 0x1F52)
3318 adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
3319 }
3320
3321 device_set_wakeup_enable(&adapter->pdev->dev,
3322 adapter->flags & IGB_FLAG_WOL_SUPPORTED);
3323
3324
3325 igb_reset(adapter);
3326
3327
3328 err = igb_init_i2c(adapter);
3329 if (err) {
3330 dev_err(&pdev->dev, "failed to init i2c interface\n");
3331 goto err_eeprom;
3332 }
3333
3334
3335
3336
3337 igb_get_hw_control(adapter);
3338
3339 strcpy(netdev->name, "eth%d");
3340 err = register_netdev(netdev);
3341 if (err)
3342 goto err_register;
3343
3344
3345 netif_carrier_off(netdev);
3346
3347 #ifdef CONFIG_IGB_DCA
3348 if (dca_add_requester(&pdev->dev) == 0) {
3349 adapter->flags |= IGB_FLAG_DCA_ENABLED;
3350 dev_info(&pdev->dev, "DCA enabled\n");
3351 igb_setup_dca(adapter);
3352 }
3353
3354 #endif
3355 #ifdef CONFIG_IGB_HWMON
3356
3357 if (hw->mac.type == e1000_i350 && hw->bus.func == 0) {
3358 u16 ets_word;
3359
3360
3361
3362
3363 hw->nvm.ops.read(hw, NVM_ETS_CFG, 1, &ets_word);
3364 if (ets_word != 0x0000 && ets_word != 0xFFFF)
3365 adapter->ets = true;
3366 else
3367 adapter->ets = false;
3368 if (igb_sysfs_init(adapter))
3369 dev_err(&pdev->dev,
3370 "failed to allocate sysfs resources\n");
3371 } else {
3372 adapter->ets = false;
3373 }
3374 #endif
3375
3376 adapter->ei = *ei;
3377 if (hw->dev_spec._82575.mas_capable)
3378 igb_init_mas(adapter);
3379
3380
3381 igb_ptp_init(adapter);
3382
3383 dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n");
3384
3385 if (hw->mac.type != e1000_i354) {
3386 dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n",
3387 netdev->name,
3388 ((hw->bus.speed == e1000_bus_speed_2500) ? "2.5Gb/s" :
3389 (hw->bus.speed == e1000_bus_speed_5000) ? "5.0Gb/s" :
3390 "unknown"),
3391 ((hw->bus.width == e1000_bus_width_pcie_x4) ?
3392 "Width x4" :
3393 (hw->bus.width == e1000_bus_width_pcie_x2) ?
3394 "Width x2" :
3395 (hw->bus.width == e1000_bus_width_pcie_x1) ?
3396 "Width x1" : "unknown"), netdev->dev_addr);
3397 }
3398
3399 if ((hw->mac.type >= e1000_i210 ||
3400 igb_get_flash_presence_i210(hw))) {
3401 ret_val = igb_read_part_string(hw, part_str,
3402 E1000_PBANUM_LENGTH);
3403 } else {
3404 ret_val = -E1000_ERR_INVM_VALUE_NOT_FOUND;
3405 }
3406
3407 if (ret_val)
3408 strcpy(part_str, "Unknown");
3409 dev_info(&pdev->dev, "%s: PBA No: %s\n", netdev->name, part_str);
3410 dev_info(&pdev->dev,
3411 "Using %s interrupts. %d rx queue(s), %d tx queue(s)\n",
3412 (adapter->flags & IGB_FLAG_HAS_MSIX) ? "MSI-X" :
3413 (adapter->flags & IGB_FLAG_HAS_MSI) ? "MSI" : "legacy",
3414 adapter->num_rx_queues, adapter->num_tx_queues);
3415 if (hw->phy.media_type == e1000_media_type_copper) {
3416 switch (hw->mac.type) {
3417 case e1000_i350:
3418 case e1000_i210:
3419 case e1000_i211:
3420
3421 err = igb_set_eee_i350(hw, true, true);
3422 if ((!err) &&
3423 (!hw->dev_spec._82575.eee_disable)) {
3424 adapter->eee_advert =
3425 MDIO_EEE_100TX | MDIO_EEE_1000T;
3426 adapter->flags |= IGB_FLAG_EEE;
3427 }
3428 break;
3429 case e1000_i354:
3430 if ((rd32(E1000_CTRL_EXT) &
3431 E1000_CTRL_EXT_LINK_MODE_SGMII)) {
3432 err = igb_set_eee_i354(hw, true, true);
3433 if ((!err) &&
3434 (!hw->dev_spec._82575.eee_disable)) {
3435 adapter->eee_advert =
3436 MDIO_EEE_100TX | MDIO_EEE_1000T;
3437 adapter->flags |= IGB_FLAG_EEE;
3438 }
3439 }
3440 break;
3441 default:
3442 break;
3443 }
3444 }
3445
3446 dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NEVER_SKIP);
3447
3448 pm_runtime_put_noidle(&pdev->dev);
3449 return 0;
3450
3451 err_register:
3452 igb_release_hw_control(adapter);
3453 memset(&adapter->i2c_adap, 0, sizeof(adapter->i2c_adap));
3454 err_eeprom:
3455 if (!igb_check_reset_block(hw))
3456 igb_reset_phy(hw);
3457
3458 if (hw->flash_address)
3459 iounmap(hw->flash_address);
3460 err_sw_init:
3461 kfree(adapter->mac_table);
3462 kfree(adapter->shadow_vfta);
3463 igb_clear_interrupt_scheme(adapter);
3464 #ifdef CONFIG_PCI_IOV
3465 igb_disable_sriov(pdev);
3466 #endif
3467 pci_iounmap(pdev, adapter->io_addr);
3468 err_ioremap:
3469 free_netdev(netdev);
3470 err_alloc_etherdev:
3471 pci_release_mem_regions(pdev);
3472 err_pci_reg:
3473 err_dma:
3474 pci_disable_device(pdev);
3475 return err;
3476 }
3477
3478 #ifdef CONFIG_PCI_IOV
3479 static int igb_disable_sriov(struct pci_dev *pdev)
3480 {
3481 struct net_device *netdev = pci_get_drvdata(pdev);
3482 struct igb_adapter *adapter = netdev_priv(netdev);
3483 struct e1000_hw *hw = &adapter->hw;
3484
3485
3486 if (adapter->vf_data) {
3487
3488 if (pci_vfs_assigned(pdev)) {
3489 dev_warn(&pdev->dev,
3490 "Cannot deallocate SR-IOV virtual functions while they are assigned - VFs will not be deallocated\n");
3491 return -EPERM;
3492 } else {
3493 pci_disable_sriov(pdev);
3494 msleep(500);
3495 }
3496
3497 kfree(adapter->vf_mac_list);
3498 adapter->vf_mac_list = NULL;
3499 kfree(adapter->vf_data);
3500 adapter->vf_data = NULL;
3501 adapter->vfs_allocated_count = 0;
3502 wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
3503 wrfl();
3504 msleep(100);
3505 dev_info(&pdev->dev, "IOV Disabled\n");
3506
3507
3508 adapter->flags |= IGB_FLAG_DMAC;
3509 }
3510
3511 return 0;
3512 }
3513
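/**
 *  igb_enable_sriov - allocate VF state and enable SR-IOV
 *  @pdev: PCI device information struct
 *  @num_vfs: number of virtual functions requested
 *
 *  Allocates the per-VF data and the VF MAC filter list, then enables
 *  SR-IOV in the PCI layer unless VFs were already pre-allocated.
 **/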
3514 static int igb_enable_sriov(struct pci_dev *pdev, int num_vfs)
3515 {
3516 struct net_device *netdev = pci_get_drvdata(pdev);
3517 struct igb_adapter *adapter = netdev_priv(netdev);
3518 int old_vfs = pci_num_vf(pdev);
3519 struct vf_mac_filter *mac_list;
3520 int err = 0;
3521 int num_vf_mac_filters, i;
3522
3523 if (!(adapter->flags & IGB_FLAG_HAS_MSIX) || num_vfs > 7) {
3524 err = -EPERM;
3525 goto out;
3526 }
3527 if (!num_vfs)
3528 goto out;
3529
3530 if (old_vfs) {
3531 dev_info(&pdev->dev, "%d pre-allocated VFs found - override max_vfs setting of %d\n",
3532 old_vfs, max_vfs);
3533 adapter->vfs_allocated_count = old_vfs;
3534 } else
3535 adapter->vfs_allocated_count = num_vfs;
3536
3537 adapter->vf_data = kcalloc(adapter->vfs_allocated_count,
3538 sizeof(struct vf_data_storage), GFP_KERNEL);
3539
3540
3541 if (!adapter->vf_data) {
3542 adapter->vfs_allocated_count = 0;
3543 err = -ENOMEM;
3544 goto out;
3545 }
3546
3547
3548
3549
3550
3551
3552 num_vf_mac_filters = adapter->hw.mac.rar_entry_count -
3553 (1 + IGB_PF_MAC_FILTERS_RESERVED +
3554 adapter->vfs_allocated_count);
3555
3556 adapter->vf_mac_list = kcalloc(num_vf_mac_filters,
3557 sizeof(struct vf_mac_filter),
3558 GFP_KERNEL);
3559
3560 mac_list = adapter->vf_mac_list;
3561 INIT_LIST_HEAD(&adapter->vf_macs.l);
3562
3563 if (adapter->vf_mac_list) {
3564
3565 for (i = 0; i < num_vf_mac_filters; i++) {
3566 mac_list->vf = -1;
3567 mac_list->free = true;
3568 list_add(&mac_list->l, &adapter->vf_macs.l);
3569 mac_list++;
3570 }
3571 } else {
3572
3573
3574
3575 dev_err(&pdev->dev,
3576 "Unable to allocate memory for VF MAC filter list\n");
3577 }
3578
3579
3580 if (!old_vfs) {
3581 err = pci_enable_sriov(pdev, adapter->vfs_allocated_count);
3582 if (err)
3583 goto err_out;
3584 }
3585 dev_info(&pdev->dev, "%d VFs allocated\n",
3586 adapter->vfs_allocated_count);
3587 for (i = 0; i < adapter->vfs_allocated_count; i++)
3588 igb_vf_configure(adapter, i);
3589
3590
3591 adapter->flags &= ~IGB_FLAG_DMAC;
3592 goto out;
3593
3594 err_out:
3595 kfree(adapter->vf_mac_list);
3596 adapter->vf_mac_list = NULL;
3597 kfree(adapter->vf_data);
3598 adapter->vf_data = NULL;
3599 adapter->vfs_allocated_count = 0;
3600 out:
3601 return err;
3602 }
3603
3604 #endif
3605
3606
3607
3608
3609 static void igb_remove_i2c(struct igb_adapter *adapter)
3610 {
3611
3612 i2c_del_adapter(&adapter->i2c_adap);
3613 }
3614
3615
3616
3617
3618
3619
3620
3621
3622
3623
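/**
 *  igb_remove - Device Removal Routine
 *  @pdev: PCI device information struct
 *
 *  igb_remove is called by the PCI subsystem to alert the driver
 *  that it should release a PCI device.  This could be caused by a
 *  Hot-Plug event, or because the driver is going to be removed from
 *  memory.
 **/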
3624 static void igb_remove(struct pci_dev *pdev)
3625 {
3626 struct net_device *netdev = pci_get_drvdata(pdev);
3627 struct igb_adapter *adapter = netdev_priv(netdev);
3628 struct e1000_hw *hw = &adapter->hw;
3629
3630 pm_runtime_get_noresume(&pdev->dev);
3631 #ifdef CONFIG_IGB_HWMON
3632 igb_sysfs_exit(adapter);
3633 #endif
3634 igb_remove_i2c(adapter);
3635 igb_ptp_stop(adapter);
3636
3637
3638
3639 set_bit(__IGB_DOWN, &adapter->state);
3640 del_timer_sync(&adapter->watchdog_timer);
3641 del_timer_sync(&adapter->phy_info_timer);
3642
3643 cancel_work_sync(&adapter->reset_task);
3644 cancel_work_sync(&adapter->watchdog_task);
3645
3646 #ifdef CONFIG_IGB_DCA
3647 if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
3648 dev_info(&pdev->dev, "DCA disabled\n");
3649 dca_remove_requester(&pdev->dev);
3650 adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
3651 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
3652 }
3653 #endif
3654
3655
3656
3657
3658 igb_release_hw_control(adapter);
3659
3660 #ifdef CONFIG_PCI_IOV
3661 igb_disable_sriov(pdev);
3662 #endif
3663
3664 unregister_netdev(netdev);
3665
3666 igb_clear_interrupt_scheme(adapter);
3667
3668 pci_iounmap(pdev, adapter->io_addr);
3669 if (hw->flash_address)
3670 iounmap(hw->flash_address);
3671 pci_release_mem_regions(pdev);
3672
3673 kfree(adapter->mac_table);
3674 kfree(adapter->shadow_vfta);
3675 free_netdev(netdev);
3676
3677 pci_disable_pcie_error_reporting(pdev);
3678
3679 pci_disable_device(pdev);
3680 }
3681
3682
3683
3684
3685
3686
3687
3688
3689
3690
3691 static void igb_probe_vfs(struct igb_adapter *adapter)
3692 {
3693 #ifdef CONFIG_PCI_IOV
3694 struct pci_dev *pdev = adapter->pdev;
3695 struct e1000_hw *hw = &adapter->hw;
3696
3697
3698 if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211))
3699 return;
3700
3701
3702
3703
3704
3705 igb_set_interrupt_capability(adapter, true);
3706 igb_reset_interrupt_capability(adapter);
3707
3708 pci_sriov_set_totalvfs(pdev, 7);
3709 igb_enable_sriov(pdev, max_vfs);
3710
3711 #endif
3712 }
3713
3714 unsigned int igb_get_max_rss_queues(struct igb_adapter *adapter)
3715 {
3716 struct e1000_hw *hw = &adapter->hw;
3717 unsigned int max_rss_queues;
3718
3719
3720 switch (hw->mac.type) {
3721 case e1000_i211:
3722 max_rss_queues = IGB_MAX_RX_QUEUES_I211;
3723 break;
3724 case e1000_82575:
3725 case e1000_i210:
3726 max_rss_queues = IGB_MAX_RX_QUEUES_82575;
3727 break;
3728 case e1000_i350:
/* I350 cannot do RSS and SR-IOV at the same time */
3730 if (!!adapter->vfs_allocated_count) {
3731 max_rss_queues = 1;
3732 break;
3733 }
/* fall through */
3735 case e1000_82576:
3736 if (!!adapter->vfs_allocated_count) {
3737 max_rss_queues = 2;
3738 break;
3739 }
/* fall through */
3741 case e1000_82580:
3742 case e1000_i354:
3743 default:
3744 max_rss_queues = IGB_MAX_RX_QUEUES;
3745 break;
3746 }
3747
3748 return max_rss_queues;
3749 }
3750
3751 static void igb_init_queue_configuration(struct igb_adapter *adapter)
3752 {
3753 u32 max_rss_queues;
3754
3755 max_rss_queues = igb_get_max_rss_queues(adapter);
3756 adapter->rss_queues = min_t(u32, max_rss_queues, num_online_cpus());
3757
3758 igb_set_flag_queue_pairs(adapter, max_rss_queues);
3759 }
3760
3761 void igb_set_flag_queue_pairs(struct igb_adapter *adapter,
3762 const u32 max_rss_queues)
3763 {
3764 struct e1000_hw *hw = &adapter->hw;
3765
3766
3767 switch (hw->mac.type) {
3768 case e1000_82575:
3769 case e1000_i211:
/* Device supports enough interrupts without queue pairing */
3771 break;
3772 case e1000_82576:
3773 case e1000_82580:
3774 case e1000_i350:
3775 case e1000_i354:
3776 case e1000_i210:
3777 default:
/* If the number of RSS queues used exceeds half of max_rss_queues, pair
 * the queues in order to conserve interrupts due to limited supply.
 */
3781 if (adapter->rss_queues > (max_rss_queues / 2))
3782 adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
3783 else
3784 adapter->flags &= ~IGB_FLAG_QUEUE_PAIRS;
3785 break;
3786 }
3787 }
3788
3789
3790
3791
3792
3793
3794
3795
3796
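/**
 *  igb_sw_init - Initialize general software structures (struct igb_adapter)
 *  @adapter: board private structure to initialize
 *
 *  igb_sw_init initializes the Adapter private data structure.
 *  Fields are initialized based on PCI device information and
 *  OS network device settings (MTU size).
 **/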
3797 static int igb_sw_init(struct igb_adapter *adapter)
3798 {
3799 struct e1000_hw *hw = &adapter->hw;
3800 struct net_device *netdev = adapter->netdev;
3801 struct pci_dev *pdev = adapter->pdev;
3802
3803 pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);
3804
3805
3806 adapter->tx_ring_count = IGB_DEFAULT_TXD;
3807 adapter->rx_ring_count = IGB_DEFAULT_RXD;
3808
3809
3810 adapter->rx_itr_setting = IGB_DEFAULT_ITR;
3811 adapter->tx_itr_setting = IGB_DEFAULT_ITR;
3812
3813
3814 adapter->tx_work_limit = IGB_DEFAULT_TX_WORK;
3815
3816 adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN +
3817 VLAN_HLEN;
3818 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
3819
3820 spin_lock_init(&adapter->nfc_lock);
3821 spin_lock_init(&adapter->stats64_lock);
3822 #ifdef CONFIG_PCI_IOV
3823 switch (hw->mac.type) {
3824 case e1000_82576:
3825 case e1000_i350:
3826 if (max_vfs > 7) {
3827 dev_warn(&pdev->dev,
3828 "Maximum of 7 VFs per PF, using max\n");
3829 max_vfs = adapter->vfs_allocated_count = 7;
3830 } else
3831 adapter->vfs_allocated_count = max_vfs;
3832 if (adapter->vfs_allocated_count)
3833 dev_warn(&pdev->dev,
3834 "Enabling SR-IOV VFs using the module parameter is deprecated - please use the pci sysfs interface.\n");
3835 break;
3836 default:
3837 break;
3838 }
3839 #endif
3840
3841
3842 adapter->flags |= IGB_FLAG_HAS_MSIX;
3843
3844 adapter->mac_table = kcalloc(hw->mac.rar_entry_count,
3845 sizeof(struct igb_mac_addr),
3846 GFP_KERNEL);
3847 if (!adapter->mac_table)
3848 return -ENOMEM;
3849
3850 igb_probe_vfs(adapter);
3851
3852 igb_init_queue_configuration(adapter);
3853
3854
3855 adapter->shadow_vfta = kcalloc(E1000_VLAN_FILTER_TBL_SIZE, sizeof(u32),
3856 GFP_KERNEL);
3857 if (!adapter->shadow_vfta)
3858 return -ENOMEM;
3859
3860
3861 if (igb_init_interrupt_scheme(adapter, true)) {
3862 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
3863 return -ENOMEM;
3864 }
3865
3866
3867 igb_irq_disable(adapter);
3868
3869 if (hw->mac.type >= e1000_i350)
3870 adapter->flags &= ~IGB_FLAG_DMAC;
3871
3872 set_bit(__IGB_DOWN, &adapter->state);
3873 return 0;
3874 }
3875
3876
3877
3878
3879
3880
3881
3882
3883
3884
3885
3886
3887
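/**
 *  __igb_open - Called when a network interface is made active
 *  @netdev: network interface device structure
 *  @resuming: indicates whether we are in a resume call
 *
 *  Returns 0 on success, negative value on failure
 *
 *  The open entry point is called when a network interface is made
 *  active by the system (IFF_UP).  At this point all resources needed
 *  for transmit and receive operations are allocated, the interrupt
 *  handler is registered with the OS, the watchdog timer is started,
 *  and the stack is notified that the interface is ready.
 **/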
3888 static int __igb_open(struct net_device *netdev, bool resuming)
3889 {
3890 struct igb_adapter *adapter = netdev_priv(netdev);
3891 struct e1000_hw *hw = &adapter->hw;
3892 struct pci_dev *pdev = adapter->pdev;
3893 int err;
3894 int i;
3895
3896
3897 if (test_bit(__IGB_TESTING, &adapter->state)) {
3898 WARN_ON(resuming);
3899 return -EBUSY;
3900 }
3901
3902 if (!resuming)
3903 pm_runtime_get_sync(&pdev->dev);
3904
3905 netif_carrier_off(netdev);
3906
3907
3908 err = igb_setup_all_tx_resources(adapter);
3909 if (err)
3910 goto err_setup_tx;
3911
3912
3913 err = igb_setup_all_rx_resources(adapter);
3914 if (err)
3915 goto err_setup_rx;
3916
3917 igb_power_up_link(adapter);
3918
3919
3920
3921
3922
3923
3924 igb_configure(adapter);
3925
3926 err = igb_request_irq(adapter);
3927 if (err)
3928 goto err_req_irq;
3929
3930
3931 err = netif_set_real_num_tx_queues(adapter->netdev,
3932 adapter->num_tx_queues);
3933 if (err)
3934 goto err_set_queues;
3935
3936 err = netif_set_real_num_rx_queues(adapter->netdev,
3937 adapter->num_rx_queues);
3938 if (err)
3939 goto err_set_queues;
3940
3941
3942 clear_bit(__IGB_DOWN, &adapter->state);
3943
3944 for (i = 0; i < adapter->num_q_vectors; i++)
3945 napi_enable(&(adapter->q_vector[i]->napi));
3946
3947
3948 rd32(E1000_TSICR);
3949 rd32(E1000_ICR);
3950
3951 igb_irq_enable(adapter);
3952
3953
3954 if (adapter->vfs_allocated_count) {
3955 u32 reg_data = rd32(E1000_CTRL_EXT);
3956
3957 reg_data |= E1000_CTRL_EXT_PFRSTD;
3958 wr32(E1000_CTRL_EXT, reg_data);
3959 }
3960
3961 netif_tx_start_all_queues(netdev);
3962
3963 if (!resuming)
3964 pm_runtime_put(&pdev->dev);
3965
3966
3967 hw->mac.get_link_status = 1;
3968 schedule_work(&adapter->watchdog_task);
3969
3970 return 0;
3971
3972 err_set_queues:
3973 igb_free_irq(adapter);
3974 err_req_irq:
3975 igb_release_hw_control(adapter);
3976 igb_power_down_link(adapter);
3977 igb_free_all_rx_resources(adapter);
3978 err_setup_rx:
3979 igb_free_all_tx_resources(adapter);
3980 err_setup_tx:
3981 igb_reset(adapter);
3982 if (!resuming)
3983 pm_runtime_put(&pdev->dev);
3984
3985 return err;
3986 }
3987
3988 int igb_open(struct net_device *netdev)
3989 {
3990 return __igb_open(netdev, false);
3991 }
3992
3993
3994
3995
3996
3997
3998
3999
4000
4001
4002
4003
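/**
 *  __igb_close - Disables a network interface
 *  @netdev: network interface device structure
 *  @suspending: indicates we are in a suspend call
 *
 *  Returns 0, this is not allowed to fail
 *
 *  The close entry point is called when an interface is de-activated
 *  by the OS.  The hardware is still under the driver's control, but
 *  needs to be disabled.  All transmit and receive resources are freed.
 **/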
4004 static int __igb_close(struct net_device *netdev, bool suspending)
4005 {
4006 struct igb_adapter *adapter = netdev_priv(netdev);
4007 struct pci_dev *pdev = adapter->pdev;
4008
4009 WARN_ON(test_bit(__IGB_RESETTING, &adapter->state));
4010
4011 if (!suspending)
4012 pm_runtime_get_sync(&pdev->dev);
4013
4014 igb_down(adapter);
4015 igb_free_irq(adapter);
4016
4017 igb_free_all_tx_resources(adapter);
4018 igb_free_all_rx_resources(adapter);
4019
4020 if (!suspending)
4021 pm_runtime_put_sync(&pdev->dev);
4022 return 0;
4023 }
4024
4025 int igb_close(struct net_device *netdev)
4026 {
4027 if (netif_device_present(netdev) || netdev->dismantle)
4028 return __igb_close(netdev, false);
4029 return 0;
4030 }
4031
4032
4033
4034
4035
4036
4037
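/**
 *  igb_setup_tx_resources - allocate Tx resources (Descriptors)
 *  @tx_ring: tx descriptor ring (for a specific queue) to setup
 *
 *  Return 0 on success, negative on failure
 **/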
4038 int igb_setup_tx_resources(struct igb_ring *tx_ring)
4039 {
4040 struct device *dev = tx_ring->dev;
4041 int size;
4042
4043 size = sizeof(struct igb_tx_buffer) * tx_ring->count;
4044
4045 tx_ring->tx_buffer_info = vmalloc(size);
4046 if (!tx_ring->tx_buffer_info)
4047 goto err;
4048
4049
4050 tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
4051 tx_ring->size = ALIGN(tx_ring->size, 4096);
4052
4053 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
4054 &tx_ring->dma, GFP_KERNEL);
4055 if (!tx_ring->desc)
4056 goto err;
4057
4058 tx_ring->next_to_use = 0;
4059 tx_ring->next_to_clean = 0;
4060
4061 return 0;
4062
4063 err:
4064 vfree(tx_ring->tx_buffer_info);
4065 tx_ring->tx_buffer_info = NULL;
4066 dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n");
4067 return -ENOMEM;
4068 }
4069
4070
4071
4072
4073
4074
4075
4076
4077 static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
4078 {
4079 struct pci_dev *pdev = adapter->pdev;
4080 int i, err = 0;
4081
4082 for (i = 0; i < adapter->num_tx_queues; i++) {
4083 err = igb_setup_tx_resources(adapter->tx_ring[i]);
4084 if (err) {
4085 dev_err(&pdev->dev,
4086 "Allocation for Tx Queue %u failed\n", i);
4087 for (i--; i >= 0; i--)
4088 igb_free_tx_resources(adapter->tx_ring[i]);
4089 break;
4090 }
4091 }
4092
4093 return err;
4094 }
4095
4096
4097
4098
4099
4100 void igb_setup_tctl(struct igb_adapter *adapter)
4101 {
4102 struct e1000_hw *hw = &adapter->hw;
4103 u32 tctl;
4104
4105
4106 wr32(E1000_TXDCTL(0), 0);
4107
4108
4109 tctl = rd32(E1000_TCTL);
4110 tctl &= ~E1000_TCTL_CT;
4111 tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
4112 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
4113
4114 igb_config_collision_dist(hw);
4115
4116
4117 tctl |= E1000_TCTL_EN;
4118
4119 wr32(E1000_TCTL, tctl);
4120 }
4121
4122
4123
4124
4125
4126
4127
4128
4129 void igb_configure_tx_ring(struct igb_adapter *adapter,
4130 struct igb_ring *ring)
4131 {
4132 struct e1000_hw *hw = &adapter->hw;
4133 u32 txdctl = 0;
4134 u64 tdba = ring->dma;
4135 int reg_idx = ring->reg_idx;
4136
4137 wr32(E1000_TDLEN(reg_idx),
4138 ring->count * sizeof(union e1000_adv_tx_desc));
4139 wr32(E1000_TDBAL(reg_idx),
4140 tdba & 0x00000000ffffffffULL);
4141 wr32(E1000_TDBAH(reg_idx), tdba >> 32);
4142
4143 ring->tail = adapter->io_addr + E1000_TDT(reg_idx);
4144 wr32(E1000_TDH(reg_idx), 0);
4145 writel(0, ring->tail);
4146
4147 txdctl |= IGB_TX_PTHRESH;
4148 txdctl |= IGB_TX_HTHRESH << 8;
4149 txdctl |= IGB_TX_WTHRESH << 16;
4150
4151
4152 memset(ring->tx_buffer_info, 0,
4153 sizeof(struct igb_tx_buffer) * ring->count);
4154
4155 txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
4156 wr32(E1000_TXDCTL(reg_idx), txdctl);
4157 }
4158
4159
4160
4161
4162
4163
4164
4165 static void igb_configure_tx(struct igb_adapter *adapter)
4166 {
4167 struct e1000_hw *hw = &adapter->hw;
4168 int i;
4169
4170
4171 for (i = 0; i < adapter->num_tx_queues; i++)
4172 wr32(E1000_TXDCTL(adapter->tx_ring[i]->reg_idx), 0);
4173
4174 wrfl();
4175 usleep_range(10000, 20000);
4176
4177 for (i = 0; i < adapter->num_tx_queues; i++)
4178 igb_configure_tx_ring(adapter, adapter->tx_ring[i]);
4179 }
4180
4181
4182
4183
4184
4185
4186
4187 int igb_setup_rx_resources(struct igb_ring *rx_ring)
4188 {
4189 struct device *dev = rx_ring->dev;
4190 int size;
4191
4192 size = sizeof(struct igb_rx_buffer) * rx_ring->count;
4193
4194 rx_ring->rx_buffer_info = vmalloc(size);
4195 if (!rx_ring->rx_buffer_info)
4196 goto err;
4197
4198
4199 rx_ring->size = rx_ring->count * sizeof(union e1000_adv_rx_desc);
4200 rx_ring->size = ALIGN(rx_ring->size, 4096);
4201
4202 rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
4203 &rx_ring->dma, GFP_KERNEL);
4204 if (!rx_ring->desc)
4205 goto err;
4206
4207 rx_ring->next_to_alloc = 0;
4208 rx_ring->next_to_clean = 0;
4209 rx_ring->next_to_use = 0;
4210
4211 return 0;
4212
4213 err:
4214 vfree(rx_ring->rx_buffer_info);
4215 rx_ring->rx_buffer_info = NULL;
4216 dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n");
4217 return -ENOMEM;
4218 }
4219
4220
4221
4222
4223
4224
4225
4226
4227 static int igb_setup_all_rx_resources(struct igb_adapter *adapter)
4228 {
4229 struct pci_dev *pdev = adapter->pdev;
4230 int i, err = 0;
4231
4232 for (i = 0; i < adapter->num_rx_queues; i++) {
4233 err = igb_setup_rx_resources(adapter->rx_ring[i]);
4234 if (err) {
4235 dev_err(&pdev->dev,
4236 "Allocation for Rx Queue %u failed\n", i);
4237 for (i--; i >= 0; i--)
4238 igb_free_rx_resources(adapter->rx_ring[i]);
4239 break;
4240 }
4241 }
4242
4243 return err;
4244 }
4245
4246
4247
4248
4249
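/**
 *  igb_setup_mrqc - configure the multiple receive queue control registers
 *  @adapter: Board private structure
 **/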
4250 static void igb_setup_mrqc(struct igb_adapter *adapter)
4251 {
4252 struct e1000_hw *hw = &adapter->hw;
4253 u32 mrqc, rxcsum;
4254 u32 j, num_rx_queues;
4255 u32 rss_key[10];
4256
4257 netdev_rss_key_fill(rss_key, sizeof(rss_key));
4258 for (j = 0; j < 10; j++)
4259 wr32(E1000_RSSRK(j), rss_key[j]);
4260
4261 num_rx_queues = adapter->rss_queues;
4262
4263 switch (hw->mac.type) {
4264 case e1000_82576:
4265
4266 if (adapter->vfs_allocated_count)
4267 num_rx_queues = 2;
4268 break;
4269 default:
4270 break;
4271 }
4272
4273 if (adapter->rss_indir_tbl_init != num_rx_queues) {
4274 for (j = 0; j < IGB_RETA_SIZE; j++)
4275 adapter->rss_indir_tbl[j] =
4276 (j * num_rx_queues) / IGB_RETA_SIZE;
4277 adapter->rss_indir_tbl_init = num_rx_queues;
4278 }
4279 igb_write_rss_indir_tbl(adapter);
4280
4281
4282
4283
4284
4285 rxcsum = rd32(E1000_RXCSUM);
4286 rxcsum |= E1000_RXCSUM_PCSD;
4287
4288 if (adapter->hw.mac.type >= e1000_82576)
4289
4290 rxcsum |= E1000_RXCSUM_CRCOFL;
4291
4292
4293 wr32(E1000_RXCSUM, rxcsum);
4294
4295
4296
4297
4298 mrqc = E1000_MRQC_RSS_FIELD_IPV4 |
4299 E1000_MRQC_RSS_FIELD_IPV4_TCP |
4300 E1000_MRQC_RSS_FIELD_IPV6 |
4301 E1000_MRQC_RSS_FIELD_IPV6_TCP |
4302 E1000_MRQC_RSS_FIELD_IPV6_TCP_EX;
4303
4304 if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV4_UDP)
4305 mrqc |= E1000_MRQC_RSS_FIELD_IPV4_UDP;
4306 if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV6_UDP)
4307 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP;
4308
4309
4310
4311
4312
4313 if (adapter->vfs_allocated_count) {
4314 if (hw->mac.type > e1000_82575) {
4315
4316 u32 vtctl = rd32(E1000_VT_CTL);
4317
4318 vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK |
4319 E1000_VT_CTL_DISABLE_DEF_POOL);
4320 vtctl |= adapter->vfs_allocated_count <<
4321 E1000_VT_CTL_DEFAULT_POOL_SHIFT;
4322 wr32(E1000_VT_CTL, vtctl);
4323 }
4324 if (adapter->rss_queues > 1)
4325 mrqc |= E1000_MRQC_ENABLE_VMDQ_RSS_MQ;
4326 else
4327 mrqc |= E1000_MRQC_ENABLE_VMDQ;
4328 } else {
4329 if (hw->mac.type != e1000_i211)
4330 mrqc |= E1000_MRQC_ENABLE_RSS_MQ;
4331 }
4332 igb_vmm_control(adapter);
4333
4334 wr32(E1000_MRQC, mrqc);
4335 }
4336
4337
4338
4339
4340
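/**
 *  igb_setup_rctl - configure the receive control registers
 *  @adapter: Board private structure
 **/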
4341 void igb_setup_rctl(struct igb_adapter *adapter)
4342 {
4343 struct e1000_hw *hw = &adapter->hw;
4344 u32 rctl;
4345
4346 rctl = rd32(E1000_RCTL);
4347
4348 rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
4349 rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
4350
4351 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_RDMTS_HALF |
4352 (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
4353
4354
4355
4356
4357
4358 rctl |= E1000_RCTL_SECRC;
4359
4360
4361 rctl &= ~(E1000_RCTL_SBP | E1000_RCTL_SZ_256);
4362
4363
4364 rctl |= E1000_RCTL_LPE;
4365
4366
4367 wr32(E1000_RXDCTL(0), 0);
4368
4369
4370
4371
4372
4373 if (adapter->vfs_allocated_count) {
4374
4375 wr32(E1000_QDE, ALL_QUEUES);
4376 }
4377
4378
4379 if (adapter->netdev->features & NETIF_F_RXALL) {
4380
4381
4382
4383 rctl |= (E1000_RCTL_SBP |
4384 E1000_RCTL_BAM |
4385 E1000_RCTL_PMCF);
4386
4387 rctl &= ~(E1000_RCTL_DPF |
4388 E1000_RCTL_CFIEN);
4389
4390
4391
4392 }
4393
4394 wr32(E1000_RCTL, rctl);
4395 }
4396
4397 static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size,
4398 int vfn)
4399 {
4400 struct e1000_hw *hw = &adapter->hw;
4401 u32 vmolr;
4402
4403 if (size > MAX_JUMBO_FRAME_SIZE)
4404 size = MAX_JUMBO_FRAME_SIZE;
4405
4406 vmolr = rd32(E1000_VMOLR(vfn));
4407 vmolr &= ~E1000_VMOLR_RLPML_MASK;
4408 vmolr |= size | E1000_VMOLR_LPE;
4409 wr32(E1000_VMOLR(vfn), vmolr);
4410
4411 return 0;
4412 }
4413
4414 static inline void igb_set_vf_vlan_strip(struct igb_adapter *adapter,
4415 int vfn, bool enable)
4416 {
4417 struct e1000_hw *hw = &adapter->hw;
4418 u32 val, reg;
4419
4420 if (hw->mac.type < e1000_82576)
4421 return;
4422
4423 if (hw->mac.type == e1000_i350)
4424 reg = E1000_DVMOLR(vfn);
4425 else
4426 reg = E1000_VMOLR(vfn);
4427
4428 val = rd32(reg);
4429 if (enable)
4430 val |= E1000_VMOLR_STRVLAN;
4431 else
4432 val &= ~(E1000_VMOLR_STRVLAN);
4433 wr32(reg, val);
4434 }
4435
4436 static inline void igb_set_vmolr(struct igb_adapter *adapter,
4437 int vfn, bool aupe)
4438 {
4439 struct e1000_hw *hw = &adapter->hw;
4440 u32 vmolr;
4441
4442
4443
4444
4445 if (hw->mac.type < e1000_82576)
4446 return;
4447
4448 vmolr = rd32(E1000_VMOLR(vfn));
4449 if (aupe)
4450 vmolr |= E1000_VMOLR_AUPE;
4451 else
4452 vmolr &= ~(E1000_VMOLR_AUPE);
4453
4454
4455 vmolr &= ~(E1000_VMOLR_BAM | E1000_VMOLR_RSSE);
4456
4457 if (adapter->rss_queues > 1 && vfn == adapter->vfs_allocated_count)
4458 vmolr |= E1000_VMOLR_RSSE;
4459
4460
4461
4462 if (vfn <= adapter->vfs_allocated_count)
4463 vmolr |= E1000_VMOLR_BAM;
4464
4465 wr32(E1000_VMOLR(vfn), vmolr);
4466 }
4467
4468
4469
4470
4471
4472
4473
4474
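/**
 *  igb_configure_rx_ring - Configure a receive ring after Reset
 *  @adapter: board private structure
 *  @ring: receive ring to be configured
 *
 *  Configure the Rx unit of the MAC after a reset.
 **/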
4475 void igb_configure_rx_ring(struct igb_adapter *adapter,
4476 struct igb_ring *ring)
4477 {
4478 struct e1000_hw *hw = &adapter->hw;
4479 union e1000_adv_rx_desc *rx_desc;
4480 u64 rdba = ring->dma;
4481 int reg_idx = ring->reg_idx;
4482 u32 srrctl = 0, rxdctl = 0;
4483
4484
4485 wr32(E1000_RXDCTL(reg_idx), 0);
4486
4487
4488 wr32(E1000_RDBAL(reg_idx),
4489 rdba & 0x00000000ffffffffULL);
4490 wr32(E1000_RDBAH(reg_idx), rdba >> 32);
4491 wr32(E1000_RDLEN(reg_idx),
4492 ring->count * sizeof(union e1000_adv_rx_desc));
4493
4494
4495 ring->tail = adapter->io_addr + E1000_RDT(reg_idx);
4496 wr32(E1000_RDH(reg_idx), 0);
4497 writel(0, ring->tail);
4498
4499
4500 srrctl = IGB_RX_HDR_LEN << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
4501 if (ring_uses_large_buffer(ring))
4502 srrctl |= IGB_RXBUFFER_3072 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
4503 else
4504 srrctl |= IGB_RXBUFFER_2048 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
4505 srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
4506 if (hw->mac.type >= e1000_82580)
4507 srrctl |= E1000_SRRCTL_TIMESTAMP;
4508
4509 if (adapter->vfs_allocated_count || adapter->num_rx_queues > 1)
4510 srrctl |= E1000_SRRCTL_DROP_EN;
4511
4512 wr32(E1000_SRRCTL(reg_idx), srrctl);
4513
4514
4515 igb_set_vmolr(adapter, reg_idx & 0x7, true);
4516
4517 rxdctl |= IGB_RX_PTHRESH;
4518 rxdctl |= IGB_RX_HTHRESH << 8;
4519 rxdctl |= IGB_RX_WTHRESH << 16;
4520
4521
4522 memset(ring->rx_buffer_info, 0,
4523 sizeof(struct igb_rx_buffer) * ring->count);
4524
4525
4526 rx_desc = IGB_RX_DESC(ring, 0);
4527 rx_desc->wb.upper.length = 0;
4528
4529
4530 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
4531 wr32(E1000_RXDCTL(reg_idx), rxdctl);
4532 }
4533
4534 static void igb_set_rx_buffer_len(struct igb_adapter *adapter,
4535 struct igb_ring *rx_ring)
4536 {
4537
4538 clear_ring_build_skb_enabled(rx_ring);
4539 clear_ring_uses_large_buffer(rx_ring);
4540
4541 if (adapter->flags & IGB_FLAG_RX_LEGACY)
4542 return;
4543
4544 set_ring_build_skb_enabled(rx_ring);
4545
4546 #if (PAGE_SIZE < 8192)
4547 if (adapter->max_frame_size <= IGB_MAX_FRAME_BUILD_SKB)
4548 return;
4549
4550 set_ring_uses_large_buffer(rx_ring);
4551 #endif
4552 }
4553
4554
4555
4556
4557
4558
4559
4560 static void igb_configure_rx(struct igb_adapter *adapter)
4561 {
4562 int i;
4563
4564
4565 igb_set_default_mac_filter(adapter);
4566
4567 /* Setup the HW Rx Head and Tail Descriptor Pointers and
4568  * the Base and Length of the Rx Descriptor Ring
4569  */
4570 for (i = 0; i < adapter->num_rx_queues; i++) {
4571 struct igb_ring *rx_ring = adapter->rx_ring[i];
4572
4573 igb_set_rx_buffer_len(adapter, rx_ring);
4574 igb_configure_rx_ring(adapter, rx_ring);
4575 }
4576 }
4577
4578 /**
4579  *  igb_free_tx_resources - Free Tx Resources per Queue
4580  *  @tx_ring: Tx descriptor ring for a specific queue
4581  *
4582  *  Free all transmit software resources
4583  **/
4584 void igb_free_tx_resources(struct igb_ring *tx_ring)
4585 {
4586 igb_clean_tx_ring(tx_ring);
4587
4588 vfree(tx_ring->tx_buffer_info);
4589 tx_ring->tx_buffer_info = NULL;
4590
4591
4592 if (!tx_ring->desc)
4593 return;
4594
4595 dma_free_coherent(tx_ring->dev, tx_ring->size,
4596 tx_ring->desc, tx_ring->dma);
4597
4598 tx_ring->desc = NULL;
4599 }
4600
4601 /**
4602  *  igb_free_all_tx_resources - Free Tx Resources for All Queues
4603  *  @adapter: board private structure
4604  *
4605  *  Free all transmit software resources
4606  **/
4607 static void igb_free_all_tx_resources(struct igb_adapter *adapter)
4608 {
4609 int i;
4610
4611 for (i = 0; i < adapter->num_tx_queues; i++)
4612 if (adapter->tx_ring[i])
4613 igb_free_tx_resources(adapter->tx_ring[i]);
4614 }
4615
4616 /**
4617  *  igb_clean_tx_ring - Free Tx Buffers
4618  *  @tx_ring: ring to be cleaned
4619  **/
4620 static void igb_clean_tx_ring(struct igb_ring *tx_ring)
4621 {
4622 u16 i = tx_ring->next_to_clean;
4623 struct igb_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i];
4624
4625 while (i != tx_ring->next_to_use) {
4626 union e1000_adv_tx_desc *eop_desc, *tx_desc;
4627
4628
4629 dev_kfree_skb_any(tx_buffer->skb);
4630
4631
4632 dma_unmap_single(tx_ring->dev,
4633 dma_unmap_addr(tx_buffer, dma),
4634 dma_unmap_len(tx_buffer, len),
4635 DMA_TO_DEVICE);
4636
4637
4638 eop_desc = tx_buffer->next_to_watch;
4639 tx_desc = IGB_TX_DESC(tx_ring, i);
4640
4641
4642 while (tx_desc != eop_desc) {
4643 tx_buffer++;
4644 tx_desc++;
4645 i++;
4646 if (unlikely(i == tx_ring->count)) {
4647 i = 0;
4648 tx_buffer = tx_ring->tx_buffer_info;
4649 tx_desc = IGB_TX_DESC(tx_ring, 0);
4650 }
4651
4652
4653 if (dma_unmap_len(tx_buffer, len))
4654 dma_unmap_page(tx_ring->dev,
4655 dma_unmap_addr(tx_buffer, dma),
4656 dma_unmap_len(tx_buffer, len),
4657 DMA_TO_DEVICE);
4658 }
4659
4660
4661 tx_buffer++;
4662 i++;
4663 if (unlikely(i == tx_ring->count)) {
4664 i = 0;
4665 tx_buffer = tx_ring->tx_buffer_info;
4666 }
4667 }
4668
4669
4670 netdev_tx_reset_queue(txring_txq(tx_ring));
4671
4672
4673 tx_ring->next_to_use = 0;
4674 tx_ring->next_to_clean = 0;
4675 }
4676
4677 /**
4678  *  igb_clean_all_tx_rings - Free Tx Buffers for all queues
4679  *  @adapter: board private structure
4680  **/
4681 static void igb_clean_all_tx_rings(struct igb_adapter *adapter)
4682 {
4683 int i;
4684
4685 for (i = 0; i < adapter->num_tx_queues; i++)
4686 if (adapter->tx_ring[i])
4687 igb_clean_tx_ring(adapter->tx_ring[i]);
4688 }
4689
4690 /**
4691  *  igb_free_rx_resources - Free Rx Resources
4692  *  @rx_ring: ring to clean the resources from
4693  *
4694  *  Free all receive software resources
4695  **/
4696 void igb_free_rx_resources(struct igb_ring *rx_ring)
4697 {
4698 igb_clean_rx_ring(rx_ring);
4699
4700 vfree(rx_ring->rx_buffer_info);
4701 rx_ring->rx_buffer_info = NULL;
4702
4703
4704 if (!rx_ring->desc)
4705 return;
4706
4707 dma_free_coherent(rx_ring->dev, rx_ring->size,
4708 rx_ring->desc, rx_ring->dma);
4709
4710 rx_ring->desc = NULL;
4711 }
4712
4713 /**
4714  *  igb_free_all_rx_resources - Free Rx Resources for All Queues
4715  *  @adapter: board private structure
4716  *
4717  *  Free all receive software resources
4718  **/
4719 static void igb_free_all_rx_resources(struct igb_adapter *adapter)
4720 {
4721 int i;
4722
4723 for (i = 0; i < adapter->num_rx_queues; i++)
4724 if (adapter->rx_ring[i])
4725 igb_free_rx_resources(adapter->rx_ring[i]);
4726 }
4727
4728 /**
4729  *  igb_clean_rx_ring - Free Rx Buffers per Queue
4730  *  @rx_ring: ring to free buffers from
4731  **/
4732 static void igb_clean_rx_ring(struct igb_ring *rx_ring)
4733 {
4734 u16 i = rx_ring->next_to_clean;
4735
4736 dev_kfree_skb(rx_ring->skb);
4737 rx_ring->skb = NULL;
4738
4739
4740 while (i != rx_ring->next_to_alloc) {
4741 struct igb_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i];
4742
4743 /* Invalidate cache lines that may have been written to by
4744  * the device so that we avoid corrupting memory.
4745  */
4746 dma_sync_single_range_for_cpu(rx_ring->dev,
4747 buffer_info->dma,
4748 buffer_info->page_offset,
4749 igb_rx_bufsz(rx_ring),
4750 DMA_FROM_DEVICE);
4751
4752
4753 dma_unmap_page_attrs(rx_ring->dev,
4754 buffer_info->dma,
4755 igb_rx_pg_size(rx_ring),
4756 DMA_FROM_DEVICE,
4757 IGB_RX_DMA_ATTR);
4758 __page_frag_cache_drain(buffer_info->page,
4759 buffer_info->pagecnt_bias);
4760
4761 i++;
4762 if (i == rx_ring->count)
4763 i = 0;
4764 }
4765
4766 rx_ring->next_to_alloc = 0;
4767 rx_ring->next_to_clean = 0;
4768 rx_ring->next_to_use = 0;
4769 }
4770
4771 /**
4772  *  igb_clean_all_rx_rings - Free Rx Buffers for all queues
4773  *  @adapter: board private structure
4774  **/
4775 static void igb_clean_all_rx_rings(struct igb_adapter *adapter)
4776 {
4777 int i;
4778
4779 for (i = 0; i < adapter->num_rx_queues; i++)
4780 if (adapter->rx_ring[i])
4781 igb_clean_rx_ring(adapter->rx_ring[i]);
4782 }
4783
4784 /**
4785  *  igb_set_mac - Change the Ethernet Address of the NIC
4786  *  @netdev: network interface device structure
4787  *  @p: pointer to an address structure
4788  *
4789  *  Returns 0 on success, negative on failure
4790  **/
4791 static int igb_set_mac(struct net_device *netdev, void *p)
4792 {
4793 struct igb_adapter *adapter = netdev_priv(netdev);
4794 struct e1000_hw *hw = &adapter->hw;
4795 struct sockaddr *addr = p;
4796
4797 if (!is_valid_ether_addr(addr->sa_data))
4798 return -EADDRNOTAVAIL;
4799
4800 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
4801 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
4802
4803
4804 igb_set_default_mac_filter(adapter);
4805
4806 return 0;
4807 }
4808
4809 /**
4810  *  igb_write_mc_addr_list - write multicast addresses to the MTA
4811  *  @netdev: network interface device structure
4812  *
4813  *  Writes the multicast address list to the MTA hash table.
4814  *  Returns: -ENOMEM on failure
4815  *           0 if no addresses were written (list empty)
4816  *           X on writing X addresses to the MTA
4817  **/
4818 static int igb_write_mc_addr_list(struct net_device *netdev)
4819 {
4820 struct igb_adapter *adapter = netdev_priv(netdev);
4821 struct e1000_hw *hw = &adapter->hw;
4822 struct netdev_hw_addr *ha;
4823 u8 *mta_list;
4824 int i;
4825
4826 if (netdev_mc_empty(netdev)) {
4827
4828 igb_update_mc_addr_list(hw, NULL, 0);
4829 igb_restore_vf_multicasts(adapter);
4830 return 0;
4831 }
4832
4833 mta_list = kcalloc(netdev_mc_count(netdev), 6, GFP_ATOMIC);
4834 if (!mta_list)
4835 return -ENOMEM;
4836
4837
4838 i = 0;
4839 netdev_for_each_mc_addr(ha, netdev)
4840 memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
4841
4842 igb_update_mc_addr_list(hw, mta_list, i);
4843 kfree(mta_list);
4844
4845 return netdev_mc_count(netdev);
4846 }
4847
4848 static int igb_vlan_promisc_enable(struct igb_adapter *adapter)
4849 {
4850 struct e1000_hw *hw = &adapter->hw;
4851 u32 i, pf_id;
4852
4853 switch (hw->mac.type) {
4854 case e1000_i210:
4855 case e1000_i211:
4856 case e1000_i350:
4857 /* VLAN filtering needed for VLAN prio filter */
4858 if (adapter->netdev->features & NETIF_F_NTUPLE)
4859 break;
4860 /* fall through */
4861 case e1000_82576:
4862 case e1000_82580:
4863 case e1000_i354:
4864 /* VLAN filtering needed for pool filtering */
4865 if (adapter->vfs_allocated_count)
4866 break;
4867 /* fall through */
4868 default:
4869 return 1;
4870 }
4871
4872
4873 if (adapter->flags & IGB_FLAG_VLAN_PROMISC)
4874 return 0;
4875
4876 if (!adapter->vfs_allocated_count)
4877 goto set_vfta;
4878
4879
4880 pf_id = adapter->vfs_allocated_count + E1000_VLVF_POOLSEL_SHIFT;
4881
4882 for (i = E1000_VLVF_ARRAY_SIZE; --i;) {
4883 u32 vlvf = rd32(E1000_VLVF(i));
4884
4885 vlvf |= BIT(pf_id);
4886 wr32(E1000_VLVF(i), vlvf);
4887 }
4888
4889 set_vfta:
4890
4891 for (i = E1000_VLAN_FILTER_TBL_SIZE; i--;)
4892 hw->mac.ops.write_vfta(hw, i, ~0U);
4893
4894
4895 adapter->flags |= IGB_FLAG_VLAN_PROMISC;
4896
4897 return 0;
4898 }
4899
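/* igb_scrub_vfta() below rewrites the VLAN filter table in blocks of
 * VFTA_BLOCK_SIZE 32-bit registers (256 VLAN IDs at a time).  Bits are
 * kept for the management VLAN, for VLANs still referenced by VF pools
 * in the VLVF registers, and for VLANs set in active_vlans; everything
 * else is cleared, which is how VLAN promiscuous mode is undone.
 */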
4900 #define VFTA_BLOCK_SIZE 8
4901 static void igb_scrub_vfta(struct igb_adapter *adapter, u32 vfta_offset)
4902 {
4903 struct e1000_hw *hw = &adapter->hw;
4904 u32 vfta[VFTA_BLOCK_SIZE] = { 0 };
4905 u32 vid_start = vfta_offset * 32;
4906 u32 vid_end = vid_start + (VFTA_BLOCK_SIZE * 32);
4907 u32 i, vid, word, bits, pf_id;
4908
4909
4910 vid = adapter->mng_vlan_id;
4911 if (vid >= vid_start && vid < vid_end)
4912 vfta[(vid - vid_start) / 32] |= BIT(vid % 32);
4913
4914 if (!adapter->vfs_allocated_count)
4915 goto set_vfta;
4916
4917 pf_id = adapter->vfs_allocated_count + E1000_VLVF_POOLSEL_SHIFT;
4918
4919 for (i = E1000_VLVF_ARRAY_SIZE; --i;) {
4920 u32 vlvf = rd32(E1000_VLVF(i));
4921
4922
4923 vid = vlvf & VLAN_VID_MASK;
4924
4925
4926 if (vid < vid_start || vid >= vid_end)
4927 continue;
4928
4929 if (vlvf & E1000_VLVF_VLANID_ENABLE) {
4930
4931 vfta[(vid - vid_start) / 32] |= BIT(vid % 32);
4932
4933
4934 if (test_bit(vid, adapter->active_vlans))
4935 continue;
4936 }
4937
4938
4939 bits = ~BIT(pf_id);
4940 bits &= rd32(E1000_VLVF(i));
4941 wr32(E1000_VLVF(i), bits);
4942 }
4943
4944 set_vfta:
4945
4946 for (i = VFTA_BLOCK_SIZE; i--;) {
4947 vid = (vfta_offset + i) * 32;
4948 word = vid / BITS_PER_LONG;
4949 bits = vid % BITS_PER_LONG;
4950
4951 vfta[i] |= adapter->active_vlans[word] >> bits;
4952
4953 hw->mac.ops.write_vfta(hw, vfta_offset + i, vfta[i]);
4954 }
4955 }
4956
4957 static void igb_vlan_promisc_disable(struct igb_adapter *adapter)
4958 {
4959 u32 i;
4960
4961
4962 if (!(adapter->flags & IGB_FLAG_VLAN_PROMISC))
4963 return;
4964
4965
4966 adapter->flags &= ~IGB_FLAG_VLAN_PROMISC;
4967
4968 for (i = 0; i < E1000_VLAN_FILTER_TBL_SIZE; i += VFTA_BLOCK_SIZE)
4969 igb_scrub_vfta(adapter, i);
4970 }
4971
4972 /**
4973  *  igb_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
4974  *  @netdev: network interface device structure
4975  *
4976  *  The set_rx_mode entry point is called whenever the unicast or multicast
4977  *  address lists or the network interface flags are updated.  This routine
4978  *  is responsible for configuring the hardware for proper unicast,
4979  *  multicast, promiscuous mode, and VLAN filtering.
4980  **/
4981 static void igb_set_rx_mode(struct net_device *netdev)
4982 {
4983 struct igb_adapter *adapter = netdev_priv(netdev);
4984 struct e1000_hw *hw = &adapter->hw;
4985 unsigned int vfn = adapter->vfs_allocated_count;
4986 u32 rctl = 0, vmolr = 0, rlpml = MAX_JUMBO_FRAME_SIZE;
4987 int count;
4988
4989
4990 if (netdev->flags & IFF_PROMISC) {
4991 rctl |= E1000_RCTL_UPE | E1000_RCTL_MPE;
4992 vmolr |= E1000_VMOLR_MPME;
4993
4994
4995 if (hw->mac.type == e1000_82576)
4996 vmolr |= E1000_VMOLR_ROPE;
4997 } else {
4998 if (netdev->flags & IFF_ALLMULTI) {
4999 rctl |= E1000_RCTL_MPE;
5000 vmolr |= E1000_VMOLR_MPME;
5001 } else {
5002 /* Write addresses to the MTA, if the attempt fails
5003  * then we should just turn on promiscuous mode so
5004  * that we can at least receive multicast traffic
5005  */
5006 count = igb_write_mc_addr_list(netdev);
5007 if (count < 0) {
5008 rctl |= E1000_RCTL_MPE;
5009 vmolr |= E1000_VMOLR_MPME;
5010 } else if (count) {
5011 vmolr |= E1000_VMOLR_ROMPE;
5012 }
5013 }
5014 }
5015
5016 /* Write addresses to available RAR registers, if there is not
5017  * sufficient space to store all the addresses then enable
5018  * unicast promiscuous mode
5019  */
5020 if (__dev_uc_sync(netdev, igb_uc_sync, igb_uc_unsync)) {
5021 rctl |= E1000_RCTL_UPE;
5022 vmolr |= E1000_VMOLR_ROPE;
5023 }
5024
5025
5026 rctl |= E1000_RCTL_VFE;
5027
5028
5029 if ((netdev->flags & IFF_PROMISC) ||
5030 (netdev->features & NETIF_F_RXALL)) {
5031
5032 if (igb_vlan_promisc_enable(adapter))
5033 rctl &= ~E1000_RCTL_VFE;
5034 } else {
5035 igb_vlan_promisc_disable(adapter);
5036 }
5037
5038
5039 rctl |= rd32(E1000_RCTL) & ~(E1000_RCTL_UPE | E1000_RCTL_MPE |
5040 E1000_RCTL_VFE);
5041 wr32(E1000_RCTL, rctl);
5042
5043 #if (PAGE_SIZE < 8192)
5044 if (!adapter->vfs_allocated_count) {
5045 if (adapter->max_frame_size <= IGB_MAX_FRAME_BUILD_SKB)
5046 rlpml = IGB_MAX_FRAME_BUILD_SKB;
5047 }
5048 #endif
5049 wr32(E1000_RLPML, rlpml);
5050
5051 /* In order to support SR-IOV and eventually VMDq it is necessary to set
5052  * the VMOLR to enable the appropriate modes.  Without this workaround
5053  * we will have issues with VLAN tag stripping not being done for frames
5054  * that are only arriving because we are the default pool
5055  */
5056 if ((hw->mac.type < e1000_82576) || (hw->mac.type > e1000_i350))
5057 return;
5058
5059
5060 igb_set_uta(adapter, !!(vmolr & E1000_VMOLR_ROPE));
5061
5062 vmolr |= rd32(E1000_VMOLR(vfn)) &
5063 ~(E1000_VMOLR_ROPE | E1000_VMOLR_MPME | E1000_VMOLR_ROMPE);
5064
5065
5066 vmolr &= ~E1000_VMOLR_RLPML_MASK;
5067 #if (PAGE_SIZE < 8192)
5068 if (adapter->max_frame_size <= IGB_MAX_FRAME_BUILD_SKB)
5069 vmolr |= IGB_MAX_FRAME_BUILD_SKB;
5070 else
5071 #endif
5072 vmolr |= MAX_JUMBO_FRAME_SIZE;
5073 vmolr |= E1000_VMOLR_LPE;
5074
5075 wr32(E1000_VMOLR(vfn), vmolr);
5076
5077 igb_restore_vf_multicasts(adapter);
5078 }
5079
5080 static void igb_check_wvbr(struct igb_adapter *adapter)
5081 {
5082 struct e1000_hw *hw = &adapter->hw;
5083 u32 wvbr = 0;
5084
5085 switch (hw->mac.type) {
5086 case e1000_82576:
5087 case e1000_i350:
5088 wvbr = rd32(E1000_WVBR);
5089 if (!wvbr)
5090 return;
5091 break;
5092 default:
5093 break;
5094 }
5095
5096 adapter->wvbr |= wvbr;
5097 }
5098
5099 #define IGB_STAGGERED_QUEUE_OFFSET 8
5100
5101 static void igb_spoof_check(struct igb_adapter *adapter)
5102 {
5103 int j;
5104
5105 if (!adapter->wvbr)
5106 return;
5107
5108 for (j = 0; j < adapter->vfs_allocated_count; j++) {
5109 if (adapter->wvbr & BIT(j) ||
5110 adapter->wvbr & BIT(j + IGB_STAGGERED_QUEUE_OFFSET)) {
5111 dev_warn(&adapter->pdev->dev,
5112 "Spoof event(s) detected on VF %d\n", j);
5113 adapter->wvbr &=
5114 ~(BIT(j) |
5115 BIT(j + IGB_STAGGERED_QUEUE_OFFSET));
5116 }
5117 }
5118 }
5119
5120 /* We need to wait a few seconds after link up to get diagnostic
5121  * information from the phy.
5122  */
5123 static void igb_update_phy_info(struct timer_list *t)
5124 {
5125 struct igb_adapter *adapter = from_timer(adapter, t, phy_info_timer);
5126 igb_get_phy_info(&adapter->hw);
5127 }
5128
5129 /**
5130  *  igb_has_link - check shared code for link and determine up/down
5131  *  @adapter: pointer to driver private info
5132  **/
5133 bool igb_has_link(struct igb_adapter *adapter)
5134 {
5135 struct e1000_hw *hw = &adapter->hw;
5136 bool link_active = false;
5137
5138 /* get_link_status is set on LSC (link status) interrupt or
5139  * rx sequence error interrupt.  get_link_status will stay
5140  * false until the e1000_check_for_link establishes link
5141  * for copper adapters ONLY
5142  */
5143 switch (hw->phy.media_type) {
5144 case e1000_media_type_copper:
5145 if (!hw->mac.get_link_status)
5146 return true;
5147 /* fall through */
5148 case e1000_media_type_internal_serdes:
5149 hw->mac.ops.check_for_link(hw);
5150 link_active = !hw->mac.get_link_status;
5151 break;
5152 default:
5153 case e1000_media_type_unknown:
5154 break;
5155 }
5156
5157 if (((hw->mac.type == e1000_i210) ||
5158 (hw->mac.type == e1000_i211)) &&
5159 (hw->phy.id == I210_I_PHY_ID)) {
5160 if (!netif_carrier_ok(adapter->netdev)) {
5161 adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE;
5162 } else if (!(adapter->flags & IGB_FLAG_NEED_LINK_UPDATE)) {
5163 adapter->flags |= IGB_FLAG_NEED_LINK_UPDATE;
5164 adapter->link_check_timeout = jiffies;
5165 }
5166 }
5167
5168 return link_active;
5169 }
5170
5171 static bool igb_thermal_sensor_event(struct e1000_hw *hw, u32 event)
5172 {
5173 bool ret = false;
5174 u32 ctrl_ext, thstat;
5175
5176
5177 if (hw->mac.type == e1000_i350) {
5178 thstat = rd32(E1000_THSTAT);
5179 ctrl_ext = rd32(E1000_CTRL_EXT);
5180
5181 if ((hw->phy.media_type == e1000_media_type_copper) &&
5182 !(ctrl_ext & E1000_CTRL_EXT_LINK_MODE_SGMII))
5183 ret = !!(thstat & event);
5184 }
5185
5186 return ret;
5187 }
5188
5189 /**
5190  *  igb_check_lvmmc - check for malformed packets received
5191  *  and indicated in the LVMMC register
5192  *  @adapter: pointer to adapter
5193  **/
5194 static void igb_check_lvmmc(struct igb_adapter *adapter)
5195 {
5196 struct e1000_hw *hw = &adapter->hw;
5197 u32 lvmmc;
5198
5199 lvmmc = rd32(E1000_LVMMC);
5200 if (lvmmc) {
5201 if (unlikely(net_ratelimit())) {
5202 netdev_warn(adapter->netdev,
5203 "malformed Tx packet detected and dropped, LVMMC:0x%08x\n",
5204 lvmmc);
5205 }
5206 }
5207 }
5208
5209 /**
5210  *  igb_watchdog - Timer Call-back
5211  *  @t: pointer to timer_list containing our private info pointer
5212  **/
5213 static void igb_watchdog(struct timer_list *t)
5214 {
5215 struct igb_adapter *adapter = from_timer(adapter, t, watchdog_timer);
5216
5217 schedule_work(&adapter->watchdog_task);
5218 }
5219
5220 static void igb_watchdog_task(struct work_struct *work)
5221 {
5222 struct igb_adapter *adapter = container_of(work,
5223 struct igb_adapter,
5224 watchdog_task);
5225 struct e1000_hw *hw = &adapter->hw;
5226 struct e1000_phy_info *phy = &hw->phy;
5227 struct net_device *netdev = adapter->netdev;
5228 u32 link;
5229 int i;
5230 u32 connsw;
5231 u16 phy_data, retry_count = 20;
5232
5233 link = igb_has_link(adapter);
5234
5235 if (adapter->flags & IGB_FLAG_NEED_LINK_UPDATE) {
5236 if (time_after(jiffies, (adapter->link_check_timeout + HZ)))
5237 adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE;
5238 else
5239 link = false;
5240 }
5241
5242
5243 if (adapter->flags & IGB_FLAG_MAS_ENABLE) {
5244 if (hw->phy.media_type == e1000_media_type_copper) {
5245 connsw = rd32(E1000_CONNSW);
5246 if (!(connsw & E1000_CONNSW_AUTOSENSE_EN))
5247 link = 0;
5248 }
5249 }
5250 if (link) {
5251
5252 if (hw->dev_spec._82575.media_changed) {
5253 hw->dev_spec._82575.media_changed = false;
5254 adapter->flags |= IGB_FLAG_MEDIA_RESET;
5255 igb_reset(adapter);
5256 }
5257
5258 pm_runtime_resume(netdev->dev.parent);
5259
5260 if (!netif_carrier_ok(netdev)) {
5261 u32 ctrl;
5262
5263 hw->mac.ops.get_speed_and_duplex(hw,
5264 &adapter->link_speed,
5265 &adapter->link_duplex);
5266
5267 ctrl = rd32(E1000_CTRL);
5268
5269 netdev_info(netdev,
5270 "igb: %s NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n",
5271 netdev->name,
5272 adapter->link_speed,
5273 adapter->link_duplex == FULL_DUPLEX ?
5274 "Full" : "Half",
5275 (ctrl & E1000_CTRL_TFCE) &&
5276 (ctrl & E1000_CTRL_RFCE) ? "RX/TX" :
5277 (ctrl & E1000_CTRL_RFCE) ? "RX" :
5278 (ctrl & E1000_CTRL_TFCE) ? "TX" : "None");
5279
5280
5281 if ((adapter->flags & IGB_FLAG_EEE) &&
5282 (adapter->link_duplex == HALF_DUPLEX)) {
5283 dev_info(&adapter->pdev->dev,
5284 "EEE Disabled: unsupported at half duplex. Re-enable using ethtool when at full duplex.\n");
5285 adapter->hw.dev_spec._82575.eee_disable = true;
5286 adapter->flags &= ~IGB_FLAG_EEE;
5287 }
5288
5289
5290 igb_check_downshift(hw);
5291 if (phy->speed_downgraded)
5292 netdev_warn(netdev, "Link Speed was downgraded by SmartSpeed\n");
5293
5294
5295 if (igb_thermal_sensor_event(hw,
5296 E1000_THSTAT_LINK_THROTTLE))
5297 netdev_info(netdev, "The network adapter link speed was downshifted because it overheated\n");
5298
5299 /* adjust timeout factor according to speed/duplex */
5300 adapter->tx_timeout_factor = 1;
5301 switch (adapter->link_speed) {
5302 case SPEED_10:
5303 adapter->tx_timeout_factor = 14;
5304 break;
5305 case SPEED_100:
5306
5307 break;
5308 }
5309
5310 if (adapter->link_speed != SPEED_1000)
5311 goto no_wait;
5312
5313
5314 retry_read_status:
5315 if (!igb_read_phy_reg(hw, PHY_1000T_STATUS,
5316 &phy_data)) {
5317 if (!(phy_data & SR_1000T_REMOTE_RX_STATUS) &&
5318 retry_count) {
5319 msleep(100);
5320 retry_count--;
5321 goto retry_read_status;
5322 } else if (!retry_count) {
5323 dev_err(&adapter->pdev->dev, "exceed max 2 second\n");
5324 }
5325 } else {
5326 dev_err(&adapter->pdev->dev, "read 1000Base-T Status Reg\n");
5327 }
5328 no_wait:
5329 netif_carrier_on(netdev);
5330
5331 igb_ping_all_vfs(adapter);
5332 igb_check_vf_rate_limit(adapter);
5333
5334
5335 if (!test_bit(__IGB_DOWN, &adapter->state))
5336 mod_timer(&adapter->phy_info_timer,
5337 round_jiffies(jiffies + 2 * HZ));
5338 }
5339 } else {
5340 if (netif_carrier_ok(netdev)) {
5341 adapter->link_speed = 0;
5342 adapter->link_duplex = 0;
5343
5344
5345 if (igb_thermal_sensor_event(hw,
5346 E1000_THSTAT_PWR_DOWN)) {
5347 netdev_err(netdev, "The network adapter was stopped because it overheated\n");
5348 }
5349
5350
5351 netdev_info(netdev, "igb: %s NIC Link is Down\n",
5352 netdev->name);
5353 netif_carrier_off(netdev);
5354
5355 igb_ping_all_vfs(adapter);
5356
5357
5358 if (!test_bit(__IGB_DOWN, &adapter->state))
5359 mod_timer(&adapter->phy_info_timer,
5360 round_jiffies(jiffies + 2 * HZ));
5361
5362
5363 if (adapter->flags & IGB_FLAG_MAS_ENABLE) {
5364 igb_check_swap_media(adapter);
5365 if (adapter->flags & IGB_FLAG_MEDIA_RESET) {
5366 schedule_work(&adapter->reset_task);
5367
5368 return;
5369 }
5370 }
5371 pm_schedule_suspend(netdev->dev.parent,
5372 MSEC_PER_SEC * 5);
5373
5374
5375 } else if (!netif_carrier_ok(netdev) &&
5376 (adapter->flags & IGB_FLAG_MAS_ENABLE)) {
5377 igb_check_swap_media(adapter);
5378 if (adapter->flags & IGB_FLAG_MEDIA_RESET) {
5379 schedule_work(&adapter->reset_task);
5380
5381 return;
5382 }
5383 }
5384 }
5385
5386 spin_lock(&adapter->stats64_lock);
5387 igb_update_stats(adapter);
5388 spin_unlock(&adapter->stats64_lock);
5389
5390 for (i = 0; i < adapter->num_tx_queues; i++) {
5391 struct igb_ring *tx_ring = adapter->tx_ring[i];
5392 if (!netif_carrier_ok(netdev)) {
5393 /* We've lost link, so the controller stops DMA, but we've
5394  * got queued Tx work that's never going to get done, so
5395  * reset the controller to flush the Tx packet buffers.
5396  * (Do the reset outside of interrupt context.)
5397  */
5398 if (igb_desc_unused(tx_ring) + 1 < tx_ring->count) {
5399 adapter->tx_timeout_count++;
5400 schedule_work(&adapter->reset_task);
5401
5402 return;
5403 }
5404 }
5405
5406 /* Force detection of hung controller every watchdog period */
5407 set_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
5408 }
5409
5410 /* Cause software interrupt to ensure Rx ring is cleaned */
5411 if (adapter->flags & IGB_FLAG_HAS_MSIX) {
5412 u32 eics = 0;
5413
5414 for (i = 0; i < adapter->num_q_vectors; i++)
5415 eics |= adapter->q_vector[i]->eims_value;
5416 wr32(E1000_EICS, eics);
5417 } else {
5418 wr32(E1000_ICS, E1000_ICS_RXDMT0);
5419 }
5420
5421 igb_spoof_check(adapter);
5422 igb_ptp_rx_hang(adapter);
5423 igb_ptp_tx_hang(adapter);
5424
5425
5426 if ((adapter->hw.mac.type == e1000_i350) ||
5427 (adapter->hw.mac.type == e1000_i354))
5428 igb_check_lvmmc(adapter);
5429
5430
5431 if (!test_bit(__IGB_DOWN, &adapter->state)) {
5432 if (adapter->flags & IGB_FLAG_NEED_LINK_UPDATE)
5433 mod_timer(&adapter->watchdog_timer,
5434 round_jiffies(jiffies + HZ));
5435 else
5436 mod_timer(&adapter->watchdog_timer,
5437 round_jiffies(jiffies + 2 * HZ));
5438 }
5439 }
5440
5441 enum latency_range {
5442 lowest_latency = 0,
5443 low_latency = 1,
5444 bulk_latency = 2,
5445 latency_invalid = 255
5446 };
5447
5448 /**
5449  *  igb_update_ring_itr - update the dynamic ITR value based on packet size
5450  *  @q_vector: pointer to q_vector
5451  *
5452  *  Stores a new ITR value based strictly on packet size.  This
5453  *  algorithm is less sophisticated than that used in igb_update_itr,
5454  *  due to the difficulty of synchronizing statistics across multiple
5455  *  receive rings.  The divisors and thresholds used by this function
5456  *  were determined based on theoretical maximum wire speed and testing
5457  *  data, in order to minimize response time while increasing bulk
5458  *  throughput.
5459  *  This functionality is controlled by ethtool's coalescing settings.
5460  *  NOTE:  This function is called only when operating in a multiqueue
5461  *         receive environment.
5462  **/
5463 static void igb_update_ring_itr(struct igb_q_vector *q_vector)
5464 {
5465 int new_val = q_vector->itr_val;
5466 int avg_wire_size = 0;
5467 struct igb_adapter *adapter = q_vector->adapter;
5468 unsigned int packets;
5469
5470 /* For non-gigabit speeds, just fix the interrupt rate at 4000
5471  * ints/sec - ITR timer value of 120 ticks.
5472  */
5473 if (adapter->link_speed != SPEED_1000) {
5474 new_val = IGB_4K_ITR;
5475 goto set_itr_val;
5476 }
5477
5478 packets = q_vector->rx.total_packets;
5479 if (packets)
5480 avg_wire_size = q_vector->rx.total_bytes / packets;
5481
5482 packets = q_vector->tx.total_packets;
5483 if (packets)
5484 avg_wire_size = max_t(u32, avg_wire_size,
5485 q_vector->tx.total_bytes / packets);
5486
5487
5488 if (!avg_wire_size)
5489 goto clear_counts;
5490
5491
5492 avg_wire_size += 24;
5493
5494
5495 avg_wire_size = min(avg_wire_size, 3000);
5496
5497
5498 if ((avg_wire_size > 300) && (avg_wire_size < 1200))
5499 new_val = avg_wire_size / 3;
5500 else
5501 new_val = avg_wire_size / 2;
5502
5503
5504 if (new_val < IGB_20K_ITR &&
5505 ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
5506 (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
5507 new_val = IGB_20K_ITR;
5508
5509 set_itr_val:
5510 if (new_val != q_vector->itr_val) {
5511 q_vector->itr_val = new_val;
5512 q_vector->set_itr = 1;
5513 }
5514 clear_counts:
5515 q_vector->rx.total_bytes = 0;
5516 q_vector->rx.total_packets = 0;
5517 q_vector->tx.total_bytes = 0;
5518 q_vector->tx.total_packets = 0;
5519 }
5520
5521 /**
5522  *  igb_update_itr - update the dynamic ITR value based on statistics
5523  *  @q_vector: pointer to q_vector
5524  *  @ring_container: ring info to update the itr for
5525  *
5526  *  Stores a new ITR value based on packets and byte
5527  *  counts during the last interrupt.  The advantage of per interrupt
5528  *  computation is faster updates and more accurate ITR for the current
5529  *  traffic pattern.  Constants in this function were computed
5530  *  based on theoretical maximum wire speed and thresholds were set based
5531  *  on testing data as well as attempting to minimize response time
5532  *  while increasing bulk throughput.
5533  *  This functionality is controlled by ethtool's coalescing settings.
5534  *  NOTE:  These calculations are only valid when operating in a single-
5535  *         queue environment.
5536  **/
5537 static void igb_update_itr(struct igb_q_vector *q_vector,
5538 struct igb_ring_container *ring_container)
5539 {
5540 unsigned int packets = ring_container->total_packets;
5541 unsigned int bytes = ring_container->total_bytes;
5542 u8 itrval = ring_container->itr;
5543
5544
5545 if (packets == 0)
5546 return;
5547
5548 switch (itrval) {
5549 case lowest_latency:
5550
5551 if (bytes/packets > 8000)
5552 itrval = bulk_latency;
5553 else if ((packets < 5) && (bytes > 512))
5554 itrval = low_latency;
5555 break;
5556 case low_latency:
5557 if (bytes > 10000) {
5558
5559 if (bytes/packets > 8000)
5560 itrval = bulk_latency;
5561 else if ((packets < 10) || ((bytes/packets) > 1200))
5562 itrval = bulk_latency;
5563 else if ((packets > 35))
5564 itrval = lowest_latency;
5565 } else if (bytes/packets > 2000) {
5566 itrval = bulk_latency;
5567 } else if (packets <= 2 && bytes < 512) {
5568 itrval = lowest_latency;
5569 }
5570 break;
5571 case bulk_latency:
5572 if (bytes > 25000) {
5573 if (packets > 35)
5574 itrval = low_latency;
5575 } else if (bytes < 1500) {
5576 itrval = low_latency;
5577 }
5578 break;
5579 }
5580
5581
5582 ring_container->total_bytes = 0;
5583 ring_container->total_packets = 0;
5584
5585
5586 ring_container->itr = itrval;
5587 }
5588
5589 static void igb_set_itr(struct igb_q_vector *q_vector)
5590 {
5591 struct igb_adapter *adapter = q_vector->adapter;
5592 u32 new_itr = q_vector->itr_val;
5593 u8 current_itr = 0;
5594
5595
5596 if (adapter->link_speed != SPEED_1000) {
5597 current_itr = 0;
5598 new_itr = IGB_4K_ITR;
5599 goto set_itr_now;
5600 }
5601
5602 igb_update_itr(q_vector, &q_vector->tx);
5603 igb_update_itr(q_vector, &q_vector->rx);
5604
5605 current_itr = max(q_vector->rx.itr, q_vector->tx.itr);
5606
5607
5608 if (current_itr == lowest_latency &&
5609 ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
5610 (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
5611 current_itr = low_latency;
5612
5613 switch (current_itr) {
5614
5615 case lowest_latency:
5616 new_itr = IGB_70K_ITR;
5617 break;
5618 case low_latency:
5619 new_itr = IGB_20K_ITR;
5620 break;
5621 case bulk_latency:
5622 new_itr = IGB_4K_ITR;
5623 break;
5624 default:
5625 break;
5626 }
5627
5628 set_itr_now:
5629 if (new_itr != q_vector->itr_val) {
5630 /* this attempts to bias the interrupt rate towards Bulk
5631  * by adding intermediate steps when the interrupt rate
5632  * is increasing
5633  */
5634 new_itr = new_itr > q_vector->itr_val ?
5635 max((new_itr * q_vector->itr_val) /
5636 (new_itr + (q_vector->itr_val >> 2)),
5637 new_itr) : new_itr;
5638
5639 /* Don't write the value here; it resets the adapter's internal
5640  * timer, and causes us to delay far longer than we should between
5641  * interrupts.  Instead, the value is written at the start of the
5642  * next interrupt (see igb_write_itr) so the timing ends up correct.
5643  */
5644 q_vector->itr_val = new_itr;
5645 q_vector->set_itr = 1;
5646 }
5647 }
5648
5649 static void igb_tx_ctxtdesc(struct igb_ring *tx_ring,
5650 struct igb_tx_buffer *first,
5651 u32 vlan_macip_lens, u32 type_tucmd,
5652 u32 mss_l4len_idx)
5653 {
5654 struct e1000_adv_tx_context_desc *context_desc;
5655 u16 i = tx_ring->next_to_use;
5656 struct timespec64 ts;
5657
5658 context_desc = IGB_TX_CTXTDESC(tx_ring, i);
5659
5660 i++;
5661 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
5662
5663
5664 type_tucmd |= E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT;
5665
5666
5667 if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
5668 mss_l4len_idx |= tx_ring->reg_idx << 4;
5669
5670 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
5671 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
5672 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
5673
5674 /* We assume there is always a valid Tx time available.  Invalid
5675  * times should have been handled by the upper layers.
5676  */
5677 if (tx_ring->launchtime_enable) {
5678 ts = ktime_to_timespec64(first->skb->tstamp);
5679 first->skb->tstamp = ktime_set(0, 0);
5680 context_desc->seqnum_seed = cpu_to_le32(ts.tv_nsec / 32);
5681 } else {
5682 context_desc->seqnum_seed = 0;
5683 }
5684 }
5685
5686 static int igb_tso(struct igb_ring *tx_ring,
5687 struct igb_tx_buffer *first,
5688 u8 *hdr_len)
5689 {
5690 u32 vlan_macip_lens, type_tucmd, mss_l4len_idx;
5691 struct sk_buff *skb = first->skb;
5692 union {
5693 struct iphdr *v4;
5694 struct ipv6hdr *v6;
5695 unsigned char *hdr;
5696 } ip;
5697 union {
5698 struct tcphdr *tcp;
5699 unsigned char *hdr;
5700 } l4;
5701 u32 paylen, l4_offset;
5702 int err;
5703
5704 if (skb->ip_summed != CHECKSUM_PARTIAL)
5705 return 0;
5706
5707 if (!skb_is_gso(skb))
5708 return 0;
5709
5710 err = skb_cow_head(skb, 0);
5711 if (err < 0)
5712 return err;
5713
5714 ip.hdr = skb_network_header(skb);
5715 l4.hdr = skb_checksum_start(skb);
5716
5717
5718 type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP;
5719
5720
5721 if (ip.v4->version == 4) {
5722 unsigned char *csum_start = skb_checksum_start(skb);
5723 unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4);
5724
5725 /* IP header will have to cancel out any data that
5726  * is not a part of the outer IP header
5727  */
5728 ip.v4->check = csum_fold(csum_partial(trans_start,
5729 csum_start - trans_start,
5730 0));
5731 type_tucmd |= E1000_ADVTXD_TUCMD_IPV4;
5732
5733 ip.v4->tot_len = 0;
5734 first->tx_flags |= IGB_TX_FLAGS_TSO |
5735 IGB_TX_FLAGS_CSUM |
5736 IGB_TX_FLAGS_IPV4;
5737 } else {
5738 ip.v6->payload_len = 0;
5739 first->tx_flags |= IGB_TX_FLAGS_TSO |
5740 IGB_TX_FLAGS_CSUM;
5741 }
5742
5743
5744 l4_offset = l4.hdr - skb->data;
5745
5746
5747 *hdr_len = (l4.tcp->doff * 4) + l4_offset;
5748
5749
5750 paylen = skb->len - l4_offset;
5751 csum_replace_by_diff(&l4.tcp->check, htonl(paylen));
5752
5753
5754 first->gso_segs = skb_shinfo(skb)->gso_segs;
5755 first->bytecount += (first->gso_segs - 1) * *hdr_len;
5756
5757
5758 mss_l4len_idx = (*hdr_len - l4_offset) << E1000_ADVTXD_L4LEN_SHIFT;
5759 mss_l4len_idx |= skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT;
5760
5761
5762 vlan_macip_lens = l4.hdr - ip.hdr;
5763 vlan_macip_lens |= (ip.hdr - skb->data) << E1000_ADVTXD_MACLEN_SHIFT;
5764 vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK;
5765
5766 igb_tx_ctxtdesc(tx_ring, first, vlan_macip_lens,
5767 type_tucmd, mss_l4len_idx);
5768
5769 return 1;
5770 }
5771
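/* ipv6_find_hdr() walks the IPv6 extension header chain looking for an
 * SCTP header; the packet is treated as SCTP only if the offset it
 * reports matches the skb's checksum start offset.
 */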
5772 static inline bool igb_ipv6_csum_is_sctp(struct sk_buff *skb)
5773 {
5774 unsigned int offset = 0;
5775
5776 ipv6_find_hdr(skb, &offset, IPPROTO_SCTP, NULL, NULL);
5777
5778 return offset == skb_checksum_start_offset(skb);
5779 }
5780
5781 static void igb_tx_csum(struct igb_ring *tx_ring, struct igb_tx_buffer *first)
5782 {
5783 struct sk_buff *skb = first->skb;
5784 u32 vlan_macip_lens = 0;
5785 u32 type_tucmd = 0;
5786
5787 if (skb->ip_summed != CHECKSUM_PARTIAL) {
5788 csum_failed:
5789 if (!(first->tx_flags & IGB_TX_FLAGS_VLAN) &&
5790 !tx_ring->launchtime_enable)
5791 return;
5792 goto no_csum;
5793 }
5794
5795 switch (skb->csum_offset) {
5796 case offsetof(struct tcphdr, check):
5797 type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP;
5798 /* fall through */
5799 case offsetof(struct udphdr, check):
5800 break;
5801 case offsetof(struct sctphdr, checksum):
5802
5803 if (((first->protocol == htons(ETH_P_IP)) &&
5804 (ip_hdr(skb)->protocol == IPPROTO_SCTP)) ||
5805 ((first->protocol == htons(ETH_P_IPV6)) &&
5806 igb_ipv6_csum_is_sctp(skb))) {
5807 type_tucmd = E1000_ADVTXD_TUCMD_L4T_SCTP;
5808 break;
5809 }
5810 /* fall through */
5811 default:
5812 skb_checksum_help(skb);
5813 goto csum_failed;
5814 }
5815
5816
5817 first->tx_flags |= IGB_TX_FLAGS_CSUM;
5818 vlan_macip_lens = skb_checksum_start_offset(skb) -
5819 skb_network_offset(skb);
5820 no_csum:
5821 vlan_macip_lens |= skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT;
5822 vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK;
5823
5824 igb_tx_ctxtdesc(tx_ring, first, vlan_macip_lens, type_tucmd, 0);
5825 }
5826
5827 #define IGB_SET_FLAG(_input, _flag, _result) \
5828 ((_flag <= _result) ? \
5829 ((u32)(_input & _flag) * (_result / _flag)) : \
5830 ((u32)(_input & _flag) / (_flag / _result)))
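/* IGB_SET_FLAG() translates a flag bit in _input into the corresponding
 * _result bit: when _result is the larger constant the masked value is
 * scaled up by multiplication, otherwise it is scaled down by division.
 * With constant power-of-two flags this reduces to a simple shift.
 */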
5831
5832 static u32 igb_tx_cmd_type(struct sk_buff *skb, u32 tx_flags)
5833 {
5834
5835 u32 cmd_type = E1000_ADVTXD_DTYP_DATA |
5836 E1000_ADVTXD_DCMD_DEXT |
5837 E1000_ADVTXD_DCMD_IFCS;
5838
5839
5840 cmd_type |= IGB_SET_FLAG(tx_flags, IGB_TX_FLAGS_VLAN,
5841 (E1000_ADVTXD_DCMD_VLE));
5842
5843
5844 cmd_type |= IGB_SET_FLAG(tx_flags, IGB_TX_FLAGS_TSO,
5845 (E1000_ADVTXD_DCMD_TSE));
5846
5847
5848 cmd_type |= IGB_SET_FLAG(tx_flags, IGB_TX_FLAGS_TSTAMP,
5849 (E1000_ADVTXD_MAC_TSTAMP));
5850
5851
5852 cmd_type ^= IGB_SET_FLAG(skb->no_fcs, 1, E1000_ADVTXD_DCMD_IFCS);
5853
5854 return cmd_type;
5855 }
5856
5857 static void igb_tx_olinfo_status(struct igb_ring *tx_ring,
5858 union e1000_adv_tx_desc *tx_desc,
5859 u32 tx_flags, unsigned int paylen)
5860 {
5861 u32 olinfo_status = paylen << E1000_ADVTXD_PAYLEN_SHIFT;
5862
5863
5864 if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
5865 olinfo_status |= tx_ring->reg_idx << 4;
5866
5867
5868 olinfo_status |= IGB_SET_FLAG(tx_flags,
5869 IGB_TX_FLAGS_CSUM,
5870 (E1000_TXD_POPTS_TXSM << 8));
5871
5872
5873 olinfo_status |= IGB_SET_FLAG(tx_flags,
5874 IGB_TX_FLAGS_IPV4,
5875 (E1000_TXD_POPTS_IXSM << 8));
5876
5877 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
5878 }
5879
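/* Slow path for flow control on the Tx ring: stop the subqueue first,
 * then re-check the free descriptor count after the memory barrier so
 * that a completion running on another CPU either sees the stopped
 * queue or this re-check sees the descriptors it just freed.
 */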
5880 static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
5881 {
5882 struct net_device *netdev = tx_ring->netdev;
5883
5884 netif_stop_subqueue(netdev, tx_ring->queue_index);
5885
5886 /* Herbert's original patch had:
5887  *  smp_mb__after_netif_stop_queue();
5888  * but since that doesn't exist yet, just open code it.
5889  */
5890 smp_mb();
5891
5892 /* We need to check again in case another CPU has just
5893  * made room available.
5894  */
5895 if (igb_desc_unused(tx_ring) < size)
5896 return -EBUSY;
5897
5898
5899 netif_wake_subqueue(netdev, tx_ring->queue_index);
5900
5901 u64_stats_update_begin(&tx_ring->tx_syncp2);
5902 tx_ring->tx_stats.restart_queue2++;
5903 u64_stats_update_end(&tx_ring->tx_syncp2);
5904
5905 return 0;
5906 }
5907
5908 static inline int igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
5909 {
5910 if (igb_desc_unused(tx_ring) >= size)
5911 return 0;
5912 return __igb_maybe_stop_tx(tx_ring, size);
5913 }
5914
5915 static int igb_tx_map(struct igb_ring *tx_ring,
5916 struct igb_tx_buffer *first,
5917 const u8 hdr_len)
5918 {
5919 struct sk_buff *skb = first->skb;
5920 struct igb_tx_buffer *tx_buffer;
5921 union e1000_adv_tx_desc *tx_desc;
5922 skb_frag_t *frag;
5923 dma_addr_t dma;
5924 unsigned int data_len, size;
5925 u32 tx_flags = first->tx_flags;
5926 u32 cmd_type = igb_tx_cmd_type(skb, tx_flags);
5927 u16 i = tx_ring->next_to_use;
5928
5929 tx_desc = IGB_TX_DESC(tx_ring, i);
5930
5931 igb_tx_olinfo_status(tx_ring, tx_desc, tx_flags, skb->len - hdr_len);
5932
5933 size = skb_headlen(skb);
5934 data_len = skb->data_len;
5935
5936 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
5937
5938 tx_buffer = first;
5939
5940 for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
5941 if (dma_mapping_error(tx_ring->dev, dma))
5942 goto dma_error;
5943
5944
5945 dma_unmap_len_set(tx_buffer, len, size);
5946 dma_unmap_addr_set(tx_buffer, dma, dma);
5947
5948 tx_desc->read.buffer_addr = cpu_to_le64(dma);
5949
5950 while (unlikely(size > IGB_MAX_DATA_PER_TXD)) {
5951 tx_desc->read.cmd_type_len =
5952 cpu_to_le32(cmd_type ^ IGB_MAX_DATA_PER_TXD);
5953
5954 i++;
5955 tx_desc++;
5956 if (i == tx_ring->count) {
5957 tx_desc = IGB_TX_DESC(tx_ring, 0);
5958 i = 0;
5959 }
5960 tx_desc->read.olinfo_status = 0;
5961
5962 dma += IGB_MAX_DATA_PER_TXD;
5963 size -= IGB_MAX_DATA_PER_TXD;
5964
5965 tx_desc->read.buffer_addr = cpu_to_le64(dma);
5966 }
5967
5968 if (likely(!data_len))
5969 break;
5970
5971 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size);
5972
5973 i++;
5974 tx_desc++;
5975 if (i == tx_ring->count) {
5976 tx_desc = IGB_TX_DESC(tx_ring, 0);
5977 i = 0;
5978 }
5979 tx_desc->read.olinfo_status = 0;
5980
5981 size = skb_frag_size(frag);
5982 data_len -= size;
5983
5984 dma = skb_frag_dma_map(tx_ring->dev, frag, 0,
5985 size, DMA_TO_DEVICE);
5986
5987 tx_buffer = &tx_ring->tx_buffer_info[i];
5988 }
5989
5990
5991 cmd_type |= size | IGB_TXD_DCMD;
5992 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
5993
5994 netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
5995
5996
5997 first->time_stamp = jiffies;
5998
5999 skb_tx_timestamp(skb);
6000
6001 /* Force memory writes to complete before letting h/w know there
6002  * are new descriptors to fetch.  (Only applicable for weak-ordered
6003  * memory model archs, such as IA-64.)
6004  *
6005  * We also need this memory barrier to make certain all of the
6006  * status bits have been updated before next_to_watch is written.
6007  */
6008 dma_wmb();
6009
6010
6011 first->next_to_watch = tx_desc;
6012
6013 i++;
6014 if (i == tx_ring->count)
6015 i = 0;
6016
6017 tx_ring->next_to_use = i;
6018
6019
6020 igb_maybe_stop_tx(tx_ring, DESC_NEEDED);
6021
6022 if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {
6023 writel(i, tx_ring->tail);
6024 }
6025 return 0;
6026
6027 dma_error:
6028 dev_err(tx_ring->dev, "TX DMA map failed\n");
6029 tx_buffer = &tx_ring->tx_buffer_info[i];
6030
6031 /* clear dma mappings for failed tx_buffer_info map */
6032 while (tx_buffer != first) {
6033 if (dma_unmap_len(tx_buffer, len))
6034 dma_unmap_page(tx_ring->dev,
6035 dma_unmap_addr(tx_buffer, dma),
6036 dma_unmap_len(tx_buffer, len),
6037 DMA_TO_DEVICE);
6038 dma_unmap_len_set(tx_buffer, len, 0);
6039
6040 if (i-- == 0)
6041 i += tx_ring->count;
6042 tx_buffer = &tx_ring->tx_buffer_info[i];
6043 }
6044
6045 if (dma_unmap_len(tx_buffer, len))
6046 dma_unmap_single(tx_ring->dev,
6047 dma_unmap_addr(tx_buffer, dma),
6048 dma_unmap_len(tx_buffer, len),
6049 DMA_TO_DEVICE);
6050 dma_unmap_len_set(tx_buffer, len, 0);
6051
6052 dev_kfree_skb_any(tx_buffer->skb);
6053 tx_buffer->skb = NULL;
6054
6055 tx_ring->next_to_use = i;
6056
6057 return -1;
6058 }
6059
6060 netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
6061 struct igb_ring *tx_ring)
6062 {
6063 struct igb_tx_buffer *first;
6064 int tso;
6065 u32 tx_flags = 0;
6066 unsigned short f;
6067 u16 count = TXD_USE_COUNT(skb_headlen(skb));
6068 __be16 protocol = vlan_get_protocol(skb);
6069 u8 hdr_len = 0;
6070
6071 /* need: 1 descriptor per page * PAGE_SIZE/IGB_MAX_DATA_PER_TXD,
6072  *       + 1 desc for skb_headlen/IGB_MAX_DATA_PER_TXD,
6073  *       + 2 desc gap to keep tail from touching head,
6074  *       + 1 desc for context descriptor,
6075  * otherwise try next time
6076  */
6077 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
6078 count += TXD_USE_COUNT(skb_frag_size(
6079 &skb_shinfo(skb)->frags[f]));
6080
6081 if (igb_maybe_stop_tx(tx_ring, count + 3)) {
6082
6083 return NETDEV_TX_BUSY;
6084 }
6085
6086
6087 first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
6088 first->skb = skb;
6089 first->bytecount = skb->len;
6090 first->gso_segs = 1;
6091
6092 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
6093 struct igb_adapter *adapter = netdev_priv(tx_ring->netdev);
6094
6095 if (adapter->tstamp_config.tx_type == HWTSTAMP_TX_ON &&
6096 !test_and_set_bit_lock(__IGB_PTP_TX_IN_PROGRESS,
6097 &adapter->state)) {
6098 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
6099 tx_flags |= IGB_TX_FLAGS_TSTAMP;
6100
6101 adapter->ptp_tx_skb = skb_get(skb);
6102 adapter->ptp_tx_start = jiffies;
6103 if (adapter->hw.mac.type == e1000_82576)
6104 schedule_work(&adapter->ptp_tx_work);
6105 } else {
6106 adapter->tx_hwtstamp_skipped++;
6107 }
6108 }
6109
6110 if (skb_vlan_tag_present(skb)) {
6111 tx_flags |= IGB_TX_FLAGS_VLAN;
6112 tx_flags |= (skb_vlan_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT);
6113 }
6114
6115
6116 first->tx_flags = tx_flags;
6117 first->protocol = protocol;
6118
6119 tso = igb_tso(tx_ring, first, &hdr_len);
6120 if (tso < 0)
6121 goto out_drop;
6122 else if (!tso)
6123 igb_tx_csum(tx_ring, first);
6124
6125 if (igb_tx_map(tx_ring, first, hdr_len))
6126 goto cleanup_tx_tstamp;
6127
6128 return NETDEV_TX_OK;
6129
6130 out_drop:
6131 dev_kfree_skb_any(first->skb);
6132 first->skb = NULL;
6133 cleanup_tx_tstamp:
6134 if (unlikely(tx_flags & IGB_TX_FLAGS_TSTAMP)) {
6135 struct igb_adapter *adapter = netdev_priv(tx_ring->netdev);
6136
6137 dev_kfree_skb_any(adapter->ptp_tx_skb);
6138 adapter->ptp_tx_skb = NULL;
6139 if (adapter->hw.mac.type == e1000_82576)
6140 cancel_work_sync(&adapter->ptp_tx_work);
6141 clear_bit_unlock(__IGB_PTP_TX_IN_PROGRESS, &adapter->state);
6142 }
6143
6144 return NETDEV_TX_OK;
6145 }
6146
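/* Map skb->queue_mapping onto one of the adapter's Tx rings, wrapping
 * with a modulo when the stack selects a queue index beyond the number
 * of configured Tx queues.
 */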
6147 static inline struct igb_ring *igb_tx_queue_mapping(struct igb_adapter *adapter,
6148 struct sk_buff *skb)
6149 {
6150 unsigned int r_idx = skb->queue_mapping;
6151
6152 if (r_idx >= adapter->num_tx_queues)
6153 r_idx = r_idx % adapter->num_tx_queues;
6154
6155 return adapter->tx_ring[r_idx];
6156 }
6157
6158 static netdev_tx_t igb_xmit_frame(struct sk_buff *skb,
6159 struct net_device *netdev)
6160 {
6161 struct igb_adapter *adapter = netdev_priv(netdev);
6162
6163 /* The minimum packet size with TCTL.PSP set is 17 so pad the skb
6164  * in order to meet this minimum size requirement.
6165  */
6166 if (skb_put_padto(skb, 17))
6167 return NETDEV_TX_OK;
6168
6169 return igb_xmit_frame_ring(skb, igb_tx_queue_mapping(adapter, skb));
6170 }
6171
6172 /**
6173  *  igb_tx_timeout - Respond to a Tx Hang
6174  *  @netdev: network interface device structure
6175  **/
6176 static void igb_tx_timeout(struct net_device *netdev)
6177 {
6178 struct igb_adapter *adapter = netdev_priv(netdev);
6179 struct e1000_hw *hw = &adapter->hw;
6180
6181
6182 adapter->tx_timeout_count++;
6183
6184 if (hw->mac.type >= e1000_82580)
6185 hw->dev_spec._82575.global_device_reset = true;
6186
6187 schedule_work(&adapter->reset_task);
6188 wr32(E1000_EICS,
6189 (adapter->eims_enable_mask & ~adapter->eims_other));
6190 }
6191
6192 static void igb_reset_task(struct work_struct *work)
6193 {
6194 struct igb_adapter *adapter;
6195 adapter = container_of(work, struct igb_adapter, reset_task);
6196
6197 igb_dump(adapter);
6198 netdev_err(adapter->netdev, "Reset adapter\n");
6199 igb_reinit_locked(adapter);
6200 }
6201
6202 /**
6203  *  igb_get_stats64 - Get System Network Statistics
6204  *  @netdev: network interface device structure
6205  *  @stats: rtnl_link_stats64 pointer
6206  **/
6207 static void igb_get_stats64(struct net_device *netdev,
6208 struct rtnl_link_stats64 *stats)
6209 {
6210 struct igb_adapter *adapter = netdev_priv(netdev);
6211
6212 spin_lock(&adapter->stats64_lock);
6213 igb_update_stats(adapter);
6214 memcpy(stats, &adapter->stats64, sizeof(*stats));
6215 spin_unlock(&adapter->stats64_lock);
6216 }
6217
6218 /**
6219  *  igb_change_mtu - Change the Maximum Transfer Unit
6220  *  @netdev: network interface device structure
6221  *  @new_mtu: new value for maximum frame size
6222  *
6223  *  Returns 0 on success, negative on failure
6224  **/
6225 static int igb_change_mtu(struct net_device *netdev, int new_mtu)
6226 {
6227 struct igb_adapter *adapter = netdev_priv(netdev);
6228 struct pci_dev *pdev = adapter->pdev;
6229 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
6230
6231
6232 if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN))
6233 max_frame = ETH_FRAME_LEN + ETH_FCS_LEN;
6234
6235 while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
6236 usleep_range(1000, 2000);
6237
6238
6239 adapter->max_frame_size = max_frame;
6240
6241 if (netif_running(netdev))
6242 igb_down(adapter);
6243
6244 dev_info(&pdev->dev, "changing MTU from %d to %d\n",
6245 netdev->mtu, new_mtu);
6246 netdev->mtu = new_mtu;
6247
6248 if (netif_running(netdev))
6249 igb_up(adapter);
6250 else
6251 igb_reset(adapter);
6252
6253 clear_bit(__IGB_RESETTING, &adapter->state);
6254
6255 return 0;
6256 }
6257
6258 /**
6259  *  igb_update_stats - Update the board statistics counters
6260  *  @adapter: board private structure
6261  **/
6262 void igb_update_stats(struct igb_adapter *adapter)
6263 {
6264 struct rtnl_link_stats64 *net_stats = &adapter->stats64;
6265 struct e1000_hw *hw = &adapter->hw;
6266 struct pci_dev *pdev = adapter->pdev;
6267 u32 reg, mpc;
6268 int i;
6269 u64 bytes, packets;
6270 unsigned int start;
6271 u64 _bytes, _packets;
6272
6273 /* Prevent stats update while adapter is being reset, or if the pci
6274  * connection is down.
6275  */
6276 if (adapter->link_speed == 0)
6277 return;
6278 if (pci_channel_offline(pdev))
6279 return;
6280
6281 bytes = 0;
6282 packets = 0;
6283
6284 rcu_read_lock();
6285 for (i = 0; i < adapter->num_rx_queues; i++) {
6286 struct igb_ring *ring = adapter->rx_ring[i];
6287 u32 rqdpc = rd32(E1000_RQDPC(i));
6288 if (hw->mac.type >= e1000_i210)
6289 wr32(E1000_RQDPC(i), 0);
6290
6291 if (rqdpc) {
6292 ring->rx_stats.drops += rqdpc;
6293 net_stats->rx_fifo_errors += rqdpc;
6294 }
6295
6296 do {
6297 start = u64_stats_fetch_begin_irq(&ring->rx_syncp);
6298 _bytes = ring->rx_stats.bytes;
6299 _packets = ring->rx_stats.packets;
6300 } while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start));
6301 bytes += _bytes;
6302 packets += _packets;
6303 }
6304
6305 net_stats->rx_bytes = bytes;
6306 net_stats->rx_packets = packets;
6307
6308 bytes = 0;
6309 packets = 0;
6310 for (i = 0; i < adapter->num_tx_queues; i++) {
6311 struct igb_ring *ring = adapter->tx_ring[i];
6312 do {
6313 start = u64_stats_fetch_begin_irq(&ring->tx_syncp);
6314 _bytes = ring->tx_stats.bytes;
6315 _packets = ring->tx_stats.packets;
6316 } while (u64_stats_fetch_retry_irq(&ring->tx_syncp, start));
6317 bytes += _bytes;
6318 packets += _packets;
6319 }
6320 net_stats->tx_bytes = bytes;
6321 net_stats->tx_packets = packets;
6322 rcu_read_unlock();
6323
6324
6325 adapter->stats.crcerrs += rd32(E1000_CRCERRS);
6326 adapter->stats.gprc += rd32(E1000_GPRC);
6327 adapter->stats.gorc += rd32(E1000_GORCL);
6328 rd32(E1000_GORCH);
6329 adapter->stats.bprc += rd32(E1000_BPRC);
6330 adapter->stats.mprc += rd32(E1000_MPRC);
6331 adapter->stats.roc += rd32(E1000_ROC);
6332
6333 adapter->stats.prc64 += rd32(E1000_PRC64);
6334 adapter->stats.prc127 += rd32(E1000_PRC127);
6335 adapter->stats.prc255 += rd32(E1000_PRC255);
6336 adapter->stats.prc511 += rd32(E1000_PRC511);
6337 adapter->stats.prc1023 += rd32(E1000_PRC1023);
6338 adapter->stats.prc1522 += rd32(E1000_PRC1522);
6339 adapter->stats.symerrs += rd32(E1000_SYMERRS);
6340 adapter->stats.sec += rd32(E1000_SEC);
6341
6342 mpc = rd32(E1000_MPC);
6343 adapter->stats.mpc += mpc;
6344 net_stats->rx_fifo_errors += mpc;
6345 adapter->stats.scc += rd32(E1000_SCC);
6346 adapter->stats.ecol += rd32(E1000_ECOL);
6347 adapter->stats.mcc += rd32(E1000_MCC);
6348 adapter->stats.latecol += rd32(E1000_LATECOL);
6349 adapter->stats.dc += rd32(E1000_DC);
6350 adapter->stats.rlec += rd32(E1000_RLEC);
6351 adapter->stats.xonrxc += rd32(E1000_XONRXC);
6352 adapter->stats.xontxc += rd32(E1000_XONTXC);
6353 adapter->stats.xoffrxc += rd32(E1000_XOFFRXC);
6354 adapter->stats.xofftxc += rd32(E1000_XOFFTXC);
6355 adapter->stats.fcruc += rd32(E1000_FCRUC);
6356 adapter->stats.gptc += rd32(E1000_GPTC);
6357 adapter->stats.gotc += rd32(E1000_GOTCL);
6358 rd32(E1000_GOTCH);
6359 adapter->stats.rnbc += rd32(E1000_RNBC);
6360 adapter->stats.ruc += rd32(E1000_RUC);
6361 adapter->stats.rfc += rd32(E1000_RFC);
6362 adapter->stats.rjc += rd32(E1000_RJC);
6363 adapter->stats.tor += rd32(E1000_TORH);
6364 adapter->stats.tot += rd32(E1000_TOTH);
6365 adapter->stats.tpr += rd32(E1000_TPR);
6366
6367 adapter->stats.ptc64 += rd32(E1000_PTC64);
6368 adapter->stats.ptc127 += rd32(E1000_PTC127);
6369 adapter->stats.ptc255 += rd32(E1000_PTC255);
6370 adapter->stats.ptc511 += rd32(E1000_PTC511);
6371 adapter->stats.ptc1023 += rd32(E1000_PTC1023);
6372 adapter->stats.ptc1522 += rd32(E1000_PTC1522);
6373
6374 adapter->stats.mptc += rd32(E1000_MPTC);
6375 adapter->stats.bptc += rd32(E1000_BPTC);
6376
6377 adapter->stats.tpt += rd32(E1000_TPT);
6378 adapter->stats.colc += rd32(E1000_COLC);
6379
6380 adapter->stats.algnerrc += rd32(E1000_ALGNERRC);
6381
6382 reg = rd32(E1000_CTRL_EXT);
6383 if (!(reg & E1000_CTRL_EXT_LINK_MODE_MASK)) {
6384 adapter->stats.rxerrc += rd32(E1000_RXERRC);
6385
6386
6387 if ((hw->mac.type != e1000_i210) &&
6388 (hw->mac.type != e1000_i211))
6389 adapter->stats.tncrs += rd32(E1000_TNCRS);
6390 }
6391
6392 adapter->stats.tsctc += rd32(E1000_TSCTC);
6393 adapter->stats.tsctfc += rd32(E1000_TSCTFC);
6394
6395 adapter->stats.iac += rd32(E1000_IAC);
6396 adapter->stats.icrxoc += rd32(E1000_ICRXOC);
6397 adapter->stats.icrxptc += rd32(E1000_ICRXPTC);
6398 adapter->stats.icrxatc += rd32(E1000_ICRXATC);
6399 adapter->stats.ictxptc += rd32(E1000_ICTXPTC);
6400 adapter->stats.ictxatc += rd32(E1000_ICTXATC);
6401 adapter->stats.ictxqec += rd32(E1000_ICTXQEC);
6402 adapter->stats.ictxqmtc += rd32(E1000_ICTXQMTC);
6403 adapter->stats.icrxdmtc += rd32(E1000_ICRXDMTC);
6404
6405
6406 net_stats->multicast = adapter->stats.mprc;
6407 net_stats->collisions = adapter->stats.colc;
6408
6409 /* Rx Errors
6410  *
6411  * RLEC on some newer hardware can be incorrect so build
6412  * our own version based on RUC and ROC
6413  */
6414 net_stats->rx_errors = adapter->stats.rxerrc +
6415 adapter->stats.crcerrs + adapter->stats.algnerrc +
6416 adapter->stats.ruc + adapter->stats.roc +
6417 adapter->stats.cexterr;
6418 net_stats->rx_length_errors = adapter->stats.ruc +
6419 adapter->stats.roc;
6420 net_stats->rx_crc_errors = adapter->stats.crcerrs;
6421 net_stats->rx_frame_errors = adapter->stats.algnerrc;
6422 net_stats->rx_missed_errors = adapter->stats.mpc;
6423
6424
6425 net_stats->tx_errors = adapter->stats.ecol +
6426 adapter->stats.latecol;
6427 net_stats->tx_aborted_errors = adapter->stats.ecol;
6428 net_stats->tx_window_errors = adapter->stats.latecol;
6429 net_stats->tx_carrier_errors = adapter->stats.tncrs;
6430
6431
6432
6433
6434 adapter->stats.mgptc += rd32(E1000_MGTPTC);
6435 adapter->stats.mgprc += rd32(E1000_MGTPRC);
6436 adapter->stats.mgpdc += rd32(E1000_MGTPDC);
6437
6438
6439 reg = rd32(E1000_MANC);
6440 if (reg & E1000_MANC_EN_BMC2OS) {
6441 adapter->stats.o2bgptc += rd32(E1000_O2BGPTC);
6442 adapter->stats.o2bspc += rd32(E1000_O2BSPC);
6443 adapter->stats.b2ospc += rd32(E1000_B2OSPC);
6444 adapter->stats.b2ogprc += rd32(E1000_B2OGPRC);
6445 }
6446 }
6447
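/* Handle the time sync (TSICR) interrupt causes: system time wrap (PPS),
 * Tx timestamp capture, the two programmable target time triggers, and
 * the two auxiliary timestamp events.  Each cause that was handled is
 * acknowledged back to TSICR at the end.
 */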
6448 static void igb_tsync_interrupt(struct igb_adapter *adapter)
6449 {
6450 struct e1000_hw *hw = &adapter->hw;
6451 struct ptp_clock_event event;
6452 struct timespec64 ts;
6453 u32 ack = 0, tsauxc, sec, nsec, tsicr = rd32(E1000_TSICR);
6454
6455 if (tsicr & TSINTR_SYS_WRAP) {
6456 event.type = PTP_CLOCK_PPS;
6457 if (adapter->ptp_caps.pps)
6458 ptp_clock_event(adapter->ptp_clock, &event);
6459 ack |= TSINTR_SYS_WRAP;
6460 }
6461
6462 if (tsicr & E1000_TSICR_TXTS) {
6463
6464 schedule_work(&adapter->ptp_tx_work);
6465 ack |= E1000_TSICR_TXTS;
6466 }
6467
6468 if (tsicr & TSINTR_TT0) {
6469 spin_lock(&adapter->tmreg_lock);
6470 ts = timespec64_add(adapter->perout[0].start,
6471 adapter->perout[0].period);
6472
6473 wr32(E1000_TRGTTIML0, ts.tv_nsec);
6474 wr32(E1000_TRGTTIMH0, (u32)ts.tv_sec);
6475 tsauxc = rd32(E1000_TSAUXC);
6476 tsauxc |= TSAUXC_EN_TT0;
6477 wr32(E1000_TSAUXC, tsauxc);
6478 adapter->perout[0].start = ts;
6479 spin_unlock(&adapter->tmreg_lock);
6480 ack |= TSINTR_TT0;
6481 }
6482
6483 if (tsicr & TSINTR_TT1) {
6484 spin_lock(&adapter->tmreg_lock);
6485 ts = timespec64_add(adapter->perout[1].start,
6486 adapter->perout[1].period);
6487 wr32(E1000_TRGTTIML1, ts.tv_nsec);
6488 wr32(E1000_TRGTTIMH1, (u32)ts.tv_sec);
6489 tsauxc = rd32(E1000_TSAUXC);
6490 tsauxc |= TSAUXC_EN_TT1;
6491 wr32(E1000_TSAUXC, tsauxc);
6492 adapter->perout[1].start = ts;
6493 spin_unlock(&adapter->tmreg_lock);
6494 ack |= TSINTR_TT1;
6495 }
6496
6497 if (tsicr & TSINTR_AUTT0) {
6498 nsec = rd32(E1000_AUXSTMPL0);
6499 sec = rd32(E1000_AUXSTMPH0);
6500 event.type = PTP_CLOCK_EXTTS;
6501 event.index = 0;
6502 event.timestamp = sec * 1000000000ULL + nsec;
6503 ptp_clock_event(adapter->ptp_clock, &event);
6504 ack |= TSINTR_AUTT0;
6505 }
6506
6507 if (tsicr & TSINTR_AUTT1) {
6508 nsec = rd32(E1000_AUXSTMPL1);
6509 sec = rd32(E1000_AUXSTMPH1);
6510 event.type = PTP_CLOCK_EXTTS;
6511 event.index = 1;
6512 event.timestamp = sec * 1000000000ULL + nsec;
6513 ptp_clock_event(adapter->ptp_clock, &event);
6514 ack |= TSINTR_AUTT1;
6515 }
6516
6517
6518 wr32(E1000_TSICR, ack);
6519 }
6520
6521 static irqreturn_t igb_msix_other(int irq, void *data)
6522 {
6523 struct igb_adapter *adapter = data;
6524 struct e1000_hw *hw = &adapter->hw;
6525 u32 icr = rd32(E1000_ICR);
6526
6527
6528 if (icr & E1000_ICR_DRSTA)
6529 schedule_work(&adapter->reset_task);
6530
6531 if (icr & E1000_ICR_DOUTSYNC) {
6532 /* HW is reporting DMA is out of sync */
6533 adapter->stats.doosync++;
6534 /* The DMA Out of Sync is also an indication of a spoof event
6535  * in IOV mode.  Check the Wrong VM Behavior register to
6536  * see if it is really a spoof event.
6537  */
6538 igb_check_wvbr(adapter);
6539 }
6540
6541
6542 if (icr & E1000_ICR_VMMB)
6543 igb_msg_task(adapter);
6544
6545 if (icr & E1000_ICR_LSC) {
6546 hw->mac.get_link_status = 1;
6547
6548 if (!test_bit(__IGB_DOWN, &adapter->state))
6549 mod_timer(&adapter->watchdog_timer, jiffies + 1);
6550 }
6551
6552 if (icr & E1000_ICR_TS)
6553 igb_tsync_interrupt(adapter);
6554
6555 wr32(E1000_EIMS, adapter->eims_other);
6556
6557 return IRQ_HANDLED;
6558 }
6559
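/* Write a pending ITR update to this vector's EITR register.  On 82575
 * the interval is mirrored into the upper half-word; on later MACs the
 * E1000_EITR_CNT_IGNR bit is set before the write.
 */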
6560 static void igb_write_itr(struct igb_q_vector *q_vector)
6561 {
6562 struct igb_adapter *adapter = q_vector->adapter;
6563 u32 itr_val = q_vector->itr_val & 0x7FFC;
6564
6565 if (!q_vector->set_itr)
6566 return;
6567
6568 if (!itr_val)
6569 itr_val = 0x4;
6570
6571 if (adapter->hw.mac.type == e1000_82575)
6572 itr_val |= itr_val << 16;
6573 else
6574 itr_val |= E1000_EITR_CNT_IGNR;
6575
6576 writel(itr_val, q_vector->itr_register);
6577 q_vector->set_itr = 0;
6578 }
6579
6580 static irqreturn_t igb_msix_ring(int irq, void *data)
6581 {
6582 struct igb_q_vector *q_vector = data;
6583
6584
6585 igb_write_itr(q_vector);
6586
6587 napi_schedule(&q_vector->napi);
6588
6589 return IRQ_HANDLED;
6590 }
6591
6592 #ifdef CONFIG_IGB_DCA
6593 static void igb_update_tx_dca(struct igb_adapter *adapter,
6594 struct igb_ring *tx_ring,
6595 int cpu)
6596 {
6597 struct e1000_hw *hw = &adapter->hw;
6598 u32 txctrl = dca3_get_tag(tx_ring->dev, cpu);
6599
6600 if (hw->mac.type != e1000_82575)
6601 txctrl <<= E1000_DCA_TXCTRL_CPUID_SHIFT;
6602
6603
6604 /* We can enable relaxed ordering for reads, but not writes when DCA
6605  * is enabled, due to a known issue in some chipsets which can clear
6606  * the DCA tag. */
6607 txctrl |= E1000_DCA_TXCTRL_DESC_RRO_EN |
6608 E1000_DCA_TXCTRL_DATA_RRO_EN |
6609 E1000_DCA_TXCTRL_DESC_DCA_EN;
6610
6611 wr32(E1000_DCA_TXCTRL(tx_ring->reg_idx), txctrl);
6612 }
6613
6614 static void igb_update_rx_dca(struct igb_adapter *adapter,
6615 struct igb_ring *rx_ring,
6616 int cpu)
6617 {
6618 struct e1000_hw *hw = &adapter->hw;
6619 u32 rxctrl = dca3_get_tag(&adapter->pdev->dev, cpu);
6620
6621 if (hw->mac.type != e1000_82575)
6622 rxctrl <<= E1000_DCA_RXCTRL_CPUID_SHIFT;
6623
6624
6625
6626
6627
6628 rxctrl |= E1000_DCA_RXCTRL_DESC_RRO_EN |
6629 E1000_DCA_RXCTRL_DESC_DCA_EN;
6630
6631 wr32(E1000_DCA_RXCTRL(rx_ring->reg_idx), rxctrl);
6632 }
6633
6634 static void igb_update_dca(struct igb_q_vector *q_vector)
6635 {
6636 struct igb_adapter *adapter = q_vector->adapter;
6637 int cpu = get_cpu();
6638
6639 if (q_vector->cpu == cpu)
6640 goto out_no_update;
6641
6642 if (q_vector->tx.ring)
6643 igb_update_tx_dca(adapter, q_vector->tx.ring, cpu);
6644
6645 if (q_vector->rx.ring)
6646 igb_update_rx_dca(adapter, q_vector->rx.ring, cpu);
6647
6648 q_vector->cpu = cpu;
6649 out_no_update:
6650 put_cpu();
6651 }
6652
6653 static void igb_setup_dca(struct igb_adapter *adapter)
6654 {
6655 struct e1000_hw *hw = &adapter->hw;
6656 int i;
6657
6658 if (!(adapter->flags & IGB_FLAG_DCA_ENABLED))
6659 return;
6660
6661
6662 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2);
6663
6664 for (i = 0; i < adapter->num_q_vectors; i++) {
6665 adapter->q_vector[i]->cpu = -1;
6666 igb_update_dca(adapter->q_vector[i]);
6667 }
6668 }
6669
6670 static int __igb_notify_dca(struct device *dev, void *data)
6671 {
6672 struct net_device *netdev = dev_get_drvdata(dev);
6673 struct igb_adapter *adapter = netdev_priv(netdev);
6674 struct pci_dev *pdev = adapter->pdev;
6675 struct e1000_hw *hw = &adapter->hw;
6676 unsigned long event = *(unsigned long *)data;
6677
6678 switch (event) {
6679 case DCA_PROVIDER_ADD:
6680
6681 if (adapter->flags & IGB_FLAG_DCA_ENABLED)
6682 break;
6683 if (dca_add_requester(dev) == 0) {
6684 adapter->flags |= IGB_FLAG_DCA_ENABLED;
6685 dev_info(&pdev->dev, "DCA enabled\n");
6686 igb_setup_dca(adapter);
6687 break;
6688 }
6689 /* fall through - DCA could not be enabled */
6690 case DCA_PROVIDER_REMOVE:
6691 if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
6692
6693
6694
6695 dca_remove_requester(dev);
6696 dev_info(&pdev->dev, "DCA disabled\n");
6697 adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
6698 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
6699 }
6700 break;
6701 }
6702
6703 return 0;
6704 }
6705
6706 static int igb_notify_dca(struct notifier_block *nb, unsigned long event,
6707 void *p)
6708 {
6709 int ret_val;
6710
6711 ret_val = driver_for_each_device(&igb_driver.driver, NULL, &event,
6712 __igb_notify_dca);
6713
6714 return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
6715 }
6716 #endif
6717
6718 #ifdef CONFIG_PCI_IOV
6719 static int igb_vf_configure(struct igb_adapter *adapter, int vf)
6720 {
6721 unsigned char mac_addr[ETH_ALEN];
6722
6723 eth_zero_addr(mac_addr);
6724 igb_set_vf_mac(adapter, vf, mac_addr);
6725
6726
6727 adapter->vf_data[vf].spoofchk_enabled = true;
6728
6729
6730 adapter->vf_data[vf].trusted = false;
6731
6732 return 0;
6733 }
6734
6735 #endif
6736 static void igb_ping_all_vfs(struct igb_adapter *adapter)
6737 {
6738 struct e1000_hw *hw = &adapter->hw;
6739 u32 ping;
6740 int i;
6741
6742 for (i = 0 ; i < adapter->vfs_allocated_count; i++) {
6743 ping = E1000_PF_CONTROL_MSG;
6744 if (adapter->vf_data[i].flags & IGB_VF_FLAG_CTS)
6745 ping |= E1000_VT_MSGTYPE_CTS;
6746 igb_write_mbx(hw, &ping, 1, i);
6747 }
6748 }
6749
6750 static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
6751 {
6752 struct e1000_hw *hw = &adapter->hw;
6753 u32 vmolr = rd32(E1000_VMOLR(vf));
6754 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
6755
6756 vf_data->flags &= ~(IGB_VF_FLAG_UNI_PROMISC |
6757 IGB_VF_FLAG_MULTI_PROMISC);
6758 vmolr &= ~(E1000_VMOLR_ROPE | E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);
6759
6760 if (*msgbuf & E1000_VF_SET_PROMISC_MULTICAST) {
6761 vmolr |= E1000_VMOLR_MPME;
6762 vf_data->flags |= IGB_VF_FLAG_MULTI_PROMISC;
6763 *msgbuf &= ~E1000_VF_SET_PROMISC_MULTICAST;
6764 } else {
6765 /* if we have hashes and we are clearing a multicast promisc
6766  * flag we need to write the hashes to the MTA as this step
6767  * was previously skipped
6768  */
6769 if (vf_data->num_vf_mc_hashes > 30) {
6770 vmolr |= E1000_VMOLR_MPME;
6771 } else if (vf_data->num_vf_mc_hashes) {
6772 int j;
6773
6774 vmolr |= E1000_VMOLR_ROMPE;
6775 for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
6776 igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
6777 }
6778 }
6779
6780 wr32(E1000_VMOLR(vf), vmolr);
6781
6782
6783 if (*msgbuf & E1000_VT_MSGINFO_MASK)
6784 return -EINVAL;
6785
6786 return 0;
6787 }
6788
6789 static int igb_set_vf_multicasts(struct igb_adapter *adapter,
6790 u32 *msgbuf, u32 vf)
6791 {
6792 int n = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
6793 u16 *hash_list = (u16 *)&msgbuf[1];
6794 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
6795 int i;
6796
6797
6798
6799
6800
6801 vf_data->num_vf_mc_hashes = n;
6802
6803
6804 if (n > 30)
6805 n = 30;
6806
6807
6808 for (i = 0; i < n; i++)
6809 vf_data->vf_mc_hashes[i] = hash_list[i];
6810
6811
6812 igb_set_rx_mode(adapter->netdev);
6813
6814 return 0;
6815 }
6816
6817 static void igb_restore_vf_multicasts(struct igb_adapter *adapter)
6818 {
6819 struct e1000_hw *hw = &adapter->hw;
6820 struct vf_data_storage *vf_data;
6821 int i, j;
6822
6823 for (i = 0; i < adapter->vfs_allocated_count; i++) {
6824 u32 vmolr = rd32(E1000_VMOLR(i));
6825
6826 vmolr &= ~(E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);
6827
6828 vf_data = &adapter->vf_data[i];
6829
6830 if ((vf_data->num_vf_mc_hashes > 30) ||
6831 (vf_data->flags & IGB_VF_FLAG_MULTI_PROMISC)) {
6832 vmolr |= E1000_VMOLR_MPME;
6833 } else if (vf_data->num_vf_mc_hashes) {
6834 vmolr |= E1000_VMOLR_ROMPE;
6835 for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
6836 igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
6837 }
6838 wr32(E1000_VMOLR(i), vmolr);
6839 }
6840 }
6841
6842 static void igb_clear_vf_vfta(struct igb_adapter *adapter, u32 vf)
6843 {
6844 struct e1000_hw *hw = &adapter->hw;
6845 u32 pool_mask, vlvf_mask, i;
6846
6847
6848 pool_mask = E1000_VLVF_POOLSEL_MASK;
6849 vlvf_mask = BIT(E1000_VLVF_POOLSEL_SHIFT + vf);
6850
6851
6852 pool_mask &= ~BIT(E1000_VLVF_POOLSEL_SHIFT +
6853 adapter->vfs_allocated_count);
6854
6855
6856 for (i = E1000_VLVF_ARRAY_SIZE; i--;) {
6857 u32 vlvf = rd32(E1000_VLVF(i));
6858 u32 vfta_mask, vid, vfta;
6859
6860
6861 if (!(vlvf & vlvf_mask))
6862 continue;
6863
6864
6865 vlvf ^= vlvf_mask;
6866
6867
6868 if (vlvf & pool_mask)
6869 goto update_vlvfb;
6870
6871
6872 if (vlvf & E1000_VLVF_POOLSEL_MASK)
6873 goto update_vlvf;
6874
6875 vid = vlvf & E1000_VLVF_VLANID_MASK;
6876 vfta_mask = BIT(vid % 32);
6877
6878
6879 vfta = adapter->shadow_vfta[vid / 32];
6880 if (vfta & vfta_mask)
6881 hw->mac.ops.write_vfta(hw, vid / 32, vfta ^ vfta_mask);
6882 update_vlvf:
6883
6884 if (adapter->flags & IGB_FLAG_VLAN_PROMISC)
6885 vlvf &= E1000_VLVF_POOLSEL_MASK;
6886 else
6887 vlvf = 0;
6888 update_vlvfb:
6889
6890 wr32(E1000_VLVF(i), vlvf);
6891 }
6892 }
6893
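/* Search the VLVF (VLAN pool filter) registers from the highest index
 * down for an entry matching @vlan.  Index 0 is never examined, so a
 * return value of 0 means "no entry found"; callers such as
 * igb_update_pf_vlvf() treat it that way.
 */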
6894 static int igb_find_vlvf_entry(struct e1000_hw *hw, u32 vlan)
6895 {
6896 u32 vlvf;
6897 int idx;
6898
6899
6900 if (vlan == 0)
6901 return 0;
6902
6903
6904 for (idx = E1000_VLVF_ARRAY_SIZE; --idx;) {
6905 vlvf = rd32(E1000_VLVF(idx));
6906 if ((vlvf & VLAN_VID_MASK) == vlan)
6907 break;
6908 }
6909
6910 return idx;
6911 }
6912
6913 static void igb_update_pf_vlvf(struct igb_adapter *adapter, u32 vid)
6914 {
6915 struct e1000_hw *hw = &adapter->hw;
6916 u32 bits, pf_id;
6917 int idx;
6918
6919 idx = igb_find_vlvf_entry(hw, vid);
6920 if (!idx)
6921 return;
6922
6923
6924
6925
6926 pf_id = adapter->vfs_allocated_count + E1000_VLVF_POOLSEL_SHIFT;
6927 bits = ~BIT(pf_id) & E1000_VLVF_POOLSEL_MASK;
6928 bits &= rd32(E1000_VLVF(idx));
6929
6930
6931 if (!bits) {
6932 if (adapter->flags & IGB_FLAG_VLAN_PROMISC)
6933 wr32(E1000_VLVF(idx), BIT(pf_id));
6934 else
6935 wr32(E1000_VLVF(idx), 0);
6936 }
6937 }
6938
6939 static s32 igb_set_vf_vlan(struct igb_adapter *adapter, u32 vid,
6940 bool add, u32 vf)
6941 {
6942 int pf_id = adapter->vfs_allocated_count;
6943 struct e1000_hw *hw = &adapter->hw;
6944 int err;
6945
6946
6947
6948
6949
6950
6951 if (add && test_bit(vid, adapter->active_vlans)) {
6952 err = igb_vfta_set(hw, vid, pf_id, true, false);
6953 if (err)
6954 return err;
6955 }
6956
6957 err = igb_vfta_set(hw, vid, vf, add, false);
6958
6959 if (add && !err)
6960 return err;
6961
6962
6963
6964
6965
6966 if (test_bit(vid, adapter->active_vlans) ||
6967 (adapter->flags & IGB_FLAG_VLAN_PROMISC))
6968 igb_update_pf_vlvf(adapter, vid);
6969
6970 return err;
6971 }
6972
6973 static void igb_set_vmvir(struct igb_adapter *adapter, u32 vid, u32 vf)
6974 {
6975 struct e1000_hw *hw = &adapter->hw;
6976
6977 if (vid)
6978 wr32(E1000_VMVIR(vf), (vid | E1000_VMVIR_VLANA_DEFAULT));
6979 else
6980 wr32(E1000_VMVIR(vf), 0);
6981 }
6982
6983 static int igb_enable_port_vlan(struct igb_adapter *adapter, int vf,
6984 u16 vlan, u8 qos)
6985 {
6986 int err;
6987
6988 err = igb_set_vf_vlan(adapter, vlan, true, vf);
6989 if (err)
6990 return err;
6991
6992 igb_set_vmvir(adapter, vlan | (qos << VLAN_PRIO_SHIFT), vf);
6993 igb_set_vmolr(adapter, vf, !vlan);
6994
6995
6996 if (vlan != adapter->vf_data[vf].pf_vlan)
6997 igb_set_vf_vlan(adapter, adapter->vf_data[vf].pf_vlan,
6998 false, vf);
6999
7000 adapter->vf_data[vf].pf_vlan = vlan;
7001 adapter->vf_data[vf].pf_qos = qos;
7002 igb_set_vf_vlan_strip(adapter, vf, true);
7003 dev_info(&adapter->pdev->dev,
7004 "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf);
7005 if (test_bit(__IGB_DOWN, &adapter->state)) {
7006 dev_warn(&adapter->pdev->dev,
7007 "The VF VLAN has been set, but the PF device is not up.\n");
7008 dev_warn(&adapter->pdev->dev,
7009 "Bring the PF device up before attempting to use the VF device.\n");
7010 }
7011
7012 return err;
7013 }
7014
7015 static int igb_disable_port_vlan(struct igb_adapter *adapter, int vf)
7016 {
7017
7018 igb_set_vf_vlan(adapter, 0, true, vf);
7019
7020 igb_set_vmvir(adapter, 0, vf);
7021 igb_set_vmolr(adapter, vf, true);
7022
7023
7024 if (adapter->vf_data[vf].pf_vlan)
7025 igb_set_vf_vlan(adapter, adapter->vf_data[vf].pf_vlan,
7026 false, vf);
7027
7028 adapter->vf_data[vf].pf_vlan = 0;
7029 adapter->vf_data[vf].pf_qos = 0;
7030 igb_set_vf_vlan_strip(adapter, vf, false);
7031
7032 return 0;
7033 }
7034
7035 static int igb_ndo_set_vf_vlan(struct net_device *netdev, int vf,
7036 u16 vlan, u8 qos, __be16 vlan_proto)
7037 {
7038 struct igb_adapter *adapter = netdev_priv(netdev);
7039
7040 if ((vf >= adapter->vfs_allocated_count) || (vlan > 4095) || (qos > 7))
7041 return -EINVAL;
7042
7043 if (vlan_proto != htons(ETH_P_8021Q))
7044 return -EPROTONOSUPPORT;
7045
7046 return (vlan || qos) ? igb_enable_port_vlan(adapter, vf, vlan, qos) :
7047 igb_disable_port_vlan(adapter, vf);
7048 }
7049
7050 static int igb_set_vf_vlan_msg(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
7051 {
7052 int add = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
7053 int vid = (msgbuf[1] & E1000_VLVF_VLANID_MASK);
7054 int ret;
7055
7056 if (adapter->vf_data[vf].pf_vlan)
7057 return -1;
7058
7059
7060 if (!vid && !add)
7061 return 0;
7062
7063 ret = igb_set_vf_vlan(adapter, vid, !!add, vf);
7064 if (!ret)
7065 igb_set_vf_vlan_strip(adapter, vf, !!vid);
7066 return ret;
7067 }
7068
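/* igb_vf_reset - restore a VF to its PF-administered state: re-apply the
 * port VLAN (or none), reprogram VMOLR and VLAN stripping accordingly,
 * and drop any multicast hashes the VF had registered.
 */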
7069 static inline void igb_vf_reset(struct igb_adapter *adapter, u32 vf)
7070 {
7071 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
7072
7073
7074 vf_data->flags &= IGB_VF_FLAG_PF_SET_MAC;
7075 vf_data->last_nack = jiffies;
7076
7077
7078 igb_clear_vf_vfta(adapter, vf);
7079 igb_set_vf_vlan(adapter, vf_data->pf_vlan, true, vf);
7080 igb_set_vmvir(adapter, vf_data->pf_vlan |
7081 (vf_data->pf_qos << VLAN_PRIO_SHIFT), vf);
7082 igb_set_vmolr(adapter, vf, !vf_data->pf_vlan);
7083 igb_set_vf_vlan_strip(adapter, vf, !!(vf_data->pf_vlan));
7084
7085
7086 adapter->vf_data[vf].num_vf_mc_hashes = 0;
7087
7088
7089 igb_set_rx_mode(adapter->netdev);
7090 }
7091
7092 static void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf)
7093 {
7094 unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
7095
7096
7097 if (!(adapter->vf_data[vf].flags & IGB_VF_FLAG_PF_SET_MAC))
7098 eth_zero_addr(vf_mac);
7099
7100
7101 igb_vf_reset(adapter, vf);
7102 }
7103
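/* igb_vf_reset_msg - handle a VF_RESET mailbox request: reset the VF,
 * re-enable its Tx/Rx queues (VFTE/VFRE) and reply with the VF's MAC
 * address (ACK), or with a NACK when no MAC address has been assigned.
 */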
7104 static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
7105 {
7106 struct e1000_hw *hw = &adapter->hw;
7107 unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
7108 u32 reg, msgbuf[3];
7109 u8 *addr = (u8 *)(&msgbuf[1]);
7110
7111
7112 igb_vf_reset(adapter, vf);
7113
7114
7115 igb_set_vf_mac(adapter, vf, vf_mac);
7116
7117
7118 reg = rd32(E1000_VFTE);
7119 wr32(E1000_VFTE, reg | BIT(vf));
7120 reg = rd32(E1000_VFRE);
7121 wr32(E1000_VFRE, reg | BIT(vf));
7122
7123 adapter->vf_data[vf].flags |= IGB_VF_FLAG_CTS;
7124
7125
7126 if (!is_zero_ether_addr(vf_mac)) {
7127 msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK;
7128 memcpy(addr, vf_mac, ETH_ALEN);
7129 } else {
7130 msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_NACK;
7131 }
7132 igb_write_mbx(hw, msgbuf, 3, vf);
7133 }
7134
7135 static void igb_flush_mac_table(struct igb_adapter *adapter)
7136 {
7137 struct e1000_hw *hw = &adapter->hw;
7138 int i;
7139
7140 for (i = 0; i < hw->mac.rar_entry_count; i++) {
7141 adapter->mac_table[i].state &= ~IGB_MAC_STATE_IN_USE;
7142 memset(adapter->mac_table[i].addr, 0, ETH_ALEN);
7143 adapter->mac_table[i].queue = 0;
7144 igb_rar_set_index(adapter, i);
7145 }
7146 }
7147
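/* igb_available_rars - count RAR entries that are free or already
 * assigned to the given queue/pool; the default MAC entry is excluded.
 */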
7148 static int igb_available_rars(struct igb_adapter *adapter, u8 queue)
7149 {
7150 struct e1000_hw *hw = &adapter->hw;
7151
7152 int rar_entries = hw->mac.rar_entry_count -
7153 adapter->vfs_allocated_count;
7154 int i, count = 0;
7155
7156 for (i = 0; i < rar_entries; i++) {
7157
7158 if (adapter->mac_table[i].state & IGB_MAC_STATE_DEFAULT)
7159 continue;
7160
7161
7162 if ((adapter->mac_table[i].state & IGB_MAC_STATE_IN_USE) &&
7163 (adapter->mac_table[i].queue != queue))
7164 continue;
7165
7166 count++;
7167 }
7168
7169 return count;
7170 }
7171
7172
7173 static void igb_set_default_mac_filter(struct igb_adapter *adapter)
7174 {
7175 struct igb_mac_addr *mac_table = &adapter->mac_table[0];
7176
7177 ether_addr_copy(mac_table->addr, adapter->hw.mac.addr);
7178 mac_table->queue = adapter->vfs_allocated_count;
7179 mac_table->state = IGB_MAC_STATE_DEFAULT | IGB_MAC_STATE_IN_USE;
7180
7181 igb_rar_set_index(adapter, 0);
7182 }
7183
7184
7185
7186
7187
7188
7189 static bool igb_mac_entry_can_be_used(const struct igb_mac_addr *entry,
7190 const u8 *addr, const u8 flags)
7191 {
7192 if (!(entry->state & IGB_MAC_STATE_IN_USE))
7193 return true;
7194
7195 if ((entry->state & IGB_MAC_STATE_SRC_ADDR) !=
7196 (flags & IGB_MAC_STATE_SRC_ADDR))
7197 return false;
7198
7199 if (!ether_addr_equal(addr, entry->addr))
7200 return false;
7201
7202 return true;
7203 }
7204
7205
7206
7207
7208
7209
7210 static int igb_add_mac_filter_flags(struct igb_adapter *adapter,
7211 const u8 *addr, const u8 queue,
7212 const u8 flags)
7213 {
7214 struct e1000_hw *hw = &adapter->hw;
7215 int rar_entries = hw->mac.rar_entry_count -
7216 adapter->vfs_allocated_count;
7217 int i;
7218
7219 if (is_zero_ether_addr(addr))
7220 return -EINVAL;
7221
7222
7223
7224
7225
7226 for (i = 0; i < rar_entries; i++) {
7227 if (!igb_mac_entry_can_be_used(&adapter->mac_table[i],
7228 addr, flags))
7229 continue;
7230
7231 ether_addr_copy(adapter->mac_table[i].addr, addr);
7232 adapter->mac_table[i].queue = queue;
7233 adapter->mac_table[i].state |= IGB_MAC_STATE_IN_USE | flags;
7234
7235 igb_rar_set_index(adapter, i);
7236 return i;
7237 }
7238
7239 return -ENOSPC;
7240 }
7241
7242 static int igb_add_mac_filter(struct igb_adapter *adapter, const u8 *addr,
7243 const u8 queue)
7244 {
7245 return igb_add_mac_filter_flags(adapter, addr, queue, 0);
7246 }
7247
7248
7249
7250
7251
7252
7253
7254 static int igb_del_mac_filter_flags(struct igb_adapter *adapter,
7255 const u8 *addr, const u8 queue,
7256 const u8 flags)
7257 {
7258 struct e1000_hw *hw = &adapter->hw;
7259 int rar_entries = hw->mac.rar_entry_count -
7260 adapter->vfs_allocated_count;
7261 int i;
7262
7263 if (is_zero_ether_addr(addr))
7264 return -EINVAL;
7265
7266
7267
7268
7269
7270 for (i = 0; i < rar_entries; i++) {
7271 if (!(adapter->mac_table[i].state & IGB_MAC_STATE_IN_USE))
7272 continue;
7273 if ((adapter->mac_table[i].state & flags) != flags)
7274 continue;
7275 if (adapter->mac_table[i].queue != queue)
7276 continue;
7277 if (!ether_addr_equal(adapter->mac_table[i].addr, addr))
7278 continue;
7279
7280
7281
7282
7283 if (adapter->mac_table[i].state & IGB_MAC_STATE_DEFAULT) {
7284 adapter->mac_table[i].state =
7285 IGB_MAC_STATE_DEFAULT | IGB_MAC_STATE_IN_USE;
7286 adapter->mac_table[i].queue =
7287 adapter->vfs_allocated_count;
7288 } else {
7289 adapter->mac_table[i].state = 0;
7290 adapter->mac_table[i].queue = 0;
7291 memset(adapter->mac_table[i].addr, 0, ETH_ALEN);
7292 }
7293
7294 igb_rar_set_index(adapter, i);
7295 return 0;
7296 }
7297
7298 return -ENOENT;
7299 }
7300
7301 static int igb_del_mac_filter(struct igb_adapter *adapter, const u8 *addr,
7302 const u8 queue)
7303 {
7304 return igb_del_mac_filter_flags(adapter, addr, queue, 0);
7305 }
7306
7307 int igb_add_mac_steering_filter(struct igb_adapter *adapter,
7308 const u8 *addr, u8 queue, u8 flags)
7309 {
7310 struct e1000_hw *hw = &adapter->hw;
7311
7312
7313
7314
7315 if (hw->mac.type != e1000_i210)
7316 return -EOPNOTSUPP;
7317
7318 return igb_add_mac_filter_flags(adapter, addr, queue,
7319 IGB_MAC_STATE_QUEUE_STEERING | flags);
7320 }
7321
7322 int igb_del_mac_steering_filter(struct igb_adapter *adapter,
7323 const u8 *addr, u8 queue, u8 flags)
7324 {
7325 return igb_del_mac_filter_flags(adapter, addr, queue,
7326 IGB_MAC_STATE_QUEUE_STEERING | flags);
7327 }
7328
7329 static int igb_uc_sync(struct net_device *netdev, const unsigned char *addr)
7330 {
7331 struct igb_adapter *adapter = netdev_priv(netdev);
7332 int ret;
7333
7334 ret = igb_add_mac_filter(adapter, addr, adapter->vfs_allocated_count);
7335
7336 return min_t(int, ret, 0);
7337 }
7338
7339 static int igb_uc_unsync(struct net_device *netdev, const unsigned char *addr)
7340 {
7341 struct igb_adapter *adapter = netdev_priv(netdev);
7342
7343 igb_del_mac_filter(adapter, addr, adapter->vfs_allocated_count);
7344
7345 return 0;
7346 }
7347
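/* igb_set_vf_mac_filter - service VF mailbox requests to add or clear
 * additional unicast MAC filters.  Add requests are refused for untrusted
 * VFs once the PF has administratively set the VF's MAC address.
 */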
7348 static int igb_set_vf_mac_filter(struct igb_adapter *adapter, const int vf,
7349 const u32 info, const u8 *addr)
7350 {
7351 struct pci_dev *pdev = adapter->pdev;
7352 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
7353 struct list_head *pos;
7354 struct vf_mac_filter *entry = NULL;
7355 int ret = 0;
7356
7357 switch (info) {
7358 case E1000_VF_MAC_FILTER_CLR:
7359
7360 list_for_each(pos, &adapter->vf_macs.l) {
7361 entry = list_entry(pos, struct vf_mac_filter, l);
7362 if (entry->vf == vf) {
7363 entry->vf = -1;
7364 entry->free = true;
7365 igb_del_mac_filter(adapter, entry->vf_mac, vf);
7366 }
7367 }
7368 break;
7369 case E1000_VF_MAC_FILTER_ADD:
7370 if ((vf_data->flags & IGB_VF_FLAG_PF_SET_MAC) &&
7371 !vf_data->trusted) {
7372 dev_warn(&pdev->dev,
7373 "VF %d requested MAC filter but is administratively denied\n",
7374 vf);
7375 return -EINVAL;
7376 }
7377 if (!is_valid_ether_addr(addr)) {
7378 dev_warn(&pdev->dev,
7379 "VF %d attempted to set invalid MAC filter\n",
7380 vf);
7381 return -EINVAL;
7382 }
7383
7384
7385 list_for_each(pos, &adapter->vf_macs.l) {
7386 entry = list_entry(pos, struct vf_mac_filter, l);
7387 if (entry->free)
7388 break;
7389 }
7390
7391 if (entry && entry->free) {
7392 entry->free = false;
7393 entry->vf = vf;
7394 ether_addr_copy(entry->vf_mac, addr);
7395
7396 ret = igb_add_mac_filter(adapter, addr, vf);
7397 ret = min_t(int, ret, 0);
7398 } else {
7399 ret = -ENOSPC;
7400 }
7401
7402 if (ret == -ENOSPC)
7403 dev_warn(&pdev->dev,
7404 "VF %d has requested MAC filter but there is no space for it\n",
7405 vf);
7406 break;
7407 default:
7408 ret = -EINVAL;
7409 break;
7410 }
7411
7412 return ret;
7413 }
7414
7415 static int igb_set_vf_mac_addr(struct igb_adapter *adapter, u32 *msg, int vf)
7416 {
7417 struct pci_dev *pdev = adapter->pdev;
7418 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
7419 u32 info = msg[0] & E1000_VT_MSGINFO_MASK;
7420
7421
7422
7423
7424 unsigned char *addr = (unsigned char *)&msg[1];
7425 int ret = 0;
7426
7427 if (!info) {
7428 if ((vf_data->flags & IGB_VF_FLAG_PF_SET_MAC) &&
7429 !vf_data->trusted) {
7430 dev_warn(&pdev->dev,
7431 "VF %d attempted to override administratively set MAC address\nReload the VF driver to resume operations\n",
7432 vf);
7433 return -EINVAL;
7434 }
7435
7436 if (!is_valid_ether_addr(addr)) {
7437 dev_warn(&pdev->dev,
7438 "VF %d attempted to set invalid MAC\n",
7439 vf);
7440 return -EINVAL;
7441 }
7442
7443 ret = igb_set_vf_mac(adapter, vf, addr);
7444 } else {
7445 ret = igb_set_vf_mac_filter(adapter, vf, info, addr);
7446 }
7447
7448 return ret;
7449 }
7450
7451 static void igb_rcv_ack_from_vf(struct igb_adapter *adapter, u32 vf)
7452 {
7453 struct e1000_hw *hw = &adapter->hw;
7454 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
7455 u32 msg = E1000_VT_MSGTYPE_NACK;
7456
7457
7458 if (!(vf_data->flags & IGB_VF_FLAG_CTS) &&
7459 time_after(jiffies, vf_data->last_nack + (2 * HZ))) {
7460 igb_write_mbx(hw, &msg, 1, vf);
7461 vf_data->last_nack = jiffies;
7462 }
7463 }
7464
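/* igb_rcv_msg_from_vf - read a mailbox message from a VF and dispatch it
 * to the matching handler; the reply is ACKed or NACKed based on the
 * handler's return value, and the mailbox lock is released on the paths
 * that send no reply.
 */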
7465 static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
7466 {
7467 struct pci_dev *pdev = adapter->pdev;
7468 u32 msgbuf[E1000_VFMAILBOX_SIZE];
7469 struct e1000_hw *hw = &adapter->hw;
7470 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
7471 s32 retval;
7472
7473 retval = igb_read_mbx(hw, msgbuf, E1000_VFMAILBOX_SIZE, vf, false);
7474
7475 if (retval) {
7476
7477 dev_err(&pdev->dev, "Error receiving message from VF\n");
7478 vf_data->flags &= ~IGB_VF_FLAG_CTS;
7479 if (!time_after(jiffies, vf_data->last_nack + (2 * HZ)))
7480 goto unlock;
7481 goto out;
7482 }
7483
7484
7485 if (msgbuf[0] & (E1000_VT_MSGTYPE_ACK | E1000_VT_MSGTYPE_NACK))
7486 goto unlock;
7487
7488
7489
7490
7491 if (msgbuf[0] == E1000_VF_RESET) {
7492
7493 igb_vf_reset_msg(adapter, vf);
7494 return;
7495 }
7496
7497 if (!(vf_data->flags & IGB_VF_FLAG_CTS)) {
7498 if (!time_after(jiffies, vf_data->last_nack + (2 * HZ)))
7499 goto unlock;
7500 retval = -1;
7501 goto out;
7502 }
7503
7504 switch ((msgbuf[0] & 0xFFFF)) {
7505 case E1000_VF_SET_MAC_ADDR:
7506 retval = igb_set_vf_mac_addr(adapter, msgbuf, vf);
7507 break;
7508 case E1000_VF_SET_PROMISC:
7509 retval = igb_set_vf_promisc(adapter, msgbuf, vf);
7510 break;
7511 case E1000_VF_SET_MULTICAST:
7512 retval = igb_set_vf_multicasts(adapter, msgbuf, vf);
7513 break;
7514 case E1000_VF_SET_LPE:
7515 retval = igb_set_vf_rlpml(adapter, msgbuf[1], vf);
7516 break;
7517 case E1000_VF_SET_VLAN:
7518 retval = -1;
7519 if (vf_data->pf_vlan)
7520 dev_warn(&pdev->dev,
7521 "VF %d attempted to override administratively set VLAN tag\nReload the VF driver to resume operations\n",
7522 vf);
7523 else
7524 retval = igb_set_vf_vlan_msg(adapter, msgbuf, vf);
7525 break;
7526 default:
7527 dev_err(&pdev->dev, "Unhandled Msg %08x\n", msgbuf[0]);
7528 retval = -1;
7529 break;
7530 }
7531
7532 msgbuf[0] |= E1000_VT_MSGTYPE_CTS;
7533 out:
7534
7535 if (retval)
7536 msgbuf[0] |= E1000_VT_MSGTYPE_NACK;
7537 else
7538 msgbuf[0] |= E1000_VT_MSGTYPE_ACK;
7539
7540
7541 igb_write_mbx(hw, msgbuf, 1, vf);
7542 return;
7543
7544 unlock:
7545 igb_unlock_mbx(hw, vf);
7546 }
7547
7548 static void igb_msg_task(struct igb_adapter *adapter)
7549 {
7550 struct e1000_hw *hw = &adapter->hw;
7551 u32 vf;
7552
7553 for (vf = 0; vf < adapter->vfs_allocated_count; vf++) {
7554
7555 if (!igb_check_for_rst(hw, vf))
7556 igb_vf_reset_event(adapter, vf);
7557
7558
7559 if (!igb_check_for_msg(hw, vf))
7560 igb_rcv_msg_from_vf(adapter, vf);
7561
7562
7563 if (!igb_check_for_ack(hw, vf))
7564 igb_rcv_ack_from_vf(adapter, vf);
7565 }
7566 }
7567
7568 /**
7569  * igb_set_uta - Set unicast filter table address
7570  * @adapter: board private structure
7571  * @set: boolean indicating if we are setting or clearing bits
7572  *
7573  * The unicast table address is a register array of 32-bit registers.
7574  * The table is meant to be used in a way similar to the MTA, but due to
7575  * hardware limitations all hash bits are set to 1 and the VMOLR ROPE
7576  * bit is used as a substitute for promiscuous mode.  Only touched when
7577  * VFs are allocated.
7578  **/
7579 static void igb_set_uta(struct igb_adapter *adapter, bool set)
7580 {
7581 struct e1000_hw *hw = &adapter->hw;
7582 u32 uta = set ? ~0 : 0;
7583 int i;
7584
7585
7586 if (!adapter->vfs_allocated_count)
7587 return;
7588
7589 for (i = hw->mac.uta_reg_count; i--;)
7590 array_wr32(E1000_UTA, i, uta);
7591 }
7592
7593 /**
7594  * igb_intr_msi - Interrupt Handler
7595  * @irq: interrupt number
7596  * @data: pointer to a network interface device structure
7597  **/
7598 static irqreturn_t igb_intr_msi(int irq, void *data)
7599 {
7600 struct igb_adapter *adapter = data;
7601 struct igb_q_vector *q_vector = adapter->q_vector[0];
7602 struct e1000_hw *hw = &adapter->hw;
7603
7604 u32 icr = rd32(E1000_ICR);
7605
7606 igb_write_itr(q_vector);
7607
7608 if (icr & E1000_ICR_DRSTA)
7609 schedule_work(&adapter->reset_task);
7610
7611 if (icr & E1000_ICR_DOUTSYNC) {
7612
7613 adapter->stats.doosync++;
7614 }
7615
7616 if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
7617 hw->mac.get_link_status = 1;
7618 if (!test_bit(__IGB_DOWN, &adapter->state))
7619 mod_timer(&adapter->watchdog_timer, jiffies + 1);
7620 }
7621
7622 if (icr & E1000_ICR_TS)
7623 igb_tsync_interrupt(adapter);
7624
7625 napi_schedule(&q_vector->napi);
7626
7627 return IRQ_HANDLED;
7628 }
7629
7630 /**
7631  * igb_intr - Legacy Interrupt Handler
7632  * @irq: interrupt number
7633  * @data: pointer to a network interface device structure
7634  **/
7635 static irqreturn_t igb_intr(int irq, void *data)
7636 {
7637 struct igb_adapter *adapter = data;
7638 struct igb_q_vector *q_vector = adapter->q_vector[0];
7639 struct e1000_hw *hw = &adapter->hw;
7640
7641
7642
7643 u32 icr = rd32(E1000_ICR);
7644
7645
7646
7647
7648 if (!(icr & E1000_ICR_INT_ASSERTED))
7649 return IRQ_NONE;
7650
7651 igb_write_itr(q_vector);
7652
7653 if (icr & E1000_ICR_DRSTA)
7654 schedule_work(&adapter->reset_task);
7655
7656 if (icr & E1000_ICR_DOUTSYNC) {
7657
7658 adapter->stats.doosync++;
7659 }
7660
7661 if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
7662 hw->mac.get_link_status = 1;
7663
7664 if (!test_bit(__IGB_DOWN, &adapter->state))
7665 mod_timer(&adapter->watchdog_timer, jiffies + 1);
7666 }
7667
7668 if (icr & E1000_ICR_TS)
7669 igb_tsync_interrupt(adapter);
7670
7671 napi_schedule(&q_vector->napi);
7672
7673 return IRQ_HANDLED;
7674 }
7675
7676 static void igb_ring_irq_enable(struct igb_q_vector *q_vector)
7677 {
7678 struct igb_adapter *adapter = q_vector->adapter;
7679 struct e1000_hw *hw = &adapter->hw;
7680
7681 if ((q_vector->rx.ring && (adapter->rx_itr_setting & 3)) ||
7682 (!q_vector->rx.ring && (adapter->tx_itr_setting & 3))) {
7683 if ((adapter->num_q_vectors == 1) && !adapter->vf_data)
7684 igb_set_itr(q_vector);
7685 else
7686 igb_update_ring_itr(q_vector);
7687 }
7688
7689 if (!test_bit(__IGB_DOWN, &adapter->state)) {
7690 if (adapter->flags & IGB_FLAG_HAS_MSIX)
7691 wr32(E1000_EIMS, q_vector->eims_value);
7692 else
7693 igb_irq_enable(adapter);
7694 }
7695 }
7696
7697 /**
7698  * igb_poll - NAPI Rx polling callback
7699  * @napi: napi polling structure
7700  * @budget: count of how many packets we should handle
7701  **/
7702 static int igb_poll(struct napi_struct *napi, int budget)
7703 {
7704 struct igb_q_vector *q_vector = container_of(napi,
7705 struct igb_q_vector,
7706 napi);
7707 bool clean_complete = true;
7708 int work_done = 0;
7709
7710 #ifdef CONFIG_IGB_DCA
7711 if (q_vector->adapter->flags & IGB_FLAG_DCA_ENABLED)
7712 igb_update_dca(q_vector);
7713 #endif
7714 if (q_vector->tx.ring)
7715 clean_complete = igb_clean_tx_irq(q_vector, budget);
7716
7717 if (q_vector->rx.ring) {
7718 int cleaned = igb_clean_rx_irq(q_vector, budget);
7719
7720 work_done += cleaned;
7721 if (cleaned >= budget)
7722 clean_complete = false;
7723 }
7724
7725 /* If all work not completed, return budget and keep polling */
7726 if (!clean_complete)
7727 return budget;
7728
7729 /* Exit the polling mode, but don't re-enable interrupts if the stack
7730  * might poll us again due to busy-polling
7731  */
7732 if (likely(napi_complete_done(napi, work_done)))
7733 igb_ring_irq_enable(q_vector);
7734
7735 return min(work_done, budget - 1);
7736 }
7737
7738 /**
7739  * igb_clean_tx_irq - Reclaim resources after transmit completes
7740  * @q_vector: pointer to q_vector containing needed info
7741  * @napi_budget: Used to determine if we are in netpoll
7742  *
7743  * returns true if ring is completely cleaned
7744  **/
7745 static bool igb_clean_tx_irq(struct igb_q_vector *q_vector, int napi_budget)
7746 {
7747 struct igb_adapter *adapter = q_vector->adapter;
7748 struct igb_ring *tx_ring = q_vector->tx.ring;
7749 struct igb_tx_buffer *tx_buffer;
7750 union e1000_adv_tx_desc *tx_desc;
7751 unsigned int total_bytes = 0, total_packets = 0;
7752 unsigned int budget = q_vector->tx.work_limit;
7753 unsigned int i = tx_ring->next_to_clean;
7754
7755 if (test_bit(__IGB_DOWN, &adapter->state))
7756 return true;
7757
7758 tx_buffer = &tx_ring->tx_buffer_info[i];
7759 tx_desc = IGB_TX_DESC(tx_ring, i);
7760 i -= tx_ring->count;
7761
7762 do {
7763 union e1000_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;
7764
7765
7766 if (!eop_desc)
7767 break;
7768
7769
7770 smp_rmb();
7771
7772
7773 if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)))
7774 break;
7775
7776
7777 tx_buffer->next_to_watch = NULL;
7778
7779
7780 total_bytes += tx_buffer->bytecount;
7781 total_packets += tx_buffer->gso_segs;
7782
7783
7784 napi_consume_skb(tx_buffer->skb, napi_budget);
7785
7786
7787 dma_unmap_single(tx_ring->dev,
7788 dma_unmap_addr(tx_buffer, dma),
7789 dma_unmap_len(tx_buffer, len),
7790 DMA_TO_DEVICE);
7791
7792
7793 dma_unmap_len_set(tx_buffer, len, 0);
7794
7795
7796 while (tx_desc != eop_desc) {
7797 tx_buffer++;
7798 tx_desc++;
7799 i++;
7800 if (unlikely(!i)) {
7801 i -= tx_ring->count;
7802 tx_buffer = tx_ring->tx_buffer_info;
7803 tx_desc = IGB_TX_DESC(tx_ring, 0);
7804 }
7805
7806
7807 if (dma_unmap_len(tx_buffer, len)) {
7808 dma_unmap_page(tx_ring->dev,
7809 dma_unmap_addr(tx_buffer, dma),
7810 dma_unmap_len(tx_buffer, len),
7811 DMA_TO_DEVICE);
7812 dma_unmap_len_set(tx_buffer, len, 0);
7813 }
7814 }
7815
7816
7817 tx_buffer++;
7818 tx_desc++;
7819 i++;
7820 if (unlikely(!i)) {
7821 i -= tx_ring->count;
7822 tx_buffer = tx_ring->tx_buffer_info;
7823 tx_desc = IGB_TX_DESC(tx_ring, 0);
7824 }
7825
7826
7827 prefetch(tx_desc);
7828
7829
7830 budget--;
7831 } while (likely(budget));
7832
7833 netdev_tx_completed_queue(txring_txq(tx_ring),
7834 total_packets, total_bytes);
7835 i += tx_ring->count;
7836 tx_ring->next_to_clean = i;
7837 u64_stats_update_begin(&tx_ring->tx_syncp);
7838 tx_ring->tx_stats.bytes += total_bytes;
7839 tx_ring->tx_stats.packets += total_packets;
7840 u64_stats_update_end(&tx_ring->tx_syncp);
7841 q_vector->tx.total_bytes += total_bytes;
7842 q_vector->tx.total_packets += total_packets;
7843
7844 if (test_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) {
7845 struct e1000_hw *hw = &adapter->hw;
7846
7847
7848
7849
7850 clear_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
7851 if (tx_buffer->next_to_watch &&
7852 time_after(jiffies, tx_buffer->time_stamp +
7853 (adapter->tx_timeout_factor * HZ)) &&
7854 !(rd32(E1000_STATUS) & E1000_STATUS_TXOFF)) {
7855
7856
7857 dev_err(tx_ring->dev,
7858 "Detected Tx Unit Hang\n"
7859 " Tx Queue <%d>\n"
7860 " TDH <%x>\n"
7861 " TDT <%x>\n"
7862 " next_to_use <%x>\n"
7863 " next_to_clean <%x>\n"
7864 "buffer_info[next_to_clean]\n"
7865 " time_stamp <%lx>\n"
7866 " next_to_watch <%p>\n"
7867 " jiffies <%lx>\n"
7868 " desc.status <%x>\n",
7869 tx_ring->queue_index,
7870 rd32(E1000_TDH(tx_ring->reg_idx)),
7871 readl(tx_ring->tail),
7872 tx_ring->next_to_use,
7873 tx_ring->next_to_clean,
7874 tx_buffer->time_stamp,
7875 tx_buffer->next_to_watch,
7876 jiffies,
7877 tx_buffer->next_to_watch->wb.status);
7878 netif_stop_subqueue(tx_ring->netdev,
7879 tx_ring->queue_index);
7880
7881
7882 return true;
7883 }
7884 }
7885
7886 #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
7887 if (unlikely(total_packets &&
7888 netif_carrier_ok(tx_ring->netdev) &&
7889 igb_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD)) {
7890
7891
7892
7893 smp_mb();
7894 if (__netif_subqueue_stopped(tx_ring->netdev,
7895 tx_ring->queue_index) &&
7896 !(test_bit(__IGB_DOWN, &adapter->state))) {
7897 netif_wake_subqueue(tx_ring->netdev,
7898 tx_ring->queue_index);
7899
7900 u64_stats_update_begin(&tx_ring->tx_syncp);
7901 tx_ring->tx_stats.restart_queue++;
7902 u64_stats_update_end(&tx_ring->tx_syncp);
7903 }
7904 }
7905
7906 return !!budget;
7907 }
7908
7909 /**
7910  * igb_reuse_rx_page - store the donor buffer's page back on the ring
7911  * @rx_ring: rx descriptor ring to store buffers on
7912  * @old_buff: donor buffer to have page reused
7913  *
7914  * The recycled page is placed at next_to_alloc so it can be reused
7915  **/
7916 static void igb_reuse_rx_page(struct igb_ring *rx_ring,
7917 struct igb_rx_buffer *old_buff)
7918 {
7919 struct igb_rx_buffer *new_buff;
7920 u16 nta = rx_ring->next_to_alloc;
7921
7922 new_buff = &rx_ring->rx_buffer_info[nta];
7923
7924
7925 nta++;
7926 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
7927
7928
7929
7930
7931
7932 new_buff->dma = old_buff->dma;
7933 new_buff->page = old_buff->page;
7934 new_buff->page_offset = old_buff->page_offset;
7935 new_buff->pagecnt_bias = old_buff->pagecnt_bias;
7936 }
7937
7938 static inline bool igb_page_is_reserved(struct page *page)
7939 {
7940 return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
7941 }
7942
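/* igb_can_reuse_rx_page - decide whether an Rx page can be recycled.
 * Reuse is refused for pages from a remote NUMA node or pfmemalloc pages,
 * and for pages the stack still holds references to; otherwise the page
 * reference count is topped up in bulk to avoid per-packet atomics.
 */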
7943 static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer)
7944 {
7945 unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
7946 struct page *page = rx_buffer->page;
7947
7948
7949 if (unlikely(igb_page_is_reserved(page)))
7950 return false;
7951
7952 #if (PAGE_SIZE < 8192)
7953
7954 if (unlikely((page_ref_count(page) - pagecnt_bias) > 1))
7955 return false;
7956 #else
7957 #define IGB_LAST_OFFSET \
7958 (SKB_WITH_OVERHEAD(PAGE_SIZE) - IGB_RXBUFFER_2048)
7959
7960 if (rx_buffer->page_offset > IGB_LAST_OFFSET)
7961 return false;
7962 #endif
7963
7964
7965
7966
7967
7968 if (unlikely(!pagecnt_bias)) {
7969 page_ref_add(page, USHRT_MAX);
7970 rx_buffer->pagecnt_bias = USHRT_MAX;
7971 }
7972
7973 return true;
7974 }
7975
7976
7977
7978
7979
7980
7981
7982
7983
7984
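/* igb_add_rx_frag - attach the Rx buffer's page to an existing skb as a
 * paged fragment and advance page_offset, flipping halves on small pages
 * or moving to the next chunk on large pages.
 */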
7985 static void igb_add_rx_frag(struct igb_ring *rx_ring,
7986 struct igb_rx_buffer *rx_buffer,
7987 struct sk_buff *skb,
7988 unsigned int size)
7989 {
7990 #if (PAGE_SIZE < 8192)
7991 unsigned int truesize = igb_rx_pg_size(rx_ring) / 2;
7992 #else
7993 unsigned int truesize = ring_uses_build_skb(rx_ring) ?
7994 SKB_DATA_ALIGN(IGB_SKB_PAD + size) :
7995 SKB_DATA_ALIGN(size);
7996 #endif
7997 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
7998 rx_buffer->page_offset, size, truesize);
7999 #if (PAGE_SIZE < 8192)
8000 rx_buffer->page_offset ^= truesize;
8001 #else
8002 rx_buffer->page_offset += truesize;
8003 #endif
8004 }
8005
8006 static struct sk_buff *igb_construct_skb(struct igb_ring *rx_ring,
8007 struct igb_rx_buffer *rx_buffer,
8008 union e1000_adv_rx_desc *rx_desc,
8009 unsigned int size)
8010 {
8011 void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
8012 #if (PAGE_SIZE < 8192)
8013 unsigned int truesize = igb_rx_pg_size(rx_ring) / 2;
8014 #else
8015 unsigned int truesize = SKB_DATA_ALIGN(size);
8016 #endif
8017 unsigned int headlen;
8018 struct sk_buff *skb;
8019
8020
8021 prefetch(va);
8022 #if L1_CACHE_BYTES < 128
8023 prefetch(va + L1_CACHE_BYTES);
8024 #endif
8025
8026
8027 skb = napi_alloc_skb(&rx_ring->q_vector->napi, IGB_RX_HDR_LEN);
8028 if (unlikely(!skb))
8029 return NULL;
8030
8031 if (unlikely(igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP))) {
8032 igb_ptp_rx_pktstamp(rx_ring->q_vector, va, skb);
8033 va += IGB_TS_HDR_LEN;
8034 size -= IGB_TS_HDR_LEN;
8035 }
8036
8037
8038 headlen = size;
8039 if (headlen > IGB_RX_HDR_LEN)
8040 headlen = eth_get_headlen(skb->dev, va, IGB_RX_HDR_LEN);
8041
8042
8043 memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));
8044
8045
8046 size -= headlen;
8047 if (size) {
8048 skb_add_rx_frag(skb, 0, rx_buffer->page,
8049 (va + headlen) - page_address(rx_buffer->page),
8050 size, truesize);
8051 #if (PAGE_SIZE < 8192)
8052 rx_buffer->page_offset ^= truesize;
8053 #else
8054 rx_buffer->page_offset += truesize;
8055 #endif
8056 } else {
8057 rx_buffer->pagecnt_bias++;
8058 }
8059
8060 return skb;
8061 }
8062
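/* igb_build_skb - wrap the already DMA'd buffer in an skb without copying:
 * build_skb() reuses the page data in place, IGB_SKB_PAD of headroom is
 * reserved, and any inline PTP timestamp (TSIP) is pulled off the front
 * of the packet.
 */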
8063 static struct sk_buff *igb_build_skb(struct igb_ring *rx_ring,
8064 struct igb_rx_buffer *rx_buffer,
8065 union e1000_adv_rx_desc *rx_desc,
8066 unsigned int size)
8067 {
8068 void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
8069 #if (PAGE_SIZE < 8192)
8070 unsigned int truesize = igb_rx_pg_size(rx_ring) / 2;
8071 #else
8072 unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
8073 SKB_DATA_ALIGN(IGB_SKB_PAD + size);
8074 #endif
8075 struct sk_buff *skb;
8076
8077
8078 prefetch(va);
8079 #if L1_CACHE_BYTES < 128
8080 prefetch(va + L1_CACHE_BYTES);
8081 #endif
8082
8083
8084 skb = build_skb(va - IGB_SKB_PAD, truesize);
8085 if (unlikely(!skb))
8086 return NULL;
8087
8088
8089 skb_reserve(skb, IGB_SKB_PAD);
8090 __skb_put(skb, size);
8091
8092
8093 if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
8094 igb_ptp_rx_pktstamp(rx_ring->q_vector, skb->data, skb);
8095 __skb_pull(skb, IGB_TS_HDR_LEN);
8096 }
8097
8098
8099 #if (PAGE_SIZE < 8192)
8100 rx_buffer->page_offset ^= truesize;
8101 #else
8102 rx_buffer->page_offset += truesize;
8103 #endif
8104
8105 return skb;
8106 }
8107
8108 static inline void igb_rx_checksum(struct igb_ring *ring,
8109 union e1000_adv_rx_desc *rx_desc,
8110 struct sk_buff *skb)
8111 {
8112 skb_checksum_none_assert(skb);
8113
8114
8115 if (igb_test_staterr(rx_desc, E1000_RXD_STAT_IXSM))
8116 return;
8117
8118
8119 if (!(ring->netdev->features & NETIF_F_RXCSUM))
8120 return;
8121
8122
8123 if (igb_test_staterr(rx_desc,
8124 E1000_RXDEXT_STATERR_TCPE |
8125 E1000_RXDEXT_STATERR_IPE)) {
8126
8127
8128
8129
8130 if (!((skb->len == 60) &&
8131 test_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags))) {
8132 u64_stats_update_begin(&ring->rx_syncp);
8133 ring->rx_stats.csum_err++;
8134 u64_stats_update_end(&ring->rx_syncp);
8135 }
8136
8137 return;
8138 }
8139
8140 if (igb_test_staterr(rx_desc, E1000_RXD_STAT_TCPCS |
8141 E1000_RXD_STAT_UDPCS))
8142 skb->ip_summed = CHECKSUM_UNNECESSARY;
8143
8144 dev_dbg(ring->dev, "cksum success: bits %08X\n",
8145 le32_to_cpu(rx_desc->wb.upper.status_error));
8146 }
8147
8148 static inline void igb_rx_hash(struct igb_ring *ring,
8149 union e1000_adv_rx_desc *rx_desc,
8150 struct sk_buff *skb)
8151 {
8152 if (ring->netdev->features & NETIF_F_RXHASH)
8153 skb_set_hash(skb,
8154 le32_to_cpu(rx_desc->wb.lower.hi_dword.rss),
8155 PKT_HASH_TYPE_L3);
8156 }
8157
8158
8159
8160
8161
8162
8163
8164
8165
8166
8167
8168
8169 static bool igb_is_non_eop(struct igb_ring *rx_ring,
8170 union e1000_adv_rx_desc *rx_desc)
8171 {
8172 u32 ntc = rx_ring->next_to_clean + 1;
8173
8174
8175 ntc = (ntc < rx_ring->count) ? ntc : 0;
8176 rx_ring->next_to_clean = ntc;
8177
8178 prefetch(IGB_RX_DESC(rx_ring, ntc));
8179
8180 if (likely(igb_test_staterr(rx_desc, E1000_RXD_STAT_EOP)))
8181 return false;
8182
8183 return true;
8184 }
8185
8186
8187
8188
8189
8190
8191
8192
8193
8194
8195
8196
8197
8198
8199
8200 static bool igb_cleanup_headers(struct igb_ring *rx_ring,
8201 union e1000_adv_rx_desc *rx_desc,
8202 struct sk_buff *skb)
8203 {
8204 if (unlikely((igb_test_staterr(rx_desc,
8205 E1000_RXDEXT_ERR_FRAME_ERR_MASK)))) {
8206 struct net_device *netdev = rx_ring->netdev;
8207 if (!(netdev->features & NETIF_F_RXALL)) {
8208 dev_kfree_skb_any(skb);
8209 return true;
8210 }
8211 }
8212
8213
8214 if (eth_skb_pad(skb))
8215 return true;
8216
8217 return false;
8218 }
8219
8220
8221
8222
8223
8224
8225
8226
8227
8228
8229
8230 static void igb_process_skb_fields(struct igb_ring *rx_ring,
8231 union e1000_adv_rx_desc *rx_desc,
8232 struct sk_buff *skb)
8233 {
8234 struct net_device *dev = rx_ring->netdev;
8235
8236 igb_rx_hash(rx_ring, rx_desc, skb);
8237
8238 igb_rx_checksum(rx_ring, rx_desc, skb);
8239
8240 if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TS) &&
8241 !igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP))
8242 igb_ptp_rx_rgtstamp(rx_ring->q_vector, skb);
8243
8244 if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
8245 igb_test_staterr(rx_desc, E1000_RXD_STAT_VP)) {
8246 u16 vid;
8247
8248 if (igb_test_staterr(rx_desc, E1000_RXDEXT_STATERR_LB) &&
8249 test_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &rx_ring->flags))
8250 vid = be16_to_cpu(rx_desc->wb.upper.vlan);
8251 else
8252 vid = le16_to_cpu(rx_desc->wb.upper.vlan);
8253
8254 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
8255 }
8256
8257 skb_record_rx_queue(skb, rx_ring->queue_index);
8258
8259 skb->protocol = eth_type_trans(skb, rx_ring->netdev);
8260 }
8261
8262 static struct igb_rx_buffer *igb_get_rx_buffer(struct igb_ring *rx_ring,
8263 const unsigned int size)
8264 {
8265 struct igb_rx_buffer *rx_buffer;
8266
8267 rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
8268 prefetchw(rx_buffer->page);
8269
8270
8271 dma_sync_single_range_for_cpu(rx_ring->dev,
8272 rx_buffer->dma,
8273 rx_buffer->page_offset,
8274 size,
8275 DMA_FROM_DEVICE);
8276
8277 rx_buffer->pagecnt_bias--;
8278
8279 return rx_buffer;
8280 }
8281
8282 static void igb_put_rx_buffer(struct igb_ring *rx_ring,
8283 struct igb_rx_buffer *rx_buffer)
8284 {
8285 if (igb_can_reuse_rx_page(rx_buffer)) {
8286
8287 igb_reuse_rx_page(rx_ring, rx_buffer);
8288 } else {
8289
8290
8291
8292 dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
8293 igb_rx_pg_size(rx_ring), DMA_FROM_DEVICE,
8294 IGB_RX_DMA_ATTR);
8295 __page_frag_cache_drain(rx_buffer->page,
8296 rx_buffer->pagecnt_bias);
8297 }
8298
8299
8300 rx_buffer->page = NULL;
8301 }
8302
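/* igb_clean_rx_irq - the Rx side of NAPI polling: refill descriptors once
 * IGB_RX_BUFFER_WRITE buffers have been consumed, build or extend an skb
 * for each completed descriptor, and hand finished packets to the stack
 * via GRO.  Returns the number of packets processed, capped at budget.
 */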
8303 static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
8304 {
8305 struct igb_ring *rx_ring = q_vector->rx.ring;
8306 struct sk_buff *skb = rx_ring->skb;
8307 unsigned int total_bytes = 0, total_packets = 0;
8308 u16 cleaned_count = igb_desc_unused(rx_ring);
8309
8310 while (likely(total_packets < budget)) {
8311 union e1000_adv_rx_desc *rx_desc;
8312 struct igb_rx_buffer *rx_buffer;
8313 unsigned int size;
8314
8315
8316 if (cleaned_count >= IGB_RX_BUFFER_WRITE) {
8317 igb_alloc_rx_buffers(rx_ring, cleaned_count);
8318 cleaned_count = 0;
8319 }
8320
8321 rx_desc = IGB_RX_DESC(rx_ring, rx_ring->next_to_clean);
8322 size = le16_to_cpu(rx_desc->wb.upper.length);
8323 if (!size)
8324 break;
8325
8326
8327
8328
8329
8330 dma_rmb();
8331
8332 rx_buffer = igb_get_rx_buffer(rx_ring, size);
8333
8334
8335 if (skb)
8336 igb_add_rx_frag(rx_ring, rx_buffer, skb, size);
8337 else if (ring_uses_build_skb(rx_ring))
8338 skb = igb_build_skb(rx_ring, rx_buffer, rx_desc, size);
8339 else
8340 skb = igb_construct_skb(rx_ring, rx_buffer,
8341 rx_desc, size);
8342
8343
8344 if (!skb) {
8345 rx_ring->rx_stats.alloc_failed++;
8346 rx_buffer->pagecnt_bias++;
8347 break;
8348 }
8349
8350 igb_put_rx_buffer(rx_ring, rx_buffer);
8351 cleaned_count++;
8352
8353
8354 if (igb_is_non_eop(rx_ring, rx_desc))
8355 continue;
8356
8357
8358 if (igb_cleanup_headers(rx_ring, rx_desc, skb)) {
8359 skb = NULL;
8360 continue;
8361 }
8362
8363
8364 total_bytes += skb->len;
8365
8366
8367 igb_process_skb_fields(rx_ring, rx_desc, skb);
8368
8369 napi_gro_receive(&q_vector->napi, skb);
8370
8371
8372 skb = NULL;
8373
8374
8375 total_packets++;
8376 }
8377
8378
8379 rx_ring->skb = skb;
8380
8381 u64_stats_update_begin(&rx_ring->rx_syncp);
8382 rx_ring->rx_stats.packets += total_packets;
8383 rx_ring->rx_stats.bytes += total_bytes;
8384 u64_stats_update_end(&rx_ring->rx_syncp);
8385 q_vector->rx.total_packets += total_packets;
8386 q_vector->rx.total_bytes += total_bytes;
8387
8388 if (cleaned_count)
8389 igb_alloc_rx_buffers(rx_ring, cleaned_count);
8390
8391 return total_packets;
8392 }
8393
8394 static inline unsigned int igb_rx_offset(struct igb_ring *rx_ring)
8395 {
8396 return ring_uses_build_skb(rx_ring) ? IGB_SKB_PAD : 0;
8397 }
8398
8399 static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
8400 struct igb_rx_buffer *bi)
8401 {
8402 struct page *page = bi->page;
8403 dma_addr_t dma;
8404
8405
8406 if (likely(page))
8407 return true;
8408
8409
8410 page = dev_alloc_pages(igb_rx_pg_order(rx_ring));
8411 if (unlikely(!page)) {
8412 rx_ring->rx_stats.alloc_failed++;
8413 return false;
8414 }
8415
8416
8417 dma = dma_map_page_attrs(rx_ring->dev, page, 0,
8418 igb_rx_pg_size(rx_ring),
8419 DMA_FROM_DEVICE,
8420 IGB_RX_DMA_ATTR);
8421
8422
8423
8424
8425 if (dma_mapping_error(rx_ring->dev, dma)) {
8426 __free_pages(page, igb_rx_pg_order(rx_ring));
8427
8428 rx_ring->rx_stats.alloc_failed++;
8429 return false;
8430 }
8431
8432 bi->dma = dma;
8433 bi->page = page;
8434 bi->page_offset = igb_rx_offset(rx_ring);
8435 bi->pagecnt_bias = 1;
8436
8437 return true;
8438 }
8439
8440 /* igb_alloc_rx_buffers - Replace used receive buffers
8441  * @rx_ring: rx descriptor ring to allocate new receive buffers for
8442  * @cleaned_count: number of buffers to allocate
8443  */
8444 void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
8445 {
8446 union e1000_adv_rx_desc *rx_desc;
8447 struct igb_rx_buffer *bi;
8448 u16 i = rx_ring->next_to_use;
8449 u16 bufsz;
8450
8451
8452 if (!cleaned_count)
8453 return;
8454
8455 rx_desc = IGB_RX_DESC(rx_ring, i);
8456 bi = &rx_ring->rx_buffer_info[i];
8457 i -= rx_ring->count;
8458
8459 bufsz = igb_rx_bufsz(rx_ring);
8460
8461 do {
8462 if (!igb_alloc_mapped_page(rx_ring, bi))
8463 break;
8464
8465
8466 dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
8467 bi->page_offset, bufsz,
8468 DMA_FROM_DEVICE);
8469
8470
8471
8472
8473 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
8474
8475 rx_desc++;
8476 bi++;
8477 i++;
8478 if (unlikely(!i)) {
8479 rx_desc = IGB_RX_DESC(rx_ring, 0);
8480 bi = rx_ring->rx_buffer_info;
8481 i -= rx_ring->count;
8482 }
8483
8484
8485 rx_desc->wb.upper.length = 0;
8486
8487 cleaned_count--;
8488 } while (cleaned_count);
8489
8490 i += rx_ring->count;
8491
8492 if (rx_ring->next_to_use != i) {
8493
8494 rx_ring->next_to_use = i;
8495
8496
8497 rx_ring->next_to_alloc = i;
8498
8499
8500
8501
8502
8503
8504 dma_wmb();
8505 writel(i, rx_ring->tail);
8506 }
8507 }
8508
8509
8510
8511
8512
8513
8514
8515 static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
8516 {
8517 struct igb_adapter *adapter = netdev_priv(netdev);
8518 struct mii_ioctl_data *data = if_mii(ifr);
8519
8520 if (adapter->hw.phy.media_type != e1000_media_type_copper)
8521 return -EOPNOTSUPP;
8522
8523 switch (cmd) {
8524 case SIOCGMIIPHY:
8525 data->phy_id = adapter->hw.phy.addr;
8526 break;
8527 case SIOCGMIIREG:
8528 if (igb_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
8529 &data->val_out))
8530 return -EIO;
8531 break;
8532 case SIOCSMIIREG:
8533 default:
8534 return -EOPNOTSUPP;
8535 }
8536 return 0;
8537 }
8538
8539
8540
8541
8542
8543
8544
8545 static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
8546 {
8547 switch (cmd) {
8548 case SIOCGMIIPHY:
8549 case SIOCGMIIREG:
8550 case SIOCSMIIREG:
8551 return igb_mii_ioctl(netdev, ifr, cmd);
8552 case SIOCGHWTSTAMP:
8553 return igb_ptp_get_ts_config(netdev, ifr);
8554 case SIOCSHWTSTAMP:
8555 return igb_ptp_set_ts_config(netdev, ifr);
8556 default:
8557 return -EOPNOTSUPP;
8558 }
8559 }
8560
8561 void igb_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value)
8562 {
8563 struct igb_adapter *adapter = hw->back;
8564
8565 pci_read_config_word(adapter->pdev, reg, value);
8566 }
8567
8568 void igb_write_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value)
8569 {
8570 struct igb_adapter *adapter = hw->back;
8571
8572 pci_write_config_word(adapter->pdev, reg, *value);
8573 }
8574
8575 s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
8576 {
8577 struct igb_adapter *adapter = hw->back;
8578
8579 if (pcie_capability_read_word(adapter->pdev, reg, value))
8580 return -E1000_ERR_CONFIG;
8581
8582 return 0;
8583 }
8584
8585 s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
8586 {
8587 struct igb_adapter *adapter = hw->back;
8588
8589 if (pcie_capability_write_word(adapter->pdev, reg, *value))
8590 return -E1000_ERR_CONFIG;
8591
8592 return 0;
8593 }
8594
8595 static void igb_vlan_mode(struct net_device *netdev, netdev_features_t features)
8596 {
8597 struct igb_adapter *adapter = netdev_priv(netdev);
8598 struct e1000_hw *hw = &adapter->hw;
8599 u32 ctrl, rctl;
8600 bool enable = !!(features & NETIF_F_HW_VLAN_CTAG_RX);
8601
8602 if (enable) {
8603
8604 ctrl = rd32(E1000_CTRL);
8605 ctrl |= E1000_CTRL_VME;
8606 wr32(E1000_CTRL, ctrl);
8607
8608
8609 rctl = rd32(E1000_RCTL);
8610 rctl &= ~E1000_RCTL_CFIEN;
8611 wr32(E1000_RCTL, rctl);
8612 } else {
8613
8614 ctrl = rd32(E1000_CTRL);
8615 ctrl &= ~E1000_CTRL_VME;
8616 wr32(E1000_CTRL, ctrl);
8617 }
8618
8619 igb_set_vf_vlan_strip(adapter, adapter->vfs_allocated_count, enable);
8620 }
8621
8622 static int igb_vlan_rx_add_vid(struct net_device *netdev,
8623 __be16 proto, u16 vid)
8624 {
8625 struct igb_adapter *adapter = netdev_priv(netdev);
8626 struct e1000_hw *hw = &adapter->hw;
8627 int pf_id = adapter->vfs_allocated_count;
8628
8629
8630 if (!vid || !(adapter->flags & IGB_FLAG_VLAN_PROMISC))
8631 igb_vfta_set(hw, vid, pf_id, true, !!vid);
8632
8633 set_bit(vid, adapter->active_vlans);
8634
8635 return 0;
8636 }
8637
8638 static int igb_vlan_rx_kill_vid(struct net_device *netdev,
8639 __be16 proto, u16 vid)
8640 {
8641 struct igb_adapter *adapter = netdev_priv(netdev);
8642 int pf_id = adapter->vfs_allocated_count;
8643 struct e1000_hw *hw = &adapter->hw;
8644
8645
8646 if (vid && !(adapter->flags & IGB_FLAG_VLAN_PROMISC))
8647 igb_vfta_set(hw, vid, pf_id, false, true);
8648
8649 clear_bit(vid, adapter->active_vlans);
8650
8651 return 0;
8652 }
8653
8654 static void igb_restore_vlan(struct igb_adapter *adapter)
8655 {
8656 u16 vid = 1;
8657
8658 igb_vlan_mode(adapter->netdev, adapter->netdev->features);
8659 igb_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), 0);
8660
8661 for_each_set_bit_from(vid, adapter->active_vlans, VLAN_N_VID)
8662 igb_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
8663 }
8664
8665 int igb_set_spd_dplx(struct igb_adapter *adapter, u32 spd, u8 dplx)
8666 {
8667 struct pci_dev *pdev = adapter->pdev;
8668 struct e1000_mac_info *mac = &adapter->hw.mac;
8669
8670 mac->autoneg = 0;
8671
8672
8673
8674
8675 if ((spd & 1) || (dplx & ~1))
8676 goto err_inval;
8677
8678
8679
8680
8681 if (adapter->hw.phy.media_type == e1000_media_type_internal_serdes) {
8682 switch (spd + dplx) {
8683 case SPEED_10 + DUPLEX_HALF:
8684 case SPEED_10 + DUPLEX_FULL:
8685 case SPEED_100 + DUPLEX_HALF:
8686 goto err_inval;
8687 default:
8688 break;
8689 }
8690 }
8691
8692 switch (spd + dplx) {
8693 case SPEED_10 + DUPLEX_HALF:
8694 mac->forced_speed_duplex = ADVERTISE_10_HALF;
8695 break;
8696 case SPEED_10 + DUPLEX_FULL:
8697 mac->forced_speed_duplex = ADVERTISE_10_FULL;
8698 break;
8699 case SPEED_100 + DUPLEX_HALF:
8700 mac->forced_speed_duplex = ADVERTISE_100_HALF;
8701 break;
8702 case SPEED_100 + DUPLEX_FULL:
8703 mac->forced_speed_duplex = ADVERTISE_100_FULL;
8704 break;
8705 case SPEED_1000 + DUPLEX_FULL:
8706 mac->autoneg = 1;
8707 adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
8708 break;
8709 case SPEED_1000 + DUPLEX_HALF:
8710 default:
8711 goto err_inval;
8712 }
8713
8714
8715 adapter->hw.phy.mdix = AUTO_ALL_MODES;
8716
8717 return 0;
8718
8719 err_inval:
8720 dev_err(&pdev->dev, "Unsupported Speed/Duplex configuration\n");
8721 return -EINVAL;
8722 }
8723
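/* __igb_shutdown - common suspend/shutdown path: detach the netdev, close
 * it if running, then either arm the Wake-on-LAN filters (WUFC/WUC) and
 * keep the link powered, or power the link down when no wake source is
 * configured.
 */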
8724 static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
8725 bool runtime)
8726 {
8727 struct net_device *netdev = pci_get_drvdata(pdev);
8728 struct igb_adapter *adapter = netdev_priv(netdev);
8729 struct e1000_hw *hw = &adapter->hw;
8730 u32 ctrl, rctl, status;
8731 u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol;
8732 bool wake;
8733
8734 rtnl_lock();
8735 netif_device_detach(netdev);
8736
8737 if (netif_running(netdev))
8738 __igb_close(netdev, true);
8739
8740 igb_ptp_suspend(adapter);
8741
8742 igb_clear_interrupt_scheme(adapter);
8743 rtnl_unlock();
8744
8745 status = rd32(E1000_STATUS);
8746 if (status & E1000_STATUS_LU)
8747 wufc &= ~E1000_WUFC_LNKC;
8748
8749 if (wufc) {
8750 igb_setup_rctl(adapter);
8751 igb_set_rx_mode(netdev);
8752
8753
8754 if (wufc & E1000_WUFC_MC) {
8755 rctl = rd32(E1000_RCTL);
8756 rctl |= E1000_RCTL_MPE;
8757 wr32(E1000_RCTL, rctl);
8758 }
8759
8760 ctrl = rd32(E1000_CTRL);
8761 ctrl |= E1000_CTRL_ADVD3WUC;
8762 wr32(E1000_CTRL, ctrl);
8763
8764
8765 igb_disable_pcie_master(hw);
8766
8767 wr32(E1000_WUC, E1000_WUC_PME_EN);
8768 wr32(E1000_WUFC, wufc);
8769 } else {
8770 wr32(E1000_WUC, 0);
8771 wr32(E1000_WUFC, 0);
8772 }
8773
8774 wake = wufc || adapter->en_mng_pt;
8775 if (!wake)
8776 igb_power_down_link(adapter);
8777 else
8778 igb_power_up_link(adapter);
8779
8780 if (enable_wake)
8781 *enable_wake = wake;
8782
8783
8784
8785
8786 igb_release_hw_control(adapter);
8787
8788 pci_disable_device(pdev);
8789
8790 return 0;
8791 }
8792
8793 static void igb_deliver_wake_packet(struct net_device *netdev)
8794 {
8795 struct igb_adapter *adapter = netdev_priv(netdev);
8796 struct e1000_hw *hw = &adapter->hw;
8797 struct sk_buff *skb;
8798 u32 wupl;
8799
8800 wupl = rd32(E1000_WUPL) & E1000_WUPL_MASK;
8801
8802
8803
8804
8805 if ((wupl == 0) || (wupl > E1000_WUPM_BYTES))
8806 return;
8807
8808 skb = netdev_alloc_skb_ip_align(netdev, E1000_WUPM_BYTES);
8809 if (!skb)
8810 return;
8811
8812 skb_put(skb, wupl);
8813
8814
8815 wupl = roundup(wupl, 4);
8816
8817 memcpy_fromio(skb->data, hw->hw_addr + E1000_WUPM_REG(0), wupl);
8818
8819 skb->protocol = eth_type_trans(skb, netdev);
8820 netif_rx(skb);
8821 }
8822
8823 static int __maybe_unused igb_suspend(struct device *dev)
8824 {
8825 return __igb_shutdown(to_pci_dev(dev), NULL, 0);
8826 }
8827
8828 static int __maybe_unused igb_resume(struct device *dev)
8829 {
8830 struct pci_dev *pdev = to_pci_dev(dev);
8831 struct net_device *netdev = pci_get_drvdata(pdev);
8832 struct igb_adapter *adapter = netdev_priv(netdev);
8833 struct e1000_hw *hw = &adapter->hw;
8834 u32 err, val;
8835
8836 pci_set_power_state(pdev, PCI_D0);
8837 pci_restore_state(pdev);
8838 pci_save_state(pdev);
8839
8840 if (!pci_device_is_present(pdev))
8841 return -ENODEV;
8842 err = pci_enable_device_mem(pdev);
8843 if (err) {
8844 dev_err(&pdev->dev,
8845 "igb: Cannot enable PCI device from suspend\n");
8846 return err;
8847 }
8848 pci_set_master(pdev);
8849
8850 pci_enable_wake(pdev, PCI_D3hot, 0);
8851 pci_enable_wake(pdev, PCI_D3cold, 0);
8852
8853 if (igb_init_interrupt_scheme(adapter, true)) {
8854 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
8855 return -ENOMEM;
8856 }
8857
8858 igb_reset(adapter);
8859
8860
8861
8862
8863 igb_get_hw_control(adapter);
8864
8865 val = rd32(E1000_WUS);
8866 if (val & WAKE_PKT_WUS)
8867 igb_deliver_wake_packet(netdev);
8868
8869 wr32(E1000_WUS, ~0);
8870
8871 rtnl_lock();
8872 if (!err && netif_running(netdev))
8873 err = __igb_open(netdev, true);
8874
8875 if (!err)
8876 netif_device_attach(netdev);
8877 rtnl_unlock();
8878
8879 return err;
8880 }
8881
8882 static int __maybe_unused igb_runtime_idle(struct device *dev)
8883 {
8884 struct net_device *netdev = dev_get_drvdata(dev);
8885 struct igb_adapter *adapter = netdev_priv(netdev);
8886
8887 if (!igb_has_link(adapter))
8888 pm_schedule_suspend(dev, MSEC_PER_SEC * 5);
8889
8890 return -EBUSY;
8891 }
8892
8893 static int __maybe_unused igb_runtime_suspend(struct device *dev)
8894 {
8895 return __igb_shutdown(to_pci_dev(dev), NULL, 1);
8896 }
8897
8898 static int __maybe_unused igb_runtime_resume(struct device *dev)
8899 {
8900 return igb_resume(dev);
8901 }
8902
8903 static void igb_shutdown(struct pci_dev *pdev)
8904 {
8905 bool wake;
8906
8907 __igb_shutdown(pdev, &wake, 0);
8908
8909 if (system_state == SYSTEM_POWER_OFF) {
8910 pci_wake_from_d3(pdev, wake);
8911 pci_set_power_state(pdev, PCI_D3hot);
8912 }
8913 }
8914
8915 #ifdef CONFIG_PCI_IOV
8916 static int igb_sriov_reinit(struct pci_dev *dev)
8917 {
8918 struct net_device *netdev = pci_get_drvdata(dev);
8919 struct igb_adapter *adapter = netdev_priv(netdev);
8920 struct pci_dev *pdev = adapter->pdev;
8921
8922 rtnl_lock();
8923
8924 if (netif_running(netdev))
8925 igb_close(netdev);
8926 else
8927 igb_reset(adapter);
8928
8929 igb_clear_interrupt_scheme(adapter);
8930
8931 igb_init_queue_configuration(adapter);
8932
8933 if (igb_init_interrupt_scheme(adapter, true)) {
8934 rtnl_unlock();
8935 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
8936 return -ENOMEM;
8937 }
8938
8939 if (netif_running(netdev))
8940 igb_open(netdev);
8941
8942 rtnl_unlock();
8943
8944 return 0;
8945 }
8946
8947 static int igb_pci_disable_sriov(struct pci_dev *dev)
8948 {
8949 int err = igb_disable_sriov(dev);
8950
8951 if (!err)
8952 err = igb_sriov_reinit(dev);
8953
8954 return err;
8955 }
8956
8957 static int igb_pci_enable_sriov(struct pci_dev *dev, int num_vfs)
8958 {
8959 int err = igb_enable_sriov(dev, num_vfs);
8960
8961 if (err)
8962 goto out;
8963
8964 err = igb_sriov_reinit(dev);
8965 if (!err)
8966 return num_vfs;
8967
8968 out:
8969 return err;
8970 }
8971
8972 #endif
8973 static int igb_pci_sriov_configure(struct pci_dev *dev, int num_vfs)
8974 {
8975 #ifdef CONFIG_PCI_IOV
8976 if (num_vfs == 0)
8977 return igb_pci_disable_sriov(dev);
8978 else
8979 return igb_pci_enable_sriov(dev, num_vfs);
8980 #endif
8981 return 0;
8982 }
8983
8984 /**
8985  * igb_io_error_detected - called when PCI error is detected
8986  * @pdev: Pointer to PCI device
8987  * @state: The current pci connection state
8988  *
8989  * This function is called after a PCI bus error affecting
8990  * this device has been detected.
8991  **/
8992 static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev,
8993 pci_channel_state_t state)
8994 {
8995 struct net_device *netdev = pci_get_drvdata(pdev);
8996 struct igb_adapter *adapter = netdev_priv(netdev);
8997
8998 netif_device_detach(netdev);
8999
9000 if (state == pci_channel_io_perm_failure)
9001 return PCI_ERS_RESULT_DISCONNECT;
9002
9003 if (netif_running(netdev))
9004 igb_down(adapter);
9005 pci_disable_device(pdev);
9006
9007
9008 return PCI_ERS_RESULT_NEED_RESET;
9009 }
9010
9011
9012
9013
9014
9015
9016
9017
9018 static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
9019 {
9020 struct net_device *netdev = pci_get_drvdata(pdev);
9021 struct igb_adapter *adapter = netdev_priv(netdev);
9022 struct e1000_hw *hw = &adapter->hw;
9023 pci_ers_result_t result;
9024
9025 if (pci_enable_device_mem(pdev)) {
9026 dev_err(&pdev->dev,
9027 "Cannot re-enable PCI device after reset.\n");
9028 result = PCI_ERS_RESULT_DISCONNECT;
9029 } else {
9030 pci_set_master(pdev);
9031 pci_restore_state(pdev);
9032 pci_save_state(pdev);
9033
9034 pci_enable_wake(pdev, PCI_D3hot, 0);
9035 pci_enable_wake(pdev, PCI_D3cold, 0);
9036
9037
9038
9039
9040 hw->hw_addr = adapter->io_addr;
9041
9042 igb_reset(adapter);
9043 wr32(E1000_WUS, ~0);
9044 result = PCI_ERS_RESULT_RECOVERED;
9045 }
9046
9047 return result;
9048 }
9049
9050
9051
9052
9053
9054
9055
9056
9057
9058 static void igb_io_resume(struct pci_dev *pdev)
9059 {
9060 struct net_device *netdev = pci_get_drvdata(pdev);
9061 struct igb_adapter *adapter = netdev_priv(netdev);
9062
9063 if (netif_running(netdev)) {
9064 if (igb_up(adapter)) {
9065 dev_err(&pdev->dev, "igb_up failed after reset\n");
9066 return;
9067 }
9068 }
9069
9070 netif_device_attach(netdev);
9071
9072
9073
9074
9075 igb_get_hw_control(adapter);
9076 }
9077
9078
9079
9080
9081
9082
9083 static void igb_rar_set_index(struct igb_adapter *adapter, u32 index)
9084 {
9085 struct e1000_hw *hw = &adapter->hw;
9086 u32 rar_low, rar_high;
9087 u8 *addr = adapter->mac_table[index].addr;
9088
9089
9090
9091
9092
9093
9094 rar_low = le32_to_cpup((__le32 *)(addr));
9095 rar_high = le16_to_cpup((__le16 *)(addr + 4));
9096
9097
9098 if (adapter->mac_table[index].state & IGB_MAC_STATE_IN_USE) {
9099 if (is_valid_ether_addr(addr))
9100 rar_high |= E1000_RAH_AV;
9101
9102 if (adapter->mac_table[index].state & IGB_MAC_STATE_SRC_ADDR)
9103 rar_high |= E1000_RAH_ASEL_SRC_ADDR;
9104
9105 switch (hw->mac.type) {
9106 case e1000_82575:
9107 case e1000_i210:
9108 if (adapter->mac_table[index].state &
9109 IGB_MAC_STATE_QUEUE_STEERING)
9110 rar_high |= E1000_RAH_QSEL_ENABLE;
9111
9112 rar_high |= E1000_RAH_POOL_1 *
9113 adapter->mac_table[index].queue;
9114 break;
9115 default:
9116 rar_high |= E1000_RAH_POOL_1 <<
9117 adapter->mac_table[index].queue;
9118 break;
9119 }
9120 }
9121
9122 wr32(E1000_RAL(index), rar_low);
9123 wrfl();
9124 wr32(E1000_RAH(index), rar_high);
9125 wrfl();
9126 }
9127
9128 static int igb_set_vf_mac(struct igb_adapter *adapter,
9129 int vf, unsigned char *mac_addr)
9130 {
9131 struct e1000_hw *hw = &adapter->hw;
9132
9133
9134
9135 int rar_entry = hw->mac.rar_entry_count - (vf + 1);
9136 unsigned char *vf_mac_addr = adapter->vf_data[vf].vf_mac_addresses;
9137
9138 ether_addr_copy(vf_mac_addr, mac_addr);
9139 ether_addr_copy(adapter->mac_table[rar_entry].addr, mac_addr);
9140 adapter->mac_table[rar_entry].queue = vf;
9141 adapter->mac_table[rar_entry].state |= IGB_MAC_STATE_IN_USE;
9142 igb_rar_set_index(adapter, rar_entry);
9143
9144 return 0;
9145 }
9146
9147 static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
9148 {
9149 struct igb_adapter *adapter = netdev_priv(netdev);
9150
9151 if (vf >= adapter->vfs_allocated_count)
9152 return -EINVAL;
9153
9154
9155
9156
9157
9158
9159
9160 if (is_zero_ether_addr(mac)) {
9161 adapter->vf_data[vf].flags &= ~IGB_VF_FLAG_PF_SET_MAC;
9162 dev_info(&adapter->pdev->dev,
9163 "remove administratively set MAC on VF %d\n",
9164 vf);
9165 } else if (is_valid_ether_addr(mac)) {
9166 adapter->vf_data[vf].flags |= IGB_VF_FLAG_PF_SET_MAC;
9167 dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n",
9168 mac, vf);
9169 dev_info(&adapter->pdev->dev,
9170 "Reload the VF driver to make this change effective.");
9171
9172 if (test_bit(__IGB_DOWN, &adapter->state)) {
9173 dev_warn(&adapter->pdev->dev,
9174 "The VF MAC address has been set, but the PF device is not up.\n");
9175 dev_warn(&adapter->pdev->dev,
9176 "Bring the PF device up before attempting to use the VF device.\n");
9177 }
9178 } else {
9179 return -EINVAL;
9180 }
9181 return igb_set_vf_mac(adapter, vf, mac);
9182 }
9183
9184 static int igb_link_mbps(int internal_link_speed)
9185 {
9186 switch (internal_link_speed) {
9187 case SPEED_100:
9188 return 100;
9189 case SPEED_1000:
9190 return 1000;
9191 default:
9192 return 0;
9193 }
9194 }
9195
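/* igb_set_vf_rate_limit - program the Tx rate limiter for one VF pool.
 * The limiter factor is link_speed/tx_rate split into an integer part
 * (rf_int) and a fractional part (rf_dec); e.g. at 1000 Mbps with a
 * 400 Mbps limit, rf_int = 2 and rf_dec encodes the remaining 0.5.
 */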
9196 static void igb_set_vf_rate_limit(struct e1000_hw *hw, int vf, int tx_rate,
9197 int link_speed)
9198 {
9199 int rf_dec, rf_int;
9200 u32 bcnrc_val;
9201
9202 if (tx_rate != 0) {
9203
9204 rf_int = link_speed / tx_rate;
9205 rf_dec = (link_speed - (rf_int * tx_rate));
9206 rf_dec = (rf_dec * BIT(E1000_RTTBCNRC_RF_INT_SHIFT)) /
9207 tx_rate;
9208
9209 bcnrc_val = E1000_RTTBCNRC_RS_ENA;
9210 bcnrc_val |= ((rf_int << E1000_RTTBCNRC_RF_INT_SHIFT) &
9211 E1000_RTTBCNRC_RF_INT_MASK);
9212 bcnrc_val |= (rf_dec & E1000_RTTBCNRC_RF_DEC_MASK);
9213 } else {
9214 bcnrc_val = 0;
9215 }
9216
9217 wr32(E1000_RTTDQSEL, vf);
9218
9219
9220
9221 wr32(E1000_RTTBCNRM, 0x14);
9222 wr32(E1000_RTTBCNRC, bcnrc_val);
9223 }
9224
9225 static void igb_check_vf_rate_limit(struct igb_adapter *adapter)
9226 {
9227 int actual_link_speed, i;
9228 bool reset_rate = false;
9229
9230
9231 if ((adapter->vf_rate_link_speed == 0) ||
9232 (adapter->hw.mac.type != e1000_82576))
9233 return;
9234
9235 actual_link_speed = igb_link_mbps(adapter->link_speed);
9236 if (actual_link_speed != adapter->vf_rate_link_speed) {
9237 reset_rate = true;
9238 adapter->vf_rate_link_speed = 0;
9239 dev_info(&adapter->pdev->dev,
9240 "Link speed has been changed. VF Transmit rate is disabled\n");
9241 }
9242
9243 for (i = 0; i < adapter->vfs_allocated_count; i++) {
9244 if (reset_rate)
9245 adapter->vf_data[i].tx_rate = 0;
9246
9247 igb_set_vf_rate_limit(&adapter->hw, i,
9248 adapter->vf_data[i].tx_rate,
9249 actual_link_speed);
9250 }
9251 }
9252
9253 static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf,
9254 int min_tx_rate, int max_tx_rate)
9255 {
9256 struct igb_adapter *adapter = netdev_priv(netdev);
9257 struct e1000_hw *hw = &adapter->hw;
9258 int actual_link_speed;
9259
9260 if (hw->mac.type != e1000_82576)
9261 return -EOPNOTSUPP;
9262
9263 if (min_tx_rate)
9264 return -EINVAL;
9265
9266 actual_link_speed = igb_link_mbps(adapter->link_speed);
9267 if ((vf >= adapter->vfs_allocated_count) ||
9268 (!(rd32(E1000_STATUS) & E1000_STATUS_LU)) ||
9269 (max_tx_rate < 0) ||
9270 (max_tx_rate > actual_link_speed))
9271 return -EINVAL;
9272
9273 adapter->vf_rate_link_speed = actual_link_speed;
9274 adapter->vf_data[vf].tx_rate = (u16)max_tx_rate;
9275 igb_set_vf_rate_limit(hw, vf, max_tx_rate, actual_link_speed);
9276
9277 return 0;
9278 }
9279
9280 static int igb_ndo_set_vf_spoofchk(struct net_device *netdev, int vf,
9281 bool setting)
9282 {
9283 struct igb_adapter *adapter = netdev_priv(netdev);
9284 struct e1000_hw *hw = &adapter->hw;
9285 u32 reg_val, reg_offset;
9286
9287 if (!adapter->vfs_allocated_count)
9288 return -EOPNOTSUPP;
9289
9290 if (vf >= adapter->vfs_allocated_count)
9291 return -EINVAL;
9292
9293 reg_offset = (hw->mac.type == e1000_82576) ? E1000_DTXSWC : E1000_TXSWC;
9294 reg_val = rd32(reg_offset);
9295 if (setting)
9296 reg_val |= (BIT(vf) |
9297 BIT(vf + E1000_DTXSWC_VLAN_SPOOF_SHIFT));
9298 else
9299 reg_val &= ~(BIT(vf) |
9300 BIT(vf + E1000_DTXSWC_VLAN_SPOOF_SHIFT));
9301 wr32(reg_offset, reg_val);
9302
9303 adapter->vf_data[vf].spoofchk_enabled = setting;
9304 return 0;
9305 }
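/* Illustrative note: DTXSWC/TXSWC packs the per-VF MAC anti-spoof enables
 * in the low bits and the VLAN anti-spoof enables
 * E1000_DTXSWC_VLAN_SPOOF_SHIFT bits higher (8 on these parts, assuming
 * that define is unchanged). So for vf == 2, enabling spoof checking sets
 * bits 2 and 10 in a single read-modify-write of the register.
 */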
9306
9307 static int igb_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting)
9308 {
9309 struct igb_adapter *adapter = netdev_priv(netdev);
9310
9311 if (vf >= adapter->vfs_allocated_count)
9312 return -EINVAL;
9313 if (adapter->vf_data[vf].trusted == setting)
9314 return 0;
9315
9316 adapter->vf_data[vf].trusted = setting;
9317
9318 dev_info(&adapter->pdev->dev, "VF %u is %strusted\n",
9319 vf, setting ? "" : "not ");
9320 return 0;
9321 }
9322
9323 static int igb_ndo_get_vf_config(struct net_device *netdev,
9324 int vf, struct ifla_vf_info *ivi)
9325 {
9326 struct igb_adapter *adapter = netdev_priv(netdev);
9327 if (vf >= adapter->vfs_allocated_count)
9328 return -EINVAL;
9329 ivi->vf = vf;
9330 memcpy(&ivi->mac, adapter->vf_data[vf].vf_mac_addresses, ETH_ALEN);
9331 ivi->max_tx_rate = adapter->vf_data[vf].tx_rate;
9332 ivi->min_tx_rate = 0;
9333 ivi->vlan = adapter->vf_data[vf].pf_vlan;
9334 ivi->qos = adapter->vf_data[vf].pf_qos;
9335 ivi->spoofchk = adapter->vf_data[vf].spoofchk_enabled;
9336 ivi->trusted = adapter->vf_data[vf].trusted;
9337 return 0;
9338 }
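/* Illustrative note: the ifla_vf_info filled here is what userspace sees,
 * e.g. "ip link show <pf-netdev>" prints something like
 *
 *   vf 0 MAC 52:54:00:12:34:56, vlan 100, spoof checking on, trust off
 *
 * (the exact fields shown vary with the iproute2 version).
 */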
9339
9340 static void igb_vmm_control(struct igb_adapter *adapter)
9341 {
9342 struct e1000_hw *hw = &adapter->hw;
9343 u32 reg;
9344
9345 switch (hw->mac.type) {
9346 case e1000_82575:
9347 case e1000_i210:
9348 case e1000_i211:
9349 case e1000_i354:
9350 default:
9351 /* replication is not supported on these devices */
9352 return;
9353 case e1000_82576:
9354 /* notify HW that the MAC is adding vlan tags */
9355 reg = rd32(E1000_DTXCTL);
9356 reg |= E1000_DTXCTL_VLAN_ADDED;
9357 wr32(E1000_DTXCTL, reg);
9358 /* Fall through */
9359 case e1000_82580:
9360 /* enable replication vlan tag stripping */
9361 reg = rd32(E1000_RPLOLR);
9362 reg |= E1000_RPLOLR_STRVLAN;
9363 wr32(E1000_RPLOLR, reg);
9364 /* Fall through */
9365 case e1000_i350:
9366 /* none of the above registers are supported by i350 */
9367 break;
9368 }
9369
9370 if (adapter->vfs_allocated_count) {
9371 igb_vmdq_set_loopback_pf(hw, true);
9372 igb_vmdq_set_replication_pf(hw, true);
9373 igb_vmdq_set_anti_spoofing_pf(hw, true,
9374 adapter->vfs_allocated_count);
9375 } else {
9376 igb_vmdq_set_loopback_pf(hw, false);
9377 igb_vmdq_set_replication_pf(hw, false);
9378 }
9379 }
9380
9381 static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
9382 {
9383 struct e1000_hw *hw = &adapter->hw;
9384 u32 dmac_thr;
9385 u16 hwm;
9386
9387 if (hw->mac.type > e1000_82580) {
9388 if (adapter->flags & IGB_FLAG_DMAC) {
9389 u32 reg;
9390
9391 /* force threshold to 0 */
9392 wr32(E1000_DMCTXTH, 0);
9393
9394 /* DMA Coalescing high water mark needs to be greater
9395  * than the Rx threshold. Set hwm to PBA - max frame
9396  * size in 16B units, capped at PBA - 6KB.
9397  */
9398 hwm = 64 * (pba - 6);
9399 reg = rd32(E1000_FCRTC);
9400 reg &= ~E1000_FCRTC_RTH_COAL_MASK;
9401 reg |= ((hwm << E1000_FCRTC_RTH_COAL_SHIFT)
9402 & E1000_FCRTC_RTH_COAL_MASK);
9403 wr32(E1000_FCRTC, reg);
9404
9405 /* Set the DMA Coalescing Rx threshold to PBA - 2 * max
9406  * frame size, capped at PBA - 10KB.
9407  */
9408 dmac_thr = pba - 10;
9409 reg = rd32(E1000_DMACR);
9410 reg &= ~E1000_DMACR_DMACTHR_MASK;
9411 reg |= ((dmac_thr << E1000_DMACR_DMACTHR_SHIFT)
9412 & E1000_DMACR_DMACTHR_MASK);
9413
9414 /* transition to L0x or L1 if available */
9415 reg |= (E1000_DMACR_DMAC_EN | E1000_DMACR_DMAC_LX_MASK);
9416
9417 /* watchdog timer = +-1000 usec in 32 usec intervals */
9418 reg |= (1000 >> 5);
9419
9420 /* Disable BMC-to-OS Watchdog Enable */
9421 if (hw->mac.type != e1000_i354)
9422 reg &= ~E1000_DMACR_DC_BMC2OSW_EN;
9423
9424 wr32(E1000_DMACR, reg);
9425
9426 /* no lower threshold to disable
9427  * coalescing (smart FIFO) - UTRESH = 0
9428  */
9429 wr32(E1000_DMCRTRH, 0);
9430
9431 reg = (IGB_DMCTLX_DCFLUSH_DIS | 0x4);
9432
9433 wr32(E1000_DMCTLX, reg);
9434
9435 /* free space in tx packet buffer to wake from
9436  * DMA coalescing
9437  */
9438 wr32(E1000_DMCTXTH, (IGB_MIN_TXPBSIZE -
9439 (IGB_TX_BUF_4096 + adapter->max_frame_size)) >> 6);
9440
9441 /* make low power state decision controlled
9442  * by DMA coalescing
9443  */
9444 reg = rd32(E1000_PCIEMISC);
9445 reg &= ~E1000_PCIEMISC_LX_DECISION;
9446 wr32(E1000_PCIEMISC, reg);
9447 }
9448 } else if (hw->mac.type == e1000_82580) {
9449 u32 reg = rd32(E1000_PCIEMISC);
9450
9451 wr32(E1000_PCIEMISC, reg & ~E1000_PCIEMISC_LX_DECISION);
9452 wr32(E1000_DMACR, 0);
9453 }
9454 }
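/* Worked example (illustrative): pba here is the Rx packet buffer size in
 * KB, so with pba == 34 the code above programs
 *
 *   hwm      = 64 * (34 - 6) = 1792   (28 KB expressed in 16-byte units)
 *   dmac_thr = 34 - 10       = 24     (KB, written to the DMACR threshold)
 *
 * keeping the coalescing high water mark (PBA - 6KB) above the Rx
 * threshold (PBA - 10KB), as the restored comments above require.
 */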
9455
9456 /**
9457  * igb_read_i2c_byte - Reads 8 bit word over I2C
9458  * @hw: pointer to hardware structure
9459  * @byte_offset: byte offset to read
9460  * @dev_addr: device address
9461  * @data: value read
9462  *
9463  * Performs byte read operation over I2C interface at
9464  * a specified device address.
9465  **/
9466 s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
9467 u8 dev_addr, u8 *data)
9468 {
9469 struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw);
9470 struct i2c_client *this_client = adapter->i2c_client;
9471 s32 status;
9472 u16 swfw_mask = 0;
9473
9474 if (!this_client)
9475 return E1000_ERR_I2C;
9476
9477 swfw_mask = E1000_SWFW_PHY0_SM;
9478
9479 if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
9480 return E1000_ERR_SWFW_SYNC;
9481
9482 status = i2c_smbus_read_byte_data(this_client, byte_offset);
9483 hw->mac.ops.release_swfw_sync(hw, swfw_mask);
9484
9485 if (status < 0)
9486 return E1000_ERR_I2C;
9487 else {
9488 *data = status;
9489 return 0;
9490 }
9491 }
9492
9493 /**
9494  * igb_write_i2c_byte - Writes 8 bit word over I2C
9495  * @hw: pointer to hardware structure
9496  * @byte_offset: byte offset to write
9497  * @dev_addr: device address
9498  * @data: value to write
9499  *
9500  * Performs byte write operation over I2C interface at
9501  * a specified device address.
9502  **/
9503 s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
9504 u8 dev_addr, u8 data)
9505 {
9506 struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw);
9507 struct i2c_client *this_client = adapter->i2c_client;
9508 s32 status;
9509 u16 swfw_mask = E1000_SWFW_PHY0_SM;
9510
9511 if (!this_client)
9512 return E1000_ERR_I2C;
9513
9514 if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
9515 return E1000_ERR_SWFW_SYNC;
9516 status = i2c_smbus_write_byte_data(this_client, byte_offset, data);
9517 hw->mac.ops.release_swfw_sync(hw, swfw_mask);
9518
9519 if (status)
9520 return E1000_ERR_I2C;
9521 else
9522 return 0;
9523
9524 }
9525
9526 int igb_reinit_queues(struct igb_adapter *adapter)
9527 {
9528 struct net_device *netdev = adapter->netdev;
9529 struct pci_dev *pdev = adapter->pdev;
9530 int err = 0;
9531
9532 if (netif_running(netdev))
9533 igb_close(netdev);
9534
9535 igb_reset_interrupt_capability(adapter);
9536
9537 if (igb_init_interrupt_scheme(adapter, true)) {
9538 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
9539 return -ENOMEM;
9540 }
9541
9542 if (netif_running(netdev))
9543 err = igb_open(netdev);
9544
9545 return err;
9546 }
9547
9548 static void igb_nfc_filter_exit(struct igb_adapter *adapter)
9549 {
9550 struct igb_nfc_filter *rule;
9551
9552 spin_lock(&adapter->nfc_lock);
9553
9554 hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node)
9555 igb_erase_filter(adapter, rule);
9556
9557 hlist_for_each_entry(rule, &adapter->cls_flower_list, nfc_node)
9558 igb_erase_filter(adapter, rule);
9559
9560 spin_unlock(&adapter->nfc_lock);
9561 }
9562
9563 static void igb_nfc_filter_restore(struct igb_adapter *adapter)
9564 {
9565 struct igb_nfc_filter *rule;
9566
9567 spin_lock(&adapter->nfc_lock);
9568
9569 hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node)
9570 igb_add_filter(adapter, rule);
9571
9572 spin_unlock(&adapter->nfc_lock);
9573 }
9574