This source file includes the following definitions:
- i40e_allocate_dma_mem_d
- i40e_free_dma_mem_d
- i40e_allocate_virt_mem_d
- i40e_free_virt_mem_d
- i40e_get_lump
- i40e_put_lump
- i40e_find_vsi_from_id
- i40e_service_event_schedule
- i40e_tx_timeout
- i40e_get_vsi_stats_struct
- i40e_get_netdev_stats_struct_tx
- i40e_get_netdev_stats_struct
- i40e_vsi_reset_stats
- i40e_pf_reset_stats
- i40e_stat_update48
- i40e_stat_update32
- i40e_stat_update_and_clear32
- i40e_update_eth_stats
- i40e_update_veb_stats
- i40e_update_vsi_stats
- i40e_update_pf_stats
- i40e_update_stats
- i40e_find_filter
- i40e_find_mac
- i40e_is_vsi_in_vlan
- i40e_correct_mac_vlan_filters
- i40e_rm_default_mac_filter
- i40e_add_filter
- __i40e_del_filter
- i40e_del_filter
- i40e_add_mac_filter
- i40e_del_mac_filter
- i40e_set_mac
- i40e_config_rss_aq
- i40e_vsi_config_rss
- i40e_vsi_setup_queue_map_mqprio
- i40e_vsi_setup_queue_map
- i40e_addr_sync
- i40e_addr_unsync
- i40e_set_rx_mode
- i40e_undo_del_filter_entries
- i40e_undo_add_filter_entries
- i40e_next_filter
- i40e_update_filter_state
- i40e_aqc_del_filters
- i40e_aqc_add_filters
- i40e_aqc_broadcast_filter
- i40e_set_promiscuous
- i40e_sync_vsi_filters
- i40e_sync_filters_subtask
- i40e_max_xdp_frame_size
- i40e_change_mtu
- i40e_ioctl
- i40e_vlan_stripping_enable
- i40e_vlan_stripping_disable
- i40e_add_vlan_all_mac
- i40e_vsi_add_vlan
- i40e_rm_vlan_all_mac
- i40e_vsi_kill_vlan
- i40e_vlan_rx_add_vid
- i40e_vlan_rx_add_vid_up
- i40e_vlan_rx_kill_vid
- i40e_restore_vlan
- i40e_vsi_add_pvid
- i40e_vsi_remove_pvid
- i40e_vsi_setup_tx_resources
- i40e_vsi_free_tx_resources
- i40e_vsi_setup_rx_resources
- i40e_vsi_free_rx_resources
- i40e_config_xps_tx_ring
- i40e_xsk_umem
- i40e_configure_tx_ring
- i40e_configure_rx_ring
- i40e_vsi_configure_tx
- i40e_vsi_configure_rx
- i40e_vsi_config_dcb_rings
- i40e_set_vsi_rx_mode
- i40e_fdir_filter_restore
- i40e_vsi_configure
- i40e_vsi_configure_msix
- i40e_enable_misc_int_causes
- i40e_configure_msi_and_legacy
- i40e_irq_dynamic_disable_icr0
- i40e_irq_dynamic_enable_icr0
- i40e_msix_clean_rings
- i40e_irq_affinity_notify
- i40e_irq_affinity_release
- i40e_vsi_request_irq_msix
- i40e_vsi_disable_irq
- i40e_vsi_enable_irq
- i40e_free_misc_vector
- i40e_intr
- i40e_clean_fdir_tx_irq
- i40e_fdir_clean_ring
- i40e_map_vector_to_qp
- i40e_vsi_map_rings_to_vectors
- i40e_vsi_request_irq
- i40e_netpoll
- i40e_pf_txq_wait
- i40e_control_tx_q
- i40e_control_wait_tx_q
- i40e_vsi_control_tx
- i40e_pf_rxq_wait
- i40e_control_rx_q
- i40e_control_wait_rx_q
- i40e_vsi_control_rx
- i40e_vsi_start_rings
- i40e_vsi_stop_rings
- i40e_vsi_stop_rings_no_wait
- i40e_vsi_free_irq
- i40e_free_q_vector
- i40e_vsi_free_q_vectors
- i40e_reset_interrupt_capability
- i40e_clear_interrupt_scheme
- i40e_napi_enable_all
- i40e_napi_disable_all
- i40e_vsi_close
- i40e_quiesce_vsi
- i40e_unquiesce_vsi
- i40e_pf_quiesce_all_vsi
- i40e_pf_unquiesce_all_vsi
- i40e_vsi_wait_queues_disabled
- i40e_pf_wait_queues_disabled
- i40e_get_iscsi_tc_map
- i40e_dcb_get_num_tc
- i40e_dcb_get_enabled_tc
- i40e_mqprio_get_enabled_tc
- i40e_pf_get_num_tc
- i40e_pf_get_tc_map
- i40e_vsi_get_bw_info
- i40e_vsi_configure_bw_alloc
- i40e_vsi_config_netdev_tc
- i40e_vsi_update_queue_map
- i40e_vsi_config_tc
- i40e_get_link_speed
- i40e_set_bw_limit
- i40e_remove_queue_channels
- i40e_is_any_channel
- i40e_get_max_queues_for_channel
- i40e_validate_num_queues
- i40e_vsi_reconfig_rss
- i40e_channel_setup_queue_map
- i40e_add_channel
- i40e_channel_config_bw
- i40e_channel_config_tx_ring
- i40e_setup_hw_channel
- i40e_setup_channel
- i40e_validate_and_set_switch_mode
- i40e_create_queue_channel
- i40e_configure_queue_channels
- i40e_veb_config_tc
- i40e_dcb_reconfigure
- i40e_resume_port_tx
- i40e_init_pf_dcb
- i40e_print_link_message
- i40e_up_complete
- i40e_vsi_reinit_locked
- i40e_up
- i40e_force_link_state
- i40e_down
- i40e_validate_mqprio_qopt
- i40e_vsi_set_default_tc_config
- i40e_del_macvlan_filter
- i40e_add_macvlan_filter
- i40e_reset_ch_rings
- i40e_free_macvlan_channels
- i40e_fwd_ring_up
- i40e_setup_macvlans
- i40e_fwd_add
- i40e_del_all_macvlans
- i40e_fwd_del
- i40e_setup_tc
- i40e_set_cld_element
- i40e_add_del_cloud_filter
- i40e_add_del_cloud_filter_big_buf
- i40e_parse_cls_flower
- i40e_handle_tclass
- i40e_configure_clsflower
- i40e_find_cloud_filter
- i40e_delete_clsflower
- i40e_setup_tc_cls_flower
- i40e_setup_tc_block_cb
- __i40e_setup_tc
- i40e_open
- i40e_vsi_open
- i40e_fdir_filter_exit
- i40e_cloud_filter_exit
- i40e_close
- i40e_do_reset
- i40e_dcb_need_reconfig
- i40e_handle_lldp_event
- i40e_do_reset_safe
- i40e_handle_lan_overflow_event
- i40e_get_cur_guaranteed_fd_count
- i40e_get_current_fd_count
- i40e_get_global_fd_count
- i40e_reenable_fdir_sb
- i40e_reenable_fdir_atr
- i40e_delete_invalid_filter
- i40e_fdir_check_and_reenable
- i40e_fdir_flush_and_replay
- i40e_get_current_atr_cnt
- i40e_fdir_reinit_subtask
- i40e_vsi_link_event
- i40e_veb_link_event
- i40e_link_event
- i40e_watchdog_subtask
- i40e_reset_subtask
- i40e_handle_link_event
- i40e_clean_adminq_subtask
- i40e_verify_eeprom
- i40e_enable_pf_switch_lb
- i40e_disable_pf_switch_lb
- i40e_config_bridge_mode
- i40e_reconstitute_veb
- i40e_get_capabilities
- i40e_fdir_sb_setup
- i40e_fdir_teardown
- i40e_rebuild_cloud_filters
- i40e_rebuild_channels
- i40e_prep_for_reset
- i40e_send_version
- i40e_get_oem_version
- i40e_reset
- i40e_rebuild
- i40e_reset_and_rebuild
- i40e_handle_reset_warning
- i40e_handle_mdd_event
- i40e_tunnel_name
- i40e_sync_udp_filters
- i40e_sync_udp_filters_subtask
- i40e_service_task
- i40e_service_timer
- i40e_set_num_rings_in_vsi
- i40e_vsi_alloc_arrays
- i40e_vsi_mem_alloc
- i40e_vsi_free_arrays
- i40e_clear_rss_config_user
- i40e_vsi_clear
- i40e_vsi_clear_rings
- i40e_alloc_rings
- i40e_reserve_msix_vectors
- i40e_init_msix
- i40e_vsi_alloc_q_vector
- i40e_vsi_alloc_q_vectors
- i40e_init_interrupt_scheme
- i40e_restore_interrupt_scheme
- i40e_setup_misc_vector_for_recovery_mode
- i40e_setup_misc_vector
- i40e_get_rss_aq
- i40e_config_rss_reg
- i40e_get_rss_reg
- i40e_config_rss
- i40e_get_rss
- i40e_fill_rss_lut
- i40e_pf_config_rss
- i40e_reconfig_rss_queues
- i40e_get_partition_bw_setting
- i40e_set_partition_bw_setting
- i40e_commit_partition_bw_setting
- i40e_sw_init
- i40e_set_ntuple
- i40e_clear_rss_lut
- i40e_set_features
- i40e_get_udp_port_idx
- i40e_udp_tunnel_add
- i40e_udp_tunnel_del
- i40e_get_phys_port_id
- i40e_ndo_fdb_add
- i40e_ndo_bridge_setlink
- i40e_ndo_bridge_getlink
- i40e_features_check
- i40e_xdp_setup
- i40e_enter_busy_conf
- i40e_exit_busy_conf
- i40e_queue_pair_reset_stats
- i40e_queue_pair_clean_rings
- i40e_queue_pair_toggle_napi
- i40e_queue_pair_toggle_rings
- i40e_queue_pair_enable_irq
- i40e_queue_pair_disable_irq
- i40e_queue_pair_disable
- i40e_queue_pair_enable
- i40e_xdp
- i40e_config_netdev
- i40e_vsi_delete
- i40e_is_vsi_uplink_mode_veb
- i40e_add_vsi
- i40e_vsi_release
- i40e_vsi_setup_vectors
- i40e_vsi_reinit_setup
- i40e_vsi_setup
- i40e_veb_get_bw_info
- i40e_veb_mem_alloc
- i40e_switch_branch_release
- i40e_veb_clear
- i40e_veb_release
- i40e_add_veb
- i40e_veb_setup
- i40e_setup_pf_switch_element
- i40e_fetch_switch_configuration
- i40e_setup_pf_switch
- i40e_determine_queue_usage
- i40e_setup_pf_filter_control
- i40e_print_features
- i40e_get_platform_mac_addr
- i40e_set_fec_in_flags
- i40e_check_recovery_mode
- i40e_pf_loop_reset
- i40e_init_recovery_mode
- i40e_probe
- i40e_remove
- i40e_pci_error_detected
- i40e_pci_error_slot_reset
- i40e_pci_error_reset_prepare
- i40e_pci_error_reset_done
- i40e_pci_error_resume
- i40e_enable_mc_magic_wake
- i40e_shutdown
- i40e_suspend
- i40e_resume
- i40e_init_module
- i40e_exit_module
#include <linux/etherdevice.h>
#include <linux/of_net.h>
#include <linux/pci.h>
#include <linux/bpf.h>

#include "i40e.h"
#include "i40e_diag.h"
#include "i40e_xsk.h"
#include <net/udp_tunnel.h>
#include <net/xdp_sock.h>

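/* All i40e tracepoints are defined by the include below, which
 * must only be included once in the whole driver with
 * CREATE_TRACE_POINTS defined.
 */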
#define CREATE_TRACE_POINTS
#include "i40e_trace.h"

const char i40e_driver_name[] = "i40e";
static const char i40e_driver_string[] =
                "Intel(R) Ethernet Connection XL710 Network Driver";

#define DRV_KERN "-k"

#define DRV_VERSION_MAJOR 2
#define DRV_VERSION_MINOR 8
#define DRV_VERSION_BUILD 20
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
             __stringify(DRV_VERSION_MINOR) "." \
             __stringify(DRV_VERSION_BUILD)    DRV_KERN
const char i40e_driver_version_str[] = DRV_VERSION;
static const char i40e_copyright[] = "Copyright (c) 2013 - 2019 Intel Corporation.";

/* forward declarations of local functions */
static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi);
static void i40e_handle_reset_warning(struct i40e_pf *pf, bool lock_acquired);
static int i40e_add_vsi(struct i40e_vsi *vsi);
static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi);
static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit);
static int i40e_setup_misc_vector(struct i40e_pf *pf);
static void i40e_determine_queue_usage(struct i40e_pf *pf);
static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
static void i40e_prep_for_reset(struct i40e_pf *pf, bool lock_acquired);
static int i40e_reset(struct i40e_pf *pf);
static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired);
static int i40e_setup_misc_vector_for_recovery_mode(struct i40e_pf *pf);
static int i40e_restore_interrupt_scheme(struct i40e_pf *pf);
static bool i40e_check_recovery_mode(struct i40e_pf *pf);
static int i40e_init_recovery_mode(struct i40e_pf *pf, struct i40e_hw *hw);
static void i40e_fdir_sb_setup(struct i40e_pf *pf);
static int i40e_veb_get_bw_info(struct i40e_veb *veb);
static int i40e_get_capabilities(struct i40e_pf *pf,
                                 enum i40e_admin_queue_opc list_type);

/* i40e_pci_tbl - PCI Device ID Table
 *
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id i40e_pci_tbl[] = {
        {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_XL710), 0},
        {PCI_VDEVICE(INTEL, I40E_DEV_ID_QEMU), 0},
        {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_B), 0},
        {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_C), 0},
        {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_A), 0},
        {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_B), 0},
        {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0},
        {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T), 0},
        {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T4), 0},
        {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_BC), 0},
        {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_SFP), 0},
        {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_B), 0},
        {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_X722), 0},
        {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_X722), 0},
        {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X722), 0},
        {PCI_VDEVICE(INTEL, I40E_DEV_ID_1G_BASE_T_X722), 0},
        {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_X722), 0},
        {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_I_X722), 0},
        {PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0},
        {PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2_A), 0},
        {PCI_VDEVICE(INTEL, I40E_DEV_ID_X710_N3000), 0},
        {PCI_VDEVICE(INTEL, I40E_DEV_ID_XXV710_N3000), 0},
        {PCI_VDEVICE(INTEL, I40E_DEV_ID_25G_B), 0},
        {PCI_VDEVICE(INTEL, I40E_DEV_ID_25G_SFP28), 0},
        /* required last entry */
        {0, }
};
MODULE_DEVICE_TABLE(pci, i40e_pci_tbl);

#define I40E_MAX_VF_COUNT 128
static int debug = -1;
module_param(debug, uint, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all), Debug mask (0x8XXXXXXX)");

MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Ethernet Connection XL710 Network Driver");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);

static struct workqueue_struct *i40e_wq;

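/**
 * i40e_allocate_dma_mem_d - OS specific memory alloc for shared code
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to fill out
 * @size: size of memory requested
 * @alignment: what to align the allocation to
 **/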
int i40e_allocate_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem,
                            u64 size, u32 alignment)
{
        struct i40e_pf *pf = (struct i40e_pf *)hw->back;

        mem->size = ALIGN(size, alignment);
        mem->va = dma_alloc_coherent(&pf->pdev->dev, mem->size, &mem->pa,
                                     GFP_KERNEL);
        if (!mem->va)
                return -ENOMEM;

        return 0;
}

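/**
 * i40e_free_dma_mem_d - OS specific memory free for shared code
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to free
 **/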
int i40e_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem)
{
        struct i40e_pf *pf = (struct i40e_pf *)hw->back;

        dma_free_coherent(&pf->pdev->dev, mem->size, mem->va, mem->pa);
        mem->va = NULL;
        mem->pa = 0;
        mem->size = 0;

        return 0;
}

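/**
 * i40e_allocate_virt_mem_d - OS specific memory alloc for shared code
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to fill out
 * @size: size of memory requested
 **/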
int i40e_allocate_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem,
                             u32 size)
{
        mem->size = size;
        mem->va = kzalloc(size, GFP_KERNEL);

        if (!mem->va)
                return -ENOMEM;

        return 0;
}

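/**
 * i40e_free_virt_mem_d - OS specific memory free for shared code
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to free
 **/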
int i40e_free_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem)
{
        kfree(mem->va);
        mem->va = NULL;
        mem->size = 0;

        return 0;
}

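/**
 * i40e_get_lump - find a lump of free generic resource
 * @pf: board private structure
 * @pile: the pile of resource to search
 * @needed: the number of items needed
 * @id: an owner id to stick on the items assigned
 *
 * Returns the base item index of the lump, or negative for error
 *
 * The search_hint trick and lack of advanced fit-finding only work
 * because we're highly likely to have all the same size lump requests.
 * Linear search time and any fragmentation should be minimal.
 **/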
static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
                         u16 needed, u16 id)
{
        int ret = -ENOMEM;
        int i, j;

        if (!pile || needed == 0 || id >= I40E_PILE_VALID_BIT) {
                dev_info(&pf->pdev->dev,
                         "param err: pile=%s needed=%d id=0x%04x\n",
                         pile ? "<valid>" : "<null>", needed, id);
                return -EINVAL;
        }

        /* start the linear search with an imperfect hint */
        i = pile->search_hint;
        while (i < pile->num_entries) {
                /* skip already allocated entries */
                if (pile->list[i] & I40E_PILE_VALID_BIT) {
                        i++;
                        continue;
                }

                /* do we have enough in this lump? */
                for (j = 0; (j < needed) && ((i+j) < pile->num_entries); j++) {
                        if (pile->list[i+j] & I40E_PILE_VALID_BIT)
                                break;
                }

                if (j == needed) {
                        /* there was enough, so assign it to the requestor */
                        for (j = 0; j < needed; j++)
                                pile->list[i+j] = id | I40E_PILE_VALID_BIT;
                        ret = i;
                        pile->search_hint = i + j;
                        break;
                }

                /* not enough, so skip over it and continue looking */
                i += j;
        }

        return ret;
}

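/**
 * i40e_put_lump - return a lump of generic resource
 * @pile: the pile of resource to search
 * @index: the base item index
 * @id: the owner id of the items assigned
 *
 * Returns the count of items in the lump
 **/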
static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id)
{
        int valid_id = (id | I40E_PILE_VALID_BIT);
        int count = 0;
        int i;

        if (!pile || index >= pile->num_entries)
                return -EINVAL;

        for (i = index;
             i < pile->num_entries && pile->list[i] == valid_id;
             i++) {
                pile->list[i] = 0;
                count++;
        }

        if (count && index < pile->search_hint)
                pile->search_hint = index;

        return count;
}

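/**
 * i40e_find_vsi_from_id - searches for the vsi with the given id
 * @pf: the pf structure to search for the vsi
 * @id: id of the vsi it is searching for
 **/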
struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id)
{
        int i;

        for (i = 0; i < pf->num_alloc_vsi; i++)
                if (pf->vsi[i] && (pf->vsi[i]->id == id))
                        return pf->vsi[i];

        return NULL;
}

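/**
 * i40e_service_event_schedule - Schedule the service task to wake up
 * @pf: board private structure
 *
 * If not already scheduled, this puts the task into the work queue
 **/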
void i40e_service_event_schedule(struct i40e_pf *pf)
{
        if ((!test_bit(__I40E_DOWN, pf->state) &&
             !test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) ||
              test_bit(__I40E_RECOVERY_MODE, pf->state))
                queue_work(i40e_wq, &pf->service_task);
}

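/**
 * i40e_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 *
 * If any port has noticed a Tx timeout, it is likely that the whole
 * device is munged, not just the one netdev port, so go for the full
 * reset.
 **/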
static void i40e_tx_timeout(struct net_device *netdev)
{
        struct i40e_netdev_priv *np = netdev_priv(netdev);
        struct i40e_vsi *vsi = np->vsi;
        struct i40e_pf *pf = vsi->back;
        struct i40e_ring *tx_ring = NULL;
        unsigned int i, hung_queue = 0;
        u32 head, val;

        pf->tx_timeout_count++;

        /* find the stopped queue the same way the stack does */
        for (i = 0; i < netdev->num_tx_queues; i++) {
                struct netdev_queue *q;
                unsigned long trans_start;

                q = netdev_get_tx_queue(netdev, i);
                trans_start = q->trans_start;
                if (netif_xmit_stopped(q) &&
                    time_after(jiffies,
                               (trans_start + netdev->watchdog_timeo))) {
                        hung_queue = i;
                        break;
                }
        }

        if (i == netdev->num_tx_queues) {
                netdev_info(netdev, "tx_timeout: no netdev hung queue found\n");
        } else {
                /* now that we have an index, find the tx_ring struct */
                for (i = 0; i < vsi->num_queue_pairs; i++) {
                        if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) {
                                if (hung_queue ==
                                    vsi->tx_rings[i]->queue_index) {
                                        tx_ring = vsi->tx_rings[i];
                                        break;
                                }
                        }
                }
        }

        if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ*20)))
                pf->tx_timeout_recovery_level = 1;  /* reset after some time */
        else if (time_before(jiffies,
                      (pf->tx_timeout_last_recovery + netdev->watchdog_timeo)))
                return;   /* don't do any new action before the next timeout */

        /* don't kick off another recovery if one is already pending */
        if (test_and_set_bit(__I40E_TIMEOUT_RECOVERY_PENDING, pf->state))
                return;

        if (tx_ring) {
                head = i40e_get_head(tx_ring);
                /* Read interrupt register */
                if (pf->flags & I40E_FLAG_MSIX_ENABLED)
                        val = rd32(&pf->hw,
                             I40E_PFINT_DYN_CTLN(tx_ring->q_vector->v_idx +
                                                 tx_ring->vsi->base_vector - 1));
                else
                        val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0);

                netdev_info(netdev, "tx_timeout: VSI_seid: %d, Q %d, NTC: 0x%x, HWB: 0x%x, NTU: 0x%x, TAIL: 0x%x, INT: 0x%x\n",
                            vsi->seid, hung_queue, tx_ring->next_to_clean,
                            head, tx_ring->next_to_use,
                            readl(tx_ring->tail), val);
        }

        pf->tx_timeout_last_recovery = jiffies;
        netdev_info(netdev, "tx_timeout recovery level %d, hung_queue %d\n",
                    pf->tx_timeout_recovery_level, hung_queue);

        switch (pf->tx_timeout_recovery_level) {
        case 1:
                set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
                break;
        case 2:
                set_bit(__I40E_CORE_RESET_REQUESTED, pf->state);
                break;
        case 3:
                set_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state);
                break;
        default:
                netdev_err(netdev, "tx_timeout recovery unsuccessful\n");
                break;
        }

        i40e_service_event_schedule(pf);
        pf->tx_timeout_recovery_level++;
}

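/**
 * i40e_get_vsi_stats_struct - Get System Network Statistics
 * @vsi: the VSI we care about
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the service task.
 **/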
struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi)
{
        return &vsi->net_stats;
}

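/**
 * i40e_get_netdev_stats_struct_tx - populate stats from a Tx ring
 * @ring: Tx ring to get statistics from
 * @stats: statistics entry to be updated
 **/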
static void i40e_get_netdev_stats_struct_tx(struct i40e_ring *ring,
                                            struct rtnl_link_stats64 *stats)
{
        u64 bytes, packets;
        unsigned int start;

        do {
                start = u64_stats_fetch_begin_irq(&ring->syncp);
                packets = ring->stats.packets;
                bytes = ring->stats.bytes;
        } while (u64_stats_fetch_retry_irq(&ring->syncp, start));

        stats->tx_packets += packets;
        stats->tx_bytes += bytes;
}

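/**
 * i40e_get_netdev_stats_struct - Get statistics for netdev interface
 * @netdev: network interface device structure
 * @stats: data structure to store statistics
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the service task.
 **/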
static void i40e_get_netdev_stats_struct(struct net_device *netdev,
                                         struct rtnl_link_stats64 *stats)
{
        struct i40e_netdev_priv *np = netdev_priv(netdev);
        struct i40e_vsi *vsi = np->vsi;
        struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi);
        struct i40e_ring *ring;
        int i;

        if (test_bit(__I40E_VSI_DOWN, vsi->state))
                return;

        if (!vsi->tx_rings)
                return;

        rcu_read_lock();
        for (i = 0; i < vsi->num_queue_pairs; i++) {
                u64 bytes, packets;
                unsigned int start;

                ring = READ_ONCE(vsi->tx_rings[i]);
                if (!ring)
                        continue;
                i40e_get_netdev_stats_struct_tx(ring, stats);

                if (i40e_enabled_xdp_vsi(vsi)) {
                        ring++;
                        i40e_get_netdev_stats_struct_tx(ring, stats);
                }

                ring++;
                do {
                        start = u64_stats_fetch_begin_irq(&ring->syncp);
                        packets = ring->stats.packets;
                        bytes = ring->stats.bytes;
                } while (u64_stats_fetch_retry_irq(&ring->syncp, start));

                stats->rx_packets += packets;
                stats->rx_bytes += bytes;
        }
        rcu_read_unlock();

        /* following stats updated by i40e_watchdog_subtask() */
        stats->multicast = vsi_stats->multicast;
        stats->tx_errors = vsi_stats->tx_errors;
        stats->tx_dropped = vsi_stats->tx_dropped;
        stats->rx_errors = vsi_stats->rx_errors;
        stats->rx_dropped = vsi_stats->rx_dropped;
        stats->rx_crc_errors = vsi_stats->rx_crc_errors;
        stats->rx_length_errors = vsi_stats->rx_length_errors;
}

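/**
 * i40e_vsi_reset_stats - Resets all stats of the given vsi
 * @vsi: the VSI to have its stats reset
 **/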
void i40e_vsi_reset_stats(struct i40e_vsi *vsi)
{
        struct rtnl_link_stats64 *ns;
        int i;

        if (!vsi)
                return;

        ns = i40e_get_vsi_stats_struct(vsi);
        memset(ns, 0, sizeof(*ns));
        memset(&vsi->net_stats_offsets, 0, sizeof(vsi->net_stats_offsets));
        memset(&vsi->eth_stats, 0, sizeof(vsi->eth_stats));
        memset(&vsi->eth_stats_offsets, 0, sizeof(vsi->eth_stats_offsets));
        if (vsi->rx_rings && vsi->rx_rings[0]) {
                for (i = 0; i < vsi->num_queue_pairs; i++) {
                        memset(&vsi->rx_rings[i]->stats, 0,
                               sizeof(vsi->rx_rings[i]->stats));
                        memset(&vsi->rx_rings[i]->rx_stats, 0,
                               sizeof(vsi->rx_rings[i]->rx_stats));
                        memset(&vsi->tx_rings[i]->stats, 0,
                               sizeof(vsi->tx_rings[i]->stats));
                        memset(&vsi->tx_rings[i]->tx_stats, 0,
                               sizeof(vsi->tx_rings[i]->tx_stats));
                }
        }
        vsi->stat_offsets_loaded = false;
}

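/**
 * i40e_pf_reset_stats - Reset all of the stats for the given PF
 * @pf: the PF to be reset
 **/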
void i40e_pf_reset_stats(struct i40e_pf *pf)
{
        int i;

        memset(&pf->stats, 0, sizeof(pf->stats));
        memset(&pf->stats_offsets, 0, sizeof(pf->stats_offsets));
        pf->stat_offsets_loaded = false;

        for (i = 0; i < I40E_MAX_VEB; i++) {
                if (pf->veb[i]) {
                        memset(&pf->veb[i]->stats, 0,
                               sizeof(pf->veb[i]->stats));
                        memset(&pf->veb[i]->stats_offsets, 0,
                               sizeof(pf->veb[i]->stats_offsets));
                        memset(&pf->veb[i]->tc_stats, 0,
                               sizeof(pf->veb[i]->tc_stats));
                        memset(&pf->veb[i]->tc_stats_offsets, 0,
                               sizeof(pf->veb[i]->tc_stats_offsets));
                        pf->veb[i]->stat_offsets_loaded = false;
                }
        }
        pf->hw_csum_rx_error = 0;
}

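/**
 * i40e_stat_update48 - read and update a 48 bit stat from the chip
 * @hw: ptr to the hardware info
 * @hireg: the high 32 bit reg to read
 * @loreg: the low 32 bit reg to read
 * @offset_loaded: has the initial offset been loaded yet
 * @offset: ptr to current offset value
 * @stat: ptr to the stat
 *
 * Since the device stats are not reset at PFReset, they likely will not
 * be zeroed when the driver starts.  We'll save the first values read
 * and use them as offsets to be subtracted from the raw values in order
 * to report stats that count from zero.  In the process, we also manage
 * the potential roll-over.
 **/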
static void i40e_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
                               bool offset_loaded, u64 *offset, u64 *stat)
{
        u64 new_data;

        if (hw->device_id == I40E_DEV_ID_QEMU) {
                new_data = rd32(hw, loreg);
                new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
        } else {
                new_data = rd64(hw, loreg);
        }
        if (!offset_loaded)
                *offset = new_data;
        if (likely(new_data >= *offset))
                *stat = new_data - *offset;
        else
                *stat = (new_data + BIT_ULL(48)) - *offset;
        *stat &= 0xFFFFFFFFFFFFULL;
}

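/**
 * i40e_stat_update32 - read and update a 32 bit stat from the chip
 * @hw: ptr to the hardware info
 * @reg: the hw reg to read
 * @offset_loaded: has the initial offset been loaded yet
 * @offset: ptr to current offset value
 * @stat: ptr to the stat
 **/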
static void i40e_stat_update32(struct i40e_hw *hw, u32 reg,
                               bool offset_loaded, u64 *offset, u64 *stat)
{
        u32 new_data;

        new_data = rd32(hw, reg);
        if (!offset_loaded)
                *offset = new_data;
        if (likely(new_data >= *offset))
                *stat = (u32)(new_data - *offset);
        else
                *stat = (u32)((new_data + BIT_ULL(32)) - *offset);
}

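/**
 * i40e_stat_update_and_clear32 - read and clear hw reg, update a 32 bit stat
 * @hw: ptr to the hardware info
 * @reg: the hw reg to read and clear
 * @stat: ptr to the stat
 **/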
static void i40e_stat_update_and_clear32(struct i40e_hw *hw, u32 reg, u64 *stat)
{
        u32 new_data = rd32(hw, reg);

        wr32(hw, reg, 1); /* must write a nonzero value to clear register */
        *stat += new_data;
}

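/**
 * i40e_update_eth_stats - Update VSI-specific ethernet statistics counters.
 * @vsi: the VSI to be updated
 **/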
void i40e_update_eth_stats(struct i40e_vsi *vsi)
{
        int stat_idx = le16_to_cpu(vsi->info.stat_counter_idx);
        struct i40e_pf *pf = vsi->back;
        struct i40e_hw *hw = &pf->hw;
        struct i40e_eth_stats *oes;
        struct i40e_eth_stats *es;     /* device's eth stats */

        es = &vsi->eth_stats;
        oes = &vsi->eth_stats_offsets;

        /* Gather up the stats that the hw collects */
        i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
                           vsi->stat_offsets_loaded,
                           &oes->tx_errors, &es->tx_errors);
        i40e_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
                           vsi->stat_offsets_loaded,
                           &oes->rx_discards, &es->rx_discards);
        i40e_stat_update32(hw, I40E_GLV_RUPP(stat_idx),
                           vsi->stat_offsets_loaded,
                           &oes->rx_unknown_protocol, &es->rx_unknown_protocol);

        i40e_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
                           I40E_GLV_GORCL(stat_idx),
                           vsi->stat_offsets_loaded,
                           &oes->rx_bytes, &es->rx_bytes);
        i40e_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
                           I40E_GLV_UPRCL(stat_idx),
                           vsi->stat_offsets_loaded,
                           &oes->rx_unicast, &es->rx_unicast);
        i40e_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
                           I40E_GLV_MPRCL(stat_idx),
                           vsi->stat_offsets_loaded,
                           &oes->rx_multicast, &es->rx_multicast);
        i40e_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
                           I40E_GLV_BPRCL(stat_idx),
                           vsi->stat_offsets_loaded,
                           &oes->rx_broadcast, &es->rx_broadcast);

        i40e_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
                           I40E_GLV_GOTCL(stat_idx),
                           vsi->stat_offsets_loaded,
                           &oes->tx_bytes, &es->tx_bytes);
        i40e_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
                           I40E_GLV_UPTCL(stat_idx),
                           vsi->stat_offsets_loaded,
                           &oes->tx_unicast, &es->tx_unicast);
        i40e_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
                           I40E_GLV_MPTCL(stat_idx),
                           vsi->stat_offsets_loaded,
                           &oes->tx_multicast, &es->tx_multicast);
        i40e_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
                           I40E_GLV_BPTCL(stat_idx),
                           vsi->stat_offsets_loaded,
                           &oes->tx_broadcast, &es->tx_broadcast);
        vsi->stat_offsets_loaded = true;
}

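/**
 * i40e_update_veb_stats - Update Switch component statistics
 * @veb: the VEB being updated
 **/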
void i40e_update_veb_stats(struct i40e_veb *veb)
{
        struct i40e_pf *pf = veb->pf;
        struct i40e_hw *hw = &pf->hw;
        struct i40e_eth_stats *oes;
        struct i40e_eth_stats *es;     /* device's eth stats */
        struct i40e_veb_tc_stats *veb_oes;
        struct i40e_veb_tc_stats *veb_es;
        int i, idx = 0;

        idx = veb->stats_idx;
        es = &veb->stats;
        oes = &veb->stats_offsets;
        veb_es = &veb->tc_stats;
        veb_oes = &veb->tc_stats_offsets;

        /* Gather up the stats that the hw collects */
        i40e_stat_update32(hw, I40E_GLSW_TDPC(idx),
                           veb->stat_offsets_loaded,
                           &oes->tx_discards, &es->tx_discards);
        if (hw->revision_id > 0)
                i40e_stat_update32(hw, I40E_GLSW_RUPP(idx),
                                   veb->stat_offsets_loaded,
                                   &oes->rx_unknown_protocol,
                                   &es->rx_unknown_protocol);
        i40e_stat_update48(hw, I40E_GLSW_GORCH(idx), I40E_GLSW_GORCL(idx),
                           veb->stat_offsets_loaded,
                           &oes->rx_bytes, &es->rx_bytes);
        i40e_stat_update48(hw, I40E_GLSW_UPRCH(idx), I40E_GLSW_UPRCL(idx),
                           veb->stat_offsets_loaded,
                           &oes->rx_unicast, &es->rx_unicast);
        i40e_stat_update48(hw, I40E_GLSW_MPRCH(idx), I40E_GLSW_MPRCL(idx),
                           veb->stat_offsets_loaded,
                           &oes->rx_multicast, &es->rx_multicast);
        i40e_stat_update48(hw, I40E_GLSW_BPRCH(idx), I40E_GLSW_BPRCL(idx),
                           veb->stat_offsets_loaded,
                           &oes->rx_broadcast, &es->rx_broadcast);

        i40e_stat_update48(hw, I40E_GLSW_GOTCH(idx), I40E_GLSW_GOTCL(idx),
                           veb->stat_offsets_loaded,
                           &oes->tx_bytes, &es->tx_bytes);
        i40e_stat_update48(hw, I40E_GLSW_UPTCH(idx), I40E_GLSW_UPTCL(idx),
                           veb->stat_offsets_loaded,
                           &oes->tx_unicast, &es->tx_unicast);
        i40e_stat_update48(hw, I40E_GLSW_MPTCH(idx), I40E_GLSW_MPTCL(idx),
                           veb->stat_offsets_loaded,
                           &oes->tx_multicast, &es->tx_multicast);
        i40e_stat_update48(hw, I40E_GLSW_BPTCH(idx), I40E_GLSW_BPTCL(idx),
                           veb->stat_offsets_loaded,
                           &oes->tx_broadcast, &es->tx_broadcast);
        for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
                i40e_stat_update48(hw, I40E_GLVEBTC_RPCH(i, idx),
                                   I40E_GLVEBTC_RPCL(i, idx),
                                   veb->stat_offsets_loaded,
                                   &veb_oes->tc_rx_packets[i],
                                   &veb_es->tc_rx_packets[i]);
                i40e_stat_update48(hw, I40E_GLVEBTC_RBCH(i, idx),
                                   I40E_GLVEBTC_RBCL(i, idx),
                                   veb->stat_offsets_loaded,
                                   &veb_oes->tc_rx_bytes[i],
                                   &veb_es->tc_rx_bytes[i]);
                i40e_stat_update48(hw, I40E_GLVEBTC_TPCH(i, idx),
                                   I40E_GLVEBTC_TPCL(i, idx),
                                   veb->stat_offsets_loaded,
                                   &veb_oes->tc_tx_packets[i],
                                   &veb_es->tc_tx_packets[i]);
                i40e_stat_update48(hw, I40E_GLVEBTC_TBCH(i, idx),
                                   I40E_GLVEBTC_TBCL(i, idx),
                                   veb->stat_offsets_loaded,
                                   &veb_oes->tc_tx_bytes[i],
                                   &veb_es->tc_tx_bytes[i]);
        }
        veb->stat_offsets_loaded = true;
}

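/**
 * i40e_update_vsi_stats - Update the vsi statistics counters.
 * @vsi: the VSI to be updated
 *
 * There are a few instances where we store the same stat in a
 * couple of different structs.  This is partly because we have
 * the netdev stats that need to be filled out, which is slightly
 * different from the "eth_stats" defined by the chip and used in
 * VF communications.  We sort it out here.
 **/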
static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
{
        struct i40e_pf *pf = vsi->back;
        struct rtnl_link_stats64 *ons;
        struct rtnl_link_stats64 *ns;   /* netdev stats */
        struct i40e_eth_stats *oes;
        struct i40e_eth_stats *es;      /* device's eth stats */
        u32 tx_restart, tx_busy;
        struct i40e_ring *p;
        u32 rx_page, rx_buf;
        u64 bytes, packets;
        unsigned int start;
        u64 tx_linearize;
        u64 tx_force_wb;
        u64 rx_p, rx_b;
        u64 tx_p, tx_b;
        u16 q;

        if (test_bit(__I40E_VSI_DOWN, vsi->state) ||
            test_bit(__I40E_CONFIG_BUSY, pf->state))
                return;

        ns = i40e_get_vsi_stats_struct(vsi);
        ons = &vsi->net_stats_offsets;
        es = &vsi->eth_stats;
        oes = &vsi->eth_stats_offsets;

        /* Gather up the netdev and vsi stats that the driver collects
         * on its own, summed over all queue pairs
         */
        rx_b = rx_p = 0;
        tx_b = tx_p = 0;
        tx_restart = tx_busy = tx_linearize = tx_force_wb = 0;
        rx_page = 0;
        rx_buf = 0;
        rcu_read_lock();
        for (q = 0; q < vsi->num_queue_pairs; q++) {
                /* locate Tx ring */
                p = READ_ONCE(vsi->tx_rings[q]);

                do {
                        start = u64_stats_fetch_begin_irq(&p->syncp);
                        packets = p->stats.packets;
                        bytes = p->stats.bytes;
                } while (u64_stats_fetch_retry_irq(&p->syncp, start));
                tx_b += bytes;
                tx_p += packets;
                tx_restart += p->tx_stats.restart_queue;
                tx_busy += p->tx_stats.tx_busy;
                tx_linearize += p->tx_stats.tx_linearize;
                tx_force_wb += p->tx_stats.tx_force_wb;

                /* Rx queue is part of the same block as Tx queue */
                p = &p[1];
                do {
                        start = u64_stats_fetch_begin_irq(&p->syncp);
                        packets = p->stats.packets;
                        bytes = p->stats.bytes;
                } while (u64_stats_fetch_retry_irq(&p->syncp, start));
                rx_b += bytes;
                rx_p += packets;
                rx_buf += p->rx_stats.alloc_buff_failed;
                rx_page += p->rx_stats.alloc_page_failed;
        }
        rcu_read_unlock();
        vsi->tx_restart = tx_restart;
        vsi->tx_busy = tx_busy;
        vsi->tx_linearize = tx_linearize;
        vsi->tx_force_wb = tx_force_wb;
        vsi->rx_page_failed = rx_page;
        vsi->rx_buf_failed = rx_buf;

        ns->rx_packets = rx_p;
        ns->rx_bytes = rx_b;
        ns->tx_packets = tx_p;
        ns->tx_bytes = tx_b;

        /* update netdev stats from eth stats */
        i40e_update_eth_stats(vsi);
        ons->tx_errors = oes->tx_errors;
        ns->tx_errors = es->tx_errors;
        ons->multicast = oes->rx_multicast;
        ns->multicast = es->rx_multicast;
        ons->rx_dropped = oes->rx_discards;
        ns->rx_dropped = es->rx_discards;
        ons->tx_dropped = oes->tx_discards;
        ns->tx_dropped = es->tx_discards;

        /* pull in a couple PF stats if this is the main vsi */
        if (vsi == pf->vsi[pf->lan_vsi]) {
                ns->rx_crc_errors = pf->stats.crc_errors;
                ns->rx_errors = pf->stats.crc_errors + pf->stats.illegal_bytes;
                ns->rx_length_errors = pf->stats.rx_length_errors;
        }
}

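/**
 * i40e_update_pf_stats - Update the PF statistics counters.
 * @pf: the PF to be updated
 **/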
static void i40e_update_pf_stats(struct i40e_pf *pf)
{
        struct i40e_hw_port_stats *osd = &pf->stats_offsets;
        struct i40e_hw_port_stats *nsd = &pf->stats;
        struct i40e_hw *hw = &pf->hw;
        u32 val;
        int i;

        i40e_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
                           I40E_GLPRT_GORCL(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
        i40e_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
                           I40E_GLPRT_GOTCL(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
        i40e_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->eth.rx_discards,
                           &nsd->eth.rx_discards);
        i40e_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
                           I40E_GLPRT_UPRCL(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->eth.rx_unicast,
                           &nsd->eth.rx_unicast);
        i40e_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
                           I40E_GLPRT_MPRCL(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->eth.rx_multicast,
                           &nsd->eth.rx_multicast);
        i40e_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
                           I40E_GLPRT_BPRCL(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->eth.rx_broadcast,
                           &nsd->eth.rx_broadcast);
        i40e_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
                           I40E_GLPRT_UPTCL(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->eth.tx_unicast,
                           &nsd->eth.tx_unicast);
        i40e_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
                           I40E_GLPRT_MPTCL(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->eth.tx_multicast,
                           &nsd->eth.tx_multicast);
        i40e_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
                           I40E_GLPRT_BPTCL(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->eth.tx_broadcast,
                           &nsd->eth.tx_broadcast);

        i40e_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->tx_dropped_link_down,
                           &nsd->tx_dropped_link_down);

        i40e_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->crc_errors, &nsd->crc_errors);

        i40e_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->illegal_bytes, &nsd->illegal_bytes);

        i40e_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->mac_local_faults,
                           &nsd->mac_local_faults);
        i40e_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->mac_remote_faults,
                           &nsd->mac_remote_faults);

        i40e_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->rx_length_errors,
                           &nsd->rx_length_errors);

        i40e_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->link_xon_rx, &nsd->link_xon_rx);
        i40e_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->link_xon_tx, &nsd->link_xon_tx);
        i40e_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->link_xoff_rx, &nsd->link_xoff_rx);
        i40e_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->link_xoff_tx, &nsd->link_xoff_tx);

        for (i = 0; i < 8; i++) {
                i40e_stat_update32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
                                   pf->stat_offsets_loaded,
                                   &osd->priority_xoff_rx[i],
                                   &nsd->priority_xoff_rx[i]);
                i40e_stat_update32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
                                   pf->stat_offsets_loaded,
                                   &osd->priority_xon_rx[i],
                                   &nsd->priority_xon_rx[i]);
                i40e_stat_update32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
                                   pf->stat_offsets_loaded,
                                   &osd->priority_xon_tx[i],
                                   &nsd->priority_xon_tx[i]);
                i40e_stat_update32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
                                   pf->stat_offsets_loaded,
                                   &osd->priority_xoff_tx[i],
                                   &nsd->priority_xoff_tx[i]);
                i40e_stat_update32(hw,
                                   I40E_GLPRT_RXON2OFFCNT(hw->port, i),
                                   pf->stat_offsets_loaded,
                                   &osd->priority_xon_2_xoff[i],
                                   &nsd->priority_xon_2_xoff[i]);
        }

        i40e_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
                           I40E_GLPRT_PRC64L(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->rx_size_64, &nsd->rx_size_64);
        i40e_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
                           I40E_GLPRT_PRC127L(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->rx_size_127, &nsd->rx_size_127);
        i40e_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
                           I40E_GLPRT_PRC255L(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->rx_size_255, &nsd->rx_size_255);
        i40e_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
                           I40E_GLPRT_PRC511L(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->rx_size_511, &nsd->rx_size_511);
        i40e_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
                           I40E_GLPRT_PRC1023L(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->rx_size_1023, &nsd->rx_size_1023);
        i40e_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
                           I40E_GLPRT_PRC1522L(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->rx_size_1522, &nsd->rx_size_1522);
        i40e_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
                           I40E_GLPRT_PRC9522L(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->rx_size_big, &nsd->rx_size_big);

        i40e_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
                           I40E_GLPRT_PTC64L(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->tx_size_64, &nsd->tx_size_64);
        i40e_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
                           I40E_GLPRT_PTC127L(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->tx_size_127, &nsd->tx_size_127);
        i40e_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
                           I40E_GLPRT_PTC255L(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->tx_size_255, &nsd->tx_size_255);
        i40e_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
                           I40E_GLPRT_PTC511L(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->tx_size_511, &nsd->tx_size_511);
        i40e_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
                           I40E_GLPRT_PTC1023L(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->tx_size_1023, &nsd->tx_size_1023);
        i40e_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
                           I40E_GLPRT_PTC1522L(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->tx_size_1522, &nsd->tx_size_1522);
        i40e_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
                           I40E_GLPRT_PTC9522L(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->tx_size_big, &nsd->tx_size_big);

        i40e_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->rx_undersize, &nsd->rx_undersize);
        i40e_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->rx_fragments, &nsd->rx_fragments);
        i40e_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->rx_oversize, &nsd->rx_oversize);
        i40e_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->rx_jabber, &nsd->rx_jabber);

        /* FDIR stats */
        i40e_stat_update_and_clear32(hw,
                        I40E_GLQF_PCNT(I40E_FD_ATR_STAT_IDX(hw->pf_id)),
                        &nsd->fd_atr_match);
        i40e_stat_update_and_clear32(hw,
                        I40E_GLQF_PCNT(I40E_FD_SB_STAT_IDX(hw->pf_id)),
                        &nsd->fd_sb_match);
        i40e_stat_update_and_clear32(hw,
                        I40E_GLQF_PCNT(I40E_FD_ATR_TUNNEL_STAT_IDX(hw->pf_id)),
                        &nsd->fd_atr_tunnel_match);

        val = rd32(hw, I40E_PRTPM_EEE_STAT);
        nsd->tx_lpi_status =
                       (val & I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK) >>
                        I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT;
        nsd->rx_lpi_status =
                       (val & I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK) >>
                        I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT;
        i40e_stat_update32(hw, I40E_PRTPM_TLPIC,
                           pf->stat_offsets_loaded,
                           &osd->tx_lpi_count, &nsd->tx_lpi_count);
        i40e_stat_update32(hw, I40E_PRTPM_RLPIC,
                           pf->stat_offsets_loaded,
                           &osd->rx_lpi_count, &nsd->rx_lpi_count);

        if (pf->flags & I40E_FLAG_FD_SB_ENABLED &&
            !test_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state))
                nsd->fd_sb_status = true;
        else
                nsd->fd_sb_status = false;

        if (pf->flags & I40E_FLAG_FD_ATR_ENABLED &&
            !test_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state))
                nsd->fd_atr_status = true;
        else
                nsd->fd_atr_status = false;

        pf->stat_offsets_loaded = true;
}

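/**
 * i40e_update_stats - Update the various statistics counters.
 * @vsi: the VSI to be updated
 *
 * Update the various stats for this VSI and its related entities.
 **/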
void i40e_update_stats(struct i40e_vsi *vsi)
{
        struct i40e_pf *pf = vsi->back;

        if (vsi == pf->vsi[pf->lan_vsi])
                i40e_update_pf_stats(pf);

        i40e_update_vsi_stats(vsi);
}

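/**
 * i40e_find_filter - Search VSI filter list for specific mac/vlan filter
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the vlan
 *
 * Returns ptr to the filter object or NULL
 **/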
static struct i40e_mac_filter *i40e_find_filter(struct i40e_vsi *vsi,
                                                const u8 *macaddr, s16 vlan)
{
        struct i40e_mac_filter *f;
        u64 key;

        if (!vsi || !macaddr)
                return NULL;

        key = i40e_addr_to_hkey(macaddr);
        hash_for_each_possible(vsi->mac_filter_hash, f, hlist, key) {
                if ((ether_addr_equal(macaddr, f->macaddr)) &&
                    (vlan == f->vlan))
                        return f;
        }
        return NULL;
}

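/**
 * i40e_find_mac - Find a mac addr in the macvlan filters list
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address we are searching for
 *
 * Returns the first filter with the provided MAC address or NULL if
 * MAC address was not found
 **/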
struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, const u8 *macaddr)
{
        struct i40e_mac_filter *f;
        u64 key;

        if (!vsi || !macaddr)
                return NULL;

        key = i40e_addr_to_hkey(macaddr);
        hash_for_each_possible(vsi->mac_filter_hash, f, hlist, key) {
                if ((ether_addr_equal(macaddr, f->macaddr)))
                        return f;
        }
        return NULL;
}

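/**
 * i40e_is_vsi_in_vlan - Check if VSI is in vlan mode
 * @vsi: the VSI to be searched
 *
 * Returns true if VSI is in vlan mode or false otherwise
 **/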
bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi)
{
        /* pvid means we have one vlan at most */
        if (vsi->info.pvid)
                return true;

        /* has_vlan_filter is kept up to date as filters with a VLAN
         * other than I40E_VLAN_ANY are added and removed, so checking
         * it here is enough to tell whether any real VLAN filters are
         * in use.
         */
        return vsi->has_vlan_filter;
}

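/**
 * i40e_correct_mac_vlan_filters - Correct non-VLAN filters if necessary
 * @vsi: the VSI to configure
 * @tmp_add_list: list of filters ready to be added
 * @tmp_del_list: list of filters ready to be deleted
 * @vlan_filters: the number of active VLAN filters
 *
 * Update all non-VLAN filters to be consistent with the current VLAN
 * mode: with a port VLAN configured, every filter must use the PVID;
 * with active VLAN filters, MAC filters on I40E_VLAN_ANY are converted
 * to VLAN 0 so untagged frames are still received; with no VLAN
 * filters left, VLAN 0 filters go back to I40E_VLAN_ANY. Filters that
 * must change are moved onto the tmp_add_list and tmp_del_list.
 *
 * Returns 0 on success, or -ENOMEM if a replacement filter cannot be
 * allocated.
 **/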
static int i40e_correct_mac_vlan_filters(struct i40e_vsi *vsi,
                                         struct hlist_head *tmp_add_list,
                                         struct hlist_head *tmp_del_list,
                                         int vlan_filters)
{
        s16 pvid = le16_to_cpu(vsi->info.pvid);
        struct i40e_mac_filter *f, *add_head;
        struct i40e_new_mac_filter *new;
        struct hlist_node *h;
        int bkt, new_vlan;

        /* Update the filters about to be added in place */
        hlist_for_each_entry(new, tmp_add_list, hlist) {
                if (pvid && new->f->vlan != pvid)
                        new->f->vlan = pvid;
                else if (vlan_filters && new->f->vlan == I40E_VLAN_ANY)
                        new->f->vlan = 0;
                else if (!vlan_filters && new->f->vlan == 0)
                        new->f->vlan = I40E_VLAN_ANY;
        }

        /* Update the remaining active filters */
        hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
                /* Combine the checks for whether a filter needs to be changed
                 * and then determine the new VLAN inside the if block, in
                 * order to avoid duplicating code for adding the new filter
                 * then deleting the old filter.
                 */
                if ((pvid && f->vlan != pvid) ||
                    (vlan_filters && f->vlan == I40E_VLAN_ANY) ||
                    (!vlan_filters && f->vlan == 0)) {
                        /* Determine the new vlan we will be adding */
                        if (pvid)
                                new_vlan = pvid;
                        else if (vlan_filters)
                                new_vlan = 0;
                        else
                                new_vlan = I40E_VLAN_ANY;

                        /* Create the new filter */
                        add_head = i40e_add_filter(vsi, f->macaddr, new_vlan);
                        if (!add_head)
                                return -ENOMEM;

                        /* Create a temporary i40e_new_mac_filter */
                        new = kzalloc(sizeof(*new), GFP_ATOMIC);
                        if (!new)
                                return -ENOMEM;

                        new->f = add_head;
                        new->state = add_head->state;

                        /* Add the new filter to the tmp list */
                        hlist_add_head(&new->hlist, tmp_add_list);

                        /* Put the original filter into the delete list */
                        f->state = I40E_FILTER_REMOVE;
                        hash_del(&f->hlist);
                        hlist_add_head(&f->hlist, tmp_del_list);
                }
        }

        vsi->has_vlan_filter = !!vlan_filters;

        return 0;
}

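/**
 * i40e_rm_default_mac_filter - Remove the default MAC filter set by NVM
 * @vsi: the PF Main VSI - inappropriate for any other VSI
 * @macaddr: the MAC address
 *
 * Remove whatever filter the firmware set up so the driver can manage
 * its own filtering intelligently.
 **/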
static void i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr)
{
        struct i40e_aqc_remove_macvlan_element_data element;
        struct i40e_pf *pf = vsi->back;

        /* Only appropriate for the PF main VSI */
        if (vsi->type != I40E_VSI_MAIN)
                return;

        memset(&element, 0, sizeof(element));
        ether_addr_copy(element.mac_addr, macaddr);
        element.vlan_tag = 0;
        /* Ignore error returns, some firmware does it this way... */
        element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
        i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);

        memset(&element, 0, sizeof(element));
        ether_addr_copy(element.mac_addr, macaddr);
        element.vlan_tag = 0;
        /* ...and some firmware does it this way. */
        element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
                        I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
        i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
}

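/**
 * i40e_add_filter - Add a mac/vlan filter to the VSI
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the vlan
 *
 * Returns ptr to the filter object or NULL when no memory available.
 *
 * NOTE: This function is expected to be called with mac_filter_hash_lock
 * being held.
 **/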
struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
                                        const u8 *macaddr, s16 vlan)
{
        struct i40e_mac_filter *f;
        u64 key;

        if (!vsi || !macaddr)
                return NULL;

        f = i40e_find_filter(vsi, macaddr, vlan);
        if (!f) {
                f = kzalloc(sizeof(*f), GFP_ATOMIC);
                if (!f)
                        return NULL;

                /* Update the boolean indicating if we need to function in
                 * VLAN mode.
                 */
                if (vlan >= 0)
                        vsi->has_vlan_filter = true;

                ether_addr_copy(f->macaddr, macaddr);
                f->vlan = vlan;
                f->state = I40E_FILTER_NEW;
                INIT_HLIST_NODE(&f->hlist);

                key = i40e_addr_to_hkey(macaddr);
                hash_add(vsi->mac_filter_hash, &f->hlist, key);

                vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
                set_bit(__I40E_MACVLAN_SYNC_PENDING, vsi->back->state);
        }

        /* If we're asked to add a filter that has been marked for removal, it
         * is safe to simply restore it to active state. __i40e_del_filter
         * will have simply deleted any filters which were previously marked
         * NEW or FAILED, so if it is currently marked REMOVE it must have
         * previously been ACTIVE. Since we haven't yet run the sync filters
         * task, just restore this filter to the ACTIVE state so that the
         * sync task leaves it in place.
         */
        if (f->state == I40E_FILTER_REMOVE)
                f->state = I40E_FILTER_ACTIVE;

        return f;
}

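/**
 * __i40e_del_filter - Remove a specific filter from the VSI
 * @vsi: VSI to remove from
 * @f: the filter to remove from the list
 *
 * This function should be called instead of i40e_del_filter only if you know
 * the exact filter you will remove already, such as via i40e_find_filter or
 * i40e_find_mac.
 *
 * NOTE: This function is expected to be called with mac_filter_hash_lock
 * being held.
 **/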
void __i40e_del_filter(struct i40e_vsi *vsi, struct i40e_mac_filter *f)
{
        if (!f)
                return;

        /* If the filter was never added to firmware then we can just delete it
         * directly and we don't want to set the status to remove or else an
         * admin queue command will unnecessarily fire.
         */
        if ((f->state == I40E_FILTER_FAILED) ||
            (f->state == I40E_FILTER_NEW)) {
                hash_del(&f->hlist);
                kfree(f);
        } else {
                f->state = I40E_FILTER_REMOVE;
        }

        vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
        set_bit(__I40E_MACVLAN_SYNC_PENDING, vsi->back->state);
}

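/**
 * i40e_del_filter - Remove a MAC/VLAN filter from the VSI
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the VLAN
 *
 * NOTE: This function is expected to be called with mac_filter_hash_lock
 * being held.
 **/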
void i40e_del_filter(struct i40e_vsi *vsi, const u8 *macaddr, s16 vlan)
{
        struct i40e_mac_filter *f;

        if (!vsi || !macaddr)
                return;

        f = i40e_find_filter(vsi, macaddr, vlan);
        __i40e_del_filter(vsi, f);
}

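/**
 * i40e_add_mac_filter - Add a MAC filter for all active VLANs
 * @vsi: the VSI to be searched
 * @macaddr: the mac address to be filtered
 *
 * If we're not in VLAN mode, just add the filter to I40E_VLAN_ANY. Otherwise,
 * go through all the macvlan filters and add a macvlan filter for each
 * unique vlan that already exists. If a PVID has been assigned, instead only
 * add the macaddr to that VLAN.
 *
 * Returns last filter added on success, else NULL
 **/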
struct i40e_mac_filter *i40e_add_mac_filter(struct i40e_vsi *vsi,
                                            const u8 *macaddr)
{
        struct i40e_mac_filter *f, *add = NULL;
        struct hlist_node *h;
        int bkt;

        if (vsi->info.pvid)
                return i40e_add_filter(vsi, macaddr,
                                       le16_to_cpu(vsi->info.pvid));

        if (!i40e_is_vsi_in_vlan(vsi))
                return i40e_add_filter(vsi, macaddr, I40E_VLAN_ANY);

        hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
                if (f->state == I40E_FILTER_REMOVE)
                        continue;
                add = i40e_add_filter(vsi, macaddr, f->vlan);
                if (!add)
                        return NULL;
        }

        return add;
}

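/**
 * i40e_del_mac_filter - Remove a MAC filter from all VLANs
 * @vsi: the VSI to be searched
 * @macaddr: the mac address to be removed
 *
 * Removes a given MAC address from a VSI regardless of what VLAN it has been
 * associated with.
 *
 * Returns 0 for success, or error
 **/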
int i40e_del_mac_filter(struct i40e_vsi *vsi, const u8 *macaddr)
{
        struct i40e_mac_filter *f;
        struct hlist_node *h;
        bool found = false;
        int bkt;

        lockdep_assert_held(&vsi->mac_filter_hash_lock);
        hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
                if (ether_addr_equal(macaddr, f->macaddr)) {
                        __i40e_del_filter(vsi, f);
                        found = true;
                }
        }

        if (found)
                return 0;
        else
                return -ENOENT;
}

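/**
 * i40e_set_mac - NDO callback to change the Ethernet Address
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/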
static int i40e_set_mac(struct net_device *netdev, void *p)
{
        struct i40e_netdev_priv *np = netdev_priv(netdev);
        struct i40e_vsi *vsi = np->vsi;
        struct i40e_pf *pf = vsi->back;
        struct i40e_hw *hw = &pf->hw;
        struct sockaddr *addr = p;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;

        if (ether_addr_equal(netdev->dev_addr, addr->sa_data)) {
                netdev_info(netdev, "already using mac address %pM\n",
                            addr->sa_data);
                return 0;
        }

        if (test_bit(__I40E_DOWN, pf->state) ||
            test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
                return -EADDRNOTAVAIL;

        if (ether_addr_equal(hw->mac.addr, addr->sa_data))
                netdev_info(netdev, "returning to hw mac address %pM\n",
                            hw->mac.addr);
        else
                netdev_info(netdev, "set new mac address %pM\n", addr->sa_data);

        /* Copy the address first, so that we avoid a possible race with
         * .set_rx_mode().
         * - Remove old address from MAC filter
         * - Copy new address
         * - Add new address to MAC filter
         */
        spin_lock_bh(&vsi->mac_filter_hash_lock);
        i40e_del_mac_filter(vsi, netdev->dev_addr);
        ether_addr_copy(netdev->dev_addr, addr->sa_data);
        i40e_add_mac_filter(vsi, netdev->dev_addr);
        spin_unlock_bh(&vsi->mac_filter_hash_lock);

        if (vsi->type == I40E_VSI_MAIN) {
                i40e_status ret;

                ret = i40e_aq_mac_address_write(hw, I40E_AQC_WRITE_TYPE_LAA_WOL,
                                                addr->sa_data, NULL);
                if (ret)
                        netdev_info(netdev, "Ignoring error from firmware on LAA update, status %s, AQ ret %s\n",
                                    i40e_stat_str(hw, ret),
                                    i40e_aq_str(hw, hw->aq.asq_last_status));
        }

        /* schedule our worker thread which will take care of
         * applying the new filter changes
         */
        i40e_service_event_schedule(pf);
        return 0;
}

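/**
 * i40e_config_rss_aq - Prepare for RSS using AQ commands
 * @vsi: vsi structure
 * @seed: RSS hash seed
 * @lut: pointer to lookup table of lut_size
 * @lut_size: size of the lookup table
 **/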
static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
                              u8 *lut, u16 lut_size)
{
        struct i40e_pf *pf = vsi->back;
        struct i40e_hw *hw = &pf->hw;
        int ret = 0;

        if (seed) {
                struct i40e_aqc_get_set_rss_key_data *seed_dw =
                        (struct i40e_aqc_get_set_rss_key_data *)seed;
                ret = i40e_aq_set_rss_key(hw, vsi->id, seed_dw);
                if (ret) {
                        dev_info(&pf->pdev->dev,
                                 "Cannot set RSS key, err %s aq_err %s\n",
                                 i40e_stat_str(hw, ret),
                                 i40e_aq_str(hw, hw->aq.asq_last_status));
                        return ret;
                }
        }
        if (lut) {
                bool pf_lut = vsi->type == I40E_VSI_MAIN ? true : false;

                ret = i40e_aq_set_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);
                if (ret) {
                        dev_info(&pf->pdev->dev,
                                 "Cannot set RSS lut, err %s aq_err %s\n",
                                 i40e_stat_str(hw, ret),
                                 i40e_aq_str(hw, hw->aq.asq_last_status));
                        return ret;
                }
        }
        return ret;
}

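/**
 * i40e_vsi_config_rss - Prepare for VSI(VMDq) RSS if used
 * @vsi: VSI structure
 **/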
static int i40e_vsi_config_rss(struct i40e_vsi *vsi)
{
        struct i40e_pf *pf = vsi->back;
        u8 seed[I40E_HKEY_ARRAY_SIZE];
        u8 *lut;
        int ret;

        if (!(pf->hw_features & I40E_HW_RSS_AQ_CAPABLE))
                return 0;
        if (!vsi->rss_size)
                vsi->rss_size = min_t(int, pf->alloc_rss_size,
                                      vsi->num_queue_pairs);
        if (!vsi->rss_size)
                return -EINVAL;
        lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
        if (!lut)
                return -ENOMEM;

        /* Use the user configured hash key and lookup table if there is one,
         * otherwise use default
         */
        if (vsi->rss_lut_user)
                memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
        else
                i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size);
        if (vsi->rss_hkey_user)
                memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
        else
                netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
        ret = i40e_config_rss_aq(vsi, seed, lut, vsi->rss_table_size);
        kfree(lut);
        return ret;
}

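/**
 * i40e_vsi_setup_queue_map_mqprio - Prepares mqprio based tc_config
 * @vsi: the VSI being configured,
 * @ctxt: VSI context structure
 * @enabled_tc: number of traffic classes to enable
 *
 * Prepares VSI tc_config to have queue configurations based on MQPRIO options.
 **/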
static int i40e_vsi_setup_queue_map_mqprio(struct i40e_vsi *vsi,
                                           struct i40e_vsi_context *ctxt,
                                           u8 enabled_tc)
{
        u16 qcount = 0, max_qcount, qmap, sections = 0;
        int i, override_q, pow, num_qps, ret;
        u8 netdev_tc = 0, offset = 0;

        if (vsi->type != I40E_VSI_MAIN)
                return -EINVAL;
        sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
        sections |= I40E_AQ_VSI_PROP_SCHED_VALID;
        vsi->tc_config.numtc = vsi->mqprio_qopt.qopt.num_tc;
        vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
        num_qps = vsi->mqprio_qopt.qopt.count[0];

        /* find the next higher power-of-2 of num queue pairs */
        pow = ilog2(num_qps);
        if (!is_power_of_2(num_qps))
                pow++;
        qmap = (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
                (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);

        /* Setup queue offset/count for all TCs for given VSI */
        max_qcount = vsi->mqprio_qopt.qopt.count[0];
        for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
                /* See if the given TC is enabled for the given VSI */
                if (vsi->tc_config.enabled_tc & BIT(i)) {
                        offset = vsi->mqprio_qopt.qopt.offset[i];
                        qcount = vsi->mqprio_qopt.qopt.count[i];
                        if (qcount > max_qcount)
                                max_qcount = qcount;
                        vsi->tc_config.tc_info[i].qoffset = offset;
                        vsi->tc_config.tc_info[i].qcount = qcount;
                        vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++;
                } else {
                        /* TC is not enabled so set the offset to
                         * default queue and allocate one queue
                         * for the given TC.
                         */
                        vsi->tc_config.tc_info[i].qoffset = 0;
                        vsi->tc_config.tc_info[i].qcount = 1;
                        vsi->tc_config.tc_info[i].netdev_tc = 0;
                }
        }

        /* Set actual Tx/Rx queue pairs */
        vsi->num_queue_pairs = offset + qcount;

        /* Setup queue TC[0].qmap for given VSI context */
        ctxt->info.tc_mapping[0] = cpu_to_le16(qmap);
        ctxt->info.mapping_flags |= cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
        ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
        ctxt->info.valid_sections |= cpu_to_le16(sections);

        /* Reconfigure RSS for main VSI with max queue count */
        vsi->rss_size = max_qcount;
        ret = i40e_vsi_config_rss(vsi);
        if (ret) {
                dev_info(&vsi->back->pdev->dev,
                         "Failed to reconfig rss for num_queues (%u)\n",
                         max_qcount);
                return ret;
        }
        vsi->reconfig_rss = true;
        dev_dbg(&vsi->back->pdev->dev,
                "Reconfigured rss with num_queues (%u)\n", max_qcount);

        /* Find queue count available for channel VSIs and starting offset
         * for channel VSIs
         */
        override_q = vsi->mqprio_qopt.qopt.count[0];
        if (override_q && override_q < vsi->num_queue_pairs) {
                vsi->cnt_q_avail = vsi->num_queue_pairs - override_q;
                vsi->next_base_queue = override_q;
        }
        return 0;
}

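/**
 * i40e_vsi_setup_queue_map - Setup a VSI queue map based on enabled_tc
 * @vsi: the VSI being setup
 * @ctxt: VSI context structure
 * @enabled_tc: Enabled TCs bitmap
 * @is_add: True if called before Add VSI
 *
 * Setup VSI queue mapping for enabled traffic classes.
 **/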
static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
                                     struct i40e_vsi_context *ctxt,
                                     u8 enabled_tc,
                                     bool is_add)
{
        struct i40e_pf *pf = vsi->back;
        u16 sections = 0;
        u8 netdev_tc = 0;
        u16 numtc = 1;
        u16 qcount;
        u8 offset;
        u16 qmap;
        int i;
        u16 num_tc_qps = 0;

        sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
        offset = 0;

        /* Number of queues per enabled TC */
        num_tc_qps = vsi->alloc_queue_pairs;
        if (enabled_tc && (vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
                /* How many traffic classes are enabled? */
                for (i = 0, numtc = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
                        if (enabled_tc & BIT(i))
                                numtc++;
                }
                if (!numtc) {
                        dev_warn(&pf->pdev->dev, "DCB is enabled but no TC enabled, forcing TC0\n");
                        numtc = 1;
                }
                num_tc_qps = num_tc_qps / numtc;
                num_tc_qps = min_t(int, num_tc_qps,
                                   i40e_pf_get_max_q_per_tc(pf));
        }

        vsi->tc_config.numtc = numtc;
        vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;

        /* Do not allow use more TC queue pairs than MSI-X vectors exist */
        if (pf->flags & I40E_FLAG_MSIX_ENABLED)
                num_tc_qps = min_t(int, num_tc_qps, pf->num_lan_msix);

        /* Setup queue offset/count for all TCs for given VSI */
        for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
                /* See if the given TC is enabled for the given VSI */
                if (vsi->tc_config.enabled_tc & BIT(i)) {
                        /* TC is enabled */
                        int pow, num_qps;

                        switch (vsi->type) {
                        case I40E_VSI_MAIN:
                                if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED |
                                    I40E_FLAG_FD_ATR_ENABLED)) ||
                                    vsi->tc_config.enabled_tc != 1) {
                                        qcount = min_t(int, pf->alloc_rss_size,
                                                       num_tc_qps);
                                        break;
                                }
                                /* fall through */
                        case I40E_VSI_FDIR:
                        case I40E_VSI_SRIOV:
                        case I40E_VSI_VMDQ2:
                        default:
                                qcount = num_tc_qps;
                                WARN_ON(i != 0);
                                break;
                        }
                        vsi->tc_config.tc_info[i].qoffset = offset;
                        vsi->tc_config.tc_info[i].qcount = qcount;

                        /* find the next higher power-of-2 of num queue pairs */
                        num_qps = qcount;
                        pow = 0;
                        while (num_qps && (BIT_ULL(pow) < qcount)) {
                                pow++;
                                num_qps >>= 1;
                        }

                        vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++;
                        qmap =
                            (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
                            (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);

                        offset += qcount;
                } else {
                        /* TC is not enabled so set the offset to
                         * default queue and allocate one queue
                         * for the given TC.
                         */
                        vsi->tc_config.tc_info[i].qoffset = 0;
                        vsi->tc_config.tc_info[i].qcount = 1;
                        vsi->tc_config.tc_info[i].netdev_tc = 0;

                        qmap = 0;
                }
                ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
        }

        /* Set actual Tx/Rx queue pairs */
        vsi->num_queue_pairs = offset;
        if ((vsi->type == I40E_VSI_MAIN) && (numtc == 1)) {
                if (vsi->req_queue_pairs > 0)
                        vsi->num_queue_pairs = vsi->req_queue_pairs;
                else if (pf->flags & I40E_FLAG_MSIX_ENABLED)
                        vsi->num_queue_pairs = pf->num_lan_msix;
        }

        /* Scheduler section valid can only be set for ADD VSI */
        if (is_add) {
                sections |= I40E_AQ_VSI_PROP_SCHED_VALID;

                ctxt->info.up_enable_bits = enabled_tc;
        }
        if (vsi->type == I40E_VSI_SRIOV) {
                ctxt->info.mapping_flags |=
                        cpu_to_le16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
                for (i = 0; i < vsi->num_queue_pairs; i++)
                        ctxt->info.queue_mapping[i] =
                                cpu_to_le16(vsi->base_queue + i);
        } else {
                ctxt->info.mapping_flags |=
                        cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
                ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
        }
        ctxt->info.valid_sections |= cpu_to_le16(sections);
}

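/**
 * i40e_addr_sync - Callback for dev_(mc|uc)_sync to add address
 * @netdev: the netdevice
 * @addr: address to add
 *
 * Called by __dev_(mc|uc)_sync when an address needs to be added. We call
 * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
 **/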
static int i40e_addr_sync(struct net_device *netdev, const u8 *addr)
{
        struct i40e_netdev_priv *np = netdev_priv(netdev);
        struct i40e_vsi *vsi = np->vsi;

        if (i40e_add_mac_filter(vsi, addr))
                return 0;
        else
                return -ENOMEM;
}

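/**
 * i40e_addr_unsync - Callback for dev_(mc|uc)_sync to remove address
 * @netdev: the netdevice
 * @addr: address to remove
 *
 * Called by __dev_(mc|uc)_sync when an address needs to be removed. We call
 * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
 **/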
static int i40e_addr_unsync(struct net_device *netdev, const u8 *addr)
{
        struct i40e_netdev_priv *np = netdev_priv(netdev);
        struct i40e_vsi *vsi = np->vsi;

        /* Under some circumstances, we might receive a request to delete
         * our own device address from our uc list. Because we store the
         * device address in the VSI's MAC/VLAN filter list, we need to ignore
         * such requests and not delete our device address from this list.
         */
        if (ether_addr_equal(addr, netdev->dev_addr))
                return 0;

        i40e_del_mac_filter(vsi, addr);

        return 0;
}

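/**
 * i40e_set_rx_mode - NDO callback to set the netdev filters
 * @netdev: network interface device structure
 **/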
static void i40e_set_rx_mode(struct net_device *netdev)
{
        struct i40e_netdev_priv *np = netdev_priv(netdev);
        struct i40e_vsi *vsi = np->vsi;

        spin_lock_bh(&vsi->mac_filter_hash_lock);

        __dev_uc_sync(netdev, i40e_addr_sync, i40e_addr_unsync);
        __dev_mc_sync(netdev, i40e_addr_sync, i40e_addr_unsync);

        spin_unlock_bh(&vsi->mac_filter_hash_lock);

        /* check for other flag changes */
        if (vsi->current_netdev_flags != vsi->netdev->flags) {
                vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
                set_bit(__I40E_MACVLAN_SYNC_PENDING, vsi->back->state);
        }
}

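/**
 * i40e_undo_del_filter_entries - Undo the changes made to MAC filter entries
 * @vsi: Pointer to VSI struct
 * @from: Pointer to list which contains MAC filter entries - changes to
 *        those entries needs to be undone.
 *
 * MAC filter entries from this list were slated for deletion.
 **/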
static void i40e_undo_del_filter_entries(struct i40e_vsi *vsi,
                                         struct hlist_head *from)
{
        struct i40e_mac_filter *f;
        struct hlist_node *h;

        hlist_for_each_entry_safe(f, h, from, hlist) {
                u64 key = i40e_addr_to_hkey(f->macaddr);

                /* Move the element back into MAC filter list */
                hlist_del(&f->hlist);
                hash_add(vsi->mac_filter_hash, &f->hlist, key);
        }
}

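/**
 * i40e_undo_add_filter_entries - Undo the changes made to MAC filter entries
 * @vsi: Pointer to vsi struct
 * @from: Pointer to list which contains MAC filter entries - changes to
 *        those entries needs to be undone.
 *
 * MAC filter entries from this list were slated for addition.
 **/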
static void i40e_undo_add_filter_entries(struct i40e_vsi *vsi,
                                         struct hlist_head *from)
{
        struct i40e_new_mac_filter *new;
        struct hlist_node *h;

        hlist_for_each_entry_safe(new, h, from, hlist) {
                /* We can simply free the wrapper structure */
                hlist_del(&new->hlist);
                kfree(new);
        }
}

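/**
 * i40e_next_filter - Get the next non-broadcast filter from a list
 * @next: pointer to filter in list
 *
 * Returns the next non-broadcast filter in the list. Required so that we
 * ignore broadcast filters within the list, since these are not handled via
 * the normal firmware update path.
 **/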
static
struct i40e_new_mac_filter *i40e_next_filter(struct i40e_new_mac_filter *next)
{
        hlist_for_each_entry_continue(next, hlist) {
                if (!is_broadcast_ether_addr(next->f->macaddr))
                        return next;
        }

        return NULL;
}

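/**
 * i40e_update_filter_state - Update filter state based on return data
 * from firmware
 * @count: Number of filters added
 * @add_list: return data from fw
 * @add_head: pointer to first filter in current batch
 *
 * MAC filter entries from list were slated to be added to device. Returns
 * number of successful filters. Note that 0 does NOT mean failure!
 **/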
2030 static int
2031 i40e_update_filter_state(int count,
2032 struct i40e_aqc_add_macvlan_element_data *add_list,
2033 struct i40e_new_mac_filter *add_head)
2034 {
2035 int retval = 0;
2036 int i;
2037
2038 for (i = 0; i < count; i++) {
2039
2040
2041
2042
2043
2044
2045 if (add_list[i].match_method == I40E_AQC_MM_ERR_NO_RES) {
2046 add_head->state = I40E_FILTER_FAILED;
2047 } else {
2048 add_head->state = I40E_FILTER_ACTIVE;
2049 retval++;
2050 }
2051
2052 add_head = i40e_next_filter(add_head);
2053 if (!add_head)
2054 break;
2055 }
2056
2057 return retval;
2058 }
2059
2060
2061 /**
2062 * i40e_aqc_del_filters - Request firmware to delete a set of filters
2063 * @vsi: ptr to the VSI
2064 * @vsi_name: name to display in messages
2065 * @list: the list of filters to send to firmware
2066 * @num_del: the number of filters to delete
2067 * @retval: Set to -EIO on failure to delete
2068 *
2069 * Send a request to firmware via AdminQ to delete a set of filters. Uses
2070 * *retval instead of a return value so that callers can ignore the error
2071 * on deletion; an ENOENT response from firmware is not treated as an error.
2072 **/
2073 static
2074 void i40e_aqc_del_filters(struct i40e_vsi *vsi, const char *vsi_name,
2075 struct i40e_aqc_remove_macvlan_element_data *list,
2076 int num_del, int *retval)
2077 {
2078 struct i40e_hw *hw = &vsi->back->hw;
2079 i40e_status aq_ret;
2080 int aq_err;
2081
2082 aq_ret = i40e_aq_remove_macvlan(hw, vsi->seid, list, num_del, NULL);
2083 aq_err = hw->aq.asq_last_status;
2084
2085 /* Ignore ENOENT from firmware: the filter was already removed */
2086 if (aq_ret && !(aq_err == I40E_AQ_RC_ENOENT)) {
2087 *retval = -EIO;
2088 dev_info(&vsi->back->pdev->dev,
2089 "ignoring delete macvlan error on %s, err %s, aq_err %s\n",
2090 vsi_name, i40e_stat_str(hw, aq_ret),
2091 i40e_aq_str(hw, aq_err));
2092 }
2093 }
2094
2095 /**
2096 * i40e_aqc_add_filters - Request firmware to add a set of filters
2097 * @vsi: ptr to the VSI
2098 * @vsi_name: name to display in messages
2099 * @list: the list of filters to send to firmware
2100 * @add_head: Position in the add hlist
2101 * @num_add: the number of filters to add
2102 *
2103 * Send a request to firmware via AdminQ to add a chunk of filters. Will set
2104 * __I40E_VSI_OVERFLOW_PROMISC bit in vsi->state if the firmware has run out
2105 * of space for more filters.
2106 **/
2107 static
2108 void i40e_aqc_add_filters(struct i40e_vsi *vsi, const char *vsi_name,
2109 struct i40e_aqc_add_macvlan_element_data *list,
2110 struct i40e_new_mac_filter *add_head,
2111 int num_add)
2112 {
2113 struct i40e_hw *hw = &vsi->back->hw;
2114 int aq_err, fcnt;
2115
2116 i40e_aq_add_macvlan(hw, vsi->seid, list, num_add, NULL);
2117 aq_err = hw->aq.asq_last_status;
2118 fcnt = i40e_update_filter_state(num_add, list, add_head);
2119
2120 if (fcnt != num_add) {
2121 if (vsi->type == I40E_VSI_MAIN) {
2122 set_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2123 dev_warn(&vsi->back->pdev->dev,
2124 "Error %s adding RX filters on %s, promiscuous mode forced on\n",
2125 i40e_aq_str(hw, aq_err), vsi_name);
2126 } else if (vsi->type == I40E_VSI_SRIOV ||
2127 vsi->type == I40E_VSI_VMDQ1 ||
2128 vsi->type == I40E_VSI_VMDQ2) {
2129 dev_warn(&vsi->back->pdev->dev,
2130 "Error %s adding RX filters on %s, please set promiscuous on manually for %s\n",
2131 i40e_aq_str(hw, aq_err), vsi_name, vsi_name);
2132 } else {
2133 dev_warn(&vsi->back->pdev->dev,
2134 "Error %s adding RX filters on %s, incorrect VSI type: %i.\n",
2135 i40e_aq_str(hw, aq_err), vsi_name, vsi->type);
2136 }
2137 }
2138 }
2139
2140 /**
2141 * i40e_aqc_broadcast_filter - Set promiscuous broadcast flags
2142 * @vsi: pointer to the VSI
2143 * @vsi_name: the VSI name
2144 * @f: filter data
2145 *
2146 * This function sets or clears the promiscuous broadcast flags for VLAN
2147 * filters in order to properly receive broadcast frames. Assumes that only
2148 * broadcast filters are passed.
2149 *
2150 * Returns status indicating success or failure;
2151 **/
2152 static i40e_status
2153 i40e_aqc_broadcast_filter(struct i40e_vsi *vsi, const char *vsi_name,
2154 struct i40e_mac_filter *f)
2155 {
2156 bool enable = f->state == I40E_FILTER_NEW;
2157 struct i40e_hw *hw = &vsi->back->hw;
2158 i40e_status aq_ret;
2159
2160 if (f->vlan == I40E_VLAN_ANY) {
2161 aq_ret = i40e_aq_set_vsi_broadcast(hw,
2162 vsi->seid,
2163 enable,
2164 NULL);
2165 } else {
2166 aq_ret = i40e_aq_set_vsi_bc_promisc_on_vlan(hw,
2167 vsi->seid,
2168 enable,
2169 f->vlan,
2170 NULL);
2171 }
2172
2173 if (aq_ret) {
2174 set_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2175 dev_warn(&vsi->back->pdev->dev,
2176 "Error %s, forcing overflow promiscuous on %s\n",
2177 i40e_aq_str(hw, hw->aq.asq_last_status),
2178 vsi_name);
2179 }
2180
2181 return aq_ret;
2182 }
2183
2184
2185 /**
2186 * i40e_set_promiscuous - set promiscuous mode
2187 * @pf: board private structure
2188 * @promisc: promisc on or off
2189 *
2190 * There are different ways of setting promiscuous mode on a PF depending on
2191 * what state/environment we're in. This could change based on MFP mode.
2192 **/
2193 static int i40e_set_promiscuous(struct i40e_pf *pf, bool promisc)
2194 {
2195 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
2196 struct i40e_hw *hw = &pf->hw;
2197 i40e_status aq_ret;
2198
2199 if (vsi->type == I40E_VSI_MAIN &&
2200 pf->lan_veb != I40E_NO_VEB &&
2201 !(pf->flags & I40E_FLAG_MFP_ENABLED)) {
2202 /* set defport ON for Main VSI instead of true promisc
2203 * this way we will get all unicast/multicast and VLAN
2204 * promisc behavior but will not get VF or VMDq traffic
2205 * replicated on the Main VSI.
2206 */
2207 if (promisc)
2208 aq_ret = i40e_aq_set_default_vsi(hw,
2209 vsi->seid,
2210 NULL);
2211 else
2212 aq_ret = i40e_aq_clear_default_vsi(hw,
2213 vsi->seid,
2214 NULL);
2215 if (aq_ret) {
2216 dev_info(&pf->pdev->dev,
2217 "Set default VSI failed, err %s, aq_err %s\n",
2218 i40e_stat_str(hw, aq_ret),
2219 i40e_aq_str(hw, hw->aq.asq_last_status));
2220 }
2221 } else {
2222 aq_ret = i40e_aq_set_vsi_unicast_promiscuous(
2223 hw,
2224 vsi->seid,
2225 promisc, NULL,
2226 true);
2227 if (aq_ret) {
2228 dev_info(&pf->pdev->dev,
2229 "set unicast promisc failed, err %s, aq_err %s\n",
2230 i40e_stat_str(hw, aq_ret),
2231 i40e_aq_str(hw, hw->aq.asq_last_status));
2232 }
2233 aq_ret = i40e_aq_set_vsi_multicast_promiscuous(
2234 hw,
2235 vsi->seid,
2236 promisc, NULL);
2237 if (aq_ret) {
2238 dev_info(&pf->pdev->dev,
2239 "set multicast promisc failed, err %s, aq_err %s\n",
2240 i40e_stat_str(hw, aq_ret),
2241 i40e_aq_str(hw, hw->aq.asq_last_status));
2242 }
2243 }
2244
2245 if (!aq_ret)
2246 pf->cur_promisc = promisc;
2247
2248 return aq_ret;
2249 }
2250
2251 /**
2252 * i40e_sync_vsi_filters - Update the VSI filter list to the HW
2253 * @vsi: ptr to the VSI
2254 *
2255 * Push any outstanding VSI filter changes through the AdminQ.
2256 *
2257 * Returns 0 or error value
2258 **/
2259 int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
2260 {
2261 struct hlist_head tmp_add_list, tmp_del_list;
2262 struct i40e_mac_filter *f;
2263 struct i40e_new_mac_filter *new, *add_head = NULL;
2264 struct i40e_hw *hw = &vsi->back->hw;
2265 bool old_overflow, new_overflow;
2266 unsigned int failed_filters = 0;
2267 unsigned int vlan_filters = 0;
2268 char vsi_name[16] = "PF";
2269 int filter_list_len = 0;
2270 i40e_status aq_ret = 0;
2271 u32 changed_flags = 0;
2272 struct hlist_node *h;
2273 struct i40e_pf *pf;
2274 int num_add = 0;
2275 int num_del = 0;
2276 int retval = 0;
2277 u16 cmd_flags;
2278 int list_size;
2279 int bkt;
2280
2281
2282 struct i40e_aqc_add_macvlan_element_data *add_list;
2283 struct i40e_aqc_remove_macvlan_element_data *del_list;
2284
2285 while (test_and_set_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state))
2286 usleep_range(1000, 2000);
2287 pf = vsi->back;
2288
2289 old_overflow = test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2290
2291 if (vsi->netdev) {
2292 changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
2293 vsi->current_netdev_flags = vsi->netdev->flags;
2294 }
2295
2296 INIT_HLIST_HEAD(&tmp_add_list);
2297 INIT_HLIST_HEAD(&tmp_del_list);
2298
2299 if (vsi->type == I40E_VSI_SRIOV)
2300 snprintf(vsi_name, sizeof(vsi_name) - 1, "VF %d", vsi->vf_id);
2301 else if (vsi->type != I40E_VSI_MAIN)
2302 snprintf(vsi_name, sizeof(vsi_name) - 1, "vsi %d", vsi->seid);
2303
2304 if (vsi->flags & I40E_VSI_FLAG_FILTER_CHANGED) {
2305 vsi->flags &= ~I40E_VSI_FLAG_FILTER_CHANGED;
2306
2307 spin_lock_bh(&vsi->mac_filter_hash_lock);
2308 /* Create a list of filters to delete. */
2309 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
2310 if (f->state == I40E_FILTER_REMOVE) {
2311 /* Move the element into temporary del_list */
2312 hash_del(&f->hlist);
2313 hlist_add_head(&f->hlist, &tmp_del_list);
2314
2315 /* Avoid counting removed filters */
2316 continue;
2317 }
2318 if (f->state == I40E_FILTER_NEW) {
2319 /* Create a temporary i40e_new_mac_filter */
2320 new = kzalloc(sizeof(*new), GFP_ATOMIC);
2321 if (!new)
2322 goto err_no_memory_locked;
2323
2324 /* Store pointer to the real filter */
2325 new->f = f;
2326 new->state = f->state;
2327
2328 /* Add it to the hash list */
2329 hlist_add_head(&new->hlist, &tmp_add_list);
2330 }
2331
2332 /* Count the number of active (current and new) VLAN
2333 * filters we have now. Does not count filters which
2334 * are marked for deletion.
2335 */
2336 if (f->vlan > 0)
2337 vlan_filters++;
2338 }
2339
2340 retval = i40e_correct_mac_vlan_filters(vsi,
2341 &tmp_add_list,
2342 &tmp_del_list,
2343 vlan_filters);
2344 if (retval)
2345 goto err_no_memory_locked;
2346
2347 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2348 }
2349
2350 /* Now process 'del_list' outside the lock */
2351 if (!hlist_empty(&tmp_del_list)) {
2352 filter_list_len = hw->aq.asq_buf_size /
2353 sizeof(struct i40e_aqc_remove_macvlan_element_data);
2354 list_size = filter_list_len *
2355 sizeof(struct i40e_aqc_remove_macvlan_element_data);
2356 del_list = kzalloc(list_size, GFP_ATOMIC);
2357 if (!del_list)
2358 goto err_no_memory;
2359
2360 hlist_for_each_entry_safe(f, h, &tmp_del_list, hlist) {
2361 cmd_flags = 0;
2362
2363 /* handle broadcast filters by updating the broadcast
2364 * promiscuous flag and release filter list.
2365 */
2366 if (is_broadcast_ether_addr(f->macaddr)) {
2367 i40e_aqc_broadcast_filter(vsi, vsi_name, f);
2368
2369 hlist_del(&f->hlist);
2370 kfree(f);
2371 continue;
2372 }
2373
2374 /* add to delete list */
2375 ether_addr_copy(del_list[num_del].mac_addr, f->macaddr);
2376 if (f->vlan == I40E_VLAN_ANY) {
2377 del_list[num_del].vlan_tag = 0;
2378 cmd_flags |= I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
2379 } else {
2380 del_list[num_del].vlan_tag =
2381 cpu_to_le16((u16)(f->vlan));
2382 }
2383
2384 cmd_flags |= I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
2385 del_list[num_del].flags = cmd_flags;
2386 num_del++;
2387
2388 /* flush a full buffer */
2389 if (num_del == filter_list_len) {
2390 i40e_aqc_del_filters(vsi, vsi_name, del_list,
2391 num_del, &retval);
2392 memset(del_list, 0, list_size);
2393 num_del = 0;
2394 }
2395
2396 /* Release memory for MAC filter entries which were
2397 * synced up with HW. */
2398 hlist_del(&f->hlist);
2399 kfree(f);
2400 }
2401
2402 if (num_del) {
2403 i40e_aqc_del_filters(vsi, vsi_name, del_list,
2404 num_del, &retval);
2405 }
2406
2407 kfree(del_list);
2408 del_list = NULL;
2409 }
2410
2411 if (!hlist_empty(&tmp_add_list)) {
2412
2413 filter_list_len = hw->aq.asq_buf_size /
2414 sizeof(struct i40e_aqc_add_macvlan_element_data);
2415 list_size = filter_list_len *
2416 sizeof(struct i40e_aqc_add_macvlan_element_data);
2417 add_list = kzalloc(list_size, GFP_ATOMIC);
2418 if (!add_list)
2419 goto err_no_memory;
2420
2421 num_add = 0;
2422 hlist_for_each_entry_safe(new, h, &tmp_add_list, hlist) {
2423 /* handle broadcast filters by updating the broadcast
2424 * promiscuous flag instead of adding a MAC filter.
2425 */
2426 if (is_broadcast_ether_addr(new->f->macaddr)) {
2427 if (i40e_aqc_broadcast_filter(vsi, vsi_name,
2428 new->f))
2429 new->state = I40E_FILTER_FAILED;
2430 else
2431 new->state = I40E_FILTER_ACTIVE;
2432 continue;
2433 }
2434
2435 /* add to add array */
2436 if (num_add == 0)
2437 add_head = new;
2438 cmd_flags = 0;
2439 ether_addr_copy(add_list[num_add].mac_addr,
2440 new->f->macaddr);
2441 if (new->f->vlan == I40E_VLAN_ANY) {
2442 add_list[num_add].vlan_tag = 0;
2443 cmd_flags |= I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
2444 } else {
2445 add_list[num_add].vlan_tag =
2446 cpu_to_le16((u16)(new->f->vlan));
2447 }
2448 add_list[num_add].queue_number = 0;
2449 /* set invalid match method for later detection */
2450 add_list[num_add].match_method = I40E_AQC_MM_ERR_NO_RES;
2451 cmd_flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
2452 add_list[num_add].flags = cpu_to_le16(cmd_flags);
2453 num_add++;
2454
2455 /* flush a full buffer */
2456 if (num_add == filter_list_len) {
2457 i40e_aqc_add_filters(vsi, vsi_name, add_list,
2458 add_head, num_add);
2459 memset(add_list, 0, list_size);
2460 num_add = 0;
2461 }
2462 }
2463 if (num_add) {
2464 i40e_aqc_add_filters(vsi, vsi_name, add_list, add_head,
2465 num_add);
2466 }
2467
2468 /* Now move all of the filters from the temp add list
2469 * back to the VSI's list. */
2470 spin_lock_bh(&vsi->mac_filter_hash_lock);
2471 hlist_for_each_entry_safe(new, h, &tmp_add_list, hlist) {
2472 /* Only update the state if we're still NEW */
2473 if (new->f->state == I40E_FILTER_NEW)
2474 new->f->state = new->state;
2475 hlist_del(&new->hlist);
2476 kfree(new);
2477 }
2478 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2479 kfree(add_list);
2480 add_list = NULL;
2481 }
2482
2483 /* Determine the number of active and failed filters. */
2484 spin_lock_bh(&vsi->mac_filter_hash_lock);
2485 vsi->active_filters = 0;
2486 hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
2487 if (f->state == I40E_FILTER_ACTIVE)
2488 vsi->active_filters++;
2489 else if (f->state == I40E_FILTER_FAILED)
2490 failed_filters++;
2491 }
2492 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2493
2494 /* Check if we are able to exit overflow promiscuous mode. We can
2495 * safely exit if we didn't just enter, we no longer have any failed
2496 * filters, and we have reduced filters below the threshold value.
2497 */
2498 if (old_overflow && !failed_filters &&
2499 vsi->active_filters < vsi->promisc_threshold) {
2500 dev_info(&pf->pdev->dev,
2501 "filter logjam cleared on %s, leaving overflow promiscuous mode\n",
2502 vsi_name);
2503 clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2504 vsi->promisc_threshold = 0;
2505 }
2506
2507 /* if the VF is not trusted do not do promisc */
2508 if ((vsi->type == I40E_VSI_SRIOV) && !pf->vf[vsi->vf_id].trusted) {
2509 clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2510 goto out;
2511 }
2512
2513 new_overflow = test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2514
2515 /* If we are entering overflow promiscuous, we need to calculate a new
2516 * threshold for when we are safe to exit
2517 */
2518 if (!old_overflow && new_overflow)
2519 vsi->promisc_threshold = (vsi->active_filters * 3) / 4;
2520
2521 /* check for changes in promiscuous modes */
2522 if (changed_flags & IFF_ALLMULTI) {
2523 bool cur_multipromisc;
2524
2525 cur_multipromisc = !!(vsi->current_netdev_flags & IFF_ALLMULTI);
2526 aq_ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw,
2527 vsi->seid,
2528 cur_multipromisc,
2529 NULL);
2530 if (aq_ret) {
2531 retval = i40e_aq_rc_to_posix(aq_ret,
2532 hw->aq.asq_last_status);
2533 dev_info(&pf->pdev->dev,
2534 "set multi promisc failed on %s, err %s aq_err %s\n",
2535 vsi_name,
2536 i40e_stat_str(hw, aq_ret),
2537 i40e_aq_str(hw, hw->aq.asq_last_status));
2538 } else {
2539 dev_info(&pf->pdev->dev, "%s is %s allmulti mode.\n",
2540 vsi->netdev->name,
2541 cur_multipromisc ? "entering" : "leaving");
2542 }
2543 }
2544
2545 if ((changed_flags & IFF_PROMISC) || old_overflow != new_overflow) {
2546 bool cur_promisc;
2547
2548 cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) ||
2549 new_overflow);
2550 aq_ret = i40e_set_promiscuous(pf, cur_promisc);
2551 if (aq_ret) {
2552 retval = i40e_aq_rc_to_posix(aq_ret,
2553 hw->aq.asq_last_status);
2554 dev_info(&pf->pdev->dev,
2555 "Setting promiscuous %s failed on %s, err %s aq_err %s\n",
2556 cur_promisc ? "on" : "off",
2557 vsi_name,
2558 i40e_stat_str(hw, aq_ret),
2559 i40e_aq_str(hw, hw->aq.asq_last_status));
2560 }
2561 }
2562 out:
2563 /* if something went wrong then set the changed flag so we try again */
2564 if (retval)
2565 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
2566
2567 clear_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state);
2568 return retval;
2569
2570 err_no_memory:
2571 /* Restore elements on the temporary add and delete lists */
2572 spin_lock_bh(&vsi->mac_filter_hash_lock);
2573 err_no_memory_locked:
2574 i40e_undo_del_filter_entries(vsi, &tmp_del_list);
2575 i40e_undo_add_filter_entries(vsi, &tmp_add_list);
2576 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2577
2578 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
2579 clear_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state);
2580 return -ENOMEM;
2581 }
2582
2583 /**
2584 * i40e_sync_filters_subtask - Sync the VSI filter list with HW
2585 * @pf: board private structure
2586 **/
2587 static void i40e_sync_filters_subtask(struct i40e_pf *pf)
2588 {
2589 int v;
2590
2591 if (!pf)
2592 return;
2593 if (!test_and_clear_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state))
2594 return;
2595 if (test_and_set_bit(__I40E_VF_DISABLE, pf->state)) {
2596 set_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state);
2597 return;
2598 }
2599
2600 for (v = 0; v < pf->num_alloc_vsi; v++) {
2601 if (pf->vsi[v] &&
2602 (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED)) {
2603 int ret = i40e_sync_vsi_filters(pf->vsi[v]);
2604
2605 if (ret) {
2606 /* come back and try again later */
2607 set_bit(__I40E_MACVLAN_SYNC_PENDING,
2608 pf->state);
2609 break;
2610 }
2611 }
2612 }
2613 clear_bit(__I40E_VF_DISABLE, pf->state);
2614 }
2615
2616 /**
2617 * i40e_max_xdp_frame_size - returns the maximum allowed frame size for XDP
2618 * @vsi: the vsi
2619 **/
2620 static int i40e_max_xdp_frame_size(struct i40e_vsi *vsi)
2621 {
2622 if (PAGE_SIZE >= 8192 || (vsi->back->flags & I40E_FLAG_LEGACY_RX))
2623 return I40E_RXBUFFER_2048;
2624 else
2625 return I40E_RXBUFFER_3072;
2626 }
2627
2628 /**
2629 * i40e_change_mtu - NDO callback to change the Maximum Transfer Unit
2630 * @netdev: network interface device structure
2631 * @new_mtu: new value for maximum frame size
2632 *
2633 * Returns 0 on success, negative on failure
2634 **/
2635 static int i40e_change_mtu(struct net_device *netdev, int new_mtu)
2636 {
2637 struct i40e_netdev_priv *np = netdev_priv(netdev);
2638 struct i40e_vsi *vsi = np->vsi;
2639 struct i40e_pf *pf = vsi->back;
2640
2641 if (i40e_enabled_xdp_vsi(vsi)) {
2642 int frame_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
2643
2644 if (frame_size > i40e_max_xdp_frame_size(vsi))
2645 return -EINVAL;
2646 }
2647
2648 netdev_info(netdev, "changing MTU from %d to %d\n",
2649 netdev->mtu, new_mtu);
2650 netdev->mtu = new_mtu;
2651 if (netif_running(netdev))
2652 i40e_vsi_reinit_locked(vsi);
2653 set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
2654 set_bit(__I40E_CLIENT_L2_CHANGE, pf->state);
2655 return 0;
2656 }
2657
2658 /**
2659 * i40e_ioctl - Access the hwtstamp interface
2660 * @netdev: network interface device structure
2661 * @ifr: interface request data
2662 * @cmd: ioctl command
2663 **/
2664 int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
2665 {
2666 struct i40e_netdev_priv *np = netdev_priv(netdev);
2667 struct i40e_pf *pf = np->vsi->back;
2668
2669 switch (cmd) {
2670 case SIOCGHWTSTAMP:
2671 return i40e_ptp_get_ts_config(pf, ifr);
2672 case SIOCSHWTSTAMP:
2673 return i40e_ptp_set_ts_config(pf, ifr);
2674 default:
2675 return -EOPNOTSUPP;
2676 }
2677 }
2678
2679 /**
2680 * i40e_vlan_stripping_enable - Turn on vlan stripping for the VSI
2681 * @vsi: the vsi being adjusted
2682 **/
2683 void i40e_vlan_stripping_enable(struct i40e_vsi *vsi)
2684 {
2685 struct i40e_vsi_context ctxt;
2686 i40e_status ret;
2687
2688 /* Don't modify stripping options if a port VLAN is active */
2689 if (vsi->info.pvid)
2690 return;
2691
2692 if ((vsi->info.valid_sections &
2693 cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
2694 ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_MODE_MASK) == 0))
2695 return;
2696
2697 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
2698 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
2699 I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
2700
2701 ctxt.seid = vsi->seid;
2702 ctxt.info = vsi->info;
2703 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
2704 if (ret) {
2705 dev_info(&vsi->back->pdev->dev,
2706 "update vlan stripping failed, err %s aq_err %s\n",
2707 i40e_stat_str(&vsi->back->hw, ret),
2708 i40e_aq_str(&vsi->back->hw,
2709 vsi->back->hw.aq.asq_last_status));
2710 }
2711 }
2712
2713 /**
2714 * i40e_vlan_stripping_disable - Turn off vlan stripping for the VSI
2715 * @vsi: the vsi being adjusted
2716 **/
2717 void i40e_vlan_stripping_disable(struct i40e_vsi *vsi)
2718 {
2719 struct i40e_vsi_context ctxt;
2720 i40e_status ret;
2721
2722 /* Don't modify stripping options if a port VLAN is active */
2723 if (vsi->info.pvid)
2724 return;
2725
2726 if ((vsi->info.valid_sections &
2727 cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
2728 ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
2729 I40E_AQ_VSI_PVLAN_EMOD_MASK))
2730 return;
2731
2732 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
2733 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
2734 I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
2735
2736 ctxt.seid = vsi->seid;
2737 ctxt.info = vsi->info;
2738 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
2739 if (ret) {
2740 dev_info(&vsi->back->pdev->dev,
2741 "update vlan stripping failed, err %s aq_err %s\n",
2742 i40e_stat_str(&vsi->back->hw, ret),
2743 i40e_aq_str(&vsi->back->hw,
2744 vsi->back->hw.aq.asq_last_status));
2745 }
2746 }
2747
2748 /**
2749 * i40e_add_vlan_all_mac - Add a MAC/VLAN filter for each existing MAC address
2750 * @vsi: the VSI being configured
2751 * @vid: VLAN id to be added (0 = untagged only , -1 = any)
2752 *
2753 * This is a helper function for adding a new MAC/VLAN filter with the
2754 * specified VLAN for each existing MAC address already in the hash table.
2755 * This function does *not* perform any accounting to update filters based on
2756 * VLAN mode.
2757 *
2758 * NOTE: this function expects to be called while under the
2759 * mac_filter_hash_lock
2760 **/
2761 int i40e_add_vlan_all_mac(struct i40e_vsi *vsi, s16 vid)
2762 {
2763 struct i40e_mac_filter *f, *add_f;
2764 struct hlist_node *h;
2765 int bkt;
2766
2767 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
2768 if (f->state == I40E_FILTER_REMOVE)
2769 continue;
2770 add_f = i40e_add_filter(vsi, f->macaddr, vid);
2771 if (!add_f) {
2772 dev_info(&vsi->back->pdev->dev,
2773 "Could not add vlan filter %d for %pM\n",
2774 vid, f->macaddr);
2775 return -ENOMEM;
2776 }
2777 }
2778
2779 return 0;
2780 }
2781
2782 /**
2783 * i40e_vsi_add_vlan - Add VSI membership for given VLAN
2784 * @vsi: the VSI being configured
2785 * @vid: VLAN id to be added
2786 **/
2787 int i40e_vsi_add_vlan(struct i40e_vsi *vsi, u16 vid)
2788 {
2789 int err;
2790
2791 if (vsi->info.pvid)
2792 return -EINVAL;
2793
2794 /* The network stack will attempt to add VID=0, with the intention to
2795 * receive priority tagged packets with a VLAN of 0. Our HW receives
2796 * these packets by default when configured to receive untagged
2797 * packets, so we don't need to add a filter for this case.
2798 * Additionally, HW interprets adding a VID=0 filter as meaning to
2799 * receive *only* tagged traffic and stops receiving untagged traffic.
2800 * Thus, we do not want to actually add a filter for VID=0
2801 */
2802 if (!vid)
2803 return 0;
2804
2805 /* Locked once because all functions invoked below iterates list */
2806 spin_lock_bh(&vsi->mac_filter_hash_lock);
2807 err = i40e_add_vlan_all_mac(vsi, vid);
2808 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2809 if (err)
2810 return err;
2811
2812 /* schedule our worker thread which will take care of
2813 * applying the new filter changes
2814 */
2815 i40e_service_event_schedule(vsi->back);
2816 return 0;
2817 }
2818
2819 /**
2820 * i40e_rm_vlan_all_mac - Remove MAC/VLAN pair for all MAC with the given VLAN
2821 * @vsi: the VSI being configured
2822 * @vid: VLAN id to be removed (0 = untagged only , -1 = any)
2823 *
2824 * This is a helper function for removing a MAC/VLAN filter with the specified
2825 * VLAN for each existing MAC address already in the hash table.
2826 * This function does *not* perform any accounting to update filters based on
2827 * VLAN mode.
2828 *
2829 * NOTE: this function expects to be called while under the
2830 * mac_filter_hash_lock
2831 **/
2832 void i40e_rm_vlan_all_mac(struct i40e_vsi *vsi, s16 vid)
2833 {
2834 struct i40e_mac_filter *f;
2835 struct hlist_node *h;
2836 int bkt;
2837
2838 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
2839 if (f->vlan == vid)
2840 __i40e_del_filter(vsi, f);
2841 }
2842 }
2843
2844 /**
2845 * i40e_vsi_kill_vlan - Remove VSI membership for given VLAN
2846 * @vsi: the VSI being configured
2847 * @vid: VLAN id to be removed
2848 **/
2849 void i40e_vsi_kill_vlan(struct i40e_vsi *vsi, u16 vid)
2850 {
2851 if (!vid || vsi->info.pvid)
2852 return;
2853
2854 spin_lock_bh(&vsi->mac_filter_hash_lock);
2855 i40e_rm_vlan_all_mac(vsi, vid);
2856 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2857
2858 /* schedule our worker thread which will take care of
2859 * applying the new filter changes
2860 */
2861 i40e_service_event_schedule(vsi->back);
2862 }
2863
2864 /**
2865 * i40e_vlan_rx_add_vid - Add a vlan id filter to HW offload
2866 * @netdev: network interface to be adjusted
2867 * @proto: unused protocol value
2868 * @vid: vlan id to be added
2869 *
2870 * net_device_ops implementation for adding vlan ids
2871 **/
2872 static int i40e_vlan_rx_add_vid(struct net_device *netdev,
2873 __always_unused __be16 proto, u16 vid)
2874 {
2875 struct i40e_netdev_priv *np = netdev_priv(netdev);
2876 struct i40e_vsi *vsi = np->vsi;
2877 int ret = 0;
2878
2879 if (vid >= VLAN_N_VID)
2880 return -EINVAL;
2881
2882 ret = i40e_vsi_add_vlan(vsi, vid);
2883 if (!ret)
2884 set_bit(vid, vsi->active_vlans);
2885
2886 return ret;
2887 }
2888
2889 /**
2890 * i40e_vlan_rx_add_vid_up - Add a vlan id filter to HW offload in UP path
2891 * @netdev: network interface to be adjusted
2892 * @proto: unused protocol value
2893 * @vid: vlan id to be added
2894 **/
2895 static void i40e_vlan_rx_add_vid_up(struct net_device *netdev,
2896 __always_unused __be16 proto, u16 vid)
2897 {
2898 struct i40e_netdev_priv *np = netdev_priv(netdev);
2899 struct i40e_vsi *vsi = np->vsi;
2900
2901 if (vid >= VLAN_N_VID)
2902 return;
2903 set_bit(vid, vsi->active_vlans);
2904 }
2905
2906 /**
2907 * i40e_vlan_rx_kill_vid - Remove a vlan id filter from HW offload
2908 * @netdev: network interface to be adjusted
2909 * @proto: unused protocol value
2910 * @vid: vlan id to be removed
2911 *
2912 * net_device_ops implementation for removing vlan ids
2913 **/
2914 static int i40e_vlan_rx_kill_vid(struct net_device *netdev,
2915 __always_unused __be16 proto, u16 vid)
2916 {
2917 struct i40e_netdev_priv *np = netdev_priv(netdev);
2918 struct i40e_vsi *vsi = np->vsi;
2919
2920 /* return code is ignored as there is nothing a user
2921 * can do about failure to remove and a log message was
2922 * already printed from the other function
2923 */
2924 i40e_vsi_kill_vlan(vsi, vid);
2925
2926 clear_bit(vid, vsi->active_vlans);
2927
2928 return 0;
2929 }
2930
2931 /**
2932 * i40e_restore_vlan - Reinstate vlans when vsi/netdev comes back up
2933 * @vsi: the vsi being brought back up
2934 **/
2935 static void i40e_restore_vlan(struct i40e_vsi *vsi)
2936 {
2937 u16 vid;
2938
2939 if (!vsi->netdev)
2940 return;
2941
2942 if (vsi->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
2943 i40e_vlan_stripping_enable(vsi);
2944 else
2945 i40e_vlan_stripping_disable(vsi);
2946
2947 for_each_set_bit(vid, vsi->active_vlans, VLAN_N_VID)
2948 i40e_vlan_rx_add_vid_up(vsi->netdev, htons(ETH_P_8021Q),
2949 vid);
2950 }
2951
2952 /**
2953 * i40e_vsi_add_pvid - Add pvid for the VSI
2954 * @vsi: the vsi being adjusted
2955 * @vid: the vlan id to set as a PVID
2956 **/
2957 int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
2958 {
2959 struct i40e_vsi_context ctxt;
2960 i40e_status ret;
2961
2962 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
2963 vsi->info.pvid = cpu_to_le16(vid);
2964 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_TAGGED |
2965 I40E_AQ_VSI_PVLAN_INSERT_PVID |
2966 I40E_AQ_VSI_PVLAN_EMOD_STR;
2967
2968 ctxt.seid = vsi->seid;
2969 ctxt.info = vsi->info;
2970 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
2971 if (ret) {
2972 dev_info(&vsi->back->pdev->dev,
2973 "add pvid failed, err %s aq_err %s\n",
2974 i40e_stat_str(&vsi->back->hw, ret),
2975 i40e_aq_str(&vsi->back->hw,
2976 vsi->back->hw.aq.asq_last_status));
2977 return -ENOENT;
2978 }
2979
2980 return 0;
2981 }
2982
2983 /**
2984 * i40e_vsi_remove_pvid - Remove the pvid from the VSI
2985 * @vsi: the vsi being adjusted
2986 *
2987 * Just use the vlan_rx_register() service to put it back to normal
2988 **/
2989 void i40e_vsi_remove_pvid(struct i40e_vsi *vsi)
2990 {
2991 vsi->info.pvid = 0;
2992
2993 i40e_vlan_stripping_disable(vsi);
2994 }
2995
2996 /**
2997 * i40e_vsi_setup_tx_resources - Allocate VSI Tx queue resources
2998 * @vsi: ptr to the VSI
2999 *
3000 * If this function returns with an error, then it's possible one or
3001 * more of the rings is populated (while the rest are not).  It is the
3002 * callers duty to clean those orphaned rings.
3003 *
3004 * Return 0 on success, negative on failure
3005 **/
3006 static int i40e_vsi_setup_tx_resources(struct i40e_vsi *vsi)
3007 {
3008 int i, err = 0;
3009
3010 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
3011 err = i40e_setup_tx_descriptors(vsi->tx_rings[i]);
3012
3013 if (!i40e_enabled_xdp_vsi(vsi))
3014 return err;
3015
3016 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
3017 err = i40e_setup_tx_descriptors(vsi->xdp_rings[i]);
3018
3019 return err;
3020 }
3021
3022 /**
3023 * i40e_vsi_free_tx_resources - Free Tx resources for VSI queues
3024 * @vsi: ptr to the VSI
3025 *
3026 * Free VSI's transmit software resources
3027 **/
3028 static void i40e_vsi_free_tx_resources(struct i40e_vsi *vsi)
3029 {
3030 int i;
3031
3032 if (vsi->tx_rings) {
3033 for (i = 0; i < vsi->num_queue_pairs; i++)
3034 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
3035 i40e_free_tx_resources(vsi->tx_rings[i]);
3036 }
3037
3038 if (vsi->xdp_rings) {
3039 for (i = 0; i < vsi->num_queue_pairs; i++)
3040 if (vsi->xdp_rings[i] && vsi->xdp_rings[i]->desc)
3041 i40e_free_tx_resources(vsi->xdp_rings[i]);
3042 }
3043 }
3044
3045 /**
3046 * i40e_vsi_setup_rx_resources - Allocate VSI queues Rx resources
3047 * @vsi: ptr to the VSI
3048 *
3049 * If this function returns with an error, then it's possible one or
3050 * more of the rings is populated (while the rest are not).  It is the
3051 * callers duty to clean those orphaned rings.
3052 *
3053 * Return 0 on success, negative on failure
3054 **/
3055 static int i40e_vsi_setup_rx_resources(struct i40e_vsi *vsi)
3056 {
3057 int i, err = 0;
3058
3059 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
3060 err = i40e_setup_rx_descriptors(vsi->rx_rings[i]);
3061 return err;
3062 }
3063
3064 /**
3065 * i40e_vsi_free_rx_resources - Free Rx Resources for VSI queues
3066 * @vsi: ptr to the VSI
3067 *
3068 * Free all receive software resources
3069 **/
3070 static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi)
3071 {
3072 int i;
3073
3074 if (!vsi->rx_rings)
3075 return;
3076
3077 for (i = 0; i < vsi->num_queue_pairs; i++)
3078 if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc)
3079 i40e_free_rx_resources(vsi->rx_rings[i]);
3080 }
3081
3082 /**
3083 * i40e_config_xps_tx_ring - Configure XPS for a Tx ring
3084 * @ring: The Tx ring to configure
3085 *
3086 * This enables/disables XPS for a given Tx descriptor ring
3087 * based on the TCs enabled for the VSI that ring belongs to.
3088 **/
3089 static void i40e_config_xps_tx_ring(struct i40e_ring *ring)
3090 {
3091 int cpu;
3092
3093 if (!ring->q_vector || !ring->netdev || ring->ch)
3094 return;
3095
3096 /* We only initialize XPS once, so as not to overwrite user settings */
3097 if (test_and_set_bit(__I40E_TX_XPS_INIT_DONE, ring->state))
3098 return;
3099
3100 cpu = cpumask_local_spread(ring->q_vector->v_idx, -1);
3101 netif_set_xps_queue(ring->netdev, get_cpu_mask(cpu),
3102 ring->queue_index);
3103 }
3104
3105 /**
3106 * i40e_xsk_umem - Retrieve the AF_XDP ZC if XDP and ZC is enabled
3107 * @ring: The Tx or Rx ring
3108 *
3109 * Returns the UMEM or NULL.
3110 **/
3111 static struct xdp_umem *i40e_xsk_umem(struct i40e_ring *ring)
3112 {
3113 bool xdp_on = i40e_enabled_xdp_vsi(ring->vsi);
3114 int qid = ring->queue_index;
3115
3116 if (ring_is_xdp(ring))
3117 qid -= ring->vsi->alloc_queue_pairs;
3118
3119 if (!xdp_on || !test_bit(qid, ring->vsi->af_xdp_zc_qps))
3120 return NULL;
3121
3122 return xdp_get_umem_from_qid(ring->vsi->netdev, qid);
3123 }
3124
3125 /**
3126 * i40e_configure_tx_ring - Configure a transmit ring context and rest
3127 * @ring: The Tx ring to configure
3128 *
3129 * Configure the Tx descriptor ring in the HMC context.
3130 **/
3131 static int i40e_configure_tx_ring(struct i40e_ring *ring)
3132 {
3133 struct i40e_vsi *vsi = ring->vsi;
3134 u16 pf_q = vsi->base_queue + ring->queue_index;
3135 struct i40e_hw *hw = &vsi->back->hw;
3136 struct i40e_hmc_obj_txq tx_ctx;
3137 i40e_status err = 0;
3138 u32 qtx_ctl = 0;
3139
3140 if (ring_is_xdp(ring))
3141 ring->xsk_umem = i40e_xsk_umem(ring);
3142
3143 /* some ATR related tx ring init */
3144 if (vsi->back->flags & I40E_FLAG_FD_ATR_ENABLED) {
3145 ring->atr_sample_rate = vsi->back->atr_sample_rate;
3146 ring->atr_count = 0;
3147 } else {
3148 ring->atr_sample_rate = 0;
3149 }
3150
3151 /* configure XPS */
3152 i40e_config_xps_tx_ring(ring);
3153
3154 /* clear the context structure first */
3155 memset(&tx_ctx, 0, sizeof(tx_ctx));
3156
3157 tx_ctx.new_context = 1;
3158 tx_ctx.base = (ring->dma / 128);
3159 tx_ctx.qlen = ring->count;
3160 tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FD_SB_ENABLED |
3161 I40E_FLAG_FD_ATR_ENABLED));
3162 tx_ctx.timesync_ena = !!(vsi->back->flags & I40E_FLAG_PTP);
3163
3164 if (vsi->type != I40E_VSI_FDIR)
3165 tx_ctx.head_wb_ena = 1;
3166 tx_ctx.head_wb_addr = ring->dma +
3167 (ring->count * sizeof(struct i40e_tx_desc));
3168
3169 /* As part of VSI creation/update, FW allocates certain
3170 * Tx arbitration queue sets for each TC enabled for
3171 * the VSI. The FW returns the handles to these queue
3172 * sets as part of the response buffer to Add VSI,
3173 * Update VSI, etc. AQ commands. It is expected that
3174 * these queue set handles be associated with the Tx
3175 * queues by the driver as part of the TX queue context
3176 * initialization. This has to be done regardless of
3177 * DCB as by default everything is mapped to TC0.
3178 */
3179
3180 if (ring->ch)
3181 tx_ctx.rdylist =
3182 le16_to_cpu(ring->ch->info.qs_handle[ring->dcb_tc]);
3183
3184 else
3185 tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[ring->dcb_tc]);
3186
3187 tx_ctx.rdylist_act = 0;
3188
3189 /* clear the context in the HMC */
3190 err = i40e_clear_lan_tx_queue_context(hw, pf_q);
3191 if (err) {
3192 dev_info(&vsi->back->pdev->dev,
3193 "Failed to clear LAN Tx queue context on Tx ring %d (pf_q %d), error: %d\n",
3194 ring->queue_index, pf_q, err);
3195 return -ENOMEM;
3196 }
3197
3198 /* set the context in the HMC */
3199 err = i40e_set_lan_tx_queue_context(hw, pf_q, &tx_ctx);
3200 if (err) {
3201 dev_info(&vsi->back->pdev->dev,
3202 "Failed to set LAN Tx queue context on Tx ring %d (pf_q %d), error: %d\n",
3203 ring->queue_index, pf_q, err);
3204 return -ENOMEM;
3205 }
3206
3207 /* Now associate this queue with this PCI function */
3208 if (ring->ch) {
3209 if (ring->ch->type == I40E_VSI_VMDQ2)
3210 qtx_ctl = I40E_QTX_CTL_VM_QUEUE;
3211 else
3212 return -EINVAL;
3213
3214 qtx_ctl |= (ring->ch->vsi_number <<
3215 I40E_QTX_CTL_VFVM_INDX_SHIFT) &
3216 I40E_QTX_CTL_VFVM_INDX_MASK;
3217 } else {
3218 if (vsi->type == I40E_VSI_VMDQ2) {
3219 qtx_ctl = I40E_QTX_CTL_VM_QUEUE;
3220 qtx_ctl |= ((vsi->id) << I40E_QTX_CTL_VFVM_INDX_SHIFT) &
3221 I40E_QTX_CTL_VFVM_INDX_MASK;
3222 } else {
3223 qtx_ctl = I40E_QTX_CTL_PF_QUEUE;
3224 }
3225 }
3226
3227 qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
3228 I40E_QTX_CTL_PF_INDX_MASK);
3229 wr32(hw, I40E_QTX_CTL(pf_q), qtx_ctl);
3230 i40e_flush(hw);
3231
3232 /* cache tail off for easier writes later */
3233 ring->tail = hw->hw_addr + I40E_QTX_TAIL(pf_q);
3234
3235 return 0;
3236 }
3237
3238 /**
3239 * i40e_configure_rx_ring - Configure a receive ring context
3240 * @ring: The Rx ring to configure
3241 *
3242 * Configure the Rx descriptor ring in the HMC context.
3243 **/
3244 static int i40e_configure_rx_ring(struct i40e_ring *ring)
3245 {
3246 struct i40e_vsi *vsi = ring->vsi;
3247 u32 chain_len = vsi->back->hw.func_caps.rx_buf_chain_len;
3248 u16 pf_q = vsi->base_queue + ring->queue_index;
3249 struct i40e_hw *hw = &vsi->back->hw;
3250 struct i40e_hmc_obj_rxq rx_ctx;
3251 i40e_status err = 0;
3252 bool ok;
3253 int ret;
3254
3255 bitmap_zero(ring->state, __I40E_RING_STATE_NBITS);
3256
3257 /* clear the context structure first */
3258 memset(&rx_ctx, 0, sizeof(rx_ctx));
3259
3260 if (ring->vsi->type == I40E_VSI_MAIN)
3261 xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
3262
3263 ring->xsk_umem = i40e_xsk_umem(ring);
3264 if (ring->xsk_umem) {
3265 ring->rx_buf_len = ring->xsk_umem->chunk_size_nohr -
3266 XDP_PACKET_HEADROOM;
3267
3268 /* For AF_XDP ZC, we disallow packets to span on multiple
3269 * buffers, thus letting us skip that handling in the fast-path.
3270 */
3271 chain_len = 1;
3272 ring->zca.free = i40e_zca_free;
3273 ret = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
3274 MEM_TYPE_ZERO_COPY,
3275 &ring->zca);
3276 if (ret)
3277 return ret;
3278 dev_info(&vsi->back->pdev->dev,
3279 "Registered XDP mem model MEM_TYPE_ZERO_COPY on Rx ring %d\n",
3280 ring->queue_index);
3281
3282 } else {
3283 ring->rx_buf_len = vsi->rx_buf_len;
3284 if (ring->vsi->type == I40E_VSI_MAIN) {
3285 ret = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
3286 MEM_TYPE_PAGE_SHARED,
3287 NULL);
3288 if (ret)
3289 return ret;
3290 }
3291 }
3292
3293 rx_ctx.dbuff = DIV_ROUND_UP(ring->rx_buf_len,
3294 BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT));
3295
3296 rx_ctx.base = (ring->dma / 128);
3297 rx_ctx.qlen = ring->count;
3298
3299 /* use 32 byte descriptors */
3300 rx_ctx.dsize = 1;
3301
3302 /* descriptor type is always zero
3303 * rx_ctx.dtype = 0;
3304 */
3305 rx_ctx.hsplit_0 = 0;
3306
3307 rx_ctx.rxmax = min_t(u16, vsi->max_frame, chain_len * ring->rx_buf_len);
3308 if (hw->revision_id == 0)
3309 rx_ctx.lrxqthresh = 0;
3310 else
3311 rx_ctx.lrxqthresh = 1;
3312 rx_ctx.crcstrip = 1;
3313 rx_ctx.l2tsel = 1;
3314 /* this controls whether VLAN is stripped from inner headers */
3315 rx_ctx.showiv = 0;
3316 /* set the prefena field to 1 because the manual says to */
3317 rx_ctx.prefena = 1;
3318
3319 /* clear the context in the HMC */
3320 err = i40e_clear_lan_rx_queue_context(hw, pf_q);
3321 if (err) {
3322 dev_info(&vsi->back->pdev->dev,
3323 "Failed to clear LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
3324 ring->queue_index, pf_q, err);
3325 return -ENOMEM;
3326 }
3327
3328 /* set the context in the HMC */
3329 err = i40e_set_lan_rx_queue_context(hw, pf_q, &rx_ctx);
3330 if (err) {
3331 dev_info(&vsi->back->pdev->dev,
3332 "Failed to set LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
3333 ring->queue_index, pf_q, err);
3334 return -ENOMEM;
3335 }
3336
3337 /* configure Rx buffer alignment */
3338 if (!vsi->netdev || (vsi->back->flags & I40E_FLAG_LEGACY_RX))
3339 clear_ring_build_skb_enabled(ring);
3340 else
3341 set_ring_build_skb_enabled(ring);
3342
3343 /* cache tail for quicker writes, and clear the reg before use */
3344 ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q);
3345 writel(0, ring->tail);
3346
3347 ok = ring->xsk_umem ?
3348 i40e_alloc_rx_buffers_zc(ring, I40E_DESC_UNUSED(ring)) :
3349 !i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring));
3350 if (!ok) {
3351 /* Log this in case the user has forgotten to give the kernel
3352 * any buffers, even later in the application.
3353 */
3354 dev_info(&vsi->back->pdev->dev,
3355 "Failed to allocate some buffers on %sRx ring %d (pf_q %d)\n",
3356 ring->xsk_umem ? "UMEM enabled " : "",
3357 ring->queue_index, pf_q);
3358 }
3359
3360 return 0;
3361 }
3362
3363 /**
3364 * i40e_vsi_configure_tx - Configure the VSI for Tx
3365 * @vsi: VSI structure describing this set of rings and resources
3366 *
3367 * Configure the Tx VSI for operation.
3368 **/
3369 static int i40e_vsi_configure_tx(struct i40e_vsi *vsi)
3370 {
3371 int err = 0;
3372 u16 i;
3373
3374 for (i = 0; (i < vsi->num_queue_pairs) && !err; i++)
3375 err = i40e_configure_tx_ring(vsi->tx_rings[i]);
3376
3377 if (err || !i40e_enabled_xdp_vsi(vsi))
3378 return err;
3379
3380 for (i = 0; (i < vsi->num_queue_pairs) && !err; i++)
3381 err = i40e_configure_tx_ring(vsi->xdp_rings[i]);
3382
3383 return err;
3384 }
3385
3386 /**
3387 * i40e_vsi_configure_rx - Configure the VSI for Rx
3388 * @vsi: the VSI being configured
3389 *
3390 * Configure the Rx VSI for operation.
3391 **/
3392 static int i40e_vsi_configure_rx(struct i40e_vsi *vsi)
3393 {
3394 int err = 0;
3395 u16 i;
3396
3397 if (!vsi->netdev || (vsi->back->flags & I40E_FLAG_LEGACY_RX)) {
3398 vsi->max_frame = I40E_MAX_RXBUFFER;
3399 vsi->rx_buf_len = I40E_RXBUFFER_2048;
3400 #if (PAGE_SIZE < 8192)
3401 } else if (!I40E_2K_TOO_SMALL_WITH_PADDING &&
3402 (vsi->netdev->mtu <= ETH_DATA_LEN)) {
3403 vsi->max_frame = I40E_RXBUFFER_1536 - NET_IP_ALIGN;
3404 vsi->rx_buf_len = I40E_RXBUFFER_1536 - NET_IP_ALIGN;
3405 #endif
3406 } else {
3407 vsi->max_frame = I40E_MAX_RXBUFFER;
3408 vsi->rx_buf_len = (PAGE_SIZE < 8192) ? I40E_RXBUFFER_3072 :
3409 I40E_RXBUFFER_2048;
3410 }
3411
3412 /* set up individual rings */
3413 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
3414 err = i40e_configure_rx_ring(vsi->rx_rings[i]);
3415
3416 return err;
3417 }
3418
3419 /**
3420 * i40e_vsi_config_dcb_rings - Update rings to reflect DCB TC
3421 * @vsi: ptr to the VSI
3422 **/
3423 static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi)
3424 {
3425 struct i40e_ring *tx_ring, *rx_ring;
3426 u16 qoffset, qcount;
3427 int i, n;
3428
3429 if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
3430 /* Reset the TC information */
3431 for (i = 0; i < vsi->num_queue_pairs; i++) {
3432 rx_ring = vsi->rx_rings[i];
3433 tx_ring = vsi->tx_rings[i];
3434 rx_ring->dcb_tc = 0;
3435 tx_ring->dcb_tc = 0;
3436 }
3437 return;
3438 }
3439
3440 for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) {
3441 if (!(vsi->tc_config.enabled_tc & BIT_ULL(n)))
3442 continue;
3443
3444 qoffset = vsi->tc_config.tc_info[n].qoffset;
3445 qcount = vsi->tc_config.tc_info[n].qcount;
3446 for (i = qoffset; i < (qoffset + qcount); i++) {
3447 rx_ring = vsi->rx_rings[i];
3448 tx_ring = vsi->tx_rings[i];
3449 rx_ring->dcb_tc = n;
3450 tx_ring->dcb_tc = n;
3451 }
3452 }
3453 }
3454
3455 /**
3456 * i40e_set_vsi_rx_mode - Call set_rx_mode on a VSI
3457 * @vsi: ptr to the VSI
3458 **/
3459 static void i40e_set_vsi_rx_mode(struct i40e_vsi *vsi)
3460 {
3461 if (vsi->netdev)
3462 i40e_set_rx_mode(vsi->netdev);
3463 }
3464
3465 /**
3466 * i40e_fdir_filter_restore - Restore the Sideband Flow Director filters
3467 * @vsi: Pointer to the targeted VSI
3468 *
3469 * This function replays the hlist on the hw where all the SB Flow Director
3470 * filters were saved.
3471 **/
3472 static void i40e_fdir_filter_restore(struct i40e_vsi *vsi)
3473 {
3474 struct i40e_fdir_filter *filter;
3475 struct i40e_pf *pf = vsi->back;
3476 struct hlist_node *node;
3477
3478 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
3479 return;
3480
3481 /* Reset FDir counters as we're replaying all existing filters */
3482 pf->fd_tcp4_filter_cnt = 0;
3483 pf->fd_udp4_filter_cnt = 0;
3484 pf->fd_sctp4_filter_cnt = 0;
3485 pf->fd_ip4_filter_cnt = 0;
3486
3487 hlist_for_each_entry_safe(filter, node,
3488 &pf->fdir_filter_list, fdir_node) {
3489 i40e_add_del_fdir(vsi, filter, true);
3490 }
3491 }
3492
3493 /**
3494 * i40e_vsi_configure - Set up the VSI for action
3495 * @vsi: the VSI being configured
3496 **/
3497 static int i40e_vsi_configure(struct i40e_vsi *vsi)
3498 {
3499 int err;
3500
3501 i40e_set_vsi_rx_mode(vsi);
3502 i40e_restore_vlan(vsi);
3503 i40e_vsi_config_dcb_rings(vsi);
3504 err = i40e_vsi_configure_tx(vsi);
3505 if (!err)
3506 err = i40e_vsi_configure_rx(vsi);
3507
3508 return err;
3509 }
3510
3511 /**
3512 * i40e_vsi_configure_msix - MSIX mode Interrupt Config in the HW
3513 * @vsi: the VSI being configured
3514 **/
3515 static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
3516 {
3517 bool has_xdp = i40e_enabled_xdp_vsi(vsi);
3518 struct i40e_pf *pf = vsi->back;
3519 struct i40e_hw *hw = &pf->hw;
3520 u16 vector;
3521 int i, q;
3522 u32 qp;
3523
3524 /* The interrupt indexing is offset by 1 in the PFINT_ITRn
3525 * and PFINT_LNKLSTn registers, e.g.:
3526 * PFINT_ITRn[0..n-1] gets msix-1..msix-n (qpair interrupts)
3527 */
3528 qp = vsi->base_queue;
3529 vector = vsi->base_vector;
3530 for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
3531 struct i40e_q_vector *q_vector = vsi->q_vectors[i];
3532
3533 q_vector->rx.next_update = jiffies + 1;
3534 q_vector->rx.target_itr =
3535 ITR_TO_REG(vsi->rx_rings[i]->itr_setting);
3536 wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1),
3537 q_vector->rx.target_itr >> 1);
3538 q_vector->rx.current_itr = q_vector->rx.target_itr;
3539
3540 q_vector->tx.next_update = jiffies + 1;
3541 q_vector->tx.target_itr =
3542 ITR_TO_REG(vsi->tx_rings[i]->itr_setting);
3543 wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1),
3544 q_vector->tx.target_itr >> 1);
3545 q_vector->tx.current_itr = q_vector->tx.target_itr;
3546
3547 wr32(hw, I40E_PFINT_RATEN(vector - 1),
3548 i40e_intrl_usec_to_reg(vsi->int_rate_limit));
3549
3550 /* Linked list for the queuepairs assigned to this vector */
3551 wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), qp);
3552 for (q = 0; q < q_vector->num_ringpairs; q++) {
3553 u32 nextqp = has_xdp ? qp + vsi->alloc_queue_pairs : qp;
3554 u32 val;
3555
3556 val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
3557 (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
3558 (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
3559 (nextqp << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
3560 (I40E_QUEUE_TYPE_TX <<
3561 I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
3562
3563 wr32(hw, I40E_QINT_RQCTL(qp), val);
3564
3565 if (has_xdp) {
3566 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3567 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
3568 (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
3569 (qp << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
3570 (I40E_QUEUE_TYPE_TX <<
3571 I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
3572
3573 wr32(hw, I40E_QINT_TQCTL(nextqp), val);
3574 }
3575
3576 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3577 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
3578 (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
3579 ((qp + 1) << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
3580 (I40E_QUEUE_TYPE_RX <<
3581 I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
3582
3583 /* Terminate the linked list */
3584 if (q == (q_vector->num_ringpairs - 1))
3585 val |= (I40E_QUEUE_END_OF_LIST <<
3586 I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
3587
3588 wr32(hw, I40E_QINT_TQCTL(qp), val);
3589 qp++;
3590 }
3591 }
3592
3593 i40e_flush(hw);
3594 }
3595
3596 /**
3597 * i40e_enable_misc_int_causes - enable the non-queue interrupts
3598 * @pf: pointer to private device data structure
3599 **/
3600 static void i40e_enable_misc_int_causes(struct i40e_pf *pf)
3601 {
3602 struct i40e_hw *hw = &pf->hw;
3603 u32 val;
3604
3605 /* clear things first */
3606 wr32(hw, I40E_PFINT_ICR0_ENA, 0);
3607 rd32(hw, I40E_PFINT_ICR0);
3608
3609 val = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
3610 I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
3611 I40E_PFINT_ICR0_ENA_GRST_MASK |
3612 I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK |
3613 I40E_PFINT_ICR0_ENA_GPIO_MASK |
3614 I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
3615 I40E_PFINT_ICR0_ENA_VFLR_MASK |
3616 I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
3617
3618 if (pf->flags & I40E_FLAG_IWARP_ENABLED)
3619 val |= I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
3620
3621 if (pf->flags & I40E_FLAG_PTP)
3622 val |= I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
3623
3624 wr32(hw, I40E_PFINT_ICR0_ENA, val);
3625
3626 /* SW_ITR_IDX = 0, but don't change INTENA */
3627 wr32(hw, I40E_PFINT_DYN_CTL0, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
3628 I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);
3629
3630 /* OTHER_ITR_IDX = 0 */
3631 wr32(hw, I40E_PFINT_STAT_CTL0, 0);
3632 }
3633
3634 /**
3635 * i40e_configure_msi_and_legacy - Legacy mode interrupt config in the HW
3636 * @vsi: the VSI being configured
3637 **/
3638 static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
3639 {
3640 u32 nextqp = i40e_enabled_xdp_vsi(vsi) ? vsi->alloc_queue_pairs : 0;
3641 struct i40e_q_vector *q_vector = vsi->q_vectors[0];
3642 struct i40e_pf *pf = vsi->back;
3643 struct i40e_hw *hw = &pf->hw;
3644 u32 val;
3645
3646 /* set the ITR configuration */
3647 q_vector->rx.next_update = jiffies + 1;
3648 q_vector->rx.target_itr = ITR_TO_REG(vsi->rx_rings[0]->itr_setting);
3649 wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), q_vector->rx.target_itr >> 1);
3650 q_vector->rx.current_itr = q_vector->rx.target_itr;
3651 q_vector->tx.next_update = jiffies + 1;
3652 q_vector->tx.target_itr = ITR_TO_REG(vsi->tx_rings[0]->itr_setting);
3653 wr32(hw, I40E_PFINT_ITR0(I40E_TX_ITR), q_vector->tx.target_itr >> 1);
3654 q_vector->tx.current_itr = q_vector->tx.target_itr;
3655
3656 i40e_enable_misc_int_causes(pf);
3657
3658 /* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
3659 wr32(hw, I40E_PFINT_LNKLST0, 0);
3660
3661 /* Associate the queue pair to the vector and enable the queue int */
3662 val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
3663 (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
3664 (nextqp << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT)|
3665 (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
3666
3667 wr32(hw, I40E_QINT_RQCTL(0), val);
3668
3669 if (i40e_enabled_xdp_vsi(vsi)) {
3670 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3671 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT)|
3672 (I40E_QUEUE_TYPE_TX
3673 << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
3674
3675 wr32(hw, I40E_QINT_TQCTL(nextqp), val);
3676 }
3677
3678 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3679 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
3680 (I40E_QUEUE_END_OF_LIST << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
3681
3682 wr32(hw, I40E_QINT_TQCTL(0), val);
3683 i40e_flush(hw);
3684 }
3685
3686 /**
3687 * i40e_irq_dynamic_disable_icr0 - Disable default interrupt generation for icr0
3688 * @pf: board private structure
3689 **/
3690 void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf)
3691 {
3692 struct i40e_hw *hw = &pf->hw;
3693
3694 wr32(hw, I40E_PFINT_DYN_CTL0,
3695 I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
3696 i40e_flush(hw);
3697 }
3698
3699 /**
3700 * i40e_irq_dynamic_enable_icr0 - Enable default interrupt generation for icr0
3701 * @pf: board private structure
3702 **/
3703 void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf)
3704 {
3705 struct i40e_hw *hw = &pf->hw;
3706 u32 val;
3707
3708 val = I40E_PFINT_DYN_CTL0_INTENA_MASK |
3709 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
3710 (I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
3711
3712 wr32(hw, I40E_PFINT_DYN_CTL0, val);
3713 i40e_flush(hw);
3714 }
3715
3716 /**
3717 * i40e_msix_clean_rings - MSIX mode Interrupt Handler
3718 * @irq: interrupt number
3719 * @data: pointer to a q_vector
3720 **/
3721 static irqreturn_t i40e_msix_clean_rings(int irq, void *data)
3722 {
3723 struct i40e_q_vector *q_vector = data;
3724
3725 if (!q_vector->tx.ring && !q_vector->rx.ring)
3726 return IRQ_HANDLED;
3727
3728 napi_schedule_irqoff(&q_vector->napi);
3729
3730 return IRQ_HANDLED;
3731 }
3732
3733 /**
3734 * i40e_irq_affinity_notify - Callback for affinity changes
3735 * @notify: context as to what irq was changed
3736 * @mask: the new affinity mask
3737 *
3738 * This is a callback function used by the irq_set_affinity_notifier function
3739 * so that we may register to receive changes to the irq affinity masks.
3740 **/
3741 static void i40e_irq_affinity_notify(struct irq_affinity_notify *notify,
3742 const cpumask_t *mask)
3743 {
3744 struct i40e_q_vector *q_vector =
3745 container_of(notify, struct i40e_q_vector, affinity_notify);
3746
3747 cpumask_copy(&q_vector->affinity_mask, mask);
3748 }
3749
3750 /**
3751 * i40e_irq_affinity_release - Callback for affinity notifier release
3752 * @ref: internal core kernel usage
3753 *
3754 * This is a callback function used by the irq_set_affinity_notifier function
3755 * to inform the current notification subscriber that they will no longer
3756 * receive notifications.
3757 **/
3758 static void i40e_irq_affinity_release(struct kref *ref) {}
3759
3760 /**
3761 * i40e_vsi_request_irq_msix - Initialize MSI-X interrupts
3762 * @vsi: the VSI being configured
3763 * @basename: name for the vector
3764 *
3765 * Allocates MSI-X vectors and requests interrupts from the kernel.
3766 **/
3767 static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
3768 {
3769 int q_vectors = vsi->num_q_vectors;
3770 struct i40e_pf *pf = vsi->back;
3771 int base = vsi->base_vector;
3772 int rx_int_idx = 0;
3773 int tx_int_idx = 0;
3774 int vector, err;
3775 int irq_num;
3776 int cpu;
3777
3778 for (vector = 0; vector < q_vectors; vector++) {
3779 struct i40e_q_vector *q_vector = vsi->q_vectors[vector];
3780
3781 irq_num = pf->msix_entries[base + vector].vector;
3782
3783 if (q_vector->tx.ring && q_vector->rx.ring) {
3784 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
3785 "%s-%s-%d", basename, "TxRx", rx_int_idx++);
3786 tx_int_idx++;
3787 } else if (q_vector->rx.ring) {
3788 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
3789 "%s-%s-%d", basename, "rx", rx_int_idx++);
3790 } else if (q_vector->tx.ring) {
3791 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
3792 "%s-%s-%d", basename, "tx", tx_int_idx++);
3793 } else {
3794 /* skip this unused q_vector */
3795 continue;
3796 }
3797 err = request_irq(irq_num,
3798 vsi->irq_handler,
3799 0,
3800 q_vector->name,
3801 q_vector);
3802 if (err) {
3803 dev_info(&pf->pdev->dev,
3804 "MSIX request_irq failed, error: %d\n", err);
3805 goto free_queue_irqs;
3806 }
3807
3808 /* register for affinity change notifications */
3809 q_vector->affinity_notify.notify = i40e_irq_affinity_notify;
3810 q_vector->affinity_notify.release = i40e_irq_affinity_release;
3811 irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);
3812
3813 /* Spread affinity hints out across online CPUs.
3814 *
3815 * get_cpu_mask returns a static constant mask with a permanent
3816 * lifetime so it's ok to pass to irq_set_affinity_hint without making a copy.
3817 */
3818 cpu = cpumask_local_spread(q_vector->v_idx, -1);
3819 irq_set_affinity_hint(irq_num, get_cpu_mask(cpu));
3820 }
3821
3822 vsi->irqs_ready = true;
3823 return 0;
3824
3825 free_queue_irqs:
3826 while (vector) {
3827 vector--;
3828 irq_num = pf->msix_entries[base + vector].vector;
3829 irq_set_affinity_notifier(irq_num, NULL);
3830 irq_set_affinity_hint(irq_num, NULL);
3831 free_irq(irq_num, &vsi->q_vectors[vector]);
3832 }
3833 return err;
3834 }
3835
3836 /**
3837 * i40e_vsi_disable_irq - Mask off queue interrupt generation on the VSI
3838 * @vsi: the VSI being un-configured
3839 **/
3840 static void i40e_vsi_disable_irq(struct i40e_vsi *vsi)
3841 {
3842 struct i40e_pf *pf = vsi->back;
3843 struct i40e_hw *hw = &pf->hw;
3844 int base = vsi->base_vector;
3845 int i;
3846
3847 /* disable interrupt causation from each queue */
3848 for (i = 0; i < vsi->num_queue_pairs; i++) {
3849 u32 val;
3850
3851 val = rd32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx));
3852 val &= ~I40E_QINT_TQCTL_CAUSE_ENA_MASK;
3853 wr32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx), val);
3854
3855 val = rd32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx));
3856 val &= ~I40E_QINT_RQCTL_CAUSE_ENA_MASK;
3857 wr32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx), val);
3858
3859 if (!i40e_enabled_xdp_vsi(vsi))
3860 continue;
3861 wr32(hw, I40E_QINT_TQCTL(vsi->xdp_rings[i]->reg_idx), 0);
3862 }
3863
3864 /* disable each interrupt */
3865 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3866 for (i = vsi->base_vector;
3867 i < (vsi->num_q_vectors + vsi->base_vector); i++)
3868 wr32(hw, I40E_PFINT_DYN_CTLN(i - 1), 0);
3869
3870 i40e_flush(hw);
3871 for (i = 0; i < vsi->num_q_vectors; i++)
3872 synchronize_irq(pf->msix_entries[i + base].vector);
3873 } else {
3874 /* Legacy and MSI mode - this stops all interrupt handling */
3875 wr32(hw, I40E_PFINT_ICR0_ENA, 0);
3876 wr32(hw, I40E_PFINT_DYN_CTL0, 0);
3877 i40e_flush(hw);
3878 synchronize_irq(pf->pdev->irq);
3879 }
3880 }
3881
3882 /**
3883 * i40e_vsi_enable_irq - Enable IRQ for the given VSI
3884 * @vsi: the VSI being configured
3885 **/
3886 static int i40e_vsi_enable_irq(struct i40e_vsi *vsi)
3887 {
3888 struct i40e_pf *pf = vsi->back;
3889 int i;
3890
3891 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3892 for (i = 0; i < vsi->num_q_vectors; i++)
3893 i40e_irq_dynamic_enable(vsi, i);
3894 } else {
3895 i40e_irq_dynamic_enable_icr0(pf);
3896 }
3897
3898 i40e_flush(&pf->hw);
3899 return 0;
3900 }
3901
3902 /**
3903 * i40e_free_misc_vector - Free the vector that handles non-queue events
3904 * @pf: board private structure
3905 **/
3906 static void i40e_free_misc_vector(struct i40e_pf *pf)
3907 {
3908 /* Disable ICR 0 */
3909 wr32(&pf->hw, I40E_PFINT_ICR0_ENA, 0);
3910 i40e_flush(&pf->hw);
3911
3912 if (pf->flags & I40E_FLAG_MSIX_ENABLED && pf->msix_entries) {
3913 synchronize_irq(pf->msix_entries[0].vector);
3914 free_irq(pf->msix_entries[0].vector, pf);
3915 clear_bit(__I40E_MISC_IRQ_REQUESTED, pf->state);
3916 }
3917 }
3918
3919 /**
3920 * i40e_intr - MSI/Legacy and non-queue interrupt handler
3921 * @irq: interrupt number
3922 * @data: pointer to a q_vector
3923 *
3924 * This is the handler used for all MSI/Legacy interrupts, and deals
3925 * with both queue and non-queue interrupts.  This is also used in
3926 * MSIX mode to handle the non-queue interrupts.
3927 **/
3928 static irqreturn_t i40e_intr(int irq, void *data)
3929 {
3930 struct i40e_pf *pf = (struct i40e_pf *)data;
3931 struct i40e_hw *hw = &pf->hw;
3932 irqreturn_t ret = IRQ_NONE;
3933 u32 icr0, icr0_remaining;
3934 u32 val, ena_mask;
3935
3936 icr0 = rd32(hw, I40E_PFINT_ICR0);
3937 ena_mask = rd32(hw, I40E_PFINT_ICR0_ENA);
3938
3939 /* if sharing a legacy IRQ, we might get called w/o an intr pending */
3940 if ((icr0 & I40E_PFINT_ICR0_INTEVENT_MASK) == 0)
3941 goto enable_intr;
3942
3943 /* if interrupt but no bits showing, must be SWINT */
3944 if (((icr0 & ~I40E_PFINT_ICR0_INTEVENT_MASK) == 0) ||
3945 (icr0 & I40E_PFINT_ICR0_SWINT_MASK))
3946 pf->sw_int_count++;
3947
3948 if ((pf->flags & I40E_FLAG_IWARP_ENABLED) &&
3949 (icr0 & I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK)) {
3950 ena_mask &= ~I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
3951 dev_dbg(&pf->pdev->dev, "cleared PE_CRITERR\n");
3952 set_bit(__I40E_CORE_RESET_REQUESTED, pf->state);
3953 }
3954
3955 /* only q0 is used in MSI/Legacy mode, and none are used in MSIX */
3956 if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK) {
3957 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
3958 struct i40e_q_vector *q_vector = vsi->q_vectors[0];
3959
3960 /* We do not have a way to disarm Queue causes while leaving
3961 * interrupt enabled for all other causes, ideally
3962 * interrupt should be disabled while we are in NAPI but
3963 * this is not a performance path and napi_schedule()
3964 * can deal with rescheduling.
3965 */
3966 if (!test_bit(__I40E_DOWN, pf->state))
3967 napi_schedule_irqoff(&q_vector->napi);
3968 }
3969
3970 if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
3971 ena_mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
3972 set_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state);
3973 i40e_debug(&pf->hw, I40E_DEBUG_NVM, "AdminQ event\n");
3974 }
3975
3976 if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
3977 ena_mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
3978 set_bit(__I40E_MDD_EVENT_PENDING, pf->state);
3979 }
3980
3981 if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
3982 ena_mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
3983 set_bit(__I40E_VFLR_EVENT_PENDING, pf->state);
3984 }
3985
3986 if (icr0 & I40E_PFINT_ICR0_GRST_MASK) {
3987 if (!test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
3988 set_bit(__I40E_RESET_INTR_RECEIVED, pf->state);
3989 ena_mask &= ~I40E_PFINT_ICR0_ENA_GRST_MASK;
3990 val = rd32(hw, I40E_GLGEN_RSTAT);
3991 val = (val & I40E_GLGEN_RSTAT_RESET_TYPE_MASK)
3992 >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
3993 if (val == I40E_RESET_CORER) {
3994 pf->corer_count++;
3995 } else if (val == I40E_RESET_GLOBR) {
3996 pf->globr_count++;
3997 } else if (val == I40E_RESET_EMPR) {
3998 pf->empr_count++;
3999 set_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state);
4000 }
4001 }
4002
4003 if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK) {
4004 icr0 &= ~I40E_PFINT_ICR0_HMC_ERR_MASK;
4005 dev_info(&pf->pdev->dev, "HMC error interrupt\n");
4006 dev_info(&pf->pdev->dev, "HMC error info 0x%x, HMC error data 0x%x\n",
4007 rd32(hw, I40E_PFHMC_ERRORINFO),
4008 rd32(hw, I40E_PFHMC_ERRORDATA));
4009 }
4010
4011 if (icr0 & I40E_PFINT_ICR0_TIMESYNC_MASK) {
4012 u32 prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_0);
4013
4014 if (prttsyn_stat & I40E_PRTTSYN_STAT_0_TXTIME_MASK) {
4015 icr0 &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
4016 i40e_ptp_tx_hwtstamp(pf);
4017 }
4018 }
4019
4020 /* If a critical error is pending we have no choice but to reset the
4021 * device.
4022 * Report and mask out any remaining unexpected interrupts.
4023 */
4024 icr0_remaining = icr0 & ena_mask;
4025 if (icr0_remaining) {
4026 dev_info(&pf->pdev->dev, "unhandled interrupt icr0=0x%08x\n",
4027 icr0_remaining);
4028 if ((icr0_remaining & I40E_PFINT_ICR0_PE_CRITERR_MASK) ||
4029 (icr0_remaining & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) ||
4030 (icr0_remaining & I40E_PFINT_ICR0_ECC_ERR_MASK)) {
4031 dev_info(&pf->pdev->dev, "device will be reset\n");
4032 set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
4033 i40e_service_event_schedule(pf);
4034 }
4035 ena_mask &= ~icr0_remaining;
4036 }
4037 ret = IRQ_HANDLED;
4038
4039 enable_intr:
4040 /* re-enable interrupt causes */
4041 wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask);
4042 if (!test_bit(__I40E_DOWN, pf->state) ||
4043 test_bit(__I40E_RECOVERY_MODE, pf->state)) {
4044 i40e_service_event_schedule(pf);
4045 i40e_irq_dynamic_enable_icr0(pf);
4046 }
4047
4048 return ret;
4049 }
4050
4051 /**
4052 * i40e_clean_fdir_tx_irq - Reclaim resources after transmit completes
4053 * @tx_ring: Tx ring to clean
4054 * @budget: how many cleans we're allowed
4055 *
4056 * Returns true if there's any budget left (e.g. the clean is finished)
4057 **/
4058 static bool i40e_clean_fdir_tx_irq(struct i40e_ring *tx_ring, int budget)
4059 {
4060 struct i40e_vsi *vsi = tx_ring->vsi;
4061 u16 i = tx_ring->next_to_clean;
4062 struct i40e_tx_buffer *tx_buf;
4063 struct i40e_tx_desc *tx_desc;
4064
4065 tx_buf = &tx_ring->tx_bi[i];
4066 tx_desc = I40E_TX_DESC(tx_ring, i);
4067 i -= tx_ring->count;
4068
4069 do {
4070 struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;
4071
4072 /* if next_to_watch is not set then there is no work pending */
4073 if (!eop_desc)
4074 break;
4075
4076 /* prevent any other reads prior to eop_desc */
4077 smp_rmb();
4078
4079 /* if the descriptor isn't done, no work yet to do */
4080 if (!(eop_desc->cmd_type_offset_bsz &
4081 cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE)))
4082 break;
4083
4084 /* clear next_to_watch to prevent false hangs */
4085 tx_buf->next_to_watch = NULL;
4086
4087 tx_desc->buffer_addr = 0;
4088 tx_desc->cmd_type_offset_bsz = 0;
4089
4090 tx_buf++;
4091 tx_desc++;
4092 i++;
4093 if (unlikely(!i)) {
4094 i -= tx_ring->count;
4095 tx_buf = tx_ring->tx_bi;
4096 tx_desc = I40E_TX_DESC(tx_ring, 0);
4097 }
4098
4099 dma_unmap_single(tx_ring->dev,
4100 dma_unmap_addr(tx_buf, dma),
4101 dma_unmap_len(tx_buf, len),
4102 DMA_TO_DEVICE);
4103 if (tx_buf->tx_flags & I40E_TX_FLAGS_FD_SB)
4104 kfree(tx_buf->raw_buf);
4105
4106 tx_buf->raw_buf = NULL;
4107 tx_buf->tx_flags = 0;
4108 tx_buf->next_to_watch = NULL;
4109 dma_unmap_len_set(tx_buf, len, 0);
4110 tx_desc->buffer_addr = 0;
4111 tx_desc->cmd_type_offset_bsz = 0;
4112
4113 /* move us past the eop_desc for start of next FD desc */
4114 tx_buf++;
4115 tx_desc++;
4116 i++;
4117 if (unlikely(!i)) {
4118 i -= tx_ring->count;
4119 tx_buf = tx_ring->tx_bi;
4120 tx_desc = I40E_TX_DESC(tx_ring, 0);
4121 }
4122
4123 /* update budget accounting */
4124 budget--;
4125 } while (likely(budget));
4126
4127 i += tx_ring->count;
4128 tx_ring->next_to_clean = i;
4129
4130 if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED)
4131 i40e_irq_dynamic_enable(vsi, tx_ring->q_vector->v_idx);
4132
4133 return budget > 0;
4134 }
4135
4136 /**
4137 * i40e_fdir_clean_ring - Interrupt Handler for FDIR SB ring
4138 * @irq: interrupt number
4139 * @data: pointer to a q_vector
4140 **/
4141 static irqreturn_t i40e_fdir_clean_ring(int irq, void *data)
4142 {
4143 struct i40e_q_vector *q_vector = data;
4144 struct i40e_vsi *vsi;
4145
4146 if (!q_vector->tx.ring)
4147 return IRQ_HANDLED;
4148
4149 vsi = q_vector->tx.ring->vsi;
4150 i40e_clean_fdir_tx_irq(q_vector->tx.ring, vsi->work_limit);
4151
4152 return IRQ_HANDLED;
4153 }
4154
4155 /**
4156 * i40e_map_vector_to_qp - Assigns the queue pair to the vector
4157 * @vsi: the VSI being configured
4158 * @v_idx: vector index
4159 * @qp_idx: queue pair index
4160 **/
4161 static void i40e_map_vector_to_qp(struct i40e_vsi *vsi, int v_idx, int qp_idx)
4162 {
4163 struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
4164 struct i40e_ring *tx_ring = vsi->tx_rings[qp_idx];
4165 struct i40e_ring *rx_ring = vsi->rx_rings[qp_idx];
4166
4167 tx_ring->q_vector = q_vector;
4168 tx_ring->next = q_vector->tx.ring;
4169 q_vector->tx.ring = tx_ring;
4170 q_vector->tx.count++;
4171
4172 /* Place XDP Tx ring in the same q_vector ring list as regular Tx */
4173 if (i40e_enabled_xdp_vsi(vsi)) {
4174 struct i40e_ring *xdp_ring = vsi->xdp_rings[qp_idx];
4175
4176 xdp_ring->q_vector = q_vector;
4177 xdp_ring->next = q_vector->tx.ring;
4178 q_vector->tx.ring = xdp_ring;
4179 q_vector->tx.count++;
4180 }
4181
4182 rx_ring->q_vector = q_vector;
4183 rx_ring->next = q_vector->rx.ring;
4184 q_vector->rx.ring = rx_ring;
4185 q_vector->rx.count++;
4186 }
4187
4188 /**
4189 * i40e_vsi_map_rings_to_vectors - Maps descriptor rings to vectors
4190 * @vsi: the VSI being configured
4191 *
4192 * This function maps descriptor rings to the queue-specific vectors
4193 * we were allotted through the MSI-X enabling code.  Ideally, we'd have
4194 * one vector per queue pair, but on a constrained vector budget, we
4195 * group the queue pairs as "efficiently" as possible.
4196 **/
4197 static void i40e_vsi_map_rings_to_vectors(struct i40e_vsi *vsi)
4198 {
4199 int qp_remaining = vsi->num_queue_pairs;
4200 int q_vectors = vsi->num_q_vectors;
4201 int num_ringpairs;
4202 int v_start = 0;
4203 int qp_idx = 0;
4204
4205 /* If we don't have enough vectors for a 1-to-1 mapping, we'll have to
4206 * group them so there are multiple queues per vector.
4207 * It is also important to go through all the vectors available to be
4208 * sure that if we don't use all the vectors, that the remaining vectors
4209 * are cleared. This is especially important when decreasing the
4210 * number of queues in use.
4211 */
4212 for (; v_start < q_vectors; v_start++) {
4213 struct i40e_q_vector *q_vector = vsi->q_vectors[v_start];
4214
4215 num_ringpairs = DIV_ROUND_UP(qp_remaining, q_vectors - v_start);
4216
4217 q_vector->num_ringpairs = num_ringpairs;
4218 q_vector->reg_idx = q_vector->v_idx + vsi->base_vector - 1;
4219
4220 q_vector->rx.count = 0;
4221 q_vector->tx.count = 0;
4222 q_vector->rx.ring = NULL;
4223 q_vector->tx.ring = NULL;
4224
4225 while (num_ringpairs--) {
4226 i40e_map_vector_to_qp(vsi, v_start, qp_idx);
4227 qp_idx++;
4228 qp_remaining--;
4229 }
4230 }
4231 }
4232
4233 /**
4234 * i40e_vsi_request_irq - Request IRQ from the OS
4235 * @vsi: the VSI being configured
4236 * @basename: name for the vector
4237 **/
4238 static int i40e_vsi_request_irq(struct i40e_vsi *vsi, char *basename)
4239 {
4240 struct i40e_pf *pf = vsi->back;
4241 int err;
4242
4243 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
4244 err = i40e_vsi_request_irq_msix(vsi, basename);
4245 else if (pf->flags & I40E_FLAG_MSI_ENABLED)
4246 err = request_irq(pf->pdev->irq, i40e_intr, 0,
4247 pf->int_name, pf);
4248 else
4249 err = request_irq(pf->pdev->irq, i40e_intr, IRQF_SHARED,
4250 pf->int_name, pf);
4251
4252 if (err)
4253 dev_info(&pf->pdev->dev, "request_irq failed, Error %d\n", err);
4254
4255 return err;
4256 }
4257
4258 #ifdef CONFIG_NET_POLL_CONTROLLER
4259 /**
4260 * i40e_netpoll - A Polling 'interrupt' handler
4261 * @netdev: network interface device structure
4262 *
4263 * This is used by netconsole to send skbs without having to re-enable
4264 * interrupts.  It's not called while the normal interrupt routine is executing.
4265 **/
4266 static void i40e_netpoll(struct net_device *netdev)
4267 {
4268 struct i40e_netdev_priv *np = netdev_priv(netdev);
4269 struct i40e_vsi *vsi = np->vsi;
4270 struct i40e_pf *pf = vsi->back;
4271 int i;
4272
4273 /* if interface is down do nothing */
4274 if (test_bit(__I40E_VSI_DOWN, vsi->state))
4275 return;
4276
4277 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
4278 for (i = 0; i < vsi->num_q_vectors; i++)
4279 i40e_msix_clean_rings(0, vsi->q_vectors[i]);
4280 } else {
4281 i40e_intr(pf->pdev->irq, netdev);
4282 }
4283 }
4284 #endif
4285
4286 #define I40E_QTX_ENA_WAIT_COUNT 50
4287
4288 /**
4289 * i40e_pf_txq_wait - Wait for a PF's Tx queue to be enabled or disabled
4290 * @pf: the PF being configured
4291 * @pf_q: the PF queue
4292 * @enable: enable or disable state of the queue
4293 *
4294 * This routine will wait for the given Tx queue of the PF to reach the
4295 * enabled or disabled state.
4296 * Returns -ETIMEDOUT in case of failing to reach the requested state after
4297 * multiple retries; else will return 0 in case of success.
4298 **/
4299 static int i40e_pf_txq_wait(struct i40e_pf *pf, int pf_q, bool enable)
4300 {
4301 int i;
4302 u32 tx_reg;
4303
4304 for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) {
4305 tx_reg = rd32(&pf->hw, I40E_QTX_ENA(pf_q));
4306 if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
4307 break;
4308
4309 usleep_range(10, 20);
4310 }
4311 if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
4312 return -ETIMEDOUT;
4313
4314 return 0;
4315 }
4316
4317 /**
4318 * i40e_control_tx_q - Start or stop a particular Tx queue
4319 * @pf: the PF structure
4320 * @pf_q: the PF queue to configure
4321 * @enable: start or stop the queue
4322 *
4323 * This function enables or disables a single queue. Note that any delay
4324 * required after the operation is expected to be handled by the caller of
4325 * this function.
4326 **/
4327 static void i40e_control_tx_q(struct i40e_pf *pf, int pf_q, bool enable)
4328 {
4329 struct i40e_hw *hw = &pf->hw;
4330 u32 tx_reg;
4331 int i;
4332
4333 /* warn the TX unit of coming changes */
4334 i40e_pre_tx_queue_cfg(&pf->hw, pf_q, enable);
4335 if (!enable)
4336 usleep_range(10, 20);
4337
4338 for (i = 0; i < I40E_QTX_ENA_WAIT_COUNT; i++) {
4339 tx_reg = rd32(hw, I40E_QTX_ENA(pf_q));
4340 if (((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 1) ==
4341 ((tx_reg >> I40E_QTX_ENA_QENA_STAT_SHIFT) & 1))
4342 break;
4343 usleep_range(1000, 2000);
4344 }
4345
4346 /* Skip if the queue is already in the requested state */
4347 if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
4348 return;
4349
4350 /* turn on/off the queue */
4351 if (enable) {
4352 wr32(hw, I40E_QTX_HEAD(pf_q), 0);
4353 tx_reg |= I40E_QTX_ENA_QENA_REQ_MASK;
4354 } else {
4355 tx_reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
4356 }
4357
4358 wr32(hw, I40E_QTX_ENA(pf_q), tx_reg);
4359 }
4360
4361 /**
4362 * i40e_control_wait_tx_q - Start/stop Tx queue and wait for completion
4363 * @seid: VSI SEID
4364 * @pf: the PF structure
4365 * @pf_q: the PF queue to configure
4366 * @is_xdp: true if the queue is used for XDP
4367 * @enable: start or stop the queue
4368 **/
4369 int i40e_control_wait_tx_q(int seid, struct i40e_pf *pf, int pf_q,
4370 bool is_xdp, bool enable)
4371 {
4372 int ret;
4373
4374 i40e_control_tx_q(pf, pf_q, enable);
4375
4376 /* wait for the change to finish */
4377 ret = i40e_pf_txq_wait(pf, pf_q, enable);
4378 if (ret) {
4379 dev_info(&pf->pdev->dev,
4380 "VSI seid %d %sTx ring %d %sable timeout\n",
4381 seid, (is_xdp ? "XDP " : ""), pf_q,
4382 (enable ? "en" : "dis"));
4383 }
4384
4385 return ret;
4386 }
4387
4388 /**
4389 * i40e_vsi_control_tx - Start or stop a VSI's rings
4390 * @vsi: the VSI being configured
4391 * @enable: start or stop the rings
4392 **/
4393 static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
4394 {
4395 struct i40e_pf *pf = vsi->back;
4396 int i, pf_q, ret = 0;
4397
4398 pf_q = vsi->base_queue;
4399 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
4400 ret = i40e_control_wait_tx_q(vsi->seid, pf,
4401 pf_q,
4402 false , enable);
4403 if (ret)
4404 break;
4405
4406 if (!i40e_enabled_xdp_vsi(vsi))
4407 continue;
4408
4409 ret = i40e_control_wait_tx_q(vsi->seid, pf,
4410 pf_q + vsi->alloc_queue_pairs,
4411 true , enable);
4412 if (ret)
4413 break;
4414 }
4415 return ret;
4416 }
4417
4418 /**
4419 * i40e_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled
4420 * @pf: the PF being configured
4421 * @pf_q: the PF queue
4422 * @enable: enable or disable state of the queue
4423 *
4424 * This routine will wait for the given Rx queue of the PF to reach the
4425 * enabled or disabled state.
4426 * Returns -ETIMEDOUT in case of failing to reach the requested state after
4427 * multiple retries; else will return 0 in case of success.
4428 **/
4429 static int i40e_pf_rxq_wait(struct i40e_pf *pf, int pf_q, bool enable)
4430 {
4431 int i;
4432 u32 rx_reg;
4433
4434 for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) {
4435 rx_reg = rd32(&pf->hw, I40E_QRX_ENA(pf_q));
4436 if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
4437 break;
4438
4439 usleep_range(10, 20);
4440 }
4441 if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
4442 return -ETIMEDOUT;
4443
4444 return 0;
4445 }
4446
4447 /**
4448 * i40e_control_rx_q - Start or stop a particular Rx queue
4449 * @pf: the PF structure
4450 * @pf_q: the PF queue to configure
4451 * @enable: start or stop the queue
4452 *
4453 * This function enables or disables a single queue. Note that
4454 * any delay required after the operation is expected to be
4455 * handled by the caller of this function.
4456 **/
4457 static void i40e_control_rx_q(struct i40e_pf *pf, int pf_q, bool enable)
4458 {
4459 struct i40e_hw *hw = &pf->hw;
4460 u32 rx_reg;
4461 int i;
4462
4463 for (i = 0; i < I40E_QTX_ENA_WAIT_COUNT; i++) {
4464 rx_reg = rd32(hw, I40E_QRX_ENA(pf_q));
4465 if (((rx_reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 1) ==
4466 ((rx_reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 1))
4467 break;
4468 usleep_range(1000, 2000);
4469 }
4470
4471 /* Skip if the queue is already in the requested state */
4472 if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
4473 return;
4474
4475 /* turn on/off the queue */
4476 if (enable)
4477 rx_reg |= I40E_QRX_ENA_QENA_REQ_MASK;
4478 else
4479 rx_reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
4480
4481 wr32(hw, I40E_QRX_ENA(pf_q), rx_reg);
4482 }
4483
4484 /**
4485 * i40e_control_wait_rx_q - Start/stop Rx queue and wait for completion
4486 * @pf: the PF structure
4487 * @pf_q: queue being configured
4488 * @enable: start or stop the rings
4489 *
4490 * This function enables or disables a single queue. Note that any delay
4491 * required after the operation is expected to be handled by the caller of
4492 * this function.
4493 **/
4494 int i40e_control_wait_rx_q(struct i40e_pf *pf, int pf_q, bool enable)
4495 {
4496 int ret = 0;
4497
4498 i40e_control_rx_q(pf, pf_q, enable);
4499
4500 /* wait for the change to finish */
4501 ret = i40e_pf_rxq_wait(pf, pf_q, enable);
4502 if (ret)
4503 return ret;
4504
4505 return ret;
4506 }
4507
4508 /**
4509 * i40e_vsi_control_rx - Start or stop a VSI's rings
4510 * @vsi: the VSI being configured
4511 * @enable: start or stop the rings
4512 **/
4513 static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
4514 {
4515 struct i40e_pf *pf = vsi->back;
4516 int i, pf_q, ret = 0;
4517
4518 pf_q = vsi->base_queue;
4519 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
4520 ret = i40e_control_wait_rx_q(pf, pf_q, enable);
4521 if (ret) {
4522 dev_info(&pf->pdev->dev,
4523 "VSI seid %d Rx ring %d %sable timeout\n",
4524 vsi->seid, pf_q, (enable ? "en" : "dis"));
4525 break;
4526 }
4527 }
4528
4529 /* Due to HW errata, on Rx disable only, the register can
4530  * indicate done before it really is. Needs 50ms to be sure
4531  */
4532 if (!enable)
4533 mdelay(50);
4534
4535 return ret;
4536 }
4537
4538 /**
4539  * i40e_vsi_start_rings - Start a VSI's rings
4540  * @vsi: the VSI being configured
4541  **/
4542 int i40e_vsi_start_rings(struct i40e_vsi *vsi)
4543 {
4544 int ret = 0;
4545
4546
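/* do rx first for enable and last for disable */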
4547 ret = i40e_vsi_control_rx(vsi, true);
4548 if (ret)
4549 return ret;
4550 ret = i40e_vsi_control_tx(vsi, true);
4551
4552 return ret;
4553 }
4554
4555 /**
4556  * i40e_vsi_stop_rings - Stop a VSI's rings
4557  * @vsi: the VSI being configured
4558  **/
4559 void i40e_vsi_stop_rings(struct i40e_vsi *vsi)
4560 {
4561
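/* When port TX is suspended, don't wait */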
4562 if (test_bit(__I40E_PORT_SUSPENDED, vsi->back->state))
4563 return i40e_vsi_stop_rings_no_wait(vsi);
4564
4565 /* do rx first for enable and last for disable
4566  * Ignore return value, we need to shutdown whatever we can
4567  */
4568 i40e_vsi_control_tx(vsi, false);
4569 i40e_vsi_control_rx(vsi, false);
4570 }
4571
4572
4573 /**
4574  * i40e_vsi_stop_rings_no_wait - Stop a VSI's rings and do not delay
4575  * @vsi: the VSI being shutdown
4576  *
4577  * This function stops all the rings for a VSI but does not delay to verify
4578  * that rings have been disabled. It is expected that the caller is shutting
4579  * down multiple VSIs at once and will delay together for all the VSIs after
4580  * setting all the flags. See i40e_vsi_wait_queues_disabled() for more
4581  * details.
4582  **/
4583 void i40e_vsi_stop_rings_no_wait(struct i40e_vsi *vsi)
4584 {
4585 struct i40e_pf *pf = vsi->back;
4586 int i, pf_q;
4587
4588 pf_q = vsi->base_queue;
4589 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
4590 i40e_control_tx_q(pf, pf_q, false);
4591 i40e_control_rx_q(pf, pf_q, false);
4592 }
4593 }
4594
4595 /**
4596  * i40e_vsi_free_irq - Free the irq association with the OS
4597  * @vsi: the VSI being configured
4598  **/
4599 static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
4600 {
4601 struct i40e_pf *pf = vsi->back;
4602 struct i40e_hw *hw = &pf->hw;
4603 int base = vsi->base_vector;
4604 u32 val, qp;
4605 int i;
4606
4607 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
4608 if (!vsi->q_vectors)
4609 return;
4610
4611 if (!vsi->irqs_ready)
4612 return;
4613
4614 vsi->irqs_ready = false;
4615 for (i = 0; i < vsi->num_q_vectors; i++) {
4616 int irq_num;
4617 u16 vector;
4618
4619 vector = i + base;
4620 irq_num = pf->msix_entries[vector].vector;
4621
4622
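/* free only the irqs that were actually requested */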
4623 if (!vsi->q_vectors[i] ||
4624 !vsi->q_vectors[i]->num_ringpairs)
4625 continue;
4626
4627
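/* clear the affinity notifier in the IRQ descriptor */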
4628 irq_set_affinity_notifier(irq_num, NULL);
4629
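/* remove our suggested affinity mask for this IRQ */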
4630 irq_set_affinity_hint(irq_num, NULL);
4631 synchronize_irq(irq_num);
4632 free_irq(irq_num, vsi->q_vectors[i]);
4633
4634 /* Tear down the interrupt queue link list
4635  *
4636  * We know that they come in pairs and always
4637  * the Rx first, then the Tx.  To clear the
4638  * link list, stick the EOL value into the
4639  * next_q field of the registers.
4640  */
4641 val = rd32(hw, I40E_PFINT_LNKLSTN(vector - 1));
4642 qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
4643 >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
4644 val |= I40E_QUEUE_END_OF_LIST
4645 << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
4646 wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), val);
4647
4648 while (qp != I40E_QUEUE_END_OF_LIST) {
4649 u32 next;
4650
4651 val = rd32(hw, I40E_QINT_RQCTL(qp));
4652
4653 val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK |
4654 I40E_QINT_RQCTL_MSIX0_INDX_MASK |
4655 I40E_QINT_RQCTL_CAUSE_ENA_MASK |
4656 I40E_QINT_RQCTL_INTEVENT_MASK);
4657
4658 val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
4659 I40E_QINT_RQCTL_NEXTQ_INDX_MASK);
4660
4661 wr32(hw, I40E_QINT_RQCTL(qp), val);
4662
4663 val = rd32(hw, I40E_QINT_TQCTL(qp));
4664
4665 next = (val & I40E_QINT_TQCTL_NEXTQ_INDX_MASK)
4666 >> I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT;
4667
4668 val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK |
4669 I40E_QINT_TQCTL_MSIX0_INDX_MASK |
4670 I40E_QINT_TQCTL_CAUSE_ENA_MASK |
4671 I40E_QINT_TQCTL_INTEVENT_MASK);
4672
4673 val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
4674 I40E_QINT_TQCTL_NEXTQ_INDX_MASK);
4675
4676 wr32(hw, I40E_QINT_TQCTL(qp), val);
4677 qp = next;
4678 }
4679 }
4680 } else {
4681 free_irq(pf->pdev->irq, pf);
4682
4683 val = rd32(hw, I40E_PFINT_LNKLST0);
4684 qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
4685 >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
4686 val |= I40E_QUEUE_END_OF_LIST
4687 << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
4688 wr32(hw, I40E_PFINT_LNKLST0, val);
4689
4690 val = rd32(hw, I40E_QINT_RQCTL(qp));
4691 val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK |
4692 I40E_QINT_RQCTL_MSIX0_INDX_MASK |
4693 I40E_QINT_RQCTL_CAUSE_ENA_MASK |
4694 I40E_QINT_RQCTL_INTEVENT_MASK);
4695
4696 val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
4697 I40E_QINT_RQCTL_NEXTQ_INDX_MASK);
4698
4699 wr32(hw, I40E_QINT_RQCTL(qp), val);
4700
4701 val = rd32(hw, I40E_QINT_TQCTL(qp));
4702
4703 val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK |
4704 I40E_QINT_TQCTL_MSIX0_INDX_MASK |
4705 I40E_QINT_TQCTL_CAUSE_ENA_MASK |
4706 I40E_QINT_TQCTL_INTEVENT_MASK);
4707
4708 val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
4709 I40E_QINT_TQCTL_NEXTQ_INDX_MASK);
4710
4711 wr32(hw, I40E_QINT_TQCTL(qp), val);
4712 }
4713 }
4714
4715
4716 /**
4717  * i40e_free_q_vector - Free memory allocated for specific interrupt vector
4718  * @vsi: VSI structure
4719  * @v_idx: Index of vector to be freed
4720  *
4721  * This function frees the memory allocated to the q_vector.  If NAPI is
4722  * enabled it will delete any references to the NAPI struct first.
4723  **/
4724 static void i40e_free_q_vector(struct i40e_vsi *vsi, int v_idx)
4725 {
4726 struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
4727 struct i40e_ring *ring;
4728
4729 if (!q_vector)
4730 return;
4731
4732
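/* disassociate q_vector from rings */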
4733 i40e_for_each_ring(ring, q_vector->tx)
4734 ring->q_vector = NULL;
4735
4736 i40e_for_each_ring(ring, q_vector->rx)
4737 ring->q_vector = NULL;
4738
4739
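/* only VSI w/ an associated netdev is set up w/ NAPI */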
4740 if (vsi->netdev)
4741 netif_napi_del(&q_vector->napi);
4742
4743 vsi->q_vectors[v_idx] = NULL;
4744
4745 kfree_rcu(q_vector, rcu);
4746 }
4747
4748
4749 /**
4750  * i40e_vsi_free_q_vectors - Free memory allocated for interrupt vectors
4751  * @vsi: the VSI being configured
4752  *
4753  * This frees the q_vectors and deletes references to the NAPI struct.
4754  **/
4755 static void i40e_vsi_free_q_vectors(struct i40e_vsi *vsi)
4756 {
4757 int v_idx;
4758
4759 for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++)
4760 i40e_free_q_vector(vsi, v_idx);
4761 }
4762
4763 /**
4764  * i40e_reset_interrupt_capability - Disable interrupt setup in OS
4765  * @pf: board private structure
4766  **/
4767 static void i40e_reset_interrupt_capability(struct i40e_pf *pf)
4768 {
4769
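/* If we're in Legacy mode, the interrupt was cleaned in vsi_close */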
4770 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
4771 pci_disable_msix(pf->pdev);
4772 kfree(pf->msix_entries);
4773 pf->msix_entries = NULL;
4774 kfree(pf->irq_pile);
4775 pf->irq_pile = NULL;
4776 } else if (pf->flags & I40E_FLAG_MSI_ENABLED) {
4777 pci_disable_msi(pf->pdev);
4778 }
4779 pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED);
4780 }
4781
4782 /**
4783  * i40e_clear_interrupt_scheme - Clear the current interrupt scheme settings
4784  * @pf: board private structure
4785  *
4786  * We go through and clear interrupt specific resources and reset the
4787  * structure to pre-load conditions
4788  **/
4789 static void i40e_clear_interrupt_scheme(struct i40e_pf *pf)
4790 {
4791 int i;
4792
4793 i40e_free_misc_vector(pf);
4794
4795 i40e_put_lump(pf->irq_pile, pf->iwarp_base_vector,
4796 I40E_IWARP_IRQ_PILE_ID);
4797
4798 i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1);
4799 for (i = 0; i < pf->num_alloc_vsi; i++)
4800 if (pf->vsi[i])
4801 i40e_vsi_free_q_vectors(pf->vsi[i]);
4802 i40e_reset_interrupt_capability(pf);
4803 }
4804
4805 /**
4806  * i40e_napi_enable_all - Enable NAPI for all q_vectors in the VSI
4807  * @vsi: the VSI being configured
4808  **/
4809 static void i40e_napi_enable_all(struct i40e_vsi *vsi)
4810 {
4811 int q_idx;
4812
4813 if (!vsi->netdev)
4814 return;
4815
4816 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) {
4817 struct i40e_q_vector *q_vector = vsi->q_vectors[q_idx];
4818
4819 if (q_vector->rx.ring || q_vector->tx.ring)
4820 napi_enable(&q_vector->napi);
4821 }
4822 }
4823
4824 /**
4825  * i40e_napi_disable_all - Disable NAPI for all q_vectors in the VSI
4826  * @vsi: the VSI being configured
4827  **/
4828 static void i40e_napi_disable_all(struct i40e_vsi *vsi)
4829 {
4830 int q_idx;
4831
4832 if (!vsi->netdev)
4833 return;
4834
4835 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) {
4836 struct i40e_q_vector *q_vector = vsi->q_vectors[q_idx];
4837
4838 if (q_vector->rx.ring || q_vector->tx.ring)
4839 napi_disable(&q_vector->napi);
4840 }
4841 }
4842
4843 /**
4844  * i40e_vsi_close - Shut down a VSI
4845  * @vsi: the vsi to be quelled
4846  **/
4847 static void i40e_vsi_close(struct i40e_vsi *vsi)
4848 {
4849 struct i40e_pf *pf = vsi->back;
4850 if (!test_and_set_bit(__I40E_VSI_DOWN, vsi->state))
4851 i40e_down(vsi);
4852 i40e_vsi_free_irq(vsi);
4853 i40e_vsi_free_tx_resources(vsi);
4854 i40e_vsi_free_rx_resources(vsi);
4855 vsi->current_netdev_flags = 0;
4856 set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
4857 if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
4858 set_bit(__I40E_CLIENT_RESET, pf->state);
4859 }
4860
4861 /**
4862  * i40e_quiesce_vsi - Pause a given VSI
4863  * @vsi: the VSI being paused
4864  **/
4865 static void i40e_quiesce_vsi(struct i40e_vsi *vsi)
4866 {
4867 if (test_bit(__I40E_VSI_DOWN, vsi->state))
4868 return;
4869
4870 set_bit(__I40E_VSI_NEEDS_RESTART, vsi->state);
4871 if (vsi->netdev && netif_running(vsi->netdev))
4872 vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
4873 else
4874 i40e_vsi_close(vsi);
4875 }
4876
4877 /**
4878  * i40e_unquiesce_vsi - Resume a given VSI
4879  * @vsi: the VSI being resumed
4880  **/
4881 static void i40e_unquiesce_vsi(struct i40e_vsi *vsi)
4882 {
4883 if (!test_and_clear_bit(__I40E_VSI_NEEDS_RESTART, vsi->state))
4884 return;
4885
4886 if (vsi->netdev && netif_running(vsi->netdev))
4887 vsi->netdev->netdev_ops->ndo_open(vsi->netdev);
4888 else
4889 i40e_vsi_open(vsi);
4890 }
4891
4892 /**
4893  * i40e_pf_quiesce_all_vsi - Pause all VSIs on a PF
4894  * @pf: the PF
4895  **/
4896 static void i40e_pf_quiesce_all_vsi(struct i40e_pf *pf)
4897 {
4898 int v;
4899
4900 for (v = 0; v < pf->num_alloc_vsi; v++) {
4901 if (pf->vsi[v])
4902 i40e_quiesce_vsi(pf->vsi[v]);
4903 }
4904 }
4905
4906 /**
4907  * i40e_pf_unquiesce_all_vsi - Resume all VSIs on a PF
4908  * @pf: the PF
4909  **/
4910 static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf)
4911 {
4912 int v;
4913
4914 for (v = 0; v < pf->num_alloc_vsi; v++) {
4915 if (pf->vsi[v])
4916 i40e_unquiesce_vsi(pf->vsi[v]);
4917 }
4918 }
4919
4920 /**
4921  * i40e_vsi_wait_queues_disabled - Wait for VSI's queues to be disabled
4922  * @vsi: the VSI being configured
4923  *
4924  * Wait until all queues on a given VSI have been disabled.
4925  **/
4926 int i40e_vsi_wait_queues_disabled(struct i40e_vsi *vsi)
4927 {
4928 struct i40e_pf *pf = vsi->back;
4929 int i, pf_q, ret;
4930
4931 pf_q = vsi->base_queue;
4932 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
4933
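/* Check and wait for the Tx queue */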
4934 ret = i40e_pf_txq_wait(pf, pf_q, false);
4935 if (ret) {
4936 dev_info(&pf->pdev->dev,
4937 "VSI seid %d Tx ring %d disable timeout\n",
4938 vsi->seid, pf_q);
4939 return ret;
4940 }
4941
4942 if (!i40e_enabled_xdp_vsi(vsi))
4943 goto wait_rx;
4944
4945
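/* Check and wait for the XDP Tx queue */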
4946 ret = i40e_pf_txq_wait(pf, pf_q + vsi->alloc_queue_pairs,
4947 false);
4948 if (ret) {
4949 dev_info(&pf->pdev->dev,
4950 "VSI seid %d XDP Tx ring %d disable timeout\n",
4951 vsi->seid, pf_q);
4952 return ret;
4953 }
4954 wait_rx:
4955
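/* Check and wait for the Rx queue */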
4956 ret = i40e_pf_rxq_wait(pf, pf_q, false);
4957 if (ret) {
4958 dev_info(&pf->pdev->dev,
4959 "VSI seid %d Rx ring %d disable timeout\n",
4960 vsi->seid, pf_q);
4961 return ret;
4962 }
4963 }
4964
4965 return 0;
4966 }
4967
4968 #ifdef CONFIG_I40E_DCB
4969 /**
4970  * i40e_pf_wait_queues_disabled - Wait for all queues of PF VSIs to be disabled
4971  * @pf: the PF
4972  *
4973  * This function waits for the queues to be in disabled state for all the
4974  * VSIs that are managed by this PF.
4975  **/
4976 static int i40e_pf_wait_queues_disabled(struct i40e_pf *pf)
4977 {
4978 int v, ret = 0;
4979
4980 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
4981 if (pf->vsi[v]) {
4982 ret = i40e_vsi_wait_queues_disabled(pf->vsi[v]);
4983 if (ret)
4984 break;
4985 }
4986 }
4987
4988 return ret;
4989 }
4990
4991 #endif
4992
4993 /**
4994  * i40e_get_iscsi_tc_map - Return TC map for iSCSI APP
4995  * @pf: pointer to PF
4996  *
4997  * Get TC map for ISCSI PF type that will include iSCSI TC
4998  * and LAN TC.
4999  **/
5000 static u8 i40e_get_iscsi_tc_map(struct i40e_pf *pf)
5001 {
5002 struct i40e_dcb_app_priority_table app;
5003 struct i40e_hw *hw = &pf->hw;
5004 u8 enabled_tc = 1;
5005 u8 tc, i;
5006
5007 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
5008
5009 for (i = 0; i < dcbcfg->numapps; i++) {
5010 app = dcbcfg->app[i];
5011 if (app.selector == I40E_APP_SEL_TCPIP &&
5012 app.protocolid == I40E_APP_PROTOID_ISCSI) {
5013 tc = dcbcfg->etscfg.prioritytable[app.priority];
5014 enabled_tc |= BIT(tc);
5015 break;
5016 }
5017 }
5018
5019 return enabled_tc;
5020 }
5021
5022 /**
5023  * i40e_dcb_get_num_tc - Get the number of TCs from DCBx config
5024  * @dcbcfg: the corresponding DCBx configuration structure
5025  *
5026  * Return the number of TCs from given DCBx configuration
5027  **/
5028 static u8 i40e_dcb_get_num_tc(struct i40e_dcbx_config *dcbcfg)
5029 {
5030 int i, tc_unused = 0;
5031 u8 num_tc = 0;
5032 u8 ret = 0;
5033
5034 /* Scan the ETS Config Priority Table to find
5035  * traffic class enabled and create a bitmask
5036  * of enabled TCs
5037  */
5038 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
5039 num_tc |= BIT(dcbcfg->etscfg.prioritytable[i]);
5040
5041 /* Now scan the bitmask to check for
5042  * contiguous TCs starting with TC0
5043  */
5044 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5045 if (num_tc & BIT(i)) {
5046 if (!tc_unused) {
5047 ret++;
5048 } else {
5049 pr_err("Non-contiguous TC - Disabling DCB\n");
5050 return 1;
5051 }
5052 } else {
5053 tc_unused = 1;
5054 }
5055 }
5056
5057 /* There is always at least TC0 */
5058 if (!ret)
5059 ret = 1;
5060
5061 return ret;
5062 }
5063
5064 /**
5065  * i40e_dcb_get_enabled_tc - Get enabled traffic classes
5066  * @dcbcfg: the corresponding DCBx configuration structure
5067  *
5068  * Query the current DCB configuration and return the number of
5069  * traffic classes enabled from the given DCBx config
5070  **/
5071 static u8 i40e_dcb_get_enabled_tc(struct i40e_dcbx_config *dcbcfg)
5072 {
5073 u8 num_tc = i40e_dcb_get_num_tc(dcbcfg);
5074 u8 enabled_tc = 1;
5075 u8 i;
5076
5077 for (i = 0; i < num_tc; i++)
5078 enabled_tc |= BIT(i);
5079
5080 return enabled_tc;
5081 }
5082
5083 /**
5084  * i40e_mqprio_get_enabled_tc - Get enabled traffic classes
5085  * @pf: PF being queried
5086  *
5087  * Query the current MQPRIO configuration and return the number of
5088  * traffic classes enabled.
5089  **/
5090 static u8 i40e_mqprio_get_enabled_tc(struct i40e_pf *pf)
5091 {
5092 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
5093 u8 num_tc = vsi->mqprio_qopt.qopt.num_tc;
5094 u8 enabled_tc = 1, i;
5095
5096 for (i = 1; i < num_tc; i++)
5097 enabled_tc |= BIT(i);
5098 return enabled_tc;
5099 }
5100
5101 /**
5102  * i40e_pf_get_num_tc - Get enabled traffic classes for PF
5103  * @pf: PF being queried
5104  *
5105  * Return number of traffic classes enabled for the given PF
5106  **/
5107 static u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
5108 {
5109 struct i40e_hw *hw = &pf->hw;
5110 u8 i, enabled_tc = 1;
5111 u8 num_tc = 0;
5112 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
5113
5114 if (pf->flags & I40E_FLAG_TC_MQPRIO)
5115 return pf->vsi[pf->lan_vsi]->mqprio_qopt.qopt.num_tc;
5116
5117
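/* If neither MQPRIO nor DCB is enabled, then always use single TC */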
5118 if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
5119 return 1;
5120
5121
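/* SFP mode will be enabled for all TCs on port */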
5122 if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
5123 return i40e_dcb_get_num_tc(dcbcfg);
5124
5125
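/* MFP mode return count of enabled TCs for this PF */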
5126 if (pf->hw.func_caps.iscsi)
5127 enabled_tc = i40e_get_iscsi_tc_map(pf);
5128 else
5129 return 1;
5130
5131 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5132 if (enabled_tc & BIT(i))
5133 num_tc++;
5134 }
5135 return num_tc;
5136 }
5137
5138 /**
5139  * i40e_pf_get_tc_map - Get bitmap for enabled traffic classes
5140  * @pf: PF being queried
5141  *
5142  * Return a bitmap for enabled traffic classes for this PF.
5143  **/
5144 static u8 i40e_pf_get_tc_map(struct i40e_pf *pf)
5145 {
5146 if (pf->flags & I40E_FLAG_TC_MQPRIO)
5147 return i40e_mqprio_get_enabled_tc(pf);
5148
5149
5150
5151
5152 if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
5153 return I40E_DEFAULT_TRAFFIC_CLASS;
5154
5155
5156 if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
5157 return i40e_dcb_get_enabled_tc(&pf->hw.local_dcbx_config);
5158
5159
5160 if (pf->hw.func_caps.iscsi)
5161 return i40e_get_iscsi_tc_map(pf);
5162 else
5163 return I40E_DEFAULT_TRAFFIC_CLASS;
5164 }
5165
5166 /**
5167  * i40e_vsi_get_bw_info - Query VSI BW Information
5168  * @vsi: the VSI being queried
5169  *
5170  * Returns 0 on success, negative value on failure
5171  **/
5172 static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
5173 {
5174 struct i40e_aqc_query_vsi_ets_sla_config_resp bw_ets_config = {0};
5175 struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
5176 struct i40e_pf *pf = vsi->back;
5177 struct i40e_hw *hw = &pf->hw;
5178 i40e_status ret;
5179 u32 tc_bw_max;
5180 int i;
5181
5182
5183 ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
5184 if (ret) {
5185 dev_info(&pf->pdev->dev,
5186 "couldn't get PF vsi bw config, err %s aq_err %s\n",
5187 i40e_stat_str(&pf->hw, ret),
5188 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
5189 return -EINVAL;
5190 }
5191
5192
5193 ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &bw_ets_config,
5194 NULL);
5195 if (ret) {
5196 dev_info(&pf->pdev->dev,
5197 "couldn't get PF vsi ets bw config, err %s aq_err %s\n",
5198 i40e_stat_str(&pf->hw, ret),
5199 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
5200 return -EINVAL;
5201 }
5202
5203 if (bw_config.tc_valid_bits != bw_ets_config.tc_valid_bits) {
5204 dev_info(&pf->pdev->dev,
5205 "Enabled TCs mismatch from querying VSI BW info 0x%08x 0x%08x\n",
5206 bw_config.tc_valid_bits,
5207 bw_ets_config.tc_valid_bits);
5208
5209 }
5210
5211 vsi->bw_limit = le16_to_cpu(bw_config.port_bw_limit);
5212 vsi->bw_max_quanta = bw_config.max_bw;
5213 tc_bw_max = le16_to_cpu(bw_ets_config.tc_bw_max[0]) |
5214 (le16_to_cpu(bw_ets_config.tc_bw_max[1]) << 16);
5215 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5216 vsi->bw_ets_share_credits[i] = bw_ets_config.share_credits[i];
5217 vsi->bw_ets_limit_credits[i] =
5218 le16_to_cpu(bw_ets_config.credits[i]);
5219
5220 vsi->bw_ets_max_quanta[i] = (u8)((tc_bw_max >> (i*4)) & 0x7);
5221 }
5222
5223 return 0;
5224 }
5225
5226 /**
5227  * i40e_vsi_configure_bw_alloc - Configure VSI BW allocation per TC
5228  * @vsi: the VSI being configured
5229  * @enabled_tc: TC bitmap
5230  * @bw_share: BW shared credits per TC
5231  *
5232  * Returns 0 on success, negative value on failure
5233  **/
5234 static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
5235 u8 *bw_share)
5236 {
5237 struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
5238 struct i40e_pf *pf = vsi->back;
5239 i40e_status ret;
5240 int i;
5241
5242
5243 if (pf->flags & I40E_FLAG_TC_MQPRIO)
5244 return 0;
5245 if (!vsi->mqprio_qopt.qopt.hw && !(pf->flags & I40E_FLAG_DCB_ENABLED)) {
5246 ret = i40e_set_bw_limit(vsi, vsi->seid, 0);
5247 if (ret)
5248 dev_info(&pf->pdev->dev,
5249 "Failed to reset tx rate for vsi->seid %u\n",
5250 vsi->seid);
5251 return ret;
5252 }
5253 bw_data.tc_valid_bits = enabled_tc;
5254 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
5255 bw_data.tc_bw_credits[i] = bw_share[i];
5256
5257 ret = i40e_aq_config_vsi_tc_bw(&pf->hw, vsi->seid, &bw_data, NULL);
5258 if (ret) {
5259 dev_info(&pf->pdev->dev,
5260 "AQ command Config VSI BW allocation per TC failed = %d\n",
5261 pf->hw.aq.asq_last_status);
5262 return -EINVAL;
5263 }
5264
5265 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
5266 vsi->info.qs_handle[i] = bw_data.qs_handles[i];
5267
5268 return 0;
5269 }
5270
5271
5272 /**
5273  * i40e_vsi_config_netdev_tc - Setup the netdev TC configuration
5274  * @vsi: the VSI being configured
5275  * @enabled_tc: TC map to be enabled
5276  **/
5277 static void i40e_vsi_config_netdev_tc(struct i40e_vsi *vsi, u8 enabled_tc)
5278 {
5279 struct net_device *netdev = vsi->netdev;
5280 struct i40e_pf *pf = vsi->back;
5281 struct i40e_hw *hw = &pf->hw;
5282 u8 netdev_tc = 0;
5283 int i;
5284 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
5285
5286 if (!netdev)
5287 return;
5288
5289 if (!enabled_tc) {
5290 netdev_reset_tc(netdev);
5291 return;
5292 }
5293
5294
5295 if (netdev_set_num_tc(netdev, vsi->tc_config.numtc))
5296 return;
5297
5298
5299 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5300 /* Only set TC queues for enabled tcs
5301  *
5302  * e.g. For a VSI that has TC0 and TC3 enabled the
5303  * enabled_tc bitmap would be 0x00001001; the driver
5304  * will set the numtc for netdev as 2 that will be
5305  * referenced by the netdev layer as TC 0 and 1.
5306  */
5307 if (vsi->tc_config.enabled_tc & BIT(i))
5308 netdev_set_tc_queue(netdev,
5309 vsi->tc_config.tc_info[i].netdev_tc,
5310 vsi->tc_config.tc_info[i].qcount,
5311 vsi->tc_config.tc_info[i].qoffset);
5312 }
5313
5314 if (pf->flags & I40E_FLAG_TC_MQPRIO)
5315 return;
5316
5317
5318 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
5319
5320 u8 ets_tc = dcbcfg->etscfg.prioritytable[i];
5321
5322 netdev_tc = vsi->tc_config.tc_info[ets_tc].netdev_tc;
5323 netdev_set_prio_tc_map(netdev, i, netdev_tc);
5324 }
5325 }
5326
5327 /**
5328  * i40e_vsi_update_queue_map - Update our copy of VSI info with new queue map
5329  * @vsi: the VSI being configured
5330  * @ctxt: VSI context structure
5331  **/
5332 static void i40e_vsi_update_queue_map(struct i40e_vsi *vsi,
5333 struct i40e_vsi_context *ctxt)
5334 {
5335 /* copy just the sections touched not the entire info
5336  * since not all sections are valid as returned by
5337  * update vsi params
5338  */
5339 vsi->info.mapping_flags = ctxt->info.mapping_flags;
5340 memcpy(&vsi->info.queue_mapping,
5341 &ctxt->info.queue_mapping, sizeof(vsi->info.queue_mapping));
5342 memcpy(&vsi->info.tc_mapping, ctxt->info.tc_mapping,
5343 sizeof(vsi->info.tc_mapping));
5344 }
5345
5346 /**
5347  * i40e_vsi_config_tc - Configure VSI Tx Scheduler for given TC map
5348  * @vsi: VSI to be configured
5349  * @enabled_tc: TC bitmap
5350  *
5351  * This configures a particular VSI for TCs that are mapped to the
5352  * given TC bitmap. It uses default bandwidth share for TCs across
5353  * VSIs to configure TC for a particular VSI.
5354  *
5355  * NOTE:
5356  * It is expected that the VSI queues have been quiesced before calling
5357  * this function.
5358  **/
5359 static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
5360 {
5361 u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0};
5362 struct i40e_pf *pf = vsi->back;
5363 struct i40e_hw *hw = &pf->hw;
5364 struct i40e_vsi_context ctxt;
5365 int ret = 0;
5366 int i;
5367
5368
5369 if (vsi->tc_config.enabled_tc == enabled_tc &&
5370 vsi->mqprio_qopt.mode != TC_MQPRIO_MODE_CHANNEL)
5371 return ret;
5372
5373
5374 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5375 if (enabled_tc & BIT(i))
5376 bw_share[i] = 1;
5377 }
5378
5379 ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share);
5380 if (ret) {
5381 struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
5382
5383 dev_info(&pf->pdev->dev,
5384 "Failed configuring TC map %d for VSI %d\n",
5385 enabled_tc, vsi->seid);
5386 ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid,
5387 &bw_config, NULL);
5388 if (ret) {
5389 dev_info(&pf->pdev->dev,
5390 "Failed querying vsi bw info, err %s aq_err %s\n",
5391 i40e_stat_str(hw, ret),
5392 i40e_aq_str(hw, hw->aq.asq_last_status));
5393 goto out;
5394 }
5395 if ((bw_config.tc_valid_bits & enabled_tc) != enabled_tc) {
5396 u8 valid_tc = bw_config.tc_valid_bits & enabled_tc;
5397
5398 if (!valid_tc)
5399 valid_tc = bw_config.tc_valid_bits;
5400
5401 valid_tc |= 1;
5402 dev_info(&pf->pdev->dev,
5403 "Requested tc 0x%x, but FW reports 0x%x as valid. Attempting to use 0x%x.\n",
5404 enabled_tc, bw_config.tc_valid_bits, valid_tc);
5405 enabled_tc = valid_tc;
5406 }
5407
5408 ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share);
5409 if (ret) {
5410 dev_err(&pf->pdev->dev,
5411 "Unable to configure TC map %d for VSI %d\n",
5412 enabled_tc, vsi->seid);
5413 goto out;
5414 }
5415 }
5416
5417
5418 ctxt.seid = vsi->seid;
5419 ctxt.pf_num = vsi->back->hw.pf_id;
5420 ctxt.vf_num = 0;
5421 ctxt.uplink_seid = vsi->uplink_seid;
5422 ctxt.info = vsi->info;
5423 if (vsi->back->flags & I40E_FLAG_TC_MQPRIO) {
5424 ret = i40e_vsi_setup_queue_map_mqprio(vsi, &ctxt, enabled_tc);
5425 if (ret)
5426 goto out;
5427 } else {
5428 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
5429 }
5430
5431
5432
5433
5434 if (!vsi->mqprio_qopt.qopt.hw && vsi->reconfig_rss) {
5435 vsi->rss_size = min_t(int, vsi->back->alloc_rss_size,
5436 vsi->num_queue_pairs);
5437 ret = i40e_vsi_config_rss(vsi);
5438 if (ret) {
5439 dev_info(&vsi->back->pdev->dev,
5440 "Failed to reconfig rss for num_queues\n");
5441 return ret;
5442 }
5443 vsi->reconfig_rss = false;
5444 }
5445 if (vsi->back->flags & I40E_FLAG_IWARP_ENABLED) {
5446 ctxt.info.valid_sections |=
5447 cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
5448 ctxt.info.queueing_opt_flags |= I40E_AQ_VSI_QUE_OPT_TCP_ENA;
5449 }
5450
5451
5452
5453
5454 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
5455 if (ret) {
5456 dev_info(&pf->pdev->dev,
5457 "Update vsi tc config failed, err %s aq_err %s\n",
5458 i40e_stat_str(hw, ret),
5459 i40e_aq_str(hw, hw->aq.asq_last_status));
5460 goto out;
5461 }
5462
5463 i40e_vsi_update_queue_map(vsi, &ctxt);
5464 vsi->info.valid_sections = 0;
5465
5466
5467 ret = i40e_vsi_get_bw_info(vsi);
5468 if (ret) {
5469 dev_info(&pf->pdev->dev,
5470 "Failed updating vsi bw info, err %s aq_err %s\n",
5471 i40e_stat_str(hw, ret),
5472 i40e_aq_str(hw, hw->aq.asq_last_status));
5473 goto out;
5474 }
5475
5476
5477 i40e_vsi_config_netdev_tc(vsi, enabled_tc);
5478 out:
5479 return ret;
5480 }
5481
5482
5483 /**
5484  * i40e_get_link_speed - Returns link speed for the interface
5485  * @vsi: VSI to be configured
5486  **/
5487 static int i40e_get_link_speed(struct i40e_vsi *vsi)
5488 {
5489 struct i40e_pf *pf = vsi->back;
5490
5491 switch (pf->hw.phy.link_info.link_speed) {
5492 case I40E_LINK_SPEED_40GB:
5493 return 40000;
5494 case I40E_LINK_SPEED_25GB:
5495 return 25000;
5496 case I40E_LINK_SPEED_20GB:
5497 return 20000;
5498 case I40E_LINK_SPEED_10GB:
5499 return 10000;
5500 case I40E_LINK_SPEED_1GB:
5501 return 1000;
5502 default:
5503 return -EINVAL;
5504 }
5505 }
5506
5507 /**
5508  * i40e_set_bw_limit - setup BW limit for Tx traffic based on max_tx_rate
5509  * @vsi: VSI to be configured
5510  * @seid: seid of the channel/VSI
5511  * @max_tx_rate: max TX rate to be configured as BW limit
5512  *
5513  * Helper function to set BW limit for a given VSI
5514  **/
5515 int i40e_set_bw_limit(struct i40e_vsi *vsi, u16 seid, u64 max_tx_rate)
5516 {
5517 struct i40e_pf *pf = vsi->back;
5518 u64 credits = 0;
5519 int speed = 0;
5520 int ret = 0;
5521
5522 speed = i40e_get_link_speed(vsi);
5523 if (max_tx_rate > speed) {
5524 dev_err(&pf->pdev->dev,
5525 "Invalid max tx rate %llu specified for VSI seid %d.",
5526 max_tx_rate, seid);
5527 return -EINVAL;
5528 }
5529 if (max_tx_rate && max_tx_rate < 50) {
5530 dev_warn(&pf->pdev->dev,
5531 "Setting max tx rate to minimum usable value of 50Mbps.\n");
5532 max_tx_rate = 50;
5533 }
5534
5535
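/* Tx rate credits are in values of 50Mbps, 0 is disabled */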
5536 credits = max_tx_rate;
5537 do_div(credits, I40E_BW_CREDIT_DIVISOR);
5538 ret = i40e_aq_config_vsi_bw_limit(&pf->hw, seid, credits,
5539 I40E_MAX_BW_INACTIVE_ACCUM, NULL);
5540 if (ret)
5541 dev_err(&pf->pdev->dev,
5542 "Failed set tx rate (%llu Mbps) for vsi->seid %u, err %s aq_err %s\n",
5543 max_tx_rate, seid, i40e_stat_str(&pf->hw, ret),
5544 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
5545 return ret;
5546 }
5547
5548 /**
5549  * i40e_remove_queue_channels - Remove queue channels for the TCs
5550  * @vsi: VSI to be configured
5551  *
5552  * Remove queue channels for the TCs
5553  **/
5554 static void i40e_remove_queue_channels(struct i40e_vsi *vsi)
5555 {
5556 enum i40e_admin_queue_err last_aq_status;
5557 struct i40e_cloud_filter *cfilter;
5558 struct i40e_channel *ch, *ch_tmp;
5559 struct i40e_pf *pf = vsi->back;
5560 struct hlist_node *node;
5561 int ret, i;
5562
5563 /* Reset rss size that was stored when reconfiguring rss for
5564  * channel VSIs with non-power-of-2 queue count.
5565  */
5566 vsi->current_rss_size = 0;
5567
5568
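/* perform cleanup for channels if they exist */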
5569 if (list_empty(&vsi->ch_list))
5570 return;
5571
5572 list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
5573 struct i40e_vsi *p_vsi;
5574
5575 list_del(&ch->list);
5576 p_vsi = ch->parent_vsi;
5577 if (!p_vsi || !ch->initialized) {
5578 kfree(ch);
5579 continue;
5580 }
5581
5582 for (i = 0; i < ch->num_queue_pairs; i++) {
5583 struct i40e_ring *tx_ring, *rx_ring;
5584 u16 pf_q;
5585
5586 pf_q = ch->base_queue + i;
5587 tx_ring = vsi->tx_rings[pf_q];
5588 tx_ring->ch = NULL;
5589
5590 rx_ring = vsi->rx_rings[pf_q];
5591 rx_ring->ch = NULL;
5592 }
5593
5594
5595 ret = i40e_set_bw_limit(vsi, ch->seid, 0);
5596 if (ret)
5597 dev_info(&vsi->back->pdev->dev,
5598 "Failed to reset tx rate for ch->seid %u\n",
5599 ch->seid);
5600
5601
5602 hlist_for_each_entry_safe(cfilter, node,
5603 &pf->cloud_filter_list, cloud_node) {
5604 if (cfilter->seid != ch->seid)
5605 continue;
5606
5607 hash_del(&cfilter->cloud_node);
5608 if (cfilter->dst_port)
5609 ret = i40e_add_del_cloud_filter_big_buf(vsi,
5610 cfilter,
5611 false);
5612 else
5613 ret = i40e_add_del_cloud_filter(vsi, cfilter,
5614 false);
5615 last_aq_status = pf->hw.aq.asq_last_status;
5616 if (ret)
5617 dev_info(&pf->pdev->dev,
5618 "Failed to delete cloud filter, err %s aq_err %s\n",
5619 i40e_stat_str(&pf->hw, ret),
5620 i40e_aq_str(&pf->hw, last_aq_status));
5621 kfree(cfilter);
5622 }
5623
5624
5625 ret = i40e_aq_delete_element(&vsi->back->hw, ch->seid,
5626 NULL);
5627 if (ret)
5628 dev_err(&vsi->back->pdev->dev,
5629 "unable to remove channel (%d) for parent VSI(%d)\n",
5630 ch->seid, p_vsi->seid);
5631 kfree(ch);
5632 }
5633 INIT_LIST_HEAD(&vsi->ch_list);
5634 }
5635
5636 /**
5637  * i40e_is_any_channel - channel exist or not
5638  * @vsi: ptr to VSI to which channels are associated with
5639  *
5640  * Returns true or false if channel(s) exist for associated VSI or not
5641  **/
5642 static bool i40e_is_any_channel(struct i40e_vsi *vsi)
5643 {
5644 struct i40e_channel *ch, *ch_tmp;
5645
5646 list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
5647 if (ch->initialized)
5648 return true;
5649 }
5650
5651 return false;
5652 }
5653
5654 /**
5655  * i40e_get_max_queues_for_channel
5656  * @vsi: ptr to VSI to which channels are associated with
5657  *
5658  * Helper function which returns max value among the queue counts set on the
5659  * channels/TCs created.
5660  **/
5661 static int i40e_get_max_queues_for_channel(struct i40e_vsi *vsi)
5662 {
5663 struct i40e_channel *ch, *ch_tmp;
5664 int max = 0;
5665
5666 list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
5667 if (!ch->initialized)
5668 continue;
5669 if (ch->num_queue_pairs > max)
5670 max = ch->num_queue_pairs;
5671 }
5672
5673 return max;
5674 }
5675
5676 /**
5677  * i40e_validate_num_queues - validate num_queues w.r.t channel
5678  * @pf: ptr to PF device
5679  * @num_queues: number of queues
5680  * @vsi: the parent VSI
5681  * @reconfig_rss: indicates should the RSS be reconfigured or not
5682  *
5683  * This function validates number of queues in the context of new channel
5684  * which is being established and determines if RSS should be reconfigured
5685  * or not for parent VSI.
5686  **/
5687 static int i40e_validate_num_queues(struct i40e_pf *pf, int num_queues,
5688 struct i40e_vsi *vsi, bool *reconfig_rss)
5689 {
5690 int max_ch_queues;
5691
5692 if (!reconfig_rss)
5693 return -EINVAL;
5694
5695 *reconfig_rss = false;
5696 if (vsi->current_rss_size) {
5697 if (num_queues > vsi->current_rss_size) {
5698 dev_dbg(&pf->pdev->dev,
5699 "Error: num_queues (%d) > vsi's current_size(%d)\n",
5700 num_queues, vsi->current_rss_size);
5701 return -EINVAL;
5702 } else if ((num_queues < vsi->current_rss_size) &&
5703 (!is_power_of_2(num_queues))) {
5704 dev_dbg(&pf->pdev->dev,
5705 "Error: num_queues (%d) < vsi's current_size(%d), but not power of 2\n",
5706 num_queues, vsi->current_rss_size);
5707 return -EINVAL;
5708 }
5709 }
5710
5711 if (!is_power_of_2(num_queues)) {
5712
5713
5714
5715
5716
5717 max_ch_queues = i40e_get_max_queues_for_channel(vsi);
5718 if (num_queues < max_ch_queues) {
5719 dev_dbg(&pf->pdev->dev,
5720 "Error: num_queues (%d) < max queues configured for channel(%d)\n",
5721 num_queues, max_ch_queues);
5722 return -EINVAL;
5723 }
5724 *reconfig_rss = true;
5725 }
5726
5727 return 0;
5728 }
5729
5730 /**
5731  * i40e_vsi_reconfig_rss - reconfig RSS based on specified rss_size
5732  * @vsi: the VSI being setup
5733  * @rss_size: size of RSS, accordingly LUT gets reprogrammed
5734  *
5735  * This function reconfigures RSS by reprogramming LUTs using 'rss_size'
5736  **/
5737 static int i40e_vsi_reconfig_rss(struct i40e_vsi *vsi, u16 rss_size)
5738 {
5739 struct i40e_pf *pf = vsi->back;
5740 u8 seed[I40E_HKEY_ARRAY_SIZE];
5741 struct i40e_hw *hw = &pf->hw;
5742 int local_rss_size;
5743 u8 *lut;
5744 int ret;
5745
5746 if (!vsi->rss_size)
5747 return -EINVAL;
5748
5749 if (rss_size > vsi->rss_size)
5750 return -EINVAL;
5751
5752 local_rss_size = min_t(int, vsi->rss_size, rss_size);
5753 lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
5754 if (!lut)
5755 return -ENOMEM;
5756
5757
5758 i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, local_rss_size);
5759
5760
5761
5762
5763 if (vsi->rss_hkey_user)
5764 memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
5765 else
5766 netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
5767
5768 ret = i40e_config_rss(vsi, seed, lut, vsi->rss_table_size);
5769 if (ret) {
5770 dev_info(&pf->pdev->dev,
5771 "Cannot set RSS lut, err %s aq_err %s\n",
5772 i40e_stat_str(hw, ret),
5773 i40e_aq_str(hw, hw->aq.asq_last_status));
5774 kfree(lut);
5775 return ret;
5776 }
5777 kfree(lut);
5778
5779
5780 if (!vsi->orig_rss_size)
5781 vsi->orig_rss_size = vsi->rss_size;
5782 vsi->current_rss_size = local_rss_size;
5783
5784 return ret;
5785 }
5786
5787
5788 /**
5789  * i40e_channel_setup_queue_map - Setup a channel queue map
5790  * @pf: ptr to PF device
5791  * @ctxt: VSI context structure
5792  * @ch: ptr to channel structure
5793  *
5794  * Setup queue map for a specific channel
5795  **/
5796 static void i40e_channel_setup_queue_map(struct i40e_pf *pf,
5797 struct i40e_vsi_context *ctxt,
5798 struct i40e_channel *ch)
5799 {
5800 u16 qcount, qmap, sections = 0;
5801 u8 offset = 0;
5802 int pow;
5803
5804 sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
5805 sections |= I40E_AQ_VSI_PROP_SCHED_VALID;
5806
5807 qcount = min_t(int, ch->num_queue_pairs, pf->num_lan_msix);
5808 ch->num_queue_pairs = qcount;
5809
5810
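/* find the next higher power-of-2 of num queue pairs */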
5811 pow = ilog2(qcount);
5812 if (!is_power_of_2(qcount))
5813 pow++;
5814
5815 qmap = (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
5816 (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
5817
5818
5819 ctxt->info.tc_mapping[0] = cpu_to_le16(qmap);
5820
5821 ctxt->info.up_enable_bits = 0x1;
5822 ctxt->info.mapping_flags |= cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
5823 ctxt->info.queue_mapping[0] = cpu_to_le16(ch->base_queue);
5824 ctxt->info.valid_sections |= cpu_to_le16(sections);
5825 }
5826
5827 /**
5828  * i40e_add_channel - add a channel by adding VSI
5829  * @pf: ptr to PF device
5830  * @uplink_seid: underlying HW switching element (VEB) ID
5831  * @ch: ptr to channel structure
5832  *
5833  * Add a channel (VSI) using add_vsi and queue_map
5834  **/
5835 static int i40e_add_channel(struct i40e_pf *pf, u16 uplink_seid,
5836 struct i40e_channel *ch)
5837 {
5838 struct i40e_hw *hw = &pf->hw;
5839 struct i40e_vsi_context ctxt;
5840 u8 enabled_tc = 0x1;
5841 int ret;
5842
5843 if (ch->type != I40E_VSI_VMDQ2) {
5844 dev_info(&pf->pdev->dev,
5845 "add new vsi failed, ch->type %d\n", ch->type);
5846 return -EINVAL;
5847 }
5848
5849 memset(&ctxt, 0, sizeof(ctxt));
5850 ctxt.pf_num = hw->pf_id;
5851 ctxt.vf_num = 0;
5852 ctxt.uplink_seid = uplink_seid;
5853 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
5854 if (ch->type == I40E_VSI_VMDQ2)
5855 ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;
5856
5857 if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED) {
5858 ctxt.info.valid_sections |=
5859 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
5860 ctxt.info.switch_id =
5861 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
5862 }
5863
5864
5865 i40e_channel_setup_queue_map(pf, &ctxt, ch);
5866
5867
5868 ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
5869 if (ret) {
5870 dev_info(&pf->pdev->dev,
5871 "add new vsi failed, err %s aq_err %s\n",
5872 i40e_stat_str(&pf->hw, ret),
5873 i40e_aq_str(&pf->hw,
5874 pf->hw.aq.asq_last_status));
5875 return -ENOENT;
5876 }
5877
5878
5879
5880
5881 ch->enabled_tc = !i40e_is_channel_macvlan(ch) && enabled_tc;
5882 ch->seid = ctxt.seid;
5883 ch->vsi_number = ctxt.vsi_number;
5884 ch->stat_counter_idx = cpu_to_le16(ctxt.info.stat_counter_idx);
5885
5886
5887
5888
5889
5890 ch->info.mapping_flags = ctxt.info.mapping_flags;
5891 memcpy(&ch->info.queue_mapping,
5892 &ctxt.info.queue_mapping, sizeof(ctxt.info.queue_mapping));
5893 memcpy(&ch->info.tc_mapping, ctxt.info.tc_mapping,
5894 sizeof(ctxt.info.tc_mapping));
5895
5896 return 0;
5897 }
5898
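/**
 * i40e_channel_config_bw - Configure BW for the channel
 * @vsi: the VSI being setup
 * @ch: ptr to channel structure
 * @bw_share: bandwidth share per TC
 **/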
5899 static int i40e_channel_config_bw(struct i40e_vsi *vsi, struct i40e_channel *ch,
5900 u8 *bw_share)
5901 {
5902 struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
5903 i40e_status ret;
5904 int i;
5905
5906 bw_data.tc_valid_bits = ch->enabled_tc;
5907 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
5908 bw_data.tc_bw_credits[i] = bw_share[i];
5909
5910 ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, ch->seid,
5911 &bw_data, NULL);
5912 if (ret) {
5913 dev_info(&vsi->back->pdev->dev,
5914 "Config VSI BW allocation per TC failed, aq_err: %d for new_vsi->seid %u\n",
5915 vsi->back->hw.aq.asq_last_status, ch->seid);
5916 return -EINVAL;
5917 }
5918
5919 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
5920 ch->info.qs_handle[i] = bw_data.qs_handles[i];
5921
5922 return 0;
5923 }
5924
5925
5926 /**
5927  * i40e_channel_config_tx_ring - config TX ring associated with new channel
5928  * @pf: ptr to PF device
5929  * @vsi: the VSI being setup
5930  * @ch: ptr to channel structure
5931  *
5932  * Configure TX ring associated with new channel
5933  **/
5934 static int i40e_channel_config_tx_ring(struct i40e_pf *pf,
5935 struct i40e_vsi *vsi,
5936 struct i40e_channel *ch)
5937 {
5938 i40e_status ret;
5939 int i;
5940 u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0};
5941
5942
5943 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5944 if (ch->enabled_tc & BIT(i))
5945 bw_share[i] = 1;
5946 }
5947
5948
5949 ret = i40e_channel_config_bw(vsi, ch, bw_share);
5950 if (ret) {
5951 dev_info(&vsi->back->pdev->dev,
5952 "Failed configuring TC map %d for channel (seid %u)\n",
5953 ch->enabled_tc, ch->seid);
5954 return ret;
5955 }
5956
5957 for (i = 0; i < ch->num_queue_pairs; i++) {
5958 struct i40e_ring *tx_ring, *rx_ring;
5959 u16 pf_q;
5960
5961 pf_q = ch->base_queue + i;
5962
5963
5964
5965
5966 tx_ring = vsi->tx_rings[pf_q];
5967 tx_ring->ch = ch;
5968
5969
5970 rx_ring = vsi->rx_rings[pf_q];
5971 rx_ring->ch = ch;
5972 }
5973
5974 return 0;
5975 }
5976
5977 /**
5978  * i40e_setup_hw_channel - setup new channel
5979  * @pf: ptr to PF device
5980  * @vsi: the VSI being setup
5981  * @ch: ptr to channel structure
5982  * @uplink_seid: underlying HW switching element (VEB) ID
5983  * @type: type of channel to be created (VMDq2/VF)
5984  *
5985  * Setup new channel (VSI) based on specified type (VMDq2/VF)
5986  * and configures TX rings accordingly
5987  **/
5988 static inline int i40e_setup_hw_channel(struct i40e_pf *pf,
5989 struct i40e_vsi *vsi,
5990 struct i40e_channel *ch,
5991 u16 uplink_seid, u8 type)
5992 {
5993 int ret;
5994
5995 ch->initialized = false;
5996 ch->base_queue = vsi->next_base_queue;
5997 ch->type = type;
5998
5999
6000 ret = i40e_add_channel(pf, uplink_seid, ch);
6001 if (ret) {
6002 dev_info(&pf->pdev->dev,
6003 "failed to add_channel using uplink_seid %u\n",
6004 uplink_seid);
6005 return ret;
6006 }
6007
6008
6009 ch->initialized = true;
6010
6011
6012 ret = i40e_channel_config_tx_ring(pf, vsi, ch);
6013 if (ret) {
6014 dev_info(&pf->pdev->dev,
6015 "failed to configure TX rings for channel %u\n",
6016 ch->seid);
6017 return ret;
6018 }
6019
6020
6021 vsi->next_base_queue = vsi->next_base_queue + ch->num_queue_pairs;
6022 dev_dbg(&pf->pdev->dev,
6023 "Added channel: vsi_seid %u, vsi_number %u, stat_counter_idx %u, num_queue_pairs %u, pf->next_base_queue %d\n",
6024 ch->seid, ch->vsi_number, ch->stat_counter_idx,
6025 ch->num_queue_pairs,
6026 vsi->next_base_queue);
6027 return ret;
6028 }
6029
6030
6031 /**
6032  * i40e_setup_channel - setup new channel using uplink element
6033  * @pf: ptr to PF device
6034  * @vsi: pointer to the VSI to set up the channel within
6035  * @ch: ptr to channel structure
6036  *
6037  * Setup new channel (VSI) based on specified type (VMDq2/VF)
6038  * and uplink switching element (uplink_seid)
6039  **/
6040 static bool i40e_setup_channel(struct i40e_pf *pf, struct i40e_vsi *vsi,
6041 struct i40e_channel *ch)
6042 {
6043 u8 vsi_type;
6044 u16 seid;
6045 int ret;
6046
6047 if (vsi->type == I40E_VSI_MAIN) {
6048 vsi_type = I40E_VSI_VMDQ2;
6049 } else {
6050 dev_err(&pf->pdev->dev, "unsupported parent vsi type(%d)\n",
6051 vsi->type);
6052 return false;
6053 }
6054
6055
6056 seid = pf->vsi[pf->lan_vsi]->uplink_seid;
6057
6058
6059 ret = i40e_setup_hw_channel(pf, vsi, ch, seid, vsi_type);
6060 if (ret) {
6061 dev_err(&pf->pdev->dev, "failed to setup hw_channel\n");
6062 return false;
6063 }
6064
6065 return ch->initialized ? true : false;
6066 }
6067
6068 /**
6069  * i40e_validate_and_set_switch_mode - Validate and set switch mode
6070  * @vsi: ptr to VSI which is being configured
6071  *
6072  * Validate the current switch mode and, if required, set it to the
6073  * mode which supports L4 type cloud filters.
6074  **/
6075 static int i40e_validate_and_set_switch_mode(struct i40e_vsi *vsi)
6076 {
6077 u8 mode;
6078 struct i40e_pf *pf = vsi->back;
6079 struct i40e_hw *hw = &pf->hw;
6080 int ret;
6081
6082 ret = i40e_get_capabilities(pf, i40e_aqc_opc_list_dev_capabilities);
6083 if (ret)
6084 return -EINVAL;
6085
6086 if (hw->dev_caps.switch_mode) {
6087
6088
6089
6090 u32 switch_mode = hw->dev_caps.switch_mode &
6091 I40E_SWITCH_MODE_MASK;
6092 if (switch_mode >= I40E_CLOUD_FILTER_MODE1) {
6093 if (switch_mode == I40E_CLOUD_FILTER_MODE2)
6094 return 0;
6095 dev_err(&pf->pdev->dev,
6096 "Invalid switch_mode (%d), only non-tunneled mode for cloud filter is supported\n",
6097 hw->dev_caps.switch_mode);
6098 return -EINVAL;
6099 }
6100 }
6101
6102
6103 mode = I40E_AQ_SET_SWITCH_BIT7_VALID;
6104
6105
6106 mode |= I40E_AQ_SET_SWITCH_L4_TYPE_TCP;
6107
6108
6109 mode |= I40E_AQ_SET_SWITCH_MODE_NON_TUNNEL;
6110
6111
6112 ret = i40e_aq_set_switch_config(hw, pf->last_sw_conf_flags,
6113 pf->last_sw_conf_valid_flags,
6114 mode, NULL);
6115 if (ret && hw->aq.asq_last_status != I40E_AQ_RC_ESRCH)
6116 dev_err(&pf->pdev->dev,
6117 "couldn't set switch config bits, err %s aq_err %s\n",
6118 i40e_stat_str(hw, ret),
6119 i40e_aq_str(hw,
6120 hw->aq.asq_last_status));
6121
6122 return ret;
6123 }
6124
6125 /**
6126  * i40e_create_queue_channel - function to create channel
6127  * @vsi: VSI to be configured
6128  * @ch: ptr to channel (it contains channel specific params)
6129  *
6130  * This function creates channel (VSI) using num_queues specified by user,
6131  * reconfigs RSS if needed.
6132  **/
6133 int i40e_create_queue_channel(struct i40e_vsi *vsi,
6134 struct i40e_channel *ch)
6135 {
6136 struct i40e_pf *pf = vsi->back;
6137 bool reconfig_rss;
6138 int err;
6139
6140 if (!ch)
6141 return -EINVAL;
6142
6143 if (!ch->num_queue_pairs) {
6144 dev_err(&pf->pdev->dev, "Invalid num_queues requested: %d\n",
6145 ch->num_queue_pairs);
6146 return -EINVAL;
6147 }
6148
6149
6150 err = i40e_validate_num_queues(pf, ch->num_queue_pairs, vsi,
6151 &reconfig_rss);
6152 if (err) {
6153 dev_info(&pf->pdev->dev, "Failed to validate num_queues (%d)\n",
6154 ch->num_queue_pairs);
6155 return -EINVAL;
6156 }
6157
6158
6159
6160
6161 if ((!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) ||
6162 (!i40e_is_any_channel(vsi))) {
6163 if (!is_power_of_2(vsi->tc_config.tc_info[0].qcount)) {
6164 dev_dbg(&pf->pdev->dev,
6165 "Failed to create channel. Override queues (%u) not power of 2\n",
6166 vsi->tc_config.tc_info[0].qcount);
6167 return -EINVAL;
6168 }
6169
6170 if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
6171 pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
6172
6173 if (vsi->type == I40E_VSI_MAIN) {
6174 if (pf->flags & I40E_FLAG_TC_MQPRIO)
6175 i40e_do_reset(pf, I40E_PF_RESET_FLAG,
6176 true);
6177 else
6178 i40e_do_reset_safe(pf,
6179 I40E_PF_RESET_FLAG);
6180 }
6181 }
6182
6183
6184
6185 }
6186
6187
6188
6189
6190 if (!vsi->cnt_q_avail || vsi->cnt_q_avail < ch->num_queue_pairs) {
6191 dev_dbg(&pf->pdev->dev,
6192 "Error: cnt_q_avail (%u) less than num_queues %d\n",
6193 vsi->cnt_q_avail, ch->num_queue_pairs);
6194 return -EINVAL;
6195 }
6196
6197
6198 if (reconfig_rss && (vsi->type == I40E_VSI_MAIN)) {
6199 err = i40e_vsi_reconfig_rss(vsi, ch->num_queue_pairs);
6200 if (err) {
6201 dev_info(&pf->pdev->dev,
6202 "Error: unable to reconfig rss for num_queues (%u)\n",
6203 ch->num_queue_pairs);
6204 return -EINVAL;
6205 }
6206 }
6207
6208 if (!i40e_setup_channel(pf, vsi, ch)) {
6209 dev_info(&pf->pdev->dev, "Failed to setup channel\n");
6210 return -EINVAL;
6211 }
6212
6213 dev_info(&pf->pdev->dev,
6214 "Setup channel (id:%u) utilizing num_queues %d\n",
6215 ch->seid, ch->num_queue_pairs);
6216
6217
6218 if (ch->max_tx_rate) {
6219 u64 credits = ch->max_tx_rate;
6220
6221 if (i40e_set_bw_limit(vsi, ch->seid, ch->max_tx_rate))
6222 return -EINVAL;
6223
6224 do_div(credits, I40E_BW_CREDIT_DIVISOR);
6225 dev_dbg(&pf->pdev->dev,
6226 "Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
6227 ch->max_tx_rate,
6228 credits,
6229 ch->seid);
6230 }
6231
6232
6233 ch->parent_vsi = vsi;
6234
6235
6236 vsi->cnt_q_avail -= ch->num_queue_pairs;
6237
6238 return 0;
6239 }
6240
6241 /**
6242  * i40e_configure_queue_channels - Add queue channel for the given TCs
6243  * @vsi: VSI to be configured
6244  *
6245  * Configures queue channel mapping to the given TCs
6246  **/
6247 static int i40e_configure_queue_channels(struct i40e_vsi *vsi)
6248 {
6249 struct i40e_channel *ch;
6250 u64 max_rate = 0;
6251 int ret = 0, i;
6252
6253
6254 vsi->tc_seid_map[0] = vsi->seid;
6255 for (i = 1; i < I40E_MAX_TRAFFIC_CLASS; i++) {
6256 if (vsi->tc_config.enabled_tc & BIT(i)) {
6257 ch = kzalloc(sizeof(*ch), GFP_KERNEL);
6258 if (!ch) {
6259 ret = -ENOMEM;
6260 goto err_free;
6261 }
6262
6263 INIT_LIST_HEAD(&ch->list);
6264 ch->num_queue_pairs =
6265 vsi->tc_config.tc_info[i].qcount;
6266 ch->base_queue =
6267 vsi->tc_config.tc_info[i].qoffset;
6268
6269
6270
6271
6272 max_rate = vsi->mqprio_qopt.max_rate[i];
6273 do_div(max_rate, I40E_BW_MBPS_DIVISOR);
6274 ch->max_tx_rate = max_rate;
6275
6276 list_add_tail(&ch->list, &vsi->ch_list);
6277
6278 ret = i40e_create_queue_channel(vsi, ch);
6279 if (ret) {
6280 dev_err(&vsi->back->pdev->dev,
6281 "Failed creating queue channel with TC%d: queues %d\n",
6282 i, ch->num_queue_pairs);
6283 goto err_free;
6284 }
6285 vsi->tc_seid_map[i] = ch->seid;
6286 }
6287 }
6288 return ret;
6289
6290 err_free:
6291 i40e_remove_queue_channels(vsi);
6292 return ret;
6293 }
6294
6295 /**
6296  * i40e_veb_config_tc - Configure TCs for given VEB
6297  * @veb: given VEB
6298  * @enabled_tc: TC bitmap
6299  *
6300  * Configures given TC bitmap for VEB (switching) element
6301  **/
6302 int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc)
6303 {
6304 struct i40e_aqc_configure_switching_comp_bw_config_data bw_data = {0};
6305 struct i40e_pf *pf = veb->pf;
6306 int ret = 0;
6307 int i;
6308
6309
6310 if (!enabled_tc || veb->enabled_tc == enabled_tc)
6311 return ret;
6312
6313 bw_data.tc_valid_bits = enabled_tc;
6314
6315
6316
6317 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
6318 if (enabled_tc & BIT(i))
6319 bw_data.tc_bw_share_credits[i] = 1;
6320 }
6321
6322 ret = i40e_aq_config_switch_comp_bw_config(&pf->hw, veb->seid,
6323 &bw_data, NULL);
6324 if (ret) {
6325 dev_info(&pf->pdev->dev,
6326 "VEB bw config failed, err %s aq_err %s\n",
6327 i40e_stat_str(&pf->hw, ret),
6328 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6329 goto out;
6330 }
6331
6332
6333 ret = i40e_veb_get_bw_info(veb);
6334 if (ret) {
6335 dev_info(&pf->pdev->dev,
6336 "Failed getting veb bw config, err %s aq_err %s\n",
6337 i40e_stat_str(&pf->hw, ret),
6338 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6339 }
6340
6341 out:
6342 return ret;
6343 }
6344
6345 #ifdef CONFIG_I40E_DCB
6346 /**
6347  * i40e_dcb_reconfigure - Reconfigure all VEBs and VSIs
6348  * @pf: PF struct
6349  *
6350  * Reconfigure VEB/VSIs on a given PF; it is assumed that
6351  * the caller would've quiesced all the VSIs before calling
6352  * this function
6353  **/
6354 static void i40e_dcb_reconfigure(struct i40e_pf *pf)
6355 {
6356 u8 tc_map = 0;
6357 int ret;
6358 u8 v;
6359
6360
6361 tc_map = i40e_pf_get_tc_map(pf);
6362 for (v = 0; v < I40E_MAX_VEB; v++) {
6363 if (!pf->veb[v])
6364 continue;
6365 ret = i40e_veb_config_tc(pf->veb[v], tc_map);
6366 if (ret) {
6367 dev_info(&pf->pdev->dev,
6368 "Failed configuring TC for VEB seid=%d\n",
6369 pf->veb[v]->seid);
6370
6371 }
6372 }
6373
6374
6375 for (v = 0; v < pf->num_alloc_vsi; v++) {
6376 if (!pf->vsi[v])
6377 continue;
6378
6379
6380
6381
6382 if (v == pf->lan_vsi)
6383 tc_map = i40e_pf_get_tc_map(pf);
6384 else
6385 tc_map = I40E_DEFAULT_TRAFFIC_CLASS;
6386
6387 ret = i40e_vsi_config_tc(pf->vsi[v], tc_map);
6388 if (ret) {
6389 dev_info(&pf->pdev->dev,
6390 "Failed configuring TC for VSI seid=%d\n",
6391 pf->vsi[v]->seid);
6392
6393 } else {
6394
6395 i40e_vsi_map_rings_to_vectors(pf->vsi[v]);
6396 if (pf->vsi[v]->netdev)
6397 i40e_dcbnl_set_all(pf->vsi[v]);
6398 }
6399 }
6400 }
6401
6402 /**
6403  * i40e_resume_port_tx - Resume port Tx
6404  * @pf: PF struct
6405  *
6406  * Resume a port's Tx and issue a PF reset in case of failure to
6407  * resume.
6408  **/
6409 static int i40e_resume_port_tx(struct i40e_pf *pf)
6410 {
6411 struct i40e_hw *hw = &pf->hw;
6412 int ret;
6413
6414 ret = i40e_aq_resume_port_tx(hw, NULL);
6415 if (ret) {
6416 dev_info(&pf->pdev->dev,
6417 "Resume Port Tx failed, err %s aq_err %s\n",
6418 i40e_stat_str(&pf->hw, ret),
6419 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6420
6421 set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
6422 i40e_service_event_schedule(pf);
6423 }
6424
6425 return ret;
6426 }
6427
6428 /**
6429  * i40e_init_pf_dcb - Initialize DCB configuration
6430  * @pf: PF being configured
6431  *
6432  * Query the current DCB configuration and cache it
6433  * in the hardware structure
6434  **/
6435 static int i40e_init_pf_dcb(struct i40e_pf *pf)
6436 {
6437 struct i40e_hw *hw = &pf->hw;
6438 int err = 0;
6439
6440
6441
6442
6443 if ((pf->hw_features & I40E_HW_NO_DCB_SUPPORT) ||
6444 (pf->flags & I40E_FLAG_DISABLE_FW_LLDP)) {
6445 dev_info(&pf->pdev->dev, "DCB is not supported or FW LLDP is disabled\n");
6446 err = I40E_NOT_SUPPORTED;
6447 goto out;
6448 }
6449
6450 err = i40e_init_dcb(hw, true);
6451 if (!err) {
6452
6453 if ((!hw->func_caps.dcb) ||
6454 (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED)) {
6455 dev_info(&pf->pdev->dev,
6456 "DCBX offload is not supported or is disabled for this PF.\n");
6457 } else {
6458
6459 pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED |
6460 DCB_CAP_DCBX_VER_IEEE;
6461
6462 pf->flags |= I40E_FLAG_DCB_CAPABLE;
6463
6464
6465
6466 if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
6467 pf->flags |= I40E_FLAG_DCB_ENABLED;
6468 else
6469 pf->flags &= ~I40E_FLAG_DCB_ENABLED;
6470 dev_dbg(&pf->pdev->dev,
6471 "DCBX offload is supported for this PF.\n");
6472 }
6473 } else if (pf->hw.aq.asq_last_status == I40E_AQ_RC_EPERM) {
6474 dev_info(&pf->pdev->dev, "FW LLDP disabled for this PF.\n");
6475 pf->flags |= I40E_FLAG_DISABLE_FW_LLDP;
6476 } else {
6477 dev_info(&pf->pdev->dev,
6478 "Query for DCB configuration failed, err %s aq_err %s\n",
6479 i40e_stat_str(&pf->hw, err),
6480 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6481 }
6482
6483 out:
6484 return err;
6485 }
6486 #endif
6487 #define SPEED_SIZE 14
6488 #define FC_SIZE 8
6489 /**
6490  * i40e_print_link_message - print link up or down
6491  * @vsi: the VSI for which link needs a message
6492  * @isup: true if link is up, false otherwise
6493  */
6494 void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
6495 {
6496 enum i40e_aq_link_speed new_speed;
6497 struct i40e_pf *pf = vsi->back;
6498 char *speed = "Unknown";
6499 char *fc = "Unknown";
6500 char *fec = "";
6501 char *req_fec = "";
6502 char *an = "";
6503
6504 if (isup)
6505 new_speed = pf->hw.phy.link_info.link_speed;
6506 else
6507 new_speed = I40E_LINK_SPEED_UNKNOWN;
6508
6509 if ((vsi->current_isup == isup) && (vsi->current_speed == new_speed))
6510 return;
6511 vsi->current_isup = isup;
6512 vsi->current_speed = new_speed;
6513 if (!isup) {
6514 netdev_info(vsi->netdev, "NIC Link is Down\n");
6515 return;
6516 }
6517
6518
6519
6520
6521 if (pf->hw.func_caps.npar_enable &&
6522 (pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_1GB ||
6523 pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_100MB))
6524 netdev_warn(vsi->netdev,
6525 "The partition detected link speed that is less than 10Gbps\n");
6526
6527 switch (pf->hw.phy.link_info.link_speed) {
6528 case I40E_LINK_SPEED_40GB:
6529 speed = "40 G";
6530 break;
6531 case I40E_LINK_SPEED_20GB:
6532 speed = "20 G";
6533 break;
6534 case I40E_LINK_SPEED_25GB:
6535 speed = "25 G";
6536 break;
6537 case I40E_LINK_SPEED_10GB:
6538 speed = "10 G";
6539 break;
6540 case I40E_LINK_SPEED_5GB:
6541 speed = "5 G";
6542 break;
6543 case I40E_LINK_SPEED_2_5GB:
6544 speed = "2.5 G";
6545 break;
6546 case I40E_LINK_SPEED_1GB:
6547 speed = "1000 M";
6548 break;
6549 case I40E_LINK_SPEED_100MB:
6550 speed = "100 M";
6551 break;
6552 default:
6553 break;
6554 }
6555
6556 switch (pf->hw.fc.current_mode) {
6557 case I40E_FC_FULL:
6558 fc = "RX/TX";
6559 break;
6560 case I40E_FC_TX_PAUSE:
6561 fc = "TX";
6562 break;
6563 case I40E_FC_RX_PAUSE:
6564 fc = "RX";
6565 break;
6566 default:
6567 fc = "None";
6568 break;
6569 }
6570
6571 if (pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_25GB) {
6572 req_fec = "None";
6573 fec = "None";
6574 an = "False";
6575
6576 if (pf->hw.phy.link_info.an_info & I40E_AQ_AN_COMPLETED)
6577 an = "True";
6578
6579 if (pf->hw.phy.link_info.fec_info &
6580 I40E_AQ_CONFIG_FEC_KR_ENA)
6581 fec = "CL74 FC-FEC/BASE-R";
6582 else if (pf->hw.phy.link_info.fec_info &
6583 I40E_AQ_CONFIG_FEC_RS_ENA)
6584 fec = "CL108 RS-FEC";
6585
6586
6587
6588
6589 if (vsi->back->hw.phy.link_info.req_fec_info &
6590 (I40E_AQ_REQUEST_FEC_KR | I40E_AQ_REQUEST_FEC_RS)) {
6591 if (vsi->back->hw.phy.link_info.req_fec_info &
6592 I40E_AQ_REQUEST_FEC_RS)
6593 req_fec = "CL108 RS-FEC";
6594 else
6595 req_fec = "CL74 FC-FEC/BASE-R";
6596 }
6597 netdev_info(vsi->netdev,
6598 "NIC Link is Up, %sbps Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg: %s, Flow Control: %s\n",
6599 speed, req_fec, fec, an, fc);
6600 } else {
6601 netdev_info(vsi->netdev,
6602 "NIC Link is Up, %sbps Full Duplex, Flow Control: %s\n",
6603 speed, fc);
6604 }
6605
6606 }
6607
6608 /**
6609  * i40e_up_complete - Finish the last steps of bringing up a connection
6610  * @vsi: the VSI being configured
6611  **/
6612 static int i40e_up_complete(struct i40e_vsi *vsi)
6613 {
6614 struct i40e_pf *pf = vsi->back;
6615 int err;
6616
6617 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
6618 i40e_vsi_configure_msix(vsi);
6619 else
6620 i40e_configure_msi_and_legacy(vsi);
6621
6622
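/* start rings */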
6623 err = i40e_vsi_start_rings(vsi);
6624 if (err)
6625 return err;
6626
6627 clear_bit(__I40E_VSI_DOWN, vsi->state);
6628 i40e_napi_enable_all(vsi);
6629 i40e_vsi_enable_irq(vsi);
6630
6631 if ((pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP) &&
6632 (vsi->netdev)) {
6633 i40e_print_link_message(vsi, true);
6634 netif_tx_start_all_queues(vsi->netdev);
6635 netif_carrier_on(vsi->netdev);
6636 }
6637
6638
6639 if (vsi->type == I40E_VSI_FDIR) {
6640
6641 pf->fd_add_err = 0;
6642 pf->fd_atr_cnt = 0;
6643 i40e_fdir_filter_restore(vsi);
6644 }
6645
6646 /* On the next run of the service_task, notify any clients of the new
6647  * opened netdev
6648  */
6649 set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
6650 i40e_service_event_schedule(pf);
6651
6652 return 0;
6653 }
6654 /**
6655  * i40e_vsi_reinit_locked - Reset the VSI
6656  * @vsi: the VSI being configured
6657  *
6658  * Rebuild the ring structs after some configuration
6659  * has changed, e.g. MTU size.  The VSI is brought down
6660  * and back up while holding the config-busy flag.
6661  **/
6662 static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi)
6663 {
6664 struct i40e_pf *pf = vsi->back;
6665
6666 WARN_ON(in_interrupt());
6667 while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state))
6668 usleep_range(1000, 2000);
6669 i40e_down(vsi);
6670
6671 i40e_up(vsi);
6672 clear_bit(__I40E_CONFIG_BUSY, pf->state);
6673 }
6674
6675 /**
6676  * i40e_up - Bring the connection back up after being down
6677  * @vsi: the VSI being configured
6678  **/
6679 int i40e_up(struct i40e_vsi *vsi)
6680 {
6681 int err;
6682
6683 err = i40e_vsi_configure(vsi);
6684 if (!err)
6685 err = i40e_up_complete(vsi);
6686
6687 return err;
6688 }
6689
6690 /**
6691  * i40e_force_link_state - Force the link status
6692  * @pf: board private structure
6693  * @is_up: whether the link state should be forced up or down
6694  **/
6695 static i40e_status i40e_force_link_state(struct i40e_pf *pf, bool is_up)
6696 {
6697 struct i40e_aq_get_phy_abilities_resp abilities;
6698 struct i40e_aq_set_phy_config config = {0};
6699 struct i40e_hw *hw = &pf->hw;
6700 i40e_status err;
6701 u64 mask;
6702 u8 speed;
6703
6704 /* Card might've been put in an unstable state by other drivers
6705  * and applications, which causes incorrect speed values being
6706  * set on startup. In order to clear speed registers, we call
6707  * get_phy_capabilities twice, once to get initial state of
6708  * available speeds, and once to get current PHY config.
6709  */
6710 err = i40e_aq_get_phy_capabilities(hw, false, true, &abilities,
6711 NULL);
6712 if (err) {
6713 dev_err(&pf->pdev->dev,
6714 "failed to get phy cap., ret = %s last_status = %s\n",
6715 i40e_stat_str(hw, err),
6716 i40e_aq_str(hw, hw->aq.asq_last_status));
6717 return err;
6718 }
6719 speed = abilities.link_speed;
6720
6721
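/* Get the current phy config */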
6722 err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities,
6723 NULL);
6724 if (err) {
6725 dev_err(&pf->pdev->dev,
6726 "failed to get phy cap., ret = %s last_status = %s\n",
6727 i40e_stat_str(hw, err),
6728 i40e_aq_str(hw, hw->aq.asq_last_status));
6729 return err;
6730 }
6731
6732
6733
6734
6735 if (is_up && abilities.phy_type != 0 && abilities.link_speed != 0)
6736 return I40E_SUCCESS;
6737
6738 /* To force link we need to set bits for all supported PHY types,
6739  * but there are now more than 32, so we need to use two more bits
6740  * to set all of them properly.
6741  */
6742 mask = I40E_PHY_TYPES_BITMASK;
6743 config.phy_type = is_up ? cpu_to_le32((u32)(mask & 0xffffffff)) : 0;
6744 config.phy_type_ext = is_up ? (u8)((mask >> 32) & 0xff) : 0;
6745
6746 config.abilities = abilities.abilities;
6747 if (abilities.link_speed != 0)
6748 config.link_speed = abilities.link_speed;
6749 else
6750 config.link_speed = speed;
6751 config.eee_capability = abilities.eee_capability;
6752 config.eeer = abilities.eeer_val;
6753 config.low_power_ctrl = abilities.d3_lpan;
6754 config.fec_config = abilities.fec_cfg_curr_mod_ext_info &
6755 I40E_AQ_PHY_FEC_CONFIG_MASK;
6756 err = i40e_aq_set_phy_config(hw, &config, NULL);
6757
6758 if (err) {
6759 dev_err(&pf->pdev->dev,
6760 "set phy config ret = %s last_status = %s\n",
6761 i40e_stat_str(&pf->hw, err),
6762 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6763 return err;
6764 }
6765
6766
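/* Update the link info */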
6767 err = i40e_update_link_info(hw);
6768 if (err) {
6769
6770
6771
6772
6773 msleep(1000);
6774 i40e_update_link_info(hw);
6775 }
6776
6777 i40e_aq_set_link_restart_an(hw, true, NULL);
6778
6779 return I40E_SUCCESS;
6780 }
6781
6782 /**
6783  * i40e_down - Shutdown the connection processing
6784  * @vsi: the VSI being stopped
6785  **/
6786 void i40e_down(struct i40e_vsi *vsi)
6787 {
6788 int i;
6789
6790 /* It is assumed that the caller of this function
6791  * sets the vsi->state __I40E_VSI_DOWN bit.
6792  */
6793 if (vsi->netdev) {
6794 netif_carrier_off(vsi->netdev);
6795 netif_tx_disable(vsi->netdev);
6796 }
6797 i40e_vsi_disable_irq(vsi);
6798 i40e_vsi_stop_rings(vsi);
6799 if (vsi->type == I40E_VSI_MAIN &&
6800 vsi->back->flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED)
6801 i40e_force_link_state(vsi->back, false);
6802 i40e_napi_disable_all(vsi);
6803
6804 for (i = 0; i < vsi->num_queue_pairs; i++) {
6805 i40e_clean_tx_ring(vsi->tx_rings[i]);
6806 if (i40e_enabled_xdp_vsi(vsi)) {
6807 /* Make sure that in-progress ndo_xdp_xmit
6808  * calls are completed.
6809  */
6810 synchronize_rcu();
6811 i40e_clean_tx_ring(vsi->xdp_rings[i]);
6812 }
6813 i40e_clean_rx_ring(vsi->rx_rings[i]);
6814 }
6815
6816 }
6817
6818 /**
6819  * i40e_validate_mqprio_qopt - validate queue mapping info
6820  * @vsi: the VSI being configured
6821  * @mqprio_qopt: queue parameters
6822  **/
6823 static int i40e_validate_mqprio_qopt(struct i40e_vsi *vsi,
6824 struct tc_mqprio_qopt_offload *mqprio_qopt)
6825 {
6826 u64 sum_max_rate = 0;
6827 u64 max_rate = 0;
6828 int i;
6829
6830 if (mqprio_qopt->qopt.offset[0] != 0 ||
6831 mqprio_qopt->qopt.num_tc < 1 ||
6832 mqprio_qopt->qopt.num_tc > I40E_MAX_TRAFFIC_CLASS)
6833 return -EINVAL;
6834 for (i = 0; ; i++) {
6835 if (!mqprio_qopt->qopt.count[i])
6836 return -EINVAL;
6837 if (mqprio_qopt->min_rate[i]) {
6838 dev_err(&vsi->back->pdev->dev,
6839 "Invalid min tx rate (greater than 0) specified\n");
6840 return -EINVAL;
6841 }
6842 max_rate = mqprio_qopt->max_rate[i];
6843 do_div(max_rate, I40E_BW_MBPS_DIVISOR);
6844 sum_max_rate += max_rate;
6845
6846 if (i >= mqprio_qopt->qopt.num_tc - 1)
6847 break;
6848 if (mqprio_qopt->qopt.offset[i + 1] !=
6849 (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i]))
6850 return -EINVAL;
6851 }
6852 if (vsi->num_queue_pairs <
6853 (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i])) {
6854 return -EINVAL;
6855 }
6856 if (sum_max_rate > i40e_get_link_speed(vsi)) {
6857 dev_err(&vsi->back->pdev->dev,
6858 "Invalid max tx rate specified\n");
6859 return -EINVAL;
6860 }
6861 return 0;
6862 }
6863
6864 /**
6865  * i40e_vsi_set_default_tc_config - set default values for tc configuration
6866  * @vsi: the VSI being configured
6867  **/
6868 static void i40e_vsi_set_default_tc_config(struct i40e_vsi *vsi)
6869 {
6870 u16 qcount;
6871 int i;
6872
6873
6874 vsi->tc_config.numtc = 1;
6875 vsi->tc_config.enabled_tc = 1;
6876 qcount = min_t(int, vsi->alloc_queue_pairs,
6877 i40e_pf_get_max_q_per_tc(vsi->back));
6878 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
6879
6880
6881
6882 vsi->tc_config.tc_info[i].qoffset = 0;
6883 if (i == 0)
6884 vsi->tc_config.tc_info[i].qcount = qcount;
6885 else
6886 vsi->tc_config.tc_info[i].qcount = 1;
6887 vsi->tc_config.tc_info[i].netdev_tc = 0;
6888 }
6889 }
6890
6891 /**
6892  * i40e_del_macvlan_filter - Delete a FDB entry on the HW
6893  * @hw: pointer to the hw struct
6894  * @seid: seid of the channel VSI
6895  * @macaddr: the mac address to apply as a filter
6896  * @aq_err: store the admin Q error
6897  *
6898  * This function deletes a mac filter on the channel VSI which serves as the
6899  * macvlan. Returns 0 on success.
6900  **/
6901 static i40e_status i40e_del_macvlan_filter(struct i40e_hw *hw, u16 seid,
6902 const u8 *macaddr, int *aq_err)
6903 {
6904 struct i40e_aqc_remove_macvlan_element_data element;
6905 i40e_status status;
6906
6907 memset(&element, 0, sizeof(element));
6908 ether_addr_copy(element.mac_addr, macaddr);
6909 element.vlan_tag = 0;
6910 element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
6911 status = i40e_aq_remove_macvlan(hw, seid, &element, 1, NULL);
6912 *aq_err = hw->aq.asq_last_status;
6913
6914 return status;
6915 }
6916
6917 /**
6918  * i40e_add_macvlan_filter - Add a FDB entry on the HW
6919  * @hw: pointer to the hw struct
6920  * @seid: seid of the channel VSI
6921  * @macaddr: the mac address to apply as a filter
6922  * @aq_err: store the admin Q error
6923  *
6924  * This function adds a mac filter on the channel VSI which serves as the
6925  * macvlan. Returns 0 on success.
6926  **/
6927 static i40e_status i40e_add_macvlan_filter(struct i40e_hw *hw, u16 seid,
6928 const u8 *macaddr, int *aq_err)
6929 {
6930 struct i40e_aqc_add_macvlan_element_data element;
6931 i40e_status status;
6932 u16 cmd_flags = 0;
6933
6934 ether_addr_copy(element.mac_addr, macaddr);
6935 element.vlan_tag = 0;
6936 element.queue_number = 0;
6937 element.match_method = I40E_AQC_MM_ERR_NO_RES;
6938 cmd_flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
6939 element.flags = cpu_to_le16(cmd_flags);
6940 status = i40e_aq_add_macvlan(hw, seid, &element, 1, NULL);
6941 *aq_err = hw->aq.asq_last_status;
6942
6943 return status;
6944 }
6945
6946 /**
6947  * i40e_reset_ch_rings - Reset the queue contexts in a channel
6948  * @vsi: VSI to be configured
6949  * @ch: ptr to channel structure
6950  **/
6951 static void i40e_reset_ch_rings(struct i40e_vsi *vsi, struct i40e_channel *ch)
6952 {
6953 struct i40e_ring *tx_ring, *rx_ring;
6954 u16 pf_q;
6955 int i;
6956
6957 for (i = 0; i < ch->num_queue_pairs; i++) {
6958 pf_q = ch->base_queue + i;
6959 tx_ring = vsi->tx_rings[pf_q];
6960 tx_ring->ch = NULL;
6961 rx_ring = vsi->rx_rings[pf_q];
6962 rx_ring->ch = NULL;
6963 }
6964 }
6965
6966 /**
6967  * i40e_free_macvlan_channels
6968  * @vsi: the VSI we want to access
6969  *
6970  * This function frees the Qs of the channel VSI from
6971  * the stack and also deletes the channel VSIs which
6972  * serve as macvlans.
6973  **/
6974 static void i40e_free_macvlan_channels(struct i40e_vsi *vsi)
6975 {
6976 struct i40e_channel *ch, *ch_tmp;
6977 int ret;
6978
6979 if (list_empty(&vsi->macvlan_list))
6980 return;
6981
6982 list_for_each_entry_safe(ch, ch_tmp, &vsi->macvlan_list, list) {
6983 struct i40e_vsi *parent_vsi;
6984
6985 if (i40e_is_channel_macvlan(ch)) {
6986 i40e_reset_ch_rings(vsi, ch);
6987 clear_bit(ch->fwd->bit_no, vsi->fwd_bitmask);
6988 netdev_unbind_sb_channel(vsi->netdev, ch->fwd->netdev);
6989 netdev_set_sb_channel(ch->fwd->netdev, 0);
6990 kfree(ch->fwd);
6991 ch->fwd = NULL;
6992 }
6993
6994 list_del(&ch->list);
6995 parent_vsi = ch->parent_vsi;
6996 if (!parent_vsi || !ch->initialized) {
6997 kfree(ch);
6998 continue;
6999 }
7000
7001
7002 ret = i40e_aq_delete_element(&vsi->back->hw, ch->seid,
7003 NULL);
7004 if (ret)
7005 dev_err(&vsi->back->pdev->dev,
7006 "unable to remove channel (%d) for parent VSI(%d)\n",
7007 ch->seid, parent_vsi->seid);
7008 kfree(ch);
7009 }
7010 vsi->macvlan_cnt = 0;
7011 }
7012
7013 /**
7014  * i40e_fwd_ring_up - bring the macvlan device up
7015  * @vsi: the VSI we want to access
7016  * @vdev: macvlan netdevice
7017  * @fwd: the private fwd structure
7018  **/
7019 static int i40e_fwd_ring_up(struct i40e_vsi *vsi, struct net_device *vdev,
7020 struct i40e_fwd_adapter *fwd)
7021 {
7022 int ret = 0, num_tc = 1, i, aq_err;
7023 struct i40e_channel *ch, *ch_tmp;
7024 struct i40e_pf *pf = vsi->back;
7025 struct i40e_hw *hw = &pf->hw;
7026
7027 if (list_empty(&vsi->macvlan_list))
7028 return -EINVAL;
7029
7030
7031 list_for_each_entry_safe(ch, ch_tmp, &vsi->macvlan_list, list) {
7032 if (!i40e_is_channel_macvlan(ch)) {
7033 ch->fwd = fwd;
7034
7035 for (i = 0; i < num_tc; i++)
7036 netdev_bind_sb_channel_queue(vsi->netdev, vdev,
7037 i,
7038 ch->num_queue_pairs,
7039 ch->base_queue);
7040 for (i = 0; i < ch->num_queue_pairs; i++) {
7041 struct i40e_ring *tx_ring, *rx_ring;
7042 u16 pf_q;
7043
7044 pf_q = ch->base_queue + i;
7045
7046
7047 tx_ring = vsi->tx_rings[pf_q];
7048 tx_ring->ch = ch;
7049
7050
7051 rx_ring = vsi->rx_rings[pf_q];
7052 rx_ring->ch = ch;
7053 }
7054 break;
7055 }
7056 }
7057
7058 /* Guarantee all rings are updated before we update the
7059  * MAC address filter.
7060  */
7061 wmb();
7062
7063
7064 ret = i40e_add_macvlan_filter(hw, ch->seid, vdev->dev_addr, &aq_err);
7065 if (ret) {
7066
7067 macvlan_release_l2fw_offload(vdev);
7068 for (i = 0; i < ch->num_queue_pairs; i++) {
7069 struct i40e_ring *rx_ring;
7070 u16 pf_q;
7071
7072 pf_q = ch->base_queue + i;
7073 rx_ring = vsi->rx_rings[pf_q];
7074 rx_ring->netdev = NULL;
7075 }
7076 dev_info(&pf->pdev->dev,
7077 "Error adding mac filter on macvlan err %s, aq_err %s\n",
7078 i40e_stat_str(hw, ret),
7079 i40e_aq_str(hw, aq_err));
7080 netdev_err(vdev, "L2fwd offload disabled to L2 filter error\n");
7081 }
7082
7083 return ret;
7084 }
7085
7086 /**
7087  * i40e_setup_macvlans - create the channels which will be macvlans
7088  * @vsi: the VSI we want to access
7089  * @macvlan_cnt: no. of macvlans to be setup
7090  * @qcnt: no. of Qs per macvlan
7091  * @vdev: macvlan netdevice
7092  **/
7093 static int i40e_setup_macvlans(struct i40e_vsi *vsi, u16 macvlan_cnt, u16 qcnt,
7094 struct net_device *vdev)
7095 {
7096 struct i40e_pf *pf = vsi->back;
7097 struct i40e_hw *hw = &pf->hw;
7098 struct i40e_vsi_context ctxt;
7099 u16 sections, qmap, num_qps;
7100 struct i40e_channel *ch;
7101 int i, pow, ret = 0;
7102 u8 offset = 0;
7103
7104 if (vsi->type != I40E_VSI_MAIN || !macvlan_cnt)
7105 return -EINVAL;
7106
7107 num_qps = vsi->num_queue_pairs - (macvlan_cnt * qcnt);
7108
7109 /* find the next higher power-of-2 of num queue pairs */
7110 pow = fls(roundup_pow_of_two(num_qps) - 1);
7111
7112 qmap = (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
7113 (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
7114
7115 /* Setup context bits for the main VSI */
7116 sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
7117 sections |= I40E_AQ_VSI_PROP_SCHED_VALID;
7118 memset(&ctxt, 0, sizeof(ctxt));
7119 ctxt.seid = vsi->seid;
7120 ctxt.pf_num = vsi->back->hw.pf_id;
7121 ctxt.vf_num = 0;
7122 ctxt.uplink_seid = vsi->uplink_seid;
7123 ctxt.info = vsi->info;
7124 ctxt.info.tc_mapping[0] = cpu_to_le16(qmap);
7125 ctxt.info.mapping_flags |= cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
7126 ctxt.info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
7127 ctxt.info.valid_sections |= cpu_to_le16(sections);
7128
7129 /* Reconfigure RSS for main VSI with the new max queue count */
7130 vsi->rss_size = max_t(u16, num_qps, qcnt);
7131 ret = i40e_vsi_config_rss(vsi);
7132 if (ret) {
7133 dev_info(&pf->pdev->dev,
7134 "Failed to reconfig RSS for num_queues (%u)\n",
7135 vsi->rss_size);
7136 return ret;
7137 }
7138 vsi->reconfig_rss = true;
7139 dev_dbg(&vsi->back->pdev->dev,
7140 "Reconfigured RSS with num_queues (%u)\n", vsi->rss_size);
7141 vsi->next_base_queue = num_qps;
7142 vsi->cnt_q_avail = vsi->num_queue_pairs - num_qps;
7143
7144 /* Update the VSI after updating the VSI queue-mapping
7145  * information
7146  */
7147 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
7148 if (ret) {
7149 dev_info(&pf->pdev->dev,
7150 "Update vsi tc config failed, err %s aq_err %s\n",
7151 i40e_stat_str(hw, ret),
7152 i40e_aq_str(hw, hw->aq.asq_last_status));
7153 return ret;
7154 }
7155 /* update the local VSI info with the updated queue map */
7156 i40e_vsi_update_queue_map(vsi, &ctxt);
7157 vsi->info.valid_sections = 0;
7158
7159
7160 INIT_LIST_HEAD(&vsi->macvlan_list);
7161 for (i = 0; i < macvlan_cnt; i++) {
7162 ch = kzalloc(sizeof(*ch), GFP_KERNEL);
7163 if (!ch) {
7164 ret = -ENOMEM;
7165 goto err_free;
7166 }
7167 INIT_LIST_HEAD(&ch->list);
7168 ch->num_queue_pairs = qcnt;
7169 if (!i40e_setup_channel(pf, vsi, ch)) {
7170 ret = -EINVAL;
7171 kfree(ch);
7172 goto err_free;
7173 }
7174 ch->parent_vsi = vsi;
7175 vsi->cnt_q_avail -= ch->num_queue_pairs;
7176 vsi->macvlan_cnt++;
7177 list_add_tail(&ch->list, &vsi->macvlan_list);
7178 }
7179
7180 return ret;
7181
7182 err_free:
7183 dev_info(&pf->pdev->dev, "Failed to setup macvlans\n");
7184 i40e_free_macvlan_channels(vsi);
7185
7186 return ret;
7187 }
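
The queue-map word built above stores the per-TC queue count as a power-of-two exponent: fls(roundup_pow_of_two(n) - 1) is simply ceil(log2(n)). A minimal standalone sketch of that arithmetic (hypothetical helper name, demo only, not driver code):

#include <stdio.h>

/* ceil(log2(n)) for n >= 1, mirroring fls(roundup_pow_of_two(n) - 1) */
static unsigned int qcount_to_pow(unsigned int n)
{
	unsigned int pow = 0;

	while ((1u << pow) < n)
		pow++;
	return pow;
}

int main(void)
{
	/* 6 queue pairs round up to 8, so the exponent field gets 3 */
	printf("num_qps=6  -> pow=%u\n", qcount_to_pow(6));
	printf("num_qps=16 -> pow=%u\n", qcount_to_pow(16));
	return 0;
}
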
7188
7189 /**
7190  * i40e_fwd_add - configure macvlans
7191  * @netdev: net device to configure
7192  * @vdev: macvlan netdevice
7193  **/
7194 static void *i40e_fwd_add(struct net_device *netdev, struct net_device *vdev)
7195 {
7196 struct i40e_netdev_priv *np = netdev_priv(netdev);
7197 u16 q_per_macvlan = 0, macvlan_cnt = 0, vectors;
7198 struct i40e_vsi *vsi = np->vsi;
7199 struct i40e_pf *pf = vsi->back;
7200 struct i40e_fwd_adapter *fwd;
7201 int avail_macvlan, ret;
7202
7203 if ((pf->flags & I40E_FLAG_DCB_ENABLED)) {
7204 netdev_info(netdev, "Macvlans are not supported when DCB is enabled\n");
7205 return ERR_PTR(-EINVAL);
7206 }
7207 if ((pf->flags & I40E_FLAG_TC_MQPRIO)) {
7208 netdev_info(netdev, "Macvlans are not supported when HW TC offload is on\n");
7209 return ERR_PTR(-EINVAL);
7210 }
7211 if (pf->num_lan_msix < I40E_MIN_MACVLAN_VECTORS) {
7212 netdev_info(netdev, "Not enough vectors available to support macvlans\n");
7213 return ERR_PTR(-EINVAL);
7214 }
7215
7216 /* The macvlan device has to be a single Q device so that the
7217  * tc_to_txq field can be reused to pick the tx queue.
7218  */
7219 if (netif_is_multiqueue(vdev))
7220 return ERR_PTR(-ERANGE);
7221
7222 if (!vsi->macvlan_cnt) {
7223 /* reserve bit 0 for the pf device */
7224 set_bit(0, vsi->fwd_bitmask);
7225
7226 /* Try to reserve as many queues as possible for macvlans. First
7227  * reserve 3/4th of max vectors, then half, then quarter and
7228  * calculate Qs per macvlan as you go
7229  */
7230 vectors = pf->num_lan_msix;
7231 if (vectors <= I40E_MAX_MACVLANS && vectors > 64) {
7232 /* allocate 4 Qs per macvlan and 32 Qs to the PF */
7233 q_per_macvlan = 4;
7234 macvlan_cnt = (vectors - 32) / 4;
7235 } else if (vectors <= 64 && vectors > 32) {
7236 /* allocate 2 Qs per macvlan and 16 Qs to the PF */
7237 q_per_macvlan = 2;
7238 macvlan_cnt = (vectors - 16) / 2;
7239 } else if (vectors <= 32 && vectors > 16) {
7240 /* allocate 1 Q per macvlan and 16 Qs to the PF */
7241 q_per_macvlan = 1;
7242 macvlan_cnt = vectors - 16;
7243 } else if (vectors <= 16 && vectors > 8) {
7244 /* allocate 1 Q per macvlan and 8 Qs to the PF */
7245 q_per_macvlan = 1;
7246 macvlan_cnt = vectors - 8;
7247 } else {
7248 /* allocate 1 Q per macvlan and 1 Q to the PF */
7249 q_per_macvlan = 1;
7250 macvlan_cnt = vectors - 1;
7251 }
7252
7253 if (macvlan_cnt == 0)
7254 return ERR_PTR(-EBUSY);
7255
7256
7257 i40e_quiesce_vsi(vsi);
7258
7259 /* sets up the macvlans but does not "enable" them */
7260 ret = i40e_setup_macvlans(vsi, macvlan_cnt, q_per_macvlan,
7261 vdev);
7262 if (ret)
7263 return ERR_PTR(ret);
7264
7265
7266 i40e_unquiesce_vsi(vsi);
7267 }
7268 avail_macvlan = find_first_zero_bit(vsi->fwd_bitmask,
7269 vsi->macvlan_cnt);
7270 if (avail_macvlan >= I40E_MAX_MACVLANS)
7271 return ERR_PTR(-EBUSY);
7272
7273
7274 fwd = kzalloc(sizeof(*fwd), GFP_KERNEL);
7275 if (!fwd)
7276 return ERR_PTR(-ENOMEM);
7277
7278 set_bit(avail_macvlan, vsi->fwd_bitmask);
7279 fwd->bit_no = avail_macvlan;
7280 netdev_set_sb_channel(vdev, avail_macvlan);
7281 fwd->netdev = vdev;
7282
7283 if (!netif_running(netdev))
7284 return fwd;
7285
7286
7287 ret = i40e_fwd_ring_up(vsi, vdev, fwd);
7288 if (ret) {
7289 /* unbind the queues and drop the subordinate channel config */
7290 netdev_unbind_sb_channel(netdev, vdev);
7291 netdev_set_sb_channel(vdev, 0);
7292
7293 kfree(fwd);
7294 return ERR_PTR(-EINVAL);
7295 }
7296
7297 return fwd;
7298 }
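
The if/else ladder above splits the MSI-X vector budget between the PF and the macvlan channels. A small standalone table of sample outcomes (demo code mirroring the same arithmetic, assuming I40E_MAX_MACVLANS == 128):

#include <stdio.h>

int main(void)
{
	static const unsigned int samples[] = { 128, 64, 32, 16, 4 };
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		unsigned int vectors = samples[i];
		unsigned int q_per_macvlan, macvlan_cnt;

		if (vectors <= 128 && vectors > 64) {
			q_per_macvlan = 4;
			macvlan_cnt = (vectors - 32) / 4;
		} else if (vectors <= 64 && vectors > 32) {
			q_per_macvlan = 2;
			macvlan_cnt = (vectors - 16) / 2;
		} else if (vectors <= 32 && vectors > 16) {
			q_per_macvlan = 1;
			macvlan_cnt = vectors - 16;
		} else if (vectors <= 16 && vectors > 8) {
			q_per_macvlan = 1;
			macvlan_cnt = vectors - 8;
		} else {
			q_per_macvlan = 1;
			macvlan_cnt = vectors - 1;
		}
		/* e.g. 128 vectors -> 24 macvlans x 4 queues each */
		printf("vectors=%3u -> %3u macvlans x %u queue(s)\n",
		       vectors, macvlan_cnt, q_per_macvlan);
	}
	return 0;
}
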
7299
7300 /**
7301  * i40e_del_all_macvlans - Delete all the mac filters on the channels
7302  * @vsi: the VSI we want to access
7303  */
7304 static void i40e_del_all_macvlans(struct i40e_vsi *vsi)
7305 {
7306 struct i40e_channel *ch, *ch_tmp;
7307 struct i40e_pf *pf = vsi->back;
7308 struct i40e_hw *hw = &pf->hw;
7309 int aq_err, ret = 0;
7310
7311 if (list_empty(&vsi->macvlan_list))
7312 return;
7313
7314 list_for_each_entry_safe(ch, ch_tmp, &vsi->macvlan_list, list) {
7315 if (i40e_is_channel_macvlan(ch)) {
7316 ret = i40e_del_macvlan_filter(hw, ch->seid,
7317 i40e_channel_mac(ch),
7318 &aq_err);
7319 if (!ret) {
7320
7321 i40e_reset_ch_rings(vsi, ch);
7322 clear_bit(ch->fwd->bit_no, vsi->fwd_bitmask);
7323 netdev_unbind_sb_channel(vsi->netdev,
7324 ch->fwd->netdev);
7325 netdev_set_sb_channel(ch->fwd->netdev, 0);
7326 kfree(ch->fwd);
7327 ch->fwd = NULL;
7328 }
7329 }
7330 }
7331 }
7332
7333 /**
7334  * i40e_fwd_del - delete macvlan interfaces
7335  * @netdev: net device to configure
7336  * @vdev: macvlan netdevice
7337  */
7338 static void i40e_fwd_del(struct net_device *netdev, void *vdev)
7339 {
7340 struct i40e_netdev_priv *np = netdev_priv(netdev);
7341 struct i40e_fwd_adapter *fwd = vdev;
7342 struct i40e_channel *ch, *ch_tmp;
7343 struct i40e_vsi *vsi = np->vsi;
7344 struct i40e_pf *pf = vsi->back;
7345 struct i40e_hw *hw = &pf->hw;
7346 int aq_err, ret = 0;
7347
7348 /* Find the channel associated with the macvlan and del mac filter */
7349 list_for_each_entry_safe(ch, ch_tmp, &vsi->macvlan_list, list) {
7350 if (i40e_is_channel_macvlan(ch) &&
7351 ether_addr_equal(i40e_channel_mac(ch),
7352 fwd->netdev->dev_addr)) {
7353 ret = i40e_del_macvlan_filter(hw, ch->seid,
7354 i40e_channel_mac(ch),
7355 &aq_err);
7356 if (!ret) {
7357
7358 i40e_reset_ch_rings(vsi, ch);
7359 clear_bit(ch->fwd->bit_no, vsi->fwd_bitmask);
7360 netdev_unbind_sb_channel(netdev, fwd->netdev);
7361 netdev_set_sb_channel(fwd->netdev, 0);
7362 kfree(ch->fwd);
7363 ch->fwd = NULL;
7364 } else {
7365 dev_info(&pf->pdev->dev,
7366 "Error deleting mac filter on macvlan err %s, aq_err %s\n",
7367 i40e_stat_str(hw, ret),
7368 i40e_aq_str(hw, aq_err));
7369 }
7370 break;
7371 }
7372 }
7373 }
7374
7375 /**
7376  * i40e_setup_tc - configure multiple traffic classes
7377  * @netdev: net device to configure
7378  * @type_data: tc offload data
7379  **/
7380 static int i40e_setup_tc(struct net_device *netdev, void *type_data)
7381 {
7382 struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
7383 struct i40e_netdev_priv *np = netdev_priv(netdev);
7384 struct i40e_vsi *vsi = np->vsi;
7385 struct i40e_pf *pf = vsi->back;
7386 u8 enabled_tc = 0, num_tc, hw;
7387 bool need_reset = false;
7388 int old_queue_pairs;
7389 int ret = -EINVAL;
7390 u16 mode;
7391 int i;
7392
7393 old_queue_pairs = vsi->num_queue_pairs;
7394 num_tc = mqprio_qopt->qopt.num_tc;
7395 hw = mqprio_qopt->qopt.hw;
7396 mode = mqprio_qopt->mode;
7397 if (!hw) {
7398 pf->flags &= ~I40E_FLAG_TC_MQPRIO;
7399 memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt));
7400 goto config_tc;
7401 }
7402
7403
7404 if (pf->flags & I40E_FLAG_MFP_ENABLED) {
7405 netdev_info(netdev,
7406 "Configuring TC not supported in MFP mode\n");
7407 return ret;
7408 }
7409 switch (mode) {
7410 case TC_MQPRIO_MODE_DCB:
7411 pf->flags &= ~I40E_FLAG_TC_MQPRIO;
7412
7413
7414 if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) {
7415 netdev_info(netdev,
7416 "DCB is not enabled for adapter\n");
7417 return ret;
7418 }
7419
7420
7421 if (num_tc > i40e_pf_get_num_tc(pf)) {
7422 netdev_info(netdev,
7423 "TC count greater than enabled on link for adapter\n");
7424 return ret;
7425 }
7426 break;
7427 case TC_MQPRIO_MODE_CHANNEL:
7428 if (pf->flags & I40E_FLAG_DCB_ENABLED) {
7429 netdev_info(netdev,
7430 "Full offload of TC Mqprio options is not supported when DCB is enabled\n");
7431 return ret;
7432 }
7433 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
7434 return ret;
7435 ret = i40e_validate_mqprio_qopt(vsi, mqprio_qopt);
7436 if (ret)
7437 return ret;
7438 memcpy(&vsi->mqprio_qopt, mqprio_qopt,
7439 sizeof(*mqprio_qopt));
7440 pf->flags |= I40E_FLAG_TC_MQPRIO;
7441 pf->flags &= ~I40E_FLAG_DCB_ENABLED;
7442 break;
7443 default:
7444 return -EINVAL;
7445 }
7446
7447 config_tc:
7448 /* Generate TC map for the number of TCs requested */
7449 for (i = 0; i < num_tc; i++)
7450 enabled_tc |= BIT(i);
7451
7452 /* Requesting same TC configuration as already enabled */
7453 if (enabled_tc == vsi->tc_config.enabled_tc &&
7454 mode != TC_MQPRIO_MODE_CHANNEL)
7455 return 0;
7456
7457
7458 i40e_quiesce_vsi(vsi);
7459
7460 if (!hw && !(pf->flags & I40E_FLAG_TC_MQPRIO))
7461 i40e_remove_queue_channels(vsi);
7462
7463
7464 ret = i40e_vsi_config_tc(vsi, enabled_tc);
7465 if (ret) {
7466 netdev_info(netdev, "Failed configuring TC for VSI seid=%d\n",
7467 vsi->seid);
7468 need_reset = true;
7469 goto exit;
7470 } else {
7471 dev_info(&vsi->back->pdev->dev,
7472 "Setup channel (id:%u) utilizing num_queues %d\n",
7473 vsi->seid, vsi->tc_config.tc_info[0].qcount);
7474 }
7475
7476 if (pf->flags & I40E_FLAG_TC_MQPRIO) {
7477 if (vsi->mqprio_qopt.max_rate[0]) {
7478 u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0];
7479
7480 do_div(max_tx_rate, I40E_BW_MBPS_DIVISOR);
7481 ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
7482 if (!ret) {
7483 u64 credits = max_tx_rate;
7484
7485 do_div(credits, I40E_BW_CREDIT_DIVISOR);
7486 dev_dbg(&vsi->back->pdev->dev,
7487 "Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
7488 max_tx_rate,
7489 credits,
7490 vsi->seid);
7491 } else {
7492 need_reset = true;
7493 goto exit;
7494 }
7495 }
7496 ret = i40e_configure_queue_channels(vsi);
7497 if (ret) {
7498 vsi->num_queue_pairs = old_queue_pairs;
7499 netdev_info(netdev,
7500 "Failed configuring queue channels\n");
7501 need_reset = true;
7502 goto exit;
7503 }
7504 }
7505
7506 exit:
7507 /* Reset the configuration data to defaults, only TC0 is enabled */
7508 if (need_reset) {
7509 i40e_vsi_set_default_tc_config(vsi);
7510 need_reset = false;
7511 }
7512
7513
7514 i40e_unquiesce_vsi(vsi);
7515 return ret;
7516 }
7517
7518 /**
7519  * i40e_set_cld_element - sets cloud filter element data
7520  * @filter: cloud filter rule
7521  * @cld: ptr to cloud filter element data
7522  *
7523  * This is a helper function to copy data into the cloud filter element
7524  **/
7525 static inline void
7526 i40e_set_cld_element(struct i40e_cloud_filter *filter,
7527 struct i40e_aqc_cloud_filters_element_data *cld)
7528 {
7529 int i, j;
7530 u32 ipa;
7531
7532 memset(cld, 0, sizeof(*cld));
7533 ether_addr_copy(cld->outer_mac, filter->dst_mac);
7534 ether_addr_copy(cld->inner_mac, filter->src_mac);
7535
7536 if (filter->n_proto != ETH_P_IP && filter->n_proto != ETH_P_IPV6)
7537 return;
7538
7539 if (filter->n_proto == ETH_P_IPV6) {
7540 #define IPV6_MAX_INDEX (ARRAY_SIZE(filter->dst_ipv6) - 1)
7541 for (i = 0, j = 0; i < ARRAY_SIZE(filter->dst_ipv6);
7542 i++, j += 2) {
7543 ipa = be32_to_cpu(filter->dst_ipv6[IPV6_MAX_INDEX - i]);
7544 ipa = cpu_to_le32(ipa);
7545 memcpy(&cld->ipaddr.raw_v6.data[j], &ipa, sizeof(ipa));
7546 }
7547 } else {
7548 ipa = be32_to_cpu(filter->dst_ipv4);
7549 memcpy(&cld->ipaddr.v4.data, &ipa, sizeof(ipa));
7550 }
7551
7552 cld->inner_vlan = cpu_to_le16(ntohs(filter->vlan_id));
7553
7554 /* tenant_id is not supported by FW now, once the support is enabled
7555  * fill the cld->tenant_id with cpu_to_le32(filter->tenant_id)
7556  */
7557 if (filter->tenant_id)
7558 return;
7559 }
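
The be32_to_cpu()/cpu_to_le32() pair above converts each address word from the network (big-endian) order used by the stack to the little-endian layout apparently expected by the admin queue, walking the IPv6 words in reverse. On any host that composition is a plain byte reversal of the stored word; a standalone sketch (demo only):

#include <stdint.h>
#include <stdio.h>

/* Convert one 32-bit word from big-endian (network) byte order to
 * little-endian; equivalent to cpu_to_le32(be32_to_cpu(x)) on any host.
 */
static uint32_t be32_to_le32(uint32_t be)
{
	return ((be & 0x000000ffu) << 24) |
	       ((be & 0x0000ff00u) << 8)  |
	       ((be & 0x00ff0000u) >> 8)  |
	       ((be & 0xff000000u) >> 24);
}

int main(void)
{
	/* 0x01020304 stored big-endian becomes 0x04030201 little-endian */
	printf("0x%08x -> 0x%08x\n", 0x01020304u, be32_to_le32(0x01020304u));
	return 0;
}
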
7560
7561 /**
7562  * i40e_add_del_cloud_filter - Add/del cloud filter
7563  * @vsi: pointer to VSI
7564  * @filter: cloud filter rule
7565  * @add: if true, add, if false, delete
7566  *
7567  * Add or delete a cloud filter for a specific flow spec.
7568  * Returns 0 if the filter was successfully added.
7569  **/
7570 int i40e_add_del_cloud_filter(struct i40e_vsi *vsi,
7571 struct i40e_cloud_filter *filter, bool add)
7572 {
7573 struct i40e_aqc_cloud_filters_element_data cld_filter;
7574 struct i40e_pf *pf = vsi->back;
7575 int ret;
7576 static const u16 flag_table[128] = {
7577 [I40E_CLOUD_FILTER_FLAGS_OMAC] =
7578 I40E_AQC_ADD_CLOUD_FILTER_OMAC,
7579 [I40E_CLOUD_FILTER_FLAGS_IMAC] =
7580 I40E_AQC_ADD_CLOUD_FILTER_IMAC,
7581 [I40E_CLOUD_FILTER_FLAGS_IMAC_IVLAN] =
7582 I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN,
7583 [I40E_CLOUD_FILTER_FLAGS_IMAC_TEN_ID] =
7584 I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID,
7585 [I40E_CLOUD_FILTER_FLAGS_OMAC_TEN_ID_IMAC] =
7586 I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC,
7587 [I40E_CLOUD_FILTER_FLAGS_IMAC_IVLAN_TEN_ID] =
7588 I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID,
7589 [I40E_CLOUD_FILTER_FLAGS_IIP] =
7590 I40E_AQC_ADD_CLOUD_FILTER_IIP,
7591 };
7592
7593 if (filter->flags >= ARRAY_SIZE(flag_table))
7594 return I40E_ERR_CONFIG;
7595
7596 /* copy element needed to add cloud filter from filter */
7597 i40e_set_cld_element(filter, &cld_filter);
7598
7599 if (filter->tunnel_type != I40E_CLOUD_TNL_TYPE_NONE)
7600 cld_filter.flags = cpu_to_le16(filter->tunnel_type <<
7601 I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT);
7602
7603 if (filter->n_proto == ETH_P_IPV6)
7604 cld_filter.flags |= cpu_to_le16(flag_table[filter->flags] |
7605 I40E_AQC_ADD_CLOUD_FLAGS_IPV6);
7606 else
7607 cld_filter.flags |= cpu_to_le16(flag_table[filter->flags] |
7608 I40E_AQC_ADD_CLOUD_FLAGS_IPV4);
7609
7610 if (add)
7611 ret = i40e_aq_add_cloud_filters(&pf->hw, filter->seid,
7612 &cld_filter, 1);
7613 else
7614 ret = i40e_aq_rem_cloud_filters(&pf->hw, filter->seid,
7615 &cld_filter, 1);
7616 if (ret)
7617 dev_dbg(&pf->pdev->dev,
7618 "Failed to %s cloud filter using l4 port %u, err %d aq_err %d\n",
7619 add ? "add" : "delete", filter->dst_port, ret,
7620 pf->hw.aq.asq_last_status);
7621 else
7622 dev_info(&pf->pdev->dev,
7623 "%s cloud filter for VSI: %d\n",
7624 add ? "Added" : "Deleted", filter->seid);
7625 return ret;
7626 }
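
The flag_table above is a sparse lookup table: the driver's internal filter-flag value indexes directly to the admin-queue flag encoding, and the ARRAY_SIZE bounds check stands in for a per-combination if/else chain. A minimal sketch of the same pattern (hypothetical names, demo only):

#include <stdint.h>
#include <stdio.h>

enum my_filter_flags { MY_FLAG_OMAC = 1, MY_FLAG_IMAC = 2 };

/* Sparse table: unlisted indexes stay 0, which callers can treat as
 * "combination not supported".
 */
static const uint16_t my_flag_table[8] = {
	[MY_FLAG_OMAC] = 0x0001,
	[MY_FLAG_IMAC] = 0x0002,
};

static int lookup(unsigned int flags, uint16_t *out)
{
	if (flags >= sizeof(my_flag_table) / sizeof(my_flag_table[0]))
		return -1;	/* out of range, like the I40E_ERR_CONFIG path */
	*out = my_flag_table[flags];
	return 0;
}

int main(void)
{
	uint16_t aq_flags;

	if (!lookup(MY_FLAG_IMAC, &aq_flags))
		printf("aq flags: 0x%04x\n", aq_flags);
	return 0;
}
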
7627
7628
7629 /**
7630  * i40e_add_del_cloud_filter_big_buf - Add/del cloud filter using big_buf
7631  * @vsi: pointer to VSI
7632  * @filter: cloud filter rule
7633  * @add: if true, add, if false, delete
7634  *
7635  * Add or delete a cloud filter for a specific flow spec using big buffer.
7636  **/
7637 int i40e_add_del_cloud_filter_big_buf(struct i40e_vsi *vsi,
7638 struct i40e_cloud_filter *filter,
7639 bool add)
7640 {
7641 struct i40e_aqc_cloud_filters_element_bb cld_filter;
7642 struct i40e_pf *pf = vsi->back;
7643 int ret;
7644
7645 /* Both (src/dst) valid mac_addr are not supported */
7646 if ((is_valid_ether_addr(filter->dst_mac) &&
7647 is_valid_ether_addr(filter->src_mac)) ||
7648 (is_multicast_ether_addr(filter->dst_mac) &&
7649 is_multicast_ether_addr(filter->src_mac)))
7650 return -EOPNOTSUPP;
7651
7652 /* Big buffer cloud filter needs 'L4 port' to be non-zero. Also, UDP
7653  * ports are not supported via big buffer now.
7654  */
7655 if (!filter->dst_port || filter->ip_proto == IPPROTO_UDP)
7656 return -EOPNOTSUPP;
7657
7658 /* adding filter using src_port/src_ip is not supported at this stage */
7659 if (filter->src_port || filter->src_ipv4 ||
7660 !ipv6_addr_any(&filter->ip.v6.src_ip6))
7661 return -EOPNOTSUPP;
7662
7663 /* copy element needed to add cloud filter from filter */
7664 i40e_set_cld_element(filter, &cld_filter.element);
7665
7666 if (is_valid_ether_addr(filter->dst_mac) ||
7667 is_valid_ether_addr(filter->src_mac) ||
7668 is_multicast_ether_addr(filter->dst_mac) ||
7669 is_multicast_ether_addr(filter->src_mac)) {
7670 /* MAC + IP : unsupported mode */
7671 if (filter->dst_ipv4)
7672 return -EOPNOTSUPP;
7673
7674 /* since we validated that L4 port must be valid before
7675  * we get here, start with respective "flags" value
7676  * and update if vlan is present or not
7677  */
7678 cld_filter.element.flags =
7679 cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_MAC_PORT);
7680
7681 if (filter->vlan_id) {
7682 cld_filter.element.flags =
7683 cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_MAC_VLAN_PORT);
7684 }
7685
7686 } else if (filter->dst_ipv4 ||
7687 !ipv6_addr_any(&filter->ip.v6.dst_ip6)) {
7688 cld_filter.element.flags =
7689 cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_IP_PORT);
7690 if (filter->n_proto == ETH_P_IPV6)
7691 cld_filter.element.flags |=
7692 cpu_to_le16(I40E_AQC_ADD_CLOUD_FLAGS_IPV6);
7693 else
7694 cld_filter.element.flags |=
7695 cpu_to_le16(I40E_AQC_ADD_CLOUD_FLAGS_IPV4);
7696 } else {
7697 dev_err(&pf->pdev->dev,
7698 "either mac or ip has to be valid for cloud filter\n");
7699 return -EINVAL;
7700 }
7701
7702 /* Now copy L4 port in Byte 6..7 in general_fields */
7703 cld_filter.general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD0] =
7704 be16_to_cpu(filter->dst_port);
7705
7706 if (add) {
7707 /* Validate current device switch mode, change if necessary */
7708 ret = i40e_validate_and_set_switch_mode(vsi);
7709 if (ret) {
7710 dev_err(&pf->pdev->dev,
7711 "failed to set switch mode, ret %d\n",
7712 ret);
7713 return ret;
7714 }
7715
7716 ret = i40e_aq_add_cloud_filters_bb(&pf->hw, filter->seid,
7717 &cld_filter, 1);
7718 } else {
7719 ret = i40e_aq_rem_cloud_filters_bb(&pf->hw, filter->seid,
7720 &cld_filter, 1);
7721 }
7722
7723 if (ret)
7724 dev_dbg(&pf->pdev->dev,
7725 "Failed to %s cloud filter(big buffer) err %d aq_err %d\n",
7726 add ? "add" : "delete", ret, pf->hw.aq.asq_last_status);
7727 else
7728 dev_info(&pf->pdev->dev,
7729 "%s cloud filter for VSI: %d, L4 port: %d\n",
7730 add ? "Added" : "Deleted", filter->seid,
7731 ntohs(filter->dst_port));
7732 return ret;
7733 }
7734
7735 /**
7736  * i40e_parse_cls_flower - Parse tc flower filters provided by kernel
7737  * @vsi: Pointer to VSI
7738  * @f: Pointer to struct flow_cls_offload
7739  * @filter: Pointer to cloud filter structure
7740  *
7741  **/
7742 static int i40e_parse_cls_flower(struct i40e_vsi *vsi,
7743 struct flow_cls_offload *f,
7744 struct i40e_cloud_filter *filter)
7745 {
7746 struct flow_rule *rule = flow_cls_offload_flow_rule(f);
7747 struct flow_dissector *dissector = rule->match.dissector;
7748 u16 n_proto_mask = 0, n_proto_key = 0, addr_type = 0;
7749 struct i40e_pf *pf = vsi->back;
7750 u8 field_flags = 0;
7751
7752 if (dissector->used_keys &
7753 ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
7754 BIT(FLOW_DISSECTOR_KEY_BASIC) |
7755 BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
7756 BIT(FLOW_DISSECTOR_KEY_VLAN) |
7757 BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
7758 BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
7759 BIT(FLOW_DISSECTOR_KEY_PORTS) |
7760 BIT(FLOW_DISSECTOR_KEY_ENC_KEYID))) {
7761 dev_err(&pf->pdev->dev, "Unsupported key used: 0x%x\n",
7762 dissector->used_keys);
7763 return -EOPNOTSUPP;
7764 }
7765
7766 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
7767 struct flow_match_enc_keyid match;
7768
7769 flow_rule_match_enc_keyid(rule, &match);
7770 if (match.mask->keyid != 0)
7771 field_flags |= I40E_CLOUD_FIELD_TEN_ID;
7772
7773 filter->tenant_id = be32_to_cpu(match.key->keyid);
7774 }
7775
7776 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
7777 struct flow_match_basic match;
7778
7779 flow_rule_match_basic(rule, &match);
7780 n_proto_key = ntohs(match.key->n_proto);
7781 n_proto_mask = ntohs(match.mask->n_proto);
7782
7783 if (n_proto_key == ETH_P_ALL) {
7784 n_proto_key = 0;
7785 n_proto_mask = 0;
7786 }
7787 filter->n_proto = n_proto_key & n_proto_mask;
7788 filter->ip_proto = match.key->ip_proto;
7789 }
7790
7791 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
7792 struct flow_match_eth_addrs match;
7793
7794 flow_rule_match_eth_addrs(rule, &match);
7795
7796 /* use is_broadcast and is_zero to check for all 0xf or 0 */
7797 if (!is_zero_ether_addr(match.mask->dst)) {
7798 if (is_broadcast_ether_addr(match.mask->dst)) {
7799 field_flags |= I40E_CLOUD_FIELD_OMAC;
7800 } else {
7801 dev_err(&pf->pdev->dev, "Bad ether dest mask %pM\n",
7802 match.mask->dst);
7803 return I40E_ERR_CONFIG;
7804 }
7805 }
7806
7807 if (!is_zero_ether_addr(match.mask->src)) {
7808 if (is_broadcast_ether_addr(match.mask->src)) {
7809 field_flags |= I40E_CLOUD_FIELD_IMAC;
7810 } else {
7811 dev_err(&pf->pdev->dev, "Bad ether src mask %pM\n",
7812 match.mask->src);
7813 return I40E_ERR_CONFIG;
7814 }
7815 }
7816 ether_addr_copy(filter->dst_mac, match.key->dst);
7817 ether_addr_copy(filter->src_mac, match.key->src);
7818 }
7819
7820 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
7821 struct flow_match_vlan match;
7822
7823 flow_rule_match_vlan(rule, &match);
7824 if (match.mask->vlan_id) {
7825 if (match.mask->vlan_id == VLAN_VID_MASK) {
7826 field_flags |= I40E_CLOUD_FIELD_IVLAN;
7827
7828 } else {
7829 dev_err(&pf->pdev->dev, "Bad vlan mask 0x%04x\n",
7830 match.mask->vlan_id);
7831 return I40E_ERR_CONFIG;
7832 }
7833 }
7834
7835 filter->vlan_id = cpu_to_be16(match.key->vlan_id);
7836 }
7837
7838 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
7839 struct flow_match_control match;
7840
7841 flow_rule_match_control(rule, &match);
7842 addr_type = match.key->addr_type;
7843 }
7844
7845 if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
7846 struct flow_match_ipv4_addrs match;
7847
7848 flow_rule_match_ipv4_addrs(rule, &match);
7849 if (match.mask->dst) {
7850 if (match.mask->dst == cpu_to_be32(0xffffffff)) {
7851 field_flags |= I40E_CLOUD_FIELD_IIP;
7852 } else {
7853 dev_err(&pf->pdev->dev, "Bad ip dst mask %pI4b\n",
7854 &match.mask->dst);
7855 return I40E_ERR_CONFIG;
7856 }
7857 }
7858
7859 if (match.mask->src) {
7860 if (match.mask->src == cpu_to_be32(0xffffffff)) {
7861 field_flags |= I40E_CLOUD_FIELD_IIP;
7862 } else {
7863 dev_err(&pf->pdev->dev, "Bad ip src mask %pI4b\n",
7864 &match.mask->src);
7865 return I40E_ERR_CONFIG;
7866 }
7867 }
7868
7869 if (field_flags & I40E_CLOUD_FIELD_TEN_ID) {
7870 dev_err(&pf->pdev->dev, "Tenant id not allowed for ip filter\n");
7871 return I40E_ERR_CONFIG;
7872 }
7873 filter->dst_ipv4 = match.key->dst;
7874 filter->src_ipv4 = match.key->src;
7875 }
7876
7877 if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
7878 struct flow_match_ipv6_addrs match;
7879
7880 flow_rule_match_ipv6_addrs(rule, &match);
7881
7882 /* src and dest IPV6 address should not be LOOPBACK
7883  * (0:0:0:0:0:0:0:1), which can be represented as ::1
7884  */
7885 if (ipv6_addr_loopback(&match.key->dst) ||
7886 ipv6_addr_loopback(&match.key->src)) {
7887 dev_err(&pf->pdev->dev,
7888 "Bad ipv6, addr is LOOPBACK\n");
7889 return I40E_ERR_CONFIG;
7890 }
7891 if (!ipv6_addr_any(&match.mask->dst) ||
7892 !ipv6_addr_any(&match.mask->src))
7893 field_flags |= I40E_CLOUD_FIELD_IIP;
7894
7895 memcpy(&filter->src_ipv6, &match.key->src.s6_addr32,
7896 sizeof(filter->src_ipv6));
7897 memcpy(&filter->dst_ipv6, &match.key->dst.s6_addr32,
7898 sizeof(filter->dst_ipv6));
7899 }
7900
7901 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
7902 struct flow_match_ports match;
7903
7904 flow_rule_match_ports(rule, &match);
7905 if (match.mask->src) {
7906 if (match.mask->src == cpu_to_be16(0xffff)) {
7907 field_flags |= I40E_CLOUD_FIELD_IIP;
7908 } else {
7909 dev_err(&pf->pdev->dev, "Bad src port mask 0x%04x\n",
7910 be16_to_cpu(match.mask->src));
7911 return I40E_ERR_CONFIG;
7912 }
7913 }
7914
7915 if (match.mask->dst) {
7916 if (match.mask->dst == cpu_to_be16(0xffff)) {
7917 field_flags |= I40E_CLOUD_FIELD_IIP;
7918 } else {
7919 dev_err(&pf->pdev->dev, "Bad dst port mask 0x%04x\n",
7920 be16_to_cpu(match.mask->dst));
7921 return I40E_ERR_CONFIG;
7922 }
7923 }
7924
7925 filter->dst_port = match.key->dst;
7926 filter->src_port = match.key->src;
7927
7928 switch (filter->ip_proto) {
7929 case IPPROTO_TCP:
7930 case IPPROTO_UDP:
7931 break;
7932 default:
7933 dev_err(&pf->pdev->dev,
7934 "Only UDP and TCP transport are supported\n");
7935 return -EINVAL;
7936 }
7937 }
7938 filter->flags = field_flags;
7939 return 0;
7940 }
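
Each match block above enforces the same rule: the hardware can only do exact matches, so a field's mask must be either all-zero (don't care) or all-ones (full match), and partial masks are rejected. That check, reduced to its essence (hypothetical helper, demo only):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Accept only "unused" or "exact match" masks, as the parser does with
 * is_broadcast_ether_addr(), VLAN_VID_MASK, 0xffffffff and 0xffff.
 */
static bool mask_ok_u32(uint32_t mask)
{
	return mask == 0 || mask == UINT32_MAX;
}

int main(void)
{
	printf("mask 0xffffffff ok? %d\n", mask_ok_u32(0xffffffffu)); /* 1 */
	printf("mask 0xffffff00 ok? %d\n", mask_ok_u32(0xffffff00u)); /* 0 */
	return 0;
}
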
7941
7942 /**
7943  * i40e_handle_tclass - Forward to a traffic class on the device
7944  * @vsi: Pointer to VSI
7945  * @tc: traffic class index on the device
7946  * @filter: Pointer to cloud filter structure
7947  *
7948  **/
7949 static int i40e_handle_tclass(struct i40e_vsi *vsi, u32 tc,
7950 struct i40e_cloud_filter *filter)
7951 {
7952 struct i40e_channel *ch, *ch_tmp;
7953
7954 /* direct to a traffic class on the same device */
7955 if (tc == 0) {
7956 filter->seid = vsi->seid;
7957 return 0;
7958 } else if (vsi->tc_config.enabled_tc & BIT(tc)) {
7959 if (!filter->dst_port) {
7960 dev_err(&vsi->back->pdev->dev,
7961 "Specify destination port to direct to traffic class that is not default\n");
7962 return -EINVAL;
7963 }
7964 if (list_empty(&vsi->ch_list))
7965 return -EINVAL;
7966 list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list,
7967 list) {
7968 if (ch->seid == vsi->tc_seid_map[tc])
7969 filter->seid = ch->seid;
7970 }
7971 return 0;
7972 }
7973 dev_err(&vsi->back->pdev->dev, "TC is not enabled\n");
7974 return -EINVAL;
7975 }
7976
7977 /**
7978  * i40e_configure_clsflower - Configure tc flower filters
7979  * @vsi: Pointer to VSI
7980  * @cls_flower: Pointer to struct flow_cls_offload
7981  *
7982  **/
7983 static int i40e_configure_clsflower(struct i40e_vsi *vsi,
7984 struct flow_cls_offload *cls_flower)
7985 {
7986 int tc = tc_classid_to_hwtc(vsi->netdev, cls_flower->classid);
7987 struct i40e_cloud_filter *filter = NULL;
7988 struct i40e_pf *pf = vsi->back;
7989 int err = 0;
7990
7991 if (tc < 0) {
7992 dev_err(&vsi->back->pdev->dev, "Invalid traffic class\n");
7993 return -EOPNOTSUPP;
7994 }
7995
7996 if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) ||
7997 test_bit(__I40E_RESET_INTR_RECEIVED, pf->state))
7998 return -EBUSY;
7999
8000 if (pf->fdir_pf_active_filters ||
8001 (!hlist_empty(&pf->fdir_filter_list))) {
8002 dev_err(&vsi->back->pdev->dev,
8003 "Flow Director Sideband filters exists, turn ntuple off to configure cloud filters\n");
8004 return -EINVAL;
8005 }
8006
8007 if (vsi->back->flags & I40E_FLAG_FD_SB_ENABLED) {
8008 dev_err(&vsi->back->pdev->dev,
8009 "Disable Flow Director Sideband, configuring Cloud filters via tc-flower\n");
8010 vsi->back->flags &= ~I40E_FLAG_FD_SB_ENABLED;
8011 vsi->back->flags |= I40E_FLAG_FD_SB_TO_CLOUD_FILTER;
8012 }
8013
8014 filter = kzalloc(sizeof(*filter), GFP_KERNEL);
8015 if (!filter)
8016 return -ENOMEM;
8017
8018 filter->cookie = cls_flower->cookie;
8019
8020 err = i40e_parse_cls_flower(vsi, cls_flower, filter);
8021 if (err < 0)
8022 goto err;
8023
8024 err = i40e_handle_tclass(vsi, tc, filter);
8025 if (err < 0)
8026 goto err;
8027
8028
8029 if (filter->dst_port)
8030 err = i40e_add_del_cloud_filter_big_buf(vsi, filter, true);
8031 else
8032 err = i40e_add_del_cloud_filter(vsi, filter, true);
8033
8034 if (err) {
8035 dev_err(&pf->pdev->dev,
8036 "Failed to add cloud filter, err %s\n",
8037 i40e_stat_str(&pf->hw, err));
8038 goto err;
8039 }
8040
8041 /* add filter to the ordered list */
8042 INIT_HLIST_NODE(&filter->cloud_node);
8043
8044 hlist_add_head(&filter->cloud_node, &pf->cloud_filter_list);
8045
8046 pf->num_cloud_filters++;
8047
8048 return err;
8049 err:
8050 kfree(filter);
8051 return err;
8052 }
8053
8054 /**
8055  * i40e_find_cloud_filter - Find the cloud filter in the list
8056  * @vsi: Pointer to VSI
8057  * @cookie: filter specific cookie
8058  *
8059  **/
8060 static struct i40e_cloud_filter *i40e_find_cloud_filter(struct i40e_vsi *vsi,
8061 unsigned long *cookie)
8062 {
8063 struct i40e_cloud_filter *filter = NULL;
8064 struct hlist_node *node2;
8065
8066 hlist_for_each_entry_safe(filter, node2,
8067 &vsi->back->cloud_filter_list, cloud_node)
8068 if (!memcmp(cookie, &filter->cookie, sizeof(filter->cookie)))
8069 return filter;
8070 return NULL;
8071 }
8072
8073 /**
8074  * i40e_delete_clsflower - Remove tc flower filters
8075  * @vsi: Pointer to VSI
8076  * @cls_flower: Pointer to struct flow_cls_offload
8077  *
8078  **/
8079 static int i40e_delete_clsflower(struct i40e_vsi *vsi,
8080 struct flow_cls_offload *cls_flower)
8081 {
8082 struct i40e_cloud_filter *filter = NULL;
8083 struct i40e_pf *pf = vsi->back;
8084 int err = 0;
8085
8086 filter = i40e_find_cloud_filter(vsi, &cls_flower->cookie);
8087
8088 if (!filter)
8089 return -EINVAL;
8090
8091 hash_del(&filter->cloud_node);
8092
8093 if (filter->dst_port)
8094 err = i40e_add_del_cloud_filter_big_buf(vsi, filter, false);
8095 else
8096 err = i40e_add_del_cloud_filter(vsi, filter, false);
8097
8098 kfree(filter);
8099 if (err) {
8100 dev_err(&pf->pdev->dev,
8101 "Failed to delete cloud filter, err %s\n",
8102 i40e_stat_str(&pf->hw, err));
8103 return i40e_aq_rc_to_posix(err, pf->hw.aq.asq_last_status);
8104 }
8105
8106 pf->num_cloud_filters--;
8107 if (!pf->num_cloud_filters)
8108 if ((pf->flags & I40E_FLAG_FD_SB_TO_CLOUD_FILTER) &&
8109 !(pf->flags & I40E_FLAG_FD_SB_INACTIVE)) {
8110 pf->flags |= I40E_FLAG_FD_SB_ENABLED;
8111 pf->flags &= ~I40E_FLAG_FD_SB_TO_CLOUD_FILTER;
8112 pf->flags &= ~I40E_FLAG_FD_SB_INACTIVE;
8113 }
8114 return 0;
8115 }
8116
8117 /**
8118  * i40e_setup_tc_cls_flower - flower classifier offloads
8119  * @np: net device priv structure
8120  * @cls_flower: flower offload data
8121  **/
8122 static int i40e_setup_tc_cls_flower(struct i40e_netdev_priv *np,
8123 struct flow_cls_offload *cls_flower)
8124 {
8125 struct i40e_vsi *vsi = np->vsi;
8126
8127 switch (cls_flower->command) {
8128 case FLOW_CLS_REPLACE:
8129 return i40e_configure_clsflower(vsi, cls_flower);
8130 case FLOW_CLS_DESTROY:
8131 return i40e_delete_clsflower(vsi, cls_flower);
8132 case FLOW_CLS_STATS:
8133 return -EOPNOTSUPP;
8134 default:
8135 return -EOPNOTSUPP;
8136 }
8137 }
8138
8139 static int i40e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
8140 void *cb_priv)
8141 {
8142 struct i40e_netdev_priv *np = cb_priv;
8143
8144 if (!tc_cls_can_offload_and_chain0(np->vsi->netdev, type_data))
8145 return -EOPNOTSUPP;
8146
8147 switch (type) {
8148 case TC_SETUP_CLSFLOWER:
8149 return i40e_setup_tc_cls_flower(np, type_data);
8150
8151 default:
8152 return -EOPNOTSUPP;
8153 }
8154 }
8155
8156 static LIST_HEAD(i40e_block_cb_list);
8157
8158 static int __i40e_setup_tc(struct net_device *netdev, enum tc_setup_type type,
8159 void *type_data)
8160 {
8161 struct i40e_netdev_priv *np = netdev_priv(netdev);
8162
8163 switch (type) {
8164 case TC_SETUP_QDISC_MQPRIO:
8165 return i40e_setup_tc(netdev, type_data);
8166 case TC_SETUP_BLOCK:
8167 return flow_block_cb_setup_simple(type_data,
8168 &i40e_block_cb_list,
8169 i40e_setup_tc_block_cb,
8170 np, np, true);
8171 default:
8172 return -EOPNOTSUPP;
8173 }
8174 }
8175
8176
8177 /**
8178  * i40e_open - Called when a network interface is made active
8179  * @netdev: network interface device structure
8180  *
8181  * The open entry point is called when a network interface is made
8182  * active by the system (IP address assignment, ifconfig, etc).  At
8183  * this point all resources needed for transmit and receive operations
8184  * are allocated, the interrupt handler is registered with the OS,
8185  * and the stack is notified that the interface is ready.
8186  * Returns 0 on success, negative value on failure
8187  **/
8188 int i40e_open(struct net_device *netdev)
8189 {
8190 struct i40e_netdev_priv *np = netdev_priv(netdev);
8191 struct i40e_vsi *vsi = np->vsi;
8192 struct i40e_pf *pf = vsi->back;
8193 int err;
8194
8195 /* disallow open during test */
8196 if (test_bit(__I40E_TESTING, pf->state) ||
8197 test_bit(__I40E_BAD_EEPROM, pf->state))
8198 return -EBUSY;
8199
8200 netif_carrier_off(netdev);
8201
8202 if (i40e_force_link_state(pf, true))
8203 return -EAGAIN;
8204
8205 err = i40e_vsi_open(vsi);
8206 if (err)
8207 return err;
8208
8209 /* configure global TSO hardware offload settings */
8210 wr32(&pf->hw, I40E_GLLAN_TSOMSK_F, be32_to_cpu(TCP_FLAG_PSH |
8211 TCP_FLAG_FIN) >> 16);
8212 wr32(&pf->hw, I40E_GLLAN_TSOMSK_M, be32_to_cpu(TCP_FLAG_PSH |
8213 TCP_FLAG_FIN |
8214 TCP_FLAG_CWR) >> 16);
8215 wr32(&pf->hw, I40E_GLLAN_TSOMSK_L, be32_to_cpu(TCP_FLAG_CWR) >> 16);
8216
8217 udp_tunnel_get_rx_info(netdev);
8218
8219 return 0;
8220 }
8221
8222
8223 /**
8224  * i40e_vsi_open - bring up a VSI and allocate resources for it
8225  * @vsi: the VSI to open
8226  *
8227  * Finish initialization of the VSI.
8228  *
8229  * Returns 0 on success, negative value on failure
8230  * Note: expects to be called while under rtnl_lock()
8231  **/
8232 int i40e_vsi_open(struct i40e_vsi *vsi)
8233 {
8234 struct i40e_pf *pf = vsi->back;
8235 char int_name[I40E_INT_NAME_STR_LEN];
8236 int err;
8237
8238 /* allocate descriptors */
8239 err = i40e_vsi_setup_tx_resources(vsi);
8240 if (err)
8241 goto err_setup_tx;
8242 err = i40e_vsi_setup_rx_resources(vsi);
8243 if (err)
8244 goto err_setup_rx;
8245
8246 err = i40e_vsi_configure(vsi);
8247 if (err)
8248 goto err_setup_rx;
8249
8250 if (vsi->netdev) {
8251 snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
8252 dev_driver_string(&pf->pdev->dev), vsi->netdev->name);
8253 err = i40e_vsi_request_irq(vsi, int_name);
8254 if (err)
8255 goto err_setup_rx;
8256
8257 /* Notify the stack of the actual queue counts. */
8258 err = netif_set_real_num_tx_queues(vsi->netdev,
8259 vsi->num_queue_pairs);
8260 if (err)
8261 goto err_set_queues;
8262
8263 err = netif_set_real_num_rx_queues(vsi->netdev,
8264 vsi->num_queue_pairs);
8265 if (err)
8266 goto err_set_queues;
8267
8268 } else if (vsi->type == I40E_VSI_FDIR) {
8269 snprintf(int_name, sizeof(int_name) - 1, "%s-%s:fdir",
8270 dev_driver_string(&pf->pdev->dev),
8271 dev_name(&pf->pdev->dev));
8272 err = i40e_vsi_request_irq(vsi, int_name);
8273
8274 } else {
8275 err = -EINVAL;
8276 goto err_setup_rx;
8277 }
8278
8279 err = i40e_up_complete(vsi);
8280 if (err)
8281 goto err_up_complete;
8282
8283 return 0;
8284
8285 err_up_complete:
8286 i40e_down(vsi);
8287 err_set_queues:
8288 i40e_vsi_free_irq(vsi);
8289 err_setup_rx:
8290 i40e_vsi_free_rx_resources(vsi);
8291 err_setup_tx:
8292 i40e_vsi_free_tx_resources(vsi);
8293 if (vsi == pf->vsi[pf->lan_vsi])
8294 i40e_do_reset(pf, I40E_PF_RESET_FLAG, true);
8295
8296 return err;
8297 }
8298
8299 /**
8300  * i40e_fdir_filter_exit - Cleans up the Flow Director accounting
8301  * @pf: Pointer to PF
8302  *
8303  * This function destroys the hlist where all the Flow Director
8304  * filters were saved.
8305  **/
8306 static void i40e_fdir_filter_exit(struct i40e_pf *pf)
8307 {
8308 struct i40e_fdir_filter *filter;
8309 struct i40e_flex_pit *pit_entry, *tmp;
8310 struct hlist_node *node2;
8311
8312 hlist_for_each_entry_safe(filter, node2,
8313 &pf->fdir_filter_list, fdir_node) {
8314 hlist_del(&filter->fdir_node);
8315 kfree(filter);
8316 }
8317
8318 list_for_each_entry_safe(pit_entry, tmp, &pf->l3_flex_pit_list, list) {
8319 list_del(&pit_entry->list);
8320 kfree(pit_entry);
8321 }
8322 INIT_LIST_HEAD(&pf->l3_flex_pit_list);
8323
8324 list_for_each_entry_safe(pit_entry, tmp, &pf->l4_flex_pit_list, list) {
8325 list_del(&pit_entry->list);
8326 kfree(pit_entry);
8327 }
8328 INIT_LIST_HEAD(&pf->l4_flex_pit_list);
8329
8330 pf->fdir_pf_active_filters = 0;
8331 pf->fd_tcp4_filter_cnt = 0;
8332 pf->fd_udp4_filter_cnt = 0;
8333 pf->fd_sctp4_filter_cnt = 0;
8334 pf->fd_ip4_filter_cnt = 0;
8335
8336 /* Reprogram the default input set for TCP/IPv4 */
8337 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_TCP,
8338 I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
8339 I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
8340
8341 /* Reprogram the default input set for UDP/IPv4 */
8342 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_UDP,
8343 I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
8344 I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
8345
8346 /* Reprogram the default input set for SCTP/IPv4 */
8347 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_SCTP,
8348 I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
8349 I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
8350
8351 /* Reprogram the default input set for Other/IPv4 */
8352 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_OTHER,
8353 I40E_L3_SRC_MASK | I40E_L3_DST_MASK);
8354
8355 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_FRAG_IPV4,
8356 I40E_L3_SRC_MASK | I40E_L3_DST_MASK);
8357 }
8358
8359 /**
8360  * i40e_cloud_filter_exit - Cleans up the cloud filters
8361  * @pf: Pointer to PF
8362  *
8363  * This function destroys the hlist where all the cloud filters
8364  * were saved.
8365  **/
8366 static void i40e_cloud_filter_exit(struct i40e_pf *pf)
8367 {
8368 struct i40e_cloud_filter *cfilter;
8369 struct hlist_node *node;
8370
8371 hlist_for_each_entry_safe(cfilter, node,
8372 &pf->cloud_filter_list, cloud_node) {
8373 hlist_del(&cfilter->cloud_node);
8374 kfree(cfilter);
8375 }
8376 pf->num_cloud_filters = 0;
8377
8378 if ((pf->flags & I40E_FLAG_FD_SB_TO_CLOUD_FILTER) &&
8379 !(pf->flags & I40E_FLAG_FD_SB_INACTIVE)) {
8380 pf->flags |= I40E_FLAG_FD_SB_ENABLED;
8381 pf->flags &= ~I40E_FLAG_FD_SB_TO_CLOUD_FILTER;
8382 pf->flags &= ~I40E_FLAG_FD_SB_INACTIVE;
8383 }
8384 }
8385
8386
8387 /**
8388  * i40e_close - Disables a network interface
8389  * @netdev: network interface device structure
8390  *
8391  * The close entry point is called when an interface is de-activated
8392  * by the OS.  The hardware is still under the driver's control, but
8393  * needs to be disabled.  A global MAC reset is issued to stop the
8394  * hardware, and all transmit and receive resources are freed.
8395  **/
8396 int i40e_close(struct net_device *netdev)
8397 {
8398 struct i40e_netdev_priv *np = netdev_priv(netdev);
8399 struct i40e_vsi *vsi = np->vsi;
8400
8401 i40e_vsi_close(vsi);
8402
8403 return 0;
8404 }
8405
8406 /**
8407  * i40e_do_reset - Start a PF or Core Reset sequence
8408  * @pf: board private structure
8409  * @reset_flags: which reset is requested
8410  * @lock_acquired: indicates whether or not the lock has been acquired
8411  * before this function was called.
8412  *
8413  * The essential difference in resets is that the PF Reset
8414  * doesn't clear the packet buffers, doesn't reset the PE
8415  * firmware, and doesn't bother the other PFs on the chip.
8416  **/
8417 void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags, bool lock_acquired)
8418 {
8419 u32 val;
8420
8421 WARN_ON(in_interrupt());
8422
8423
8424 /* do the biggest reset indicated */
8425 if (reset_flags & BIT_ULL(__I40E_GLOBAL_RESET_REQUESTED)) {
8426 /* Request a Global Reset
8427  *
8428  * This will start the chip's countdown to the actual full
8429  * chip reset event, and a warning interrupt to be sent
8430  * to all PFs, including the requestor.  Our handler
8431  * for the warning interrupt will deal with the shutdown
8432  * and recovery of the switch setup.
8433  */
8434
8435 dev_dbg(&pf->pdev->dev, "GlobalR requested\n");
8436 val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
8437 val |= I40E_GLGEN_RTRIG_GLOBR_MASK;
8438 wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
8439
8440 } else if (reset_flags & BIT_ULL(__I40E_CORE_RESET_REQUESTED)) {
8441 /* Request a Core Reset
8442  *
8443  * Same as Global Reset, except does *not* include the MAC/PHY
8444  */
8445
8446 dev_dbg(&pf->pdev->dev, "CoreR requested\n");
8447 val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
8448 val |= I40E_GLGEN_RTRIG_CORER_MASK;
8449 wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
8450 i40e_flush(&pf->hw);
8451
8452 } else if (reset_flags & I40E_PF_RESET_FLAG) {
8453 /* Request a PF Reset
8454  *
8455  * Resets only the PF-specific registers
8456  *
8457  * This goes directly to the tear-down and rebuild of
8458  * the switch, since we need to do all the recovery as
8459  * one function and not many separate chunks.
8460  */
8461
8462 dev_dbg(&pf->pdev->dev, "PFR requested\n");
8463 i40e_handle_reset_warning(pf, lock_acquired);
8464
8465 dev_info(&pf->pdev->dev,
8466 pf->flags & I40E_FLAG_DISABLE_FW_LLDP ?
8467 "FW LLDP is disabled\n" :
8468 "FW LLDP is enabled\n");
8469
8470 } else if (reset_flags & BIT_ULL(__I40E_REINIT_REQUESTED)) {
8471 int v;
8472
8473 /* Find the VSI(s) that requested a re-init */
8474 dev_info(&pf->pdev->dev,
8475 "VSI reinit requested\n");
8476 for (v = 0; v < pf->num_alloc_vsi; v++) {
8477 struct i40e_vsi *vsi = pf->vsi[v];
8478
8479 if (vsi != NULL &&
8480 test_and_clear_bit(__I40E_VSI_REINIT_REQUESTED,
8481 vsi->state))
8482 i40e_vsi_reinit_locked(pf->vsi[v]);
8483 }
8484 } else if (reset_flags & BIT_ULL(__I40E_DOWN_REQUESTED)) {
8485 int v;
8486
8487 /* Find the VSI(s) that needs to be brought down */
8488 dev_info(&pf->pdev->dev, "VSI down requested\n");
8489 for (v = 0; v < pf->num_alloc_vsi; v++) {
8490 struct i40e_vsi *vsi = pf->vsi[v];
8491
8492 if (vsi != NULL &&
8493 test_and_clear_bit(__I40E_VSI_DOWN_REQUESTED,
8494 vsi->state)) {
8495 set_bit(__I40E_VSI_DOWN, vsi->state);
8496 i40e_down(vsi);
8497 }
8498 }
8499 } else {
8500 dev_info(&pf->pdev->dev,
8501 "bad reset request 0x%08x\n", reset_flags);
8502 }
8503 }
8504
8505 #ifdef CONFIG_I40E_DCB
8506 /**
8507  * i40e_dcb_need_reconfig - Check if DCB needs reconfig
8508  * @pf: board private structure
8509  * @old_cfg: current DCB config
8510  * @new_cfg: new DCB config
8511  **/
8512 bool i40e_dcb_need_reconfig(struct i40e_pf *pf,
8513 struct i40e_dcbx_config *old_cfg,
8514 struct i40e_dcbx_config *new_cfg)
8515 {
8516 bool need_reconfig = false;
8517
8518 /* Check if ETS configuration has changed */
8519 if (memcmp(&new_cfg->etscfg,
8520 &old_cfg->etscfg,
8521 sizeof(new_cfg->etscfg))) {
8522 /* If Priority Table has changed reconfig is needed */
8523 if (memcmp(&new_cfg->etscfg.prioritytable,
8524 &old_cfg->etscfg.prioritytable,
8525 sizeof(new_cfg->etscfg.prioritytable))) {
8526 need_reconfig = true;
8527 dev_dbg(&pf->pdev->dev, "ETS UP2TC changed.\n");
8528 }
8529
8530 if (memcmp(&new_cfg->etscfg.tcbwtable,
8531 &old_cfg->etscfg.tcbwtable,
8532 sizeof(new_cfg->etscfg.tcbwtable)))
8533 dev_dbg(&pf->pdev->dev, "ETS TC BW Table changed.\n");
8534
8535 if (memcmp(&new_cfg->etscfg.tsatable,
8536 &old_cfg->etscfg.tsatable,
8537 sizeof(new_cfg->etscfg.tsatable)))
8538 dev_dbg(&pf->pdev->dev, "ETS TSA Table changed.\n");
8539 }
8540
8541 /* Check if PFC configuration has changed */
8542 if (memcmp(&new_cfg->pfc,
8543 &old_cfg->pfc,
8544 sizeof(new_cfg->pfc))) {
8545 need_reconfig = true;
8546 dev_dbg(&pf->pdev->dev, "PFC config change detected.\n");
8547 }
8548
8549 /* Check if APP Table has changed */
8550 if (memcmp(&new_cfg->app,
8551 &old_cfg->app,
8552 sizeof(new_cfg->app))) {
8553 need_reconfig = true;
8554 dev_dbg(&pf->pdev->dev, "APP Table change detected.\n");
8555 }
8556
8557 dev_dbg(&pf->pdev->dev, "dcb need_reconfig=%d\n", need_reconfig);
8558 return need_reconfig;
8559 }
8560
8561 /**
8562  * i40e_handle_lldp_event - Handle LLDP Change MIB event
8563  * @pf: board private structure
8564  * @e: event info posted on ARQ
8565  **/
8566 static int i40e_handle_lldp_event(struct i40e_pf *pf,
8567 struct i40e_arq_event_info *e)
8568 {
8569 struct i40e_aqc_lldp_get_mib *mib =
8570 (struct i40e_aqc_lldp_get_mib *)&e->desc.params.raw;
8571 struct i40e_hw *hw = &pf->hw;
8572 struct i40e_dcbx_config tmp_dcbx_cfg;
8573 bool need_reconfig = false;
8574 int ret = 0;
8575 u8 type;
8576
8577 /* Not DCB capable or capability disabled */
8578 if (!(pf->flags & I40E_FLAG_DCB_CAPABLE))
8579 return ret;
8580
8581 /* Ignore if event is not for Nearest Bridge */
8582 type = ((mib->type >> I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT)
8583 & I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
8584 dev_dbg(&pf->pdev->dev, "LLDP event mib bridge type 0x%x\n", type);
8585 if (type != I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE)
8586 return ret;
8587
8588 /* Check MIB Type and return if event for Remote MIB update */
8589 type = mib->type & I40E_AQ_LLDP_MIB_TYPE_MASK;
8590 dev_dbg(&pf->pdev->dev,
8591 "LLDP event mib type %s\n", type ? "remote" : "local");
8592 if (type == I40E_AQ_LLDP_MIB_REMOTE) {
8593 /* Update the remote cached instance and return */
8594 ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE,
8595 I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
8596 &hw->remote_dcbx_config);
8597 goto exit;
8598 }
8599
8600 /* Store the old configuration */
8601 tmp_dcbx_cfg = hw->local_dcbx_config;
8602
8603 /* Reset the old DCBx configuration data */
8604 memset(&hw->local_dcbx_config, 0, sizeof(hw->local_dcbx_config));
8605 /* Get updated DCBX data from firmware */
8606 ret = i40e_get_dcb_config(&pf->hw);
8607 if (ret) {
8608 dev_info(&pf->pdev->dev,
8609 "Failed querying DCB configuration data from firmware, err %s aq_err %s\n",
8610 i40e_stat_str(&pf->hw, ret),
8611 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
8612 goto exit;
8613 }
8614
8615 /* No change detected in DCBX configs */
8616 if (!memcmp(&tmp_dcbx_cfg, &hw->local_dcbx_config,
8617 sizeof(tmp_dcbx_cfg))) {
8618 dev_dbg(&pf->pdev->dev, "No change detected in DCBX configuration.\n");
8619 goto exit;
8620 }
8621
8622 need_reconfig = i40e_dcb_need_reconfig(pf, &tmp_dcbx_cfg,
8623 &hw->local_dcbx_config);
8624
8625 i40e_dcbnl_flush_apps(pf, &tmp_dcbx_cfg, &hw->local_dcbx_config);
8626
8627 if (!need_reconfig)
8628 goto exit;
8629
8630 /* Enable DCB tagging only when more than one TC */
8631 if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
8632 pf->flags |= I40E_FLAG_DCB_ENABLED;
8633 else
8634 pf->flags &= ~I40E_FLAG_DCB_ENABLED;
8635
8636 set_bit(__I40E_PORT_SUSPENDED, pf->state);
8637 /* Reconfiguration needed, quiesce all VSIs */
8638 i40e_pf_quiesce_all_vsi(pf);
8639
8640 /* Handle changes in configuration by updating VEB/VSIs */
8641 i40e_dcb_reconfigure(pf);
8642
8643 ret = i40e_resume_port_tx(pf);
8644
8645 clear_bit(__I40E_PORT_SUSPENDED, pf->state);
8646
8647 if (ret)
8648 goto exit;
8649
8650 /* Wait for the PF's queues to be disabled */
8651 ret = i40e_pf_wait_queues_disabled(pf);
8652 if (ret) {
8653 /* Schedule PF reset to recover */
8654 set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
8655 i40e_service_event_schedule(pf);
8656 } else {
8657 i40e_pf_unquiesce_all_vsi(pf);
8658 set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
8659 set_bit(__I40E_CLIENT_L2_CHANGE, pf->state);
8660 }
8661
8662 exit:
8663 return ret;
8664 }
8665 #endif
8666
8667 /**
8668  * i40e_do_reset_safe - Protected reset path for userland calls
8669  * @pf: board private structure
8670  * @reset_flags: which reset is requested
8671  *
8672  **/
8673 void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags)
8674 {
8675 rtnl_lock();
8676 i40e_do_reset(pf, reset_flags, true);
8677 rtnl_unlock();
8678 }
8679
8680
8681 /**
8682  * i40e_handle_lan_overflow_event - Handler for LAN queue overflow event
8683  * @pf: board private structure
8684  * @e: event info posted on ARQ
8685  *
8686  * Handler for the LAN queue overflow event generated by the firmware.
8687  **/
8688 static void i40e_handle_lan_overflow_event(struct i40e_pf *pf,
8689 struct i40e_arq_event_info *e)
8690 {
8691 struct i40e_aqc_lan_overflow *data =
8692 (struct i40e_aqc_lan_overflow *)&e->desc.params.raw;
8693 u32 queue = le32_to_cpu(data->prtdcb_rupto);
8694 u32 qtx_ctl = le32_to_cpu(data->otx_ctl);
8695 struct i40e_hw *hw = &pf->hw;
8696 struct i40e_vf *vf;
8697 u16 vf_id;
8698
8699 dev_dbg(&pf->pdev->dev, "overflow Rx Queue Number = %d QTX_CTL=0x%08x\n",
8700 queue, qtx_ctl);
8701
8702 /* Queue belongs to VF, find the VF and issue VF reset */
8703 if (((qtx_ctl & I40E_QTX_CTL_PFVF_Q_MASK)
8704 >> I40E_QTX_CTL_PFVF_Q_SHIFT) == I40E_QTX_CTL_VF_QUEUE) {
8705 vf_id = (u16)((qtx_ctl & I40E_QTX_CTL_VFVM_INDX_MASK)
8706 >> I40E_QTX_CTL_VFVM_INDX_SHIFT);
8707 vf_id -= hw->func_caps.vf_base_id;
8708 vf = &pf->vf[vf_id];
8709 i40e_vc_notify_vf_reset(vf);
8710 /* Allow VF to process pending reset notification */
8711 msleep(20);
8712 i40e_reset_vf(vf, false);
8713 }
8714 }
8715
8716
8717 /**
8718  * i40e_get_cur_guaranteed_fd_count - Get the consumed guaranteed FD filters
8719  **/
8720 u32 i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf)
8721 {
8722 u32 val, fcnt_prog;
8723
8724 val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
8725 fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK);
8726 return fcnt_prog;
8727 }
8728
8729 /**
8730  * i40e_get_current_fd_count - Get total FD filters programmed for this PF
8731  * @pf: board private structure
8732  **/
8733 u32 i40e_get_current_fd_count(struct i40e_pf *pf)
8734 {
8735 u32 val, fcnt_prog;
8736
8737 val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
8738 fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) +
8739 ((val & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
8740 I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
8741 return fcnt_prog;
8742 }
8743
8744 /**
8745  * i40e_get_global_fd_count - Get total FD filters programmed on device
8746  * @pf: board private structure
8747  **/
8748 u32 i40e_get_global_fd_count(struct i40e_pf *pf)
8749 {
8750 u32 val, fcnt_prog;
8751
8752 val = rd32(&pf->hw, I40E_GLQF_FDCNT_0);
8753 fcnt_prog = (val & I40E_GLQF_FDCNT_0_GUARANT_CNT_MASK) +
8754 ((val & I40E_GLQF_FDCNT_0_BESTCNT_MASK) >>
8755 I40E_GLQF_FDCNT_0_BESTCNT_SHIFT);
8756 return fcnt_prog;
8757 }
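
All three counters above use the same mask-and-shift idiom to pull packed fields out of one status register: AND with the field mask, then shift right by the field's bit offset. A standalone sketch with made-up field positions (the real masks live in the register headers; these DEMO_* values are hypothetical):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical layout: guaranteed count in bits 0..12, best-effort
 * count in bits 16..28.
 */
#define DEMO_GUARANT_MASK 0x00001fffu
#define DEMO_BEST_MASK    0x1fff0000u
#define DEMO_BEST_SHIFT   16

int main(void)
{
	uint32_t val = 0x00240010u;	/* pretend register read */
	uint32_t total = (val & DEMO_GUARANT_MASK) +
			 ((val & DEMO_BEST_MASK) >> DEMO_BEST_SHIFT);

	printf("guaranteed + best effort = %u\n", total); /* 16 + 36 = 52 */
	return 0;
}
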
8758
8759 /**
8760  * i40e_reenable_fdir_sb - Restore FDir SB capability
8761  * @pf: board private structure
8762  **/
8763 static void i40e_reenable_fdir_sb(struct i40e_pf *pf)
8764 {
8765 if (test_and_clear_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state))
8766 if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
8767 (I40E_DEBUG_FD & pf->hw.debug_mask))
8768 dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n");
8769 }
8770
8771 /**
8772  * i40e_reenable_fdir_atr - Restore FDir ATR capability
8773  * @pf: board private structure
8774  **/
8775 static void i40e_reenable_fdir_atr(struct i40e_pf *pf)
8776 {
8777 if (test_and_clear_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state)) {
8778 /* ATR uses the same filtering logic as SB rules. It only
8779  * functions properly if the input set mask is at the default
8780  * settings. It is safe to restore the default input set
8781  * because there are no active TCPv4 filter rules.
8782  */
8783 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_TCP,
8784 I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
8785 I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
8786
8787 if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
8788 (I40E_DEBUG_FD & pf->hw.debug_mask))
8789 dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table and there are no conflicting ntuple rules\n");
8790 }
8791 }
8792
8793 /**
8794  * i40e_delete_invalid_filter - Delete an invalid FDIR filter
8795  * @pf: board private structure
8796  * @filter: FDir filter to remove
8797  */
8798 static void i40e_delete_invalid_filter(struct i40e_pf *pf,
8799 struct i40e_fdir_filter *filter)
8800 {
8801 /* Update counters */
8802 pf->fdir_pf_active_filters--;
8803 pf->fd_inv = 0;
8804
8805 switch (filter->flow_type) {
8806 case TCP_V4_FLOW:
8807 pf->fd_tcp4_filter_cnt--;
8808 break;
8809 case UDP_V4_FLOW:
8810 pf->fd_udp4_filter_cnt--;
8811 break;
8812 case SCTP_V4_FLOW:
8813 pf->fd_sctp4_filter_cnt--;
8814 break;
8815 case IP_USER_FLOW:
8816 switch (filter->ip4_proto) {
8817 case IPPROTO_TCP:
8818 pf->fd_tcp4_filter_cnt--;
8819 break;
8820 case IPPROTO_UDP:
8821 pf->fd_udp4_filter_cnt--;
8822 break;
8823 case IPPROTO_SCTP:
8824 pf->fd_sctp4_filter_cnt--;
8825 break;
8826 case IPPROTO_IP:
8827 pf->fd_ip4_filter_cnt--;
8828 break;
8829 }
8830 break;
8831 }
8832
8833 /* Remove the filter from the data structure */
8834 hlist_del(&filter->fdir_node);
8835 kfree(filter);
8836 }
8837
8838 /**
8839  * i40e_fdir_check_and_reenable - re-enable FD ATR or SB if auto-disabled
8840  * @pf: board private structure
8841  **/
8842 void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
8843 {
8844 struct i40e_fdir_filter *filter;
8845 u32 fcnt_prog, fcnt_avail;
8846 struct hlist_node *node;
8847
8848 if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
8849 return;
8850
8851 /* Check if we have enough room to re-enable FD SB */
8852 fcnt_prog = i40e_get_global_fd_count(pf);
8853 fcnt_avail = pf->fdir_pf_filter_count;
8854 if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM)) ||
8855 (pf->fd_add_err == 0) ||
8856 (i40e_get_current_atr_cnt(pf) < pf->fd_atr_cnt))
8857 i40e_reenable_fdir_sb(pf);
8858
8859 /* We should wait for even more space before re-enabling ATR.
8860  * Additionally, we cannot enable ATR as long as we still have TCP SB
8861  * rules active.
8862  */
8863 if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) &&
8864 (pf->fd_tcp4_filter_cnt == 0))
8865 i40e_reenable_fdir_atr(pf);
8866
8867 /* if hw had a problem adding a filter, delete it */
8868 if (pf->fd_inv > 0) {
8869 hlist_for_each_entry_safe(filter, node,
8870 &pf->fdir_filter_list, fdir_node)
8871 if (filter->fd_id == pf->fd_inv)
8872 i40e_delete_invalid_filter(pf, filter);
8873 }
8874 }
8875
8876 #define I40E_MIN_FD_FLUSH_INTERVAL 10
8877 #define I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE 30
8878
8879 /**
8880  * i40e_fdir_flush_and_replay - Flush the FD table and replay FD-SB filters
8881  **/
8882 static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
8883 {
8884 unsigned long min_flush_time;
8885 int flush_wait_retry = 50;
8886 bool disable_atr = false;
8887 int fd_room;
8888 int reg;
8889
8890 if (!time_after(jiffies, pf->fd_flush_timestamp +
8891 (I40E_MIN_FD_FLUSH_INTERVAL * HZ)))
8892 return;
8893
8894 /* If the flush is happening too quick and we have mostly SB rules we
8895  * should not re-enable ATR for some time.
8896  */
8897 min_flush_time = pf->fd_flush_timestamp +
8898 (I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE * HZ);
8899 fd_room = pf->fdir_pf_filter_count - pf->fdir_pf_active_filters;
8900
8901 if (!(time_after(jiffies, min_flush_time)) &&
8902 (fd_room < I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) {
8903 if (I40E_DEBUG_FD & pf->hw.debug_mask)
8904 dev_info(&pf->pdev->dev, "ATR disabled, not enough FD filter space.\n");
8905 disable_atr = true;
8906 }
8907
8908 pf->fd_flush_timestamp = jiffies;
8909 set_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state);
8910
8911 wr32(&pf->hw, I40E_PFQF_CTL_1,
8912 I40E_PFQF_CTL_1_CLEARFDTABLE_MASK);
8913 i40e_flush(&pf->hw);
8914 pf->fd_flush_cnt++;
8915 pf->fd_add_err = 0;
8916 do {
8917 /* Check FD flush status every 5-6msec */
8918 usleep_range(5000, 6000);
8919 reg = rd32(&pf->hw, I40E_PFQF_CTL_1);
8920 if (!(reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK))
8921 break;
8922 } while (flush_wait_retry--);
8923 if (reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK) {
8924 dev_warn(&pf->pdev->dev, "FD table did not flush, needs more time\n");
8925 } else {
8926 /* replay sideband filters */
8927 i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]);
8928 if (!disable_atr && !pf->fd_tcp4_filter_cnt)
8929 clear_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state);
8930 clear_bit(__I40E_FD_FLUSH_REQUESTED, pf->state);
8931 if (I40E_DEBUG_FD & pf->hw.debug_mask)
8932 dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n");
8933 }
8934 }
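
The flush above follows the classic trigger-then-poll pattern: write the self-clearing CLEARFDTABLE bit, then re-read the register with a bounded retry count and a sleep between polls, warning if the bit never clears. A compact sketch of that loop with stand-in register accessors (hypothetical, demo only):

#include <stdint.h>
#include <stdio.h>

#define DEMO_CLEAR_MASK 0x1u

/* Stand-ins for the real rd32()/usleep_range() (assumed, demo only) */
static uint32_t demo_read_reg(void)
{
	static int n;

	return n++ < 3 ? DEMO_CLEAR_MASK : 0;	/* clears after 3 polls */
}
static void demo_sleep_ms(unsigned int ms) { (void)ms; }

int main(void)
{
	int retries = 50;
	uint32_t reg;

	do {
		demo_sleep_ms(5);		/* poll every few ms */
		reg = demo_read_reg();
		if (!(reg & DEMO_CLEAR_MASK))	/* bit self-clears when done */
			break;
	} while (retries--);

	if (reg & DEMO_CLEAR_MASK)
		printf("flush did not complete\n");
	else
		printf("flush done\n");
	return 0;
}
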
8935
8936 /**
8937  * i40e_get_current_atr_cnt - Get the count of FD ATR filters programmed
8938  * @pf: board private structure
8939  **/
8940 u32 i40e_get_current_atr_cnt(struct i40e_pf *pf)
8941 {
8942 return i40e_get_current_fd_count(pf) - pf->fdir_pf_active_filters;
8943 }
8944
8945 /* We can see up to 256 filter programming descriptors in transit if
8946  * the filters are being applied really fast; accumulating enough
8947  * programming errors before reacting makes sure we don't trigger a
8948  * flush too often.
8949  */
8950 #define I40E_MAX_FD_PROGRAM_ERROR 256
8951
8952 /**
8953  * i40e_fdir_reinit_subtask - Worker thread to reinit FDIR filter table
8954  * @pf: board private structure
8955  **/
8956 static void i40e_fdir_reinit_subtask(struct i40e_pf *pf)
8957 {
8958
8959 /* if interface is down do nothing */
8960 if (test_bit(__I40E_DOWN, pf->state))
8961 return;
8962
8963 if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
8964 i40e_fdir_flush_and_replay(pf);
8965
8966 i40e_fdir_check_and_reenable(pf);
8967
8968 }
8969
8970 /**
8971  * i40e_vsi_link_event - notify VSI of a link event
8972  * @vsi: vsi to be notified
8973  * @link_up: link up or down
8974  **/
8975 static void i40e_vsi_link_event(struct i40e_vsi *vsi, bool link_up)
8976 {
8977 if (!vsi || test_bit(__I40E_VSI_DOWN, vsi->state))
8978 return;
8979
8980 switch (vsi->type) {
8981 case I40E_VSI_MAIN:
8982 if (!vsi->netdev || !vsi->netdev_registered)
8983 break;
8984
8985 if (link_up) {
8986 netif_carrier_on(vsi->netdev);
8987 netif_tx_wake_all_queues(vsi->netdev);
8988 } else {
8989 netif_carrier_off(vsi->netdev);
8990 netif_tx_stop_all_queues(vsi->netdev);
8991 }
8992 break;
8993
8994 case I40E_VSI_SRIOV:
8995 case I40E_VSI_VMDQ2:
8996 case I40E_VSI_CTRL:
8997 case I40E_VSI_IWARP:
8998 case I40E_VSI_MIRROR:
8999 default:
9000 /* there is no notification for other VSIs */
9001 break;
9002 }
9003 }
9004
9005 /**
9006  * i40e_veb_link_event - notify elements on the veb of a link event
9007  * @veb: veb to be notified
9008  * @link_up: link up or down
9009  **/
9010 static void i40e_veb_link_event(struct i40e_veb *veb, bool link_up)
9011 {
9012 struct i40e_pf *pf;
9013 int i;
9014
9015 if (!veb || !veb->pf)
9016 return;
9017 pf = veb->pf;
9018
9019 /* depth first... */
9020 for (i = 0; i < I40E_MAX_VEB; i++)
9021 if (pf->veb[i] && (pf->veb[i]->uplink_seid == veb->seid))
9022 i40e_veb_link_event(pf->veb[i], link_up);
9023
9024 /* ... now the local VSIs */
9025 for (i = 0; i < pf->num_alloc_vsi; i++)
9026 if (pf->vsi[i] && (pf->vsi[i]->uplink_seid == veb->seid))
9027 i40e_vsi_link_event(pf->vsi[i], link_up);
9028 }
9029
9030 /**
9031  * i40e_link_event - Update netif_carrier status
9032  * @pf: board private structure
9033  **/
9034 static void i40e_link_event(struct i40e_pf *pf)
9035 {
9036 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
9037 u8 new_link_speed, old_link_speed;
9038 i40e_status status;
9039 bool new_link, old_link;
9040
9041 /* set this to force the get_link_status call to refresh state */
9042 pf->hw.phy.get_link_info = true;
9043 old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP);
9044 status = i40e_get_link_status(&pf->hw, &new_link);
9045
9046 /* On success, disable temp link polling */
9047 if (status == I40E_SUCCESS) {
9048 clear_bit(__I40E_TEMP_LINK_POLLING, pf->state);
9049 } else {
9050 /* Enable link polling temporarily until i40e_get_link_status
9051  * returns I40E_SUCCESS
9052  */
9053 set_bit(__I40E_TEMP_LINK_POLLING, pf->state);
9054 dev_dbg(&pf->pdev->dev, "couldn't get link state, status: %d\n",
9055 status);
9056 return;
9057 }
9058
9059 old_link_speed = pf->hw.phy.link_info_old.link_speed;
9060 new_link_speed = pf->hw.phy.link_info.link_speed;
9061
9062 if (new_link == old_link &&
9063 new_link_speed == old_link_speed &&
9064 (test_bit(__I40E_VSI_DOWN, vsi->state) ||
9065 new_link == netif_carrier_ok(vsi->netdev)))
9066 return;
9067
9068 i40e_print_link_message(vsi, new_link);
9069
9070 /* Notify the base of the switch tree connected to
9071  * the link.  Floating VEBs are not notified.
9072  */
9073 if (pf->lan_veb < I40E_MAX_VEB && pf->veb[pf->lan_veb])
9074 i40e_veb_link_event(pf->veb[pf->lan_veb], new_link);
9075 else
9076 i40e_vsi_link_event(vsi, new_link);
9077
9078 if (pf->vf)
9079 i40e_vc_notify_link_state(pf);
9080
9081 if (pf->flags & I40E_FLAG_PTP)
9082 i40e_ptp_set_increment(pf);
9083 }
9084
9085
9086 /**
9087  * i40e_watchdog_subtask - periodic checks not using event driven response
9088  **/
9089 static void i40e_watchdog_subtask(struct i40e_pf *pf)
9090 {
9091 int i;
9092
9093 /* if interface is down do nothing */
9094 if (test_bit(__I40E_DOWN, pf->state) ||
9095 test_bit(__I40E_CONFIG_BUSY, pf->state))
9096 return;
9097
9098 /* make sure we don't do these things too often */
9099 if (time_before(jiffies, (pf->service_timer_previous +
9100 pf->service_timer_period)))
9101 return;
9102 pf->service_timer_previous = jiffies;
9103
9104 if ((pf->flags & I40E_FLAG_LINK_POLLING_ENABLED) ||
9105 test_bit(__I40E_TEMP_LINK_POLLING, pf->state))
9106 i40e_link_event(pf);
9107
9108 /* Update the stats for active netdevs so the network stack
9109  * can look at updated numbers whenever it cares to
9110  */
9111 for (i = 0; i < pf->num_alloc_vsi; i++)
9112 if (pf->vsi[i] && pf->vsi[i]->netdev)
9113 i40e_update_stats(pf->vsi[i]);
9114
9115 if (pf->flags & I40E_FLAG_VEB_STATS_ENABLED) {
9116 /* Update the stats for the active switching components */
9117 for (i = 0; i < I40E_MAX_VEB; i++)
9118 if (pf->veb[i])
9119 i40e_update_veb_stats(pf->veb[i]);
9120 }
9121
9122 i40e_ptp_rx_hang(pf);
9123 i40e_ptp_tx_hang(pf);
9124 }
9125
9126
9127 /**
9128  * i40e_reset_subtask - Set up for resetting the device and driver
9129  **/
9130 static void i40e_reset_subtask(struct i40e_pf *pf)
9131 {
9132 u32 reset_flags = 0;
9133
9134 if (test_bit(__I40E_REINIT_REQUESTED, pf->state)) {
9135 reset_flags |= BIT(__I40E_REINIT_REQUESTED);
9136 clear_bit(__I40E_REINIT_REQUESTED, pf->state);
9137 }
9138 if (test_bit(__I40E_PF_RESET_REQUESTED, pf->state)) {
9139 reset_flags |= BIT(__I40E_PF_RESET_REQUESTED);
9140 clear_bit(__I40E_PF_RESET_REQUESTED, pf->state);
9141 }
9142 if (test_bit(__I40E_CORE_RESET_REQUESTED, pf->state)) {
9143 reset_flags |= BIT(__I40E_CORE_RESET_REQUESTED);
9144 clear_bit(__I40E_CORE_RESET_REQUESTED, pf->state);
9145 }
9146 if (test_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state)) {
9147 reset_flags |= BIT(__I40E_GLOBAL_RESET_REQUESTED);
9148 clear_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state);
9149 }
9150 if (test_bit(__I40E_DOWN_REQUESTED, pf->state)) {
9151 reset_flags |= BIT(__I40E_DOWN_REQUESTED);
9152 clear_bit(__I40E_DOWN_REQUESTED, pf->state);
9153 }
9154
9155 /* If there's a recovery already waiting, it takes
9156  * precedence before starting a new reset sequence.
9157  */
9158 if (test_bit(__I40E_RESET_INTR_RECEIVED, pf->state)) {
9159 i40e_prep_for_reset(pf, false);
9160 i40e_reset(pf);
9161 i40e_rebuild(pf, false, false);
9162 }
9163
9164 /* If we're already down or resetting, just bail */
9165 if (reset_flags &&
9166 !test_bit(__I40E_DOWN, pf->state) &&
9167 !test_bit(__I40E_CONFIG_BUSY, pf->state)) {
9168 i40e_do_reset(pf, reset_flags, false);
9169 }
9170 }
9171
9172 /**
9173  * i40e_handle_link_event - Handle link event
9174  * @pf: board private structure
9175  * @e: event info posted on ARQ
9176  **/
9177 static void i40e_handle_link_event(struct i40e_pf *pf,
9178 struct i40e_arq_event_info *e)
9179 {
9180 struct i40e_aqc_get_link_status *status =
9181 (struct i40e_aqc_get_link_status *)&e->desc.params.raw;
9182
9183 /* Do a new status request to re-enable LSE reporting
9184  * and load new status information into the hw struct.
9185  * This completely ignores any state information
9186  * in the ARQ event info, instead choosing to always
9187  * issue the AQ update link status command.
9188  */
9189 i40e_link_event(pf);
9190
9191 /* Check if module meets thermal requirements */
9192 if (status->phy_type == I40E_PHY_TYPE_NOT_SUPPORTED_HIGH_TEMP) {
9193 dev_err(&pf->pdev->dev,
9194 "Rx/Tx is disabled on this device because the module does not meet thermal requirements.\n");
9195 dev_err(&pf->pdev->dev,
9196 "Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
9197 } else {
9198 /* check for unqualified module, if link is down, suppress
9199  * the message if link was forced to be down.
9200  */
9201 if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
9202 (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
9203 (!(status->link_info & I40E_AQ_LINK_UP)) &&
9204 (!(pf->flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED))) {
9205 dev_err(&pf->pdev->dev,
9206 "Rx/Tx is disabled on this device because an unsupported SFP module type was detected.\n");
9207 dev_err(&pf->pdev->dev,
9208 "Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
9209 }
9210 }
9211 }
9212
9213 /**
9214  * i40e_clean_adminq_subtask - Clean the AdminQ rings
9215  * @pf: board private structure
9216  **/
9217 static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
9218 {
9219 struct i40e_arq_event_info event;
9220 struct i40e_hw *hw = &pf->hw;
9221 u16 pending, i = 0;
9222 i40e_status ret;
9223 u16 opcode;
9224 u32 oldval;
9225 u32 val;
9226
9227 /* Do not run clean AQ when PF reset fails */
9228 if (test_bit(__I40E_RESET_FAILED, pf->state))
9229 return;
9230
9231 /* check for error indications */
9232 val = rd32(&pf->hw, pf->hw.aq.arq.len);
9233 oldval = val;
9234 if (val & I40E_PF_ARQLEN_ARQVFE_MASK) {
9235 if (hw->debug_mask & I40E_DEBUG_AQ)
9236 dev_info(&pf->pdev->dev, "ARQ VF Error detected\n");
9237 val &= ~I40E_PF_ARQLEN_ARQVFE_MASK;
9238 }
9239 if (val & I40E_PF_ARQLEN_ARQOVFL_MASK) {
9240 if (hw->debug_mask & I40E_DEBUG_AQ)
9241 dev_info(&pf->pdev->dev, "ARQ Overflow Error detected\n");
9242 val &= ~I40E_PF_ARQLEN_ARQOVFL_MASK;
9243 pf->arq_overflows++;
9244 }
9245 if (val & I40E_PF_ARQLEN_ARQCRIT_MASK) {
9246 if (hw->debug_mask & I40E_DEBUG_AQ)
9247 dev_info(&pf->pdev->dev, "ARQ Critical Error detected\n");
9248 val &= ~I40E_PF_ARQLEN_ARQCRIT_MASK;
9249 }
9250 if (oldval != val)
9251 wr32(&pf->hw, pf->hw.aq.arq.len, val);
9252
9253 val = rd32(&pf->hw, pf->hw.aq.asq.len);
9254 oldval = val;
9255 if (val & I40E_PF_ATQLEN_ATQVFE_MASK) {
9256 if (pf->hw.debug_mask & I40E_DEBUG_AQ)
9257 dev_info(&pf->pdev->dev, "ASQ VF Error detected\n");
9258 val &= ~I40E_PF_ATQLEN_ATQVFE_MASK;
9259 }
9260 if (val & I40E_PF_ATQLEN_ATQOVFL_MASK) {
9261 if (pf->hw.debug_mask & I40E_DEBUG_AQ)
9262 dev_info(&pf->pdev->dev, "ASQ Overflow Error detected\n");
9263 val &= ~I40E_PF_ATQLEN_ATQOVFL_MASK;
9264 }
9265 if (val & I40E_PF_ATQLEN_ATQCRIT_MASK) {
9266 if (pf->hw.debug_mask & I40E_DEBUG_AQ)
9267 dev_info(&pf->pdev->dev, "ASQ Critical Error detected\n");
9268 val &= ~I40E_PF_ATQLEN_ATQCRIT_MASK;
9269 }
9270 if (oldval != val)
9271 wr32(&pf->hw, pf->hw.aq.asq.len, val);
9272
9273 event.buf_len = I40E_MAX_AQ_BUF_SIZE;
9274 event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
9275 if (!event.msg_buf)
9276 return;
9277
9278 do {
9279 ret = i40e_clean_arq_element(hw, &event, &pending);
9280 if (ret == I40E_ERR_ADMIN_QUEUE_NO_WORK)
9281 break;
9282 else if (ret) {
9283 dev_info(&pf->pdev->dev, "ARQ event error %d\n", ret);
9284 break;
9285 }
9286
9287 opcode = le16_to_cpu(event.desc.opcode);
9288 switch (opcode) {
9289
9290 case i40e_aqc_opc_get_link_status:
9291 i40e_handle_link_event(pf, &event);
9292 break;
9293 case i40e_aqc_opc_send_msg_to_pf:
9294 ret = i40e_vc_process_vf_msg(pf,
9295 le16_to_cpu(event.desc.retval),
9296 le32_to_cpu(event.desc.cookie_high),
9297 le32_to_cpu(event.desc.cookie_low),
9298 event.msg_buf,
9299 event.msg_len);
9300 break;
9301 case i40e_aqc_opc_lldp_update_mib:
9302 dev_dbg(&pf->pdev->dev, "ARQ: Update LLDP MIB event received\n");
9303 #ifdef CONFIG_I40E_DCB
9304 rtnl_lock();
9305 ret = i40e_handle_lldp_event(pf, &event);
9306 rtnl_unlock();
9307 #endif
9308 break;
9309 case i40e_aqc_opc_event_lan_overflow:
9310 dev_dbg(&pf->pdev->dev, "ARQ LAN queue overflow event received\n");
9311 i40e_handle_lan_overflow_event(pf, &event);
9312 break;
9313 case i40e_aqc_opc_send_msg_to_peer:
9314 dev_info(&pf->pdev->dev, "ARQ: Msg from other pf\n");
9315 break;
9316 case i40e_aqc_opc_nvm_erase:
9317 case i40e_aqc_opc_nvm_update:
9318 case i40e_aqc_opc_oem_post_update:
9319 i40e_debug(&pf->hw, I40E_DEBUG_NVM,
9320 "ARQ NVM operation 0x%04x completed\n",
9321 opcode);
9322 break;
9323 default:
9324 dev_info(&pf->pdev->dev,
9325 "ARQ: Unknown event 0x%04x ignored\n",
9326 opcode);
9327 break;
9328 }
9329 } while (i++ < pf->adminq_work_limit);
9330
9331 if (i < pf->adminq_work_limit)
9332 clear_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state);
9333
9334 /* re-enable Admin queue interrupt cause */
9335 val = rd32(hw, I40E_PFINT_ICR0_ENA);
9336 val |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
9337 wr32(hw, I40E_PFINT_ICR0_ENA, val);
9338 i40e_flush(hw);
9339
9340 kfree(event.msg_buf);
9341 }
9342
9343
9344 /**
9345  * i40e_verify_eeprom - make sure eeprom is good to use
9346  **/
9347 static void i40e_verify_eeprom(struct i40e_pf *pf)
9348 {
9349 int err;
9350
9351 err = i40e_diag_eeprom_test(&pf->hw);
9352 if (err) {
9353 /* retry in case of garbage read */
9354 err = i40e_diag_eeprom_test(&pf->hw);
9355 if (err) {
9356 dev_info(&pf->pdev->dev, "eeprom check failed (%d), Tx/Rx traffic disabled\n",
9357 err);
9358 set_bit(__I40E_BAD_EEPROM, pf->state);
9359 }
9360 }
9361
9362 if (!err && test_bit(__I40E_BAD_EEPROM, pf->state)) {
9363 dev_info(&pf->pdev->dev, "eeprom check passed, Tx/Rx traffic enabled\n");
9364 clear_bit(__I40E_BAD_EEPROM, pf->state);
9365 }
9366 }
9367
9368 /**
9369 * i40e_enable_pf_switch_lb
9370 * @pf: pointer to the PF structure
9371 *
9372 * enable switch loop back or die - no point in a return value
9373 **/
9374 static void i40e_enable_pf_switch_lb(struct i40e_pf *pf)
9375 {
9376 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
9377 struct i40e_vsi_context ctxt;
9378 int ret;
9379
9380 ctxt.seid = pf->main_vsi_seid;
9381 ctxt.pf_num = pf->hw.pf_id;
9382 ctxt.vf_num = 0;
9383 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
9384 if (ret) {
9385 dev_info(&pf->pdev->dev,
9386 "couldn't get PF vsi config, err %s aq_err %s\n",
9387 i40e_stat_str(&pf->hw, ret),
9388 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
9389 return;
9390 }
9391 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
9392 ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
9393 ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
9394
9395 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
9396 if (ret) {
9397 dev_info(&pf->pdev->dev,
9398 "update vsi switch failed, err %s aq_err %s\n",
9399 i40e_stat_str(&pf->hw, ret),
9400 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
9401 }
9402 }
9403
9404 /**
9405 * i40e_disable_pf_switch_lb
9406 * @pf: pointer to the PF structure
9407 *
9408 * disable switch loop back or die - no point in a return value
9409 **/
9410 static void i40e_disable_pf_switch_lb(struct i40e_pf *pf)
9411 {
9412 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
9413 struct i40e_vsi_context ctxt;
9414 int ret;
9415
9416 ctxt.seid = pf->main_vsi_seid;
9417 ctxt.pf_num = pf->hw.pf_id;
9418 ctxt.vf_num = 0;
9419 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
9420 if (ret) {
9421 dev_info(&pf->pdev->dev,
9422 "couldn't get PF vsi config, err %s aq_err %s\n",
9423 i40e_stat_str(&pf->hw, ret),
9424 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
9425 return;
9426 }
9427 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
9428 ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
9429 ctxt.info.switch_id &= ~cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
9430
9431 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
9432 if (ret) {
9433 dev_info(&pf->pdev->dev,
9434 "update vsi switch failed, err %s aq_err %s\n",
9435 i40e_stat_str(&pf->hw, ret),
9436 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
9437 }
9438 }
9439
9440 /**
9441 * i40e_config_bridge_mode - Configure the HW bridge mode
9442 * @veb: pointer to the bridge instance
9443 *
9444 * Configure the PF switch loopback to match the bridge mode: VEPA
9445 * disables loopback on the PF's main VSI, while VEB (the default)
9446 * enables it.
9447 **/
9448 static void i40e_config_bridge_mode(struct i40e_veb *veb)
9449 {
9450 struct i40e_pf *pf = veb->pf;
9451
9452 if (pf->hw.debug_mask & I40E_DEBUG_LAN)
9453 dev_info(&pf->pdev->dev, "enabling bridge mode: %s\n",
9454 veb->bridge_mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
9455 if (veb->bridge_mode & BRIDGE_MODE_VEPA)
9456 i40e_disable_pf_switch_lb(pf);
9457 else
9458 i40e_enable_pf_switch_lb(pf);
9459 }
9460
9461
9462 /**
9463 * i40e_reconstitute_veb - rebuild the VEB and anything connected to it
9464 * @veb: pointer to the VEB instance
9465 *
9466 * This is a recursive function that first builds the attached VSIs then
9467 * recurses in to build the next layer of VEB.  We track the connections with
9468 * our own index numbers because the SEIDs from the HW could change across a reset.
9469 **/
9470 static int i40e_reconstitute_veb(struct i40e_veb *veb)
9471 {
9472 struct i40e_vsi *ctl_vsi = NULL;
9473 struct i40e_pf *pf = veb->pf;
9474 int v, veb_idx;
9475 int ret;
9476
9477 /* build VSI that owns this VEB, temporarily attached to base VEB */
9478 for (v = 0; v < pf->num_alloc_vsi && !ctl_vsi; v++) {
9479 if (pf->vsi[v] &&
9480 pf->vsi[v]->veb_idx == veb->idx &&
9481 pf->vsi[v]->flags & I40E_VSI_FLAG_VEB_OWNER) {
9482 ctl_vsi = pf->vsi[v];
9483 break;
9484 }
9485 }
9486 if (!ctl_vsi) {
9487 dev_info(&pf->pdev->dev,
9488 "missing owner VSI for veb_idx %d\n", veb->idx);
9489 ret = -ENOENT;
9490 goto end_reconstitute;
9491 }
9492 if (ctl_vsi != pf->vsi[pf->lan_vsi])
9493 ctl_vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
9494 ret = i40e_add_vsi(ctl_vsi);
9495 if (ret) {
9496 dev_info(&pf->pdev->dev,
9497 "rebuild of veb_idx %d owner VSI failed: %d\n",
9498 veb->idx, ret);
9499 goto end_reconstitute;
9500 }
9501 i40e_vsi_reset_stats(ctl_vsi);
9502
9503 /* create the VEB in the switch */
9504 ret = i40e_add_veb(veb, ctl_vsi);
9505 if (ret)
9506 goto end_reconstitute;
9507
9508 if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
9509 veb->bridge_mode = BRIDGE_MODE_VEB;
9510 else
9511 veb->bridge_mode = BRIDGE_MODE_VEPA;
9512 i40e_config_bridge_mode(veb);
9513
9514 /* create the remaining VSIs attached to this VEB */
9515 for (v = 0; v < pf->num_alloc_vsi; v++) {
9516 if (!pf->vsi[v] || pf->vsi[v] == ctl_vsi)
9517 continue;
9518
9519 if (pf->vsi[v]->veb_idx == veb->idx) {
9520 struct i40e_vsi *vsi = pf->vsi[v];
9521
9522 vsi->uplink_seid = veb->seid;
9523 ret = i40e_add_vsi(vsi);
9524 if (ret) {
9525 dev_info(&pf->pdev->dev,
9526 "rebuild of vsi_idx %d failed: %d\n",
9527 v, ret);
9528 goto end_reconstitute;
9529 }
9530 i40e_vsi_reset_stats(vsi);
9531 }
9532 }
9533
9534 /* create any VEBs attached to this VEB - RECURSION */
9535 for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
9536 if (pf->veb[veb_idx] && pf->veb[veb_idx]->veb_idx == veb->idx) {
9537 pf->veb[veb_idx]->uplink_seid = veb->seid;
9538 ret = i40e_reconstitute_veb(pf->veb[veb_idx]);
9539 if (ret)
9540 break;
9541 }
9542 }
9543
9544 end_reconstitute:
9545 return ret;
9546 }
9547 /**
9548 * i40e_get_capabilities - get info about the HW
9549 * @pf: the PF struct
9550 * @list_type: AQ capability to be queried
9551 **/
9552 static int i40e_get_capabilities(struct i40e_pf *pf,
9553 enum i40e_admin_queue_opc list_type)
9554 {
9555 struct i40e_aqc_list_capabilities_element_resp *cap_buf;
9556 u16 data_size;
9557 int buf_len;
9558 int err;
9559
9560 buf_len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
9561 do {
9562 cap_buf = kzalloc(buf_len, GFP_KERNEL);
9563 if (!cap_buf)
9564 return -ENOMEM;
9565
9566 /* this loads the data into the hw struct for us */
9567 err = i40e_aq_discover_capabilities(&pf->hw, cap_buf, buf_len,
9568 &data_size, list_type,
9569 NULL);
9570
9571 kfree(cap_buf);
9572
9573 if (pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) {
9574 /* retry with a larger buffer */
9575 buf_len = data_size;
9576 } else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) {
9577 dev_info(&pf->pdev->dev,
9578 "capability discovery failed, err %s aq_err %s\n",
9579 i40e_stat_str(&pf->hw, err),
9580 i40e_aq_str(&pf->hw,
9581 pf->hw.aq.asq_last_status));
9582 return -ENODEV;
9583 }
9584 } while (err);
9585
9586 if (pf->hw.debug_mask & I40E_DEBUG_USER) {
9587 if (list_type == i40e_aqc_opc_list_func_capabilities) {
9588 dev_info(&pf->pdev->dev,
9589 "pf=%d, num_vfs=%d, msix_pf=%d, msix_vf=%d, fd_g=%d, fd_b=%d, pf_max_q=%d num_vsi=%d\n",
9590 pf->hw.pf_id, pf->hw.func_caps.num_vfs,
9591 pf->hw.func_caps.num_msix_vectors,
9592 pf->hw.func_caps.num_msix_vectors_vf,
9593 pf->hw.func_caps.fd_filters_guaranteed,
9594 pf->hw.func_caps.fd_filters_best_effort,
9595 pf->hw.func_caps.num_tx_qp,
9596 pf->hw.func_caps.num_vsis);
9597 } else if (list_type == i40e_aqc_opc_list_dev_capabilities) {
9598 dev_info(&pf->pdev->dev,
9599 "switch_mode=0x%04x, function_valid=0x%08x\n",
9600 pf->hw.dev_caps.switch_mode,
9601 pf->hw.dev_caps.valid_functions);
9602 dev_info(&pf->pdev->dev,
9603 "SR-IOV=%d, num_vfs for all function=%u\n",
9604 pf->hw.dev_caps.sr_iov_1_1,
9605 pf->hw.dev_caps.num_vfs);
9606 dev_info(&pf->pdev->dev,
9607 "num_vsis=%u, num_rx:%u, num_tx=%u\n",
9608 pf->hw.dev_caps.num_vsis,
9609 pf->hw.dev_caps.num_rx_qp,
9610 pf->hw.dev_caps.num_tx_qp);
9611 }
9612 }
9613 if (list_type == i40e_aqc_opc_list_func_capabilities) {
9614 #define DEF_NUM_VSI (1 + (pf->hw.func_caps.fcoe ? 1 : 0) \
9615 + pf->hw.func_caps.num_vfs)
9616 if (pf->hw.revision_id == 0 &&
9617 pf->hw.func_caps.num_vsis < DEF_NUM_VSI) {
9618 dev_info(&pf->pdev->dev,
9619 "got num_vsis %d, setting num_vsis to %d\n",
9620 pf->hw.func_caps.num_vsis, DEF_NUM_VSI);
9621 pf->hw.func_caps.num_vsis = DEF_NUM_VSI;
9622 }
9623 }
9624 return 0;
9625 }
9626
9627 static int i40e_vsi_clear(struct i40e_vsi *vsi);
9628
9629 /**
9630 * i40e_fdir_sb_setup - initialize the Flow Director resources for Sideband
9631 * @pf: board private structure
9632 **/
9633 static void i40e_fdir_sb_setup(struct i40e_pf *pf)
9634 {
9635 struct i40e_vsi *vsi;
9636
9637
9638 /* quick workaround for an NVM issue that leaves a critical register
9639 * uninitialized */
9640 if (!rd32(&pf->hw, I40E_GLQF_HKEY(0))) {
9641 static const u32 hkey[] = {
9642 0xe640d33f, 0xcdfe98ab, 0x73fa7161, 0x0d7a7d36,
9643 0xeacb7d61, 0xaa4f05b6, 0x9c5c89ed, 0xfc425ddb,
9644 0xa4654832, 0xfc7461d4, 0x8f827619, 0xf5c63c21,
9645 0x95b3a76d};
9646 int i;
9647
9648 for (i = 0; i <= I40E_GLQF_HKEY_MAX_INDEX; i++)
9649 wr32(&pf->hw, I40E_GLQF_HKEY(i), hkey[i]);
9650 }
9651
9652 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
9653 return;
9654
9655 /* find existing VSI and see if it needs configuring */
9656 vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR);
9657
9658 /* create a new VSI if none exists */
9659 if (!vsi) {
9660 vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR,
9661 pf->vsi[pf->lan_vsi]->seid, 0);
9662 if (!vsi) {
9663 dev_info(&pf->pdev->dev, "Couldn't create FDir VSI\n");
9664 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
9665 pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
9666 return;
9667 }
9668 }
9669
9670 i40e_vsi_setup_irqhandler(vsi, i40e_fdir_clean_ring);
9671 }
9672
9673 /**
9674 * i40e_fdir_teardown - release the Flow Director resources
9675 * @pf: board private structure
9676 **/
9677 static void i40e_fdir_teardown(struct i40e_pf *pf)
9678 {
9679 struct i40e_vsi *vsi;
9680
9681 i40e_fdir_filter_exit(pf);
9682 vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR);
9683 if (vsi)
9684 i40e_vsi_release(vsi);
9685 }
9686
9687 /**
9688 * i40e_rebuild_cloud_filters - Rebuilds cloud filters for VSIs
9689 * @vsi: PF main vsi
9690 * @seid: vsi or seid of the channel VSI
9691 *
9692 * Rebuilds cloud filters associated with this vsi, for a vsi seid or
9693 * MAIN VSI seid.
9694 **/
9695 static int i40e_rebuild_cloud_filters(struct i40e_vsi *vsi, u16 seid)
9696 {
9697 struct i40e_cloud_filter *cfilter;
9698 struct i40e_pf *pf = vsi->back;
9699 struct hlist_node *node;
9700 i40e_status ret;
9701
9702 /* Add cloud filters back if they exist */
9703 hlist_for_each_entry_safe(cfilter, node, &pf->cloud_filter_list,
9704 cloud_node) {
9705 if (cfilter->seid != seid)
9706 continue;
9707
9708 if (cfilter->dst_port)
9709 ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter,
9710 true);
9711 else
9712 ret = i40e_add_del_cloud_filter(vsi, cfilter, true);
9713
9714 if (ret) {
9715 dev_dbg(&pf->pdev->dev,
9716 "Failed to rebuild cloud filter, err %s aq_err %s\n",
9717 i40e_stat_str(&pf->hw, ret),
9718 i40e_aq_str(&pf->hw,
9719 pf->hw.aq.asq_last_status));
9720 return ret;
9721 }
9722 }
9723 return 0;
9724 }
9725
9726 /**
9727 * i40e_rebuild_channels - Rebuilds channel VSIs if they existed before reset
9728 * @vsi: PF main vsi
9729 *
9730 * Returns 0 on success, negative on failure
9731 **/
9732 static int i40e_rebuild_channels(struct i40e_vsi *vsi)
9733 {
9734 struct i40e_channel *ch, *ch_tmp;
9735 i40e_status ret;
9736
9737 if (list_empty(&vsi->ch_list))
9738 return 0;
9739
9740 list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
9741 if (!ch->initialized)
9742 break;
9743
9744 ret = i40e_add_channel(vsi->back, vsi->uplink_seid, ch);
9745 if (ret) {
9746 dev_info(&vsi->back->pdev->dev,
9747 "failed to rebuild channels using uplink_seid %u\n",
9748 vsi->uplink_seid);
9749 return ret;
9750 }
9751
9752 ret = i40e_channel_config_tx_ring(vsi->back, vsi, ch);
9753 if (ret) {
9754 dev_info(&vsi->back->pdev->dev,
9755 "failed to configure TX rings for channel %u\n",
9756 ch->seid);
9757 return ret;
9758 }
9759
9760 vsi->next_base_queue = vsi->next_base_queue +
9761 ch->num_queue_pairs;
9762 if (ch->max_tx_rate) {
9763 u64 credits = ch->max_tx_rate;
9764
9765 if (i40e_set_bw_limit(vsi, ch->seid,
9766 ch->max_tx_rate))
9767 return -EINVAL;
9768
9769 do_div(credits, I40E_BW_CREDIT_DIVISOR);
9770 dev_dbg(&vsi->back->pdev->dev,
9771 "Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
9772 ch->max_tx_rate,
9773 credits,
9774 ch->seid);
9775 }
9776 ret = i40e_rebuild_cloud_filters(vsi, ch->seid);
9777 if (ret) {
9778 dev_dbg(&vsi->back->pdev->dev,
9779 "Failed to rebuild cloud filters for channel VSI %u\n",
9780 ch->seid);
9781 return ret;
9782 }
9783 }
9784 return 0;
9785 }
9786
9787 /**
9788 * i40e_prep_for_reset - prep for the core to reset
9789 * @pf: board private structure
9790 * @lock_acquired: indicates whether or not the lock has been acquired
9791 * before this function was called.
9792 *
9793 * Close up the VFs and other things in prep for PF Reset.
9794 **/
9795 static void i40e_prep_for_reset(struct i40e_pf *pf, bool lock_acquired)
9796 {
9797 struct i40e_hw *hw = &pf->hw;
9798 i40e_status ret = 0;
9799 u32 v;
9800
9801 clear_bit(__I40E_RESET_INTR_RECEIVED, pf->state);
9802 if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
9803 return;
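/* notify the VFs of the coming reset while the AdminQ is still alive */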
9804 if (i40e_check_asq_alive(&pf->hw))
9805 i40e_vc_notify_reset(pf);
9806
9807 dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n");
9808
9809 /* quiesce the VSIs and their queues that are not already DOWN */
9810 /* pf_quiesce_all_vsi modifies netdev structures - rtnl_lock needed */
9811 if (!lock_acquired)
9812 rtnl_lock();
9813 i40e_pf_quiesce_all_vsi(pf);
9814 if (!lock_acquired)
9815 rtnl_unlock();
9816
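/* the switch will be rebuilt, so invalidate the cached VSI seids */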
9817 for (v = 0; v < pf->num_alloc_vsi; v++) {
9818 if (pf->vsi[v])
9819 pf->vsi[v]->seid = 0;
9820 }
9821
9822 i40e_shutdown_adminq(&pf->hw);
9823
9824 /* call shutdown HMC */
9825 if (hw->hmc.hmc_obj) {
9826 ret = i40e_shutdown_lan_hmc(hw);
9827 if (ret)
9828 dev_warn(&pf->pdev->dev,
9829 "shutdown_lan_hmc failed: %d\n", ret);
9830 }
9831
9832 /* Save the current PTP time so that we can restore the time after the
9833 * reset completes.
9834 */
9835 i40e_ptp_save_hw_time(pf);
9836 }
9837
9838 /**
9839 * i40e_send_version - update firmware with driver version info
9840 * @pf: PF struct
9841 **/
9842 static void i40e_send_version(struct i40e_pf *pf)
9843 {
9844 struct i40e_driver_version dv;
9845
9846 dv.major_version = DRV_VERSION_MAJOR;
9847 dv.minor_version = DRV_VERSION_MINOR;
9848 dv.build_version = DRV_VERSION_BUILD;
9849 dv.subbuild_version = 0;
9850 strlcpy(dv.driver_string, DRV_VERSION, sizeof(dv.driver_string));
9851 i40e_aq_send_driver_version(&pf->hw, &dv, NULL);
9852 }
9853
9854 /**
9855 * i40e_get_oem_version - get OEM version info
9856 * @hw: pointer to the hardware structure
9857 **/
9858 static void i40e_get_oem_version(struct i40e_hw *hw)
9859 {
9860 u16 block_offset = 0xffff;
9861 u16 block_length = 0;
9862 u16 capabilities = 0;
9863 u16 gen_snap = 0;
9864 u16 release = 0;
9865
9866 #define I40E_SR_NVM_OEM_VERSION_PTR 0x1B
9867 #define I40E_NVM_OEM_LENGTH_OFFSET 0x00
9868 #define I40E_NVM_OEM_CAPABILITIES_OFFSET 0x01
9869 #define I40E_NVM_OEM_GEN_OFFSET 0x02
9870 #define I40E_NVM_OEM_RELEASE_OFFSET 0x03
9871 #define I40E_NVM_OEM_CAPABILITIES_MASK 0x000F
9872 #define I40E_NVM_OEM_LENGTH 3
9873
9874 /* Check if pointer to OEM version block is valid. */
9875 i40e_read_nvm_word(hw, I40E_SR_NVM_OEM_VERSION_PTR, &block_offset);
9876 if (block_offset == 0xffff)
9877 return;
9878
9879 /* Check if OEM version block has correct length. */
9880 i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_LENGTH_OFFSET,
9881 &block_length);
9882 if (block_length < I40E_NVM_OEM_LENGTH)
9883 return;
9884
9885 /* Check if OEM version format is as expected. */
9886 i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_CAPABILITIES_OFFSET,
9887 &capabilities);
9888 if ((capabilities & I40E_NVM_OEM_CAPABILITIES_MASK) != 0)
9889 return;
9890
9891 i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_GEN_OFFSET,
9892 &gen_snap);
9893 i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_RELEASE_OFFSET,
9894 &release);
9895 hw->nvm.oem_ver = (gen_snap << I40E_OEM_SNAP_SHIFT) | release;
9896 hw->nvm.eetrack = I40E_OEM_EETRACK_ID;
9897 }
9898
9899 /**
9900 * i40e_reset - wait for core reset to finish reset, reset pf if corer not seen
9901 * @pf: board private structure
9902 **/
9903 static int i40e_reset(struct i40e_pf *pf)
9904 {
9905 struct i40e_hw *hw = &pf->hw;
9906 i40e_status ret;
9907
9908 ret = i40e_pf_reset(hw);
9909 if (ret) {
9910 dev_info(&pf->pdev->dev, "PF reset failed, %d\n", ret);
9911 set_bit(__I40E_RESET_FAILED, pf->state);
9912 clear_bit(__I40E_RESET_RECOVERY_PENDING, pf->state);
9913 } else {
9914 pf->pfr_count++;
9915 }
9916 return ret;
9917 }
9918
9919 /**
9920 * i40e_rebuild - rebuild using a saved config
9921 * @pf: board private structure
9922 * @reinit: if the Main VSI needs to re-initialized.
9923 * @lock_acquired: indicates whether or not the lock has been acquired
9924 * before this function was called.
9925 **/
9926 static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
9927 {
9928 int old_recovery_mode_bit = test_bit(__I40E_RECOVERY_MODE, pf->state);
9929 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
9930 struct i40e_hw *hw = &pf->hw;
9931 u8 set_fc_aq_fail = 0;
9932 i40e_status ret;
9933 u32 val;
9934 int v;
9935
9936 if (test_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state) &&
9937 i40e_check_recovery_mode(pf)) {
9938 i40e_set_ethtool_ops(pf->vsi[pf->lan_vsi]->netdev);
9939 }
9940
9941 if (test_bit(__I40E_DOWN, pf->state) &&
9942 !test_bit(__I40E_RECOVERY_MODE, pf->state) &&
9943 !old_recovery_mode_bit)
9944 goto clear_recovery;
9945 dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n");
9946
9947 /* rebuild the basics for the AdminQ, HMC, and initial HW switch */
9948 ret = i40e_init_adminq(&pf->hw);
9949 if (ret) {
9950 dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, err %s aq_err %s\n",
9951 i40e_stat_str(&pf->hw, ret),
9952 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
9953 goto clear_recovery;
9954 }
9955 i40e_get_oem_version(&pf->hw);
9956
9957 if (test_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state) &&
9958 ((hw->aq.fw_maj_ver == 4 && hw->aq.fw_min_ver <= 33) ||
9959 hw->aq.fw_maj_ver < 4) && hw->mac.type == I40E_MAC_XL710) {
9960 /* The following delay is necessary for 4.33 firmware and older
9961 * to recover after EMP reset. 200 ms should suffice but we
9962 * put here 300 ms to be sure that FW is ready to operate
9963 * after reset.
9964 */
9965 mdelay(300);
9966 }
9967
9968
9969 /* re-verify the eeprom if we just had an EMP reset */
9970 i40e_verify_eeprom(pf);
9971
9972
9973 /* if we are going out of or into recovery mode we have to act
9974 * accordingly with regard to resources initialization
9975 * and deinitialization */
9976 if (test_bit(__I40E_RECOVERY_MODE, pf->state) ||
9977 old_recovery_mode_bit) {
9978 if (i40e_get_capabilities(pf,
9979 i40e_aqc_opc_list_func_capabilities))
9980 goto end_unlock;
9981
9982 if (test_bit(__I40E_RECOVERY_MODE, pf->state)) {
9983 /* we're staying in recovery mode so we'll reinitialize
9984 * the misc vector here
9985 */
9986 if (i40e_setup_misc_vector_for_recovery_mode(pf))
9987 goto end_unlock;
9988 } else {
9989 if (!lock_acquired)
9990 rtnl_lock();
9991 /* we're going out of recovery mode so we'll free
9992 * the IRQ allocated specifically for recovery mode
9993 * and restore the interrupt scheme
9994 */
9995 free_irq(pf->pdev->irq, pf);
9996 i40e_clear_interrupt_scheme(pf);
9997 if (i40e_restore_interrupt_scheme(pf))
9998 goto end_unlock;
9999 }
10000
10001 /* tell the firmware that we're starting */
10002 i40e_send_version(pf);
10003
10004
10005 /* bail out in case recovery mode was detected, as there is
10006 * no need for further configuration */
10007 goto end_unlock;
10008 }
10009
10010 i40e_clear_pxe_mode(hw);
10011 ret = i40e_get_capabilities(pf, i40e_aqc_opc_list_func_capabilities);
10012 if (ret)
10013 goto end_core_reset;
10014
10015 ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
10016 hw->func_caps.num_rx_qp, 0, 0);
10017 if (ret) {
10018 dev_info(&pf->pdev->dev, "init_lan_hmc failed: %d\n", ret);
10019 goto end_core_reset;
10020 }
10021 ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
10022 if (ret) {
10023 dev_info(&pf->pdev->dev, "configure_lan_hmc failed: %d\n", ret);
10024 goto end_core_reset;
10025 }
10026
10027 /* Enable FW to write a default DCB config on link-up */
10028 i40e_aq_set_dcb_parameters(hw, true, NULL);
10029
10030 #ifdef CONFIG_I40E_DCB
10031 ret = i40e_init_pf_dcb(pf);
10032 if (ret) {
10033 dev_info(&pf->pdev->dev, "DCB init failed %d, disabled\n", ret);
10034 pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
10035 /* Continue without DCB enabled */
10036 }
10037 #endif
10038 /* do basic switch setup */
10039 if (!lock_acquired)
10040 rtnl_lock();
10041 ret = i40e_setup_pf_switch(pf, reinit);
10042 if (ret)
10043 goto end_unlock;
10044
10045 /* The driver only wants link up/down and module qualification
10046 * reports from firmware.  Note the negative logic.
10047 */
10048 ret = i40e_aq_set_phy_int_mask(&pf->hw,
10049 ~(I40E_AQ_EVENT_LINK_UPDOWN |
10050 I40E_AQ_EVENT_MEDIA_NA |
10051 I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
10052 if (ret)
10053 dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
10054 i40e_stat_str(&pf->hw, ret),
10055 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
10056
10057 /* make sure our flow control settings are restored */
10058 ret = i40e_set_fc(&pf->hw, &set_fc_aq_fail, true);
10059 if (ret)
10060 dev_dbg(&pf->pdev->dev, "setting flow control: ret = %s last_status = %s\n",
10061 i40e_stat_str(&pf->hw, ret),
10062 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
10063
10064 /* Rebuild the VSIs and VEBs that existed before reset.
10065 * They are still in our local switch element arrays, so only
10066 * need to rebuild the switch model in the HW.
10067 *
10068 * If there were VEBs but the reconstitution failed, we'll try
10069 * to recover minimal use by getting the basic PF VSI working.
10070 */
10071 if (vsi->uplink_seid != pf->mac_seid) {
10072 dev_dbg(&pf->pdev->dev, "attempting to rebuild switch\n");
10073 /* find the one VEB connected to the MAC, and find orphans */
10074 for (v = 0; v < I40E_MAX_VEB; v++) {
10075 if (!pf->veb[v])
10076 continue;
10077
10078 if (pf->veb[v]->uplink_seid == pf->mac_seid ||
10079 pf->veb[v]->uplink_seid == 0) {
10080 ret = i40e_reconstitute_veb(pf->veb[v]);
10081
10082 if (!ret)
10083 continue;
10084
10085 /* If the Main VEB failed, we're stuck,
10086 * so give up rebuilding the switch and set up
10087 * for minimal rebuild of the PF VSI.
10088 * If an orphan VEB failed, we'll report the error
10089 * but try to keep going.
10090 */
10091 if (pf->veb[v]->uplink_seid == pf->mac_seid) {
10092 dev_info(&pf->pdev->dev,
10093 "rebuild of switch failed: %d, will try to set up simple PF connection\n",
10094 ret);
10095 vsi->uplink_seid = pf->mac_seid;
10096 break;
10097 } else if (pf->veb[v]->uplink_seid == 0) {
10098 dev_info(&pf->pdev->dev,
10099 "rebuild of orphan VEB failed: %d\n",
10100 ret);
10101 }
10102 }
10103 }
10104 }
10105
10106 if (vsi->uplink_seid == pf->mac_seid) {
10107 dev_dbg(&pf->pdev->dev, "attempting to rebuild PF VSI\n");
10108 /* no VEB, so rebuild only the Main VSI */
10109 ret = i40e_add_vsi(vsi);
10110 if (ret) {
10111 dev_info(&pf->pdev->dev,
10112 "rebuild of Main VSI failed: %d\n", ret);
10113 goto end_unlock;
10114 }
10115 }
10116
10117 if (vsi->mqprio_qopt.max_rate[0]) {
10118 u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0];
10119 u64 credits = 0;
10120
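/* mqprio max_rate is given in bytes per second; convert to Mbps before programming the limit */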
10121 do_div(max_tx_rate, I40E_BW_MBPS_DIVISOR);
10122 ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
10123 if (ret)
10124 goto end_unlock;
10125
10126 credits = max_tx_rate;
10127 do_div(credits, I40E_BW_CREDIT_DIVISOR);
10128 dev_dbg(&vsi->back->pdev->dev,
10129 "Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
10130 max_tx_rate,
10131 credits,
10132 vsi->seid);
10133 }
10134
10135 ret = i40e_rebuild_cloud_filters(vsi, vsi->seid);
10136 if (ret)
10137 goto end_unlock;
10138
10139 /* PF Main VSI is rebuilt by now, go ahead and rebuild channel VSIs
10140 * for this main VSI if they exist
10141 */
10142 ret = i40e_rebuild_channels(vsi);
10143 if (ret)
10144 goto end_unlock;
10145
10146 /* Reconfigure hardware for allowing smaller MSS in the case
10147 * of TSO, so that we avoid the MDD being fired and causing
10148 * a reset in the case of small MSS+TSO.
10149 */
10150 #define I40E_REG_MSS 0x000E64DC
10151 #define I40E_REG_MSS_MIN_MASK 0x3FF0000
10152 #define I40E_64BYTE_MSS 0x400000
10153 val = rd32(hw, I40E_REG_MSS);
10154 if ((val & I40E_REG_MSS_MIN_MASK) > I40E_64BYTE_MSS) {
10155 val &= ~I40E_REG_MSS_MIN_MASK;
10156 val |= I40E_64BYTE_MSS;
10157 wr32(hw, I40E_REG_MSS, val);
10158 }
10159
10160 if (pf->hw_features & I40E_HW_RESTART_AUTONEG) {
10161 msleep(75);
10162 ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
10163 if (ret)
10164 dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n",
10165 i40e_stat_str(&pf->hw, ret),
10166 i40e_aq_str(&pf->hw,
10167 pf->hw.aq.asq_last_status));
10168 }
10169
10170 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
10171 ret = i40e_setup_misc_vector(pf);
10172
10173
10174 /* Add a filter to drop all Flow control frames from any VSI from being
10175 * transmitted. By doing so we stop a malicious VF from sending out
10176 * PAUSE or PFC frames and potentially controlling traffic for other
10177 * VF/VM sessions or causing its own Tx hang in the kernel
10178 */
10179 i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw,
10180 pf->main_vsi_seid);
10181
10182 /* restart the VSIs that were rebuilt and running before the reset */
10183 i40e_pf_unquiesce_all_vsi(pf);
10184
10185 /* Release the RTNL lock before we start resetting VFs */
10186 if (!lock_acquired)
10187 rtnl_unlock();
10188
10189 /* Restore promiscuous settings */
10190 ret = i40e_set_promiscuous(pf, pf->cur_promisc);
10191 if (ret)
10192 dev_warn(&pf->pdev->dev,
10193 "Failed to restore promiscuous setting: %s, err %s aq_err %s\n",
10194 pf->cur_promisc ? "on" : "off",
10195 i40e_stat_str(&pf->hw, ret),
10196 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
10197
10198 i40e_reset_all_vfs(pf, true);
10199
10200 /* tell the firmware that we're starting */
10201 i40e_send_version(pf);
10202
10203 /* We've already released the lock, so don't do it again */
10204 goto end_core_reset;
10205
10206 end_unlock:
10207 if (!lock_acquired)
10208 rtnl_unlock();
10209 end_core_reset:
10210 clear_bit(__I40E_RESET_FAILED, pf->state);
10211 clear_recovery:
10212 clear_bit(__I40E_RESET_RECOVERY_PENDING, pf->state);
10213 clear_bit(__I40E_TIMEOUT_RECOVERY_PENDING, pf->state);
10214 }
10215
10216
10217 /**
10218 * i40e_reset_and_rebuild - reset and rebuild using a saved config
10219 * @pf: board private structure
10220 * @reinit: if the Main VSI needs to re-initialized.
10221 * @lock_acquired: whether the lock was acquired before this call
10222 **/
10223 static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit,
10224 bool lock_acquired)
10225 {
10226 int ret;
10227 /* Now we wait for GRST to settle out.
10228 * We don't have to delete the VEBs or VSIs from the hw switch
10229 * because the reset will make them disappear.
10230 */
10231 ret = i40e_reset(pf);
10232 if (!ret)
10233 i40e_rebuild(pf, reinit, lock_acquired);
10234 }
10235
10236
10237 /**
10238 * i40e_handle_reset_warning - prep for the PF to reset, reset and rebuild
10239 * @pf: board private structure
10240 * @lock_acquired: indicates whether or not the lock has been acquired
10241 * before this function was called.
10242 *
10243 * Close up the VFs and other things in prep for a Core Reset, then rebuild.
10244 **/
10245 static void i40e_handle_reset_warning(struct i40e_pf *pf, bool lock_acquired)
10246 {
10247 i40e_prep_for_reset(pf, lock_acquired);
10248 i40e_reset_and_rebuild(pf, false, lock_acquired);
10249 }
10250
10251
10252 /**
10253 * i40e_handle_mdd_event
10254 * @pf: pointer to the PF structure
10255 * Called from the MDD irq handler to identify possibly malicious vfs
10256 **/
10257 static void i40e_handle_mdd_event(struct i40e_pf *pf)
10258 {
10259 struct i40e_hw *hw = &pf->hw;
10260 bool mdd_detected = false;
10261 struct i40e_vf *vf;
10262 u32 reg;
10263 int i;
10264
10265 if (!test_bit(__I40E_MDD_EVENT_PENDING, pf->state))
10266 return;
10267
10268 /* find what triggered the MDD event */
10269 reg = rd32(hw, I40E_GL_MDET_TX);
10270 if (reg & I40E_GL_MDET_TX_VALID_MASK) {
10271 u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
10272 I40E_GL_MDET_TX_PF_NUM_SHIFT;
10273 u16 vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >>
10274 I40E_GL_MDET_TX_VF_NUM_SHIFT;
10275 u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
10276 I40E_GL_MDET_TX_EVENT_SHIFT;
10277 u16 queue = ((reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
10278 I40E_GL_MDET_TX_QUEUE_SHIFT) -
10279 pf->hw.func_caps.base_queue;
10280 if (netif_msg_tx_err(pf))
10281 dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on TX queue %d PF number 0x%02x VF number 0x%02x\n",
10282 event, queue, pf_num, vf_num);
10283 wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
10284 mdd_detected = true;
10285 }
10286 reg = rd32(hw, I40E_GL_MDET_RX);
10287 if (reg & I40E_GL_MDET_RX_VALID_MASK) {
10288 u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
10289 I40E_GL_MDET_RX_FUNCTION_SHIFT;
10290 u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
10291 I40E_GL_MDET_RX_EVENT_SHIFT;
10292 u16 queue = ((reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
10293 I40E_GL_MDET_RX_QUEUE_SHIFT) -
10294 pf->hw.func_caps.base_queue;
10295 if (netif_msg_rx_err(pf))
10296 dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on RX queue %d of function 0x%02x\n",
10297 event, queue, func);
10298 wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
10299 mdd_detected = true;
10300 }
10301
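/* if this PF caused the event, clear its per-PF latches so new events can be recorded */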
10302 if (mdd_detected) {
10303 reg = rd32(hw, I40E_PF_MDET_TX);
10304 if (reg & I40E_PF_MDET_TX_VALID_MASK) {
10305 wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
10306 dev_dbg(&pf->pdev->dev, "TX driver issue detected on PF\n");
10307 }
10308 reg = rd32(hw, I40E_PF_MDET_RX);
10309 if (reg & I40E_PF_MDET_RX_VALID_MASK) {
10310 wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
10311 dev_dbg(&pf->pdev->dev, "RX driver issue detected on PF\n");
10312 }
10313 }
10314
10315 /* see if one of the VFs needs its hand slapped */
10316 for (i = 0; i < pf->num_alloc_vfs && mdd_detected; i++) {
10317 vf = &(pf->vf[i]);
10318 reg = rd32(hw, I40E_VP_MDET_TX(i));
10319 if (reg & I40E_VP_MDET_TX_VALID_MASK) {
10320 wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF);
10321 vf->num_mdd_events++;
10322 dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
10323 i);
10324 dev_info(&pf->pdev->dev,
10325 "Use PF Control I/F to re-enable the VF\n");
10326 set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
10327 }
10328
10329 reg = rd32(hw, I40E_VP_MDET_RX(i));
10330 if (reg & I40E_VP_MDET_RX_VALID_MASK) {
10331 wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF);
10332 vf->num_mdd_events++;
10333 dev_info(&pf->pdev->dev, "RX driver issue detected on VF %d\n",
10334 i);
10335 dev_info(&pf->pdev->dev,
10336 "Use PF Control I/F to re-enable the VF\n");
10337 set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
10338 }
10339 }
10340
10341 /* re-enable mdd interrupt cause */
10342 clear_bit(__I40E_MDD_EVENT_PENDING, pf->state);
10343 reg = rd32(hw, I40E_PFINT_ICR0_ENA);
10344 reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
10345 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
10346 i40e_flush(hw);
10347 }
10348
10349 static const char *i40e_tunnel_name(u8 type)
10350 {
10351 switch (type) {
10352 case UDP_TUNNEL_TYPE_VXLAN:
10353 return "vxlan";
10354 case UDP_TUNNEL_TYPE_GENEVE:
10355 return "geneve";
10356 default:
10357 return "unknown";
10358 }
10359 }
10360
10361 /**
10362 * i40e_sync_udp_filters - Trigger a sync event for existing UDP filters
10363 * @pf: board private structure
10364 **/
10365 static void i40e_sync_udp_filters(struct i40e_pf *pf)
10366 {
10367 int i;
10368
10369 /* loop through and set pending bit for all active UDP filters */
10370 for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
10371 if (pf->udp_ports[i].port)
10372 pf->pending_udp_bitmap |= BIT_ULL(i);
10373 }
10374
10375 set_bit(__I40E_UDP_FILTER_SYNC_PENDING, pf->state);
10376 }
10377
10378 /**
10379 * i40e_sync_udp_filters_subtask - Sync the VSI filter list with HW
10380 * @pf: board private structure
10381 **/
10382 static void i40e_sync_udp_filters_subtask(struct i40e_pf *pf)
10383 {
10384 struct i40e_hw *hw = &pf->hw;
10385 u8 filter_index, type;
10386 u16 port;
10387 int i;
10388
10389 if (!test_and_clear_bit(__I40E_UDP_FILTER_SYNC_PENDING, pf->state))
10390 return;
10391
10392 /* acquire RTNL to maintain state of flags and port requests */
10393 rtnl_lock();
10394
10395 for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
10396 if (pf->pending_udp_bitmap & BIT_ULL(i)) {
10397 struct i40e_udp_port_config *udp_port;
10398 i40e_status ret = 0;
10399
10400 udp_port = &pf->udp_ports[i];
10401 pf->pending_udp_bitmap &= ~BIT_ULL(i);
10402
10403 port = READ_ONCE(udp_port->port);
10404 type = READ_ONCE(udp_port->type);
10405 filter_index = READ_ONCE(udp_port->filter_index);
10406
10407 /* release RTNL while we wait on AQ command */
10408 rtnl_unlock();
10409
10410 if (port)
10411 ret = i40e_aq_add_udp_tunnel(hw, port,
10412 type,
10413 &filter_index,
10414 NULL);
10415 else if (filter_index != I40E_UDP_PORT_INDEX_UNUSED)
10416 ret = i40e_aq_del_udp_tunnel(hw, filter_index,
10417 NULL);
10418
10419 /* reacquire RTNL so we can update filter_index */
10420 rtnl_lock();
10421
10422 if (ret) {
10423 dev_info(&pf->pdev->dev,
10424 "%s %s port %d, index %d failed, err %s aq_err %s\n",
10425 i40e_tunnel_name(type),
10426 port ? "add" : "delete",
10427 port,
10428 filter_index,
10429 i40e_stat_str(&pf->hw, ret),
10430 i40e_aq_str(&pf->hw,
10431 pf->hw.aq.asq_last_status));
10432 if (port) {
10433 /* failed to add, just reset port,
10434 * drop pending bit for any deletion
10435 */
10436 udp_port->port = 0;
10437 pf->pending_udp_bitmap &= ~BIT_ULL(i);
10438 }
10439 } else if (port) {
10440 /* record filter index on success */
10441 udp_port->filter_index = filter_index;
10442 }
10443 }
10444 }
10445
10446 rtnl_unlock();
10447 }
10448
10449 /**
10450 * i40e_service_task - Run the driver's async subtasks
10451 * @work: pointer to work_struct containing our data
10452 **/
10453 static void i40e_service_task(struct work_struct *work)
10454 {
10455 struct i40e_pf *pf = container_of(work,
10456 struct i40e_pf,
10457 service_task);
10458 unsigned long start_time = jiffies;
10459
10460 /* don't bother with service tasks if a reset is in progress */
10461 if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) ||
10462 test_bit(__I40E_SUSPENDED, pf->state))
10463 return;
10464
10465 if (test_and_set_bit(__I40E_SERVICE_SCHED, pf->state))
10466 return;
10467
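/* in recovery mode only the reset subtask is serviced; everything else is skipped */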
10468 if (!test_bit(__I40E_RECOVERY_MODE, pf->state)) {
10469 i40e_detect_recover_hung(pf->vsi[pf->lan_vsi]);
10470 i40e_sync_filters_subtask(pf);
10471 i40e_reset_subtask(pf);
10472 i40e_handle_mdd_event(pf);
10473 i40e_vc_process_vflr_event(pf);
10474 i40e_watchdog_subtask(pf);
10475 i40e_fdir_reinit_subtask(pf);
10476 if (test_and_clear_bit(__I40E_CLIENT_RESET, pf->state)) {
10477 /* Client subtask will reopen next time through. */
10478 i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi],
10479 true);
10480 } else {
10481 i40e_client_subtask(pf);
10482 if (test_and_clear_bit(__I40E_CLIENT_L2_CHANGE,
10483 pf->state))
10484 i40e_notify_client_of_l2_param_changes(
10485 pf->vsi[pf->lan_vsi]);
10486 }
10487 i40e_sync_filters_subtask(pf);
10488 i40e_sync_udp_filters_subtask(pf);
10489 } else {
10490 i40e_reset_subtask(pf);
10491 }
10492
10493 i40e_clean_adminq_subtask(pf);
10494
10495 /* flush memory to make sure state is correct before next watchdog */
10496 smp_mb__before_atomic();
10497 clear_bit(__I40E_SERVICE_SCHED, pf->state);
10498
10499 /* If the tasks have taken longer than one timer cycle or there
10500 * is more work to be done, reschedule the service task now
10501 * rather than wait for the timer to tick again.
10502 */
10503 if (time_after(jiffies, (start_time + pf->service_timer_period)) ||
10504 test_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state) ||
10505 test_bit(__I40E_MDD_EVENT_PENDING, pf->state) ||
10506 test_bit(__I40E_VFLR_EVENT_PENDING, pf->state))
10507 i40e_service_event_schedule(pf);
10508 }
10509
10510 /**
10511 * i40e_service_timer - timer callback
10512 * @t: timer list pointer
10513 **/
10514 static void i40e_service_timer(struct timer_list *t)
10515 {
10516 struct i40e_pf *pf = from_timer(pf, t, service_timer);
10517
10518 mod_timer(&pf->service_timer,
10519 round_jiffies(jiffies + pf->service_timer_period));
10520 i40e_service_event_schedule(pf);
10521 }
10522
10523 /**
10524 * i40e_set_num_rings_in_vsi - Determine number of rings in the VSI
10525 * @vsi: the VSI being configured
10526 **/
10527 static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi)
10528 {
10529 struct i40e_pf *pf = vsi->back;
10530
10531 switch (vsi->type) {
10532 case I40E_VSI_MAIN:
10533 vsi->alloc_queue_pairs = pf->num_lan_qps;
10534 if (!vsi->num_tx_desc)
10535 vsi->num_tx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
10536 I40E_REQ_DESCRIPTOR_MULTIPLE);
10537 if (!vsi->num_rx_desc)
10538 vsi->num_rx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
10539 I40E_REQ_DESCRIPTOR_MULTIPLE);
10540 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
10541 vsi->num_q_vectors = pf->num_lan_msix;
10542 else
10543 vsi->num_q_vectors = 1;
10544
10545 break;
10546
10547 case I40E_VSI_FDIR:
10548 vsi->alloc_queue_pairs = 1;
10549 vsi->num_tx_desc = ALIGN(I40E_FDIR_RING_COUNT,
10550 I40E_REQ_DESCRIPTOR_MULTIPLE);
10551 vsi->num_rx_desc = ALIGN(I40E_FDIR_RING_COUNT,
10552 I40E_REQ_DESCRIPTOR_MULTIPLE);
10553 vsi->num_q_vectors = pf->num_fdsb_msix;
10554 break;
10555
10556 case I40E_VSI_VMDQ2:
10557 vsi->alloc_queue_pairs = pf->num_vmdq_qps;
10558 if (!vsi->num_tx_desc)
10559 vsi->num_tx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
10560 I40E_REQ_DESCRIPTOR_MULTIPLE);
10561 if (!vsi->num_rx_desc)
10562 vsi->num_rx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
10563 I40E_REQ_DESCRIPTOR_MULTIPLE);
10564 vsi->num_q_vectors = pf->num_vmdq_msix;
10565 break;
10566
10567 case I40E_VSI_SRIOV:
10568 vsi->alloc_queue_pairs = pf->num_vf_qps;
10569 if (!vsi->num_tx_desc)
10570 vsi->num_tx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
10571 I40E_REQ_DESCRIPTOR_MULTIPLE);
10572 if (!vsi->num_rx_desc)
10573 vsi->num_rx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
10574 I40E_REQ_DESCRIPTOR_MULTIPLE);
10575 break;
10576
10577 default:
10578 WARN_ON(1);
10579 return -ENODATA;
10580 }
10581
10582 return 0;
10583 }
10584
10585 /**
10586 * i40e_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the vsi
10587 * @vsi: VSI pointer
10588 * @alloc_qvectors: a bool to specify if q_vectors need to be allocated.
10589 *
10590 * On error: returns error code (negative)
10591 * On success: returns 0
10592 **/
10593 static int i40e_vsi_alloc_arrays(struct i40e_vsi *vsi, bool alloc_qvectors)
10594 {
10595 struct i40e_ring **next_rings;
10596 int size;
10597 int ret = 0;
10598
10599 /* allocate memory for both Tx, XDP Tx and Rx ring pointers */
10600 size = sizeof(struct i40e_ring *) * vsi->alloc_queue_pairs *
10601 (i40e_enabled_xdp_vsi(vsi) ? 3 : 2);
10602 vsi->tx_rings = kzalloc(size, GFP_KERNEL);
10603 if (!vsi->tx_rings)
10604 return -ENOMEM;
10605 next_rings = vsi->tx_rings + vsi->alloc_queue_pairs;
10606 if (i40e_enabled_xdp_vsi(vsi)) {
10607 vsi->xdp_rings = next_rings;
10608 next_rings += vsi->alloc_queue_pairs;
10609 }
10610 vsi->rx_rings = next_rings;
10611
10612 if (alloc_qvectors) {
10613 /* allocate memory for q_vector pointers */
10614 size = sizeof(struct i40e_q_vector *) * vsi->num_q_vectors;
10615 vsi->q_vectors = kzalloc(size, GFP_KERNEL);
10616 if (!vsi->q_vectors) {
10617 ret = -ENOMEM;
10618 goto err_vectors;
10619 }
10620 }
10621 return ret;
10622
10623 err_vectors:
10624 kfree(vsi->tx_rings);
10625 return ret;
10626 }
10627
10628 /**
10629 * i40e_vsi_mem_alloc - Allocates the next available struct VSI in the PF
10630 * @pf: board private structure
10631 * @type: type of VSI
10632 *
10633 * On error: returns error code (negative)
10634 * On success: returns vsi index in PF (positive)
10635 **/
10636 static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
10637 {
10638 int ret = -ENODEV;
10639 struct i40e_vsi *vsi;
10640 int vsi_idx;
10641 int i;
10642
10643 /* Need to protect the allocation of the VSIs at the PF level */
10644 mutex_lock(&pf->switch_mutex);
10645
10646 /* VSI list may be fragmented if VSI creation/destruction has
10647 * been happening.  We can afford to do a quick scan to look
10648 * for any free VSIs in the list.
10649 *
10650 * find next empty vsi slot, looping back around if necessary
10651 */
10652 i = pf->next_vsi;
10653 while (i < pf->num_alloc_vsi && pf->vsi[i])
10654 i++;
10655 if (i >= pf->num_alloc_vsi) {
10656 i = 0;
10657 while (i < pf->next_vsi && pf->vsi[i])
10658 i++;
10659 }
10660
10661 if (i < pf->num_alloc_vsi && !pf->vsi[i]) {
10662 vsi_idx = i;
10663 } else {
10664 ret = -ENODEV;
10665 goto unlock_pf;
10666 }
10667 pf->next_vsi = ++i;
10668
10669 vsi = kzalloc(sizeof(*vsi), GFP_KERNEL);
10670 if (!vsi) {
10671 ret = -ENOMEM;
10672 goto unlock_pf;
10673 }
10674 vsi->type = type;
10675 vsi->back = pf;
10676 set_bit(__I40E_VSI_DOWN, vsi->state);
10677 vsi->flags = 0;
10678 vsi->idx = vsi_idx;
10679 vsi->int_rate_limit = 0;
10680 vsi->rss_table_size = (vsi->type == I40E_VSI_MAIN) ?
10681 pf->rss_table_size : 64;
10682 vsi->netdev_registered = false;
10683 vsi->work_limit = I40E_DEFAULT_IRQ_WORK;
10684 hash_init(vsi->mac_filter_hash);
10685 vsi->irqs_ready = false;
10686
10687 if (type == I40E_VSI_MAIN) {
10688 vsi->af_xdp_zc_qps = bitmap_zalloc(pf->num_lan_qps, GFP_KERNEL);
10689 if (!vsi->af_xdp_zc_qps)
10690 goto err_rings;
10691 }
10692
10693 ret = i40e_set_num_rings_in_vsi(vsi);
10694 if (ret)
10695 goto err_rings;
10696
10697 ret = i40e_vsi_alloc_arrays(vsi, true);
10698 if (ret)
10699 goto err_rings;
10700
10701 /* Setup default MSIX irq handler for VSI */
10702 i40e_vsi_setup_irqhandler(vsi, i40e_msix_clean_rings);
10703
10704 /* Initialize VSI lock */
10705 spin_lock_init(&vsi->mac_filter_hash_lock);
10706 pf->vsi[vsi_idx] = vsi;
10707 ret = vsi_idx;
10708 goto unlock_pf;
10709
10710 err_rings:
10711 bitmap_free(vsi->af_xdp_zc_qps);
10712 pf->next_vsi = i - 1;
10713 kfree(vsi);
10714 unlock_pf:
10715 mutex_unlock(&pf->switch_mutex);
10716 return ret;
10717 }
10718
10719
10720 /**
10721 * i40e_vsi_free_arrays - Free queue and vector pointer arrays for the VSI
10722 * @vsi: VSI pointer
10723 * @free_qvectors: a bool to specify if q_vectors need to be freed.
10724 *
10725 * The rings themselves are freed elsewhere; this only frees the pointer arrays.
10726 **/
10727 static void i40e_vsi_free_arrays(struct i40e_vsi *vsi, bool free_qvectors)
10728 {
10729 /* free the ring and vector containers */
10730 if (free_qvectors) {
10731 kfree(vsi->q_vectors);
10732 vsi->q_vectors = NULL;
10733 }
10734 kfree(vsi->tx_rings);
10735 vsi->tx_rings = NULL;
10736 vsi->rx_rings = NULL;
10737 vsi->xdp_rings = NULL;
10738 }
10739
10740
10741 /**
10742 * i40e_clear_rss_config_user - clear the user configured RSS hash keys and LUT
10743 * @vsi: Pointer to VSI structure
10744 **/
10745 static void i40e_clear_rss_config_user(struct i40e_vsi *vsi)
10746 {
10747 if (!vsi)
10748 return;
10749
10750 kfree(vsi->rss_hkey_user);
10751 vsi->rss_hkey_user = NULL;
10752
10753 kfree(vsi->rss_lut_user);
10754 vsi->rss_lut_user = NULL;
10755 }
10756
10757 /**
10758 * i40e_vsi_clear - Deallocate the VSI provided
10759 * @vsi: the VSI being un-configured
10760 **/
10761 static int i40e_vsi_clear(struct i40e_vsi *vsi)
10762 {
10763 struct i40e_pf *pf;
10764
10765 if (!vsi)
10766 return 0;
10767
10768 if (!vsi->back)
10769 goto free_vsi;
10770 pf = vsi->back;
10771
10772 mutex_lock(&pf->switch_mutex);
10773 if (!pf->vsi[vsi->idx]) {
10774 dev_err(&pf->pdev->dev, "pf->vsi[%d] is NULL, just free vsi[%d](type %d)\n",
10775 vsi->idx, vsi->idx, vsi->type);
10776 goto unlock_vsi;
10777 }
10778
10779 if (pf->vsi[vsi->idx] != vsi) {
10780 dev_err(&pf->pdev->dev,
10781 "pf->vsi[%d](type %d) != vsi[%d](type %d): no free!\n",
10782 pf->vsi[vsi->idx]->idx,
10783 pf->vsi[vsi->idx]->type,
10784 vsi->idx, vsi->type);
10785 goto unlock_vsi;
10786 }
10787
10788 /* updates the PF for this cleared vsi */
10789 i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
10790 i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx);
10791
10792 bitmap_free(vsi->af_xdp_zc_qps);
10793 i40e_vsi_free_arrays(vsi, true);
10794 i40e_clear_rss_config_user(vsi);
10795
10796 pf->vsi[vsi->idx] = NULL;
10797 if (vsi->idx < pf->next_vsi)
10798 pf->next_vsi = vsi->idx;
10799
10800 unlock_vsi:
10801 mutex_unlock(&pf->switch_mutex);
10802 free_vsi:
10803 kfree(vsi);
10804
10805 return 0;
10806 }
10807
10808 /**
10809 * i40e_vsi_clear_rings - Deallocates the Rx and Tx rings for the provided VSI
10810 * @vsi: the VSI being cleaned
10811 **/
10812 static void i40e_vsi_clear_rings(struct i40e_vsi *vsi)
10813 {
10814 int i;
10815
10816 if (vsi->tx_rings && vsi->tx_rings[0]) {
10817 for (i = 0; i < vsi->alloc_queue_pairs; i++) {
10818 kfree_rcu(vsi->tx_rings[i], rcu);
10819 vsi->tx_rings[i] = NULL;
10820 vsi->rx_rings[i] = NULL;
10821 if (vsi->xdp_rings)
10822 vsi->xdp_rings[i] = NULL;
10823 }
10824 }
10825 }
10826
10827 /**
10828 * i40e_alloc_rings - Allocates the Rx and Tx rings for the provided VSI
10829 * @vsi: the VSI being configured
10830 **/
10831 static int i40e_alloc_rings(struct i40e_vsi *vsi)
10832 {
10833 int i, qpv = i40e_enabled_xdp_vsi(vsi) ? 3 : 2;
10834 struct i40e_pf *pf = vsi->back;
10835 struct i40e_ring *ring;
10836
10837 /* Set basic values in the rings to be used later during open() */
10838 for (i = 0; i < vsi->alloc_queue_pairs; i++) {
10839 /* allocate space for both Tx and Rx in one shot */
10840 ring = kcalloc(qpv, sizeof(struct i40e_ring), GFP_KERNEL);
10841 if (!ring)
10842 goto err_out;
10843
10844 ring->queue_index = i;
10845 ring->reg_idx = vsi->base_queue + i;
10846 ring->ring_active = false;
10847 ring->vsi = vsi;
10848 ring->netdev = vsi->netdev;
10849 ring->dev = &pf->pdev->dev;
10850 ring->count = vsi->num_tx_desc;
10851 ring->size = 0;
10852 ring->dcb_tc = 0;
10853 if (vsi->back->hw_features & I40E_HW_WB_ON_ITR_CAPABLE)
10854 ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
10855 ring->itr_setting = pf->tx_itr_default;
10856 vsi->tx_rings[i] = ring++;
10857
10858 if (!i40e_enabled_xdp_vsi(vsi))
10859 goto setup_rx;
10860
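/* set up the XDP Tx ring that shares this queue pair; it has no netdev */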
10861 ring->queue_index = vsi->alloc_queue_pairs + i;
10862 ring->reg_idx = vsi->base_queue + ring->queue_index;
10863 ring->ring_active = false;
10864 ring->vsi = vsi;
10865 ring->netdev = NULL;
10866 ring->dev = &pf->pdev->dev;
10867 ring->count = vsi->num_tx_desc;
10868 ring->size = 0;
10869 ring->dcb_tc = 0;
10870 if (vsi->back->hw_features & I40E_HW_WB_ON_ITR_CAPABLE)
10871 ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
10872 set_ring_xdp(ring);
10873 ring->itr_setting = pf->tx_itr_default;
10874 vsi->xdp_rings[i] = ring++;
10875
10876 setup_rx:
10877 ring->queue_index = i;
10878 ring->reg_idx = vsi->base_queue + i;
10879 ring->ring_active = false;
10880 ring->vsi = vsi;
10881 ring->netdev = vsi->netdev;
10882 ring->dev = &pf->pdev->dev;
10883 ring->count = vsi->num_rx_desc;
10884 ring->size = 0;
10885 ring->dcb_tc = 0;
10886 ring->itr_setting = pf->rx_itr_default;
10887 vsi->rx_rings[i] = ring;
10888 }
10889
10890 return 0;
10891
10892 err_out:
10893 i40e_vsi_clear_rings(vsi);
10894 return -ENOMEM;
10895 }
10896
10897
10898 /**
10899 * i40e_reserve_msix_vectors - Reserve MSI-X vectors in the kernel
10900 * @pf: board private structure
10901 * @vectors: the number of MSI-X vectors to request
10902 * Returns the number of vectors reserved or negative on failure
10903 **/
10904 static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int vectors)
10905 {
10906 vectors = pci_enable_msix_range(pf->pdev, pf->msix_entries,
10907 I40E_MIN_MSIX, vectors);
10908 if (vectors < 0) {
10909 dev_info(&pf->pdev->dev,
10910 "MSI-X vector reservation failed: %d\n", vectors);
10911 vectors = 0;
10912 }
10913
10914 return vectors;
10915 }
10916
10917
10918 /**
10919 * i40e_init_msix - Setup the MSIX capability
10920 * @pf: board private structure
10921 *
10922 * Work with the OS to set up the MSIX vectors needed.
10923 * Returns the number of vectors reserved or negative on failure
10924 **/
10925 static int i40e_init_msix(struct i40e_pf *pf)
10926 {
10927 struct i40e_hw *hw = &pf->hw;
10928 int cpus, extra_vectors;
10929 int vectors_left;
10930 int v_budget, i;
10931 int v_actual;
10932 int iwarp_requested = 0;
10933
10934 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
10935 return -ENODEV;
10936
10937 /* The number of vectors we'll request will be comprised of:
10938 *   - Add 1 for "other" cause for Admin Queue events, etc.
10939 *   - The number of LAN queue pairs
10940 *     - Queues being used for RSS.
10941 *       We don't need as many as max_rss_size vectors.
10942 *       use rss_size instead in the calculation since that
10943 *       is governed by number of cpus in the system.
10944 *     - assumes symmetric Tx/Rx pairing
10945 *   - The number of VMDq pairs
10946 *   - The CPU count within the NUMA node if iWARP is enabled
10947 * Once we count this up, try the request.
10948 *
10949 * If we can't get what we want, we'll simplify to nearly nothing
10950 * and try again.  If that still fails, we punt.
10951 */
10952 vectors_left = hw->func_caps.num_msix_vectors;
10953 v_budget = 0;
10954
10955 /* reserve one vector for miscellaneous handler */
10956 if (vectors_left) {
10957 v_budget++;
10958 vectors_left--;
10959 }
10960
10961 /* reserve some vectors for the main PF traffic queues. Initially we
10962 * only reserve at most 50% of the available vectors, in the case that
10963 * the number of online CPUs is large. This ensures that we can enable
10964 * extra features as well. Once we've enabled the other features, we
10965 * will use any remaining vectors to reach as close as we can to the
10966 * number of online CPUs.
10967 */
10968 cpus = num_online_cpus();
10969 pf->num_lan_msix = min_t(int, cpus, vectors_left / 2);
10970 vectors_left -= pf->num_lan_msix;
10971
10972 /* reserve one vector for sideband flow director */
10973 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
10974 if (vectors_left) {
10975 pf->num_fdsb_msix = 1;
10976 v_budget++;
10977 vectors_left--;
10978 } else {
10979 pf->num_fdsb_msix = 0;
10980 }
10981 }
10982
10983 /* can we reserve enough for iWARP? */
10984 if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
10985 iwarp_requested = pf->num_iwarp_msix;
10986
10987 if (!vectors_left)
10988 pf->num_iwarp_msix = 0;
10989 else if (vectors_left < pf->num_iwarp_msix)
10990 pf->num_iwarp_msix = 1;
10991 v_budget += pf->num_iwarp_msix;
10992 vectors_left -= pf->num_iwarp_msix;
10993 }
10994
10995 /* any vectors left over go for VMDq support */
10996 if (pf->flags & I40E_FLAG_VMDQ_ENABLED) {
10997 if (!vectors_left) {
10998 pf->num_vmdq_msix = 0;
10999 pf->num_vmdq_qps = 0;
11000 } else {
11001 int vmdq_vecs_wanted =
11002 pf->num_vmdq_vsis * pf->num_vmdq_qps;
11003 int vmdq_vecs =
11004 min_t(int, vectors_left, vmdq_vecs_wanted);
11005
11006 /* if we're short on vectors for what's desired, we limit
11007 * the queues per vmdq.  If this is still more than are
11008 * available, the user will need to change the number of
11009 * queues/vectors used by the PF later with the ethtool
11010 * channels command
11011 */
11012 if (vectors_left < vmdq_vecs_wanted) {
11013 pf->num_vmdq_qps = 1;
11014 vmdq_vecs_wanted = pf->num_vmdq_vsis;
11015 vmdq_vecs = min_t(int,
11016 vectors_left,
11017 vmdq_vecs_wanted);
11018 }
11019 pf->num_vmdq_msix = pf->num_vmdq_qps;
11020
11021 v_budget += vmdq_vecs;
11022 vectors_left -= vmdq_vecs;
11023 }
11024 }
11025
11026 /* On systems with a large number of SMP cores, we previously limited
11027 * the number of vectors for num_lan_msix to be at most 50% of the
11028 * available vectors, to allow for other features. Now, we add back
11029 * the remaining vectors. However, we ensure that the total
11030 * num_lan_msix will not exceed num_online_cpus(). To do this, we
11031 * calculate the number of vectors we can add without going over the
11032 * cap of CPUs. For systems with a small number of CPUs this will be
11033 * zero.
11034 */
11035 extra_vectors = min_t(int, cpus - pf->num_lan_msix, vectors_left);
11036 pf->num_lan_msix += extra_vectors;
11037 vectors_left -= extra_vectors;
11038
11039 WARN(vectors_left < 0,
11040 "Calculation of remaining vectors underflowed. This is an accounting bug when determining total MSI-X vectors.\n");
11041
11042 v_budget += pf->num_lan_msix;
11043 pf->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry),
11044 GFP_KERNEL);
11045 if (!pf->msix_entries)
11046 return -ENOMEM;
11047
11048 for (i = 0; i < v_budget; i++)
11049 pf->msix_entries[i].entry = i;
11050 v_actual = i40e_reserve_msix_vectors(pf, v_budget);
11051
11052 if (v_actual < I40E_MIN_MSIX) {
11053 pf->flags &= ~I40E_FLAG_MSIX_ENABLED;
11054 kfree(pf->msix_entries);
11055 pf->msix_entries = NULL;
11056 pci_disable_msix(pf->pdev);
11057 return -ENODEV;
11058
11059 } else if (v_actual == I40E_MIN_MSIX) {
11060 /* Adjust for minimal MSIX use */
11061 pf->num_vmdq_vsis = 0;
11062 pf->num_vmdq_qps = 0;
11063 pf->num_lan_qps = 1;
11064 pf->num_lan_msix = 1;
11065
11066 } else if (v_actual != v_budget) {
11067 /* If we have limited resources, we will start with no vectors
11068 * for the special features and then allocate vectors to some
11069 * of these features based on the policy and at the end disable
11070 * the features that did not get any vectors.
11071 */
11072 int vec;
11073
11074 dev_info(&pf->pdev->dev,
11075 "MSI-X vector limit reached with %d, wanted %d, attempting to redistribute vectors\n",
11076 v_actual, v_budget);
11077
11078 vec = v_actual - 1;
11079
11080 /* Scale vector usage down */
11081 pf->num_vmdq_msix = 1;
11082 pf->num_vmdq_vsis = 1;
11083 pf->num_vmdq_qps = 1;
11084
11085 /* partition out the remaining vectors */
11086 switch (vec) {
11087 case 2:
11088 pf->num_lan_msix = 1;
11089 break;
11090 case 3:
11091 if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
11092 pf->num_lan_msix = 1;
11093 pf->num_iwarp_msix = 1;
11094 } else {
11095 pf->num_lan_msix = 2;
11096 }
11097 break;
11098 default:
11099 if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
11100 pf->num_iwarp_msix = min_t(int, (vec / 3),
11101 iwarp_requested);
11102 pf->num_vmdq_vsis = min_t(int, (vec / 3),
11103 I40E_DEFAULT_NUM_VMDQ_VSI);
11104 } else {
11105 pf->num_vmdq_vsis = min_t(int, (vec / 2),
11106 I40E_DEFAULT_NUM_VMDQ_VSI);
11107 }
11108 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
11109 pf->num_fdsb_msix = 1;
11110 vec--;
11111 }
11112 pf->num_lan_msix = min_t(int,
11113 (vec - (pf->num_iwarp_msix + pf->num_vmdq_vsis)),
11114 pf->num_lan_msix);
11115 pf->num_lan_qps = pf->num_lan_msix;
11116 break;
11117 }
11118 }
11119
11120 if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
11121 (pf->num_fdsb_msix == 0)) {
11122 dev_info(&pf->pdev->dev, "Sideband Flowdir disabled, not enough MSI-X vectors\n");
11123 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
11124 pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
11125 }
11126 if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
11127 (pf->num_vmdq_msix == 0)) {
11128 dev_info(&pf->pdev->dev, "VMDq disabled, not enough MSI-X vectors\n");
11129 pf->flags &= ~I40E_FLAG_VMDQ_ENABLED;
11130 }
11131
11132 if ((pf->flags & I40E_FLAG_IWARP_ENABLED) &&
11133 (pf->num_iwarp_msix == 0)) {
11134 dev_info(&pf->pdev->dev, "IWARP disabled, not enough MSI-X vectors\n");
11135 pf->flags &= ~I40E_FLAG_IWARP_ENABLED;
11136 }
11137 i40e_debug(&pf->hw, I40E_DEBUG_INIT,
11138 "MSI-X vector distribution: PF %d, VMDq %d, FDSB %d, iWARP %d\n",
11139 pf->num_lan_msix,
11140 pf->num_vmdq_msix * pf->num_vmdq_vsis,
11141 pf->num_fdsb_msix,
11142 pf->num_iwarp_msix);
11143
11144 return v_actual;
11145 }
11146
11147
11148 /**
11149 * i40e_vsi_alloc_q_vector - Allocate memory for a single interrupt vector
11150 * @vsi: the VSI being configured
11151 * @v_idx: index of the vector in the vsi struct
11152 * @cpu: cpu to be used on affinity_mask
11153 * We allocate one q_vector.  If allocation fails we return -ENOMEM.
11154 **/
11155 static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx, int cpu)
11156 {
11157 struct i40e_q_vector *q_vector;
11158
11159 /* allocate q_vector */
11160 q_vector = kzalloc(sizeof(struct i40e_q_vector), GFP_KERNEL);
11161 if (!q_vector)
11162 return -ENOMEM;
11163
11164 q_vector->vsi = vsi;
11165 q_vector->v_idx = v_idx;
11166 cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask);
11167
11168 if (vsi->netdev)
11169 netif_napi_add(vsi->netdev, &q_vector->napi,
11170 i40e_napi_poll, NAPI_POLL_WEIGHT);
11171
11172 /* tie q_vector and VSI together */
11173 vsi->q_vectors[v_idx] = q_vector;
11174
11175 return 0;
11176 }
11177
11178
11179 /**
11180 * i40e_vsi_alloc_q_vectors - Allocate memory for interrupt vectors
11181 * @vsi: the VSI being configured
11182 *
11183 * We allocate one q_vector per queue interrupt; returns -ENOMEM on failure.
11184 **/
11185 static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi)
11186 {
11187 struct i40e_pf *pf = vsi->back;
11188 int err, v_idx, num_q_vectors, current_cpu;
11189
11190 /* if not MSIX, give the one vector only to the LAN VSI */
11191 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
11192 num_q_vectors = vsi->num_q_vectors;
11193 else if (vsi == pf->vsi[pf->lan_vsi])
11194 num_q_vectors = 1;
11195 else
11196 return -EINVAL;
11197
11198 current_cpu = cpumask_first(cpu_online_mask);
11199
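/* distribute the q_vectors across the online CPUs round-robin */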
11200 for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
11201 err = i40e_vsi_alloc_q_vector(vsi, v_idx, current_cpu);
11202 if (err)
11203 goto err_out;
11204 current_cpu = cpumask_next(current_cpu, cpu_online_mask);
11205 if (unlikely(current_cpu >= nr_cpu_ids))
11206 current_cpu = cpumask_first(cpu_online_mask);
11207 }
11208
11209 return 0;
11210
11211 err_out:
11212 while (v_idx--)
11213 i40e_free_q_vector(vsi, v_idx);
11214
11215 return err;
11216 }
11217
11218 /**
11219 * i40e_init_interrupt_scheme - Determine proper interrupt scheme
11220 * @pf: board private structure to initialize
11221 **/
11222 static int i40e_init_interrupt_scheme(struct i40e_pf *pf)
11223 {
11224 int vectors = 0;
11225 ssize_t size;
11226
11227 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
11228 vectors = i40e_init_msix(pf);
11229 if (vectors < 0) {
11230 pf->flags &= ~(I40E_FLAG_MSIX_ENABLED |
11231 I40E_FLAG_IWARP_ENABLED |
11232 I40E_FLAG_RSS_ENABLED |
11233 I40E_FLAG_DCB_CAPABLE |
11234 I40E_FLAG_DCB_ENABLED |
11235 I40E_FLAG_SRIOV_ENABLED |
11236 I40E_FLAG_FD_SB_ENABLED |
11237 I40E_FLAG_FD_ATR_ENABLED |
11238 I40E_FLAG_VMDQ_ENABLED);
11239 pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
11240
11241 /* rework the queue expectations without MSIX */
11242 i40e_determine_queue_usage(pf);
11243 }
11244 }
11245
11246 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) &&
11247 (pf->flags & I40E_FLAG_MSI_ENABLED)) {
11248 dev_info(&pf->pdev->dev, "MSI-X not available, trying MSI\n");
11249 vectors = pci_enable_msi(pf->pdev);
11250 if (vectors < 0) {
11251 dev_info(&pf->pdev->dev, "MSI init failed - %d\n",
11252 vectors);
11253 pf->flags &= ~I40E_FLAG_MSI_ENABLED;
11254 }
11255 vectors = 1;
11256 }
11257
11258 if (!(pf->flags & (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED)))
11259 dev_info(&pf->pdev->dev, "MSI-X and MSI not available, falling back to Legacy IRQ\n");
11260
11261 /* set up vector assignment tracking */
11262 size = sizeof(struct i40e_lump_tracking) + (sizeof(u16) * vectors);
11263 pf->irq_pile = kzalloc(size, GFP_KERNEL);
11264 if (!pf->irq_pile)
11265 return -ENOMEM;
11266
11267 pf->irq_pile->num_entries = vectors;
11268 pf->irq_pile->search_hint = 0;
11269
11270 /* track first vector for misc interrupts, ignore return */
11271 (void)i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT - 1);
11272
11273 return 0;
11274 }
11275
11276
11277 /**
11278 * i40e_restore_interrupt_scheme - Restore the interrupt scheme
11279 * @pf: private board data structure
11280 *
11281 * Restore the interrupt scheme that was cleared when we suspended the
11282 * device; called during resume to re-allocate the q_vectors and reacquire IRQs.
11283 **/
11284 static int i40e_restore_interrupt_scheme(struct i40e_pf *pf)
11285 {
11286 int err, i;
11287
11288
11289 /* We cleared the MSI and MSI-X flags when disabling the old interrupt
11290 * scheme. We need to reset them back to their default state.
11291 */
11292 pf->flags |= (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED);
11293
11294 err = i40e_init_interrupt_scheme(pf);
11295 if (err)
11296 return err;
11297
11298 /* Now that we've re-acquired IRQs, we need to remap the vectors and
11299 * rings together again.
11300 */
11301 for (i = 0; i < pf->num_alloc_vsi; i++) {
11302 if (pf->vsi[i]) {
11303 err = i40e_vsi_alloc_q_vectors(pf->vsi[i]);
11304 if (err)
11305 goto err_unwind;
11306 i40e_vsi_map_rings_to_vectors(pf->vsi[i]);
11307 }
11308 }
11309
11310 err = i40e_setup_misc_vector(pf);
11311 if (err)
11312 goto err_unwind;
11313
11314 if (pf->flags & I40E_FLAG_IWARP_ENABLED)
11315 i40e_client_update_msix_info(pf);
11316
11317 return 0;
11318
11319 err_unwind:
11320 while (i--) {
11321 if (pf->vsi[i])
11322 i40e_vsi_free_q_vectors(pf->vsi[i]);
11323 }
11324
11325 return err;
11326 }
11327
11328
11329 /**
11330 * i40e_setup_misc_vector_for_recovery_mode - Setup the misc vector to handle
11331 * non queue events in recovery mode
11332 * @pf: board private structure
11333 *
11334 * This sets up the handler for MSIX 0 or MSI/legacy, which is used to manage
11335 * the non-queue interrupts in recovery mode. This should be called during
11336 * resume to re-allocate the q_vectors and reacquire the IRQs.
11337 **/
11338 static int i40e_setup_misc_vector_for_recovery_mode(struct i40e_pf *pf)
11339 {
11340 int err;
11341
11342 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
11343 err = i40e_setup_misc_vector(pf);
11344
11345 if (err) {
11346 dev_info(&pf->pdev->dev,
11347 "MSI-X misc vector request failed, error %d\n",
11348 err);
11349 return err;
11350 }
11351 } else {
11352 u32 flags = pf->flags & I40E_FLAG_MSI_ENABLED ? 0 : IRQF_SHARED;
11353
11354 err = request_irq(pf->pdev->irq, i40e_intr, flags,
11355 pf->int_name, pf);
11356
11357 if (err) {
11358 dev_info(&pf->pdev->dev,
11359 "MSI/legacy misc vector request failed, error %d\n",
11360 err);
11361 return err;
11362 }
11363 i40e_enable_misc_int_causes(pf);
11364 i40e_irq_dynamic_enable_icr0(pf);
11365 }
11366
11367 return 0;
11368 }
11369
11370 /**
11371 * i40e_setup_misc_vector - Setup the misc vector to handle non queue events
11372 * @pf: board private structure
11373 *
11374 * This sets up the handler for MSIX 0, which is used to manage the
11375 * non-queue interrupts, e.g. AdminQ and errors.  This is not used
11376 * when in MSI or Legacy interrupt mode.
11377 **/
11378 static int i40e_setup_misc_vector(struct i40e_pf *pf)
11379 {
11380 struct i40e_hw *hw = &pf->hw;
11381 int err = 0;
11382
11383 /* Only request the IRQ once, the first time through. */
11384 if (!test_and_set_bit(__I40E_MISC_IRQ_REQUESTED, pf->state)) {
11385 err = request_irq(pf->msix_entries[0].vector,
11386 i40e_intr, 0, pf->int_name, pf);
11387 if (err) {
11388 clear_bit(__I40E_MISC_IRQ_REQUESTED, pf->state);
11389 dev_info(&pf->pdev->dev,
11390 "request_irq for %s failed: %d\n",
11391 pf->int_name, err);
11392 return -EFAULT;
11393 }
11394 }
11395
11396 i40e_enable_misc_int_causes(pf);
11397
11398 /* associate no queues to the misc vector */
11399 wr32(hw, I40E_PFINT_LNKLST0, I40E_QUEUE_END_OF_LIST);
11400 wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), I40E_ITR_8K >> 1);
11401
11402 i40e_flush(hw);
11403
11404 i40e_irq_dynamic_enable_icr0(pf);
11405
11406 return err;
11407 }
11408
11409
11410 /**
11411 * i40e_get_rss_aq - Get RSS keys and lut by using AQ commands
11412 * @vsi: Pointer to VSI structure
11413 * @seed: Buffer to store the hash keys
11414 * @lut: Buffer to store the lookup table entries
11415 * @lut_size: Size of buffer to store the lookup table entries
11416 * Returns 0 on success, negative on failure
11417 **/
11418 static int i40e_get_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
11419 u8 *lut, u16 lut_size)
11420 {
11421 struct i40e_pf *pf = vsi->back;
11422 struct i40e_hw *hw = &pf->hw;
11423 int ret = 0;
11424
11425 if (seed) {
11426 ret = i40e_aq_get_rss_key(hw, vsi->id,
11427 (struct i40e_aqc_get_set_rss_key_data *)seed);
11428 if (ret) {
11429 dev_info(&pf->pdev->dev,
11430 "Cannot get RSS key, err %s aq_err %s\n",
11431 i40e_stat_str(&pf->hw, ret),
11432 i40e_aq_str(&pf->hw,
11433 pf->hw.aq.asq_last_status));
11434 return ret;
11435 }
11436 }
11437
11438 if (lut) {
11439 bool pf_lut = vsi->type == I40E_VSI_MAIN ? true : false;
11440
11441 ret = i40e_aq_get_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);
11442 if (ret) {
11443 dev_info(&pf->pdev->dev,
11444 "Cannot get RSS lut, err %s aq_err %s\n",
11445 i40e_stat_str(&pf->hw, ret),
11446 i40e_aq_str(&pf->hw,
11447 pf->hw.aq.asq_last_status));
11448 return ret;
11449 }
11450 }
11451
11452 return ret;
11453 }
11454
11455
11456 /**
11457 * i40e_config_rss_reg - Configure RSS keys and lut by writing registers
11458 * @vsi: Pointer to VSI structure
11459 * @seed: RSS hash seed
11460 * @lut: Lookup table
11461 * @lut_size: Lookup table size
11462 * Returns 0 on success, negative on failure
11463 **/
11464 static int i40e_config_rss_reg(struct i40e_vsi *vsi, const u8 *seed,
11465 const u8 *lut, u16 lut_size)
11466 {
11467 struct i40e_pf *pf = vsi->back;
11468 struct i40e_hw *hw = &pf->hw;
11469 u16 vf_id = vsi->vf_id;
11470 u8 i;
11471
11472 /* Fill out hash function seed */
11473 if (seed) {
11474 u32 *seed_dw = (u32 *)seed;
11475
11476 if (vsi->type == I40E_VSI_MAIN) {
11477 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
11478 wr32(hw, I40E_PFQF_HKEY(i), seed_dw[i]);
11479 } else if (vsi->type == I40E_VSI_SRIOV) {
11480 for (i = 0; i <= I40E_VFQF_HKEY1_MAX_INDEX; i++)
11481 wr32(hw, I40E_VFQF_HKEY1(i, vf_id), seed_dw[i]);
11482 } else {
11483 dev_err(&pf->pdev->dev, "Cannot set RSS seed - invalid VSI type\n");
11484 }
11485 }
11486
11487 if (lut) {
11488 u32 *lut_dw = (u32 *)lut;
11489
11490 if (vsi->type == I40E_VSI_MAIN) {
11491 if (lut_size != I40E_HLUT_ARRAY_SIZE)
11492 return -EINVAL;
11493 for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
11494 wr32(hw, I40E_PFQF_HLUT(i), lut_dw[i]);
11495 } else if (vsi->type == I40E_VSI_SRIOV) {
11496 if (lut_size != I40E_VF_HLUT_ARRAY_SIZE)
11497 return -EINVAL;
11498 for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++)
11499 wr32(hw, I40E_VFQF_HLUT1(i, vf_id), lut_dw[i]);
11500 } else {
11501 dev_err(&pf->pdev->dev, "Cannot set RSS LUT - invalid VSI type\n");
11502 }
11503 }
11504 i40e_flush(hw);
11505
11506 return 0;
11507 }
11508
11509
11510 /**
11511 * i40e_get_rss_reg - Get the RSS keys and LUT by reading registers
11512 * @vsi: Pointer to VSI structure
11513 * @seed: Buffer to store the keys
11514 * @lut: Buffer to store the lookup table entries
11515 * @lut_size: Size of buffer to store the lookup table entries
11516 * Returns 0 on success, negative on failure
11517 **/
11518 static int i40e_get_rss_reg(struct i40e_vsi *vsi, u8 *seed,
11519 u8 *lut, u16 lut_size)
11520 {
11521 struct i40e_pf *pf = vsi->back;
11522 struct i40e_hw *hw = &pf->hw;
11523 u16 i;
11524
11525 if (seed) {
11526 u32 *seed_dw = (u32 *)seed;
11527
11528 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
11529 seed_dw[i] = i40e_read_rx_ctl(hw, I40E_PFQF_HKEY(i));
11530 }
11531 if (lut) {
11532 u32 *lut_dw = (u32 *)lut;
11533
11534 if (lut_size != I40E_HLUT_ARRAY_SIZE)
11535 return -EINVAL;
11536 for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
11537 lut_dw[i] = rd32(hw, I40E_PFQF_HLUT(i));
11538 }
11539
11540 return 0;
11541 }
11542
11543
11544 /**
11545 * i40e_config_rss - Configure RSS keys and lut
11546 * @vsi: Pointer to VSI structure
11547 * @seed: RSS hash seed
11548 * @lut: Lookup table
11549 * @lut_size: Lookup table size
11550 * Returns 0 on success, negative on failure
11551 **/
11552 int i40e_config_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
11553 {
11554 struct i40e_pf *pf = vsi->back;
11555
11556 if (pf->hw_features & I40E_HW_RSS_AQ_CAPABLE)
11557 return i40e_config_rss_aq(vsi, seed, lut, lut_size);
11558 else
11559 return i40e_config_rss_reg(vsi, seed, lut, lut_size);
11560 }
11561
11562
11563 /**
11564 * i40e_get_rss - Get RSS keys and lut
11565 * @vsi: Pointer to VSI structure
11566 * @seed: Buffer to store the keys
11567 * @lut: Buffer to store the lookup table entries
11568 * @lut_size: Size of buffer to store the lookup table entries
11569 * Returns 0 on success, negative on failure
11570 **/
11571 int i40e_get_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
11572 {
11573 struct i40e_pf *pf = vsi->back;
11574
11575 if (pf->hw_features & I40E_HW_RSS_AQ_CAPABLE)
11576 return i40e_get_rss_aq(vsi, seed, lut, lut_size);
11577 else
11578 return i40e_get_rss_reg(vsi, seed, lut, lut_size);
11579 }
11580
11581 /**
11582 * i40e_fill_rss_lut - Fill the RSS lookup table with default values
11583 * @pf: PF structure
11584 * @lut: Lookup table
11585 * @rss_table_size: Lookup table size
11586 * @rss_size: Range of queue number for hashing
11587 **/
11588 void i40e_fill_rss_lut(struct i40e_pf *pf, u8 *lut,
11589 u16 rss_table_size, u16 rss_size)
11590 {
11591 u16 i;
11592
11593 for (i = 0; i < rss_table_size; i++)
11594 lut[i] = i % rss_size;
11595 }
11596
11597 /**
11598 * i40e_pf_config_rss - Prepare for RSS if used
11599 * @pf: board private structure
11600 **/
11601 static int i40e_pf_config_rss(struct i40e_pf *pf)
11602 {
11603 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
11604 u8 seed[I40E_HKEY_ARRAY_SIZE];
11605 u8 *lut;
11606 struct i40e_hw *hw = &pf->hw;
11607 u32 reg_val;
11608 u64 hena;
11609 int ret;
11610
11611 /* By default we enable TCP/UDP with IPv4/IPv6 ptypes */
11612 hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
11613 ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
11614 hena |= i40e_pf_get_default_rss_hena(pf);
11615
11616 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena);
11617 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
11618
11619 /* Determine the RSS table size based on the hardware capabilities */
11620 reg_val = i40e_read_rx_ctl(hw, I40E_PFQF_CTL_0);
11621 reg_val = (pf->rss_table_size == 512) ?
11622 (reg_val | I40E_PFQF_CTL_0_HASHLUTSIZE_512) :
11623 (reg_val & ~I40E_PFQF_CTL_0_HASHLUTSIZE_512);
11624 i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, reg_val);
11625
11626 /* Determine the RSS size of the VSI */
11627 if (!vsi->rss_size) {
11628 u16 qcount;
11629
11630 /* If the firmware does something weird during VSI init, we
11631 * could end up with zero TCs. Check for that to avoid
11632 * divide-by-zero. It probably won't pass traffic, but it also
11633 * won't panic. */
11634 qcount = vsi->num_queue_pairs /
11635 (vsi->tc_config.numtc ? vsi->tc_config.numtc : 1);
11636 vsi->rss_size = min_t(int, pf->alloc_rss_size, qcount);
11637 }
11638 if (!vsi->rss_size)
11639 return -EINVAL;
11640
11641 lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
11642 if (!lut)
11643 return -ENOMEM;
11644
11645 /* Use user configured lut if there is one, otherwise use default */
11646 if (vsi->rss_lut_user)
11647 memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
11648 else
11649 i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size);
11650
11651 /* Use user configured hash key if there is one, otherwise
11652 * default hash key via random number generator
11653 */
11654 if (vsi->rss_hkey_user)
11655 memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
11656 else
11657 netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
11658 ret = i40e_config_rss(vsi, seed, lut, vsi->rss_table_size);
11659 kfree(lut);
11660
11661 return ret;
11662 }
11663
11664
11665 /**
11666 * i40e_reconfig_rss_queues - change number of queues for rss and rebuild
11667 * @pf: board private structure
11668 * @queue_count: the requested queue count for rss.
11669 *
11670 * returns 0 if rss is not enabled, if enabled returns the final rss queue
11671 * count which may be different from the requested queue count.
11672 **/
11673 int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
11674 {
11675 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
11676 int new_rss_size;
11677
11678 if (!(pf->flags & I40E_FLAG_RSS_ENABLED))
11679 return 0;
11680
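/* cap the request at the number of online CPUs and the HW RSS maximum */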
11681 queue_count = min_t(int, queue_count, num_online_cpus());
11682 new_rss_size = min_t(int, queue_count, pf->rss_size_max);
11683
11684 if (queue_count != vsi->num_queue_pairs) {
11685 u16 qcount;
11686
11687 vsi->req_queue_pairs = queue_count;
11688 i40e_prep_for_reset(pf, true);
11689
11690 pf->alloc_rss_size = new_rss_size;
11691
11692 i40e_reset_and_rebuild(pf, true, true);
11693
11694
11695 /* Discard the user configured hash keys and lut, if less
11696 * queues are enabled. */
11697 if (queue_count < vsi->rss_size) {
11698 i40e_clear_rss_config_user(vsi);
11699 dev_dbg(&pf->pdev->dev,
11700 "discard user configured hash keys and lut\n");
11701 }
11702
11703 /* Reset vsi->rss_size, as number of enabled queues changed */
11704 qcount = vsi->num_queue_pairs / vsi->tc_config.numtc;
11705 vsi->rss_size = min_t(int, pf->alloc_rss_size, qcount);
11706
11707 i40e_pf_config_rss(pf);
11708 }
11709 dev_info(&pf->pdev->dev, "User requested queue count/HW max RSS count: %d/%d\n",
11710 vsi->req_queue_pairs, pf->rss_size_max);
11711 return pf->alloc_rss_size;
11712 }
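/* Usage sketch (hedged): this helper sits behind the ethtool channel-count
 * path (i40e_set_channels() in i40e_ethtool.c), so a request such as
 * "ethtool -L <if> combined 16" lands here, with 16 clamped to
 * num_online_cpus() and pf->rss_size_max before the reset/rebuild.
 */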
11713
11714 /**
11715  * i40e_get_partition_bw_setting - Retrieve BW settings for this PF partition
11716  * @pf: board private structure
11717  **/
11718 i40e_status i40e_get_partition_bw_setting(struct i40e_pf *pf)
11719 {
11720 i40e_status status;
11721 bool min_valid, max_valid;
11722 u32 max_bw, min_bw;
11723
11724 status = i40e_read_bw_from_alt_ram(&pf->hw, &max_bw, &min_bw,
11725 &min_valid, &max_valid);
11726
11727 if (!status) {
11728 if (min_valid)
11729 pf->min_bw = min_bw;
11730 if (max_valid)
11731 pf->max_bw = max_bw;
11732 }
11733
11734 return status;
11735 }
11736
11737 /**
11738  * i40e_set_partition_bw_setting - Set BW settings for this PF partition
11739  * @pf: board private structure
11740  **/
11741 i40e_status i40e_set_partition_bw_setting(struct i40e_pf *pf)
11742 {
11743 struct i40e_aqc_configure_partition_bw_data bw_data;
11744 i40e_status status;
11745
11746
11747 bw_data.pf_valid_bits = cpu_to_le16(BIT(pf->hw.pf_id));
11748 bw_data.max_bw[pf->hw.pf_id] = pf->max_bw & I40E_ALT_BW_VALUE_MASK;
11749 bw_data.min_bw[pf->hw.pf_id] = pf->min_bw & I40E_ALT_BW_VALUE_MASK;
11750
11751
11752 status = i40e_aq_configure_partition_bw(&pf->hw, &bw_data, NULL);
11753
11754 return status;
11755 }
11756
11757 /**
11758  * i40e_commit_partition_bw_setting - Commit BW settings for this PF partition
11759  * @pf: board private structure
11760  **/
11761 i40e_status i40e_commit_partition_bw_setting(struct i40e_pf *pf)
11762 {
11763 /* Commit temporary BW setting to permanent NVM image */
11764 enum i40e_admin_queue_err last_aq_status;
11765 i40e_status ret;
11766 u16 nvm_word;
11767
11768 if (pf->hw.partition_id != 1) {
11769 dev_info(&pf->pdev->dev,
11770 "Commit BW only works on partition 1! This is partition %d",
11771 pf->hw.partition_id);
11772 ret = I40E_NOT_SUPPORTED;
11773 goto bw_commit_out;
11774 }
11775
11776
11777 ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_READ);
11778 last_aq_status = pf->hw.aq.asq_last_status;
11779 if (ret) {
11780 dev_info(&pf->pdev->dev,
11781 "Cannot acquire NVM for read access, err %s aq_err %s\n",
11782 i40e_stat_str(&pf->hw, ret),
11783 i40e_aq_str(&pf->hw, last_aq_status));
11784 goto bw_commit_out;
11785 }
11786
11787
11788 ret = i40e_aq_read_nvm(&pf->hw,
11789 I40E_SR_NVM_CONTROL_WORD,
11790 0x10, sizeof(nvm_word), &nvm_word,
11791 false, NULL);
11792 /* Save off last admin queue command status before releasing
11793  * the NVM
11794  */
11795 last_aq_status = pf->hw.aq.asq_last_status;
11796 i40e_release_nvm(&pf->hw);
11797 if (ret) {
11798 dev_info(&pf->pdev->dev, "NVM read error, err %s aq_err %s\n",
11799 i40e_stat_str(&pf->hw, ret),
11800 i40e_aq_str(&pf->hw, last_aq_status));
11801 goto bw_commit_out;
11802 }
11803
11804
11805 msleep(50);
11806
11807
11808 ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_WRITE);
11809 last_aq_status = pf->hw.aq.asq_last_status;
11810 if (ret) {
11811 dev_info(&pf->pdev->dev,
11812 "Cannot acquire NVM for write access, err %s aq_err %s\n",
11813 i40e_stat_str(&pf->hw, ret),
11814 i40e_aq_str(&pf->hw, last_aq_status));
11815 goto bw_commit_out;
11816 }
11817
11818 /* Write it back out unchanged to initiate update NVM, which will force
11819  * a write of the shadow (alt) RAM to the NVM - thus storing the
11820  * bandwidth values permanently. */
11821 ret = i40e_aq_update_nvm(&pf->hw,
11822 I40E_SR_NVM_CONTROL_WORD,
11823 0x10, sizeof(nvm_word),
11824 &nvm_word, true, 0, NULL);
11825
11826 /* Save off last admin queue command status before releasing
11827  * the NVM */
11828 last_aq_status = pf->hw.aq.asq_last_status;
11829 i40e_release_nvm(&pf->hw);
11830 if (ret)
11831 dev_info(&pf->pdev->dev,
11832 "BW settings NOT SAVED, err %s aq_err %s\n",
11833 i40e_stat_str(&pf->hw, ret),
11834 i40e_aq_str(&pf->hw, last_aq_status));
11835 bw_commit_out:
11836
11837 return ret;
11838 }
11839
11840 /**
11841  * i40e_sw_init - Initialize general software structures (struct i40e_pf)
11842  * @pf: board private structure to initialize
11843  *
11844  * i40e_sw_init initializes the Ethernet private data structure.
11845  * Fields are initialized based on PCI device information and
11846  * OS network device settings (MTU size).
11847  **/
11848 static int i40e_sw_init(struct i40e_pf *pf)
11849 {
11850 int err = 0;
11851 int size;
11852
11853
11854 pf->flags = I40E_FLAG_RX_CSUM_ENABLED |
11855 I40E_FLAG_MSI_ENABLED |
11856 I40E_FLAG_MSIX_ENABLED;
11857
11858
11859 pf->rx_itr_default = I40E_ITR_RX_DEF;
11860 pf->tx_itr_default = I40E_ITR_TX_DEF;
11861
11862 /* Depending on PF configurations, it is possible that the RSS
11863  * maximum might end up larger than the available queues
11864  */
11865 pf->rss_size_max = BIT(pf->hw.func_caps.rss_table_entry_width);
11866 pf->alloc_rss_size = 1;
11867 pf->rss_table_size = pf->hw.func_caps.rss_table_size;
11868 pf->rss_size_max = min_t(int, pf->rss_size_max,
11869 pf->hw.func_caps.num_tx_qp);
11870 if (pf->hw.func_caps.rss) {
11871 pf->flags |= I40E_FLAG_RSS_ENABLED;
11872 pf->alloc_rss_size = min_t(int, pf->rss_size_max,
11873 num_online_cpus());
11874 }
11875
11876
11877 if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.flex10_enable) {
11878 pf->flags |= I40E_FLAG_MFP_ENABLED;
11879 dev_info(&pf->pdev->dev, "MFP mode Enabled\n");
11880 if (i40e_get_partition_bw_setting(pf)) {
11881 dev_warn(&pf->pdev->dev,
11882 "Could not get partition bw settings\n");
11883 } else {
11884 dev_info(&pf->pdev->dev,
11885 "Partition BW Min = %8.8x, Max = %8.8x\n",
11886 pf->min_bw, pf->max_bw);
11887
11888
11889 i40e_set_partition_bw_setting(pf);
11890 }
11891 }
11892
11893 if ((pf->hw.func_caps.fd_filters_guaranteed > 0) ||
11894 (pf->hw.func_caps.fd_filters_best_effort > 0)) {
11895 pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
11896 pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
11897 if (pf->flags & I40E_FLAG_MFP_ENABLED &&
11898 pf->hw.num_partitions > 1)
11899 dev_info(&pf->pdev->dev,
11900 "Flow Director Sideband mode Disabled in MFP mode\n");
11901 else
11902 pf->flags |= I40E_FLAG_FD_SB_ENABLED;
11903 pf->fdir_pf_filter_count =
11904 pf->hw.func_caps.fd_filters_guaranteed;
11905 pf->hw.fdir_shared_filter_count =
11906 pf->hw.func_caps.fd_filters_best_effort;
11907 }
11908
11909 if (pf->hw.mac.type == I40E_MAC_X722) {
11910 pf->hw_features |= (I40E_HW_RSS_AQ_CAPABLE |
11911 I40E_HW_128_QP_RSS_CAPABLE |
11912 I40E_HW_ATR_EVICT_CAPABLE |
11913 I40E_HW_WB_ON_ITR_CAPABLE |
11914 I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE |
11915 I40E_HW_NO_PCI_LINK_CHECK |
11916 I40E_HW_USE_SET_LLDP_MIB |
11917 I40E_HW_GENEVE_OFFLOAD_CAPABLE |
11918 I40E_HW_PTP_L4_CAPABLE |
11919 I40E_HW_WOL_MC_MAGIC_PKT_WAKE |
11920 I40E_HW_OUTER_UDP_CSUM_CAPABLE);
11921
11922 #define I40E_FDEVICT_PCTYPE_DEFAULT 0xc03
11923 if (rd32(&pf->hw, I40E_GLQF_FDEVICTENA(1)) !=
11924 I40E_FDEVICT_PCTYPE_DEFAULT) {
11925 dev_warn(&pf->pdev->dev,
11926 "FD EVICT PCTYPES are not right, disable FD HW EVICT\n");
11927 pf->hw_features &= ~I40E_HW_ATR_EVICT_CAPABLE;
11928 }
11929 } else if ((pf->hw.aq.api_maj_ver > 1) ||
11930 ((pf->hw.aq.api_maj_ver == 1) &&
11931 (pf->hw.aq.api_min_ver > 4))) {
11932
11933 pf->hw_features |= I40E_HW_GENEVE_OFFLOAD_CAPABLE;
11934 }
11935
11936
11937 if (pf->hw_features & I40E_HW_ATR_EVICT_CAPABLE)
11938 pf->flags |= I40E_FLAG_HW_ATR_EVICT_ENABLED;
11939
11940 if ((pf->hw.mac.type == I40E_MAC_XL710) &&
11941 (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
11942 (pf->hw.aq.fw_maj_ver < 4))) {
11943 pf->hw_features |= I40E_HW_RESTART_AUTONEG;
11944 /* No DCB support for FW < v4.33 */
11945 pf->hw_features |= I40E_HW_NO_DCB_SUPPORT;
11946 }
11947
11948 /* Disable FW LLDP if FW < v4.3 */
11949 if ((pf->hw.mac.type == I40E_MAC_XL710) &&
11950 (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) ||
11951 (pf->hw.aq.fw_maj_ver < 4)))
11952 pf->hw_features |= I40E_HW_STOP_FW_LLDP;
11953
11954 /* Use the FW Set LLDP MIB API if FW > v4.40 */
11955 if ((pf->hw.mac.type == I40E_MAC_XL710) &&
11956 (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver >= 40)) ||
11957 (pf->hw.aq.fw_maj_ver >= 5)))
11958 pf->hw_features |= I40E_HW_USE_SET_LLDP_MIB;
11959
11960 /* Enable PTP L4 if FW > v6.0 */
11961 if (pf->hw.mac.type == I40E_MAC_XL710 &&
11962 pf->hw.aq.fw_maj_ver >= 6)
11963 pf->hw_features |= I40E_HW_PTP_L4_CAPABLE;
11964
11965 if (pf->hw.func_caps.vmdq && num_online_cpus() != 1) {
11966 pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI;
11967 pf->flags |= I40E_FLAG_VMDQ_ENABLED;
11968 pf->num_vmdq_qps = i40e_default_queues_per_vmdq(pf);
11969 }
11970
11971 if (pf->hw.func_caps.iwarp && num_online_cpus() != 1) {
11972 pf->flags |= I40E_FLAG_IWARP_ENABLED;
11973
11974 pf->num_iwarp_msix = (int)num_online_cpus() + 1;
11975 }
11976
11977 /* Stopping the FW LLDP engine is supported on XL710 and X722 starting
11978  * from FW versions determined in i40e_init_adminq, but is not supported
11979  * on XL710 if NPAR is functioning, so unset the hw flag in that case.
11980  */
11981 if (pf->hw.mac.type == I40E_MAC_XL710 &&
11982 pf->hw.func_caps.npar_enable &&
11983 (pf->hw.flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE))
11984 pf->hw.flags &= ~I40E_HW_FLAG_FW_LLDP_STOPPABLE;
11985
11986 #ifdef CONFIG_PCI_IOV
11987 if (pf->hw.func_caps.num_vfs && pf->hw.partition_id == 1) {
11988 pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;
11989 pf->flags |= I40E_FLAG_SRIOV_ENABLED;
11990 pf->num_req_vfs = min_t(int,
11991 pf->hw.func_caps.num_vfs,
11992 I40E_MAX_VF_COUNT);
11993 }
11994 #endif
11995 pf->eeprom_version = 0xDEAD;
11996 pf->lan_veb = I40E_NO_VEB;
11997 pf->lan_vsi = I40E_NO_VSI;
11998
11999
12000 pf->flags &= ~I40E_FLAG_VEB_STATS_ENABLED;
12001
12002 /* set up queue assignment tracking */
12003 size = sizeof(struct i40e_lump_tracking)
12004 + (sizeof(u16) * pf->hw.func_caps.num_tx_qp);
12005 pf->qp_pile = kzalloc(size, GFP_KERNEL);
12006 if (!pf->qp_pile) {
12007 err = -ENOMEM;
12008 goto sw_init_done;
12009 }
12010 pf->qp_pile->num_entries = pf->hw.func_caps.num_tx_qp;
12011 pf->qp_pile->search_hint = 0;
12012
12013 pf->tx_timeout_recovery_level = 1;
12014
12015 mutex_init(&pf->switch_mutex);
12016
12017 sw_init_done:
12018 return err;
12019 }
12020
12021 /**
12022  * i40e_set_ntuple - set the ntuple feature flag and take action
12023  * @pf: board private structure to initialize
12024  * @features: the feature set that the stack is suggesting
12025  *
12026  * returns a bool to indicate if reset needs to happen
12027  **/
12028 bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features)
12029 {
12030 bool need_reset = false;
12031
12032 /* Check if Flow Director n-tuple support was enabled or disabled. If
12033  * the state changed, we need to reset.
12034  */
12035 if (features & NETIF_F_NTUPLE) {
12036
12037 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
12038 need_reset = true;
12039
12040
12041
12042 if (pf->num_fdsb_msix > 0 && !pf->num_cloud_filters) {
12043 pf->flags |= I40E_FLAG_FD_SB_ENABLED;
12044 pf->flags &= ~I40E_FLAG_FD_SB_INACTIVE;
12045 }
12046 } else {
12047
12048 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
12049 need_reset = true;
12050 i40e_fdir_filter_exit(pf);
12051 }
12052 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
12053 clear_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state);
12054 pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
12055
12056
12057 pf->fd_add_err = 0;
12058 pf->fd_atr_cnt = 0;
12059
12060 if (test_and_clear_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state))
12061 if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
12062 (I40E_DEBUG_FD & pf->hw.debug_mask))
12063 dev_info(&pf->pdev->dev, "ATR re-enabled.\n");
12064 }
12065 return need_reset;
12066 }
12067
12068 /**
12069  * i40e_clear_rss_lut - clear the rx hash lookup table
12070  * @vsi: the VSI being configured
12071  **/
12072 static void i40e_clear_rss_lut(struct i40e_vsi *vsi)
12073 {
12074 struct i40e_pf *pf = vsi->back;
12075 struct i40e_hw *hw = &pf->hw;
12076 u16 vf_id = vsi->vf_id;
12077 u8 i;
12078
12079 if (vsi->type == I40E_VSI_MAIN) {
12080 for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
12081 wr32(hw, I40E_PFQF_HLUT(i), 0);
12082 } else if (vsi->type == I40E_VSI_SRIOV) {
12083 for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++)
12084 i40e_write_rx_ctl(hw, I40E_VFQF_HLUT1(i, vf_id), 0);
12085 } else {
12086 dev_err(&pf->pdev->dev, "Cannot set RSS LUT - invalid VSI type\n");
12087 }
12088 }
12089
12090 /**
12091  * i40e_set_features - set the netdev feature flags
12092  * @netdev: ptr to the netdev being adjusted
12093  * @features: the feature set that the stack is suggesting
12094  * Note: expects to be called while under rtnl_lock()
12095  **/
12096 static int i40e_set_features(struct net_device *netdev,
12097 netdev_features_t features)
12098 {
12099 struct i40e_netdev_priv *np = netdev_priv(netdev);
12100 struct i40e_vsi *vsi = np->vsi;
12101 struct i40e_pf *pf = vsi->back;
12102 bool need_reset;
12103
12104 if (features & NETIF_F_RXHASH && !(netdev->features & NETIF_F_RXHASH))
12105 i40e_pf_config_rss(pf);
12106 else if (!(features & NETIF_F_RXHASH) &&
12107 netdev->features & NETIF_F_RXHASH)
12108 i40e_clear_rss_lut(vsi);
12109
12110 if (features & NETIF_F_HW_VLAN_CTAG_RX)
12111 i40e_vlan_stripping_enable(vsi);
12112 else
12113 i40e_vlan_stripping_disable(vsi);
12114
12115 if (!(features & NETIF_F_HW_TC) && pf->num_cloud_filters) {
12116 dev_err(&pf->pdev->dev,
12117 "Offloaded tc filters active, can't turn hw_tc_offload off");
12118 return -EINVAL;
12119 }
12120
12121 if (!(features & NETIF_F_HW_L2FW_DOFFLOAD) && vsi->macvlan_cnt)
12122 i40e_del_all_macvlans(vsi);
12123
12124 need_reset = i40e_set_ntuple(pf, features);
12125
12126 if (need_reset)
12127 i40e_do_reset(pf, I40E_PF_RESET_FLAG, true);
12128
12129 return 0;
12130 }
12131
12132 /**
12133  * i40e_get_udp_port_idx - Lookup a possibly offloaded for Rx UDP port
12134  * @pf: board private structure
12135  * @port: The UDP port to look up
12136  *
12137  * Returns the index number or I40E_MAX_PF_UDP_OFFLOAD_PORTS if port not found
12138  **/
12139 static u8 i40e_get_udp_port_idx(struct i40e_pf *pf, u16 port)
12140 {
12141 u8 i;
12142
12143 for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
12144 /* Do not report ports with pending deletions as
12145  * being available.
12146  */
12147 if (!port && (pf->pending_udp_bitmap & BIT_ULL(i)))
12148 continue;
12149 if (pf->udp_ports[i].port == port)
12150 return i;
12151 }
12152
12153 return i;
12154 }
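/* Note: when the port is not tracked, the loop above runs off the end and
 * returns i == I40E_MAX_PF_UDP_OFFLOAD_PORTS, which callers treat as a
 * "not found" sentinel; passing port == 0 searches for a free slot instead
 * (see i40e_udp_tunnel_add/del below).
 */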
12155
12156 /**
12157  * i40e_udp_tunnel_add - Get notifications about UDP tunnel ports that come up
12158  * @netdev: This physical port's netdev
12159  * @ti: Tunnel endpoint information
12160  **/
12161 static void i40e_udp_tunnel_add(struct net_device *netdev,
12162 struct udp_tunnel_info *ti)
12163 {
12164 struct i40e_netdev_priv *np = netdev_priv(netdev);
12165 struct i40e_vsi *vsi = np->vsi;
12166 struct i40e_pf *pf = vsi->back;
12167 u16 port = ntohs(ti->port);
12168 u8 next_idx;
12169 u8 idx;
12170
12171 idx = i40e_get_udp_port_idx(pf, port);
12172
12173
12174 if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
12175 netdev_info(netdev, "port %d already offloaded\n", port);
12176 return;
12177 }
12178
12179
12180 next_idx = i40e_get_udp_port_idx(pf, 0);
12181
12182 if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
12183 netdev_info(netdev, "maximum number of offloaded UDP ports reached, not adding port %d\n",
12184 port);
12185 return;
12186 }
12187
12188 switch (ti->type) {
12189 case UDP_TUNNEL_TYPE_VXLAN:
12190 pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_VXLAN;
12191 break;
12192 case UDP_TUNNEL_TYPE_GENEVE:
12193 if (!(pf->hw_features & I40E_HW_GENEVE_OFFLOAD_CAPABLE))
12194 return;
12195 pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_NGE;
12196 break;
12197 default:
12198 return;
12199 }
12200
12201
12202 pf->udp_ports[next_idx].port = port;
12203 pf->udp_ports[next_idx].filter_index = I40E_UDP_PORT_INDEX_UNUSED;
12204 pf->pending_udp_bitmap |= BIT_ULL(next_idx);
12205 set_bit(__I40E_UDP_FILTER_SYNC_PENDING, pf->state);
12206 }
12207
12208 /**
12209  * i40e_udp_tunnel_del - Get notifications about UDP tunnel ports that go away
12210  * @netdev: This physical port's netdev
12211  * @ti: Tunnel endpoint information
12212  **/
12213 static void i40e_udp_tunnel_del(struct net_device *netdev,
12214 struct udp_tunnel_info *ti)
12215 {
12216 struct i40e_netdev_priv *np = netdev_priv(netdev);
12217 struct i40e_vsi *vsi = np->vsi;
12218 struct i40e_pf *pf = vsi->back;
12219 u16 port = ntohs(ti->port);
12220 u8 idx;
12221
12222 idx = i40e_get_udp_port_idx(pf, port);
12223
12224
12225 if (idx >= I40E_MAX_PF_UDP_OFFLOAD_PORTS)
12226 goto not_found;
12227
12228 switch (ti->type) {
12229 case UDP_TUNNEL_TYPE_VXLAN:
12230 if (pf->udp_ports[idx].type != I40E_AQC_TUNNEL_TYPE_VXLAN)
12231 goto not_found;
12232 break;
12233 case UDP_TUNNEL_TYPE_GENEVE:
12234 if (pf->udp_ports[idx].type != I40E_AQC_TUNNEL_TYPE_NGE)
12235 goto not_found;
12236 break;
12237 default:
12238 goto not_found;
12239 }
12240
12241 /* if port exists, set it to 0 (mark for deletion)
12242  * and make it pending
12243  */
12244 pf->udp_ports[idx].port = 0;
12245
12246 /* Toggle pending bit instead of setting it. This way if we are
12247  * deleting a port that has yet to be added we just clear the pending
12248  * bit instead of flipping it back on.
12249  */
12250 pf->pending_udp_bitmap ^= BIT_ULL(idx);
12251 set_bit(__I40E_UDP_FILTER_SYNC_PENDING, pf->state);
12252
12253 return;
12254 not_found:
12255 netdev_warn(netdev, "UDP port %d was not found, not deleting\n",
12256 port);
12257 }
12258
12259 static int i40e_get_phys_port_id(struct net_device *netdev,
12260 struct netdev_phys_item_id *ppid)
12261 {
12262 struct i40e_netdev_priv *np = netdev_priv(netdev);
12263 struct i40e_pf *pf = np->vsi->back;
12264 struct i40e_hw *hw = &pf->hw;
12265
12266 if (!(pf->hw_features & I40E_HW_PORT_ID_VALID))
12267 return -EOPNOTSUPP;
12268
12269 ppid->id_len = min_t(int, sizeof(hw->mac.port_addr), sizeof(ppid->id));
12270 memcpy(ppid->id, hw->mac.port_addr, ppid->id_len);
12271
12272 return 0;
12273 }
12274
12275 /**
12276  * i40e_ndo_fdb_add - add an entry to the hardware database
12277  * @ndm: the input from the stack
12278  * @tb: pointer to array of nladdr (unused)
12279  * @dev: the net device pointer
12280  * @addr: the MAC address entry being added
12281  * @vid: VLAN ID
12282  * @flags: instructions from stack about fdb operation
12283  */
12284 static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
12285 struct net_device *dev,
12286 const unsigned char *addr, u16 vid,
12287 u16 flags,
12288 struct netlink_ext_ack *extack)
12289 {
12290 struct i40e_netdev_priv *np = netdev_priv(dev);
12291 struct i40e_pf *pf = np->vsi->back;
12292 int err = 0;
12293
12294 if (!(pf->flags & I40E_FLAG_SRIOV_ENABLED))
12295 return -EOPNOTSUPP;
12296
12297 if (vid) {
12298 pr_info("%s: vlans aren't supported yet for dev_uc|mc_add()\n", dev->name);
12299 return -EINVAL;
12300 }
12301
12302 /* Hardware does not support aging addresses so if a
12303  * ndm_state is given only allow permanent addresses
12304  */
12305 if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
12306 netdev_info(dev, "FDB only supports static addresses\n");
12307 return -EINVAL;
12308 }
12309
12310 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
12311 err = dev_uc_add_excl(dev, addr);
12312 else if (is_multicast_ether_addr(addr))
12313 err = dev_mc_add_excl(dev, addr);
12314 else
12315 err = -EINVAL;
12316
12317 /* Only return duplicate errors if NLM_F_EXCL is set */
12318 if (err == -EEXIST && !(flags & NLM_F_EXCL))
12319 err = 0;
12320
12321 return err;
12322 }
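/* Example invocation from userspace (illustrative, standard iproute2
 * syntax; the MAC address is made up):
 *
 *	bridge fdb add 00:52:44:11:22:33 dev <pf-netdev>
 *
 * Only static (NUD_PERMANENT) entries are accepted, and only while SR-IOV
 * is enabled on the PF.
 */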
12323
12324 /**
12325  * i40e_ndo_bridge_setlink - Set the hardware bridge mode
12326  * @dev: the netdev being configured
12327  * @nlh: RTNL message
12328  * @flags: bridge flags
12329  * @extack: netlink extended ack
12330  *
12331  * Inserts a new hardware bridge if not already created and
12332  * enables the bridging mode requested (VEB or VEPA). If the
12333  * hardware bridge has already been inserted and the request
12334  * is to change the mode then that requires a PF reset to
12335  * allow rebuild of the components with required hardware
12336  * bridge mode enabled.
12337  *
12338  * Note: expects to be called while under rtnl_lock()
12339  **/
12340 static int i40e_ndo_bridge_setlink(struct net_device *dev,
12341 struct nlmsghdr *nlh,
12342 u16 flags,
12343 struct netlink_ext_ack *extack)
12344 {
12345 struct i40e_netdev_priv *np = netdev_priv(dev);
12346 struct i40e_vsi *vsi = np->vsi;
12347 struct i40e_pf *pf = vsi->back;
12348 struct i40e_veb *veb = NULL;
12349 struct nlattr *attr, *br_spec;
12350 int i, rem;
12351
12352
12353 if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
12354 return -EOPNOTSUPP;
12355
12356
12357 for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
12358 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
12359 veb = pf->veb[i];
12360 }
12361
12362 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
12363 if (!br_spec) return -EINVAL; /* no bridge attributes to apply */
12364 nla_for_each_nested(attr, br_spec, rem) {
12365 __u16 mode;
12366
12367 if (nla_type(attr) != IFLA_BRIDGE_MODE)
12368 continue;
12369
12370 mode = nla_get_u16(attr);
12371 if ((mode != BRIDGE_MODE_VEPA) &&
12372 (mode != BRIDGE_MODE_VEB))
12373 return -EINVAL;
12374
12375
12376 if (!veb) {
12377 veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
12378 vsi->tc_config.enabled_tc);
12379 if (veb) {
12380 veb->bridge_mode = mode;
12381 i40e_config_bridge_mode(veb);
12382 } else {
12383 /* No Bridge HW offload available */
12384 return -ENOENT;
12385 }
12386 break;
12387 } else if (mode != veb->bridge_mode) {
12388 /* Existing HW bridge but different mode needs reset */
12389 veb->bridge_mode = mode;
12390
12391 if (mode == BRIDGE_MODE_VEB)
12392 pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
12393 else
12394 pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
12395 i40e_do_reset(pf, I40E_PF_RESET_FLAG, true);
12396 break;
12397 }
12398 }
12399
12400 return 0;
12401 }
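/* Example invocation from userspace (illustrative, standard iproute2
 * syntax):
 *
 *	bridge link set dev <pf-netdev> hwmode vepa
 *
 * Changing an existing bridge between VEB and VEPA takes the PF reset
 * path above so the switch components are rebuilt in the new mode.
 */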
12402
12403
12404 /**
12405  * i40e_ndo_bridge_getlink - Get the hardware bridge mode
12406  * @skb: skb buff
12407  * @pid: process ID
12408  * @seq: RTNL message seq #
12409  * @dev: the netdev being configured
12410  * @filter_mask: unused
12411  * @nlflags: netlink flags passed in
12412  *
12413  * Return the bridge mode (VEB/VEPA)
12414  **/
12415 static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
12416 struct net_device *dev,
12417 u32 __always_unused filter_mask,
12418 int nlflags)
12419 {
12420 struct i40e_netdev_priv *np = netdev_priv(dev);
12421 struct i40e_vsi *vsi = np->vsi;
12422 struct i40e_pf *pf = vsi->back;
12423 struct i40e_veb *veb = NULL;
12424 int i;
12425
12426
12427 if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
12428 return -EOPNOTSUPP;
12429
12430
12431 for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
12432 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
12433 veb = pf->veb[i];
12434 }
12435
12436 if (!veb)
12437 return 0;
12438
12439 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode,
12440 0, 0, nlflags, filter_mask, NULL);
12441 }
12442
12443 /**
12444  * i40e_features_check - Validate encapsulated packet conforms to limits
12445  * @skb: skb buff
12446  * @dev: This physical port's netdev
12447  * @features: Offload features that the stack believes apply
12448  **/
12449 static netdev_features_t i40e_features_check(struct sk_buff *skb,
12450 struct net_device *dev,
12451 netdev_features_t features)
12452 {
12453 size_t len;
12454
12455 /* No point in doing any of this if neither checksum nor GSO is being
12456  * requested for this frame; we can rule out both by just checking for
12457  * CHECKSUM_PARTIAL. */
12458
12459 if (skb->ip_summed != CHECKSUM_PARTIAL)
12460 return features;
12461
12462 /* We cannot support GSO if the MSS is going to be less than
12463  * 64 bytes. If it is then we need to drop support for GSO.
12464  */
12465 if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64))
12466 features &= ~NETIF_F_GSO_MASK;
12467
12468 /* MACLEN can support at most 63 words */
12469 len = skb_network_header(skb) - skb->data;
12470 if (len & ~(63 * 2))
12471 goto out_err;
12472
12473 /* IPLEN and EIPLEN can support at most 127 dwords */
12474 len = skb_transport_header(skb) - skb_network_header(skb);
12475 if (len & ~(127 * 4))
12476 goto out_err;
12477
12478 if (skb->encapsulation) {
12479 /* L4TUNLEN can support 127 words */
12480 len = skb_inner_network_header(skb) - skb_transport_header(skb);
12481 if (len & ~(127 * 2))
12482 goto out_err;
12483
12484 /* IPLEN can support at most 127 dwords */
12485 len = skb_inner_transport_header(skb) -
12486 skb_inner_network_header(skb);
12487 if (len & ~(127 * 4))
12488 goto out_err;
12489 }
12490
12491 /* No need to validate L4LEN as TCP is the only protocol with a
12492  * flexible value and we support all possible values supported
12493  * by TCP, which is at most 15 dwords
12494  */
12495
12496 return features;
12497 out_err:
12498 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
12499 }
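/* Worked example of the mask checks (editorial illustration): the MAC
 * header test "len & ~(63 * 2)" passes only even lengths up to 126 bytes
 * (63 two-byte words); a plain 14-byte Ethernet header passes
 * (14 & ~126 == 0) while a 130-byte offset fails (130 & ~126 == 128),
 * clearing the checksum/GSO features for that skb.
 */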
12500
12501 /**
12502  * i40e_xdp_setup - add/remove an XDP program
12503  * @vsi: VSI to changed
12504  * @prog: XDP program
12505  **/
12506 static int i40e_xdp_setup(struct i40e_vsi *vsi,
12507 struct bpf_prog *prog)
12508 {
12509 int frame_size = vsi->netdev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
12510 struct i40e_pf *pf = vsi->back;
12511 struct bpf_prog *old_prog;
12512 bool need_reset;
12513 int i;
12514
12515 /* Don't allow frames that span over multiple buffers */
12516 if (frame_size > vsi->rx_buf_len)
12517 return -EINVAL;
12518
12519 if (!i40e_enabled_xdp_vsi(vsi) && !prog)
12520 return 0;
12521
12522 /* When turning XDP on->off/off->on we reset and rebuild the rings. */
12523 need_reset = (i40e_enabled_xdp_vsi(vsi) != !!prog);
12524
12525 if (need_reset)
12526 i40e_prep_for_reset(pf, true);
12527
12528 old_prog = xchg(&vsi->xdp_prog, prog);
12529
12530 if (need_reset) {
12531 if (!prog)
12532 /* Wait until ndo_xsk_wakeup completes. */
12533 synchronize_rcu();
12534 i40e_reset_and_rebuild(pf, true, true);
12535 }
12536
12537 for (i = 0; i < vsi->num_queue_pairs; i++)
12538 WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog);
12539
12540 if (old_prog)
12541 bpf_prog_put(old_prog);
12542
12543 /* Kick start the NAPI context if there is an AF_XDP socket open
12544  * on that queue id. This so that receiving will start.
12545  */
12546 if (need_reset && prog)
12547 for (i = 0; i < vsi->num_queue_pairs; i++)
12548 if (vsi->xdp_rings[i]->xsk_umem)
12549 (void)i40e_xsk_wakeup(vsi->netdev, i,
12550 XDP_WAKEUP_RX);
12551
12552 return 0;
12553 }
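/* Sizing note (illustrative numbers): frame_size above is the MTU plus
 * Ethernet overhead, e.g. MTU 1500 gives 1500 + 14 (ETH_HLEN) +
 * 4 (ETH_FCS_LEN) + 4 (VLAN_HLEN) = 1522 bytes, which must fit in one
 * Rx buffer (vsi->rx_buf_len) because XDP frames cannot span buffers.
 */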
12554
12555 /**
12556  * i40e_enter_busy_conf - Enters busy config state
12557  * @vsi: vsi
12558  *
12559  * Returns 0 on success, <0 for failure.
12560  **/
12561 static int i40e_enter_busy_conf(struct i40e_vsi *vsi)
12562 {
12563 struct i40e_pf *pf = vsi->back;
12564 int timeout = 50;
12565
12566 while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state)) {
12567 timeout--;
12568 if (!timeout)
12569 return -EBUSY;
12570 usleep_range(1000, 2000);
12571 }
12572
12573 return 0;
12574 }
12575
12576 /**
12577  * i40e_exit_busy_conf - Exits busy config state
12578  * @vsi: vsi
12579  **/
12580 static void i40e_exit_busy_conf(struct i40e_vsi *vsi)
12581 {
12582 struct i40e_pf *pf = vsi->back;
12583
12584 clear_bit(__I40E_CONFIG_BUSY, pf->state);
12585 }
12586
12587 /**
12588  * i40e_queue_pair_reset_stats - Resets all statistics for a queue pair
12589  * @vsi: vsi
12590  * @queue_pair: queue pair
12591  **/
12592 static void i40e_queue_pair_reset_stats(struct i40e_vsi *vsi, int queue_pair)
12593 {
12594 memset(&vsi->rx_rings[queue_pair]->rx_stats, 0,
12595 sizeof(vsi->rx_rings[queue_pair]->rx_stats));
12596 memset(&vsi->tx_rings[queue_pair]->stats, 0,
12597 sizeof(vsi->tx_rings[queue_pair]->stats));
12598 if (i40e_enabled_xdp_vsi(vsi)) {
12599 memset(&vsi->xdp_rings[queue_pair]->stats, 0,
12600 sizeof(vsi->xdp_rings[queue_pair]->stats));
12601 }
12602 }
12603
12604 /**
12605  * i40e_queue_pair_clean_rings - Cleans all the rings of a queue pair
12606  * @vsi: vsi
12607  * @queue_pair: queue pair
12608  **/
12609 static void i40e_queue_pair_clean_rings(struct i40e_vsi *vsi, int queue_pair)
12610 {
12611 i40e_clean_tx_ring(vsi->tx_rings[queue_pair]);
12612 if (i40e_enabled_xdp_vsi(vsi)) {
12613 /* Make sure that in-progress ndo_xdp_xmit calls are
12614  * completed.
12615  */
12616 synchronize_rcu();
12617 i40e_clean_tx_ring(vsi->xdp_rings[queue_pair]);
12618 }
12619 i40e_clean_rx_ring(vsi->rx_rings[queue_pair]);
12620 }
12621
12622 /**
12623  * i40e_queue_pair_toggle_napi - Enables/disables NAPI for a queue pair
12624  * @vsi: vsi
12625  * @queue_pair: queue pair
12626  * @enable: true for enable, false for disable
12627  **/
12628 static void i40e_queue_pair_toggle_napi(struct i40e_vsi *vsi, int queue_pair,
12629 bool enable)
12630 {
12631 struct i40e_ring *rxr = vsi->rx_rings[queue_pair];
12632 struct i40e_q_vector *q_vector = rxr->q_vector;
12633
12634 if (!vsi->netdev)
12635 return;
12636
12637 /* All rings in a qp belong to the same qvector. */
12638 if (q_vector->rx.ring || q_vector->tx.ring) {
12639 if (enable)
12640 napi_enable(&q_vector->napi);
12641 else
12642 napi_disable(&q_vector->napi);
12643 }
12644 }
12645
12646 /**
12647  * i40e_queue_pair_toggle_rings - Enables/disables all rings for a queue pair
12648  * @vsi: vsi
12649  * @queue_pair: queue pair
12650  * @enable: true for enable, false for disable
12651  *
12652  * Returns 0 on success, <0 on failure.
12653  **/
12654 static int i40e_queue_pair_toggle_rings(struct i40e_vsi *vsi, int queue_pair,
12655 bool enable)
12656 {
12657 struct i40e_pf *pf = vsi->back;
12658 int pf_q, ret = 0;
12659
12660 pf_q = vsi->base_queue + queue_pair;
12661 ret = i40e_control_wait_tx_q(vsi->seid, pf, pf_q,
12662 false , enable);
12663 if (ret) {
12664 dev_info(&pf->pdev->dev,
12665 "VSI seid %d Tx ring %d %sable timeout\n",
12666 vsi->seid, pf_q, (enable ? "en" : "dis"));
12667 return ret;
12668 }
12669
12670 i40e_control_rx_q(pf, pf_q, enable);
12671 ret = i40e_pf_rxq_wait(pf, pf_q, enable);
12672 if (ret) {
12673 dev_info(&pf->pdev->dev,
12674 "VSI seid %d Rx ring %d %sable timeout\n",
12675 vsi->seid, pf_q, (enable ? "en" : "dis"));
12676 return ret;
12677 }
12678
12679 /* Due to HW errata, on Rx disable only, the register can
12680  * indicate done before it really is. Needs 50ms to be sure
12681  */
12682 if (!enable)
12683 mdelay(50);
12684
12685 if (!i40e_enabled_xdp_vsi(vsi))
12686 return ret;
12687
12688 ret = i40e_control_wait_tx_q(vsi->seid, pf,
12689 pf_q + vsi->alloc_queue_pairs,
12690 true , enable);
12691 if (ret) {
12692 dev_info(&pf->pdev->dev,
12693 "VSI seid %d XDP Tx ring %d %sable timeout\n",
12694 vsi->seid, pf_q, (enable ? "en" : "dis"));
12695 }
12696
12697 return ret;
12698 }
12699
12700 /**
12701  * i40e_queue_pair_enable_irq - Enables interrupts for a queue pair
12702  * @vsi: vsi
12703  * @queue_pair: queue pair
12704  **/
12705 static void i40e_queue_pair_enable_irq(struct i40e_vsi *vsi, int queue_pair)
12706 {
12707 struct i40e_ring *rxr = vsi->rx_rings[queue_pair];
12708 struct i40e_pf *pf = vsi->back;
12709 struct i40e_hw *hw = &pf->hw;
12710
12711 /* All rings in a qp belong to the same qvector. */
12712 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
12713 i40e_irq_dynamic_enable(vsi, rxr->q_vector->v_idx);
12714 else
12715 i40e_irq_dynamic_enable_icr0(pf);
12716
12717 i40e_flush(hw);
12718 }
12719
12720 /**
12721  * i40e_queue_pair_disable_irq - Disables interrupts for a queue pair
12722  * @vsi: vsi
12723  * @queue_pair: queue pair
12724  **/
12725 static void i40e_queue_pair_disable_irq(struct i40e_vsi *vsi, int queue_pair)
12726 {
12727 struct i40e_ring *rxr = vsi->rx_rings[queue_pair];
12728 struct i40e_pf *pf = vsi->back;
12729 struct i40e_hw *hw = &pf->hw;
12730
12731 /* For simplicity, instead of removing the qp interrupt causes
12732  * from the interrupt linked list, we simply disable the interrupt, and
12733  * leave the list intact.
12734  *
12735  * All rings in a qp belong to the same qvector.
12736  */
12737 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
12738 u32 intpf = vsi->base_vector + rxr->q_vector->v_idx;
12739
12740 wr32(hw, I40E_PFINT_DYN_CTLN(intpf - 1), 0);
12741 i40e_flush(hw);
12742 synchronize_irq(pf->msix_entries[intpf].vector);
12743 } else {
12744 /* Legacy and MSI mode - this stops all interrupt handling */
12745 wr32(hw, I40E_PFINT_ICR0_ENA, 0);
12746 wr32(hw, I40E_PFINT_DYN_CTL0, 0);
12747 i40e_flush(hw);
12748 synchronize_irq(pf->pdev->irq);
12749 }
12750 }
12751
12752 /**
12753  * i40e_queue_pair_disable - Disables a queue pair
12754  * @vsi: vsi
12755  * @queue_pair: queue pair
12756  *
12757  * Returns 0 on success, <0 on failure.
12758  **/
12759 int i40e_queue_pair_disable(struct i40e_vsi *vsi, int queue_pair)
12760 {
12761 int err;
12762
12763 err = i40e_enter_busy_conf(vsi);
12764 if (err)
12765 return err;
12766
12767 i40e_queue_pair_disable_irq(vsi, queue_pair);
12768 err = i40e_queue_pair_toggle_rings(vsi, queue_pair, false );
12769 i40e_queue_pair_toggle_napi(vsi, queue_pair, false );
12770 i40e_queue_pair_clean_rings(vsi, queue_pair);
12771 i40e_queue_pair_reset_stats(vsi, queue_pair);
12772
12773 return err;
12774 }
12775
12776 /**
12777  * i40e_queue_pair_enable - Enables a queue pair
12778  * @vsi: vsi
12779  * @queue_pair: queue pair
12780  *
12781  * Returns 0 on success, <0 on failure.
12782  **/
12783 int i40e_queue_pair_enable(struct i40e_vsi *vsi, int queue_pair)
12784 {
12785 int err;
12786
12787 err = i40e_configure_tx_ring(vsi->tx_rings[queue_pair]);
12788 if (err)
12789 return err;
12790
12791 if (i40e_enabled_xdp_vsi(vsi)) {
12792 err = i40e_configure_tx_ring(vsi->xdp_rings[queue_pair]);
12793 if (err)
12794 return err;
12795 }
12796
12797 err = i40e_configure_rx_ring(vsi->rx_rings[queue_pair]);
12798 if (err)
12799 return err;
12800
12801 err = i40e_queue_pair_toggle_rings(vsi, queue_pair, true );
12802 i40e_queue_pair_toggle_napi(vsi, queue_pair, true );
12803 i40e_queue_pair_enable_irq(vsi, queue_pair);
12804
12805 i40e_exit_busy_conf(vsi);
12806
12807 return err;
12808 }
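/* Usage sketch (hedged, modelled on the AF_XDP path in i40e_xsk.c):
 * the disable/enable pair brackets per-queue reconfiguration while the
 * interface stays up, roughly:
 *
 *	if (netif_running(vsi->netdev)) {
 *		err = i40e_queue_pair_disable(vsi, qid);
 *		if (err)
 *			return err;
 *	}
 *	... swap per-queue state (e.g. the UMEM) ...
 *	if (netif_running(vsi->netdev))
 *		err = i40e_queue_pair_enable(vsi, qid);
 */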
12809
12810 /**
12811  * i40e_xdp - implements ndo_bpf for i40e
12812  * @dev: netdevice
12813  * @xdp: XDP command
12814  **/
12815 static int i40e_xdp(struct net_device *dev,
12816 struct netdev_bpf *xdp)
12817 {
12818 struct i40e_netdev_priv *np = netdev_priv(dev);
12819 struct i40e_vsi *vsi = np->vsi;
12820
12821 if (vsi->type != I40E_VSI_MAIN)
12822 return -EINVAL;
12823
12824 switch (xdp->command) {
12825 case XDP_SETUP_PROG:
12826 return i40e_xdp_setup(vsi, xdp->prog);
12827 case XDP_QUERY_PROG:
12828 xdp->prog_id = vsi->xdp_prog ? vsi->xdp_prog->aux->id : 0;
12829 return 0;
12830 case XDP_SETUP_XSK_UMEM:
12831 return i40e_xsk_umem_setup(vsi, xdp->xsk.umem,
12832 xdp->xsk.queue_id);
12833 default:
12834 return -EINVAL;
12835 }
12836 }
12837
12838 static const struct net_device_ops i40e_netdev_ops = {
12839 .ndo_open = i40e_open,
12840 .ndo_stop = i40e_close,
12841 .ndo_start_xmit = i40e_lan_xmit_frame,
12842 .ndo_get_stats64 = i40e_get_netdev_stats_struct,
12843 .ndo_set_rx_mode = i40e_set_rx_mode,
12844 .ndo_validate_addr = eth_validate_addr,
12845 .ndo_set_mac_address = i40e_set_mac,
12846 .ndo_change_mtu = i40e_change_mtu,
12847 .ndo_do_ioctl = i40e_ioctl,
12848 .ndo_tx_timeout = i40e_tx_timeout,
12849 .ndo_vlan_rx_add_vid = i40e_vlan_rx_add_vid,
12850 .ndo_vlan_rx_kill_vid = i40e_vlan_rx_kill_vid,
12851 #ifdef CONFIG_NET_POLL_CONTROLLER
12852 .ndo_poll_controller = i40e_netpoll,
12853 #endif
12854 .ndo_setup_tc = __i40e_setup_tc,
12855 .ndo_set_features = i40e_set_features,
12856 .ndo_set_vf_mac = i40e_ndo_set_vf_mac,
12857 .ndo_set_vf_vlan = i40e_ndo_set_vf_port_vlan,
12858 .ndo_set_vf_rate = i40e_ndo_set_vf_bw,
12859 .ndo_get_vf_config = i40e_ndo_get_vf_config,
12860 .ndo_set_vf_link_state = i40e_ndo_set_vf_link_state,
12861 .ndo_set_vf_spoofchk = i40e_ndo_set_vf_spoofchk,
12862 .ndo_set_vf_trust = i40e_ndo_set_vf_trust,
12863 .ndo_udp_tunnel_add = i40e_udp_tunnel_add,
12864 .ndo_udp_tunnel_del = i40e_udp_tunnel_del,
12865 .ndo_get_phys_port_id = i40e_get_phys_port_id,
12866 .ndo_fdb_add = i40e_ndo_fdb_add,
12867 .ndo_features_check = i40e_features_check,
12868 .ndo_bridge_getlink = i40e_ndo_bridge_getlink,
12869 .ndo_bridge_setlink = i40e_ndo_bridge_setlink,
12870 .ndo_bpf = i40e_xdp,
12871 .ndo_xdp_xmit = i40e_xdp_xmit,
12872 .ndo_xsk_wakeup = i40e_xsk_wakeup,
12873 .ndo_dfwd_add_station = i40e_fwd_add,
12874 .ndo_dfwd_del_station = i40e_fwd_del,
12875 };
12876
12877 /**
12878  * i40e_config_netdev - Setup the netdev flags
12879  * @vsi: the VSI being configured
12880  *
12881  * Returns 0 on success, negative value on failure
12882  **/
12883 static int i40e_config_netdev(struct i40e_vsi *vsi)
12884 {
12885 struct i40e_pf *pf = vsi->back;
12886 struct i40e_hw *hw = &pf->hw;
12887 struct i40e_netdev_priv *np;
12888 struct net_device *netdev;
12889 u8 broadcast[ETH_ALEN];
12890 u8 mac_addr[ETH_ALEN];
12891 int etherdev_size;
12892 netdev_features_t hw_enc_features;
12893 netdev_features_t hw_features;
12894
12895 etherdev_size = sizeof(struct i40e_netdev_priv);
12896 netdev = alloc_etherdev_mq(etherdev_size, vsi->alloc_queue_pairs);
12897 if (!netdev)
12898 return -ENOMEM;
12899
12900 vsi->netdev = netdev;
12901 np = netdev_priv(netdev);
12902 np->vsi = vsi;
12903
12904 hw_enc_features = NETIF_F_SG |
12905 NETIF_F_IP_CSUM |
12906 NETIF_F_IPV6_CSUM |
12907 NETIF_F_HIGHDMA |
12908 NETIF_F_SOFT_FEATURES |
12909 NETIF_F_TSO |
12910 NETIF_F_TSO_ECN |
12911 NETIF_F_TSO6 |
12912 NETIF_F_GSO_GRE |
12913 NETIF_F_GSO_GRE_CSUM |
12914 NETIF_F_GSO_PARTIAL |
12915 NETIF_F_GSO_IPXIP4 |
12916 NETIF_F_GSO_IPXIP6 |
12917 NETIF_F_GSO_UDP_TUNNEL |
12918 NETIF_F_GSO_UDP_TUNNEL_CSUM |
12919 NETIF_F_SCTP_CRC |
12920 NETIF_F_RXHASH |
12921 NETIF_F_RXCSUM |
12922 0;
12923
12924 if (!(pf->hw_features & I40E_HW_OUTER_UDP_CSUM_CAPABLE))
12925 netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
12926
12927 netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
12928
12929 netdev->hw_enc_features |= hw_enc_features;
12930
12931 /* record features VLANs can make use of */
12932 netdev->vlan_features |= hw_enc_features | NETIF_F_TSO_MANGLEID;
12933
12934 /* enable macvlan offloads for the netdev */
12935 netdev->hw_features |= NETIF_F_HW_L2FW_DOFFLOAD;
12936
12937 hw_features = hw_enc_features |
12938 NETIF_F_HW_VLAN_CTAG_TX |
12939 NETIF_F_HW_VLAN_CTAG_RX;
12940
12941 if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
12942 hw_features |= NETIF_F_NTUPLE | NETIF_F_HW_TC;
12943
12944 netdev->hw_features |= hw_features;
12945
12946 netdev->features |= hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
12947 netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
12948
12949 if (vsi->type == I40E_VSI_MAIN) {
12950 SET_NETDEV_DEV(netdev, &pf->pdev->dev);
12951 ether_addr_copy(mac_addr, hw->mac.perm_addr);
12952
12953 /* The following steps are necessary for two reasons. First, some
12954  * older NVM configurations load a default MAC-VLAN filter that will
12955  * accept any tagged packet, and we want to replace this with a
12956  * normal filter. Additionally, it is possible that our MAC address
12957  * was provided by the platform using Open Firmware or similar.
12958  * Thus, we need to remove the default filter and install one (or
12959  * simply update the state of the existing filter) that matches our
12960  * expected configuration, then add in the MAC address.
12961  */
12962 i40e_rm_default_mac_filter(vsi, mac_addr);
12963 spin_lock_bh(&vsi->mac_filter_hash_lock);
12964 i40e_add_mac_filter(vsi, mac_addr);
12965 spin_unlock_bh(&vsi->mac_filter_hash_lock);
12966 } else {
12967
12968
12969
12970
12971
12972 snprintf(netdev->name, IFNAMSIZ, "%.*sv%%d",
12973 IFNAMSIZ - 4,
12974 pf->vsi[pf->lan_vsi]->netdev->name);
12975 eth_random_addr(mac_addr);
12976
12977 spin_lock_bh(&vsi->mac_filter_hash_lock);
12978 i40e_add_mac_filter(vsi, mac_addr);
12979 spin_unlock_bh(&vsi->mac_filter_hash_lock);
12980 }
12981
12982 /* Add the broadcast filter so that we initially will receive
12983  * broadcast packets. Note that when a new VLAN is first added the
12984  * driver will convert all filters marked I40E_VLAN_ANY into VLAN
12985  * specific filters as part of transitioning into "vlan" operation.
12986  * When more VLANs are added, the driver will copy each existing MAC
12987  * filter and add it for the new VLAN.
12988  *
12989  * Broadcast filters are handled specially by
12990  * i40e_sync_filters_subtask, as the driver must set the broadcast
12991  * promiscuous flag instead of adding a MAC filter for these
12992  * addresses; this ensures broadcast traffic is received while the
12993  * filter table stays small.
12994  */
12995 eth_broadcast_addr(broadcast);
12996 spin_lock_bh(&vsi->mac_filter_hash_lock);
12997 i40e_add_mac_filter(vsi, broadcast);
12998 spin_unlock_bh(&vsi->mac_filter_hash_lock);
12999
13000 ether_addr_copy(netdev->dev_addr, mac_addr);
13001 ether_addr_copy(netdev->perm_addr, mac_addr);
13002
13003 /* i40iw_net_event() reads 16 bytes from neigh->primary_key */
13004 netdev->neigh_priv_len = sizeof(u32) * 4;
13005
13006 netdev->priv_flags |= IFF_UNICAST_FLT;
13007 netdev->priv_flags |= IFF_SUPP_NOFCS;
13008
13009 i40e_vsi_config_netdev_tc(vsi, vsi->tc_config.enabled_tc);
13010
13011 netdev->netdev_ops = &i40e_netdev_ops;
13012 netdev->watchdog_timeo = 5 * HZ;
13013 i40e_set_ethtool_ops(netdev);
13014
13015 /* MTU range: 68 - 9706 */
13016 netdev->min_mtu = ETH_MIN_MTU;
13017 netdev->max_mtu = I40E_MAX_RXBUFFER - I40E_PACKET_HDR_PAD;
13018
13019 return 0;
13020 }
13021
13022 /**
13023  * i40e_vsi_delete - Delete a VSI from the switch
13024  * @vsi: the VSI being removed
13025  *
13026  * Returns 0 on success, negative value on failure
13027  **/
13028 static void i40e_vsi_delete(struct i40e_vsi *vsi)
13029 {
13030 /* remove default VSI is not allowed */
13031 if (vsi == vsi->back->vsi[vsi->back->lan_vsi])
13032 return;
13033
13034 i40e_aq_delete_element(&vsi->back->hw, vsi->seid, NULL);
13035 }
13036
13037 /**
13038  * i40e_is_vsi_uplink_mode_veb - Check if the VSI's uplink bridge mode is VEB
13039  * @vsi: the VSI being queried
13040  *
13041  * Returns 1 if HW bridge mode is VEB and return 0 in case of VEPA mode
13042  **/
13043 int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi)
13044 {
13045 struct i40e_veb *veb;
13046 struct i40e_pf *pf = vsi->back;
13047
13048 /* Uplink is not a bridge so default to VEB */
13049 if (vsi->veb_idx >= I40E_MAX_VEB)
13050 return 1;
13051
13052 veb = pf->veb[vsi->veb_idx];
13053 if (!veb) {
13054 dev_info(&pf->pdev->dev,
13055 "There is no veb associated with the bridge\n");
13056 return -ENOENT;
13057 }
13058
13059 /* Uplink is a bridge in VEPA mode */
13060 if (veb->bridge_mode & BRIDGE_MODE_VEPA) {
13061 return 0;
13062 } else {
13063 /* Uplink is a bridge in VEB mode */
13064 return 1;
13065 }
13066
13067 /* VEPA is now default bridge, so return 0 */
13068 return 0;
13069 }
13070
13071
13072 /**
13073  * i40e_add_vsi - Add a VSI to the switch
13074  * @vsi: the VSI being configured
13075  *
13076  * This initializes a VSI context depending on the VSI type to be added and
13077  * passes it down to the add_vsi aq command.
13078  **/
13078 static int i40e_add_vsi(struct i40e_vsi *vsi)
13079 {
13080 int ret = -ENODEV;
13081 struct i40e_pf *pf = vsi->back;
13082 struct i40e_hw *hw = &pf->hw;
13083 struct i40e_vsi_context ctxt;
13084 struct i40e_mac_filter *f;
13085 struct hlist_node *h;
13086 int bkt;
13087
13088 u8 enabled_tc = 0x1;
13089 int f_count = 0;
13090
13091 memset(&ctxt, 0, sizeof(ctxt));
13092 switch (vsi->type) {
13093 case I40E_VSI_MAIN:
13094 /* The PF's main VSI is already setup as part of the
13095  * device initialization, so we'll not bother with
13096  * the add_vsi call, but we will retrieve the current
13097  * VSI context.
13098  */
13099 ctxt.seid = pf->main_vsi_seid;
13100 ctxt.pf_num = pf->hw.pf_id;
13101 ctxt.vf_num = 0;
13102 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
13103 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
13104 if (ret) {
13105 dev_info(&pf->pdev->dev,
13106 "couldn't get PF vsi config, err %s aq_err %s\n",
13107 i40e_stat_str(&pf->hw, ret),
13108 i40e_aq_str(&pf->hw,
13109 pf->hw.aq.asq_last_status));
13110 return -ENOENT;
13111 }
13112 vsi->info = ctxt.info;
13113 vsi->info.valid_sections = 0;
13114
13115 vsi->seid = ctxt.seid;
13116 vsi->id = ctxt.vsi_number;
13117
13118 enabled_tc = i40e_pf_get_tc_map(pf);
13119
13120 /* Source pruning is enabled by default, so the flag is
13121  * negative logic - if it's set, we need to fiddle with
13122  * the VSI to disable source pruning.
13123  */
13124 if (pf->flags & I40E_FLAG_SOURCE_PRUNING_DISABLED) {
13125 memset(&ctxt, 0, sizeof(ctxt));
13126 ctxt.seid = pf->main_vsi_seid;
13127 ctxt.pf_num = pf->hw.pf_id;
13128 ctxt.vf_num = 0;
13129 ctxt.info.valid_sections |=
13130 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
13131 ctxt.info.switch_id =
13132 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB);
13133 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
13134 if (ret) {
13135 dev_info(&pf->pdev->dev,
13136 "update vsi failed, err %s aq_err %s\n",
13137 i40e_stat_str(&pf->hw, ret),
13138 i40e_aq_str(&pf->hw,
13139 pf->hw.aq.asq_last_status));
13140 ret = -ENOENT;
13141 goto err;
13142 }
13143 }
13144
13145
13146 if ((pf->flags & I40E_FLAG_MFP_ENABLED) &&
13147 !(pf->hw.func_caps.iscsi)) {
13148 memset(&ctxt, 0, sizeof(ctxt));
13149 ctxt.seid = pf->main_vsi_seid;
13150 ctxt.pf_num = pf->hw.pf_id;
13151 ctxt.vf_num = 0;
13152 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
13153 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
13154 if (ret) {
13155 dev_info(&pf->pdev->dev,
13156 "update vsi failed, err %s aq_err %s\n",
13157 i40e_stat_str(&pf->hw, ret),
13158 i40e_aq_str(&pf->hw,
13159 pf->hw.aq.asq_last_status));
13160 ret = -ENOENT;
13161 goto err;
13162 }
13163
13164 i40e_vsi_update_queue_map(vsi, &ctxt);
13165 vsi->info.valid_sections = 0;
13166 } else {
13167 /* Default/Main VSI is only enabled for TC0
13168  * reconfigure it to enable all TCs that are
13169  * available on this port in SFP mode.
13170  * For MFP case the iSCSI PF would use this
13171  * flow to enable LAN+iSCSI TC.
13172  */
13173 ret = i40e_vsi_config_tc(vsi, enabled_tc);
13174 if (ret) {
13175 /* Single TC condition is not fatal,
13176  * message and continue
13177  */
13178 dev_info(&pf->pdev->dev,
13179 "failed to configure TCs for main VSI tc_map 0x%08x, err %s aq_err %s\n",
13180 enabled_tc,
13181 i40e_stat_str(&pf->hw, ret),
13182 i40e_aq_str(&pf->hw,
13183 pf->hw.aq.asq_last_status));
13184 }
13185 }
13186 break;
13187
13188 case I40E_VSI_FDIR:
13189 ctxt.pf_num = hw->pf_id;
13190 ctxt.vf_num = 0;
13191 ctxt.uplink_seid = vsi->uplink_seid;
13192 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
13193 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
13194 if ((pf->flags & I40E_FLAG_VEB_MODE_ENABLED) &&
13195 (i40e_is_vsi_uplink_mode_veb(vsi))) {
13196 ctxt.info.valid_sections |=
13197 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
13198 ctxt.info.switch_id =
13199 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
13200 }
13201 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
13202 break;
13203
13204 case I40E_VSI_VMDQ2:
13205 ctxt.pf_num = hw->pf_id;
13206 ctxt.vf_num = 0;
13207 ctxt.uplink_seid = vsi->uplink_seid;
13208 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
13209 ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;
13210
13211 /* This VSI is connected to VEB so the switch_id
13212  * should be set to zero by default.
13213  */
13214 if (i40e_is_vsi_uplink_mode_veb(vsi)) {
13215 ctxt.info.valid_sections |=
13216 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
13217 ctxt.info.switch_id =
13218 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
13219 }
13220
13221
13222 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
13223 break;
13224
13225 case I40E_VSI_SRIOV:
13226 ctxt.pf_num = hw->pf_id;
13227 ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id;
13228 ctxt.uplink_seid = vsi->uplink_seid;
13229 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
13230 ctxt.flags = I40E_AQ_VSI_TYPE_VF;
13231
13232 /* This VSI is connected to VEB so the switch_id
13233  * should be set to zero by default.
13234  */
13235 if (i40e_is_vsi_uplink_mode_veb(vsi)) {
13236 ctxt.info.valid_sections |=
13237 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
13238 ctxt.info.switch_id =
13239 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
13240 }
13241
13242 if (vsi->back->flags & I40E_FLAG_IWARP_ENABLED) {
13243 ctxt.info.valid_sections |=
13244 cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
13245 ctxt.info.queueing_opt_flags |=
13246 (I40E_AQ_VSI_QUE_OPT_TCP_ENA |
13247 I40E_AQ_VSI_QUE_OPT_RSS_LUT_VSI);
13248 }
13249
13250 ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
13251 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
13252 if (pf->vf[vsi->vf_id].spoofchk) {
13253 ctxt.info.valid_sections |=
13254 cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
13255 ctxt.info.sec_flags |=
13256 (I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK |
13257 I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK);
13258 }
13259
13260 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
13261 break;
13262
13263 case I40E_VSI_IWARP:
13264 /* send down message to iWARP */
13265 break;
13266
13267 default:
13268 return -ENODEV;
13269 }
13270
13271 if (vsi->type != I40E_VSI_MAIN) {
13272 ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
13273 if (ret) {
13274 dev_info(&vsi->back->pdev->dev,
13275 "add vsi failed, err %s aq_err %s\n",
13276 i40e_stat_str(&pf->hw, ret),
13277 i40e_aq_str(&pf->hw,
13278 pf->hw.aq.asq_last_status));
13279 ret = -ENOENT;
13280 goto err;
13281 }
13282 vsi->info = ctxt.info;
13283 vsi->info.valid_sections = 0;
13284 vsi->seid = ctxt.seid;
13285 vsi->id = ctxt.vsi_number;
13286 }
13287
13288 vsi->active_filters = 0;
13289 clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
13290 spin_lock_bh(&vsi->mac_filter_hash_lock);
13291
13292 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
13293 f->state = I40E_FILTER_NEW;
13294 f_count++;
13295 }
13296 spin_unlock_bh(&vsi->mac_filter_hash_lock);
13297
13298 if (f_count) {
13299 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
13300 set_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state);
13301 }
13302
13303
13304 ret = i40e_vsi_get_bw_info(vsi);
13305 if (ret) {
13306 dev_info(&pf->pdev->dev,
13307 "couldn't get vsi bw info, err %s aq_err %s\n",
13308 i40e_stat_str(&pf->hw, ret),
13309 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
13310 /* VSI is already added so not tearing that up */
13311 ret = 0;
13312 }
13313
13314 err:
13315 return ret;
13316 }
13317
13318 /**
13319  * i40e_vsi_release - Delete a VSI and free its resources
13320  * @vsi: the VSI being removed
13321  *
13322  * Returns 0 on success or < 0 on error
13323  **/
13324 int i40e_vsi_release(struct i40e_vsi *vsi)
13325 {
13326 struct i40e_mac_filter *f;
13327 struct hlist_node *h;
13328 struct i40e_veb *veb = NULL;
13329 struct i40e_pf *pf;
13330 u16 uplink_seid;
13331 int i, n, bkt;
13332
13333 pf = vsi->back;
13334
13335 /* release of a VEB-owner or last VSI is not allowed */
13336 if (vsi->flags & I40E_VSI_FLAG_VEB_OWNER) {
13337 dev_info(&pf->pdev->dev, "VSI %d has existing VEB %d\n",
13338 vsi->seid, vsi->uplink_seid);
13339 return -ENODEV;
13340 }
13341 if (vsi == pf->vsi[pf->lan_vsi] &&
13342 !test_bit(__I40E_DOWN, pf->state)) {
13343 dev_info(&pf->pdev->dev, "Can't remove PF VSI\n");
13344 return -ENODEV;
13345 }
13346
13347 uplink_seid = vsi->uplink_seid;
13348 if (vsi->type != I40E_VSI_SRIOV) {
13349 if (vsi->netdev_registered) {
13350 vsi->netdev_registered = false;
13351 if (vsi->netdev) {
13352 /* results in a call to i40e_close() */
13353 unregister_netdev(vsi->netdev);
13354 }
13355 } else {
13356 i40e_vsi_close(vsi);
13357 }
13358 i40e_vsi_disable_irq(vsi);
13359 }
13360
13361 spin_lock_bh(&vsi->mac_filter_hash_lock);
13362
13363
13364 if (vsi->netdev) {
13365 __dev_uc_unsync(vsi->netdev, NULL);
13366 __dev_mc_unsync(vsi->netdev, NULL);
13367 }
13368
13369
13370 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist)
13371 __i40e_del_filter(vsi, f);
13372
13373 spin_unlock_bh(&vsi->mac_filter_hash_lock);
13374
13375 i40e_sync_vsi_filters(vsi);
13376
13377 i40e_vsi_delete(vsi);
13378 i40e_vsi_free_q_vectors(vsi);
13379 if (vsi->netdev) {
13380 free_netdev(vsi->netdev);
13381 vsi->netdev = NULL;
13382 }
13383 i40e_vsi_clear_rings(vsi);
13384 i40e_vsi_clear(vsi);
13385
13386 /* If this was the last thing on the VEB, except for the
13387  * controlling VSI, remove the VEB, which puts the controlling
13388  * VSI onto the next level down in the switch.
13389  *
13390  * Well, okay, there's one more exception here: don't remove
13391  * the orphan VEBs yet.  We'll wait for an explicit remove request
13392  * from up the network stack.
13393  */
13394 for (n = 0, i = 0; i < pf->num_alloc_vsi; i++) {
13395 if (pf->vsi[i] &&
13396 pf->vsi[i]->uplink_seid == uplink_seid &&
13397 (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
13398 n++;
13399 }
13400 }
13401 for (i = 0; i < I40E_MAX_VEB; i++) {
13402 if (!pf->veb[i])
13403 continue;
13404 if (pf->veb[i]->uplink_seid == uplink_seid)
13405 n++;
13406 if (pf->veb[i]->seid == uplink_seid)
13407 veb = pf->veb[i];
13408 }
13409 if (n == 0 && veb && veb->uplink_seid != 0)
13410 i40e_veb_release(veb);
13411
13412 return 0;
13413 }
13414
13415 /**
13416  * i40e_vsi_setup_vectors - Set up the q_vectors for the given VSI
13417  * @vsi: ptr to the VSI
13418  *
13419  * This should only be called after i40e_vsi_mem_alloc() which allocates the
13420  * corresponding SW VSI structure and initializes num_queue_pairs for the
13421  * newly allocated VSI.
13422  *
13423  * Returns 0 on success or negative on failure
13424  **/
13425 static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi)
13426 {
13427 int ret = -ENOENT;
13428 struct i40e_pf *pf = vsi->back;
13429
13430 if (vsi->q_vectors[0]) {
13431 dev_info(&pf->pdev->dev, "VSI %d has existing q_vectors\n",
13432 vsi->seid);
13433 return -EEXIST;
13434 }
13435
13436 if (vsi->base_vector) {
13437 dev_info(&pf->pdev->dev, "VSI %d has non-zero base vector %d\n",
13438 vsi->seid, vsi->base_vector);
13439 return -EEXIST;
13440 }
13441
13442 ret = i40e_vsi_alloc_q_vectors(vsi);
13443 if (ret) {
13444 dev_info(&pf->pdev->dev,
13445 "failed to allocate %d q_vector for VSI %d, ret=%d\n",
13446 vsi->num_q_vectors, vsi->seid, ret);
13447 vsi->num_q_vectors = 0;
13448 goto vector_setup_out;
13449 }
13450
13451 /* In Legacy mode, we do not have to get any other vector since we
13452  * piggyback on the misc/ICR0 for queue interrupts.
13453  */
13454 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
13455 return ret;
13456 if (vsi->num_q_vectors)
13457 vsi->base_vector = i40e_get_lump(pf, pf->irq_pile,
13458 vsi->num_q_vectors, vsi->idx);
13459 if (vsi->base_vector < 0) {
13460 dev_info(&pf->pdev->dev,
13461 "failed to get tracking for %d vectors for VSI %d, err=%d\n",
13462 vsi->num_q_vectors, vsi->seid, vsi->base_vector);
13463 i40e_vsi_free_q_vectors(vsi);
13464 ret = -ENOENT;
13465 goto vector_setup_out;
13466 }
13467
13468 vector_setup_out:
13469 return ret;
13470 }
13471
13472 /**
13473  * i40e_vsi_reinit_setup - return and reallocate resources for a VSI
13474  * @vsi: pointer to the vsi.
13475  *
13476  * This re-allocates a vsi's queue resources.
13477  *
13478  * Returns pointer to the successfully allocated and configured VSI sw struct
13479  * on success, otherwise returns NULL on failure.
13480  **/
13481 static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi)
13482 {
13483 u16 alloc_queue_pairs;
13484 struct i40e_pf *pf;
13485 u8 enabled_tc;
13486 int ret;
13487
13488 if (!vsi)
13489 return NULL;
13490
13491 pf = vsi->back;
13492
13493 i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
13494 i40e_vsi_clear_rings(vsi);
13495
13496 i40e_vsi_free_arrays(vsi, false);
13497 i40e_set_num_rings_in_vsi(vsi);
13498 ret = i40e_vsi_alloc_arrays(vsi, false);
13499 if (ret)
13500 goto err_vsi;
13501
13502 alloc_queue_pairs = vsi->alloc_queue_pairs *
13503 (i40e_enabled_xdp_vsi(vsi) ? 2 : 1);
13504
13505 ret = i40e_get_lump(pf, pf->qp_pile, alloc_queue_pairs, vsi->idx);
13506 if (ret < 0) {
13507 dev_info(&pf->pdev->dev,
13508 "failed to get tracking for %d queues for VSI %d err %d\n",
13509 alloc_queue_pairs, vsi->seid, ret);
13510 goto err_vsi;
13511 }
13512 vsi->base_queue = ret;
13513
13514
13515
13516
13517 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
13518 pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
13519 pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
13520 i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
13521 if (vsi->type == I40E_VSI_MAIN)
13522 i40e_rm_default_mac_filter(vsi, pf->hw.mac.perm_addr);
13523
13524
13525 ret = i40e_alloc_rings(vsi);
13526 if (ret)
13527 goto err_rings;
13528
13529
13530 i40e_vsi_map_rings_to_vectors(vsi);
13531 return vsi;
13532
13533 err_rings:
13534 i40e_vsi_free_q_vectors(vsi);
13535 if (vsi->netdev_registered) {
13536 vsi->netdev_registered = false;
13537 unregister_netdev(vsi->netdev);
13538 free_netdev(vsi->netdev);
13539 vsi->netdev = NULL;
13540 }
13541 i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
13542 err_vsi:
13543 i40e_vsi_clear(vsi);
13544 return NULL;
13545 }
13546
13547 /**
13548  * i40e_vsi_setup - Set up a VSI by a given type
13549  * @pf: board private structure
13550  * @type: VSI type
13551  * @uplink_seid: the switch element to link to
13552  * @param1: usage depends upon VSI type. For VF types, indicates VF id
13553  *
13554  * This allocates the sw VSI structure and its queue resources.
13555  *
13556  * Returns pointer to the successfully allocated and configured VSI sw struct
13557  * on success, otherwise returns NULL on failure.
13558  **/
13559
13560 struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
13561 u16 uplink_seid, u32 param1)
13562 {
13563 struct i40e_vsi *vsi = NULL;
13564 struct i40e_veb *veb = NULL;
13565 u16 alloc_queue_pairs;
13566 int ret, i;
13567 int v_idx;
13568
13569 /* The requested uplink_seid must be either
13570  *     - the PF's port seid
13571  *              no VEB is needed because this is the PF
13572  *              or this is a Flow Director special case VSI
13573  *     - seid of an existing VEB
13574  *     - seid of a VSI that owns an existing VEB
13575  *     - seid of a VSI that doesn't own a VEB
13576  *              a new VEB is created and the VSI becomes the owner
13577  *     - seid of the PF VSI, which is what creates the first VEB
13578  *              this is a special case of the previous
13579  *
13580  * Find which uplink_seid we were given and create a new VEB if needed
13581  */
13582 for (i = 0; i < I40E_MAX_VEB; i++) {
13583 if (pf->veb[i] && pf->veb[i]->seid == uplink_seid) {
13584 veb = pf->veb[i];
13585 break;
13586 }
13587 }
13588
13589 if (!veb && uplink_seid != pf->mac_seid) {
13590
13591 for (i = 0; i < pf->num_alloc_vsi; i++) {
13592 if (pf->vsi[i] && pf->vsi[i]->seid == uplink_seid) {
13593 vsi = pf->vsi[i];
13594 break;
13595 }
13596 }
13597 if (!vsi) {
13598 dev_info(&pf->pdev->dev, "no such uplink_seid %d\n",
13599 uplink_seid);
13600 return NULL;
13601 }
13602
13603 if (vsi->uplink_seid == pf->mac_seid)
13604 veb = i40e_veb_setup(pf, 0, pf->mac_seid, vsi->seid,
13605 vsi->tc_config.enabled_tc);
13606 else if ((vsi->flags & I40E_VSI_FLAG_VEB_OWNER) == 0)
13607 veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
13608 vsi->tc_config.enabled_tc);
13609 if (veb) {
13610 if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) {
13611 dev_info(&vsi->back->pdev->dev,
13612 "New VSI creation error, uplink seid of LAN VSI expected.\n");
13613 return NULL;
13614 }
13615 /* We come up by default in VEPA mode if SRIOV is not
13616  * already enabled, in which case we can't force VEPA
13617  * mode.
13618  */
13619 if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
13620 veb->bridge_mode = BRIDGE_MODE_VEPA;
13621 pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
13622 }
13623 i40e_config_bridge_mode(veb);
13624 }
13625 for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
13626 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
13627 veb = pf->veb[i];
13628 }
13629 if (!veb) {
13630 dev_info(&pf->pdev->dev, "couldn't add VEB\n");
13631 return NULL;
13632 }
13633
13634 vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
13635 uplink_seid = veb->seid;
13636 }
13637
13638
13639 v_idx = i40e_vsi_mem_alloc(pf, type);
13640 if (v_idx < 0)
13641 goto err_alloc;
13642 vsi = pf->vsi[v_idx];
13643 if (!vsi)
13644 goto err_alloc;
13645 vsi->type = type;
13646 vsi->veb_idx = (veb ? veb->idx : I40E_NO_VEB);
13647
13648 if (type == I40E_VSI_MAIN)
13649 pf->lan_vsi = v_idx;
13650 else if (type == I40E_VSI_SRIOV)
13651 vsi->vf_id = param1;
13652
13653 alloc_queue_pairs = vsi->alloc_queue_pairs *
13654 (i40e_enabled_xdp_vsi(vsi) ? 2 : 1);
13655
13656 ret = i40e_get_lump(pf, pf->qp_pile, alloc_queue_pairs, vsi->idx);
13657 if (ret < 0) {
13658 dev_info(&pf->pdev->dev,
13659 "failed to get tracking for %d queues for VSI %d err=%d\n",
13660 alloc_queue_pairs, vsi->seid, ret);
13661 goto err_vsi;
13662 }
13663 vsi->base_queue = ret;
13664
13665
13666 vsi->uplink_seid = uplink_seid;
13667 ret = i40e_add_vsi(vsi);
13668 if (ret)
13669 goto err_vsi;
13670
13671 switch (vsi->type) {
13672 /* setup the netdev if needed */
13673 case I40E_VSI_MAIN:
13674 case I40E_VSI_VMDQ2:
13675 ret = i40e_config_netdev(vsi);
13676 if (ret)
13677 goto err_netdev;
13678 ret = register_netdev(vsi->netdev);
13679 if (ret)
13680 goto err_netdev;
13681 vsi->netdev_registered = true;
13682 netif_carrier_off(vsi->netdev);
13683 #ifdef CONFIG_I40E_DCB
13684 /* Setup DCB netlink interface */
13685 i40e_dcbnl_setup(vsi);
13686 #endif
13687
13688 /* fall through */
13689 case I40E_VSI_FDIR:
13690
13691 ret = i40e_vsi_setup_vectors(vsi);
13692 if (ret)
13693 goto err_msix;
13694
13695 ret = i40e_alloc_rings(vsi);
13696 if (ret)
13697 goto err_rings;
13698
13699
13700 i40e_vsi_map_rings_to_vectors(vsi);
13701
13702 i40e_vsi_reset_stats(vsi);
13703 break;
13704
13705 default:
13706 /* no netdev or rings for the other VSI types */
13707 break;
13708 }
13709
13710 if ((pf->hw_features & I40E_HW_RSS_AQ_CAPABLE) &&
13711 (vsi->type == I40E_VSI_VMDQ2)) {
13712 ret = i40e_vsi_config_rss(vsi);
13713 }
13714 return vsi;
13715
13716 err_rings:
13717 i40e_vsi_free_q_vectors(vsi);
13718 err_msix:
13719 if (vsi->netdev_registered) {
13720 vsi->netdev_registered = false;
13721 unregister_netdev(vsi->netdev);
13722 free_netdev(vsi->netdev);
13723 vsi->netdev = NULL;
13724 }
13725 err_netdev:
13726 i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
13727 err_vsi:
13728 i40e_vsi_clear(vsi);
13729 err_alloc:
13730 return NULL;
13731 }
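/* Usage sketch (hedged): callers pass the uplink the new VSI should hang
 * off, e.g. the flow director setup creates its special VSI under the main
 * LAN VSI roughly as:
 *
 *	vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR,
 *			     pf->vsi[pf->lan_vsi]->seid, 0);
 *
 * param1 is only meaningful for VF VSIs, where it carries the VF id.
 */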
13732
13733 /**
13734  * i40e_veb_get_bw_info - Query VEB BW information
13735  * @veb: the veb to query
13736  *
13737  * Query the Tx scheduler BW configuration data for given VEB
13738  **/
13739 static int i40e_veb_get_bw_info(struct i40e_veb *veb)
13740 {
13741 struct i40e_aqc_query_switching_comp_ets_config_resp ets_data;
13742 struct i40e_aqc_query_switching_comp_bw_config_resp bw_data;
13743 struct i40e_pf *pf = veb->pf;
13744 struct i40e_hw *hw = &pf->hw;
13745 u32 tc_bw_max;
13746 int ret = 0;
13747 int i;
13748
13749 ret = i40e_aq_query_switch_comp_bw_config(hw, veb->seid,
13750 &bw_data, NULL);
13751 if (ret) {
13752 dev_info(&pf->pdev->dev,
13753 "query veb bw config failed, err %s aq_err %s\n",
13754 i40e_stat_str(&pf->hw, ret),
13755 i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
13756 goto out;
13757 }
13758
13759 ret = i40e_aq_query_switch_comp_ets_config(hw, veb->seid,
13760 &ets_data, NULL);
13761 if (ret) {
13762 dev_info(&pf->pdev->dev,
13763 "query veb bw ets config failed, err %s aq_err %s\n",
13764 i40e_stat_str(&pf->hw, ret),
13765 i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
13766 goto out;
13767 }
13768
13769 veb->bw_limit = le16_to_cpu(ets_data.port_bw_limit);
13770 veb->bw_max_quanta = ets_data.tc_bw_max;
13771 veb->is_abs_credits = bw_data.absolute_credits_enable;
13772 veb->enabled_tc = ets_data.tc_valid_bits;
13773 tc_bw_max = le16_to_cpu(bw_data.tc_bw_max[0]) |
13774 (le16_to_cpu(bw_data.tc_bw_max[1]) << 16);
13775 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
13776 veb->bw_tc_share_credits[i] = bw_data.tc_bw_share_credits[i];
13777 veb->bw_tc_limit_credits[i] =
13778 le16_to_cpu(bw_data.tc_bw_limits[i]);
13779 veb->bw_tc_max_quanta[i] = ((tc_bw_max >> (i*4)) & 0x7);
13780 }
13781
13782 out:
13783 return ret;
13784 }
13785
13786 /**
13787  * i40e_veb_mem_alloc - Allocates the next available struct veb in the PF
13788  * @pf: board private structure
13789  *
13790  * On error: returns error code (negative)
13791  * On success: returns vsi index in PF (positive)
13792  **/
13793 static int i40e_veb_mem_alloc(struct i40e_pf *pf)
13794 {
13795 int ret = -ENOENT;
13796 struct i40e_veb *veb;
13797 int i;
13798
13799
13800 mutex_lock(&pf->switch_mutex);
13801
13802 /* VEB list may be fragmented if VEB creation/destruction has
13803  * been happening.  We can afford to do a quick scan to look
13804  * for any free slots in the list.
13805  *
13806  * find next empty veb slot, looping back around if necessary
13807  */
13808 i = 0;
13809 while ((i < I40E_MAX_VEB) && (pf->veb[i] != NULL))
13810 i++;
13811 if (i >= I40E_MAX_VEB) {
13812 ret = -ENOMEM;
13813 goto err_alloc_veb;
13814 }
13815
13816 veb = kzalloc(sizeof(*veb), GFP_KERNEL);
13817 if (!veb) {
13818 ret = -ENOMEM;
13819 goto err_alloc_veb;
13820 }
13821 veb->pf = pf;
13822 veb->idx = i;
13823 veb->enabled_tc = 1;
13824
13825 pf->veb[i] = veb;
13826 ret = i;
13827 err_alloc_veb:
13828 mutex_unlock(&pf->switch_mutex);
13829 return ret;
13830 }
13831
13832 /**
13833  * i40e_switch_branch_release - Delete a branch of the switch tree
13834  * @branch: where to start deleting
13835  *
13836  * This uses recursion to find the tippy-top of the switch tree and
13837  * removes VSIs and VEBs from the bottom up.
13838  **/
13839 static void i40e_switch_branch_release(struct i40e_veb *branch)
13840 {
13841 struct i40e_pf *pf = branch->pf;
13842 u16 branch_seid = branch->seid;
13843 u16 veb_idx = branch->idx;
13844 int i;
13845
13846 /* release any VEBs on this VEB - RECURSION */
13847 for (i = 0; i < I40E_MAX_VEB; i++) {
13848 if (!pf->veb[i])
13849 continue;
13850 if (pf->veb[i]->uplink_seid == branch->seid)
13851 i40e_switch_branch_release(pf->veb[i]);
13852 }
13853
13854 /* Release the VSIs on this VEB, but not the owner VSI.
13855  *
13856  * NOTE: Removing the last VSI on a VEB has the SIDE EFFECT of removing
13857  *       the VEB itself, so don't use (*branch) after this loop.
13858  */
13859 for (i = 0; i < pf->num_alloc_vsi; i++) {
13860 if (!pf->vsi[i])
13861 continue;
13862 if (pf->vsi[i]->uplink_seid == branch_seid &&
13863 (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
13864 i40e_vsi_release(pf->vsi[i]);
13865 }
13866 }
13867
13868 /* There's one corner case where the VEB might not have been
13869  * removed, so double check it here and remove it if needed.
13870  * This case happens if the veb was created from the debugfs
13871  * commands and no VSIs were added to it.
13872  */
13873 if (pf->veb[veb_idx])
13874 i40e_veb_release(pf->veb[veb_idx]);
13875 }
13876
13877 /**
13878  * i40e_veb_clear - remove veb struct
13879  * @veb: the veb to remove
13880  **/
13881 static void i40e_veb_clear(struct i40e_veb *veb)
13882 {
13883 if (!veb)
13884 return;
13885
13886 if (veb->pf) {
13887 struct i40e_pf *pf = veb->pf;
13888
13889 mutex_lock(&pf->switch_mutex);
13890 if (pf->veb[veb->idx] == veb)
13891 pf->veb[veb->idx] = NULL;
13892 mutex_unlock(&pf->switch_mutex);
13893 }
13894
13895 kfree(veb);
13896 }
13897
13898 /**
13899  * i40e_veb_release - Delete a VEB and free its resources
13900  * @veb: the VEB being removed
13901  **/
13902 void i40e_veb_release(struct i40e_veb *veb)
13903 {
13904 struct i40e_vsi *vsi = NULL;
13905 struct i40e_pf *pf;
13906 int i, n = 0;
13907
13908 pf = veb->pf;
13909
13910
13911 for (i = 0; i < pf->num_alloc_vsi; i++) {
13912 if (pf->vsi[i] && pf->vsi[i]->uplink_seid == veb->seid) {
13913 n++;
13914 vsi = pf->vsi[i];
13915 }
13916 }
13917 if (n != 1) {
13918 dev_info(&pf->pdev->dev,
13919 "can't remove VEB %d with %d VSIs left\n",
13920 veb->seid, n);
13921 return;
13922 }
13923
13924
13925 vsi->flags &= ~I40E_VSI_FLAG_VEB_OWNER;
13926 if (veb->uplink_seid) {
13927 vsi->uplink_seid = veb->uplink_seid;
13928 if (veb->uplink_seid == pf->mac_seid)
13929 vsi->veb_idx = I40E_NO_VEB;
13930 else
13931 vsi->veb_idx = veb->veb_idx;
13932 } else {
13933 /* floating VEB */
13934 vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
13935 vsi->veb_idx = pf->vsi[pf->lan_vsi]->veb_idx;
13936 }
13937
13938 i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
13939 i40e_veb_clear(veb);
13940 }
13941
13942 /**
13943  * i40e_add_veb - create the VEB in the switch
13944  * @veb: the VEB to be instantiated
13945  * @vsi: the controlling VSI
13946  **/
13947 static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
13948 {
13949 struct i40e_pf *pf = veb->pf;
13950 bool enable_stats = !!(pf->flags & I40E_FLAG_VEB_STATS_ENABLED);
13951 int ret;
13952
13953 ret = i40e_aq_add_veb(&pf->hw, veb->uplink_seid, vsi->seid,
13954 veb->enabled_tc, false,
13955 &veb->seid, enable_stats, NULL);
13956
13957
13958 if (ret) {
13959 dev_info(&pf->pdev->dev,
13960 "couldn't add VEB, err %s aq_err %s\n",
13961 i40e_stat_str(&pf->hw, ret),
13962 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
13963 return -EPERM;
13964 }
13965
13966
13967 ret = i40e_aq_get_veb_parameters(&pf->hw, veb->seid, NULL, NULL,
13968 &veb->stats_idx, NULL, NULL, NULL);
13969 if (ret) {
13970 dev_info(&pf->pdev->dev,
13971 "couldn't get VEB statistics idx, err %s aq_err %s\n",
13972 i40e_stat_str(&pf->hw, ret),
13973 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
13974 return -EPERM;
13975 }
13976 ret = i40e_veb_get_bw_info(veb);
13977 if (ret) {
13978 dev_info(&pf->pdev->dev,
13979 "couldn't get VEB bw info, err %s aq_err %s\n",
13980 i40e_stat_str(&pf->hw, ret),
13981 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
13982 i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
13983 return -ENOENT;
13984 }
13985
13986 vsi->uplink_seid = veb->seid;
13987 vsi->veb_idx = veb->idx;
13988 vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
13989
13990 return 0;
13991 }
13992
13993 /**
13994  * i40e_veb_setup - Set up a VEB
13995  * @pf: board private structure
13996  * @flags: VEB setup flags
13997  * @uplink_seid: the switch element to link to
13998  * @vsi_seid: the initial VSI seid
13999  * @enabled_tc: Enabled TC bit-map
14000  *
14001  * This allocates the sw VEB structure and links it into the switch
14002  * It is possible and legal for this to be a duplicate of an already
14003  * existing VEB.  It is also possible for both uplink and vsi seids
14004  * to be zero, in order to create a floating VEB.
14005  *
14006  * Returns pointer to the successfully allocated VEB sw struct on
14007  * success, otherwise returns NULL on failure.
14008  **/
14009 struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags,
14010 u16 uplink_seid, u16 vsi_seid,
14011 u8 enabled_tc)
14012 {
14013 struct i40e_veb *veb, *uplink_veb = NULL;
14014 int vsi_idx, veb_idx;
14015 int ret;
14016
14017
14018 if ((uplink_seid == 0 || vsi_seid == 0) &&
14019 (uplink_seid + vsi_seid != 0)) {
14020 dev_info(&pf->pdev->dev,
14021 "one, not both seid's are 0: uplink=%d vsi=%d\n",
14022 uplink_seid, vsi_seid);
14023 return NULL;
14024 }
14025
14026
14027 for (vsi_idx = 0; vsi_idx < pf->num_alloc_vsi; vsi_idx++)
14028 if (pf->vsi[vsi_idx] && pf->vsi[vsi_idx]->seid == vsi_seid)
14029 break;
14030 if (vsi_idx == pf->num_alloc_vsi && vsi_seid != 0) {
14031 dev_info(&pf->pdev->dev, "vsi seid %d not found\n",
14032 vsi_seid);
14033 return NULL;
14034 }
14035
14036 if (uplink_seid && uplink_seid != pf->mac_seid) {
14037 for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
14038 if (pf->veb[veb_idx] &&
14039 pf->veb[veb_idx]->seid == uplink_seid) {
14040 uplink_veb = pf->veb[veb_idx];
14041 break;
14042 }
14043 }
14044 if (!uplink_veb) {
14045 dev_info(&pf->pdev->dev,
14046 "uplink seid %d not found\n", uplink_seid);
14047 return NULL;
14048 }
14049 }
14050
14051
14052 veb_idx = i40e_veb_mem_alloc(pf);
14053 if (veb_idx < 0)
14054 goto err_alloc;
14055 veb = pf->veb[veb_idx];
14056 veb->flags = flags;
14057 veb->uplink_seid = uplink_seid;
14058 veb->veb_idx = (uplink_veb ? uplink_veb->idx : I40E_NO_VEB);
14059 veb->enabled_tc = (enabled_tc ? enabled_tc : 0x1);
14060
14061
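/* create the VEB in the switch */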
14062 ret = i40e_add_veb(veb, pf->vsi[vsi_idx]);
14063 if (ret)
14064 goto err_veb;
14065 if (vsi_idx == pf->lan_vsi)
14066 pf->lan_veb = veb->idx;
14067
14068 return veb;
14069
14070 err_veb:
14071 i40e_veb_clear(veb);
14072 err_alloc:
14073 return NULL;
14074 }
14075
/**
 * i40e_setup_pf_switch_element - set PF vars based on switch type
 * @pf: board private structure
 * @ele: element we are building info from
 * @num_reported: total number of elements
 * @printconfig: should we print the contents
 *
 * helper function to assist in extracting a few useful SEID values.
 **/
14085 static void i40e_setup_pf_switch_element(struct i40e_pf *pf,
14086 struct i40e_aqc_switch_config_element_resp *ele,
14087 u16 num_reported, bool printconfig)
14088 {
14089 u16 downlink_seid = le16_to_cpu(ele->downlink_seid);
14090 u16 uplink_seid = le16_to_cpu(ele->uplink_seid);
14091 u8 element_type = ele->element_type;
14092 u16 seid = le16_to_cpu(ele->seid);
14093
14094 if (printconfig)
14095 dev_info(&pf->pdev->dev,
14096 "type=%d seid=%d uplink=%d downlink=%d\n",
14097 element_type, seid, uplink_seid, downlink_seid);
14098
14099 switch (element_type) {
14100 case I40E_SWITCH_ELEMENT_TYPE_MAC:
14101 pf->mac_seid = seid;
14102 break;
14103 case I40E_SWITCH_ELEMENT_TYPE_VEB:
14104
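/* Main VEB? */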
14105 if (uplink_seid != pf->mac_seid)
14106 break;
14107 if (pf->lan_veb >= I40E_MAX_VEB) {
14108 int v;
14109
14110
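/* find existing or else empty VEB */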
14111 for (v = 0; v < I40E_MAX_VEB; v++) {
14112 if (pf->veb[v] && (pf->veb[v]->seid == seid)) {
14113 pf->lan_veb = v;
14114 break;
14115 }
14116 }
14117 if (pf->lan_veb >= I40E_MAX_VEB) {
14118 v = i40e_veb_mem_alloc(pf);
14119 if (v < 0)
14120 break;
14121 pf->lan_veb = v;
14122 }
14123 }
14124 if (pf->lan_veb >= I40E_MAX_VEB)
14125 break;
14126
14127 pf->veb[pf->lan_veb]->seid = seid;
14128 pf->veb[pf->lan_veb]->uplink_seid = pf->mac_seid;
14129 pf->veb[pf->lan_veb]->pf = pf;
14130 pf->veb[pf->lan_veb]->veb_idx = I40E_NO_VEB;
14131 break;
14132 case I40E_SWITCH_ELEMENT_TYPE_VSI:
14133 if (num_reported != 1)
14134 break;
/* This is immediately after a reset so we can assume this is
 * the PF's VSI
 */
14138 pf->mac_seid = uplink_seid;
14139 pf->pf_seid = downlink_seid;
14140 pf->main_vsi_seid = seid;
14141 if (printconfig)
14142 dev_info(&pf->pdev->dev,
14143 "pf_seid=%d main_vsi_seid=%d\n",
14144 pf->pf_seid, pf->main_vsi_seid);
14145 break;
14146 case I40E_SWITCH_ELEMENT_TYPE_PF:
14147 case I40E_SWITCH_ELEMENT_TYPE_VF:
14148 case I40E_SWITCH_ELEMENT_TYPE_EMP:
14149 case I40E_SWITCH_ELEMENT_TYPE_BMC:
14150 case I40E_SWITCH_ELEMENT_TYPE_PE:
14151 case I40E_SWITCH_ELEMENT_TYPE_PA:
14152
14153 break;
14154 default:
14155 dev_info(&pf->pdev->dev, "unknown element type=%d seid=%d\n",
14156 element_type, seid);
14157 break;
14158 }
14159 }
14160
/**
 * i40e_fetch_switch_configuration - Get switch config from firmware
 * @pf: board private structure
 * @printconfig: should we print the contents
 *
 * Get the current switch configuration from the device and
 * extract a few useful SEID values.
 **/
14169 int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig)
14170 {
14171 struct i40e_aqc_get_switch_config_resp *sw_config;
14172 u16 next_seid = 0;
14173 int ret = 0;
14174 u8 *aq_buf;
14175 int i;
14176
14177 aq_buf = kzalloc(I40E_AQ_LARGE_BUF, GFP_KERNEL);
14178 if (!aq_buf)
14179 return -ENOMEM;
14180
14181 sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
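/* the switch config is returned one buffer at a time; next_seid is the
 * cursor the firmware uses to continue from on the following call
 */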
14182 do {
14183 u16 num_reported, num_total;
14184
14185 ret = i40e_aq_get_switch_config(&pf->hw, sw_config,
14186 I40E_AQ_LARGE_BUF,
14187 &next_seid, NULL);
14188 if (ret) {
14189 dev_info(&pf->pdev->dev,
14190 "get switch config failed err %s aq_err %s\n",
14191 i40e_stat_str(&pf->hw, ret),
14192 i40e_aq_str(&pf->hw,
14193 pf->hw.aq.asq_last_status));
14194 kfree(aq_buf);
14195 return -ENOENT;
14196 }
14197
14198 num_reported = le16_to_cpu(sw_config->header.num_reported);
14199 num_total = le16_to_cpu(sw_config->header.num_total);
14200
14201 if (printconfig)
14202 dev_info(&pf->pdev->dev,
14203 "header: %d reported %d total\n",
14204 num_reported, num_total);
14205
14206 for (i = 0; i < num_reported; i++) {
14207 struct i40e_aqc_switch_config_element_resp *ele =
14208 &sw_config->element[i];
14209
14210 i40e_setup_pf_switch_element(pf, ele, num_reported,
14211 printconfig);
14212 }
14213 } while (next_seid != 0);
14214
14215 kfree(aq_buf);
14216 return ret;
14217 }
14218
/**
 * i40e_setup_pf_switch - Setup the HW switch on startup or after reset
 * @pf: board private structure
 * @reinit: if the Main VSI needs to be re-initialized
 *
 * Returns 0 on success, negative value on failure
 **/
14226 static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
14227 {
14228 u16 flags = 0;
14229 int ret;
14230
14231
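/* find out what's out there already */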
14232 ret = i40e_fetch_switch_configuration(pf, false);
14233 if (ret) {
14234 dev_info(&pf->pdev->dev,
14235 "couldn't fetch switch config, err %s aq_err %s\n",
14236 i40e_stat_str(&pf->hw, ret),
14237 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
14238 return ret;
14239 }
14240 i40e_pf_reset_stats(pf);
14241
/* set the switch config bit for the whole device to
 * support limited promisc or true promisc
 * when user requests promisc. The default is limited
 * promisc.
 */
14248 if ((pf->hw.pf_id == 0) &&
14249 !(pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT)) {
14250 flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
14251 pf->last_sw_conf_flags = flags;
14252 }
14253
14254 if (pf->hw.pf_id == 0) {
14255 u16 valid_flags;
14256
14257 valid_flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
14258 ret = i40e_aq_set_switch_config(&pf->hw, flags, valid_flags, 0,
14259 NULL);
14260 if (ret && pf->hw.aq.asq_last_status != I40E_AQ_RC_ESRCH) {
14261 dev_info(&pf->pdev->dev,
14262 "couldn't set switch config bits, err %s aq_err %s\n",
14263 i40e_stat_str(&pf->hw, ret),
14264 i40e_aq_str(&pf->hw,
14265 pf->hw.aq.asq_last_status));
14266
14267 }
14268 pf->last_sw_conf_valid_flags = valid_flags;
14269 }
14270
14271
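/* first time setup */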
14272 if (pf->lan_vsi == I40E_NO_VSI || reinit) {
14273 struct i40e_vsi *vsi = NULL;
14274 u16 uplink_seid;
14275
/* Set up the PF VSI associated with the PF's main VSI
 * that is already in the HW switch
 */
14279 if (pf->lan_veb < I40E_MAX_VEB && pf->veb[pf->lan_veb])
14280 uplink_seid = pf->veb[pf->lan_veb]->seid;
14281 else
14282 uplink_seid = pf->mac_seid;
14283 if (pf->lan_vsi == I40E_NO_VSI)
14284 vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, uplink_seid, 0);
14285 else if (reinit)
14286 vsi = i40e_vsi_reinit_setup(pf->vsi[pf->lan_vsi]);
14287 if (!vsi) {
14288 dev_info(&pf->pdev->dev, "setup of MAIN VSI failed\n");
14289 i40e_cloud_filter_exit(pf);
14290 i40e_fdir_teardown(pf);
14291 return -EAGAIN;
14292 }
14293 } else {
14294
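/* force a reset of TC and queue layout configurations */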
14295 u8 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
14296
14297 pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
14298 pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
14299 i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
14300 }
14301 i40e_vlan_stripping_disable(pf->vsi[pf->lan_vsi]);
14302
14303 i40e_fdir_sb_setup(pf);
14304
14305
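/* Setup static PF queue filter control settings */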
14306 ret = i40e_setup_pf_filter_control(pf);
14307 if (ret) {
14308 dev_info(&pf->pdev->dev, "setup_pf_filter_control failed: %d\n",
14309 ret);
14310
14311 }
14312
/* enable RSS in the HW, even for only one queue, as
 * the stack can use the hash
 */
14316 if ((pf->flags & I40E_FLAG_RSS_ENABLED))
14317 i40e_pf_config_rss(pf);
14318
14319
14320 i40e_link_event(pf);
14321
14322
14323 pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info &
14324 I40E_AQ_AN_COMPLETED) ? true : false);
14325
14326 i40e_ptp_init(pf);
14327
14328
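/* repopulate tunnel port filters */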
14329 i40e_sync_udp_filters(pf);
14330
14331 return ret;
14332 }
14333
/**
 * i40e_determine_queue_usage - Work out queue distribution
 * @pf: board private structure
 **/
14338 static void i40e_determine_queue_usage(struct i40e_pf *pf)
14339 {
14340 int queues_left;
14341 int q_max;
14342
14343 pf->num_lan_qps = 0;
14344
/* Find the max queues to be used in this mode; start with every Tx
 * queue pair the function owns, then carve off pieces for the special
 * features below.
 */
14349 queues_left = pf->hw.func_caps.num_tx_qp;
14350
14351 if ((queues_left == 1) ||
14352 !(pf->flags & I40E_FLAG_MSIX_ENABLED)) {
14353
14354 queues_left = 0;
14355 pf->alloc_rss_size = pf->num_lan_qps = 1;
14356
14357
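/* make sure all the fancy features are disabled */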
14358 pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
14359 I40E_FLAG_IWARP_ENABLED |
14360 I40E_FLAG_FD_SB_ENABLED |
14361 I40E_FLAG_FD_ATR_ENABLED |
14362 I40E_FLAG_DCB_CAPABLE |
14363 I40E_FLAG_DCB_ENABLED |
14364 I40E_FLAG_SRIOV_ENABLED |
14365 I40E_FLAG_VMDQ_ENABLED);
14366 pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
14367 } else if (!(pf->flags & (I40E_FLAG_RSS_ENABLED |
14368 I40E_FLAG_FD_SB_ENABLED |
14369 I40E_FLAG_FD_ATR_ENABLED |
14370 I40E_FLAG_DCB_CAPABLE))) {
14371
14372 pf->alloc_rss_size = pf->num_lan_qps = 1;
14373 queues_left -= pf->num_lan_qps;
14374
14375 pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
14376 I40E_FLAG_IWARP_ENABLED |
14377 I40E_FLAG_FD_SB_ENABLED |
14378 I40E_FLAG_FD_ATR_ENABLED |
14379 I40E_FLAG_DCB_ENABLED |
14380 I40E_FLAG_VMDQ_ENABLED);
14381 pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
14382 } else {
14383
14384 if ((pf->flags & I40E_FLAG_DCB_CAPABLE) &&
14385 (queues_left < I40E_MAX_TRAFFIC_CLASS)) {
14386 pf->flags &= ~(I40E_FLAG_DCB_CAPABLE |
14387 I40E_FLAG_DCB_ENABLED);
14388 dev_info(&pf->pdev->dev, "not enough queues for DCB. DCB is disabled.\n");
14389 }
14390
14391
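/* limit LAN queue pairs to the smallest of queue pairs, online CPUs
 * and available MSI-X vectors
 */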
14392 q_max = max_t(int, pf->rss_size_max, num_online_cpus());
14393 q_max = min_t(int, q_max, pf->hw.func_caps.num_tx_qp);
14394 q_max = min_t(int, q_max, pf->hw.func_caps.num_msix_vectors);
14395 pf->num_lan_qps = q_max;
14396
14397 queues_left -= pf->num_lan_qps;
14398 }
14399
14400 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
14401 if (queues_left > 1) {
14402 queues_left -= 1;
14403 } else {
14404 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
14405 pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
14406 dev_info(&pf->pdev->dev, "not enough queues for Flow Director. Flow Director feature is disabled\n");
14407 }
14408 }
14409
14410 if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
14411 pf->num_vf_qps && pf->num_req_vfs && queues_left) {
14412 pf->num_req_vfs = min_t(int, pf->num_req_vfs,
14413 (queues_left / pf->num_vf_qps));
14414 queues_left -= (pf->num_req_vfs * pf->num_vf_qps);
14415 }
14416
14417 if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
14418 pf->num_vmdq_vsis && pf->num_vmdq_qps && queues_left) {
14419 pf->num_vmdq_vsis = min_t(int, pf->num_vmdq_vsis,
14420 (queues_left / pf->num_vmdq_qps));
14421 queues_left -= (pf->num_vmdq_vsis * pf->num_vmdq_qps);
14422 }
14423
14424 pf->queues_left = queues_left;
14425 dev_dbg(&pf->pdev->dev,
14426 "qs_avail=%d FD SB=%d lan_qs=%d lan_tc0=%d vf=%d*%d vmdq=%d*%d, remaining=%d\n",
14427 pf->hw.func_caps.num_tx_qp,
14428 !!(pf->flags & I40E_FLAG_FD_SB_ENABLED),
14429 pf->num_lan_qps, pf->alloc_rss_size, pf->num_req_vfs,
14430 pf->num_vf_qps, pf->num_vmdq_vsis, pf->num_vmdq_qps,
14431 queues_left);
14432 }
14433
/**
 * i40e_setup_pf_filter_control - Setup PF static filter control
 * @pf: PF structure
 *
 * i40e_setup_pf_filter_control sets up a PF's initial filter control
 * settings in the HW based on the flags we got so far and puts the
 * appropriate defaults in place.
 *
 * Returns 0 on success, negative on failure
 **/
14445 static int i40e_setup_pf_filter_control(struct i40e_pf *pf)
14446 {
14447 struct i40e_filter_control_settings *settings = &pf->filter_settings;
14448
14449 settings->hash_lut_size = I40E_HASH_LUT_SIZE_128;
14450
14451
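/* Flow Director is enabled */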
14452 if (pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED))
14453 settings->enable_fdir = true;
14454
14455
14456 settings->enable_ethtype = true;
14457 settings->enable_macvlan = true;
14458
14459 if (i40e_set_filter_control(&pf->hw, settings))
14460 return -ENOENT;
14461
14462 return 0;
14463 }
14464
14465 #define INFO_STRING_LEN 255
14466 #define REMAIN(__x) (INFO_STRING_LEN - (__x))
14467 static void i40e_print_features(struct i40e_pf *pf)
14468 {
14469 struct i40e_hw *hw = &pf->hw;
14470 char *buf;
14471 int i;
14472
14473 buf = kmalloc(INFO_STRING_LEN, GFP_KERNEL);
14474 if (!buf)
14475 return;
14476
14477 i = snprintf(buf, INFO_STRING_LEN, "Features: PF-id[%d]", hw->pf_id);
14478 #ifdef CONFIG_PCI_IOV
14479 i += snprintf(&buf[i], REMAIN(i), " VFs: %d", pf->num_req_vfs);
14480 #endif
14481 i += snprintf(&buf[i], REMAIN(i), " VSIs: %d QP: %d",
14482 pf->hw.func_caps.num_vsis,
14483 pf->vsi[pf->lan_vsi]->num_queue_pairs);
14484 if (pf->flags & I40E_FLAG_RSS_ENABLED)
14485 i += snprintf(&buf[i], REMAIN(i), " RSS");
14486 if (pf->flags & I40E_FLAG_FD_ATR_ENABLED)
14487 i += snprintf(&buf[i], REMAIN(i), " FD_ATR");
14488 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
14489 i += snprintf(&buf[i], REMAIN(i), " FD_SB");
14490 i += snprintf(&buf[i], REMAIN(i), " NTUPLE");
14491 }
14492 if (pf->flags & I40E_FLAG_DCB_CAPABLE)
14493 i += snprintf(&buf[i], REMAIN(i), " DCB");
14494 i += snprintf(&buf[i], REMAIN(i), " VxLAN");
14495 i += snprintf(&buf[i], REMAIN(i), " Geneve");
14496 if (pf->flags & I40E_FLAG_PTP)
14497 i += snprintf(&buf[i], REMAIN(i), " PTP");
14498 if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
14499 i += snprintf(&buf[i], REMAIN(i), " VEB");
14500 else
14501 i += snprintf(&buf[i], REMAIN(i), " VEPA");
14502
14503 dev_info(&pf->pdev->dev, "%s\n", buf);
14504 kfree(buf);
14505 WARN_ON(i > INFO_STRING_LEN);
14506 }
14507
/**
 * i40e_get_platform_mac_addr - get platform-specific MAC address
 * @pdev: PCI device information struct
 * @pf: board private structure
 *
 * Look up the MAC address for the device. First we'll try
 * eth_platform_get_mac_address, which will check Open Firmware, or arch
 * specific fallback. Otherwise, we'll default to the stored value in
 * firmware.
 **/
14518 static void i40e_get_platform_mac_addr(struct pci_dev *pdev, struct i40e_pf *pf)
14519 {
14520 if (eth_platform_get_mac_address(&pdev->dev, pf->hw.mac.addr))
14521 i40e_get_mac_addr(&pf->hw, pf->hw.mac.addr);
14522 }
14523
/**
 * i40e_set_fec_in_flags - helper function for setting FEC options in flags
 * @fec_cfg: FEC option to set in flags
 * @flags: ptr to flags in which we set FEC option
 *
 * For example (illustrative), i40e_set_fec_in_flags(I40E_AQ_SET_FEC_AUTO,
 * &pf->flags) sets both I40E_FLAG_RS_FEC and I40E_FLAG_BASE_R_FEC, while
 * a fec_cfg of 0 clears both flags.
 **/
14529 void i40e_set_fec_in_flags(u8 fec_cfg, u32 *flags)
14530 {
14531 if (fec_cfg & I40E_AQ_SET_FEC_AUTO)
14532 *flags |= I40E_FLAG_RS_FEC | I40E_FLAG_BASE_R_FEC;
14533 if ((fec_cfg & I40E_AQ_SET_FEC_REQUEST_RS) ||
14534 (fec_cfg & I40E_AQ_SET_FEC_ABILITY_RS)) {
14535 *flags |= I40E_FLAG_RS_FEC;
14536 *flags &= ~I40E_FLAG_BASE_R_FEC;
14537 }
14538 if ((fec_cfg & I40E_AQ_SET_FEC_REQUEST_KR) ||
14539 (fec_cfg & I40E_AQ_SET_FEC_ABILITY_KR)) {
14540 *flags |= I40E_FLAG_BASE_R_FEC;
14541 *flags &= ~I40E_FLAG_RS_FEC;
14542 }
14543 if (fec_cfg == 0)
14544 *flags &= ~(I40E_FLAG_RS_FEC | I40E_FLAG_BASE_R_FEC);
14545 }
14546
/**
 * i40e_check_recovery_mode - check if we are running transition firmware
 * @pf: board private structure
 *
 * Check registers indicating the firmware runs in recovery mode. Sets the
 * __I40E_RECOVERY_MODE bit in pf->state if the firmware is in recovery mode
 * and returns true, otherwise clears the bit (if it was previously set)
 * and returns false.
 **/
14556 static bool i40e_check_recovery_mode(struct i40e_pf *pf)
14557 {
14558 u32 val = rd32(&pf->hw, I40E_GL_FWSTS) & I40E_GL_FWSTS_FWS1B_MASK;
14559 bool is_recovery_mode = false;
14560
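/* The FWS1B field encodes the firmware's boot state; the values that
 * indicate recovery mode differ between XL710 and X722 MACs.
 */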
14561 if (pf->hw.mac.type == I40E_MAC_XL710)
14562 is_recovery_mode =
14563 val == I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_CORER_MASK ||
14564 val == I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_GLOBR_MASK ||
14565 val == I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_TRANSITION_MASK ||
14566 val == I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_NVM_MASK;
14567 if (pf->hw.mac.type == I40E_MAC_X722)
14568 is_recovery_mode =
14569 val == I40E_X722_GL_FWSTS_FWS1B_REC_MOD_CORER_MASK ||
14570 val == I40E_X722_GL_FWSTS_FWS1B_REC_MOD_GLOBR_MASK;
14571 if (is_recovery_mode) {
14572 dev_notice(&pf->pdev->dev, "Firmware recovery mode detected. Limiting functionality.\n");
14573 dev_notice(&pf->pdev->dev, "Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n");
14574 set_bit(__I40E_RECOVERY_MODE, pf->state);
14575
14576 return true;
14577 }
14578 if (test_and_clear_bit(__I40E_RECOVERY_MODE, pf->state))
14579 dev_info(&pf->pdev->dev, "Reinitializing in normal mode with full functionality.\n");
14580
14581 return false;
14582 }
14583
/**
 * i40e_pf_loop_reset - perform reset in a loop
 * @pf: board private structure
 *
 * This function is useful when a NIC is about to enter recovery mode.
 * When a NIC's internal data structures are corrupted the NIC's
 * firmware is going to enter recovery mode.
 * Right after a POR it takes about 7 minutes for firmware to enter
 * recovery mode. Electrical reset of the NIC can result
 * in firmware entering recovery mode. A firmware update shall be
 * possible from within the recovery mode, so that the firmware can be
 * fixed without POR.
 **/
14605 static i40e_status i40e_pf_loop_reset(struct i40e_pf *pf)
14606 {
14607 const unsigned short MAX_CNT = 1000;
14608 const unsigned short MSECS = 10;
14609 struct i40e_hw *hw = &pf->hw;
14610 i40e_status ret;
14611 int cnt;
14612
14613 for (cnt = 0; cnt < MAX_CNT; ++cnt) {
14614 ret = i40e_pf_reset(hw);
14615 if (!ret)
14616 break;
14617 msleep(MSECS);
14618 }
14619
14620 if (cnt == MAX_CNT) {
14621 dev_info(&pf->pdev->dev, "PF reset failed: %d\n", ret);
14622 return ret;
14623 }
14624
14625 pf->pfr_count++;
14626 return ret;
14627 }
14628
/**
 * i40e_init_recovery_mode - initialize subsystems needed in recovery mode
 * @pf: board private structure
 * @hw: ptr to the hardware info
 *
 * Prepare the minimal subset of the driver needed to serve a firmware
 * update in recovery mode: interrupt scheme, one MAIN VSI with a single
 * queue pair, a registered netdev, debugfs and the service task.
 **/
14639 static int i40e_init_recovery_mode(struct i40e_pf *pf, struct i40e_hw *hw)
14640 {
14641 struct i40e_vsi *vsi;
14642 int err;
14643 int v_idx;
14644
14645 pci_save_state(pf->pdev);
14646
14647
14648 timer_setup(&pf->service_timer, i40e_service_timer, 0);
14649 pf->service_timer_period = HZ;
14650
14651 INIT_WORK(&pf->service_task, i40e_service_task);
14652 clear_bit(__I40E_SERVICE_SCHED, pf->state);
14653
14654 err = i40e_init_interrupt_scheme(pf);
14655 if (err)
14656 goto err_switch_setup;
14657
/* The number of VSIs reported by the FW is the minimum guaranteed
 * to us; HW supports far more and we share the remaining pool with
 * the other PFs. We allocate space for more than the guarantee with
 * the understanding that we might not get them all later.
 */
14663 if (pf->hw.func_caps.num_vsis < I40E_MIN_VSI_ALLOC)
14664 pf->num_alloc_vsi = I40E_MIN_VSI_ALLOC;
14665 else
14666 pf->num_alloc_vsi = pf->hw.func_caps.num_vsis;
14667
14668
14669 pf->vsi = kcalloc(pf->num_alloc_vsi, sizeof(struct i40e_vsi *),
14670 GFP_KERNEL);
14671 if (!pf->vsi) {
14672 err = -ENOMEM;
14673 goto err_switch_setup;
14674 }
14675
14676
14677
14678
14679 v_idx = i40e_vsi_mem_alloc(pf, I40E_VSI_MAIN);
14680 if (v_idx < 0)
14681 goto err_switch_setup;
14682 pf->lan_vsi = v_idx;
14683 vsi = pf->vsi[v_idx];
14684 if (!vsi)
14685 goto err_switch_setup;
14686 vsi->alloc_queue_pairs = 1;
14687 err = i40e_config_netdev(vsi);
14688 if (err)
14689 goto err_switch_setup;
14690 err = register_netdev(vsi->netdev);
14691 if (err)
14692 goto err_switch_setup;
14693 vsi->netdev_registered = true;
14694 i40e_dbg_pf_init(pf);
14695
14696 err = i40e_setup_misc_vector_for_recovery_mode(pf);
14697 if (err)
14698 goto err_switch_setup;
14699
14700
14701 i40e_send_version(pf);
14702
14703
14704 mod_timer(&pf->service_timer,
14705 round_jiffies(jiffies + pf->service_timer_period));
14706
14707 return 0;
14708
14709 err_switch_setup:
14710 i40e_reset_interrupt_capability(pf);
14711 del_timer_sync(&pf->service_timer);
14712 i40e_shutdown_adminq(hw);
14713 iounmap(hw->hw_addr);
14714 pci_disable_pcie_error_reporting(pf->pdev);
14715 pci_release_mem_regions(pf->pdev);
14716 pci_disable_device(pf->pdev);
14717 kfree(pf);
14718
14719 return err;
14720 }
14721
/**
 * i40e_probe - Device initialization routine
 * @pdev: PCI device information struct
 * @ent: entry in i40e_pci_tbl
 *
 * i40e_probe initializes a PF identified by a pci_dev structure.
 * The OS initialization, configuring of the PF private structure,
 * and a hardware reset occur.
 *
 * Returns 0 on success, negative on failure
 **/
14733 static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
14734 {
14735 struct i40e_aq_get_phy_abilities_resp abilities;
14736 struct i40e_pf *pf;
14737 struct i40e_hw *hw;
14738 static u16 pfs_found;
14739 u16 wol_nvm_bits;
14740 u16 link_status;
14741 int err;
14742 u32 val;
14743 u32 i;
14744 u8 set_fc_aq_fail;
14745
14746 err = pci_enable_device_mem(pdev);
14747 if (err)
14748 return err;
14749
14750
14751 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
14752 if (err) {
14753 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
14754 if (err) {
14755 dev_err(&pdev->dev,
14756 "DMA configuration failed: 0x%x\n", err);
14757 goto err_dma;
14758 }
14759 }
14760
14761
14762 err = pci_request_mem_regions(pdev, i40e_driver_name);
14763 if (err) {
14764 dev_info(&pdev->dev,
14765 "pci_request_selected_regions failed %d\n", err);
14766 goto err_pci_reg;
14767 }
14768
14769 pci_enable_pcie_error_reporting(pdev);
14770 pci_set_master(pdev);
14771
14772
/* Now that we have a PCI connection, we need to do the
 * low level device setup. This is primarily setting up
 * the Admin Queue structures and then querying for the
 * device's current profile information.
 */
14777 pf = kzalloc(sizeof(*pf), GFP_KERNEL);
14778 if (!pf) {
14779 err = -ENOMEM;
14780 goto err_pf_alloc;
14781 }
14782 pf->next_vsi = 0;
14783 pf->pdev = pdev;
14784 set_bit(__I40E_DOWN, pf->state);
14785
14786 hw = &pf->hw;
14787 hw->back = pf;
14788
14789 pf->ioremap_len = min_t(int, pci_resource_len(pdev, 0),
14790 I40E_MAX_CSR_SPACE);
14791
/* We believe that the highest register to read is
 * I40E_GLGEN_STAT_CLEAR, so we check if the BAR size
 * is not less than that before mapping to prevent a
 * kernel panic.
 */
14796 if (pf->ioremap_len < I40E_GLGEN_STAT_CLEAR) {
14797 dev_err(&pdev->dev, "Cannot map registers, bar size 0x%X too small, aborting\n",
14798 pf->ioremap_len);
14799 err = -ENOMEM;
14800 goto err_ioremap;
14801 }
14802 hw->hw_addr = ioremap(pci_resource_start(pdev, 0), pf->ioremap_len);
14803 if (!hw->hw_addr) {
14804 err = -EIO;
14805 dev_info(&pdev->dev, "ioremap(0x%04x, 0x%04x) failed: 0x%x\n",
14806 (unsigned int)pci_resource_start(pdev, 0),
14807 pf->ioremap_len, err);
14808 goto err_ioremap;
14809 }
14810 hw->vendor_id = pdev->vendor;
14811 hw->device_id = pdev->device;
14812 pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
14813 hw->subsystem_vendor_id = pdev->subsystem_vendor;
14814 hw->subsystem_device_id = pdev->subsystem_device;
14815 hw->bus.device = PCI_SLOT(pdev->devfn);
14816 hw->bus.func = PCI_FUNC(pdev->devfn);
14817 hw->bus.bus_id = pdev->bus->number;
14818 pf->instance = pfs_found;
14819
/* Select something other than the 802.1ad ethertype for the
 * switch to use internally and drop on ingress.
 */
14823 hw->switch_tag = 0xffff;
14824 hw->first_tag = ETH_P_8021AD;
14825 hw->second_tag = ETH_P_8021Q;
14826
14827 INIT_LIST_HEAD(&pf->l3_flex_pit_list);
14828 INIT_LIST_HEAD(&pf->l4_flex_pit_list);
14829 INIT_LIST_HEAD(&pf->ddp_old_prof);
14830
14831
/* set up the locks for the AQ, do this only once in probe
 * and destroy them only once in remove
 */
14834 mutex_init(&hw->aq.asq_mutex);
14835 mutex_init(&hw->aq.arq_mutex);
14836
14837 pf->msg_enable = netif_msg_init(debug,
14838 NETIF_MSG_DRV |
14839 NETIF_MSG_PROBE |
14840 NETIF_MSG_LINK);
14841 if (debug < -1)
14842 pf->hw.debug_mask = debug;
14843
14844
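/* do a special CORER for clearing PXE mode once at init */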
14845 if (hw->revision_id == 0 &&
14846 (rd32(hw, I40E_GLLAN_RCTL_0) & I40E_GLLAN_RCTL_0_PXE_MODE_MASK)) {
14847 wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK);
14848 i40e_flush(hw);
14849 msleep(200);
14850 pf->corer_count++;
14851
14852 i40e_clear_pxe_mode(hw);
14853 }
14854
14855
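/* Reset here to make sure all is clean and to define PF 'n' */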
14856 i40e_clear_hw(hw);
14857
14858 err = i40e_set_mac_type(hw);
14859 if (err) {
14860 dev_warn(&pdev->dev, "unidentified MAC or BLANK NVM: %d\n",
14861 err);
14862 goto err_pf_reset;
14863 }
14864
14865 err = i40e_pf_loop_reset(pf);
14866 if (err) {
14867 dev_info(&pdev->dev, "Initial pf_reset failed: %d\n", err);
14868 goto err_pf_reset;
14869 }
14870
14871 i40e_check_recovery_mode(pf);
14872
14873 hw->aq.num_arq_entries = I40E_AQ_LEN;
14874 hw->aq.num_asq_entries = I40E_AQ_LEN;
14875 hw->aq.arq_buf_size = I40E_MAX_AQ_BUF_SIZE;
14876 hw->aq.asq_buf_size = I40E_MAX_AQ_BUF_SIZE;
14877 pf->adminq_work_limit = I40E_AQ_WORK_LIMIT;
14878
14879 snprintf(pf->int_name, sizeof(pf->int_name) - 1,
14880 "%s-%s:misc",
14881 dev_driver_string(&pf->pdev->dev), dev_name(&pdev->dev));
14882
14883 err = i40e_init_shared_code(hw);
14884 if (err) {
14885 dev_warn(&pdev->dev, "unidentified MAC or BLANK NVM: %d\n",
14886 err);
14887 goto err_pf_reset;
14888 }
14889
14890
14891 pf->hw.fc.requested_mode = I40E_FC_NONE;
14892
14893 err = i40e_init_adminq(hw);
14894 if (err) {
14895 if (err == I40E_ERR_FIRMWARE_API_VERSION)
14896 dev_info(&pdev->dev,
14897 "The driver for the device stopped because the NVM image v%u.%u is newer than expected v%u.%u. You must install the most recent version of the network driver.\n",
14898 hw->aq.api_maj_ver,
14899 hw->aq.api_min_ver,
14900 I40E_FW_API_VERSION_MAJOR,
14901 I40E_FW_MINOR_VERSION(hw));
14902 else
14903 dev_info(&pdev->dev,
14904 "The driver for the device stopped because the device firmware failed to init. Try updating your NVM image.\n");
14905
14906 goto err_pf_reset;
14907 }
14908 i40e_get_oem_version(hw);
14909
14910
14911 dev_info(&pdev->dev, "fw %d.%d.%05d api %d.%d nvm %s [%04x:%04x] [%04x:%04x]\n",
14912 hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build,
14913 hw->aq.api_maj_ver, hw->aq.api_min_ver,
14914 i40e_nvm_version_str(hw), hw->vendor_id, hw->device_id,
14915 hw->subsystem_vendor_id, hw->subsystem_device_id);
14916
14917 if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
14918 hw->aq.api_min_ver > I40E_FW_MINOR_VERSION(hw))
14919 dev_info(&pdev->dev,
14920 "The driver for the device detected a newer version of the NVM image v%u.%u than expected v%u.%u. Please install the most recent version of the network driver.\n",
14921 hw->aq.api_maj_ver,
14922 hw->aq.api_min_ver,
14923 I40E_FW_API_VERSION_MAJOR,
14924 I40E_FW_MINOR_VERSION(hw));
14925 else if (hw->aq.api_maj_ver == 1 && hw->aq.api_min_ver < 4)
14926 dev_info(&pdev->dev,
14927 "The driver for the device detected an older version of the NVM image v%u.%u than expected v%u.%u. Please update the NVM image.\n",
14928 hw->aq.api_maj_ver,
14929 hw->aq.api_min_ver,
14930 I40E_FW_API_VERSION_MAJOR,
14931 I40E_FW_MINOR_VERSION(hw));
14932
14933 i40e_verify_eeprom(pf);
14934
14935
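/* Rev 0 hardware was never productized */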
14936 if (hw->revision_id < 1)
14937 dev_warn(&pdev->dev, "This device is a pre-production adapter/LOM. Please be aware there may be issues with your hardware. If you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n");
14938
14939 i40e_clear_pxe_mode(hw);
14940
14941 err = i40e_get_capabilities(pf, i40e_aqc_opc_list_func_capabilities);
14942 if (err)
14943 goto err_adminq_setup;
14944
14945 err = i40e_sw_init(pf);
14946 if (err) {
14947 dev_info(&pdev->dev, "sw_init failed: %d\n", err);
14948 goto err_sw_init;
14949 }
14950
14951 if (test_bit(__I40E_RECOVERY_MODE, pf->state))
14952 return i40e_init_recovery_mode(pf, hw);
14953
14954 err = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
14955 hw->func_caps.num_rx_qp, 0, 0);
14956 if (err) {
14957 dev_info(&pdev->dev, "init_lan_hmc failed: %d\n", err);
14958 goto err_init_lan_hmc;
14959 }
14960
14961 err = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
14962 if (err) {
14963 dev_info(&pdev->dev, "configure_lan_hmc failed: %d\n", err);
14964 err = -ENOENT;
14965 goto err_configure_lan_hmc;
14966 }
14967
/* Disable LLDP for NICs that have firmware versions lower than v4.3.
 * Ignore error return codes because if it was already disabled via
 * hardware settings this will fail
 */
14972 if (pf->hw_features & I40E_HW_STOP_FW_LLDP) {
14973 dev_info(&pdev->dev, "Stopping firmware LLDP agent.\n");
14974 i40e_aq_stop_lldp(hw, true, false, NULL);
14975 }
14976
14977
14978 i40e_get_platform_mac_addr(pdev, pf);
14979
14980 if (!is_valid_ether_addr(hw->mac.addr)) {
14981 dev_info(&pdev->dev, "invalid MAC address %pM\n", hw->mac.addr);
14982 err = -EIO;
14983 goto err_mac_addr;
14984 }
14985 dev_info(&pdev->dev, "MAC address: %pM\n", hw->mac.addr);
14986 ether_addr_copy(hw->mac.perm_addr, hw->mac.addr);
14987 i40e_get_port_mac_addr(hw, hw->mac.port_addr);
14988 if (is_valid_ether_addr(hw->mac.port_addr))
14989 pf->hw_features |= I40E_HW_PORT_ID_VALID;
14990
14991 pci_set_drvdata(pdev, pf);
14992 pci_save_state(pdev);
14993
14994 dev_info(&pdev->dev,
14995 (pf->flags & I40E_FLAG_DISABLE_FW_LLDP) ?
14996 "FW LLDP is disabled\n" :
14997 "FW LLDP is enabled\n");
14998
14999
15000 i40e_aq_set_dcb_parameters(hw, true, NULL);
15001
15002 #ifdef CONFIG_I40E_DCB
15003 err = i40e_init_pf_dcb(pf);
15004 if (err) {
15005 dev_info(&pdev->dev, "DCB init failed %d, disabled\n", err);
15006 pf->flags &= ~(I40E_FLAG_DCB_CAPABLE | I40E_FLAG_DCB_ENABLED);
15007
15008 }
15009 #endif
15010
15011
15012 timer_setup(&pf->service_timer, i40e_service_timer, 0);
15013 pf->service_timer_period = HZ;
15014
15015 INIT_WORK(&pf->service_task, i40e_service_task);
15016 clear_bit(__I40E_SERVICE_SCHED, pf->state);
15017
15018
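/* NVM bit on means WoL is disabled for the port */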
15019 i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
if (BIT(hw->port) & wol_nvm_bits || hw->partition_id != 1)
15021 pf->wol_en = false;
15022 else
15023 pf->wol_en = true;
15024 device_set_wakeup_enable(&pf->pdev->dev, pf->wol_en);
15025
15026
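/* set up the main switch operations */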
15027 i40e_determine_queue_usage(pf);
15028 err = i40e_init_interrupt_scheme(pf);
15029 if (err)
15030 goto err_switch_setup;
15031
/* The number of VSIs reported by the FW is the minimum guaranteed
 * to us; HW supports far more and we share the remaining pool with
 * the other PFs. We allocate space for more than the guarantee with
 * the understanding that we might not get them all later.
 */
15037 if (pf->hw.func_caps.num_vsis < I40E_MIN_VSI_ALLOC)
15038 pf->num_alloc_vsi = I40E_MIN_VSI_ALLOC;
15039 else
15040 pf->num_alloc_vsi = pf->hw.func_caps.num_vsis;
15041
15042
15043 pf->vsi = kcalloc(pf->num_alloc_vsi, sizeof(struct i40e_vsi *),
15044 GFP_KERNEL);
15045 if (!pf->vsi) {
15046 err = -ENOMEM;
15047 goto err_switch_setup;
15048 }
15049
15050 #ifdef CONFIG_PCI_IOV
15051
15052 if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
15053 (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
15054 !test_bit(__I40E_BAD_EEPROM, pf->state)) {
15055 if (pci_num_vf(pdev))
15056 pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
15057 }
15058 #endif
15059 err = i40e_setup_pf_switch(pf, false);
15060 if (err) {
15061 dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err);
15062 goto err_vsis;
15063 }
15064 INIT_LIST_HEAD(&pf->vsi[pf->lan_vsi]->ch_list);
15065
15066
15067 err = i40e_set_fc(hw, &set_fc_aq_fail, true);
15068 if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_GET)
15069 dev_dbg(&pf->pdev->dev,
15070 "Set fc with err %s aq_err %s on get_phy_cap\n",
15071 i40e_stat_str(hw, err),
15072 i40e_aq_str(hw, hw->aq.asq_last_status));
15073 if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_SET)
15074 dev_dbg(&pf->pdev->dev,
15075 "Set fc with err %s aq_err %s on set_phy_config\n",
15076 i40e_stat_str(hw, err),
15077 i40e_aq_str(hw, hw->aq.asq_last_status));
15078 if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_UPDATE)
15079 dev_dbg(&pf->pdev->dev,
15080 "Set fc with err %s aq_err %s on get_link_info\n",
15081 i40e_stat_str(hw, err),
15082 i40e_aq_str(hw, hw->aq.asq_last_status));
15083
15084
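/* if FDIR VSI was set up, start it now */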
15085 for (i = 0; i < pf->num_alloc_vsi; i++) {
15086 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
15087 i40e_vsi_open(pf->vsi[i]);
15088 break;
15089 }
15090 }
15091
/* The driver only wants link up/down and module qualification
 * reports from firmware.  Note the negative logic.
 */
15095 err = i40e_aq_set_phy_int_mask(&pf->hw,
15096 ~(I40E_AQ_EVENT_LINK_UPDOWN |
15097 I40E_AQ_EVENT_MEDIA_NA |
15098 I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
15099 if (err)
15100 dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
15101 i40e_stat_str(&pf->hw, err),
15102 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
15103
/* Reconfigure hardware for allowing smaller MSS in the case
 * of TSO, so that we avoid the MDD being fired and causing
 * a reset in the case of small MSS+TSO.
 */
15108 val = rd32(hw, I40E_REG_MSS);
15109 if ((val & I40E_REG_MSS_MIN_MASK) > I40E_64BYTE_MSS) {
15110 val &= ~I40E_REG_MSS_MIN_MASK;
15111 val |= I40E_64BYTE_MSS;
15112 wr32(hw, I40E_REG_MSS, val);
15113 }
15114
15115 if (pf->hw_features & I40E_HW_RESTART_AUTONEG) {
15116 msleep(75);
15117 err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
15118 if (err)
15119 dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n",
15120 i40e_stat_str(&pf->hw, err),
15121 i40e_aq_str(&pf->hw,
15122 pf->hw.aq.asq_last_status));
15123 }
15124
/* The main driver is (mostly) up and happy. We need to set this state
 * before setting up the misc vector or we get a race and the vector
 * ends up disabled forever.
 */
15128 clear_bit(__I40E_DOWN, pf->state);
15129
/* In case of MSIX we are going to setup the misc vector right here
 * to handle admin queue events etc. In case of legacy and MSI
 * the misc functionality and queue processing is combined in
 * the same vector and that gets setup at open.
 */
15135 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
15136 err = i40e_setup_misc_vector(pf);
15137 if (err) {
15138 dev_info(&pdev->dev,
15139 "setup of misc vector failed: %d\n", err);
15140 goto err_vsis;
15141 }
15142 }
15143
15144 #ifdef CONFIG_PCI_IOV
15145
15146 if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
15147 (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
15148 !test_bit(__I40E_BAD_EEPROM, pf->state)) {
15149
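/* disable link interrupts for VFs */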
15150 val = rd32(hw, I40E_PFGEN_PORTMDIO_NUM);
15151 val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK;
15152 wr32(hw, I40E_PFGEN_PORTMDIO_NUM, val);
15153 i40e_flush(hw);
15154
15155 if (pci_num_vf(pdev)) {
15156 dev_info(&pdev->dev,
15157 "Active VFs found, allocating resources.\n");
15158 err = i40e_alloc_vfs(pf, pci_num_vf(pdev));
15159 if (err)
15160 dev_info(&pdev->dev,
15161 "Error %d allocating resources for existing VFs\n",
15162 err);
15163 }
15164 }
15165 #endif
15166
15167 if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
15168 pf->iwarp_base_vector = i40e_get_lump(pf, pf->irq_pile,
15169 pf->num_iwarp_msix,
15170 I40E_IWARP_IRQ_PILE_ID);
15171 if (pf->iwarp_base_vector < 0) {
15172 dev_info(&pdev->dev,
15173 "failed to get tracking for %d vectors for IWARP err=%d\n",
15174 pf->num_iwarp_msix, pf->iwarp_base_vector);
15175 pf->flags &= ~I40E_FLAG_IWARP_ENABLED;
15176 }
15177 }
15178
15179 i40e_dbg_pf_init(pf);
15180
15181
15182 i40e_send_version(pf);
15183
15184
15185 mod_timer(&pf->service_timer,
15186 round_jiffies(jiffies + pf->service_timer_period));
15187
15188
15189 if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
15190 err = i40e_lan_add_device(pf);
15191 if (err)
15192 dev_info(&pdev->dev, "Failed to add PF to client API service list: %d\n",
15193 err);
15194 }
15195
15196 #define PCI_SPEED_SIZE 8
15197 #define PCI_WIDTH_SIZE 8
15198
15199
/* Devices on the IOSF bus do not have this information
 * and will report PCI Gen 1 x 1 by default so don't bother
 * checking them.
 */
15202 if (!(pf->hw_features & I40E_HW_NO_PCI_LINK_CHECK)) {
15203 char speed[PCI_SPEED_SIZE] = "Unknown";
15204 char width[PCI_WIDTH_SIZE] = "Unknown";
15205
/* Get the negotiated link width and speed from PCI config
 * space
 */
15209 pcie_capability_read_word(pf->pdev, PCI_EXP_LNKSTA,
15210 &link_status);
15211
15212 i40e_set_pci_config_data(hw, link_status);
15213
15214 switch (hw->bus.speed) {
15215 case i40e_bus_speed_8000:
15216 strlcpy(speed, "8.0", PCI_SPEED_SIZE); break;
15217 case i40e_bus_speed_5000:
15218 strlcpy(speed, "5.0", PCI_SPEED_SIZE); break;
15219 case i40e_bus_speed_2500:
15220 strlcpy(speed, "2.5", PCI_SPEED_SIZE); break;
15221 default:
15222 break;
15223 }
15224 switch (hw->bus.width) {
15225 case i40e_bus_width_pcie_x8:
15226 strlcpy(width, "8", PCI_WIDTH_SIZE); break;
15227 case i40e_bus_width_pcie_x4:
15228 strlcpy(width, "4", PCI_WIDTH_SIZE); break;
15229 case i40e_bus_width_pcie_x2:
15230 strlcpy(width, "2", PCI_WIDTH_SIZE); break;
15231 case i40e_bus_width_pcie_x1:
15232 strlcpy(width, "1", PCI_WIDTH_SIZE); break;
15233 default:
15234 break;
15235 }
15236
15237 dev_info(&pdev->dev, "PCI-Express: Speed %sGT/s Width x%s\n",
15238 speed, width);
15239
15240 if (hw->bus.width < i40e_bus_width_pcie_x8 ||
15241 hw->bus.speed < i40e_bus_speed_8000) {
15242 dev_warn(&pdev->dev, "PCI-Express bandwidth available for this device may be insufficient for optimal performance.\n");
15243 dev_warn(&pdev->dev, "Please move the device to a different PCI-e link with more lanes and/or higher transfer rate.\n");
15244 }
15245 }
15246
15247
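/* get the requested speeds from the fw */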
15248 err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, NULL);
15249 if (err)
15250 dev_dbg(&pf->pdev->dev, "get requested speeds ret = %s last_status = %s\n",
15251 i40e_stat_str(&pf->hw, err),
15252 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
15253 pf->hw.phy.link_info.requested_speeds = abilities.link_speed;
15254
15255
15256 i40e_set_fec_in_flags(abilities.fec_cfg_curr_mod_ext_info, &pf->flags);
15257
15258
15259 err = i40e_aq_get_phy_capabilities(hw, false, true, &abilities, NULL);
15260 if (err)
15261 dev_dbg(&pf->pdev->dev, "get supported phy types ret = %s last_status = %s\n",
15262 i40e_stat_str(&pf->hw, err),
15263 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
/* Add a filter to drop all Flow control frames from any VSI from being
 * transmitted. By doing so we stop a malicious VF from sending out
 * PAUSE or PFC frames and potentially controlling traffic for other
 * VF/VSI.
 */
15271 i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw,
15272 pf->main_vsi_seid);
15273
15274 if ((pf->hw.device_id == I40E_DEV_ID_10G_BASE_T) ||
15275 (pf->hw.device_id == I40E_DEV_ID_10G_BASE_T4))
15276 pf->hw_features |= I40E_HW_PHY_CONTROLS_LEDS;
15277 if (pf->hw.device_id == I40E_DEV_ID_SFP_I_X722)
15278 pf->hw_features |= I40E_HW_HAVE_CRT_RETIMER;
15279
15280 i40e_print_features(pf);
15281
15282 return 0;
15283
15284
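/* Unwind what we've done if something failed in the setup */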
15285 err_vsis:
15286 set_bit(__I40E_DOWN, pf->state);
15287 i40e_clear_interrupt_scheme(pf);
15288 kfree(pf->vsi);
15289 err_switch_setup:
15290 i40e_reset_interrupt_capability(pf);
15291 del_timer_sync(&pf->service_timer);
15292 err_mac_addr:
15293 err_configure_lan_hmc:
15294 (void)i40e_shutdown_lan_hmc(hw);
15295 err_init_lan_hmc:
15296 kfree(pf->qp_pile);
15297 err_sw_init:
15298 err_adminq_setup:
15299 err_pf_reset:
15300 iounmap(hw->hw_addr);
15301 err_ioremap:
15302 kfree(pf);
15303 err_pf_alloc:
15304 pci_disable_pcie_error_reporting(pdev);
15305 pci_release_mem_regions(pdev);
15306 err_pci_reg:
15307 err_dma:
15308 pci_disable_device(pdev);
15309 return err;
15310 }
15311
/**
 * i40e_remove - Device removal routine
 * @pdev: PCI device information struct
 *
 * i40e_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
15321 static void i40e_remove(struct pci_dev *pdev)
15322 {
15323 struct i40e_pf *pf = pci_get_drvdata(pdev);
15324 struct i40e_hw *hw = &pf->hw;
15325 i40e_status ret_code;
15326 int i;
15327
15328 i40e_dbg_pf_exit(pf);
15329
15330 i40e_ptp_stop(pf);
15331
15332
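/* Disable RSS in hw */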
15333 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), 0);
15334 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), 0);
15335
15336
15337 set_bit(__I40E_SUSPENDED, pf->state);
15338 set_bit(__I40E_DOWN, pf->state);
15339 if (pf->service_timer.function)
15340 del_timer_sync(&pf->service_timer);
15341 if (pf->service_task.func)
15342 cancel_work_sync(&pf->service_task);
15343
15344 if (test_bit(__I40E_RECOVERY_MODE, pf->state)) {
15345 struct i40e_vsi *vsi = pf->vsi[0];
15346
/* We know that we have allocated only one vsi for this PF,
 * it was just for registering netdevice and in the rest below
 * we are not interested in the recovery mode related stuff
 */
15351 unregister_netdev(vsi->netdev);
15352 free_netdev(vsi->netdev);
15353
15354 goto unmap;
15355 }
15356
15357
/* Client close must be called explicitly here because the timer
 * has been stopped.
 */
15360 i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);
15361
15362 if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
15363 i40e_free_vfs(pf);
15364 pf->flags &= ~I40E_FLAG_SRIOV_ENABLED;
15365 }
15366
15367 i40e_fdir_teardown(pf);
15368
/* If there is a switch structure or any orphans, remove them.
 * This will leave only the PF's VSI remaining.
 */
15372 for (i = 0; i < I40E_MAX_VEB; i++) {
15373 if (!pf->veb[i])
15374 continue;
15375
15376 if (pf->veb[i]->uplink_seid == pf->mac_seid ||
15377 pf->veb[i]->uplink_seid == 0)
15378 i40e_switch_branch_release(pf->veb[i]);
15379 }
15380
/* Now we can shutdown the PF's VSI, just before we kill
 * adminq and hmc.
 */
15384 if (pf->vsi[pf->lan_vsi])
15385 i40e_vsi_release(pf->vsi[pf->lan_vsi]);
15386
15387 i40e_cloud_filter_exit(pf);
15388
15389
15390 if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
15391 ret_code = i40e_lan_del_device(pf);
15392 if (ret_code)
15393 dev_warn(&pdev->dev, "Failed to delete client device: %d\n",
15394 ret_code);
15395 }
15396
15397
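/* shutdown and destroy the HMC */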
15398 if (hw->hmc.hmc_obj) {
15399 ret_code = i40e_shutdown_lan_hmc(hw);
15400 if (ret_code)
15401 dev_warn(&pdev->dev,
15402 "Failed to destroy the HMC resources: %d\n",
15403 ret_code);
15404 }
15405
15406 unmap:
15407
15408 if (test_bit(__I40E_RECOVERY_MODE, pf->state) &&
15409 !(pf->flags & I40E_FLAG_MSIX_ENABLED))
15410 free_irq(pf->pdev->irq, pf);
15411
15412
15413 i40e_shutdown_adminq(hw);
15414
15415
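/* destroy the locks only once, here */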
15416 mutex_destroy(&hw->aq.arq_mutex);
15417 mutex_destroy(&hw->aq.asq_mutex);
15418
15419
15420 rtnl_lock();
15421 i40e_clear_interrupt_scheme(pf);
15422 for (i = 0; i < pf->num_alloc_vsi; i++) {
15423 if (pf->vsi[i]) {
15424 if (!test_bit(__I40E_RECOVERY_MODE, pf->state))
15425 i40e_vsi_clear_rings(pf->vsi[i]);
15426 i40e_vsi_clear(pf->vsi[i]);
15427 pf->vsi[i] = NULL;
15428 }
15429 }
15430 rtnl_unlock();
15431
15432 for (i = 0; i < I40E_MAX_VEB; i++) {
15433 kfree(pf->veb[i]);
15434 pf->veb[i] = NULL;
15435 }
15436
15437 kfree(pf->qp_pile);
15438 kfree(pf->vsi);
15439
15440 iounmap(hw->hw_addr);
15441 kfree(pf);
15442 pci_release_mem_regions(pdev);
15443
15444 pci_disable_pcie_error_reporting(pdev);
15445 pci_disable_device(pdev);
15446 }
15447
/**
 * i40e_pci_error_detected - warning that something went wrong
 * @pdev: PCI device information struct
 * @error: the type of PCI error
 *
 * Called to warn that something happened on the PCI bus and the error
 * handling steps are in progress.  Allows the driver to gracefully
 * prepare/handle it.
 **/
15457 static pci_ers_result_t i40e_pci_error_detected(struct pci_dev *pdev,
15458 enum pci_channel_state error)
15459 {
15460 struct i40e_pf *pf = pci_get_drvdata(pdev);
15461
15462 dev_info(&pdev->dev, "%s: error %d\n", __func__, error);
15463
15464 if (!pf) {
15465 dev_info(&pdev->dev,
15466 "Cannot recover - error happened during device probe\n");
15467 return PCI_ERS_RESULT_DISCONNECT;
15468 }
15469
15470
15471 if (!test_bit(__I40E_SUSPENDED, pf->state))
15472 i40e_prep_for_reset(pf, false);
15473
15474
15475 return PCI_ERS_RESULT_NEED_RESET;
15476 }
15477
/**
 * i40e_pci_error_slot_reset - a PCI slot reset just happened
 * @pdev: PCI device information struct
 *
 * Called to find if the driver can work with the device now that
 * the pci slot has been reset.  If a basic connection seems good
 * (registers are readable and have sane content) then return a
 * happy little PCI_ERS_RESULT_xxx.
 **/
15487 static pci_ers_result_t i40e_pci_error_slot_reset(struct pci_dev *pdev)
15488 {
15489 struct i40e_pf *pf = pci_get_drvdata(pdev);
15490 pci_ers_result_t result;
15491 u32 reg;
15492
15493 dev_dbg(&pdev->dev, "%s\n", __func__);
15494 if (pci_enable_device_mem(pdev)) {
15495 dev_info(&pdev->dev,
15496 "Cannot re-enable PCI device after reset.\n");
15497 result = PCI_ERS_RESULT_DISCONNECT;
15498 } else {
15499 pci_set_master(pdev);
15500 pci_restore_state(pdev);
15501 pci_save_state(pdev);
15502 pci_wake_from_d3(pdev, false);
15503
15504 reg = rd32(&pf->hw, I40E_GLGEN_RTRIG);
15505 if (reg == 0)
15506 result = PCI_ERS_RESULT_RECOVERED;
15507 else
15508 result = PCI_ERS_RESULT_DISCONNECT;
15509 }
15510
15511 return result;
15512 }
15513
/**
 * i40e_pci_error_reset_prepare - prepare device driver for pci reset
 * @pdev: PCI device information struct
 **/
15518 static void i40e_pci_error_reset_prepare(struct pci_dev *pdev)
15519 {
15520 struct i40e_pf *pf = pci_get_drvdata(pdev);
15521
15522 i40e_prep_for_reset(pf, false);
15523 }
15524
/**
 * i40e_pci_error_reset_done - pci reset done, device driver reset can begin
 * @pdev: PCI device information struct
 **/
15529 static void i40e_pci_error_reset_done(struct pci_dev *pdev)
15530 {
15531 struct i40e_pf *pf = pci_get_drvdata(pdev);
15532
15533 i40e_reset_and_rebuild(pf, false, false);
15534 }
15535
/**
 * i40e_pci_error_resume - restart operations after PCI error recovery
 * @pdev: PCI device information struct
 *
 * Called to allow the driver to bring things back up after PCI error
 * and/or reset recovery has finished.
 **/
15543 static void i40e_pci_error_resume(struct pci_dev *pdev)
15544 {
15545 struct i40e_pf *pf = pci_get_drvdata(pdev);
15546
15547 dev_dbg(&pdev->dev, "%s\n", __func__);
15548 if (test_bit(__I40E_SUSPENDED, pf->state))
15549 return;
15550
15551 i40e_handle_reset_warning(pf, false);
15552 }
15553
/**
 * i40e_enable_mc_magic_wake - enable multicast magic packet wake up
 * using the mac_address_write admin q function
 * @pf: pointer to i40e_pf struct
 **/
15559 static void i40e_enable_mc_magic_wake(struct i40e_pf *pf)
15560 {
15561 struct i40e_hw *hw = &pf->hw;
15562 i40e_status ret;
15563 u8 mac_addr[6];
15564 u16 flags = 0;
15565
15566
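/* Get current MAC address in case it's an LAA */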
15567 if (pf->vsi[pf->lan_vsi] && pf->vsi[pf->lan_vsi]->netdev) {
15568 ether_addr_copy(mac_addr,
15569 pf->vsi[pf->lan_vsi]->netdev->dev_addr);
15570 } else {
15571 dev_err(&pf->pdev->dev,
15572 "Failed to retrieve MAC address; using default\n");
15573 ether_addr_copy(mac_addr, hw->mac.addr);
15574 }
15575
/* The FW expects the mac address write cmd to first be called with
 * one of these flags before calling it again with the multicast
 * enable flags.
 */
15580 flags = I40E_AQC_WRITE_TYPE_LAA_WOL;
15581
15582 if (hw->func_caps.flex10_enable && hw->partition_id != 1)
15583 flags = I40E_AQC_WRITE_TYPE_LAA_ONLY;
15584
15585 ret = i40e_aq_mac_address_write(hw, flags, mac_addr, NULL);
15586 if (ret) {
15587 dev_err(&pf->pdev->dev,
15588 "Failed to update MAC address registers; cannot enable Multicast Magic packet wake up");
15589 return;
15590 }
15591
15592 flags = I40E_AQC_MC_MAG_EN
15593 | I40E_AQC_WOL_PRESERVE_ON_PFR
15594 | I40E_AQC_WRITE_TYPE_UPDATE_MC_MAG;
15595 ret = i40e_aq_mac_address_write(hw, flags, mac_addr, NULL);
15596 if (ret)
15597 dev_err(&pf->pdev->dev,
15598 "Failed to enable Multicast Magic Packet wake up\n");
15599 }
15600
/**
 * i40e_shutdown - PCI callback for shutting down
 * @pdev: PCI device information struct
 **/
15605 static void i40e_shutdown(struct pci_dev *pdev)
15606 {
15607 struct i40e_pf *pf = pci_get_drvdata(pdev);
15608 struct i40e_hw *hw = &pf->hw;
15609
15610 set_bit(__I40E_SUSPENDED, pf->state);
15611 set_bit(__I40E_DOWN, pf->state);
15612
15613 del_timer_sync(&pf->service_timer);
15614 cancel_work_sync(&pf->service_task);
15615 i40e_cloud_filter_exit(pf);
15616 i40e_fdir_teardown(pf);
15617
/* Client close must be called explicitly here because the timer
 * has been stopped.
 */
15621 i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);
15622
15623 if (pf->wol_en && (pf->hw_features & I40E_HW_WOL_MC_MAGIC_PKT_WAKE))
15624 i40e_enable_mc_magic_wake(pf);
15625
15626 i40e_prep_for_reset(pf, false);
15627
15628 wr32(hw, I40E_PFPM_APM,
15629 (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
15630 wr32(hw, I40E_PFPM_WUFC,
15631 (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
15632
15633
15634 if (test_bit(__I40E_RECOVERY_MODE, pf->state) &&
15635 !(pf->flags & I40E_FLAG_MSIX_ENABLED))
15636 free_irq(pf->pdev->irq, pf);
15637
/* Since we're going to destroy queues during the
 * i40e_clear_interrupt_scheme() we should hold the RTNL lock for this
 * whole section
 */
15642 rtnl_lock();
15643 i40e_clear_interrupt_scheme(pf);
15644 rtnl_unlock();
15645
15646 if (system_state == SYSTEM_POWER_OFF) {
15647 pci_wake_from_d3(pdev, pf->wol_en);
15648 pci_set_power_state(pdev, PCI_D3hot);
15649 }
15650 }
15651
/**
 * i40e_suspend - PM callback for moving the device into a lower power state
 * @dev: generic device information structure
 **/
15656 static int __maybe_unused i40e_suspend(struct device *dev)
15657 {
15658 struct i40e_pf *pf = dev_get_drvdata(dev);
15659 struct i40e_hw *hw = &pf->hw;
15660
15661
15662 if (test_and_set_bit(__I40E_SUSPENDED, pf->state))
15663 return 0;
15664
15665 set_bit(__I40E_DOWN, pf->state);
15666
15667
15668 del_timer_sync(&pf->service_timer);
15669 cancel_work_sync(&pf->service_task);
15670
/* Client close must be called explicitly here because the timer
 * has been stopped.
 */
15674 i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);
15675
15676 if (pf->wol_en && (pf->hw_features & I40E_HW_WOL_MC_MAGIC_PKT_WAKE))
15677 i40e_enable_mc_magic_wake(pf);
15678
/* Since we're going to destroy queues during the
 * i40e_clear_interrupt_scheme() we should hold the RTNL lock for this
 * whole section
 */
15683 rtnl_lock();
15684
15685 i40e_prep_for_reset(pf, true);
15686
15687 wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
15688 wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
15689
/* Clear the interrupt scheme and release our IRQs so that the system
 * can safely hibernate even when there are a large number of CPUs.
 * Otherwise hibernation might fail when mapping all the vectors back
 * to CPU0.
 */
15695 i40e_clear_interrupt_scheme(pf);
15696
15697 rtnl_unlock();
15698
15699 return 0;
15700 }
15701
/**
 * i40e_resume - PM callback for waking the device
 * @dev: generic device information structure
 **/
15706 static int __maybe_unused i40e_resume(struct device *dev)
15707 {
15708 struct i40e_pf *pf = dev_get_drvdata(dev);
15709 int err;
15710
15711
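/* If we're not suspended, then there is nothing to do */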
15712 if (!test_bit(__I40E_SUSPENDED, pf->state))
15713 return 0;
15714
/* We need to hold the RTNL lock prior to restoring interrupt schemes,
 * since we're going to be restoring queues
 */
15718 rtnl_lock();
15719
/* We cleared the interrupt scheme when we suspended, so we need to
 * restore it now to resume device functionality.
 */
15723 err = i40e_restore_interrupt_scheme(pf);
15724 if (err) {
15725 dev_err(dev, "Cannot restore interrupt scheme: %d\n",
15726 err);
15727 }
15728
15729 clear_bit(__I40E_DOWN, pf->state);
15730 i40e_reset_and_rebuild(pf, false, true);
15731
15732 rtnl_unlock();
15733
15734
15735 clear_bit(__I40E_SUSPENDED, pf->state);
15736
15737
15738 mod_timer(&pf->service_timer,
15739 round_jiffies(jiffies + pf->service_timer_period));
15740
15741 return 0;
15742 }
15743
15744 static const struct pci_error_handlers i40e_err_handler = {
15745 .error_detected = i40e_pci_error_detected,
15746 .slot_reset = i40e_pci_error_slot_reset,
15747 .reset_prepare = i40e_pci_error_reset_prepare,
15748 .reset_done = i40e_pci_error_reset_done,
15749 .resume = i40e_pci_error_resume,
15750 };
15751
15752 static SIMPLE_DEV_PM_OPS(i40e_pm_ops, i40e_suspend, i40e_resume);
15753
15754 static struct pci_driver i40e_driver = {
15755 .name = i40e_driver_name,
15756 .id_table = i40e_pci_tbl,
15757 .probe = i40e_probe,
15758 .remove = i40e_remove,
15759 .driver = {
15760 .pm = &i40e_pm_ops,
15761 },
15762 .shutdown = i40e_shutdown,
15763 .err_handler = &i40e_err_handler,
15764 .sriov_configure = i40e_pci_sriov_configure,
15765 };
15766
/**
 * i40e_init_module - Driver registration routine
 *
 * i40e_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
15773 static int __init i40e_init_module(void)
15774 {
15775 pr_info("%s: %s - version %s\n", i40e_driver_name,
15776 i40e_driver_string, i40e_driver_version_str);
15777 pr_info("%s: %s\n", i40e_driver_name, i40e_copyright);
15778
/* There is no need to throttle the number of active tasks because
 * each device limits its own task using a state bit for scheduling
 * the service task, and the device tasks do not interfere with each
 * other, so we don't set a max task limit. We must set WQ_MEM_RECLAIM
 * since we need to be able to guarantee forward progress even under
 * memory pressure.
 */
15786 i40e_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, i40e_driver_name);
15787 if (!i40e_wq) {
15788 pr_err("%s: Failed to create workqueue\n", i40e_driver_name);
15789 return -ENOMEM;
15790 }
15791
15792 i40e_dbg_init();
15793 return pci_register_driver(&i40e_driver);
15794 }
15795 module_init(i40e_init_module);
15796
/**
 * i40e_exit_module - Driver exit cleanup routine
 *
 * i40e_exit_module is called just before the driver is removed
 * from memory.
 **/
15803 static void __exit i40e_exit_module(void)
15804 {
15805 pci_unregister_driver(&i40e_driver);
15806 destroy_workqueue(i40e_wq);
15807 i40e_dbg_exit();
15808 }
15809 module_exit(i40e_exit_module);