This source file includes following definitions.
- ice_set_mac_type
- ice_dev_onetime_setup
- ice_clear_pf_cfg
- ice_aq_manage_mac_read
- ice_aq_get_phy_caps
- ice_get_media_type
- ice_aq_get_link_info
- ice_init_flex_flags
- ice_init_flex_flds
- ice_init_fltr_mgmt_struct
- ice_cleanup_fltr_mgmt_struct
- ice_get_fw_log_cfg
- ice_cfg_fw_log
- ice_output_fw_log
- ice_get_itr_intrl_gran
- ice_get_nvm_version
- ice_init_hw
- ice_deinit_hw
- ice_check_reset
- ice_pf_reset
- ice_reset
- ice_copy_rxq_ctx_to_hw
- ice_write_rxq_ctx
- ice_debug_cq
- ice_aq_send_cmd
- ice_aq_get_fw_ver
- ice_aq_send_driver_ver
- ice_aq_q_shutdown
- ice_aq_req_res
- ice_aq_release_res
- ice_acquire_res
- ice_release_res
- ice_get_num_per_func
- ice_parse_caps
- ice_aq_discover_caps
- ice_discover_caps
- ice_set_safe_mode_caps
- ice_get_caps
- ice_aq_manage_mac_write
- ice_aq_clear_pxe_mode
- ice_clear_pxe_mode
- ice_get_link_speed_based_on_phy_type
- ice_update_phy_type
- ice_aq_set_phy_cfg
- ice_update_link_info
- ice_set_fc
- ice_copy_phy_caps_to_cfg
- ice_cfg_phy_fec
- ice_get_link_status
- ice_aq_set_link_restart_an
- ice_aq_set_event_mask
- ice_aq_set_mac_loopback
- ice_aq_set_port_id_led
- __ice_aq_get_set_rss_lut
- ice_aq_get_rss_lut
- ice_aq_set_rss_lut
- __ice_aq_get_set_rss_key
- ice_aq_get_rss_key
- ice_aq_set_rss_key
- ice_aq_add_lan_txq
- ice_aq_dis_lan_txq
- ice_write_byte
- ice_write_word
- ice_write_dword
- ice_write_qword
- ice_set_ctx
- ice_get_lan_q_ctx
- ice_ena_vsi_txq
- ice_dis_vsi_txq
- ice_cfg_vsi_qs
- ice_cfg_vsi_lan
- ice_replay_pre_init
- ice_replay_vsi
- ice_replay_post
- ice_stat_update40
- ice_stat_update32
- ice_sched_query_elem
1
2
3
4 #include "ice_common.h"
5 #include "ice_sched.h"
6 #include "ice_adminq_cmd.h"
7
/* Number of 10 ms (or 1 ms, see reset pollers) iterations to wait for a
 * PF/global reset to complete before giving up.
 */
#define ICE_PF_RESET_WAIT_COUNT	200

/* Program one flexible-descriptor metadata word for Rx profile @rxdid:
 * writes the opcode and the metadata ID (@mdid) into word slot @idx of
 * GLFLXP_RXDID_FLX_WRD.
 */
#define ICE_PROG_FLEX_ENTRY(hw, rxdid, mdid, idx) \
	wr32((hw), GLFLXP_RXDID_FLX_WRD_##idx(rxdid), \
	     ((ICE_RX_OPC_MDID << \
	       GLFLXP_RXDID_FLX_WRD_##idx##_RXDID_OPCODE_S) & \
	      GLFLXP_RXDID_FLX_WRD_##idx##_RXDID_OPCODE_M) | \
	     (((mdid) << GLFLXP_RXDID_FLX_WRD_##idx##_PROT_MDID_S) & \
	      GLFLXP_RXDID_FLX_WRD_##idx##_PROT_MDID_M))

/* Program four flexible-descriptor flag selections (flg_0..flg_3) into
 * flag register slot @idx of GLFLXP_RXDID_FLAGS for Rx profile @rxdid.
 */
#define ICE_PROG_FLG_ENTRY(hw, rxdid, flg_0, flg_1, flg_2, flg_3, idx) \
	wr32((hw), GLFLXP_RXDID_FLAGS(rxdid, idx), \
	     (((flg_0) << GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_S) & \
	      GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_M) | \
	     (((flg_1) << GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_1_S) & \
	      GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_1_M) | \
	     (((flg_2) << GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_2_S) & \
	      GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_2_M) | \
	     (((flg_3) << GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_3_S) & \
	      GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_3_M))
28
29
30
31
32
33
34
35
36 static enum ice_status ice_set_mac_type(struct ice_hw *hw)
37 {
38 if (hw->vendor_id != PCI_VENDOR_ID_INTEL)
39 return ICE_ERR_DEVICE_NOT_SUPPORTED;
40
41 hw->mac_type = ICE_MAC_GENERIC;
42 return 0;
43 }
44
45
46
47
48
49
50
51
/**
 * ice_dev_onetime_setup - Temporary HW/FW workarounds
 * @hw: pointer to the HW structure
 *
 * Performs one-time operations to make our device a bit more usable.
 * Anything here should be removed once the corresponding firmware fix
 * is available.
 */
void ice_dev_onetime_setup(struct ice_hw *hw)
{
#define MBX_PF_VT_PFALLOC	0x00231E80
	/* set VFs per PF: mirror PF_VT_PFALLOC_HIF into the mailbox copy */
	wr32(hw, MBX_PF_VT_PFALLOC, rd32(hw, PF_VT_PFALLOC_HIF));
}
58
59
60
61
62
63
64
65
66 enum ice_status ice_clear_pf_cfg(struct ice_hw *hw)
67 {
68 struct ice_aq_desc desc;
69
70 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pf_cfg);
71
72 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
73 }
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
/**
 * ice_aq_manage_mac_read - manage MAC address read command
 * @hw: pointer to the HW struct
 * @buf: a virtual buffer to hold the MAC addresses
 * @buf_size: size of the buffer in bytes
 * @cd: pointer to command details structure or NULL
 *
 * Returns the per-PF station MAC addresses (AQ cmd 0x0107). On success the
 * response is written into @buf (interpret it as an array of
 * struct ice_aqc_manage_mac_read_resp) and the LAN address is cached in
 * hw->port_info->mac.
 */
static enum ice_status
ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
		       struct ice_sq_cd *cd)
{
	struct ice_aqc_manage_mac_read_resp *resp;
	struct ice_aqc_manage_mac_read *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;
	u16 flags;
	u8 i;

	cmd = &desc.params.mac_read;

	/* buffer must be able to hold at least one response entry */
	if (buf_size < sizeof(*resp))
		return ICE_ERR_BUF_TOO_SHORT;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_read);

	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
	if (status)
		return status;

	/* firmware filled @buf with the response entries; flags and the
	 * entry count come back in the descriptor itself
	 */
	resp = (struct ice_aqc_manage_mac_read_resp *)buf;
	flags = le16_to_cpu(cmd->flags) & ICE_AQC_MAN_MAC_READ_M;

	if (!(flags & ICE_AQC_MAN_MAC_LAN_ADDR_VALID)) {
		ice_debug(hw, ICE_DBG_LAN, "got invalid MAC address\n");
		return ICE_ERR_CFG;
	}

	/* copy the first LAN-type address into the cached port MAC info */
	for (i = 0; i < cmd->num_addr; i++)
		if (resp[i].addr_type == ICE_AQC_MAN_MAC_ADDR_TYPE_LAN) {
			ether_addr_copy(hw->port_info->mac.lan_addr,
					resp[i].mac_addr);
			ether_addr_copy(hw->port_info->mac.perm_addr,
					resp[i].mac_addr);
			break;
		}

	return 0;
}
131
132
133
134
135
136
137
138
139
140
141
/**
 * ice_aq_get_phy_caps - returns PHY capabilities
 * @pi: port information structure
 * @qual_mods: report qualified modules
 * @report_mode: report mode capabilities
 * @pcaps: structure for PHY capabilities to be filled
 * @cd: pointer to command details structure or NULL
 *
 * Returns the various PHY capabilities supported on the Port (AQ cmd 0x0600).
 * When @report_mode is ICE_AQC_REPORT_TOPO_CAP, the reported PHY types are
 * also cached in pi->phy for later link configuration.
 */
enum ice_status
ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
		    struct ice_aqc_get_phy_caps_data *pcaps,
		    struct ice_sq_cd *cd)
{
	struct ice_aqc_get_phy_caps *cmd;
	u16 pcaps_size = sizeof(*pcaps);
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd = &desc.params.get_phy;

	if (!pcaps || (report_mode & ~ICE_AQC_REPORT_MODE_M) || !pi)
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_caps);

	if (qual_mods)
		cmd->param0 |= cpu_to_le16(ICE_AQC_GET_PHY_RQM);

	cmd->param0 |= cpu_to_le16(report_mode);
	status = ice_aq_send_cmd(pi->hw, &desc, pcaps, pcaps_size, cd);

	/* cache topology-reported PHY types for later use */
	if (!status && report_mode == ICE_AQC_REPORT_TOPO_CAP) {
		pi->phy.phy_type_low = le64_to_cpu(pcaps->phy_type_low);
		pi->phy.phy_type_high = le64_to_cpu(pcaps->phy_type_high);
	}

	return status;
}
172
173
174
175
176
/**
 * ice_get_media_type - Gets media type
 * @pi: port information structure
 *
 * Derives the media type from the PHY type reported in the cached link
 * status. Returns ICE_MEDIA_UNKNOWN when no type (or an ambiguous
 * combination of types) is reported.
 */
static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
{
	struct ice_link_status *hw_link_info;

	if (!pi)
		return ICE_MEDIA_UNKNOWN;

	hw_link_info = &pi->phy.link_info;
	if (hw_link_info->phy_type_low && hw_link_info->phy_type_high)
		/* If more than one media type is selected, report unknown */
		return ICE_MEDIA_UNKNOWN;

	if (hw_link_info->phy_type_low) {
		switch (hw_link_info->phy_type_low) {
		case ICE_PHY_TYPE_LOW_1000BASE_SX:
		case ICE_PHY_TYPE_LOW_1000BASE_LX:
		case ICE_PHY_TYPE_LOW_10GBASE_SR:
		case ICE_PHY_TYPE_LOW_10GBASE_LR:
		case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
		case ICE_PHY_TYPE_LOW_25GBASE_SR:
		case ICE_PHY_TYPE_LOW_25GBASE_LR:
		case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
		case ICE_PHY_TYPE_LOW_40GBASE_SR4:
		case ICE_PHY_TYPE_LOW_40GBASE_LR4:
		case ICE_PHY_TYPE_LOW_50GBASE_SR2:
		case ICE_PHY_TYPE_LOW_50GBASE_LR2:
		case ICE_PHY_TYPE_LOW_50GBASE_SR:
		case ICE_PHY_TYPE_LOW_50GBASE_FR:
		case ICE_PHY_TYPE_LOW_50GBASE_LR:
		case ICE_PHY_TYPE_LOW_100GBASE_SR4:
		case ICE_PHY_TYPE_LOW_100GBASE_LR4:
		case ICE_PHY_TYPE_LOW_100GBASE_SR2:
		case ICE_PHY_TYPE_LOW_100GBASE_DR:
			return ICE_MEDIA_FIBER;
		case ICE_PHY_TYPE_LOW_100BASE_TX:
		case ICE_PHY_TYPE_LOW_1000BASE_T:
		case ICE_PHY_TYPE_LOW_2500BASE_T:
		case ICE_PHY_TYPE_LOW_5GBASE_T:
		case ICE_PHY_TYPE_LOW_10GBASE_T:
		case ICE_PHY_TYPE_LOW_25GBASE_T:
			return ICE_MEDIA_BASET;
		case ICE_PHY_TYPE_LOW_10G_SFI_DA:
		case ICE_PHY_TYPE_LOW_25GBASE_CR:
		case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
		case ICE_PHY_TYPE_LOW_25GBASE_CR1:
		case ICE_PHY_TYPE_LOW_40GBASE_CR4:
		case ICE_PHY_TYPE_LOW_50GBASE_CR2:
		case ICE_PHY_TYPE_LOW_50GBASE_CP:
		case ICE_PHY_TYPE_LOW_100GBASE_CR4:
		case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
		case ICE_PHY_TYPE_LOW_100GBASE_CP2:
			return ICE_MEDIA_DA;
		case ICE_PHY_TYPE_LOW_1000BASE_KX:
		case ICE_PHY_TYPE_LOW_2500BASE_KX:
		case ICE_PHY_TYPE_LOW_2500BASE_X:
		case ICE_PHY_TYPE_LOW_5GBASE_KR:
		case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
		case ICE_PHY_TYPE_LOW_25GBASE_KR:
		case ICE_PHY_TYPE_LOW_25GBASE_KR1:
		case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
		case ICE_PHY_TYPE_LOW_40GBASE_KR4:
		case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
		case ICE_PHY_TYPE_LOW_50GBASE_KR2:
		case ICE_PHY_TYPE_LOW_100GBASE_KR4:
		case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
			return ICE_MEDIA_BACKPLANE;
		}
	} else {
		switch (hw_link_info->phy_type_high) {
		case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
			return ICE_MEDIA_BACKPLANE;
		}
	}
	return ICE_MEDIA_UNKNOWN;
}
252
253
254
255
256
257
258
259
260
261
/**
 * ice_aq_get_link_info - get the link status
 * @pi: port information structure
 * @ena_lse: enable/disable LinkStatusEvent reporting
 * @link: pointer to link status structure - optional
 * @cd: pointer to command details structure or NULL
 *
 * Gets the current link status (AQ cmd 0x0607), caches it in
 * pi->phy.link_info (saving the previous value in link_info_old), updates
 * the derived media type and flow-control state, and optionally copies the
 * result to @link.
 */
enum ice_status
ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
		     struct ice_link_status *link, struct ice_sq_cd *cd)
{
	struct ice_aqc_get_link_status_data link_data = { 0 };
	struct ice_aqc_get_link_status *resp;
	struct ice_link_status *li_old, *li;
	enum ice_media_type *hw_media_type;
	struct ice_fc_info *hw_fc_info;
	bool tx_pause, rx_pause;
	struct ice_aq_desc desc;
	enum ice_status status;
	struct ice_hw *hw;
	u16 cmd_flags;

	if (!pi)
		return ICE_ERR_PARAM;
	hw = pi->hw;
	li_old = &pi->phy.link_info_old;
	hw_media_type = &pi->phy.media_type;
	li = &pi->phy.link_info;
	hw_fc_info = &pi->fc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_status);
	cmd_flags = (ena_lse) ? ICE_AQ_LSE_ENA : ICE_AQ_LSE_DIS;
	resp = &desc.params.get_link_status;
	resp->cmd_flags = cpu_to_le16(cmd_flags);
	resp->lport_num = pi->lport;

	status = ice_aq_send_cmd(hw, &desc, &link_data, sizeof(link_data), cd);

	if (status)
		return status;

	/* save off old link status information */
	*li_old = *li;

	/* update current link status information */
	li->link_speed = le16_to_cpu(link_data.link_speed);
	li->phy_type_low = le64_to_cpu(link_data.phy_type_low);
	li->phy_type_high = le64_to_cpu(link_data.phy_type_high);
	*hw_media_type = ice_get_media_type(pi);
	li->link_info = link_data.link_info;
	li->an_info = link_data.an_info;
	li->ext_info = link_data.ext_info;
	li->max_frame_size = le16_to_cpu(link_data.max_frame_size);
	li->fec_info = link_data.cfg & ICE_AQ_FEC_MASK;
	li->topo_media_conflict = link_data.topo_media_conflict;
	li->pacing = link_data.cfg & (ICE_AQ_CFG_PACING_M |
				      ICE_AQ_CFG_PACING_TYPE_M);

	/* update fc info */
	tx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_TX);
	rx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_RX);
	if (tx_pause && rx_pause)
		hw_fc_info->current_mode = ICE_FC_FULL;
	else if (tx_pause)
		hw_fc_info->current_mode = ICE_FC_TX_PAUSE;
	else if (rx_pause)
		hw_fc_info->current_mode = ICE_FC_RX_PAUSE;
	else
		hw_fc_info->current_mode = ICE_FC_NONE;

	li->lse_ena = !!(resp->cmd_flags & cpu_to_le16(ICE_AQ_LSE_IS_ENABLED));

	ice_debug(hw, ICE_DBG_LINK, "link_speed = 0x%x\n", li->link_speed);
	ice_debug(hw, ICE_DBG_LINK, "phy_type_low = 0x%llx\n",
		  (unsigned long long)li->phy_type_low);
	ice_debug(hw, ICE_DBG_LINK, "phy_type_high = 0x%llx\n",
		  (unsigned long long)li->phy_type_high);
	ice_debug(hw, ICE_DBG_LINK, "media_type = 0x%x\n", *hw_media_type);
	ice_debug(hw, ICE_DBG_LINK, "link_info = 0x%x\n", li->link_info);
	ice_debug(hw, ICE_DBG_LINK, "an_info = 0x%x\n", li->an_info);
	ice_debug(hw, ICE_DBG_LINK, "ext_info = 0x%x\n", li->ext_info);
	ice_debug(hw, ICE_DBG_LINK, "lse_ena = 0x%x\n", li->lse_ena);
	ice_debug(hw, ICE_DBG_LINK, "max_frame = 0x%x\n", li->max_frame_size);
	ice_debug(hw, ICE_DBG_LINK, "pacing = 0x%x\n", li->pacing);

	/* save link status information */
	if (link)
		*link = *li;

	/* flag cleared so calling functions don't call AQ again */
	pi->phy.get_link_info = false;

	return 0;
}
349
350
351
352
353
354
355
356
/**
 * ice_init_flex_flags - initialize flex flags for the given Rx profile
 * @hw: pointer to the hardware structure
 * @prof_id: Rx descriptor profile ID
 *
 * Programs the flag fields of the flexible Rx descriptor for @prof_id via
 * ICE_PROG_FLG_ENTRY. Five consecutive flag register slots (idx 0..4) are
 * written; ICE_FLG_PKT_DSI is used as filler for unused positions.
 */
static void ice_init_flex_flags(struct ice_hw *hw, enum ice_rxdid prof_id)
{
	u8 idx = 0;

	switch (prof_id) {
	/* Rx flex flags are currently programmed for the NIC profiles only.
	 * Different flag bit programming configurations can be added per
	 * profile as needed.
	 */
	case ICE_RXDID_FLEX_NIC:
	case ICE_RXDID_FLEX_NIC_2:
		ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_FLG_PKT_FRG,
				   ICE_FLG_UDP_GRE, ICE_FLG_PKT_DSI,
				   ICE_FLG_FIN, idx++);
		/* flag slot 1: SYN/RST plus two filler positions */
		ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_FLG_SYN, ICE_FLG_RST,
				   ICE_FLG_PKT_DSI, ICE_FLG_PKT_DSI, idx++);
		ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_FLG_PKT_DSI,
				   ICE_FLG_PKT_DSI, ICE_FLG_EVLAN_x8100,
				   ICE_FLG_EVLAN_x9100, idx++);
		ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_FLG_VLAN_x8100,
				   ICE_FLG_TNL_VLAN, ICE_FLG_TNL_MAC,
				   ICE_FLG_TNL0, idx++);
		ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_FLG_TNL1, ICE_FLG_TNL2,
				   ICE_FLG_PKT_DSI, ICE_FLG_PKT_DSI, idx);
		break;

	default:
		ice_debug(hw, ICE_DBG_INIT,
			  "Flag programming for profile ID %d not supported\n",
			  prof_id);
	}
}
398
399
400
401
402
403
404
405
/**
 * ice_init_flex_flds - initialize flex metadata fields for an Rx profile
 * @hw: pointer to the hardware structure
 * @prof_id: Rx descriptor profile ID
 *
 * Programs the four flexible metadata words (hash, flow ID, and either
 * source VSI or flow ID high depending on the profile) and then the flag
 * fields for the given profile.
 */
static void ice_init_flex_flds(struct ice_hw *hw, enum ice_rxdid prof_id)
{
	enum ice_flex_rx_mdid mdid;

	switch (prof_id) {
	case ICE_RXDID_FLEX_NIC:
	case ICE_RXDID_FLEX_NIC_2:
		ICE_PROG_FLEX_ENTRY(hw, prof_id, ICE_RX_MDID_HASH_LOW, 0);
		ICE_PROG_FLEX_ENTRY(hw, prof_id, ICE_RX_MDID_HASH_HIGH, 1);
		ICE_PROG_FLEX_ENTRY(hw, prof_id, ICE_RX_MDID_FLOW_ID_LOWER, 2);

		/* word 3 carries source VSI for the "2" profile variant */
		mdid = (prof_id == ICE_RXDID_FLEX_NIC_2) ?
			ICE_RX_MDID_SRC_VSI : ICE_RX_MDID_FLOW_ID_HIGH;

		ICE_PROG_FLEX_ENTRY(hw, prof_id, mdid, 3);

		ice_init_flex_flags(hw, prof_id);
		break;

	default:
		ice_debug(hw, ICE_DBG_INIT,
			  "Field init for profile ID %d not supported\n",
			  prof_id);
	}
}
431
432
433
434
435
436 static enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw)
437 {
438 struct ice_switch_info *sw;
439
440 hw->switch_info = devm_kzalloc(ice_hw_to_dev(hw),
441 sizeof(*hw->switch_info), GFP_KERNEL);
442 sw = hw->switch_info;
443
444 if (!sw)
445 return ICE_ERR_NO_MEMORY;
446
447 INIT_LIST_HEAD(&sw->vsi_list_map_head);
448
449 return ice_init_def_sw_recp(hw);
450 }
451
452
453
454
455
/**
 * ice_cleanup_fltr_mgmt_struct - cleanup filter management list and locks
 * @hw: pointer to the HW struct
 *
 * Frees every VSI list map entry, every filter rule in each recipe, the
 * recipe array itself and finally the switch_info structure. Assumes
 * hw->switch_info was set up by ice_init_fltr_mgmt_struct.
 */
static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_vsi_list_map_info *v_pos_map;
	struct ice_vsi_list_map_info *v_tmp_map;
	struct ice_sw_recipe *recps;
	u8 i;

	list_for_each_entry_safe(v_pos_map, v_tmp_map, &sw->vsi_list_map_head,
				 list_entry) {
		list_del(&v_pos_map->list_entry);
		devm_kfree(ice_hw_to_dev(hw), v_pos_map);
	}
	recps = hw->switch_info->recp_list;
	for (i = 0; i < ICE_SW_LKUP_LAST; i++) {
		struct ice_fltr_mgmt_list_entry *lst_itr, *tmp_entry;

		recps[i].root_rid = i;
		mutex_destroy(&recps[i].filt_rule_lock);
		list_for_each_entry_safe(lst_itr, tmp_entry,
					 &recps[i].filt_rules, list_entry) {
			list_del(&lst_itr->list_entry);
			devm_kfree(ice_hw_to_dev(hw), lst_itr);
		}
	}
	ice_rm_all_sw_replay_rule_info(hw);
	devm_kfree(ice_hw_to_dev(hw), sw->recp_list);
	devm_kfree(ice_hw_to_dev(hw), sw);
}
485
/* Size in bytes of an AQ FW logging buffer holding @n entries; the struct
 * already contains one entry, hence the (n - 1).
 */
#define ICE_FW_LOG_DESC_SIZE(n) (sizeof(struct ice_aqc_fw_logging_data) + \
	(((n) - 1) * sizeof(((struct ice_aqc_fw_logging_data *)0)->entry)))
/* Buffer size needed to hold every FW logging module entry */
#define ICE_FW_LOG_DESC_SIZE_MAX	\
	ICE_FW_LOG_DESC_SIZE(ICE_AQC_FW_LOG_ID_MAX)
490
491
492
493
494
/**
 * ice_get_fw_log_cfg - get FW logging configuration
 * @hw: pointer to the HW struct
 *
 * Queries the current firmware logging configuration (AQ cmd 0xFF32) and
 * caches the per-module event flags in hw->fw_log.evnts[].cur.
 */
static enum ice_status ice_get_fw_log_cfg(struct ice_hw *hw)
{
	struct ice_aqc_fw_logging_data *config;
	struct ice_aq_desc desc;
	enum ice_status status;
	u16 size;

	size = ICE_FW_LOG_DESC_SIZE_MAX;
	config = devm_kzalloc(ice_hw_to_dev(hw), size, GFP_KERNEL);
	if (!config)
		return ICE_ERR_NO_MEMORY;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logging_info);

	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_BUF);
	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	status = ice_aq_send_cmd(hw, &desc, config, size, NULL);
	if (!status) {
		u16 i;

		/* Save FW logging information into the HW structure */
		for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++) {
			u16 v, m, flgs;

			v = le16_to_cpu(config->entry[i]);
			m = (v & ICE_AQC_FW_LOG_ID_M) >> ICE_AQC_FW_LOG_ID_S;
			flgs = (v & ICE_AQC_FW_LOG_EN_M) >> ICE_AQC_FW_LOG_EN_S;

			if (m < ICE_AQC_FW_LOG_ID_MAX)
				hw->fw_log.evnts[m].cur = flgs;
		}
	}

	devm_kfree(ice_hw_to_dev(hw), config);

	return status;
}
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
/**
 * ice_cfg_fw_log - configure FW logging
 * @hw: pointer to the HW struct
 * @enable: enable certain FW logging events if true, disable all if false
 *
 * Enables or disables firmware logging (AQ cmd 0xFF09) over the admin queue
 * and/or UART, according to hw->fw_log.{cq_en,uart_en} and the per-module
 * event configuration in hw->fw_log.evnts[]. When enabling, only entries
 * whose desired config (.cfg) differs from the current FW state (.cur) are
 * sent. On success the cached .cur values and actv_evnts are updated to
 * match what was programmed.
 */
static enum ice_status ice_cfg_fw_log(struct ice_hw *hw, bool enable)
{
	struct ice_aqc_fw_logging_data *data = NULL;
	struct ice_aqc_fw_logging *cmd;
	enum ice_status status = 0;
	u16 i, chgs = 0, len = 0;
	struct ice_aq_desc desc;
	u8 actv_evnts = 0;
	void *buf = NULL;

	if (!hw->fw_log.cq_en && !hw->fw_log.uart_en)
		return 0;

	/* Disable FW logging only when the control queue is still responsive
	 * and there is something to disable.
	 */
	if (!enable &&
	    (!hw->fw_log.actv_evnts || !ice_check_sq_alive(hw, &hw->adminq)))
		return 0;

	/* Get current FW log settings */
	status = ice_get_fw_log_cfg(hw);
	if (status)
		return status;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logging);
	cmd = &desc.params.fw_logging;

	/* Indicate which controls are valid */
	if (hw->fw_log.cq_en)
		cmd->log_ctrl_valid |= ICE_AQC_FW_LOG_AQ_VALID;

	if (hw->fw_log.uart_en)
		cmd->log_ctrl_valid |= ICE_AQC_FW_LOG_UART_VALID;

	if (enable) {
		/* Fill in an array of entries with FW logging modules and
		 * logging events being reconfigured.
		 */
		for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++) {
			u16 val;

			/* Keep track of enabled event types */
			actv_evnts |= hw->fw_log.evnts[i].cfg;

			if (hw->fw_log.evnts[i].cfg == hw->fw_log.evnts[i].cur)
				continue;

			if (!data) {
				data = devm_kzalloc(ice_hw_to_dev(hw),
						    ICE_FW_LOG_DESC_SIZE_MAX,
						    GFP_KERNEL);
				if (!data)
					return ICE_ERR_NO_MEMORY;
			}

			val = i << ICE_AQC_FW_LOG_ID_S;
			val |= hw->fw_log.evnts[i].cfg << ICE_AQC_FW_LOG_EN_S;
			data->entry[chgs++] = cpu_to_le16(val);
		}

		/* Only enable FW logging if at least one module is specified.
		 * If FW logging is currently enabled but all modules are not
		 * enabled to emit log messages, disable FW logging altogether.
		 */
		if (actv_evnts) {
			/* Leave if there is effectively no change */
			if (!chgs)
				goto out;

			if (hw->fw_log.cq_en)
				cmd->log_ctrl |= ICE_AQC_FW_LOG_AQ_EN;

			if (hw->fw_log.uart_en)
				cmd->log_ctrl |= ICE_AQC_FW_LOG_UART_EN;

			buf = data;
			len = ICE_FW_LOG_DESC_SIZE(chgs);
			desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
		}
	}

	status = ice_aq_send_cmd(hw, &desc, buf, len, NULL);
	if (!status) {
		/* Update the current configuration to reflect events enabled.
		 * hw->fw_log.cq_en and hw->fw_log.uart_en indicate if the FW
		 * logging mode is enabled for the device. They do not reflect
		 * actual modules being enabled to emit log messages. So, their
		 * values remain unchanged even when all modules are disabled.
		 */
		u16 cnt = enable ? chgs : (u16)ICE_AQC_FW_LOG_ID_MAX;

		hw->fw_log.actv_evnts = actv_evnts;
		for (i = 0; i < cnt; i++) {
			u16 v, m;

			if (!enable) {
				/* When disabling all FW logging events as part
				 * of device's de-initialization, the original
				 * configurations are retained, and can be used
				 * to reconfigure FW logging later if the device
				 * is re-initialized.
				 */
				hw->fw_log.evnts[i].cur = 0;
				continue;
			}

			v = le16_to_cpu(data->entry[i]);
			m = (v & ICE_AQC_FW_LOG_ID_M) >> ICE_AQC_FW_LOG_ID_S;
			hw->fw_log.evnts[m].cur = hw->fw_log.evnts[m].cfg;
		}
	}

out:
	if (data)
		devm_kfree(ice_hw_to_dev(hw), data);

	return status;
}
687
688
689
690
691
692
693
694
695
696 void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf)
697 {
698 ice_debug(hw, ICE_DBG_FW_LOG, "[ FW Log Msg Start ]\n");
699 ice_debug_array(hw, ICE_DBG_FW_LOG, 16, 1, (u8 *)buf,
700 le16_to_cpu(desc->datalen));
701 ice_debug(hw, ICE_DBG_FW_LOG, "[ FW Log Msg End ]\n");
702 }
703
704
705
706
707
708
709
710
/**
 * ice_get_itr_intrl_gran - determine ITR/INTRL granularity
 * @hw: pointer to the HW struct
 *
 * Determines the ITR/INTRL granularities based on the maximum aggregate
 * bandwidth according to the device's configuration during power-on.
 *
 * NOTE(review): the switch has no default case, so an unrecognized
 * max_agg_bw value leaves hw->itr_gran/hw->intrl_gran unset — presumably
 * all reachable register values are covered; confirm against the datasheet.
 */
static void ice_get_itr_intrl_gran(struct ice_hw *hw)
{
	u8 max_agg_bw = (rd32(hw, GL_PWR_MODE_CTL) &
			 GL_PWR_MODE_CTL_CAR_MAX_BW_M) >>
			GL_PWR_MODE_CTL_CAR_MAX_BW_S;

	switch (max_agg_bw) {
	case ICE_MAX_AGG_BW_200G:
	case ICE_MAX_AGG_BW_100G:
	case ICE_MAX_AGG_BW_50G:
		hw->itr_gran = ICE_ITR_GRAN_ABOVE_25;
		hw->intrl_gran = ICE_INTRL_GRAN_ABOVE_25;
		break;
	case ICE_MAX_AGG_BW_25G:
		hw->itr_gran = ICE_ITR_GRAN_MAX_25;
		hw->intrl_gran = ICE_INTRL_GRAN_MAX_25;
		break;
	}
}
730
731
732
733
734
735
736
737
738
739
740 void
741 ice_get_nvm_version(struct ice_hw *hw, u8 *oem_ver, u16 *oem_build,
742 u8 *oem_patch, u8 *ver_hi, u8 *ver_lo)
743 {
744 struct ice_nvm_info *nvm = &hw->nvm;
745
746 *oem_ver = (u8)((nvm->oem_ver & ICE_OEM_VER_MASK) >> ICE_OEM_VER_SHIFT);
747 *oem_patch = (u8)(nvm->oem_ver & ICE_OEM_VER_PATCH_MASK);
748 *oem_build = (u16)((nvm->oem_ver & ICE_OEM_VER_BUILD_MASK) >>
749 ICE_OEM_VER_BUILD_SHIFT);
750 *ver_hi = (nvm->ver & ICE_NVM_VER_HI_MASK) >> ICE_NVM_VER_HI_SHIFT;
751 *ver_lo = (nvm->ver & ICE_NVM_VER_LO_MASK) >> ICE_NVM_VER_LO_SHIFT;
752 }
753
754
755
756
757
/**
 * ice_init_hw - main hardware initialization routine
 * @hw: pointer to the hardware structure
 *
 * Full device bring-up: MAC type, PF reset, control queues, FW logging,
 * PF config clear, NVM, capabilities, port info, switch config, scheduler,
 * PHY/link caps, filter management, MAC address read, flex descriptors and
 * HW tables. Errors unwind everything initialized so far via the labelled
 * goto chain at the bottom.
 */
enum ice_status ice_init_hw(struct ice_hw *hw)
{
	struct ice_aqc_get_phy_caps_data *pcaps;
	enum ice_status status;
	u16 mac_buf_len;
	void *mac_buf;

	/* Set MAC type based on DeviceID */
	status = ice_set_mac_type(hw);
	if (status)
		return status;

	hw->pf_id = (u8)(rd32(hw, PF_FUNC_RID) &
			 PF_FUNC_RID_FUNC_NUM_M) >>
		PF_FUNC_RID_FUNC_NUM_S;

	status = ice_reset(hw, ICE_RESET_PFR);
	if (status)
		return status;

	ice_get_itr_intrl_gran(hw);

	status = ice_create_all_ctrlq(hw);
	if (status)
		goto err_unroll_cqinit;

	/* Enable FW logging. Not fatal if this fails. */
	status = ice_cfg_fw_log(hw, true);
	if (status)
		ice_debug(hw, ICE_DBG_INIT, "Failed to enable FW logging.\n");

	status = ice_clear_pf_cfg(hw);
	if (status)
		goto err_unroll_cqinit;

	ice_clear_pxe_mode(hw);

	status = ice_init_nvm(hw);
	if (status)
		goto err_unroll_cqinit;

	status = ice_get_caps(hw);
	if (status)
		goto err_unroll_cqinit;

	hw->port_info = devm_kzalloc(ice_hw_to_dev(hw),
				     sizeof(*hw->port_info), GFP_KERNEL);
	if (!hw->port_info) {
		status = ICE_ERR_NO_MEMORY;
		goto err_unroll_cqinit;
	}

	/* set the back pointer to HW */
	hw->port_info->hw = hw;

	/* Initialize port_info struct with switch configuration data */
	status = ice_get_initial_sw_cfg(hw);
	if (status)
		goto err_unroll_alloc;

	hw->evb_veb = true;

	/* Query the allocated resources for Tx scheduler */
	status = ice_sched_query_res_alloc(hw);
	if (status) {
		ice_debug(hw, ICE_DBG_SCHED,
			  "Failed to get scheduler allocated resources\n");
		goto err_unroll_alloc;
	}

	/* Initialize port_info struct with scheduler data */
	status = ice_sched_init_port(hw->port_info);
	if (status)
		goto err_unroll_sched;

	pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
	if (!pcaps) {
		status = ICE_ERR_NO_MEMORY;
		goto err_unroll_sched;
	}

	/* Initialize port_info struct with PHY capabilities */
	status = ice_aq_get_phy_caps(hw->port_info, false,
				     ICE_AQC_REPORT_TOPO_CAP, pcaps, NULL);
	devm_kfree(ice_hw_to_dev(hw), pcaps);
	if (status)
		goto err_unroll_sched;

	/* Initialize port_info struct with link information */
	status = ice_aq_get_link_info(hw->port_info, false, NULL, NULL);
	if (status)
		goto err_unroll_sched;

	/* need a valid SW entry point to build a Tx tree */
	if (!hw->sw_entry_point_layer) {
		ice_debug(hw, ICE_DBG_SCHED, "invalid sw entry point\n");
		status = ICE_ERR_CFG;
		goto err_unroll_sched;
	}
	INIT_LIST_HEAD(&hw->agg_list);

	status = ice_init_fltr_mgmt_struct(hw);
	if (status)
		goto err_unroll_sched;

	ice_dev_onetime_setup(hw);

	/* Get MAC information: a single port can report up to two (LAN and
	 * WoL) addresses
	 */
	mac_buf = devm_kcalloc(ice_hw_to_dev(hw), 2,
			       sizeof(struct ice_aqc_manage_mac_read_resp),
			       GFP_KERNEL);
	mac_buf_len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp);

	if (!mac_buf) {
		status = ICE_ERR_NO_MEMORY;
		goto err_unroll_fltr_mgmt_struct;
	}

	status = ice_aq_manage_mac_read(hw, mac_buf, mac_buf_len, NULL);
	devm_kfree(ice_hw_to_dev(hw), mac_buf);

	if (status)
		goto err_unroll_fltr_mgmt_struct;

	ice_init_flex_flds(hw, ICE_RXDID_FLEX_NIC);
	ice_init_flex_flds(hw, ICE_RXDID_FLEX_NIC_2);
	status = ice_init_hw_tbls(hw);
	if (status)
		goto err_unroll_fltr_mgmt_struct;
	return 0;

err_unroll_fltr_mgmt_struct:
	ice_cleanup_fltr_mgmt_struct(hw);
err_unroll_sched:
	ice_sched_cleanup_all(hw);
err_unroll_alloc:
	devm_kfree(ice_hw_to_dev(hw), hw->port_info);
err_unroll_cqinit:
	ice_destroy_all_ctrlq(hw);
	return status;
}
900
901
902
903
904
905
906
907
908
/**
 * ice_deinit_hw - unroll initialization operations done by ice_init_hw
 * @hw: pointer to the hardware structure
 *
 * Tears down state in the reverse order of ice_init_hw: filter management,
 * scheduler, package segment, HW tables, port_info, FW logging, control
 * queues and cached VSI contexts.
 */
void ice_deinit_hw(struct ice_hw *hw)
{
	ice_cleanup_fltr_mgmt_struct(hw);

	ice_sched_cleanup_all(hw);
	ice_sched_clear_agg(hw);
	ice_free_seg(hw);
	ice_free_hw_tbls(hw);

	if (hw->port_info) {
		devm_kfree(ice_hw_to_dev(hw), hw->port_info);
		hw->port_info = NULL;
	}

	/* Attempt to disable FW logging before shutting down control queues */
	ice_cfg_fw_log(hw, false);
	ice_destroy_all_ctrlq(hw);

	/* Clear VSI contexts if not already cleared */
	ice_clear_all_vsi_ctx(hw);
}
930
931
932
933
934
/**
 * ice_check_reset - Check to see if a global reset is complete
 * @hw: pointer to the hardware structure
 *
 * First polls GLGEN_RSTAT until the device leaves the reset state, then
 * polls GLNVM_ULD until every reset-done bit in ICE_RESET_DONE_MASK is set.
 * Returns ICE_ERR_RESET_FAILED if either poll times out.
 */
enum ice_status ice_check_reset(struct ice_hw *hw)
{
	u32 cnt, reg = 0, grst_delay, uld_mask;

	/* Poll for Device Active state in case a recent CORER, GLOBR,
	 * or EMPR has occurred. The grst delay value is in 100ms units.
	 * Add 1sec for outstanding AQ commands that can take a long time.
	 */
	grst_delay = ((rd32(hw, GLGEN_RSTCTL) & GLGEN_RSTCTL_GRSTDEL_M) >>
		      GLGEN_RSTCTL_GRSTDEL_S) + 10;

	for (cnt = 0; cnt < grst_delay; cnt++) {
		mdelay(100);
		reg = rd32(hw, GLGEN_RSTAT);
		if (!(reg & GLGEN_RSTAT_DEVSTATE_M))
			break;
	}

	if (cnt == grst_delay) {
		ice_debug(hw, ICE_DBG_INIT,
			  "Global reset polling failed to complete.\n");
		return ICE_ERR_RESET_FAILED;
	}

#define ICE_RESET_DONE_MASK	(GLNVM_ULD_PCIER_DONE_M |\
				 GLNVM_ULD_PCIER_DONE_1_M |\
				 GLNVM_ULD_CORER_DONE_M |\
				 GLNVM_ULD_GLOBR_DONE_M |\
				 GLNVM_ULD_POR_DONE_M |\
				 GLNVM_ULD_POR_DONE_1_M |\
				 GLNVM_ULD_PCIER_DONE_2_M)

	uld_mask = ICE_RESET_DONE_MASK;

	/* Device is Active; check Global Reset processes are done */
	for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
		reg = rd32(hw, GLNVM_ULD) & uld_mask;
		if (reg == uld_mask) {
			ice_debug(hw, ICE_DBG_INIT,
				  "Global reset processes done. %d\n", cnt);
			break;
		}
		mdelay(10);
	}

	if (cnt == ICE_PF_RESET_WAIT_COUNT) {
		ice_debug(hw, ICE_DBG_INIT,
			  "Wait for Reset Done timed out. GLNVM_ULD = 0x%x\n",
			  reg);
		return ICE_ERR_RESET_FAILED;
	}

	return 0;
}
989
990
991
992
993
994
995
996
/**
 * ice_pf_reset - Reset the PF
 * @hw: pointer to the hardware structure
 *
 * If a global reset has been triggered, register it and wait for it to
 * complete instead of issuing a PF reset. Otherwise trigger a PF software
 * reset via PFGEN_CTRL and poll for completion.
 */
static enum ice_status ice_pf_reset(struct ice_hw *hw)
{
	u32 cnt, reg;

	/* If at function entry a global reset was already in progress, i.e.
	 * state is not 'device active' or any of the reset done bits are not
	 * set in GLNVM_ULD, there is no need for a PF Reset; poll until the
	 * global reset is done.
	 */
	if ((rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_DEVSTATE_M) ||
	    (rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK) ^ ICE_RESET_DONE_MASK) {
		/* poll on global reset currently in progress until done */
		if (ice_check_reset(hw))
			return ICE_ERR_RESET_FAILED;

		return 0;
	}

	/* Reset the PF */
	reg = rd32(hw, PFGEN_CTRL);

	wr32(hw, PFGEN_CTRL, (reg | PFGEN_CTRL_PFSWR_M));

	for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
		reg = rd32(hw, PFGEN_CTRL);
		if (!(reg & PFGEN_CTRL_PFSWR_M))
			break;

		mdelay(1);
	}

	if (cnt == ICE_PF_RESET_WAIT_COUNT) {
		ice_debug(hw, ICE_DBG_INIT,
			  "PF reset polling failed to complete.\n");
		return ICE_ERR_RESET_FAILED;
	}

	return 0;
}
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047
1048
/**
 * ice_reset - Perform different types of reset
 * @hw: pointer to the hardware structure
 * @req: reset request
 *
 * This function triggers a reset as specified by the req parameter.
 *
 * Note:
 * If anything other than a PF reset is triggered, PXE mode is restored.
 * This has to be cleared using ice_clear_pxe_mode again, once the AQ
 * interface has been restored in the rebuild flow.
 */
enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req)
{
	u32 val = 0;

	switch (req) {
	case ICE_RESET_PFR:
		return ice_pf_reset(hw);
	case ICE_RESET_CORER:
		ice_debug(hw, ICE_DBG_INIT, "CoreR requested\n");
		val = GLGEN_RTRIG_CORER_M;
		break;
	case ICE_RESET_GLOBR:
		ice_debug(hw, ICE_DBG_INIT, "GlobalR requested\n");
		val = GLGEN_RTRIG_GLOBR_M;
		break;
	default:
		return ICE_ERR_PARAM;
	}

	val |= rd32(hw, GLGEN_RTRIG);
	wr32(hw, GLGEN_RTRIG, val);
	ice_flush(hw);

	/* wait for the FW to be ready */
	return ice_check_reset(hw);
}
1075
1076
1077
1078
1079
1080
1081
1082
1083
/**
 * ice_copy_rxq_ctx_to_hw - Copy rxq context register array to HW
 * @hw: pointer to the hardware structure
 * @ice_rxq_ctx: pointer to the packed rxq context image
 * @rxq_index: the index of the Rx queue
 *
 * Copies the packed rxq context, one 32-bit word at a time, into the
 * QRX_CONTEXT registers for queue @rxq_index.
 */
static enum ice_status
ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index)
{
	u8 i;

	if (!ice_rxq_ctx)
		return ICE_ERR_BAD_PTR;

	if (rxq_index > QRX_CTRL_MAX_INDEX)
		return ICE_ERR_PARAM;

	/* Copy each dword separately to HW */
	for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) {
		wr32(hw, QRX_CONTEXT(i, rxq_index),
		     *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));

		ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i,
			  *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
	}

	return 0;
}
1106
1107
/* LAN Rx Queue Context layout: field name, width in bits, bit offset.
 * This table mirrors the hardware register layout — do not reorder or
 * change widths/offsets without consulting the device datasheet.
 */
static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
	/* Field		Width	LSB */
	ICE_CTX_STORE(ice_rlan_ctx, head,		13,	0),
	ICE_CTX_STORE(ice_rlan_ctx, cpuid,		8,	13),
	ICE_CTX_STORE(ice_rlan_ctx, base,		57,	32),
	ICE_CTX_STORE(ice_rlan_ctx, qlen,		13,	89),
	ICE_CTX_STORE(ice_rlan_ctx, dbuf,		7,	102),
	ICE_CTX_STORE(ice_rlan_ctx, hbuf,		5,	109),
	ICE_CTX_STORE(ice_rlan_ctx, dtype,		2,	114),
	ICE_CTX_STORE(ice_rlan_ctx, dsize,		1,	116),
	ICE_CTX_STORE(ice_rlan_ctx, crcstrip,		1,	117),
	ICE_CTX_STORE(ice_rlan_ctx, l2tsel,		1,	119),
	ICE_CTX_STORE(ice_rlan_ctx, hsplit_0,		4,	120),
	ICE_CTX_STORE(ice_rlan_ctx, hsplit_1,		2,	124),
	ICE_CTX_STORE(ice_rlan_ctx, showiv,		1,	127),
	ICE_CTX_STORE(ice_rlan_ctx, rxmax,		14,	174),
	ICE_CTX_STORE(ice_rlan_ctx, tphrdesc_ena,	1,	193),
	ICE_CTX_STORE(ice_rlan_ctx, tphwdesc_ena,	1,	194),
	ICE_CTX_STORE(ice_rlan_ctx, tphdata_ena,	1,	195),
	ICE_CTX_STORE(ice_rlan_ctx, tphhead_ena,	1,	196),
	ICE_CTX_STORE(ice_rlan_ctx, lrxqthresh,		3,	198),
	ICE_CTX_STORE(ice_rlan_ctx, prefena,		1,	201),
	{ 0 }
};
1132
1133
1134
1135
1136
1137
1138
1139
1140
1141
1142
/**
 * ice_write_rxq_ctx - Write an Rx queue context to hardware
 * @hw: pointer to the hardware structure
 * @rlan_ctx: pointer to the rxq context
 * @rxq_index: the index of the Rx queue
 *
 * Converts the rxq context from sparse to dense structure and then writes
 * it to the HW registers.
 */
enum ice_status
ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
		  u32 rxq_index)
{
	u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 };

	if (!rlan_ctx)
		return ICE_ERR_BAD_PTR;

	/* prefetch always enabled for the queue */
	rlan_ctx->prefena = 1;

	ice_set_ctx((u8 *)rlan_ctx, ctx_buf, ice_rlan_ctx_info);
	return ice_copy_rxq_ctx_to_hw(hw, ctx_buf, rxq_index);
}
1157
1158
/* LAN Tx Queue Context layout: field name, width in bits, bit offset.
 * Mirrors the hardware register layout — do not reorder or change
 * widths/offsets without consulting the device datasheet.
 */
const struct ice_ctx_ele ice_tlan_ctx_info[] = {
	/* Field		Width	LSB */
	ICE_CTX_STORE(ice_tlan_ctx, base,			57,	0),
	ICE_CTX_STORE(ice_tlan_ctx, port_num,			3,	57),
	ICE_CTX_STORE(ice_tlan_ctx, cgd_num,			5,	60),
	ICE_CTX_STORE(ice_tlan_ctx, pf_num,			3,	65),
	ICE_CTX_STORE(ice_tlan_ctx, vmvf_num,			10,	68),
	ICE_CTX_STORE(ice_tlan_ctx, vmvf_type,			2,	78),
	ICE_CTX_STORE(ice_tlan_ctx, src_vsi,			10,	80),
	ICE_CTX_STORE(ice_tlan_ctx, tsyn_ena,			1,	90),
	ICE_CTX_STORE(ice_tlan_ctx, internal_usage_flag,	1,	91),
	ICE_CTX_STORE(ice_tlan_ctx, alt_vlan,			1,	92),
	ICE_CTX_STORE(ice_tlan_ctx, cpuid,			8,	93),
	ICE_CTX_STORE(ice_tlan_ctx, wb_mode,			1,	101),
	ICE_CTX_STORE(ice_tlan_ctx, tphrd_desc,			1,	102),
	ICE_CTX_STORE(ice_tlan_ctx, tphrd,			1,	103),
	ICE_CTX_STORE(ice_tlan_ctx, tphwr_desc,			1,	104),
	ICE_CTX_STORE(ice_tlan_ctx, cmpq_id,			9,	105),
	ICE_CTX_STORE(ice_tlan_ctx, qnum_in_func,		14,	114),
	ICE_CTX_STORE(ice_tlan_ctx, itr_notification_mode,	1,	128),
	ICE_CTX_STORE(ice_tlan_ctx, adjust_prof_id,		6,	129),
	ICE_CTX_STORE(ice_tlan_ctx, qlen,			13,	135),
	ICE_CTX_STORE(ice_tlan_ctx, quanta_prof_idx,		4,	148),
	ICE_CTX_STORE(ice_tlan_ctx, tso_ena,			1,	152),
	ICE_CTX_STORE(ice_tlan_ctx, tso_qnum,			11,	153),
	ICE_CTX_STORE(ice_tlan_ctx, legacy_int,			1,	164),
	ICE_CTX_STORE(ice_tlan_ctx, drop_ena,			1,	165),
	ICE_CTX_STORE(ice_tlan_ctx, cache_prof_idx,		2,	166),
	ICE_CTX_STORE(ice_tlan_ctx, pkt_shaper_prof_idx,	3,	168),
	ICE_CTX_STORE(ice_tlan_ctx, int_q_state,		122,	171),
	{ 0 }
};
1191
1192
1193
1194
1195
1196
1197
1198
1199
1200
1201
/**
 * ice_debug_cq - dump control queue descriptor and buffer
 * @hw: pointer to the hardware structure
 * @mask: debug mask (unused when CONFIG_DYNAMIC_DEBUG is enabled)
 * @desc: pointer to control queue descriptor
 * @buf: pointer to command buffer, or NULL
 * @buf_len: max length of buf
 *
 * Dumps debug log about control command with descriptor contents.
 */
void
ice_debug_cq(struct ice_hw *hw, u32 __maybe_unused mask, void *desc, void *buf,
	     u16 buf_len)
{
	struct ice_aq_desc *cq_desc = (struct ice_aq_desc *)desc;
	u16 len;

#ifndef CONFIG_DYNAMIC_DEBUG
	if (!(mask & hw->debug_mask))
		return;
#endif

	if (!desc)
		return;

	len = le16_to_cpu(cq_desc->datalen);

	ice_debug(hw, mask,
		  "CQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
		  le16_to_cpu(cq_desc->opcode),
		  le16_to_cpu(cq_desc->flags),
		  le16_to_cpu(cq_desc->datalen), le16_to_cpu(cq_desc->retval));
	ice_debug(hw, mask, "\tcookie (h,l) 0x%08X 0x%08X\n",
		  le32_to_cpu(cq_desc->cookie_high),
		  le32_to_cpu(cq_desc->cookie_low));
	ice_debug(hw, mask, "\tparam (0,1)  0x%08X 0x%08X\n",
		  le32_to_cpu(cq_desc->params.generic.param0),
		  le32_to_cpu(cq_desc->params.generic.param1));
	ice_debug(hw, mask, "\taddr (h,l)   0x%08X 0x%08X\n",
		  le32_to_cpu(cq_desc->params.generic.addr_high),
		  le32_to_cpu(cq_desc->params.generic.addr_low));
	/* dump the accompanying buffer, truncated to buf_len */
	if (buf && cq_desc->datalen != 0) {
		ice_debug(hw, mask, "Buffer:\n");
		if (buf_len < len)
			len = buf_len;

		ice_debug_array(hw, mask, 16, 1, (u8 *)buf, len);
	}
}
1241
1242
1243
1244
1245
1246
1247
/* Serializes admin queue commands that must not run concurrently across PFs
 * (see the opcode switch in ice_aq_send_cmd).
 */
DEFINE_MUTEX(ice_global_cfg_lock_sw);
1249
1250
1251
1252
1253
1254
1255
1256
1257
1258
1259
/**
 * ice_aq_send_cmd - send FW Admin Queue command to FW Admin Queue
 * @hw: pointer to the HW struct
 * @desc: descriptor describing the command
 * @buf: buffer to use for indirect commands (NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (0 for direct commands)
 * @cd: pointer to command details structure
 *
 * Helper function to send FW Admin Queue commands to the FW Admin Queue.
 * Most commands are serialized under ice_global_cfg_lock_sw; the opcodes
 * involved in package download and the release of the global lock itself
 * are deliberately sent without taking it.
 */
enum ice_status
ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
		u16 buf_size, struct ice_sq_cd *cd)
{
	struct ice_aqc_req_res *cmd = &desc->params.res_owner;
	bool lock_acquired = false;
	enum ice_status status;

	/* When a package download is in process (i.e. when the firmware's
	 * Global Configuration Lock resource is held), only the Download
	 * Package command and Get Version are allowed to be sent.
	 */
	switch (le16_to_cpu(desc->opcode)) {
	case ice_aqc_opc_download_pkg:
	case ice_aqc_opc_get_pkg_info_list:
	case ice_aqc_opc_get_ver:
		break;
	case ice_aqc_opc_release_res:
		/* releasing the global lock bypasses the SW lock ... */
		if (le16_to_cpu(cmd->res_id) == ICE_AQC_RES_ID_GLBL_LOCK)
			break;
		/* ... any other resource release is serialized */
		/* fall-through */
	default:
		mutex_lock(&ice_global_cfg_lock_sw);
		lock_acquired = true;
		break;
	}

	status = ice_sq_send_cmd(hw, &hw->adminq, desc, buf, buf_size, cd);
	if (lock_acquired)
		mutex_unlock(&ice_global_cfg_lock_sw);

	return status;
}
1297
1298
1299
1300
1301
1302
1303
1304
1305 enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd)
1306 {
1307 struct ice_aqc_get_ver *resp;
1308 struct ice_aq_desc desc;
1309 enum ice_status status;
1310
1311 resp = &desc.params.get_ver;
1312
1313 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_ver);
1314
1315 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1316
1317 if (!status) {
1318 hw->fw_branch = resp->fw_branch;
1319 hw->fw_maj_ver = resp->fw_major;
1320 hw->fw_min_ver = resp->fw_minor;
1321 hw->fw_patch = resp->fw_patch;
1322 hw->fw_build = le32_to_cpu(resp->fw_build);
1323 hw->api_branch = resp->api_branch;
1324 hw->api_maj_ver = resp->api_major;
1325 hw->api_min_ver = resp->api_minor;
1326 hw->api_patch = resp->api_patch;
1327 }
1328
1329 return status;
1330 }
1331
1332
1333
1334
1335
1336
1337
1338
1339
1340 enum ice_status
1341 ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv,
1342 struct ice_sq_cd *cd)
1343 {
1344 struct ice_aqc_driver_ver *cmd;
1345 struct ice_aq_desc desc;
1346 u16 len;
1347
1348 cmd = &desc.params.driver_ver;
1349
1350 if (!dv)
1351 return ICE_ERR_PARAM;
1352
1353 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_ver);
1354
1355 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
1356 cmd->major_ver = dv->major_ver;
1357 cmd->minor_ver = dv->minor_ver;
1358 cmd->build_ver = dv->build_ver;
1359 cmd->subbuild_ver = dv->subbuild_ver;
1360
1361 len = 0;
1362 while (len < sizeof(dv->driver_string) &&
1363 isascii(dv->driver_string[len]) && dv->driver_string[len])
1364 len++;
1365
1366 return ice_aq_send_cmd(hw, &desc, dv->driver_string, len, cd);
1367 }
1368
1369
1370
1371
1372
1373
1374
1375
1376
1377 enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading)
1378 {
1379 struct ice_aqc_q_shutdown *cmd;
1380 struct ice_aq_desc desc;
1381
1382 cmd = &desc.params.q_shutdown;
1383
1384 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_q_shutdown);
1385
1386 if (unloading)
1387 cmd->driver_unloading = ICE_AQC_DRIVER_UNLOADING;
1388
1389 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
1390 }
1391
1392
1393
1394
1395
1396
1397
1398
1399
1400
1401
1402
1403
1404
1405
1406
1407
1408
1409
1410
1411
1412
1413
1414
1415
1416
1417
1418 static enum ice_status
1419 ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res,
1420 enum ice_aq_res_access_type access, u8 sdp_number, u32 *timeout,
1421 struct ice_sq_cd *cd)
1422 {
1423 struct ice_aqc_req_res *cmd_resp;
1424 struct ice_aq_desc desc;
1425 enum ice_status status;
1426
1427 cmd_resp = &desc.params.res_owner;
1428
1429 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_req_res);
1430
1431 cmd_resp->res_id = cpu_to_le16(res);
1432 cmd_resp->access_type = cpu_to_le16(access);
1433 cmd_resp->res_number = cpu_to_le32(sdp_number);
1434 cmd_resp->timeout = cpu_to_le32(*timeout);
1435 *timeout = 0;
1436
1437 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1438
1439
1440
1441
1442
1443
1444
1445
1446
1447
1448
1449
1450 if (res == ICE_GLOBAL_CFG_LOCK_RES_ID) {
1451 if (le16_to_cpu(cmd_resp->status) == ICE_AQ_RES_GLBL_SUCCESS) {
1452 *timeout = le32_to_cpu(cmd_resp->timeout);
1453 return 0;
1454 } else if (le16_to_cpu(cmd_resp->status) ==
1455 ICE_AQ_RES_GLBL_IN_PROG) {
1456 *timeout = le32_to_cpu(cmd_resp->timeout);
1457 return ICE_ERR_AQ_ERROR;
1458 } else if (le16_to_cpu(cmd_resp->status) ==
1459 ICE_AQ_RES_GLBL_DONE) {
1460 return ICE_ERR_AQ_NO_WORK;
1461 }
1462
1463
1464 *timeout = 0;
1465 return ICE_ERR_AQ_ERROR;
1466 }
1467
1468
1469
1470
1471
1472 if (!status || hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY)
1473 *timeout = le32_to_cpu(cmd_resp->timeout);
1474
1475 return status;
1476 }
1477
1478
1479
1480
1481
1482
1483
1484
1485
1486
1487 static enum ice_status
1488 ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number,
1489 struct ice_sq_cd *cd)
1490 {
1491 struct ice_aqc_req_res *cmd;
1492 struct ice_aq_desc desc;
1493
1494 cmd = &desc.params.res_owner;
1495
1496 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_release_res);
1497
1498 cmd->res_id = cpu_to_le16(res);
1499 cmd->res_number = cpu_to_le32(sdp_number);
1500
1501 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1502 }
1503
1504
1505
1506
1507
1508
1509
1510
1511
1512
/**
 * ice_acquire_res - acquire ownership of a firmware resource, with retries
 * @hw: pointer to the HW struct
 * @res: resource ID to acquire
 * @access: access type (read or write)
 * @timeout: overall time in ms to keep retrying the acquisition
 *
 * Repeatedly requests the resource via ice_aq_req_res(), sleeping between
 * attempts, until it is acquired, the retry budget is exhausted, or firmware
 * reports there is no work to do.
 */
enum ice_status
ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
		enum ice_aq_res_access_type access, u32 timeout)
{
#define ICE_RES_POLLING_DELAY_MS 10
	u32 delay = ICE_RES_POLLING_DELAY_MS;
	u32 time_left = timeout;
	enum ice_status status;

	status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);

	/* ICE_ERR_AQ_NO_WORK means the resource's work was already done
	 * elsewhere, so there is nothing to acquire and no reason to retry
	 * (NOTE(review): semantics inferred from ice_aq_req_res() handling
	 * of ICE_AQ_RES_GLBL_DONE — confirm against the AQ specification).
	 */
	if (status == ICE_ERR_AQ_NO_WORK)
		goto ice_acquire_res_exit;

	if (status)
		ice_debug(hw, ICE_DBG_RES,
			  "resource %d acquire type %d failed.\n", res, access);

	/* Poll until the current owner's hold expires or we run out of time;
	 * time_left is refreshed by each ice_aq_req_res() call.
	 */
	timeout = time_left;
	while (status && timeout && time_left) {
		mdelay(delay);
		timeout = (timeout > delay) ? timeout - delay : 0;
		status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);

		if (status == ICE_ERR_AQ_NO_WORK)
			/* lock became free, but there is no work to do */
			break;

		if (!status)
			/* lock acquired */
			break;
	}
	if (status && status != ICE_ERR_AQ_NO_WORK)
		ice_debug(hw, ICE_DBG_RES, "resource acquire timed out.\n");

ice_acquire_res_exit:
	if (status == ICE_ERR_AQ_NO_WORK) {
		if (access == ICE_RES_WRITE)
			ice_debug(hw, ICE_DBG_RES,
				  "resource indicates no work to do.\n");
		else
			ice_debug(hw, ICE_DBG_RES,
				  "Warning: ICE_ERR_AQ_NO_WORK not expected\n");
	}
	return status;
}
1565
1566
1567
1568
1569
1570
1571
1572
1573 void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res)
1574 {
1575 enum ice_status status;
1576 u32 total_delay = 0;
1577
1578 status = ice_aq_release_res(hw, res, 0, NULL);
1579
1580
1581
1582
1583 while ((status == ICE_ERR_AQ_TIMEOUT) &&
1584 (total_delay < hw->adminq.sq_cmd_timeout)) {
1585 mdelay(1);
1586 status = ice_aq_release_res(hw, res, 0, NULL);
1587 total_delay++;
1588 }
1589 }
1590
1591
1592
1593
1594
1595
1596
1597
1598
1599
1600 static u32 ice_get_num_per_func(struct ice_hw *hw, u32 max)
1601 {
1602 u8 funcs;
1603
1604 #define ICE_CAPS_VALID_FUNCS_M 0xFF
1605 funcs = hweight8(hw->dev_caps.common_cap.valid_functions &
1606 ICE_CAPS_VALID_FUNCS_M);
1607
1608 if (!funcs)
1609 return 0;
1610
1611 return max / funcs;
1612 }
1613
1614
1615
1616
1617
1618
1619
1620
1621
1622
/**
 * ice_parse_caps - parse function/device capability records
 * @hw: pointer to the HW struct
 * @buf: buffer of capability records returned by firmware; NULL is a no-op
 * @cap_count: number of capability records in @buf
 * @opc: which capability list was queried (function or device)
 *
 * Walks the capability records and stores the recognized ones into
 * hw->func_caps or hw->dev_caps depending on @opc. Unrecognized
 * capabilities are logged and skipped.
 */
static void
ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count,
	       enum ice_adminq_opc opc)
{
	struct ice_aqc_list_caps_elem *cap_resp;
	struct ice_hw_func_caps *func_p = NULL;
	struct ice_hw_dev_caps *dev_p = NULL;
	struct ice_hw_common_caps *caps;
	char const *prefix;
	u32 i;

	if (!buf)
		return;

	cap_resp = (struct ice_aqc_list_caps_elem *)buf;

	/* pick the destination structure based on the list opcode */
	if (opc == ice_aqc_opc_list_dev_caps) {
		dev_p = &hw->dev_caps;
		caps = &dev_p->common_cap;
		prefix = "dev cap";
	} else if (opc == ice_aqc_opc_list_func_caps) {
		func_p = &hw->func_caps;
		caps = &func_p->common_cap;
		prefix = "func cap";
	} else {
		ice_debug(hw, ICE_DBG_INIT, "wrong opcode\n");
		return;
	}

	for (i = 0; caps && i < cap_count; i++, cap_resp++) {
		u32 logical_id = le32_to_cpu(cap_resp->logical_id);
		u32 phys_id = le32_to_cpu(cap_resp->phys_id);
		u32 number = le32_to_cpu(cap_resp->number);
		u16 cap = le16_to_cpu(cap_resp->cap);

		switch (cap) {
		case ICE_AQC_CAPS_VALID_FUNCTIONS:
			/* bitmap of PFs enabled on this device */
			caps->valid_functions = number;
			ice_debug(hw, ICE_DBG_INIT,
				  "%s: valid_functions (bitmap) = %d\n", prefix,
				  caps->valid_functions);
			break;
		case ICE_AQC_CAPS_SRIOV:
			caps->sr_iov_1_1 = (number == 1);
			ice_debug(hw, ICE_DBG_INIT,
				  "%s: sr_iov_1_1 = %d\n", prefix,
				  caps->sr_iov_1_1);
			break;
		case ICE_AQC_CAPS_VF:
			/* meaning differs for device vs function lists */
			if (dev_p) {
				dev_p->num_vfs_exposed = number;
				ice_debug(hw, ICE_DBG_INIT,
					  "%s: num_vfs_exposed = %d\n", prefix,
					  dev_p->num_vfs_exposed);
			} else if (func_p) {
				func_p->num_allocd_vfs = number;
				func_p->vf_base_id = logical_id;
				ice_debug(hw, ICE_DBG_INIT,
					  "%s: num_allocd_vfs = %d\n", prefix,
					  func_p->num_allocd_vfs);
				ice_debug(hw, ICE_DBG_INIT,
					  "%s: vf_base_id = %d\n", prefix,
					  func_p->vf_base_id);
			}
			break;
		case ICE_AQC_CAPS_VSI:
			if (dev_p) {
				dev_p->num_vsi_allocd_to_host = number;
				ice_debug(hw, ICE_DBG_INIT,
					  "%s: num_vsi_allocd_to_host = %d\n",
					  prefix,
					  dev_p->num_vsi_allocd_to_host);
			} else if (func_p) {
				/* per-PF guarantee is derived locally, not
				 * taken from the firmware-reported number
				 */
				func_p->guar_num_vsi =
					ice_get_num_per_func(hw, ICE_MAX_VSI);
				ice_debug(hw, ICE_DBG_INIT,
					  "%s: guar_num_vsi (fw) = %d\n",
					  prefix, number);
				ice_debug(hw, ICE_DBG_INIT,
					  "%s: guar_num_vsi = %d\n",
					  prefix, func_p->guar_num_vsi);
			}
			break;
		case ICE_AQC_CAPS_DCB:
			caps->dcb = (number == 1);
			caps->active_tc_bitmap = logical_id;
			caps->maxtc = phys_id;
			ice_debug(hw, ICE_DBG_INIT,
				  "%s: dcb = %d\n", prefix, caps->dcb);
			ice_debug(hw, ICE_DBG_INIT,
				  "%s: active_tc_bitmap = %d\n", prefix,
				  caps->active_tc_bitmap);
			ice_debug(hw, ICE_DBG_INIT,
				  "%s: maxtc = %d\n", prefix, caps->maxtc);
			break;
		case ICE_AQC_CAPS_RSS:
			caps->rss_table_size = number;
			caps->rss_table_entry_width = logical_id;
			ice_debug(hw, ICE_DBG_INIT,
				  "%s: rss_table_size = %d\n", prefix,
				  caps->rss_table_size);
			ice_debug(hw, ICE_DBG_INIT,
				  "%s: rss_table_entry_width = %d\n", prefix,
				  caps->rss_table_entry_width);
			break;
		case ICE_AQC_CAPS_RXQS:
			caps->num_rxq = number;
			caps->rxq_first_id = phys_id;
			ice_debug(hw, ICE_DBG_INIT,
				  "%s: num_rxq = %d\n", prefix,
				  caps->num_rxq);
			ice_debug(hw, ICE_DBG_INIT,
				  "%s: rxq_first_id = %d\n", prefix,
				  caps->rxq_first_id);
			break;
		case ICE_AQC_CAPS_TXQS:
			caps->num_txq = number;
			caps->txq_first_id = phys_id;
			ice_debug(hw, ICE_DBG_INIT,
				  "%s: num_txq = %d\n", prefix,
				  caps->num_txq);
			ice_debug(hw, ICE_DBG_INIT,
				  "%s: txq_first_id = %d\n", prefix,
				  caps->txq_first_id);
			break;
		case ICE_AQC_CAPS_MSIX:
			caps->num_msix_vectors = number;
			caps->msix_vector_first_id = phys_id;
			ice_debug(hw, ICE_DBG_INIT,
				  "%s: num_msix_vectors = %d\n", prefix,
				  caps->num_msix_vectors);
			ice_debug(hw, ICE_DBG_INIT,
				  "%s: msix_vector_first_id = %d\n", prefix,
				  caps->msix_vector_first_id);
			break;
		case ICE_AQC_CAPS_MAX_MTU:
			caps->max_mtu = number;
			ice_debug(hw, ICE_DBG_INIT, "%s: max_mtu = %d\n",
				  prefix, caps->max_mtu);
			break;
		default:
			/* unknown capabilities are logged but not stored */
			ice_debug(hw, ICE_DBG_INIT,
				  "%s: unknown capability[%d]: 0x%x\n", prefix,
				  i, cap);
			break;
		}
	}
}
1771
1772
1773
1774
1775
1776
1777
1778
1779
1780
1781
1782
1783
/**
 * ice_aq_discover_caps - query and parse function/device capabilities
 * @hw: pointer to the HW struct
 * @buf: buffer that receives the capability records
 * @buf_size: size of @buf in bytes
 * @cap_count: written only when firmware returns ICE_AQ_RC_ENOMEM, with the
 *	       number of records firmware has, so the caller can retry with
 *	       a larger buffer; left untouched otherwise
 * @opc: ice_aqc_opc_list_func_caps or ice_aqc_opc_list_dev_caps
 * @cd: pointer to command details structure or NULL
 *
 * Sends the list-capabilities admin queue command and, on success, parses
 * the records via ice_parse_caps().
 */
static enum ice_status
ice_aq_discover_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count,
		     enum ice_adminq_opc opc, struct ice_sq_cd *cd)
{
	struct ice_aqc_list_caps *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd = &desc.params.get_cap;

	if (opc != ice_aqc_opc_list_func_caps &&
	    opc != ice_aqc_opc_list_dev_caps)
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, opc);

	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
	if (!status)
		ice_parse_caps(hw, buf, le32_to_cpu(cmd->count), opc);
	else if (hw->adminq.sq_last_status == ICE_AQ_RC_ENOMEM)
		/* report how many records exist so a retry can size up */
		*cap_count = le32_to_cpu(cmd->count);
	return status;
}
1807
1808
1809
1810
1811
1812
/**
 * ice_discover_caps - retrieve function/device capabilities with retry
 * @hw: pointer to the HW struct
 * @opc: ice_aqc_opc_list_func_caps or ice_aqc_opc_list_dev_caps
 *
 * Allocates a buffer sized for a default record count and queries firmware.
 * If firmware reports ICE_AQ_RC_ENOMEM, cap_count is updated with the real
 * record count (see ice_aq_discover_caps()) and the query is retried once
 * with a larger buffer.
 */
static enum ice_status
ice_discover_caps(struct ice_hw *hw, enum ice_adminq_opc opc)
{
	enum ice_status status;
	u32 cap_count;
	u16 cbuf_len;
	u8 retries;

	/* initial buffer holds ICE_GET_CAP_BUF_COUNT records; one extra
	 * attempt is made if that turns out to be too small
	 */
#define ICE_GET_CAP_BUF_COUNT	40
#define ICE_GET_CAP_RETRY_COUNT	2

	cap_count = ICE_GET_CAP_BUF_COUNT;
	retries = ICE_GET_CAP_RETRY_COUNT;

	do {
		void *cbuf;

		/* NOTE(review): cbuf_len is a u16; a firmware-reported
		 * cap_count large enough to overflow 16 bits would wrap
		 * here — presumably counts stay small, worth confirming.
		 */
		cbuf_len = (u16)(cap_count *
				 sizeof(struct ice_aqc_list_caps_elem));
		cbuf = devm_kzalloc(ice_hw_to_dev(hw), cbuf_len, GFP_KERNEL);
		if (!cbuf)
			return ICE_ERR_NO_MEMORY;

		status = ice_aq_discover_caps(hw, cbuf, cbuf_len, &cap_count,
					      opc, NULL);
		devm_kfree(ice_hw_to_dev(hw), cbuf);

		/* only retry when the failure was a too-small buffer */
		if (!status || hw->adminq.sq_last_status != ICE_AQ_RC_ENOMEM)
			break;

	} while (--retries);

	return status;
}
1855
1856
1857
1858
1859
1860 void ice_set_safe_mode_caps(struct ice_hw *hw)
1861 {
1862 struct ice_hw_func_caps *func_caps = &hw->func_caps;
1863 struct ice_hw_dev_caps *dev_caps = &hw->dev_caps;
1864 u32 valid_func, rxq_first_id, txq_first_id;
1865 u32 msix_vector_first_id, max_mtu;
1866 u32 num_func = 0;
1867 u8 i;
1868
1869
1870 valid_func = func_caps->common_cap.valid_functions;
1871 txq_first_id = func_caps->common_cap.txq_first_id;
1872 rxq_first_id = func_caps->common_cap.rxq_first_id;
1873 msix_vector_first_id = func_caps->common_cap.msix_vector_first_id;
1874 max_mtu = func_caps->common_cap.max_mtu;
1875
1876
1877 memset(func_caps, 0, sizeof(*func_caps));
1878
1879
1880 func_caps->common_cap.valid_functions = valid_func;
1881 func_caps->common_cap.txq_first_id = txq_first_id;
1882 func_caps->common_cap.rxq_first_id = rxq_first_id;
1883 func_caps->common_cap.msix_vector_first_id = msix_vector_first_id;
1884 func_caps->common_cap.max_mtu = max_mtu;
1885
1886
1887 func_caps->common_cap.num_rxq = 1;
1888 func_caps->common_cap.num_txq = 1;
1889
1890
1891 func_caps->common_cap.num_msix_vectors = 2;
1892 func_caps->guar_num_vsi = 1;
1893
1894
1895 valid_func = dev_caps->common_cap.valid_functions;
1896 txq_first_id = dev_caps->common_cap.txq_first_id;
1897 rxq_first_id = dev_caps->common_cap.rxq_first_id;
1898 msix_vector_first_id = dev_caps->common_cap.msix_vector_first_id;
1899 max_mtu = dev_caps->common_cap.max_mtu;
1900
1901
1902 memset(dev_caps, 0, sizeof(*dev_caps));
1903
1904
1905 dev_caps->common_cap.valid_functions = valid_func;
1906 dev_caps->common_cap.txq_first_id = txq_first_id;
1907 dev_caps->common_cap.rxq_first_id = rxq_first_id;
1908 dev_caps->common_cap.msix_vector_first_id = msix_vector_first_id;
1909 dev_caps->common_cap.max_mtu = max_mtu;
1910
1911
1912 #define ICE_MAX_FUNCS 8
1913 for (i = 0; i < ICE_MAX_FUNCS; i++)
1914 if (valid_func & BIT(i))
1915 num_func++;
1916
1917
1918 dev_caps->common_cap.num_rxq = num_func;
1919 dev_caps->common_cap.num_txq = num_func;
1920
1921
1922 dev_caps->common_cap.num_msix_vectors = 2 * num_func;
1923 }
1924
1925
1926
1927
1928
1929 enum ice_status ice_get_caps(struct ice_hw *hw)
1930 {
1931 enum ice_status status;
1932
1933 status = ice_discover_caps(hw, ice_aqc_opc_list_dev_caps);
1934 if (!status)
1935 status = ice_discover_caps(hw, ice_aqc_opc_list_func_caps);
1936
1937 return status;
1938 }
1939
1940
1941
1942
1943
1944
1945
1946
1947
1948
/**
 * ice_aq_manage_mac_write - write a MAC address via the admin queue
 * @hw: pointer to the HW struct
 * @mac_addr: 6-byte MAC address to write
 * @flags: flags controlling the write behavior
 * @cd: pointer to command details structure or NULL
 *
 * Sends the manage-MAC-write admin queue command with the address split
 * across the sah/sal descriptor fields.
 */
enum ice_status
ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags,
			struct ice_sq_cd *cd)
{
	struct ice_aqc_manage_mac_write *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.mac_write;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_write);

	cmd->flags = flags;

	/* First two MAC bytes go into sah, remaining four into sal, both
	 * byte-swapped to network order.
	 * NOTE(review): these casts assume @mac_addr is suitably aligned
	 * for u16/u32 loads; on strict-alignment platforms this would need
	 * get_unaligned helpers — confirm callers' buffers.
	 */
	cmd->sah = htons(*((const u16 *)mac_addr));
	cmd->sal = htonl(*((const u32 *)(mac_addr + 2)));

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}
1967
1968
1969
1970
1971
1972
1973
/**
 * ice_aq_clear_pxe_mode - tell firmware to exit PXE mode
 * @hw: pointer to the HW struct
 *
 * Sends the clear-PXE-mode admin queue command with the Rx count field set.
 */
static enum ice_status ice_aq_clear_pxe_mode(struct ice_hw *hw)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pxe_mode);
	desc.params.clear_pxe.rx_cnt = ICE_AQC_CLEAR_PXE_RX_CNT;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}
1983
1984
1985
1986
1987
1988
1989
1990
1991 void ice_clear_pxe_mode(struct ice_hw *hw)
1992 {
1993 if (ice_check_sq_alive(hw, &hw->adminq))
1994 ice_aq_clear_pxe_mode(hw);
1995 }
1996
1997
1998
1999
2000
2001
2002
2003
2004
2005
2006
2007
2008
2009
/**
 * ice_get_link_speed_based_on_phy_type - map PHY type bits to a link speed
 * @phy_type_low: PHY type low word (a single ICE_PHY_TYPE_LOW_* value, or 0)
 * @phy_type_high: PHY type high word (a single ICE_PHY_TYPE_HIGH_* value, or 0)
 *
 * Returns the ICE_AQ_LINK_SPEED_* value corresponding to whichever of the
 * two PHY type words is set. If neither resolves, or both resolve (the
 * caller should pass only one), ICE_AQ_LINK_SPEED_UNKNOWN is returned.
 */
static u16
ice_get_link_speed_based_on_phy_type(u64 phy_type_low, u64 phy_type_high)
{
	u16 speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
	u16 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;

	/* resolve the low PHY type word */
	switch (phy_type_low) {
	case ICE_PHY_TYPE_LOW_100BASE_TX:
	case ICE_PHY_TYPE_LOW_100M_SGMII:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_100MB;
		break;
	case ICE_PHY_TYPE_LOW_1000BASE_T:
	case ICE_PHY_TYPE_LOW_1000BASE_SX:
	case ICE_PHY_TYPE_LOW_1000BASE_LX:
	case ICE_PHY_TYPE_LOW_1000BASE_KX:
	case ICE_PHY_TYPE_LOW_1G_SGMII:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_1000MB;
		break;
	case ICE_PHY_TYPE_LOW_2500BASE_T:
	case ICE_PHY_TYPE_LOW_2500BASE_X:
	case ICE_PHY_TYPE_LOW_2500BASE_KX:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_2500MB;
		break;
	case ICE_PHY_TYPE_LOW_5GBASE_T:
	case ICE_PHY_TYPE_LOW_5GBASE_KR:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_5GB;
		break;
	case ICE_PHY_TYPE_LOW_10GBASE_T:
	case ICE_PHY_TYPE_LOW_10G_SFI_DA:
	case ICE_PHY_TYPE_LOW_10GBASE_SR:
	case ICE_PHY_TYPE_LOW_10GBASE_LR:
	case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
	case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
	case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_10GB;
		break;
	case ICE_PHY_TYPE_LOW_25GBASE_T:
	case ICE_PHY_TYPE_LOW_25GBASE_CR:
	case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
	case ICE_PHY_TYPE_LOW_25GBASE_CR1:
	case ICE_PHY_TYPE_LOW_25GBASE_SR:
	case ICE_PHY_TYPE_LOW_25GBASE_LR:
	case ICE_PHY_TYPE_LOW_25GBASE_KR:
	case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
	case ICE_PHY_TYPE_LOW_25GBASE_KR1:
	case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
	case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_25GB;
		break;
	case ICE_PHY_TYPE_LOW_40GBASE_CR4:
	case ICE_PHY_TYPE_LOW_40GBASE_SR4:
	case ICE_PHY_TYPE_LOW_40GBASE_LR4:
	case ICE_PHY_TYPE_LOW_40GBASE_KR4:
	case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
	case ICE_PHY_TYPE_LOW_40G_XLAUI:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_40GB;
		break;
	case ICE_PHY_TYPE_LOW_50GBASE_CR2:
	case ICE_PHY_TYPE_LOW_50GBASE_SR2:
	case ICE_PHY_TYPE_LOW_50GBASE_LR2:
	case ICE_PHY_TYPE_LOW_50GBASE_KR2:
	case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC:
	case ICE_PHY_TYPE_LOW_50G_LAUI2:
	case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
	case ICE_PHY_TYPE_LOW_50G_AUI2:
	case ICE_PHY_TYPE_LOW_50GBASE_CP:
	case ICE_PHY_TYPE_LOW_50GBASE_SR:
	case ICE_PHY_TYPE_LOW_50GBASE_FR:
	case ICE_PHY_TYPE_LOW_50GBASE_LR:
	case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
	case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
	case ICE_PHY_TYPE_LOW_50G_AUI1:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_50GB;
		break;
	case ICE_PHY_TYPE_LOW_100GBASE_CR4:
	case ICE_PHY_TYPE_LOW_100GBASE_SR4:
	case ICE_PHY_TYPE_LOW_100GBASE_LR4:
	case ICE_PHY_TYPE_LOW_100GBASE_KR4:
	case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
	case ICE_PHY_TYPE_LOW_100G_CAUI4:
	case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
	case ICE_PHY_TYPE_LOW_100G_AUI4:
	case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
	case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
	case ICE_PHY_TYPE_LOW_100GBASE_CP2:
	case ICE_PHY_TYPE_LOW_100GBASE_SR2:
	case ICE_PHY_TYPE_LOW_100GBASE_DR:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_100GB;
		break;
	default:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
		break;
	}

	/* resolve the high PHY type word */
	switch (phy_type_high) {
	case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
	case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
	case ICE_PHY_TYPE_HIGH_100G_CAUI2:
	case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
	case ICE_PHY_TYPE_HIGH_100G_AUI2:
		speed_phy_type_high = ICE_AQ_LINK_SPEED_100GB;
		break;
	default:
		speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
		break;
	}

	/* exactly one of the two words must have resolved to a speed */
	if (speed_phy_type_low == ICE_AQ_LINK_SPEED_UNKNOWN &&
	    speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
		return ICE_AQ_LINK_SPEED_UNKNOWN;
	else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
		 speed_phy_type_high != ICE_AQ_LINK_SPEED_UNKNOWN)
		return ICE_AQ_LINK_SPEED_UNKNOWN;
	else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
		 speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
		return speed_phy_type_low;
	else
		return speed_phy_type_high;
}
2129
2130
2131
2132
2133
2134
2135
2136
2137
2138
2139
2140
2141
2142
2143
2144
2145 void
2146 ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high,
2147 u16 link_speeds_bitmap)
2148 {
2149 u64 pt_high;
2150 u64 pt_low;
2151 int index;
2152 u16 speed;
2153
2154
2155 for (index = 0; index <= ICE_PHY_TYPE_LOW_MAX_INDEX; index++) {
2156 pt_low = BIT_ULL(index);
2157 speed = ice_get_link_speed_based_on_phy_type(pt_low, 0);
2158
2159 if (link_speeds_bitmap & speed)
2160 *phy_type_low |= BIT_ULL(index);
2161 }
2162
2163
2164 for (index = 0; index <= ICE_PHY_TYPE_HIGH_MAX_INDEX; index++) {
2165 pt_high = BIT_ULL(index);
2166 speed = ice_get_link_speed_based_on_phy_type(0, pt_high);
2167
2168 if (link_speeds_bitmap & speed)
2169 *phy_type_high |= BIT_ULL(index);
2170 }
2171 }
2172
2173
2174
2175
2176
2177
2178
2179
2180
2181
2182
2183
2184
/**
 * ice_aq_set_phy_cfg - apply a PHY configuration via the admin queue
 * @hw: pointer to the HW struct
 * @lport: logical port number to configure
 * @cfg: PHY configuration data to apply; must not be NULL
 * @cd: pointer to command details structure or NULL
 *
 * Sanitizes cfg->caps to the valid enable-bit mask (logging if any invalid
 * bits were set), then sends the set-PHY-config command with @cfg as the
 * indirect buffer.
 */
enum ice_status
ice_aq_set_phy_cfg(struct ice_hw *hw, u8 lport,
		   struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc;

	if (!cfg)
		return ICE_ERR_PARAM;

	/* strip (and log) any caps bits outside the valid enable mask */
	if (cfg->caps & ~ICE_AQ_PHY_ENA_VALID_MASK) {
		ice_debug(hw, ICE_DBG_PHY,
			  "Invalid bit is set in ice_aqc_set_phy_cfg_data->caps : 0x%x\n",
			  cfg->caps);

		cfg->caps &= ICE_AQ_PHY_ENA_VALID_MASK;
	}

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_cfg);
	desc.params.set_phy.lport_num = lport;
	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	/* trace the full configuration being applied */
	ice_debug(hw, ICE_DBG_LINK, "phy_type_low = 0x%llx\n",
		  (unsigned long long)le64_to_cpu(cfg->phy_type_low));
	ice_debug(hw, ICE_DBG_LINK, "phy_type_high = 0x%llx\n",
		  (unsigned long long)le64_to_cpu(cfg->phy_type_high));
	ice_debug(hw, ICE_DBG_LINK, "caps = 0x%x\n", cfg->caps);
	ice_debug(hw, ICE_DBG_LINK, "low_power_ctrl = 0x%x\n",
		  cfg->low_power_ctrl);
	ice_debug(hw, ICE_DBG_LINK, "eee_cap = 0x%x\n", cfg->eee_cap);
	ice_debug(hw, ICE_DBG_LINK, "eeer_value = 0x%x\n", cfg->eeer_value);
	ice_debug(hw, ICE_DBG_LINK, "link_fec_opt = 0x%x\n", cfg->link_fec_opt);

	return ice_aq_send_cmd(hw, &desc, cfg, sizeof(*cfg), cd);
}
2220
2221
2222
2223
2224
2225 enum ice_status ice_update_link_info(struct ice_port_info *pi)
2226 {
2227 struct ice_link_status *li;
2228 enum ice_status status;
2229
2230 if (!pi)
2231 return ICE_ERR_PARAM;
2232
2233 li = &pi->phy.link_info;
2234
2235 status = ice_aq_get_link_info(pi, true, NULL, NULL);
2236 if (status)
2237 return status;
2238
2239 if (li->link_info & ICE_AQ_MEDIA_AVAILABLE) {
2240 struct ice_aqc_get_phy_caps_data *pcaps;
2241 struct ice_hw *hw;
2242
2243 hw = pi->hw;
2244 pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps),
2245 GFP_KERNEL);
2246 if (!pcaps)
2247 return ICE_ERR_NO_MEMORY;
2248
2249 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP,
2250 pcaps, NULL);
2251 if (!status)
2252 memcpy(li->module_type, &pcaps->module_type,
2253 sizeof(li->module_type));
2254
2255 devm_kfree(ice_hw_to_dev(hw), pcaps);
2256 }
2257
2258 return status;
2259 }
2260
2261
2262
2263
2264
2265
2266
2267
2268
/**
 * ice_set_fc - configure flow control on a port
 * @pi: port info structure; must not be NULL
 * @aq_failures: set to an ICE_SET_FC_AQ_FAIL_* value identifying which AQ
 *		 step failed (NONE on full success)
 * @ena_auto_link_update: true to let firmware apply the change immediately
 *			  (sets ICE_AQ_PHY_ENA_AUTO_LINK_UPDT)
 *
 * Translates pi->fc.req_mode into PHY pause capability bits, reads the
 * current SW PHY config, and, only if the pause bits actually change,
 * writes the new config and polls (up to 10 x 100ms) for the link info to
 * refresh.
 */
enum ice_status
ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
{
	struct ice_aqc_set_phy_cfg_data cfg = { 0 };
	struct ice_aqc_get_phy_caps_data *pcaps;
	enum ice_status status;
	u8 pause_mask = 0x0;
	struct ice_hw *hw;

	if (!pi)
		return ICE_ERR_PARAM;
	hw = pi->hw;
	*aq_failures = ICE_SET_FC_AQ_FAIL_NONE;

	/* map the requested flow-control mode to pause enable bits */
	switch (pi->fc.req_mode) {
	case ICE_FC_FULL:
		pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
		pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
		break;
	case ICE_FC_RX_PAUSE:
		pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
		break;
	case ICE_FC_TX_PAUSE:
		pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
		break;
	default:
		break;
	}

	pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
	if (!pcaps)
		return ICE_ERR_NO_MEMORY;

	/* read the current software PHY configuration */
	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps,
				     NULL);
	if (status) {
		*aq_failures = ICE_SET_FC_AQ_FAIL_GET;
		goto out;
	}

	/* clear the existing pause bits... */
	cfg.caps = pcaps->caps & ~(ICE_AQC_PHY_EN_TX_LINK_PAUSE |
				   ICE_AQC_PHY_EN_RX_LINK_PAUSE);

	/* ...and apply the requested ones */
	cfg.caps |= pause_mask;

	/* only touch the PHY when the caps actually change */
	if (cfg.caps != pcaps->caps) {
		int retry_count, retry_max = 10;

		/* ask FW to apply the new config without a manual restart */
		if (ena_auto_link_update)
			cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;

		/* carry over the remaining current PHY settings unchanged */
		cfg.phy_type_high = pcaps->phy_type_high;
		cfg.phy_type_low = pcaps->phy_type_low;
		cfg.low_power_ctrl = pcaps->low_power_ctrl;
		cfg.eee_cap = pcaps->eee_cap;
		cfg.eeer_value = pcaps->eeer_value;
		cfg.link_fec_opt = pcaps->link_fec_options;

		status = ice_aq_set_phy_cfg(hw, pi->lport, &cfg, NULL);
		if (status) {
			*aq_failures = ICE_SET_FC_AQ_FAIL_SET;
			goto out;
		}

		/* Poll for the updated link info; each attempt waits 100ms,
		 * giving firmware up to ~1s to settle after the config write.
		 */
		for (retry_count = 0; retry_count < retry_max; retry_count++) {
			status = ice_update_link_info(pi);

			if (!status)
				break;

			mdelay(100);
		}

		if (status)
			*aq_failures = ICE_SET_FC_AQ_FAIL_UPDATE;
	}

out:
	devm_kfree(ice_hw_to_dev(hw), pcaps);
	return status;
}
2360
2361
2362
2363
2364
2365
2366
2367
2368
/**
 * ice_copy_phy_caps_to_cfg - copy PHY capability fields into a config struct
 * @caps: PHY capabilities returned by firmware; NULL is a no-op
 * @cfg: PHY configuration data to fill; NULL is a no-op
 *
 * Copies the fields shared between the get-PHY-caps response and the
 * set-PHY-config request so @cfg can be sent back with targeted edits.
 */
void
ice_copy_phy_caps_to_cfg(struct ice_aqc_get_phy_caps_data *caps,
			 struct ice_aqc_set_phy_cfg_data *cfg)
{
	if (!caps || !cfg)
		return;

	cfg->phy_type_low = caps->phy_type_low;
	cfg->phy_type_high = caps->phy_type_high;
	cfg->caps = caps->caps;
	cfg->low_power_ctrl = caps->low_power_ctrl;
	cfg->eee_cap = caps->eee_cap;
	cfg->eeer_value = caps->eeer_value;
	cfg->link_fec_opt = caps->link_fec_options;
}
2384
2385
2386
2387
2388
2389
2390
2391
2392
2393
/**
 * ice_cfg_phy_fec - adjust PHY FEC option bits for a requested FEC mode
 * @cfg: PHY configuration data whose link_fec_opt/caps are edited in place
 * @fec: requested FEC mode
 *
 * Rewrites cfg->link_fec_opt so only the bits relevant to @fec remain set,
 * adding the matching request bits. ICE_FEC_AUTO instead masks cfg->caps.
 */
void
ice_cfg_phy_fec(struct ice_aqc_set_phy_cfg_data *cfg, enum ice_fec_mode fec)
{
	switch (fec) {
	case ICE_FEC_BASER:
		/* keep only the BASE-R (clause 74) enable bits and request
		 * BASE-R FEC for the 10G/40G and 25G KR links
		 */
		cfg->link_fec_opt &= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
			ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN;
		cfg->link_fec_opt |= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
			ICE_AQC_PHY_FEC_25G_KR_REQ;
		break;
	case ICE_FEC_RS:
		/* keep only the RS (clause 91) enable bit and request both
		 * RS-528 and RS-544 FEC
		 */
		cfg->link_fec_opt &= ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN;
		cfg->link_fec_opt |= ICE_AQC_PHY_FEC_25G_RS_528_REQ |
			ICE_AQC_PHY_FEC_25G_RS_544_REQ;
		break;
	case ICE_FEC_NONE:
		/* clear all FEC option bits */
		cfg->link_fec_opt &= ~ICE_AQC_PHY_FEC_MASK;
		break;
	case ICE_FEC_AUTO:
		/* leave FEC selection to firmware; restrict caps to the
		 * valid capability mask
		 */
		cfg->caps &= ICE_AQC_PHY_CAPS_MASK;
		break;
	}
}
2425
2426
2427
2428
2429
2430
2431
2432
2433
2434
2435 enum ice_status ice_get_link_status(struct ice_port_info *pi, bool *link_up)
2436 {
2437 struct ice_phy_info *phy_info;
2438 enum ice_status status = 0;
2439
2440 if (!pi || !link_up)
2441 return ICE_ERR_PARAM;
2442
2443 phy_info = &pi->phy;
2444
2445 if (phy_info->get_link_info) {
2446 status = ice_update_link_info(pi);
2447
2448 if (status)
2449 ice_debug(pi->hw, ICE_DBG_LINK,
2450 "get link status error, status = %d\n",
2451 status);
2452 }
2453
2454 *link_up = phy_info->link_info.link_info & ICE_AQ_LINK_UP;
2455
2456 return status;
2457 }
2458
2459
2460
2461
2462
2463
2464
2465
2466
2467 enum ice_status
2468 ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
2469 struct ice_sq_cd *cd)
2470 {
2471 struct ice_aqc_restart_an *cmd;
2472 struct ice_aq_desc desc;
2473
2474 cmd = &desc.params.restart_an;
2475
2476 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_restart_an);
2477
2478 cmd->cmd_flags = ICE_AQC_RESTART_AN_LINK_RESTART;
2479 cmd->lport_num = pi->lport;
2480 if (ena_link)
2481 cmd->cmd_flags |= ICE_AQC_RESTART_AN_LINK_ENABLE;
2482 else
2483 cmd->cmd_flags &= ~ICE_AQC_RESTART_AN_LINK_ENABLE;
2484
2485 return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
2486 }
2487
2488
2489
2490
2491
2492
2493
2494
2495
2496
2497 enum ice_status
2498 ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask,
2499 struct ice_sq_cd *cd)
2500 {
2501 struct ice_aqc_set_event_mask *cmd;
2502 struct ice_aq_desc desc;
2503
2504 cmd = &desc.params.set_event_mask;
2505
2506 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_event_mask);
2507
2508 cmd->lport_num = port_num;
2509
2510 cmd->event_mask = cpu_to_le16(mask);
2511 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2512 }
2513
2514
2515
2516
2517
2518
2519
2520
2521
2522 enum ice_status
2523 ice_aq_set_mac_loopback(struct ice_hw *hw, bool ena_lpbk, struct ice_sq_cd *cd)
2524 {
2525 struct ice_aqc_set_mac_lb *cmd;
2526 struct ice_aq_desc desc;
2527
2528 cmd = &desc.params.set_mac_lb;
2529
2530 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_lb);
2531 if (ena_lpbk)
2532 cmd->lb_mode = ICE_AQ_MAC_LB_EN;
2533
2534 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2535 }
2536
2537
2538
2539
2540
2541
2542
2543
2544
2545 enum ice_status
2546 ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode,
2547 struct ice_sq_cd *cd)
2548 {
2549 struct ice_aqc_set_port_id_led *cmd;
2550 struct ice_hw *hw = pi->hw;
2551 struct ice_aq_desc desc;
2552
2553 cmd = &desc.params.set_port_id_led;
2554
2555 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_id_led);
2556
2557 if (is_orig_mode)
2558 cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_ORIG;
2559 else
2560 cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_BLINK;
2561
2562 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2563 }
2564
2565
2566
2567
2568
2569
2570
2571
2572
2573
2574
2575
2576
/**
 * __ice_aq_get_set_rss_lut - get or set an RSS lookup table
 * @hw: pointer to the HW struct
 * @vsi_id: hardware VSI number the LUT belongs to
 * @lut_type: ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_* (VSI, PF or GLOBAL)
 * @lut: LUT buffer to write (set) or fill (get)
 * @lut_size: size of @lut in bytes; must be one of the
 *	      ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_* values valid for @lut_type
 * @glob_lut_idx: index of the global LUT when @lut_type is GLOBAL
 * @set: true to program the LUT, false to read it
 *
 * Builds and sends the get/set RSS LUT admin queue command. The size flag
 * is only validated/encoded on set operations (and only for PF and GLOBAL
 * tables); gets skip straight to sending.
 */
static enum ice_status
__ice_aq_get_set_rss_lut(struct ice_hw *hw, u16 vsi_id, u8 lut_type, u8 *lut,
			 u16 lut_size, u8 glob_lut_idx, bool set)
{
	struct ice_aqc_get_set_rss_lut *cmd_resp;
	struct ice_aq_desc desc;
	enum ice_status status;
	u16 flags = 0;

	cmd_resp = &desc.params.get_set_rss_lut;

	if (set) {
		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_lut);
		desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
	} else {
		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_lut);
	}

	cmd_resp->vsi_id = cpu_to_le16(((vsi_id <<
					 ICE_AQC_GSET_RSS_LUT_VSI_ID_S) &
					ICE_AQC_GSET_RSS_LUT_VSI_ID_M) |
				       ICE_AQC_GSET_RSS_LUT_VSI_VALID);

	/* encode (and validate) the table type */
	switch (lut_type) {
	case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI:
	case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF:
	case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL:
		flags |= ((lut_type << ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_S) &
			  ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_M);
		break;
	default:
		status = ICE_ERR_PARAM;
		goto ice_aq_get_set_rss_lut_exit;
	}

	if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL) {
		flags |= ((glob_lut_idx << ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_S) &
			  ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_M);

		if (!set)
			goto ice_aq_get_set_rss_lut_send;
	} else if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
		if (!set)
			goto ice_aq_get_set_rss_lut_send;
	} else {
		/* VSI tables need no size flag */
		goto ice_aq_get_set_rss_lut_send;
	}

	/* set operations must carry a valid LUT size flag */
	switch (lut_size) {
	case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128:
		break;
	case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512:
		flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512_FLAG <<
			  ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
			 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
		break;
	case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K:
		if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
			flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K_FLAG <<
				  ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
				 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
			break;
		}
		/* fall-through: 2K is only valid for PF tables */
	default:
		status = ICE_ERR_PARAM;
		goto ice_aq_get_set_rss_lut_exit;
	}

ice_aq_get_set_rss_lut_send:
	cmd_resp->flags = cpu_to_le16(flags);
	status = ice_aq_send_cmd(hw, &desc, lut, lut_size, NULL);

ice_aq_get_set_rss_lut_exit:
	return status;
}
2654
2655
2656
2657
2658
2659
2660
2661
2662
2663
2664
2665 enum ice_status
2666 ice_aq_get_rss_lut(struct ice_hw *hw, u16 vsi_handle, u8 lut_type,
2667 u8 *lut, u16 lut_size)
2668 {
2669 if (!ice_is_vsi_valid(hw, vsi_handle) || !lut)
2670 return ICE_ERR_PARAM;
2671
2672 return __ice_aq_get_set_rss_lut(hw, ice_get_hw_vsi_num(hw, vsi_handle),
2673 lut_type, lut, lut_size, 0, false);
2674 }
2675
2676
2677
2678
2679
2680
2681
2682
2683
2684
2685
2686 enum ice_status
2687 ice_aq_set_rss_lut(struct ice_hw *hw, u16 vsi_handle, u8 lut_type,
2688 u8 *lut, u16 lut_size)
2689 {
2690 if (!ice_is_vsi_valid(hw, vsi_handle) || !lut)
2691 return ICE_ERR_PARAM;
2692
2693 return __ice_aq_get_set_rss_lut(hw, ice_get_hw_vsi_num(hw, vsi_handle),
2694 lut_type, lut, lut_size, 0, true);
2695 }
2696
2697
2698
2699
2700
2701
2702
2703
2704
2705
2706 static enum
2707 ice_status __ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id,
2708 struct ice_aqc_get_set_rss_keys *key,
2709 bool set)
2710 {
2711 struct ice_aqc_get_set_rss_key *cmd_resp;
2712 u16 key_size = sizeof(*key);
2713 struct ice_aq_desc desc;
2714
2715 cmd_resp = &desc.params.get_set_rss_key;
2716
2717 if (set) {
2718 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_key);
2719 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
2720 } else {
2721 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_key);
2722 }
2723
2724 cmd_resp->vsi_id = cpu_to_le16(((vsi_id <<
2725 ICE_AQC_GSET_RSS_KEY_VSI_ID_S) &
2726 ICE_AQC_GSET_RSS_KEY_VSI_ID_M) |
2727 ICE_AQC_GSET_RSS_KEY_VSI_VALID);
2728
2729 return ice_aq_send_cmd(hw, &desc, key, key_size, NULL);
2730 }
2731
2732
2733
2734
2735
2736
2737
2738
2739
2740 enum ice_status
2741 ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle,
2742 struct ice_aqc_get_set_rss_keys *key)
2743 {
2744 if (!ice_is_vsi_valid(hw, vsi_handle) || !key)
2745 return ICE_ERR_PARAM;
2746
2747 return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
2748 key, false);
2749 }
2750
2751
2752
2753
2754
2755
2756
2757
2758
2759 enum ice_status
2760 ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle,
2761 struct ice_aqc_get_set_rss_keys *keys)
2762 {
2763 if (!ice_is_vsi_valid(hw, vsi_handle) || !keys)
2764 return ICE_ERR_PARAM;
2765
2766 return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
2767 keys, true);
2768 }
2769
2770
2771
2772
2773
2774
2775
2776
2777
2778
2779
2780
2781
2782
2783
2784
2785
2786
2787
2788
2789
2790
/**
 * ice_aq_add_lan_txq
 * @hw: pointer to the HW struct
 * @num_qgrps: number of queue groups in @qg_list
 * @qg_list: list of queue groups to be added
 * @buf_size: size of @qg_list in bytes
 * @cd: pointer to command details structure or NULL
 *
 * Add Tx LAN queue(s) via the admin queue.  @buf_size must exactly match the
 * total size implied by the group headers plus each group's per-queue entries.
 */
static enum ice_status
ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps,
		   struct ice_aqc_add_tx_qgrp *qg_list, u16 buf_size,
		   struct ice_sq_cd *cd)
{
	u16 i, sum_header_size, sum_q_size = 0;
	struct ice_aqc_add_tx_qgrp *list;
	struct ice_aqc_add_txqs *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.add_txqs;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_txqs);

	if (!qg_list)
		return ICE_ERR_PARAM;

	if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
		return ICE_ERR_PARAM;

	/* total of the group headers, excluding the trailing txqs[] array */
	sum_header_size = num_qgrps *
		(sizeof(*qg_list) - sizeof(*qg_list->txqs));

	/* walk the variable-length groups to total the per-queue entries;
	 * each group is followed immediately by its num_txqs entries
	 */
	list = qg_list;
	for (i = 0; i < num_qgrps; i++) {
		struct ice_aqc_add_txqs_perq *q = list->txqs;

		sum_q_size += list->num_txqs * sizeof(*q);
		list = (struct ice_aqc_add_tx_qgrp *)(q + list->num_txqs);
	}

	/* reject a buffer that does not exactly cover headers + entries */
	if (buf_size != (sum_header_size + sum_q_size))
		return ICE_ERR_PARAM;

	/* RD flag: the buffer is an input read by firmware */
	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	cmd->num_qgrps = num_qgrps;

	return ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
}
2831
2832
2833
2834
2835
2836
2837
2838
2839
2840
2841
2842
2843
/**
 * ice_aq_dis_lan_txq
 * @hw: pointer to the HW struct
 * @num_qgrps: number of groups in @qg_list
 * @qg_list: list of queue groups to disable (may be NULL in a reset flow)
 * @buf_size: size of @qg_list in bytes
 * @rst_src: reset source (ICE_NO_RESET, ICE_VM_RESET or ICE_VF_RESET)
 * @vmvf_num: VM/VF number the reset applies to (used with @rst_src)
 * @cd: pointer to command details structure or NULL
 *
 * Disable Tx LAN queue(s) via the admin queue.  With a reset source and a
 * NULL @qg_list, only the reset information is sent; otherwise @buf_size
 * must exactly match the padded size of the queue group list.
 */
static enum ice_status
ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
		   struct ice_aqc_dis_txq_item *qg_list, u16 buf_size,
		   enum ice_disq_rst_src rst_src, u16 vmvf_num,
		   struct ice_sq_cd *cd)
{
	struct ice_aqc_dis_txqs *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;
	u16 i, sz = 0;

	cmd = &desc.params.dis_txqs;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dis_txqs);

	/* a NULL queue list is only valid when a reset source is given */
	if (!qg_list && !rst_src)
		return ICE_ERR_PARAM;

	if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
		return ICE_ERR_PARAM;

	cmd->num_entries = num_qgrps;

	/* 5 = disable timeout value written into the timeout field */
	cmd->vmvf_and_timeout = cpu_to_le16((5 << ICE_AQC_Q_DIS_TIMEOUT_S) &
					    ICE_AQC_Q_DIS_TIMEOUT_M);

	switch (rst_src) {
	case ICE_VM_RESET:
		cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VM_RESET;
		cmd->vmvf_and_timeout |=
			cpu_to_le16(vmvf_num & ICE_AQC_Q_DIS_VMVF_NUM_M);
		break;
	case ICE_VF_RESET:
		cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VF_RESET;
		/* VF number is biased by the function's VF base ID */
		cmd->vmvf_and_timeout |=
			cpu_to_le16((vmvf_num + hw->func_caps.vf_base_id) &
				    ICE_AQC_Q_DIS_VMVF_NUM_M);
		break;
	case ICE_NO_RESET:
	default:
		break;
	}

	/* always request a pipe flush on disable */
	cmd->cmd_type |= ICE_AQC_Q_DIS_CMD_FLUSH_PIPE;

	/* no queue group info means pure reset flow: send without a buffer */
	if (!qg_list)
		goto do_aq;

	/* RD flag: the buffer is an input read by firmware */
	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	for (i = 0; i < num_qgrps; ++i) {
		/* size taken by the queue IDs in this group */
		sz += qg_list[i].num_qs * sizeof(qg_list[i].q_id);

		/* group header size, excluding the first queue ID slot */
		sz += sizeof(qg_list[i]) - sizeof(qg_list[i].q_id);

		/* an even queue count leaves a 2-byte alignment gap */
		if ((qg_list[i].num_qs % 2) == 0)
			sz += 2;
	}

	if (buf_size != sz)
		return ICE_ERR_PARAM;

do_aq:
	status = ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
	if (status) {
		if (!qg_list)
			ice_debug(hw, ICE_DBG_SCHED, "VM%d disable failed %d\n",
				  vmvf_num, hw->adminq.sq_last_status);
		else
			ice_debug(hw, ICE_DBG_SCHED, "disable queue %d failed %d\n",
				  le16_to_cpu(qg_list[0].q_id[0]),
				  hw->adminq.sq_last_status);
	}
	return status;
}
2927
2928
2929
2930
2931
2932
2933
2934
2935
2936 static void
2937 ice_write_byte(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
2938 {
2939 u8 src_byte, dest_byte, mask;
2940 u8 *from, *dest;
2941 u16 shift_width;
2942
2943
2944 from = src_ctx + ce_info->offset;
2945
2946
2947 shift_width = ce_info->lsb % 8;
2948 mask = (u8)(BIT(ce_info->width) - 1);
2949
2950 src_byte = *from;
2951 src_byte &= mask;
2952
2953
2954 mask <<= shift_width;
2955 src_byte <<= shift_width;
2956
2957
2958 dest = dest_ctx + (ce_info->lsb / 8);
2959
2960 memcpy(&dest_byte, dest, sizeof(dest_byte));
2961
2962 dest_byte &= ~mask;
2963 dest_byte |= src_byte;
2964
2965
2966 memcpy(dest, &dest_byte, sizeof(dest_byte));
2967 }
2968
2969
2970
2971
2972
2973
2974
2975 static void
2976 ice_write_word(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
2977 {
2978 u16 src_word, mask;
2979 __le16 dest_word;
2980 u8 *from, *dest;
2981 u16 shift_width;
2982
2983
2984 from = src_ctx + ce_info->offset;
2985
2986
2987 shift_width = ce_info->lsb % 8;
2988 mask = BIT(ce_info->width) - 1;
2989
2990
2991
2992
2993 src_word = *(u16 *)from;
2994 src_word &= mask;
2995
2996
2997 mask <<= shift_width;
2998 src_word <<= shift_width;
2999
3000
3001 dest = dest_ctx + (ce_info->lsb / 8);
3002
3003 memcpy(&dest_word, dest, sizeof(dest_word));
3004
3005 dest_word &= ~(cpu_to_le16(mask));
3006 dest_word |= cpu_to_le16(src_word);
3007
3008
3009 memcpy(dest, &dest_word, sizeof(dest_word));
3010 }
3011
3012
3013
3014
3015
3016
3017
3018 static void
3019 ice_write_dword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
3020 {
3021 u32 src_dword, mask;
3022 __le32 dest_dword;
3023 u8 *from, *dest;
3024 u16 shift_width;
3025
3026
3027 from = src_ctx + ce_info->offset;
3028
3029
3030 shift_width = ce_info->lsb % 8;
3031
3032
3033
3034
3035
3036 if (ce_info->width < 32)
3037 mask = BIT(ce_info->width) - 1;
3038 else
3039 mask = (u32)~0;
3040
3041
3042
3043
3044 src_dword = *(u32 *)from;
3045 src_dword &= mask;
3046
3047
3048 mask <<= shift_width;
3049 src_dword <<= shift_width;
3050
3051
3052 dest = dest_ctx + (ce_info->lsb / 8);
3053
3054 memcpy(&dest_dword, dest, sizeof(dest_dword));
3055
3056 dest_dword &= ~(cpu_to_le32(mask));
3057 dest_dword |= cpu_to_le32(src_dword);
3058
3059
3060 memcpy(dest, &dest_dword, sizeof(dest_dword));
3061 }
3062
3063
3064
3065
3066
3067
3068
3069 static void
3070 ice_write_qword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
3071 {
3072 u64 src_qword, mask;
3073 __le64 dest_qword;
3074 u8 *from, *dest;
3075 u16 shift_width;
3076
3077
3078 from = src_ctx + ce_info->offset;
3079
3080
3081 shift_width = ce_info->lsb % 8;
3082
3083
3084
3085
3086
3087 if (ce_info->width < 64)
3088 mask = BIT_ULL(ce_info->width) - 1;
3089 else
3090 mask = (u64)~0;
3091
3092
3093
3094
3095 src_qword = *(u64 *)from;
3096 src_qword &= mask;
3097
3098
3099 mask <<= shift_width;
3100 src_qword <<= shift_width;
3101
3102
3103 dest = dest_ctx + (ce_info->lsb / 8);
3104
3105 memcpy(&dest_qword, dest, sizeof(dest_qword));
3106
3107 dest_qword &= ~(cpu_to_le64(mask));
3108 dest_qword |= cpu_to_le64(src_qword);
3109
3110
3111 memcpy(dest, &dest_qword, sizeof(dest_qword));
3112 }
3113
3114
3115
3116
3117
3118
3119
3120 enum ice_status
3121 ice_set_ctx(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
3122 {
3123 int f;
3124
3125 for (f = 0; ce_info[f].width; f++) {
3126
3127
3128
3129
3130 switch (ce_info[f].size_of) {
3131 case sizeof(u8):
3132 ice_write_byte(src_ctx, dest_ctx, &ce_info[f]);
3133 break;
3134 case sizeof(u16):
3135 ice_write_word(src_ctx, dest_ctx, &ce_info[f]);
3136 break;
3137 case sizeof(u32):
3138 ice_write_dword(src_ctx, dest_ctx, &ce_info[f]);
3139 break;
3140 case sizeof(u64):
3141 ice_write_qword(src_ctx, dest_ctx, &ce_info[f]);
3142 break;
3143 default:
3144 return ICE_ERR_INVAL_SIZE;
3145 }
3146 }
3147
3148 return 0;
3149 }
3150
3151
3152
3153
3154
3155
3156
3157
3158 static struct ice_q_ctx *
3159 ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle)
3160 {
3161 struct ice_vsi_ctx *vsi;
3162 struct ice_q_ctx *q_ctx;
3163
3164 vsi = ice_get_vsi_ctx(hw, vsi_handle);
3165 if (!vsi)
3166 return NULL;
3167 if (q_handle >= vsi->num_lan_q_entries[tc])
3168 return NULL;
3169 if (!vsi->lan_q_ctx[tc])
3170 return NULL;
3171 q_ctx = vsi->lan_q_ctx[tc];
3172 return &q_ctx[q_handle];
3173 }
3174
3175
3176
3177
3178
3179
3180
3181
3182
3183
3184
3185
3186
3187
/**
 * ice_ena_vsi_txq
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc: TC number
 * @q_handle: software queue handle
 * @num_qgrps: number of added queue groups (only 1 supported)
 * @buf: list of queue groups to be added
 * @buf_size: size of @buf for the indirect command
 * @cd: pointer to command details structure or NULL
 *
 * Adds a single LAN Tx queue: sends the add-queue admin command and inserts
 * a matching leaf node into the scheduler tree.  Serialized against other
 * scheduler updates by pi->sched_lock.
 */
enum ice_status
ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
		u8 num_qgrps, struct ice_aqc_add_tx_qgrp *buf, u16 buf_size,
		struct ice_sq_cd *cd)
{
	struct ice_aqc_txsched_elem_data node = { 0 };
	struct ice_sched_node *parent;
	struct ice_q_ctx *q_ctx;
	enum ice_status status;
	struct ice_hw *hw;

	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return ICE_ERR_CFG;

	/* only a single group carrying a single queue is supported */
	if (num_qgrps > 1 || buf->num_txqs > 1)
		return ICE_ERR_MAX_LIMIT;

	hw = pi->hw;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;

	mutex_lock(&pi->sched_lock);

	q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handle);
	if (!q_ctx) {
		ice_debug(hw, ICE_DBG_SCHED, "Enaq: invalid queue handle %d\n",
			  q_handle);
		status = ICE_ERR_PARAM;
		goto ena_txq_exit;
	}

	/* find a free parent node in the scheduler tree for this queue */
	parent = ice_sched_get_free_qparent(pi, vsi_handle, tc,
					    ICE_SCHED_NODE_OWNER_LAN);
	if (!parent) {
		status = ICE_ERR_PARAM;
		goto ena_txq_exit;
	}

	/* both the AQ buffer and the local node record the same parent */
	buf->parent_teid = parent->info.node_teid;
	node.parent_teid = parent->info.node_teid;

	/* mark only the "generic" section of the element data as valid;
	 * NOTE(review): presumably firmware uses defaults for the other
	 * sections — confirm against the Admin Queue specification
	 */
	buf->txqs[0].info.valid_sections = ICE_AQC_ELEM_VALID_GENERIC;

	/* issue the add-Tx-queue admin command */
	status = ice_aq_add_lan_txq(hw, num_qgrps, buf, buf_size, cd);
	if (status) {
		ice_debug(hw, ICE_DBG_SCHED, "enable queue %d failed %d\n",
			  le16_to_cpu(buf->txqs[0].txq_id),
			  hw->adminq.sq_last_status);
		goto ena_txq_exit;
	}

	/* record the firmware-assigned TEID and bind the software handle */
	node.node_teid = buf->txqs[0].q_teid;
	node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF;
	q_ctx->q_handle = q_handle;

	/* add the leaf into the bottom (queue) layer of the scheduler tree */
	status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, &node);

ena_txq_exit:
	mutex_unlock(&pi->sched_lock);
	return status;
}
3263
3264
3265
3266
3267
3268
3269
3270
3271
3272
3273
3274
3275
3276
3277
3278
/**
 * ice_dis_vsi_txq
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc: TC number
 * @num_queues: number of queues to disable
 * @q_handles: software queue handles, one per queue
 * @q_ids: queue IDs, one per queue
 * @q_teids: queue TEIDs, one per queue
 * @rst_src: reset source (or ICE_NO_RESET)
 * @vmvf_num: VM/VF number the reset applies to
 * @cd: pointer to command details structure or NULL
 *
 * Disables each listed LAN Tx queue and removes its scheduler leaf node.
 * With @num_queues == 0 and a reset source, only the reset is signalled.
 * Returns ICE_ERR_DOES_NOT_EXIST if every entry was skipped.
 */
enum ice_status
ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
		u16 *q_handles, u16 *q_ids, u32 *q_teids,
		enum ice_disq_rst_src rst_src, u16 vmvf_num,
		struct ice_sq_cd *cd)
{
	enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
	struct ice_aqc_dis_txq_item qg_list;
	struct ice_q_ctx *q_ctx;
	u16 i;

	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return ICE_ERR_CFG;

	if (!num_queues) {
		/* no queues listed: only a reset request can be carried;
		 * anything else is a configuration error
		 */
		if (rst_src)
			return ice_aq_dis_lan_txq(pi->hw, 0, NULL, 0, rst_src,
						  vmvf_num, NULL);
		return ICE_ERR_CFG;
	}

	mutex_lock(&pi->sched_lock);

	for (i = 0; i < num_queues; i++) {
		struct ice_sched_node *node;

		/* skip queues with no scheduler node */
		node = ice_sched_find_node_by_teid(pi->root, q_teids[i]);
		if (!node)
			continue;
		q_ctx = ice_get_lan_q_ctx(pi->hw, vsi_handle, tc, q_handles[i]);
		if (!q_ctx) {
			ice_debug(pi->hw, ICE_DBG_SCHED, "invalid queue handle%d\n",
				  q_handles[i]);
			continue;
		}
		/* skip queues whose stored handle does not match the caller's */
		if (q_ctx->q_handle != q_handles[i]) {
			ice_debug(pi->hw, ICE_DBG_SCHED, "Err:handles %d %d\n",
				  q_ctx->q_handle, q_handles[i]);
			continue;
		}
		/* disable one queue per AQ call */
		qg_list.parent_teid = node->info.parent_teid;
		qg_list.num_qs = 1;
		qg_list.q_id[0] = cpu_to_le16(q_ids[i]);
		status = ice_aq_dis_lan_txq(pi->hw, 1, &qg_list,
					    sizeof(qg_list), rst_src, vmvf_num,
					    cd);

		if (status)
			break;
		/* on success, drop the scheduler node and invalidate the
		 * software handle
		 */
		ice_free_sched_node(pi, node);
		q_ctx->q_handle = ICE_INVAL_Q_HANDLE;
	}
	mutex_unlock(&pi->sched_lock);
	return status;
}
3338
3339
3340
3341
3342
3343
3344
3345
3346
3347
3348
3349 static enum ice_status
3350 ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
3351 u16 *maxqs, u8 owner)
3352 {
3353 enum ice_status status = 0;
3354 u8 i;
3355
3356 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
3357 return ICE_ERR_CFG;
3358
3359 if (!ice_is_vsi_valid(pi->hw, vsi_handle))
3360 return ICE_ERR_PARAM;
3361
3362 mutex_lock(&pi->sched_lock);
3363
3364 ice_for_each_traffic_class(i) {
3365
3366 if (!ice_sched_get_tc_node(pi, i))
3367 continue;
3368
3369 status = ice_sched_cfg_vsi(pi, vsi_handle, i, maxqs[i], owner,
3370 ice_is_tc_ena(tc_bitmap, i));
3371 if (status)
3372 break;
3373 }
3374
3375 mutex_unlock(&pi->sched_lock);
3376 return status;
3377 }
3378
3379
3380
3381
3382
3383
3384
3385
3386
3387
3388 enum ice_status
3389 ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
3390 u16 *max_lanqs)
3391 {
3392 return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_lanqs,
3393 ICE_SCHED_NODE_OWNER_LAN);
3394 }
3395
3396
3397
3398
3399
3400
3401
3402 static enum ice_status ice_replay_pre_init(struct ice_hw *hw)
3403 {
3404 struct ice_switch_info *sw = hw->switch_info;
3405 u8 i;
3406
3407
3408 ice_rm_all_sw_replay_rule_info(hw);
3409
3410
3411
3412
3413 for (i = 0; i < ICE_SW_LKUP_LAST; i++)
3414 list_replace_init(&sw->recp_list[i].filt_rules,
3415 &sw->recp_list[i].filt_replay_rules);
3416
3417 return 0;
3418 }
3419
3420
3421
3422
3423
3424
3425
3426
3427
3428 enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle)
3429 {
3430 enum ice_status status;
3431
3432 if (!ice_is_vsi_valid(hw, vsi_handle))
3433 return ICE_ERR_PARAM;
3434
3435
3436 if (vsi_handle == ICE_MAIN_VSI_HANDLE) {
3437 status = ice_replay_pre_init(hw);
3438 if (status)
3439 return status;
3440 }
3441
3442
3443 status = ice_replay_vsi_all_fltr(hw, vsi_handle);
3444 return status;
3445 }
3446
3447
3448
3449
3450
3451
3452
/**
 * ice_replay_post - clean up after a completed replay
 * @hw: pointer to the HW struct
 *
 * The saved replay rules are no longer needed once replay is done.
 */
void ice_replay_post(struct ice_hw *hw)
{
	/* delete old (saved) switch rules */
	ice_rm_all_sw_replay_rule_info(hw);
}
3458
3459
3460
3461
3462
3463
3464
3465
3466
3467 void
3468 ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
3469 u64 *prev_stat, u64 *cur_stat)
3470 {
3471 u64 new_data = rd64(hw, reg) & (BIT_ULL(40) - 1);
3472
3473
3474
3475
3476
3477
3478 if (!prev_stat_loaded) {
3479 *prev_stat = new_data;
3480 return;
3481 }
3482
3483
3484
3485
3486 if (new_data >= *prev_stat)
3487 *cur_stat += new_data - *prev_stat;
3488 else
3489
3490 *cur_stat += (new_data + BIT_ULL(40)) - *prev_stat;
3491
3492
3493 *prev_stat = new_data;
3494 }
3495
3496
3497
3498
3499
3500
3501
3502
3503
3504 void
3505 ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
3506 u64 *prev_stat, u64 *cur_stat)
3507 {
3508 u32 new_data;
3509
3510 new_data = rd32(hw, reg);
3511
3512
3513
3514
3515
3516
3517 if (!prev_stat_loaded) {
3518 *prev_stat = new_data;
3519 return;
3520 }
3521
3522
3523
3524
3525 if (new_data >= *prev_stat)
3526 *cur_stat += new_data - *prev_stat;
3527 else
3528
3529 *cur_stat += (new_data + BIT_ULL(32)) - *prev_stat;
3530
3531
3532 *prev_stat = new_data;
3533 }
3534
3535
3536
3537
3538
3539
3540
3541
3542
3543 enum ice_status
3544 ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
3545 struct ice_aqc_get_elem *buf)
3546 {
3547 u16 buf_size, num_elem_ret = 0;
3548 enum ice_status status;
3549
3550 buf_size = sizeof(*buf);
3551 memset(buf, 0, buf_size);
3552 buf->generic[0].node_teid = cpu_to_le32(node_teid);
3553 status = ice_aq_query_sched_elems(hw, 1, buf, buf_size, &num_elem_ret,
3554 NULL);
3555 if (status || num_elem_ret != 1)
3556 ice_debug(hw, ICE_DBG_SCHED, "query element failed\n");
3557 return status;
3558 }