This source file includes the following definitions:
- hclge_mac_update_stats_defective
- hclge_mac_update_stats_complete
- hclge_mac_query_reg_num
- hclge_mac_update_stats
- hclge_tqps_update_stats
- hclge_tqps_get_stats
- hclge_tqps_get_sset_count
- hclge_tqps_get_strings
- hclge_comm_get_stats
- hclge_comm_get_strings
- hclge_update_stats_for_all
- hclge_update_stats
- hclge_get_sset_count
- hclge_get_strings
- hclge_get_stats
- hclge_get_mac_stat
- hclge_parse_func_status
- hclge_query_function_status
- hclge_query_pf_resource
- hclge_parse_speed
- hclge_check_port_speed
- hclge_convert_setting_sr
- hclge_convert_setting_lr
- hclge_convert_setting_cr
- hclge_convert_setting_kr
- hclge_convert_setting_fec
- hclge_parse_fiber_link_mode
- hclge_parse_backplane_link_mode
- hclge_parse_copper_link_mode
- hclge_parse_link_mode
- hclge_parse_cfg
- hclge_get_cfg
- hclge_get_cap
- hclge_init_kdump_kernel_config
- hclge_configure
- hclge_config_tso
- hclge_config_gro
- hclge_alloc_tqps
- hclge_map_tqps_to_func
- hclge_assign_tqp
- hclge_knic_setup
- hclge_map_tqp_to_vport
- hclge_map_tqp
- hclge_vport_setup
- hclge_alloc_vport
- hclge_cmd_alloc_tx_buff
- hclge_tx_buffer_alloc
- hclge_get_tc_num
- hclge_get_pfc_priv_num
- hclge_get_no_pfc_priv_num
- hclge_get_rx_priv_buff_alloced
- hclge_get_tx_buff_alloced
- hclge_is_rx_buf_ok
- hclge_tx_buffer_calc
- hclge_rx_buf_calc_all
- hclge_drop_nopfc_buf_till_fit
- hclge_drop_pfc_buf_till_fit
- hclge_only_alloc_priv_buff
- hclge_rx_buffer_calc
- hclge_rx_priv_buf_alloc
- hclge_rx_priv_wl_config
- hclge_common_thrd_config
- hclge_common_wl_config
- hclge_buffer_alloc
- hclge_init_roce_base_info
- hclge_init_msi
- hclge_check_speed_dup
- hclge_cfg_mac_speed_dup_hw
- hclge_cfg_mac_speed_dup
- hclge_cfg_mac_speed_dup_h
- hclge_set_autoneg_en
- hclge_set_autoneg
- hclge_get_autoneg
- hclge_restart_autoneg
- hclge_halt_autoneg
- hclge_set_fec_hw
- hclge_set_fec
- hclge_get_fec
- hclge_mac_init
- hclge_mbx_task_schedule
- hclge_reset_task_schedule
- hclge_task_schedule
- hclge_get_mac_link_status
- hclge_get_mac_phy_link
- hclge_update_link_status
- hclge_update_port_capability
- hclge_get_sfp_speed
- hclge_get_sfp_info
- hclge_update_port_info
- hclge_get_status
- hclge_check_event_cause
- hclge_clear_event_cause
- hclge_clear_all_event_cause
- hclge_enable_vector
- hclge_misc_irq_handle
- hclge_free_vector
- hclge_get_misc_vector
- hclge_irq_affinity_notify
- hclge_irq_affinity_release
- hclge_misc_affinity_setup
- hclge_misc_affinity_teardown
- hclge_misc_irq_init
- hclge_misc_irq_uninit
- hclge_notify_client
- hclge_notify_roce_client
- hclge_reset_wait
- hclge_set_vf_rst
- hclge_set_all_vf_rst
- hclge_func_reset_sync_vf
- hclge_report_hw_error
- hclge_handle_imp_error
- hclge_func_reset_cmd
- hclge_do_reset
- hclge_get_reset_level
- hclge_clear_reset_cause
- hclge_reset_prepare_down
- hclge_reset_handshake
- hclge_reset_prepare_wait
- hclge_reset_err_handle
- hclge_set_rst_done
- hclge_reset_prepare_up
- hclge_reset_stack
- hclge_reset
- hclge_reset_event
- hclge_set_def_reset_request
- hclge_reset_timer
- hclge_reset_subtask
- hclge_reset_service_task
- hclge_mailbox_service_task
- hclge_update_vport_alive
- hclge_service_task
- hclge_get_vport
- hclge_get_vector
- hclge_get_vector_index
- hclge_put_vector
- hclge_get_rss_key_size
- hclge_get_rss_indir_size
- hclge_set_rss_algo_key
- hclge_set_rss_indir_table
- hclge_set_rss_tc_mode
- hclge_get_rss_type
- hclge_set_rss_input_tuple
- hclge_get_rss
- hclge_set_rss
- hclge_get_rss_hash_bits
- hclge_set_rss_tuple
- hclge_get_rss_tuple
- hclge_get_tc_size
- hclge_rss_init_hw
- hclge_rss_indir_init_cfg
- hclge_rss_init_cfg
- hclge_bind_ring_with_vector
- hclge_map_ring_to_vector
- hclge_unmap_ring_frm_vector
- hclge_cmd_set_promisc_mode
- hclge_promisc_param_init
- hclge_set_promisc_mode
- hclge_get_fd_mode
- hclge_get_fd_allocation
- hclge_set_fd_key_config
- hclge_init_fd_config
- hclge_fd_tcam_config
- hclge_fd_ad_config
- hclge_fd_convert_tuple
- hclge_get_port_number
- hclge_fd_convert_meta_data
- hclge_config_key
- hclge_config_action
- hclge_fd_check_spec
- hclge_fd_rule_exist
- hclge_fd_update_rule_list
- hclge_fd_get_tuple
- hclge_fd_config_rule
- hclge_add_fd_entry
- hclge_del_fd_entry
- hclge_del_all_fd_entries
- hclge_restore_fd_entries
- hclge_get_fd_rule_cnt
- hclge_get_fd_rule_info
- hclge_get_all_rules
- hclge_fd_get_flow_tuples
- hclge_fd_search_flow_keys
- hclge_fd_build_arfs_rule
- hclge_add_fd_entry_by_arfs
- hclge_rfs_filter_expire
- hclge_clear_arfs_rules
- hclge_get_hw_reset_stat
- hclge_ae_dev_resetting
- hclge_ae_dev_reset_cnt
- hclge_enable_fd
- hclge_cfg_mac_mode
- hclge_config_switch_param
- hclge_phy_link_status_wait
- hclge_mac_link_status_wait
- hclge_mac_phy_link_status_wait
- hclge_set_app_loopback
- hclge_cfg_serdes_loopback
- hclge_set_serdes_loopback
- hclge_enable_phy_loopback
- hclge_disable_phy_loopback
- hclge_set_phy_loopback
- hclge_tqp_enable
- hclge_set_loopback
- hclge_set_default_loopback
- hclge_reset_tqp_stats
- hclge_set_timer_task
- hclge_ae_start
- hclge_ae_stop
- hclge_vport_start
- hclge_vport_stop
- hclge_client_start
- hclge_client_stop
- hclge_get_mac_vlan_cmd_status
- hclge_update_desc_vfid
- hclge_is_all_function_id_zero
- hclge_prepare_mac_addr
- hclge_remove_mac_vlan_tbl
- hclge_lookup_mac_vlan_tbl
- hclge_add_mac_vlan_tbl
- hclge_init_umv_space
- hclge_uninit_umv_space
- hclge_set_umv_space
- hclge_reset_umv_space
- hclge_is_umv_space_full
- hclge_update_umv_space
- hclge_add_uc_addr
- hclge_add_uc_addr_common
- hclge_rm_uc_addr
- hclge_rm_uc_addr_common
- hclge_add_mc_addr
- hclge_add_mc_addr_common
- hclge_rm_mc_addr
- hclge_rm_mc_addr_common
- hclge_add_vport_mac_table
- hclge_rm_vport_mac_table
- hclge_rm_vport_all_mac_table
- hclge_uninit_vport_mac_table
- hclge_get_mac_ethertype_cmd_status
- hclge_add_mgr_tbl
- init_mgr_tbl
- hclge_get_mac_addr
- hclge_set_mac_addr
- hclge_do_ioctl
- hclge_set_vlan_filter_ctrl
- hclge_enable_vlan_filter
- hclge_set_vf_vlan_common
- hclge_set_port_vlan_filter
- hclge_set_vlan_filter_hw
- hclge_set_vlan_tx_offload_cfg
- hclge_set_vlan_rx_offload_cfg
- hclge_vlan_offload_cfg
- hclge_set_vlan_protocol_type
- hclge_init_vlan_config
- hclge_add_vport_vlan_table
- hclge_add_vport_all_vlan_table
- hclge_rm_vport_vlan_table
- hclge_rm_vport_all_vlan_table
- hclge_uninit_vport_vlan_table
- hclge_restore_vlan_table
- hclge_en_hw_strip_rxvtag
- hclge_update_vlan_filter_entries
- hclge_update_port_base_vlan_cfg
- hclge_get_port_base_vlan_state
- hclge_set_vf_vlan_filter
- hclge_set_vlan_filter
- hclge_sync_vlan_filter
- hclge_set_mac_mtu
- hclge_set_mtu
- hclge_set_vport_mtu
- hclge_send_reset_tqp_cmd
- hclge_get_reset_status
- hclge_covert_handle_qid_global
- hclge_reset_tqp
- hclge_reset_vf_queue
- hclge_get_fw_version
- hclge_set_flowctrl_adv
- hclge_cfg_pauseparam
- hclge_cfg_flowctrl
- hclge_get_pauseparam
- hclge_record_user_pauseparam
- hclge_set_pauseparam
- hclge_get_ksettings_an_result
- hclge_get_media_type
- hclge_get_mdix_mode
- hclge_info_show
- hclge_init_nic_client_instance
- hclge_init_roce_client_instance
- hclge_init_client_instance
- hclge_uninit_client_instance
- hclge_pci_init
- hclge_pci_uninit
- hclge_state_init
- hclge_state_uninit
- hclge_flr_prepare
- hclge_flr_done
- hclge_clear_resetting_state
- hclge_init_ae_dev
- hclge_stats_clear
- hclge_reset_vport_state
- hclge_reset_ae_dev
- hclge_uninit_ae_dev
- hclge_get_max_channels
- hclge_get_channels
- hclge_get_tqps_and_rss_info
- hclge_set_channels
- hclge_get_regs_num
- hclge_get_32_bit_regs
- hclge_get_64_bit_regs
- hclge_query_bd_num_cmd_send
- hclge_get_dfx_reg_bd_num
- hclge_dfx_reg_cmd_send
- hclge_dfx_reg_fetch_data
- hclge_get_dfx_reg_len
- hclge_get_dfx_reg
- hclge_fetch_pf_reg
- hclge_get_regs_len
- hclge_get_regs
- hclge_set_led_status
- hclge_set_led_id
- hclge_get_link_mode
- hclge_gro_en
- hclge_init
- hclge_exit
1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2016-2017 Hisilicon Limited.
3
4 #include <linux/acpi.h>
5 #include <linux/device.h>
6 #include <linux/etherdevice.h>
7 #include <linux/init.h>
8 #include <linux/interrupt.h>
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/netdevice.h>
12 #include <linux/pci.h>
13 #include <linux/platform_device.h>
14 #include <linux/if_vlan.h>
15 #include <linux/crash_dump.h>
16 #include <net/rtnetlink.h>
17 #include "hclge_cmd.h"
18 #include "hclge_dcb.h"
19 #include "hclge_main.h"
20 #include "hclge_mbx.h"
21 #include "hclge_mdio.h"
22 #include "hclge_tm.h"
23 #include "hclge_err.h"
24 #include "hnae3.h"
25
26 #define HCLGE_NAME "hclge"
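/* HCLGE_STATS_READ reads a u64 counter at a byte offset within a stats struct;
 * HCLGE_MAC_STATS_FIELD_OFF yields that offset for a hclge_mac_stats member.
 */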
27 #define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
28 #define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
29
30 #define HCLGE_BUF_SIZE_UNIT 256U
31 #define HCLGE_BUF_MUL_BY 2
32 #define HCLGE_BUF_DIV_BY 2
33 #define NEED_RESERVE_TC_NUM 2
34 #define BUF_MAX_PERCENT 100
35 #define BUF_RESERVE_PERCENT 90
36
37 #define HCLGE_RESET_MAX_FAIL_CNT 5
38 #define HCLGE_RESET_SYNC_TIME 100
39 #define HCLGE_PF_RESET_SYNC_TIME 20
40 #define HCLGE_PF_RESET_SYNC_CNT 1500
41
42
43 #define HCLGE_DFX_BIOS_BD_OFFSET 1
44 #define HCLGE_DFX_SSU_0_BD_OFFSET 2
45 #define HCLGE_DFX_SSU_1_BD_OFFSET 3
46 #define HCLGE_DFX_IGU_BD_OFFSET 4
47 #define HCLGE_DFX_RPU_0_BD_OFFSET 5
48 #define HCLGE_DFX_RPU_1_BD_OFFSET 6
49 #define HCLGE_DFX_NCSI_BD_OFFSET 7
50 #define HCLGE_DFX_RTC_BD_OFFSET 8
51 #define HCLGE_DFX_PPP_BD_OFFSET 9
52 #define HCLGE_DFX_RCB_BD_OFFSET 10
53 #define HCLGE_DFX_TQP_BD_OFFSET 11
54 #define HCLGE_DFX_SSU_2_BD_OFFSET 12
55
56 #define HCLGE_LINK_STATUS_MS 10
57
58 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
59 static int hclge_init_vlan_config(struct hclge_dev *hdev);
60 static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
61 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
62 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
63 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
64 u16 *allocated_size, bool is_alloc);
65 static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
66 static void hclge_clear_arfs_rules(struct hnae3_handle *handle);
67 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
68 unsigned long *addr);
69 static int hclge_set_default_loopback(struct hclge_dev *hdev);
70
71 static struct hnae3_ae_algo ae_algo;
72
73 static const struct pci_device_id ae_algo_pci_tbl[] = {
74 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
75 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
76 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
77 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
78 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
79 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
80 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
81 /* required last entry */
82 {0, }
83 };
84
85 MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
86
87 static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
88 HCLGE_CMDQ_TX_ADDR_H_REG,
89 HCLGE_CMDQ_TX_DEPTH_REG,
90 HCLGE_CMDQ_TX_TAIL_REG,
91 HCLGE_CMDQ_TX_HEAD_REG,
92 HCLGE_CMDQ_RX_ADDR_L_REG,
93 HCLGE_CMDQ_RX_ADDR_H_REG,
94 HCLGE_CMDQ_RX_DEPTH_REG,
95 HCLGE_CMDQ_RX_TAIL_REG,
96 HCLGE_CMDQ_RX_HEAD_REG,
97 HCLGE_VECTOR0_CMDQ_SRC_REG,
98 HCLGE_CMDQ_INTR_STS_REG,
99 HCLGE_CMDQ_INTR_EN_REG,
100 HCLGE_CMDQ_INTR_GEN_REG};
101
102 static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
103 HCLGE_VECTOR0_OTER_EN_REG,
104 HCLGE_MISC_RESET_STS_REG,
105 HCLGE_MISC_VECTOR_INT_STS,
106 HCLGE_GLOBAL_RESET_REG,
107 HCLGE_FUN_RST_ING,
108 HCLGE_GRO_EN_REG};
109
110 static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
111 HCLGE_RING_RX_ADDR_H_REG,
112 HCLGE_RING_RX_BD_NUM_REG,
113 HCLGE_RING_RX_BD_LENGTH_REG,
114 HCLGE_RING_RX_MERGE_EN_REG,
115 HCLGE_RING_RX_TAIL_REG,
116 HCLGE_RING_RX_HEAD_REG,
117 HCLGE_RING_RX_FBD_NUM_REG,
118 HCLGE_RING_RX_OFFSET_REG,
119 HCLGE_RING_RX_FBD_OFFSET_REG,
120 HCLGE_RING_RX_STASH_REG,
121 HCLGE_RING_RX_BD_ERR_REG,
122 HCLGE_RING_TX_ADDR_L_REG,
123 HCLGE_RING_TX_ADDR_H_REG,
124 HCLGE_RING_TX_BD_NUM_REG,
125 HCLGE_RING_TX_PRIORITY_REG,
126 HCLGE_RING_TX_TC_REG,
127 HCLGE_RING_TX_MERGE_EN_REG,
128 HCLGE_RING_TX_TAIL_REG,
129 HCLGE_RING_TX_HEAD_REG,
130 HCLGE_RING_TX_FBD_NUM_REG,
131 HCLGE_RING_TX_OFFSET_REG,
132 HCLGE_RING_TX_EBD_NUM_REG,
133 HCLGE_RING_TX_EBD_OFFSET_REG,
134 HCLGE_RING_TX_BD_ERR_REG,
135 HCLGE_RING_EN_REG};
136
137 static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
138 HCLGE_TQP_INTR_GL0_REG,
139 HCLGE_TQP_INTR_GL1_REG,
140 HCLGE_TQP_INTR_GL2_REG,
141 HCLGE_TQP_INTR_RL_REG};
142
143 static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
144 "App Loopback test",
145 "Serdes serial Loopback test",
146 "Serdes parallel Loopback test",
147 "Phy Loopback test"
148 };
149
150 static const struct hclge_comm_stats_str g_mac_stats_string[] = {
151 {"mac_tx_mac_pause_num",
152 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
153 {"mac_rx_mac_pause_num",
154 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
155 {"mac_tx_control_pkt_num",
156 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
157 {"mac_rx_control_pkt_num",
158 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
159 {"mac_tx_pfc_pkt_num",
160 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
161 {"mac_tx_pfc_pri0_pkt_num",
162 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
163 {"mac_tx_pfc_pri1_pkt_num",
164 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
165 {"mac_tx_pfc_pri2_pkt_num",
166 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
167 {"mac_tx_pfc_pri3_pkt_num",
168 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
169 {"mac_tx_pfc_pri4_pkt_num",
170 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
171 {"mac_tx_pfc_pri5_pkt_num",
172 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
173 {"mac_tx_pfc_pri6_pkt_num",
174 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
175 {"mac_tx_pfc_pri7_pkt_num",
176 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
177 {"mac_rx_pfc_pkt_num",
178 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
179 {"mac_rx_pfc_pri0_pkt_num",
180 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
181 {"mac_rx_pfc_pri1_pkt_num",
182 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
183 {"mac_rx_pfc_pri2_pkt_num",
184 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
185 {"mac_rx_pfc_pri3_pkt_num",
186 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
187 {"mac_rx_pfc_pri4_pkt_num",
188 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
189 {"mac_rx_pfc_pri5_pkt_num",
190 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
191 {"mac_rx_pfc_pri6_pkt_num",
192 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
193 {"mac_rx_pfc_pri7_pkt_num",
194 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
195 {"mac_tx_total_pkt_num",
196 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
197 {"mac_tx_total_oct_num",
198 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
199 {"mac_tx_good_pkt_num",
200 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
201 {"mac_tx_bad_pkt_num",
202 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
203 {"mac_tx_good_oct_num",
204 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
205 {"mac_tx_bad_oct_num",
206 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
207 {"mac_tx_uni_pkt_num",
208 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
209 {"mac_tx_multi_pkt_num",
210 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
211 {"mac_tx_broad_pkt_num",
212 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
213 {"mac_tx_undersize_pkt_num",
214 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
215 {"mac_tx_oversize_pkt_num",
216 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
217 {"mac_tx_64_oct_pkt_num",
218 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
219 {"mac_tx_65_127_oct_pkt_num",
220 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
221 {"mac_tx_128_255_oct_pkt_num",
222 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
223 {"mac_tx_256_511_oct_pkt_num",
224 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
225 {"mac_tx_512_1023_oct_pkt_num",
226 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
227 {"mac_tx_1024_1518_oct_pkt_num",
228 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
229 {"mac_tx_1519_2047_oct_pkt_num",
230 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
231 {"mac_tx_2048_4095_oct_pkt_num",
232 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
233 {"mac_tx_4096_8191_oct_pkt_num",
234 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
235 {"mac_tx_8192_9216_oct_pkt_num",
236 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
237 {"mac_tx_9217_12287_oct_pkt_num",
238 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
239 {"mac_tx_12288_16383_oct_pkt_num",
240 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
241 {"mac_tx_1519_max_good_pkt_num",
242 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
243 {"mac_tx_1519_max_bad_pkt_num",
244 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
245 {"mac_rx_total_pkt_num",
246 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
247 {"mac_rx_total_oct_num",
248 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
249 {"mac_rx_good_pkt_num",
250 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
251 {"mac_rx_bad_pkt_num",
252 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
253 {"mac_rx_good_oct_num",
254 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
255 {"mac_rx_bad_oct_num",
256 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
257 {"mac_rx_uni_pkt_num",
258 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
259 {"mac_rx_multi_pkt_num",
260 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
261 {"mac_rx_broad_pkt_num",
262 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
263 {"mac_rx_undersize_pkt_num",
264 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
265 {"mac_rx_oversize_pkt_num",
266 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
267 {"mac_rx_64_oct_pkt_num",
268 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
269 {"mac_rx_65_127_oct_pkt_num",
270 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
271 {"mac_rx_128_255_oct_pkt_num",
272 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
273 {"mac_rx_256_511_oct_pkt_num",
274 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
275 {"mac_rx_512_1023_oct_pkt_num",
276 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
277 {"mac_rx_1024_1518_oct_pkt_num",
278 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
279 {"mac_rx_1519_2047_oct_pkt_num",
280 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
281 {"mac_rx_2048_4095_oct_pkt_num",
282 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
283 {"mac_rx_4096_8191_oct_pkt_num",
284 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
285 {"mac_rx_8192_9216_oct_pkt_num",
286 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
287 {"mac_rx_9217_12287_oct_pkt_num",
288 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
289 {"mac_rx_12288_16383_oct_pkt_num",
290 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
291 {"mac_rx_1519_max_good_pkt_num",
292 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
293 {"mac_rx_1519_max_bad_pkt_num",
294 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},
295
296 {"mac_tx_fragment_pkt_num",
297 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
298 {"mac_tx_undermin_pkt_num",
299 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
300 {"mac_tx_jabber_pkt_num",
301 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
302 {"mac_tx_err_all_pkt_num",
303 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
304 {"mac_tx_from_app_good_pkt_num",
305 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
306 {"mac_tx_from_app_bad_pkt_num",
307 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
308 {"mac_rx_fragment_pkt_num",
309 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
310 {"mac_rx_undermin_pkt_num",
311 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
312 {"mac_rx_jabber_pkt_num",
313 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
314 {"mac_rx_fcs_err_pkt_num",
315 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
316 {"mac_rx_send_app_good_pkt_num",
317 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
318 {"mac_rx_send_app_bad_pkt_num",
319 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
320 };
321
322 static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
323 {
324 .flags = HCLGE_MAC_MGR_MASK_VLAN_B,
325 .ethter_type = cpu_to_le16(ETH_P_LLDP),
326 .mac_addr_hi32 = cpu_to_le32(htonl(0x0180C200)),
327 .mac_addr_lo16 = cpu_to_le16(htons(0x000E)),
328 .i_port_bitmap = 0x1,
329 },
330 };
331
332 static const u8 hclge_hash_key[] = {
333 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
334 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
335 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
336 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
337 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
338 };
339
340 static const u32 hclge_dfx_bd_offset_list[] = {
341 HCLGE_DFX_BIOS_BD_OFFSET,
342 HCLGE_DFX_SSU_0_BD_OFFSET,
343 HCLGE_DFX_SSU_1_BD_OFFSET,
344 HCLGE_DFX_IGU_BD_OFFSET,
345 HCLGE_DFX_RPU_0_BD_OFFSET,
346 HCLGE_DFX_RPU_1_BD_OFFSET,
347 HCLGE_DFX_NCSI_BD_OFFSET,
348 HCLGE_DFX_RTC_BD_OFFSET,
349 HCLGE_DFX_PPP_BD_OFFSET,
350 HCLGE_DFX_RCB_BD_OFFSET,
351 HCLGE_DFX_TQP_BD_OFFSET,
352 HCLGE_DFX_SSU_2_BD_OFFSET
353 };
354
355 static const enum hclge_opcode_type hclge_dfx_reg_opcode_list[] = {
356 HCLGE_OPC_DFX_BIOS_COMMON_REG,
357 HCLGE_OPC_DFX_SSU_REG_0,
358 HCLGE_OPC_DFX_SSU_REG_1,
359 HCLGE_OPC_DFX_IGU_EGU_REG,
360 HCLGE_OPC_DFX_RPU_REG_0,
361 HCLGE_OPC_DFX_RPU_REG_1,
362 HCLGE_OPC_DFX_NCSI_REG,
363 HCLGE_OPC_DFX_RTC_REG,
364 HCLGE_OPC_DFX_PPP_REG,
365 HCLGE_OPC_DFX_RCB_REG,
366 HCLGE_OPC_DFX_TQP_REG,
367 HCLGE_OPC_DFX_SSU_REG_2
368 };
369
370 static const struct key_info meta_data_key_info[] = {
371 { PACKET_TYPE_ID, 6},
372 { IP_FRAGEMENT, 1},
373 { ROCE_TYPE, 1},
374 { NEXT_KEY, 5},
375 { VLAN_NUMBER, 2},
376 { SRC_VPORT, 12},
377 { DST_VPORT, 12},
378 { TUNNEL_PACKET, 1},
379 };
380
381 static const struct key_info tuple_key_info[] = {
382 { OUTER_DST_MAC, 48},
383 { OUTER_SRC_MAC, 48},
384 { OUTER_VLAN_TAG_FST, 16},
385 { OUTER_VLAN_TAG_SEC, 16},
386 { OUTER_ETH_TYPE, 16},
387 { OUTER_L2_RSV, 16},
388 { OUTER_IP_TOS, 8},
389 { OUTER_IP_PROTO, 8},
390 { OUTER_SRC_IP, 32},
391 { OUTER_DST_IP, 32},
392 { OUTER_L3_RSV, 16},
393 { OUTER_SRC_PORT, 16},
394 { OUTER_DST_PORT, 16},
395 { OUTER_L4_RSV, 32},
396 { OUTER_TUN_VNI, 24},
397 { OUTER_TUN_FLOW_ID, 8},
398 { INNER_DST_MAC, 48},
399 { INNER_SRC_MAC, 48},
400 { INNER_VLAN_TAG_FST, 16},
401 { INNER_VLAN_TAG_SEC, 16},
402 { INNER_ETH_TYPE, 16},
403 { INNER_L2_RSV, 16},
404 { INNER_IP_TOS, 8},
405 { INNER_IP_PROTO, 8},
406 { INNER_SRC_IP, 32},
407 { INNER_DST_IP, 32},
408 { INNER_L3_RSV, 16},
409 { INNER_SRC_PORT, 16},
410 { INNER_DST_PORT, 16},
411 { INNER_L4_RSV, 32},
412 };
413
414 static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
415 {
416 #define HCLGE_MAC_CMD_NUM 21
417
418 u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
419 struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
420 __le64 *desc_data;
421 int i, k, n;
422 int ret;
423
424 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
425 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
426 if (ret) {
427 dev_err(&hdev->pdev->dev,
428 "Get MAC pkt stats fail, status = %d.\n", ret);
429
430 return ret;
431 }
432
433 for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
434 /* the first descriptor also carries the command head, so it holds fewer stats words */
435 if (unlikely(i == 0)) {
436 desc_data = (__le64 *)(&desc[i].data[0]);
437 n = HCLGE_RD_FIRST_STATS_NUM;
438 } else {
439 desc_data = (__le64 *)(&desc[i]);
440 n = HCLGE_RD_OTHER_STATS_NUM;
441 }
442
443 for (k = 0; k < n; k++) {
444 *data += le64_to_cpu(*desc_data);
445 data++;
446 desc_data++;
447 }
448 }
449
450 return 0;
451 }
452
453 static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
454 {
455 u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
456 struct hclge_desc *desc;
457 __le64 *desc_data;
458 u16 i, k, n;
459 int ret;
460
461 /* This may be called in atomic context, so the descriptor array
462  * is allocated with GFP_ATOMIC instead of sleeping.
463  */
464 desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_ATOMIC);
465 if (!desc)
466 return -ENOMEM;
467
468 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
469 ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
470 if (ret) {
471 kfree(desc);
472 return ret;
473 }
474
475 for (i = 0; i < desc_num; i++) {
476 /* the first descriptor also carries the command head, so it holds fewer stats words */
477 if (i == 0) {
478 desc_data = (__le64 *)(&desc[i].data[0]);
479 n = HCLGE_RD_FIRST_STATS_NUM;
480 } else {
481 desc_data = (__le64 *)(&desc[i]);
482 n = HCLGE_RD_OTHER_STATS_NUM;
483 }
484
485 for (k = 0; k < n; k++) {
486 *data += le64_to_cpu(*desc_data);
487 data++;
488 desc_data++;
489 }
490 }
491
492 kfree(desc);
493
494 return 0;
495 }
496
497 static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
498 {
499 struct hclge_desc desc;
500 __le32 *desc_data;
501 u32 reg_num;
502 int ret;
503
504 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
505 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
506 if (ret)
507 return ret;
508
509 desc_data = (__le32 *)(&desc.data[0]);
510 reg_num = le32_to_cpu(*desc_data);
511
512 *desc_num = 1 + ((reg_num - 3) >> 2) +
513 (u32)(((reg_num - 3) & 0x3) ? 1 : 0);
514
515 return 0;
516 }
517
518 static int hclge_mac_update_stats(struct hclge_dev *hdev)
519 {
520 u32 desc_num;
521 int ret;
522
523 ret = hclge_mac_query_reg_num(hdev, &desc_num);
524
525 /* a successful register-number query means firmware supports the all-stats command */
526 if (!ret)
527 ret = hclge_mac_update_stats_complete(hdev, desc_num);
528 else if (ret == -EOPNOTSUPP)
529 ret = hclge_mac_update_stats_defective(hdev);
530 else
531 dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");
532
533 return ret;
534 }
535
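/* Query the per-queue RX and TX packet counters from firmware and
 * accumulate them into each TQP's software stats.
 */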
536 static int hclge_tqps_update_stats(struct hnae3_handle *handle)
537 {
538 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
539 struct hclge_vport *vport = hclge_get_vport(handle);
540 struct hclge_dev *hdev = vport->back;
541 struct hnae3_queue *queue;
542 struct hclge_desc desc[1];
543 struct hclge_tqp *tqp;
544 int ret, i;
545
546 for (i = 0; i < kinfo->num_tqps; i++) {
547 queue = handle->kinfo.tqp[i];
548 tqp = container_of(queue, struct hclge_tqp, q);
549
550 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATUS,
551 true);
552
553 desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
554 ret = hclge_cmd_send(&hdev->hw, desc, 1);
555 if (ret) {
556 dev_err(&hdev->pdev->dev,
557 "Query tqp stat fail, status = %d,queue = %d\n",
558 ret, i);
559 return ret;
560 }
561 tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
562 le32_to_cpu(desc[0].data[1]);
563 }
564
565 for (i = 0; i < kinfo->num_tqps; i++) {
566 queue = handle->kinfo.tqp[i];
567 tqp = container_of(queue, struct hclge_tqp, q);
568
569 hclge_cmd_setup_basic_desc(&desc[0],
570 HCLGE_OPC_QUERY_TX_STATUS,
571 true);
572
573 desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
574 ret = hclge_cmd_send(&hdev->hw, desc, 1);
575 if (ret) {
576 dev_err(&hdev->pdev->dev,
577 "Query tqp stat fail, status = %d,queue = %d\n",
578 ret, i);
579 return ret;
580 }
581 tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
582 le32_to_cpu(desc[0].data[1]);
583 }
584
585 return 0;
586 }
587
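/* Emit the accumulated per-queue TX counters followed by the RX counters
 * and return the advanced buffer pointer.
 */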
588 static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
589 {
590 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
591 struct hclge_tqp *tqp;
592 u64 *buff = data;
593 int i;
594
595 for (i = 0; i < kinfo->num_tqps; i++) {
596 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
597 *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
598 }
599
600 for (i = 0; i < kinfo->num_tqps; i++) {
601 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
602 *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
603 }
604
605 return buff;
606 }
607
608 static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
609 {
610 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
611
612 /* each TQP provides one TX and one RX queue */
613 return kinfo->num_tqps * (2);
614 }
615
616 static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
617 {
618 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
619 u8 *buff = data;
620 int i = 0;
621
622 for (i = 0; i < kinfo->num_tqps; i++) {
623 struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
624 struct hclge_tqp, q);
625 snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
626 tqp->index);
627 buff = buff + ETH_GSTRING_LEN;
628 }
629
630 for (i = 0; i < kinfo->num_tqps; i++) {
631 struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
632 struct hclge_tqp, q);
633 snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
634 tqp->index);
635 buff = buff + ETH_GSTRING_LEN;
636 }
637
638 return buff;
639 }
640
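/* Copy each counter described by the string/offset table out of the
 * stats struct into the ethtool data buffer.
 */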
641 static u64 *hclge_comm_get_stats(const void *comm_stats,
642 const struct hclge_comm_stats_str strs[],
643 int size, u64 *data)
644 {
645 u64 *buf = data;
646 u32 i;
647
648 for (i = 0; i < size; i++)
649 buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);
650
651 return buf + size;
652 }
653
654 static u8 *hclge_comm_get_strings(u32 stringset,
655 const struct hclge_comm_stats_str strs[],
656 int size, u8 *data)
657 {
658 char *buff = (char *)data;
659 u32 i;
660
661 if (stringset != ETH_SS_STATS)
662 return buff;
663
664 for (i = 0; i < size; i++) {
665 snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
666 buff = buff + ETH_GSTRING_LEN;
667 }
668
669 return (u8 *)buff;
670 }
671
672 static void hclge_update_stats_for_all(struct hclge_dev *hdev)
673 {
674 struct hnae3_handle *handle;
675 int status;
676
677 handle = &hdev->vport[0].nic;
678 if (handle->client) {
679 status = hclge_tqps_update_stats(handle);
680 if (status) {
681 dev_err(&hdev->pdev->dev,
682 "Update TQPS stats fail, status = %d.\n",
683 status);
684 }
685 }
686
687 status = hclge_mac_update_stats(hdev);
688 if (status)
689 dev_err(&hdev->pdev->dev,
690 "Update MAC stats fail, status = %d.\n", status);
691 }
692
693 static void hclge_update_stats(struct hnae3_handle *handle,
694 struct net_device_stats *net_stats)
695 {
696 struct hclge_vport *vport = hclge_get_vport(handle);
697 struct hclge_dev *hdev = vport->back;
698 int status;
699
700 if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
701 return;
702
703 status = hclge_mac_update_stats(hdev);
704 if (status)
705 dev_err(&hdev->pdev->dev,
706 "Update MAC stats fail, status = %d.\n",
707 status);
708
709 status = hclge_tqps_update_stats(handle);
710 if (status)
711 dev_err(&hdev->pdev->dev,
712 "Update TQPS stats fail, status = %d.\n",
713 status);
714
715 clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
716 }
717
718 static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
719 {
720 #define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
721 HNAE3_SUPPORT_PHY_LOOPBACK |\
722 HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
723 HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)
724
725 struct hclge_vport *vport = hclge_get_vport(handle);
726 struct hclge_dev *hdev = vport->back;
727 int count = 0;
728
729 /* Loopback test support rules:
730  * app (mac) loopback: only GE-capable MACs (or revision >= 0x21) support it
731  * serdes loopback: supported by every MAC mode
732  * phy loopback: only when a PHY device is present
733  */
734 if (stringset == ETH_SS_TEST) {
735 /* clear the loopback capability flags before re-evaluating them */
736 handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
737 if (hdev->pdev->revision >= 0x21 ||
738 hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
739 hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
740 hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
741 count += 1;
742 handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
743 }
744
745 count += 2;
746 handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
747 handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
748
749 if (hdev->hw.mac.phydev) {
750 count += 1;
751 handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK;
752 }
753
754 } else if (stringset == ETH_SS_STATS) {
755 count = ARRAY_SIZE(g_mac_stats_string) +
756 hclge_tqps_get_sset_count(handle, stringset);
757 }
758
759 return count;
760 }
761
762 static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
763 u8 *data)
764 {
765 u8 *p = (char *)data;
766 int size;
767
768 if (stringset == ETH_SS_STATS) {
769 size = ARRAY_SIZE(g_mac_stats_string);
770 p = hclge_comm_get_strings(stringset, g_mac_stats_string,
771 size, p);
772 p = hclge_tqps_get_strings(handle, p);
773 } else if (stringset == ETH_SS_TEST) {
774 if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
775 memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
776 ETH_GSTRING_LEN);
777 p += ETH_GSTRING_LEN;
778 }
779 if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
780 memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
781 ETH_GSTRING_LEN);
782 p += ETH_GSTRING_LEN;
783 }
784 if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
785 memcpy(p,
786 hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
787 ETH_GSTRING_LEN);
788 p += ETH_GSTRING_LEN;
789 }
790 if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
791 memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY],
792 ETH_GSTRING_LEN);
793 p += ETH_GSTRING_LEN;
794 }
795 }
796 }
797
798 static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
799 {
800 struct hclge_vport *vport = hclge_get_vport(handle);
801 struct hclge_dev *hdev = vport->back;
802 u64 *p;
803
804 p = hclge_comm_get_stats(&hdev->hw_stats.mac_stats, g_mac_stats_string,
805 ARRAY_SIZE(g_mac_stats_string), data);
806 p = hclge_tqps_get_stats(handle, p);
807 }
808
809 static void hclge_get_mac_stat(struct hnae3_handle *handle,
810 struct hns3_mac_stats *mac_stats)
811 {
812 struct hclge_vport *vport = hclge_get_vport(handle);
813 struct hclge_dev *hdev = vport->back;
814
815 hclge_update_stats(handle, NULL);
816
817 mac_stats->tx_pause_cnt = hdev->hw_stats.mac_stats.mac_tx_mac_pause_num;
818 mac_stats->rx_pause_cnt = hdev->hw_stats.mac_stats.mac_rx_mac_pause_num;
819 }
820
821 static int hclge_parse_func_status(struct hclge_dev *hdev,
822 struct hclge_func_status_cmd *status)
823 {
824 if (!(status->pf_state & HCLGE_PF_STATE_DONE))
825 return -EINVAL;
826
827 /* record whether this PF is the main PF */
828 if (status->pf_state & HCLGE_PF_STATE_MAIN)
829 hdev->flag |= HCLGE_FLAG_MAIN;
830 else
831 hdev->flag &= ~HCLGE_FLAG_MAIN;
832
833 return 0;
834 }
835
836 static int hclge_query_function_status(struct hclge_dev *hdev)
837 {
838 #define HCLGE_QUERY_MAX_CNT 5
839
840 struct hclge_func_status_cmd *req;
841 struct hclge_desc desc;
842 int timeout = 0;
843 int ret;
844
845 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
846 req = (struct hclge_func_status_cmd *)desc.data;
847
848 do {
849 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
850 if (ret) {
851 dev_err(&hdev->pdev->dev,
852 "query function status failed %d.\n", ret);
853 return ret;
854 }
855
856 /* stop polling once firmware reports a valid pf state */
857 if (req->pf_state)
858 break;
859 usleep_range(1000, 2000);
860 } while (timeout++ < HCLGE_QUERY_MAX_CNT);
861
862 ret = hclge_parse_func_status(hdev, req);
863
864 return ret;
865 }
866
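/* Query the TQP count, packet/TX/DV buffer sizes and MSI-X vector layout
 * that firmware has assigned to this PF.
 */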
867 static int hclge_query_pf_resource(struct hclge_dev *hdev)
868 {
869 struct hclge_pf_res_cmd *req;
870 struct hclge_desc desc;
871 int ret;
872
873 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
874 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
875 if (ret) {
876 dev_err(&hdev->pdev->dev,
877 "query pf resource failed %d.\n", ret);
878 return ret;
879 }
880
881 req = (struct hclge_pf_res_cmd *)desc.data;
882 hdev->num_tqps = __le16_to_cpu(req->tqp_num);
883 hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
884
885 if (req->tx_buf_size)
886 hdev->tx_buf_size =
887 __le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
888 else
889 hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
890
891 hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);
892
893 if (req->dv_buf_size)
894 hdev->dv_buf_size =
895 __le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
896 else
897 hdev->dv_buf_size = HCLGE_DEFAULT_DV;
898
899 hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);
900
901 if (hnae3_dev_roce_supported(hdev)) {
902 hdev->roce_base_msix_offset =
903 hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
904 HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S);
905 hdev->num_roce_msi =
906 hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
907 HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
908
909 /* the NIC's MSI-X vector count always equals the RoCE's */
910 hdev->num_nic_msi = hdev->num_roce_msi;
911
912 /* the PF owns both NIC and RoCE vectors, with the NIC vectors
913  * laid out before the RoCE vectors
914  */
915 hdev->num_msi = hdev->num_roce_msi +
916 hdev->roce_base_msix_offset;
917 } else {
918 hdev->num_msi =
919 hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
920 HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
921
922 hdev->num_nic_msi = hdev->num_msi;
923 }
924
925 if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) {
926 dev_err(&hdev->pdev->dev,
927 "Just %u msi resources, not enough for pf(min:2).\n",
928 hdev->num_nic_msi);
929 return -EINVAL;
930 }
931
932 return 0;
933 }
934
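/* Translate the firmware speed code into the corresponding
 * HCLGE_MAC_SPEED_* value.
 */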
935 static int hclge_parse_speed(int speed_cmd, int *speed)
936 {
937 switch (speed_cmd) {
938 case 6:
939 *speed = HCLGE_MAC_SPEED_10M;
940 break;
941 case 7:
942 *speed = HCLGE_MAC_SPEED_100M;
943 break;
944 case 0:
945 *speed = HCLGE_MAC_SPEED_1G;
946 break;
947 case 1:
948 *speed = HCLGE_MAC_SPEED_10G;
949 break;
950 case 2:
951 *speed = HCLGE_MAC_SPEED_25G;
952 break;
953 case 3:
954 *speed = HCLGE_MAC_SPEED_40G;
955 break;
956 case 4:
957 *speed = HCLGE_MAC_SPEED_50G;
958 break;
959 case 5:
960 *speed = HCLGE_MAC_SPEED_100G;
961 break;
962 default:
963 return -EINVAL;
964 }
965
966 return 0;
967 }
968
969 static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
970 {
971 struct hclge_vport *vport = hclge_get_vport(handle);
972 struct hclge_dev *hdev = vport->back;
973 u32 speed_ability = hdev->hw.mac.speed_ability;
974 u32 speed_bit = 0;
975
976 switch (speed) {
977 case HCLGE_MAC_SPEED_10M:
978 speed_bit = HCLGE_SUPPORT_10M_BIT;
979 break;
980 case HCLGE_MAC_SPEED_100M:
981 speed_bit = HCLGE_SUPPORT_100M_BIT;
982 break;
983 case HCLGE_MAC_SPEED_1G:
984 speed_bit = HCLGE_SUPPORT_1G_BIT;
985 break;
986 case HCLGE_MAC_SPEED_10G:
987 speed_bit = HCLGE_SUPPORT_10G_BIT;
988 break;
989 case HCLGE_MAC_SPEED_25G:
990 speed_bit = HCLGE_SUPPORT_25G_BIT;
991 break;
992 case HCLGE_MAC_SPEED_40G:
993 speed_bit = HCLGE_SUPPORT_40G_BIT;
994 break;
995 case HCLGE_MAC_SPEED_50G:
996 speed_bit = HCLGE_SUPPORT_50G_BIT;
997 break;
998 case HCLGE_MAC_SPEED_100G:
999 speed_bit = HCLGE_SUPPORT_100G_BIT;
1000 break;
1001 default:
1002 return -EINVAL;
1003 }
1004
1005 if (speed_bit & speed_ability)
1006 return 0;
1007
1008 return -EINVAL;
1009 }
1010
1011 static void hclge_convert_setting_sr(struct hclge_mac *mac, u8 speed_ability)
1012 {
1013 if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1014 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
1015 mac->supported);
1016 if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1017 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1018 mac->supported);
1019 if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1020 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
1021 mac->supported);
1022 if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1023 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
1024 mac->supported);
1025 if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1026 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
1027 mac->supported);
1028 }
1029
1030 static void hclge_convert_setting_lr(struct hclge_mac *mac, u8 speed_ability)
1031 {
1032 if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1033 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
1034 mac->supported);
1035 if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1036 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1037 mac->supported);
1038 if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1039 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
1040 mac->supported);
1041 if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1042 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
1043 mac->supported);
1044 if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1045 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
1046 mac->supported);
1047 }
1048
1049 static void hclge_convert_setting_cr(struct hclge_mac *mac, u8 speed_ability)
1050 {
1051 if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1052 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
1053 mac->supported);
1054 if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1055 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
1056 mac->supported);
1057 if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1058 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
1059 mac->supported);
1060 if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1061 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
1062 mac->supported);
1063 if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1064 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
1065 mac->supported);
1066 }
1067
1068 static void hclge_convert_setting_kr(struct hclge_mac *mac, u8 speed_ability)
1069 {
1070 if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1071 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
1072 mac->supported);
1073 if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1074 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
1075 mac->supported);
1076 if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1077 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
1078 mac->supported);
1079 if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1080 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
1081 mac->supported);
1082 if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1083 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
1084 mac->supported);
1085 if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1086 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
1087 mac->supported);
1088 }
1089
1090 static void hclge_convert_setting_fec(struct hclge_mac *mac)
1091 {
1092 linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
1093 linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
1094
1095 switch (mac->speed) {
1096 case HCLGE_MAC_SPEED_10G:
1097 case HCLGE_MAC_SPEED_40G:
1098 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
1099 mac->supported);
1100 mac->fec_ability =
1101 BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
1102 break;
1103 case HCLGE_MAC_SPEED_25G:
1104 case HCLGE_MAC_SPEED_50G:
1105 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
1106 mac->supported);
1107 mac->fec_ability =
1108 BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
1109 BIT(HNAE3_FEC_AUTO);
1110 break;
1111 case HCLGE_MAC_SPEED_100G:
1112 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
1113 mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
1114 break;
1115 default:
1116 mac->fec_ability = 0;
1117 break;
1118 }
1119 }
1120
1121 static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
1122 u8 speed_ability)
1123 {
1124 struct hclge_mac *mac = &hdev->hw.mac;
1125
1126 if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1127 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
1128 mac->supported);
1129
1130 hclge_convert_setting_sr(mac, speed_ability);
1131 hclge_convert_setting_lr(mac, speed_ability);
1132 hclge_convert_setting_cr(mac, speed_ability);
1133 if (hdev->pdev->revision >= 0x21)
1134 hclge_convert_setting_fec(mac);
1135
1136 linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
1137 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1138 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1139 }
1140
1141 static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
1142 u8 speed_ability)
1143 {
1144 struct hclge_mac *mac = &hdev->hw.mac;
1145
1146 hclge_convert_setting_kr(mac, speed_ability);
1147 if (hdev->pdev->revision >= 0x21)
1148 hclge_convert_setting_fec(mac);
1149 linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
1150 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1151 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1152 }
1153
1154 static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
1155 u8 speed_ability)
1156 {
1157 unsigned long *supported = hdev->hw.mac.supported;
1158
1159 /* if firmware reports no speed ability, default to GE support */
1160 if (!speed_ability)
1161 speed_ability = HCLGE_SUPPORT_GE;
1162
1163 if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1164 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
1165 supported);
1166
1167 if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
1168 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
1169 supported);
1170 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
1171 supported);
1172 }
1173
1174 if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
1175 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
1176 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
1177 }
1178
1179 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
1180 linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
1181 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
1182 linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
1183 }
1184
1185 static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
1186 {
1187 u8 media_type = hdev->hw.mac.media_type;
1188
1189 if (media_type == HNAE3_MEDIA_TYPE_FIBER)
1190 hclge_parse_fiber_link_mode(hdev, speed_ability);
1191 else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
1192 hclge_parse_copper_link_mode(hdev, speed_ability);
1193 else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
1194 hclge_parse_backplane_link_mode(hdev, speed_ability);
1195 }
1196
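/* Decode the fields carried by the two HCLGE_OPC_GET_CFG_PARAM descriptors:
 * vmdq/TC/queue numbers, MAC address, default speed, RSS size, speed
 * ability and UMV table space.
 */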
1197 static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
1198 {
1199 struct hclge_cfg_param_cmd *req;
1200 u64 mac_addr_tmp_high;
1201 u64 mac_addr_tmp;
1202 unsigned int i;
1203
1204 req = (struct hclge_cfg_param_cmd *)desc[0].data;
1205
1206 /* fields carried by the first descriptor */
1207 cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1208 HCLGE_CFG_VMDQ_M,
1209 HCLGE_CFG_VMDQ_S);
1210 cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1211 HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
1212 cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1213 HCLGE_CFG_TQP_DESC_N_M,
1214 HCLGE_CFG_TQP_DESC_N_S);
1215
1216 cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
1217 HCLGE_CFG_PHY_ADDR_M,
1218 HCLGE_CFG_PHY_ADDR_S);
1219 cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
1220 HCLGE_CFG_MEDIA_TP_M,
1221 HCLGE_CFG_MEDIA_TP_S);
1222 cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
1223 HCLGE_CFG_RX_BUF_LEN_M,
1224 HCLGE_CFG_RX_BUF_LEN_S);
1225
1226 mac_addr_tmp = __le32_to_cpu(req->param[2]);
1227 mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
1228 HCLGE_CFG_MAC_ADDR_H_M,
1229 HCLGE_CFG_MAC_ADDR_H_S);
1230
1231 mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
1232
1233 cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
1234 HCLGE_CFG_DEFAULT_SPEED_M,
1235 HCLGE_CFG_DEFAULT_SPEED_S);
1236 cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
1237 HCLGE_CFG_RSS_SIZE_M,
1238 HCLGE_CFG_RSS_SIZE_S);
1239
1240 for (i = 0; i < ETH_ALEN; i++)
1241 cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
1242
1243 req = (struct hclge_cfg_param_cmd *)desc[1].data;
1244 cfg->numa_node_map = __le32_to_cpu(req->param[0]);
1245
1246 cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
1247 HCLGE_CFG_SPEED_ABILITY_M,
1248 HCLGE_CFG_SPEED_ABILITY_S);
1249 cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
1250 HCLGE_CFG_UMV_TBL_SPACE_M,
1251 HCLGE_CFG_UMV_TBL_SPACE_S);
1252 if (!cfg->umv_space)
1253 cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
1254 }
1255
1256 /* hclge_get_cfg - query the static configuration parameters
1257  * @hdev: pointer to struct hclge_dev
1258  * @hcfg: the config structure to be filled
1259  */
1260 static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
1261 {
1262 struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
1263 struct hclge_cfg_param_cmd *req;
1264 unsigned int i;
1265 int ret;
1266
1267 for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
1268 u32 offset = 0;
1269
1270 req = (struct hclge_cfg_param_cmd *)desc[i].data;
1271 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
1272 true);
1273 hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
1274 HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
1275
1276 hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
1277 HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
1278 req->offset = cpu_to_le32(offset);
1279 }
1280
1281 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
1282 if (ret) {
1283 dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
1284 return ret;
1285 }
1286
1287 hclge_parse_cfg(hcfg, desc);
1288
1289 return 0;
1290 }
1291
1292 static int hclge_get_cap(struct hclge_dev *hdev)
1293 {
1294 int ret;
1295
1296 ret = hclge_query_function_status(hdev);
1297 if (ret) {
1298 dev_err(&hdev->pdev->dev,
1299 "query function status error %d.\n", ret);
1300 return ret;
1301 }
1302
1303
1304 ret = hclge_query_pf_resource(hdev);
1305 if (ret)
1306 dev_err(&hdev->pdev->dev, "query pf resource error %d.\n", ret);
1307
1308 return ret;
1309 }
1310
1311 static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
1312 {
1313 #define HCLGE_MIN_TX_DESC 64
1314 #define HCLGE_MIN_RX_DESC 64
1315
1316 if (!is_kdump_kernel())
1317 return;
1318
1319 dev_info(&hdev->pdev->dev,
1320 "Running kdump kernel. Using minimal resources\n");
1321
1322 /* one TQP per function: PF plus VMDq vports plus requested VFs */
1323 hdev->num_tqps = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1324 hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
1325 hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
1326 }
1327
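/* Fetch the static configuration and apply it to the hclge_dev: queue and
 * TC limits, MAC address, link modes, kdump limits and default IRQ affinity.
 */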
1328 static int hclge_configure(struct hclge_dev *hdev)
1329 {
1330 struct hclge_cfg cfg;
1331 unsigned int i;
1332 int ret;
1333
1334 ret = hclge_get_cfg(hdev, &cfg);
1335 if (ret) {
1336 dev_err(&hdev->pdev->dev, "get mac mode error %d.\n", ret);
1337 return ret;
1338 }
1339
1340 hdev->num_vmdq_vport = cfg.vmdq_vport_num;
1341 hdev->base_tqp_pid = 0;
1342 hdev->rss_size_max = cfg.rss_size_max;
1343 hdev->rx_buf_len = cfg.rx_buf_len;
1344 ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
1345 hdev->hw.mac.media_type = cfg.media_type;
1346 hdev->hw.mac.phy_addr = cfg.phy_addr;
1347 hdev->num_tx_desc = cfg.tqp_desc_num;
1348 hdev->num_rx_desc = cfg.tqp_desc_num;
1349 hdev->tm_info.num_pg = 1;
1350 hdev->tc_max = cfg.tc_num;
1351 hdev->tm_info.hw_pfc_map = 0;
1352 hdev->wanted_umv_size = cfg.umv_space;
1353
1354 if (hnae3_dev_fd_supported(hdev)) {
1355 hdev->fd_en = true;
1356 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
1357 }
1358
1359 ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
1360 if (ret) {
1361 dev_err(&hdev->pdev->dev, "Get wrong speed ret=%d.\n", ret);
1362 return ret;
1363 }
1364
1365 hclge_parse_link_mode(hdev, cfg.speed_ability);
1366
1367 if ((hdev->tc_max > HNAE3_MAX_TC) ||
1368 (hdev->tc_max < 1)) {
1369 dev_warn(&hdev->pdev->dev, "TC num = %d.\n",
1370 hdev->tc_max);
1371 hdev->tc_max = 1;
1372 }
1373
1374 /* devices without DCB support get a single TC and no PFC */
1375 if (!hnae3_dev_dcb_supported(hdev)) {
1376 hdev->tc_max = 1;
1377 hdev->pfc_max = 0;
1378 } else {
1379 hdev->pfc_max = hdev->tc_max;
1380 }
1381
1382 hdev->tm_info.num_tc = 1;
1383
1384 /* currently only a contiguous TC map starting at TC0 is supported */
1385 for (i = 0; i < hdev->tm_info.num_tc; i++)
1386 hnae3_set_bit(hdev->hw_tc_map, i, 1);
1387
1388 hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
1389
1390 hclge_init_kdump_kernel_config(hdev);
1391
1392 /* derive the default IRQ affinity from the PCI function number on the local NUMA node */
1393 i = cpumask_weight(cpumask_of_node(dev_to_node(&hdev->pdev->dev)));
1394 i = i ? PCI_FUNC(hdev->pdev->devfn) % i : 0;
1395 cpumask_set_cpu(cpumask_local_spread(i, dev_to_node(&hdev->pdev->dev)),
1396 &hdev->affinity_mask);
1397
1398 return ret;
1399 }
1400
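/* Program the TSO MSS lower and upper bounds into hardware. */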
1401 static int hclge_config_tso(struct hclge_dev *hdev, unsigned int tso_mss_min,
1402 unsigned int tso_mss_max)
1403 {
1404 struct hclge_cfg_tso_status_cmd *req;
1405 struct hclge_desc desc;
1406 u16 tso_mss;
1407
1408 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);
1409
1410 req = (struct hclge_cfg_tso_status_cmd *)desc.data;
1411
1412 tso_mss = 0;
1413 hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
1414 HCLGE_TSO_MSS_MIN_S, tso_mss_min);
1415 req->tso_mss_min = cpu_to_le16(tso_mss);
1416
1417 tso_mss = 0;
1418 hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
1419 HCLGE_TSO_MSS_MIN_S, tso_mss_max);
1420 req->tso_mss_max = cpu_to_le16(tso_mss);
1421
1422 return hclge_cmd_send(&hdev->hw, &desc, 1);
1423 }
1424
1425 static int hclge_config_gro(struct hclge_dev *hdev, bool en)
1426 {
1427 struct hclge_cfg_gro_status_cmd *req;
1428 struct hclge_desc desc;
1429 int ret;
1430
1431 if (!hnae3_dev_gro_supported(hdev))
1432 return 0;
1433
1434 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
1435 req = (struct hclge_cfg_gro_status_cmd *)desc.data;
1436
1437 req->gro_en = cpu_to_le16(en ? 1 : 0);
1438
1439 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1440 if (ret)
1441 dev_err(&hdev->pdev->dev,
1442 "GRO hardware config cmd failed, ret = %d\n", ret);
1443
1444 return ret;
1445 }
1446
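/* Allocate the per-device TQP array and map each TQP to its register
 * window and descriptor counts.
 */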
1447 static int hclge_alloc_tqps(struct hclge_dev *hdev)
1448 {
1449 struct hclge_tqp *tqp;
1450 int i;
1451
1452 hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
1453 sizeof(struct hclge_tqp), GFP_KERNEL);
1454 if (!hdev->htqp)
1455 return -ENOMEM;
1456
1457 tqp = hdev->htqp;
1458
1459 for (i = 0; i < hdev->num_tqps; i++) {
1460 tqp->dev = &hdev->pdev->dev;
1461 tqp->index = i;
1462
1463 tqp->q.ae_algo = &ae_algo;
1464 tqp->q.buf_size = hdev->rx_buf_len;
1465 tqp->q.tx_desc_num = hdev->num_tx_desc;
1466 tqp->q.rx_desc_num = hdev->num_rx_desc;
1467 tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
1468 i * HCLGE_TQP_REG_SIZE;
1469
1470 tqp++;
1471 }
1472
1473 return 0;
1474 }
1475
1476 static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
1477 u16 tqp_pid, u16 tqp_vid, bool is_pf)
1478 {
1479 struct hclge_tqp_map_cmd *req;
1480 struct hclge_desc desc;
1481 int ret;
1482
1483 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);
1484
1485 req = (struct hclge_tqp_map_cmd *)desc.data;
1486 req->tqp_id = cpu_to_le16(tqp_pid);
1487 req->tqp_vf = func_id;
1488 req->tqp_flag = 1U << HCLGE_TQP_MAP_EN_B;
1489 if (!is_pf)
1490 req->tqp_flag |= 1U << HCLGE_TQP_MAP_TYPE_B;
1491 req->tqp_vid = cpu_to_le16(tqp_vid);
1492
1493 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1494 if (ret)
1495 dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);
1496
1497 return ret;
1498 }
1499
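/* Assign up to num_tqps unallocated hardware TQPs to this vport and size
 * its RSS within the available NIC MSI-X vectors.
 */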
1500 static int hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
1501 {
1502 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
1503 struct hclge_dev *hdev = vport->back;
1504 int i, alloced;
1505
1506 for (i = 0, alloced = 0; i < hdev->num_tqps &&
1507 alloced < num_tqps; i++) {
1508 if (!hdev->htqp[i].alloced) {
1509 hdev->htqp[i].q.handle = &vport->nic;
1510 hdev->htqp[i].q.tqp_index = alloced;
1511 hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
1512 hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
1513 kinfo->tqp[alloced] = &hdev->htqp[i].q;
1514 hdev->htqp[i].alloced = true;
1515 alloced++;
1516 }
1517 }
1518 vport->alloc_tqps = alloced;
1519 kinfo->rss_size = min_t(u16, hdev->rss_size_max,
1520 vport->alloc_tqps / hdev->tm_info.num_tc);
1521
1522
1523 kinfo->rss_size = min_t(u16, kinfo->rss_size,
1524 (hdev->num_nic_msi - 1) / hdev->tm_info.num_tc);
1525
1526 return 0;
1527 }
1528
1529 static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
1530 u16 num_tx_desc, u16 num_rx_desc)
1531
1532 {
1533 struct hnae3_handle *nic = &vport->nic;
1534 struct hnae3_knic_private_info *kinfo = &nic->kinfo;
1535 struct hclge_dev *hdev = vport->back;
1536 int ret;
1537
1538 kinfo->num_tx_desc = num_tx_desc;
1539 kinfo->num_rx_desc = num_rx_desc;
1540
1541 kinfo->rx_buf_len = hdev->rx_buf_len;
1542
1543 kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
1544 sizeof(struct hnae3_queue *), GFP_KERNEL);
1545 if (!kinfo->tqp)
1546 return -ENOMEM;
1547
1548 ret = hclge_assign_tqp(vport, num_tqps);
1549 if (ret)
1550 dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);
1551
1552 return ret;
1553 }
1554
1555 static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
1556 struct hclge_vport *vport)
1557 {
1558 struct hnae3_handle *nic = &vport->nic;
1559 struct hnae3_knic_private_info *kinfo;
1560 u16 i;
1561
1562 kinfo = &nic->kinfo;
1563 for (i = 0; i < vport->alloc_tqps; i++) {
1564 struct hclge_tqp *q =
1565 container_of(kinfo->tqp[i], struct hclge_tqp, q);
1566 bool is_pf;
1567 int ret;
1568
1569 is_pf = !(vport->vport_id);
1570 ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
1571 i, is_pf);
1572 if (ret)
1573 return ret;
1574 }
1575
1576 return 0;
1577 }
1578
1579 static int hclge_map_tqp(struct hclge_dev *hdev)
1580 {
1581 struct hclge_vport *vport = hdev->vport;
1582 u16 i, num_vport;
1583
1584 num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1585 for (i = 0; i < num_vport; i++) {
1586 int ret;
1587
1588 ret = hclge_map_tqp_to_vport(hdev, vport);
1589 if (ret)
1590 return ret;
1591
1592 vport++;
1593 }
1594
1595 return 0;
1596 }
1597
1598 static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
1599 {
1600 struct hnae3_handle *nic = &vport->nic;
1601 struct hclge_dev *hdev = vport->back;
1602 int ret;
1603
1604 nic->pdev = hdev->pdev;
1605 nic->ae_algo = &ae_algo;
1606 nic->numa_node_mask = hdev->numa_node_mask;
1607
1608 ret = hclge_knic_setup(vport, num_tqps,
1609 hdev->num_tx_desc, hdev->num_rx_desc);
1610 if (ret)
1611 dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret);
1612
1613 return ret;
1614 }
1615
1616 static int hclge_alloc_vport(struct hclge_dev *hdev)
1617 {
1618 struct pci_dev *pdev = hdev->pdev;
1619 struct hclge_vport *vport;
1620 u32 tqp_main_vport;
1621 u32 tqp_per_vport;
1622 int num_vport, i;
1623 int ret;
1624
1625 /* one additional vport is needed for the main NIC of the PF */
1626 num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1627
1628 if (hdev->num_tqps < num_vport) {
1629 dev_err(&hdev->pdev->dev, "tqps(%d) is less than vports(%d)",
1630 hdev->num_tqps, num_vport);
1631 return -EINVAL;
1632 }
1633
1634 /* give each vport the same number of TQPs; the main vport also takes the remainder */
1635 tqp_per_vport = hdev->num_tqps / num_vport;
1636 tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;
1637
1638 vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
1639 GFP_KERNEL);
1640 if (!vport)
1641 return -ENOMEM;
1642
1643 hdev->vport = vport;
1644 hdev->num_alloc_vport = num_vport;
1645
1646 if (IS_ENABLED(CONFIG_PCI_IOV))
1647 hdev->num_alloc_vfs = hdev->num_req_vfs;
1648
1649 for (i = 0; i < num_vport; i++) {
1650 vport->back = hdev;
1651 vport->vport_id = i;
1652 vport->mps = HCLGE_MAC_DEFAULT_FRAME;
1653 vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
1654 vport->rxvlan_cfg.rx_vlan_offload_en = true;
1655 INIT_LIST_HEAD(&vport->vlan_list);
1656 INIT_LIST_HEAD(&vport->uc_mac_list);
1657 INIT_LIST_HEAD(&vport->mc_mac_list);
1658
1659 if (i == 0)
1660 ret = hclge_vport_setup(vport, tqp_main_vport);
1661 else
1662 ret = hclge_vport_setup(vport, tqp_per_vport);
1663 if (ret) {
1664 dev_err(&pdev->dev,
1665 "vport setup failed for vport %d, %d\n",
1666 i, ret);
1667 return ret;
1668 }
1669
1670 vport++;
1671 }
1672
1673 return 0;
1674 }
1675
1676 static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
1677 struct hclge_pkt_buf_alloc *buf_alloc)
1678 {
1679 /* tx buffer size is configured in units of 128 bytes */
1680 #define HCLGE_BUF_SIZE_UNIT_SHIFT 7
1681 #define HCLGE_BUF_SIZE_UPDATE_EN_MSK BIT(15)
1682 struct hclge_tx_buff_alloc_cmd *req;
1683 struct hclge_desc desc;
1684 int ret;
1685 u8 i;
1686
1687 req = (struct hclge_tx_buff_alloc_cmd *)desc.data;
1688
1689 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
1690 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1691 u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;
1692
1693 req->tx_pkt_buff[i] =
1694 cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
1695 HCLGE_BUF_SIZE_UPDATE_EN_MSK);
1696 }
1697
1698 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1699 if (ret)
1700 dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
1701 ret);
1702
1703 return ret;
1704 }
1705
1706 static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
1707 struct hclge_pkt_buf_alloc *buf_alloc)
1708 {
1709 int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);
1710
1711 if (ret)
1712 dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);
1713
1714 return ret;
1715 }
1716
1717 static u32 hclge_get_tc_num(struct hclge_dev *hdev)
1718 {
1719 unsigned int i;
1720 u32 cnt = 0;
1721
1722 for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1723 if (hdev->hw_tc_map & BIT(i))
1724 cnt++;
1725 return cnt;
1726 }
1727
1728 /* count the TCs that have PFC enabled and a private buffer */
1729 static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
1730 struct hclge_pkt_buf_alloc *buf_alloc)
1731 {
1732 struct hclge_priv_buf *priv;
1733 unsigned int i;
1734 int cnt = 0;
1735
1736 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1737 priv = &buf_alloc->priv_buf[i];
1738 if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
1739 priv->enable)
1740 cnt++;
1741 }
1742
1743 return cnt;
1744 }
1745
1746 /* count the TCs that have PFC disabled but still own a private buffer */
1747 static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
1748 struct hclge_pkt_buf_alloc *buf_alloc)
1749 {
1750 struct hclge_priv_buf *priv;
1751 unsigned int i;
1752 int cnt = 0;
1753
1754 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1755 priv = &buf_alloc->priv_buf[i];
1756 if (hdev->hw_tc_map & BIT(i) &&
1757 !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
1758 priv->enable)
1759 cnt++;
1760 }
1761
1762 return cnt;
1763 }
1764
1765 static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1766 {
1767 struct hclge_priv_buf *priv;
1768 u32 rx_priv = 0;
1769 int i;
1770
1771 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1772 priv = &buf_alloc->priv_buf[i];
1773 if (priv->enable)
1774 rx_priv += priv->buf_size;
1775 }
1776 return rx_priv;
1777 }
1778
1779 static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1780 {
1781 u32 i, total_tx_size = 0;
1782
1783 for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1784 total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;
1785
1786 return total_tx_size;
1787 }
1788
1789 static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
1790 struct hclge_pkt_buf_alloc *buf_alloc,
1791 u32 rx_all)
1792 {
1793 u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
1794 u32 tc_num = hclge_get_tc_num(hdev);
1795 u32 shared_buf, aligned_mps;
1796 u32 rx_priv;
1797 int i;
1798
1799 aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1800
1801 if (hnae3_dev_dcb_supported(hdev))
1802 shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps +
1803 hdev->dv_buf_size;
1804 else
1805 shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
1806 + hdev->dv_buf_size;
1807
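/* the shared buffer must cover at least one aligned MPS per enabled TC plus one extra MPS */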
1808 shared_buf_tc = tc_num * aligned_mps + aligned_mps;
1809 shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
1810 HCLGE_BUF_SIZE_UNIT);
1811
1812 rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
1813 if (rx_all < rx_priv + shared_std)
1814 return false;
1815
1816 shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
1817 buf_alloc->s_buf.buf_size = shared_buf;
1818 if (hnae3_dev_dcb_supported(hdev)) {
1819 buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
1820 buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
1821 - roundup(aligned_mps / HCLGE_BUF_DIV_BY,
1822 HCLGE_BUF_SIZE_UNIT);
1823 } else {
1824 buf_alloc->s_buf.self.high = aligned_mps +
1825 HCLGE_NON_DCB_ADDITIONAL_BUF;
1826 buf_alloc->s_buf.self.low = aligned_mps;
1827 }
1828
1829 if (hnae3_dev_dcb_supported(hdev)) {
1830 hi_thrd = shared_buf - hdev->dv_buf_size;
1831
1832 if (tc_num <= NEED_RESERVE_TC_NUM)
1833 hi_thrd = hi_thrd * BUF_RESERVE_PERCENT
1834 / BUF_MAX_PERCENT;
1835
1836 if (tc_num)
1837 hi_thrd = hi_thrd / tc_num;
1838
1839 hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps);
1840 hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
1841 lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY;
1842 } else {
1843 hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
1844 lo_thrd = aligned_mps;
1845 }
1846
1847 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1848 buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
1849 buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
1850 }
1851
1852 return true;
1853 }
1854
1855 static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
1856 struct hclge_pkt_buf_alloc *buf_alloc)
1857 {
1858 u32 i, total_size;
1859
1860 total_size = hdev->pkt_buf_size;
1861
1862 /* allocate a tx buffer for every enabled TC */
1863 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1864 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1865
1866 if (hdev->hw_tc_map & BIT(i)) {
1867 if (total_size < hdev->tx_buf_size)
1868 return -ENOMEM;
1869
1870 priv->tx_buf_size = hdev->tx_buf_size;
1871 } else {
1872 priv->tx_buf_size = 0;
1873 }
1874
1875 total_size -= priv->tx_buf_size;
1876 }
1877
1878 return 0;
1879 }
1880
1881 static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
1882 struct hclge_pkt_buf_alloc *buf_alloc)
1883 {
1884 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1885 u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1886 unsigned int i;
1887
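/* assign a private buffer and waterlines to every enabled TC; 'max' selects the larger waterline scheme */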
1888 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1889 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1890
1891 priv->enable = 0;
1892 priv->wl.low = 0;
1893 priv->wl.high = 0;
1894 priv->buf_size = 0;
1895
1896 if (!(hdev->hw_tc_map & BIT(i)))
1897 continue;
1898
1899 priv->enable = 1;
1900
1901 if (hdev->tm_info.hw_pfc_map & BIT(i)) {
1902 priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT;
1903 priv->wl.high = roundup(priv->wl.low + aligned_mps,
1904 HCLGE_BUF_SIZE_UNIT);
1905 } else {
1906 priv->wl.low = 0;
1907 priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) :
1908 aligned_mps;
1909 }
1910
1911 priv->buf_size = priv->wl.high + hdev->dv_buf_size;
1912 }
1913
1914 return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1915 }
1916
1917 static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
1918 struct hclge_pkt_buf_alloc *buf_alloc)
1919 {
1920 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1921 int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
1922 int i;
1923
1924 /* clear the private buffer of non-PFC TCs, starting from the last TC, until the rx buffer fits */
1925 for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1926 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1927 unsigned int mask = BIT((unsigned int)i);
1928
1929 if (hdev->hw_tc_map & mask &&
1930 !(hdev->tm_info.hw_pfc_map & mask)) {
1931
1932 priv->wl.low = 0;
1933 priv->wl.high = 0;
1934 priv->buf_size = 0;
1935 priv->enable = 0;
1936 no_pfc_priv_num--;
1937 }
1938
1939 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
1940 no_pfc_priv_num == 0)
1941 break;
1942 }
1943
1944 return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1945 }
1946
1947 static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
1948 struct hclge_pkt_buf_alloc *buf_alloc)
1949 {
1950 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1951 int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
1952 int i;
1953
1954 /* clear the private buffer of PFC-enabled TCs, starting from the last TC, until the rx buffer fits */
1955 for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1956 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1957 unsigned int mask = BIT((unsigned int)i);
1958
1959 if (hdev->hw_tc_map & mask &&
1960 hdev->tm_info.hw_pfc_map & mask) {
1961
1962 priv->wl.low = 0;
1963 priv->enable = 0;
1964 priv->wl.high = 0;
1965 priv->buf_size = 0;
1966 pfc_priv_num--;
1967 }
1968
1969 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
1970 pfc_priv_num == 0)
1971 break;
1972 }
1973
1974 return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1975 }
1976
1977 static bool hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
1978 struct hclge_pkt_buf_alloc *buf_alloc)
1979 {
1980 #define COMPENSATE_BUFFER 0x3C00
1981 #define COMPENSATE_HALF_MPS_NUM 5
1982 #define PRIV_WL_GAP 0x1800
1983
1984 u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1985 u32 tc_num = hclge_get_tc_num(hdev);
1986 u32 half_mps = hdev->mps >> 1;
1987 u32 min_rx_priv;
1988 unsigned int i;
1989
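/* split the remaining rx buffer evenly among the enabled TCs as private buffer; no shared buffer is reserved in this scheme */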
1990 if (tc_num)
1991 rx_priv = rx_priv / tc_num;
1992
1993 if (tc_num <= NEED_RESERVE_TC_NUM)
1994 rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT;
1995
1996 min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER +
1997 COMPENSATE_HALF_MPS_NUM * half_mps;
1998 min_rx_priv = round_up(min_rx_priv, HCLGE_BUF_SIZE_UNIT);
1999 rx_priv = round_down(rx_priv, HCLGE_BUF_SIZE_UNIT);
2000
2001 if (rx_priv < min_rx_priv)
2002 return false;
2003
2004 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2005 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2006
2007 priv->enable = 0;
2008 priv->wl.low = 0;
2009 priv->wl.high = 0;
2010 priv->buf_size = 0;
2011
2012 if (!(hdev->hw_tc_map & BIT(i)))
2013 continue;
2014
2015 priv->enable = 1;
2016 priv->buf_size = rx_priv;
2017 priv->wl.high = rx_priv - hdev->dv_buf_size;
2018 priv->wl.low = priv->wl.high - PRIV_WL_GAP;
2019 }
2020
2021 buf_alloc->s_buf.buf_size = 0;
2022
2023 return true;
2024 }
2025
2026 /* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
2027  * @hdev: pointer to struct hclge_dev
2028  * @buf_alloc: pointer to buffer calculation data
2029  * @return: 0 on success, negative errno on failure
2030  */
2031 static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
2032 struct hclge_pkt_buf_alloc *buf_alloc)
2033 {
2034 /* when DCB is not supported, no rx private buffer is allocated */
2035 if (!hnae3_dev_dcb_supported(hdev)) {
2036 u32 rx_all = hdev->pkt_buf_size;
2037
2038 rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
2039 if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
2040 return -ENOMEM;
2041
2042 return 0;
2043 }
2044
2045 if (hclge_only_alloc_priv_buff(hdev, buf_alloc))
2046 return 0;
2047
2048 if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
2049 return 0;
2050
2051 /* try again with smaller waterlines to reduce the required buffer size */
2052 if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
2053 return 0;
2054
2055 if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
2056 return 0;
2057
2058 if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
2059 return 0;
2060
2061 return -ENOMEM;
2062 }
2063
2064 static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
2065 struct hclge_pkt_buf_alloc *buf_alloc)
2066 {
2067 struct hclge_rx_priv_buff_cmd *req;
2068 struct hclge_desc desc;
2069 int ret;
2070 int i;
2071
2072 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
2073 req = (struct hclge_rx_priv_buff_cmd *)desc.data;
2074
2075 /* allocate the private buffer of each enabled TC */
2076 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2077 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2078
2079 req->buf_num[i] =
2080 cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
2081 req->buf_num[i] |=
2082 cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
2083 }
2084
2085 req->shared_buf =
2086 cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
2087 (1 << HCLGE_TC0_PRI_BUF_EN_B));
2088
2089 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2090 if (ret)
2091 dev_err(&hdev->pdev->dev,
2092 "rx private buffer alloc cmd failed %d\n", ret);
2093
2094 return ret;
2095 }
2096
2097 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
2098 struct hclge_pkt_buf_alloc *buf_alloc)
2099 {
2100 struct hclge_rx_priv_wl_buf *req;
2101 struct hclge_priv_buf *priv;
2102 struct hclge_desc desc[2];
2103 int i, j;
2104 int ret;
2105
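/* the per-TC waterlines are spread over two descriptors, HCLGE_TC_NUM_ONE_DESC TCs in each */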
2106 for (i = 0; i < 2; i++) {
2107 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
2108 false);
2109 req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
2110
2111 /* the first descriptor sets the NEXT flag, the second one clears it */
2112 if (i == 0)
2113 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2114 else
2115 desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2116
2117 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2118 u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
2119
2120 priv = &buf_alloc->priv_buf[idx];
2121 req->tc_wl[j].high =
2122 cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
2123 req->tc_wl[j].high |=
2124 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2125 req->tc_wl[j].low =
2126 cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
2127 req->tc_wl[j].low |=
2128 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2129 }
2130 }
2131
2132 /* send the two descriptors in one command */
2133 ret = hclge_cmd_send(&hdev->hw, desc, 2);
2134 if (ret)
2135 dev_err(&hdev->pdev->dev,
2136 "rx private waterline config cmd failed %d\n",
2137 ret);
2138 return ret;
2139 }
2140
2141 static int hclge_common_thrd_config(struct hclge_dev *hdev,
2142 struct hclge_pkt_buf_alloc *buf_alloc)
2143 {
2144 struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
2145 struct hclge_rx_com_thrd *req;
2146 struct hclge_desc desc[2];
2147 struct hclge_tc_thrd *tc;
2148 int i, j;
2149 int ret;
2150
2151 for (i = 0; i < 2; i++) {
2152 hclge_cmd_setup_basic_desc(&desc[i],
2153 HCLGE_OPC_RX_COM_THRD_ALLOC, false);
2154 req = (struct hclge_rx_com_thrd *)&desc[i].data;
2155
2156 /* the first descriptor sets the NEXT flag, the second one clears it */
2157 if (i == 0)
2158 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2159 else
2160 desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2161
2162 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2163 tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
2164
2165 req->com_thrd[j].high =
2166 cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
2167 req->com_thrd[j].high |=
2168 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2169 req->com_thrd[j].low =
2170 cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
2171 req->com_thrd[j].low |=
2172 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2173 }
2174 }
2175
2176 /* send the two descriptors in one command */
2177 ret = hclge_cmd_send(&hdev->hw, desc, 2);
2178 if (ret)
2179 dev_err(&hdev->pdev->dev,
2180 "common threshold config cmd failed %d\n", ret);
2181 return ret;
2182 }
2183
2184 static int hclge_common_wl_config(struct hclge_dev *hdev,
2185 struct hclge_pkt_buf_alloc *buf_alloc)
2186 {
2187 struct hclge_shared_buf *buf = &buf_alloc->s_buf;
2188 struct hclge_rx_com_wl *req;
2189 struct hclge_desc desc;
2190 int ret;
2191
2192 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
2193
2194 req = (struct hclge_rx_com_wl *)desc.data;
2195 req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
2196 req->com_wl.high |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2197
2198 req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
2199 req->com_wl.low |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2200
2201 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2202 if (ret)
2203 dev_err(&hdev->pdev->dev,
2204 "common waterline config cmd failed %d\n", ret);
2205
2206 return ret;
2207 }
2208
2209 int hclge_buffer_alloc(struct hclge_dev *hdev)
2210 {
2211 struct hclge_pkt_buf_alloc *pkt_buf;
2212 int ret;
2213
2214 pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
2215 if (!pkt_buf)
2216 return -ENOMEM;
2217
2218 ret = hclge_tx_buffer_calc(hdev, pkt_buf);
2219 if (ret) {
2220 dev_err(&hdev->pdev->dev,
2221 "could not calc tx buffer size for all TCs %d\n", ret);
2222 goto out;
2223 }
2224
2225 ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
2226 if (ret) {
2227 dev_err(&hdev->pdev->dev,
2228 "could not alloc tx buffers %d\n", ret);
2229 goto out;
2230 }
2231
2232 ret = hclge_rx_buffer_calc(hdev, pkt_buf);
2233 if (ret) {
2234 dev_err(&hdev->pdev->dev,
2235 "could not calc rx priv buffer size for all TCs %d\n",
2236 ret);
2237 goto out;
2238 }
2239
2240 ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
2241 if (ret) {
2242 dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
2243 ret);
2244 goto out;
2245 }
2246
2247 if (hnae3_dev_dcb_supported(hdev)) {
2248 ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
2249 if (ret) {
2250 dev_err(&hdev->pdev->dev,
2251 "could not configure rx private waterline %d\n",
2252 ret);
2253 goto out;
2254 }
2255
2256 ret = hclge_common_thrd_config(hdev, pkt_buf);
2257 if (ret) {
2258 dev_err(&hdev->pdev->dev,
2259 "could not configure common threshold %d\n",
2260 ret);
2261 goto out;
2262 }
2263 }
2264
2265 ret = hclge_common_wl_config(hdev, pkt_buf);
2266 if (ret)
2267 dev_err(&hdev->pdev->dev,
2268 "could not configure common waterline %d\n", ret);
2269
2270 out:
2271 kfree(pkt_buf);
2272 return ret;
2273 }
2274
2275 static int hclge_init_roce_base_info(struct hclge_vport *vport)
2276 {
2277 struct hnae3_handle *roce = &vport->roce;
2278 struct hnae3_handle *nic = &vport->nic;
2279
2280 roce->rinfo.num_vectors = vport->back->num_roce_msi;
2281
2282 if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
2283 vport->back->num_msi_left == 0)
2284 return -EINVAL;
2285
2286 roce->rinfo.base_vector = vport->back->roce_base_vector;
2287
2288 roce->rinfo.netdev = nic->kinfo.netdev;
2289 roce->rinfo.roce_io_base = vport->back->hw.io_base;
2290
2291 roce->pdev = nic->pdev;
2292 roce->ae_algo = nic->ae_algo;
2293 roce->numa_node_mask = nic->numa_node_mask;
2294
2295 return 0;
2296 }
2297
2298 static int hclge_init_msi(struct hclge_dev *hdev)
2299 {
2300 struct pci_dev *pdev = hdev->pdev;
2301 int vectors;
2302 int i;
2303
2304 vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
2305 hdev->num_msi,
2306 PCI_IRQ_MSI | PCI_IRQ_MSIX);
2307 if (vectors < 0) {
2308 dev_err(&pdev->dev,
2309 "failed(%d) to allocate MSI/MSI-X vectors\n",
2310 vectors);
2311 return vectors;
2312 }
2313 if (vectors < hdev->num_msi)
2314 dev_warn(&hdev->pdev->dev,
2315 "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
2316 hdev->num_msi, vectors);
2317
2318 hdev->num_msi = vectors;
2319 hdev->num_msi_left = vectors;
2320
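/* RoCE vectors start at roce_base_msix_offset behind the base MSI vector */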
2321 hdev->base_msi_vector = pdev->irq;
2322 hdev->roce_base_vector = hdev->base_msi_vector +
2323 hdev->roce_base_msix_offset;
2324
2325 hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
2326 sizeof(u16), GFP_KERNEL);
2327 if (!hdev->vector_status) {
2328 pci_free_irq_vectors(pdev);
2329 return -ENOMEM;
2330 }
2331
2332 for (i = 0; i < hdev->num_msi; i++)
2333 hdev->vector_status[i] = HCLGE_INVALID_VPORT;
2334
2335 hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
2336 sizeof(int), GFP_KERNEL);
2337 if (!hdev->vector_irq) {
2338 pci_free_irq_vectors(pdev);
2339 return -ENOMEM;
2340 }
2341
2342 return 0;
2343 }
2344
2345 static u8 hclge_check_speed_dup(u8 duplex, int speed)
2346 {
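/* only 10M and 100M links support half duplex; all other speeds are forced to full duplex */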
2347 if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
2348 duplex = HCLGE_MAC_FULL;
2349
2350 return duplex;
2351 }
2352
2353 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
2354 u8 duplex)
2355 {
2356 struct hclge_config_mac_speed_dup_cmd *req;
2357 struct hclge_desc desc;
2358 int ret;
2359
2360 req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
2361
2362 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
2363
2364 if (duplex)
2365 hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1);
2366
2367 switch (speed) {
2368 case HCLGE_MAC_SPEED_10M:
2369 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2370 HCLGE_CFG_SPEED_S, 6);
2371 break;
2372 case HCLGE_MAC_SPEED_100M:
2373 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2374 HCLGE_CFG_SPEED_S, 7);
2375 break;
2376 case HCLGE_MAC_SPEED_1G:
2377 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2378 HCLGE_CFG_SPEED_S, 0);
2379 break;
2380 case HCLGE_MAC_SPEED_10G:
2381 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2382 HCLGE_CFG_SPEED_S, 1);
2383 break;
2384 case HCLGE_MAC_SPEED_25G:
2385 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2386 HCLGE_CFG_SPEED_S, 2);
2387 break;
2388 case HCLGE_MAC_SPEED_40G:
2389 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2390 HCLGE_CFG_SPEED_S, 3);
2391 break;
2392 case HCLGE_MAC_SPEED_50G:
2393 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2394 HCLGE_CFG_SPEED_S, 4);
2395 break;
2396 case HCLGE_MAC_SPEED_100G:
2397 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2398 HCLGE_CFG_SPEED_S, 5);
2399 break;
2400 default:
2401 dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
2402 return -EINVAL;
2403 }
2404
2405 hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2406 1);
2407
2408 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2409 if (ret) {
2410 dev_err(&hdev->pdev->dev,
2411 "mac speed/duplex config cmd failed %d.\n", ret);
2412 return ret;
2413 }
2414
2415 return 0;
2416 }
2417
2418 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2419 {
2420 struct hclge_mac *mac = &hdev->hw.mac;
2421 int ret;
2422
2423 duplex = hclge_check_speed_dup(duplex, speed);
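/* if the port does not support autoneg and neither speed nor duplex changed, skip the hardware update */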
2424 if (!mac->support_autoneg && mac->speed == speed &&
2425 mac->duplex == duplex)
2426 return 0;
2427
2428 ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2429 if (ret)
2430 return ret;
2431
2432 hdev->hw.mac.speed = speed;
2433 hdev->hw.mac.duplex = duplex;
2434
2435 return 0;
2436 }
2437
2438 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2439 u8 duplex)
2440 {
2441 struct hclge_vport *vport = hclge_get_vport(handle);
2442 struct hclge_dev *hdev = vport->back;
2443
2444 return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2445 }
2446
2447 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2448 {
2449 struct hclge_config_auto_neg_cmd *req;
2450 struct hclge_desc desc;
2451 u32 flag = 0;
2452 int ret;
2453
2454 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2455
2456 req = (struct hclge_config_auto_neg_cmd *)desc.data;
2457 if (enable)
2458 hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, 1U);
2459 req->cfg_an_cmd_flag = cpu_to_le32(flag);
2460
2461 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2462 if (ret)
2463 dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2464 ret);
2465
2466 return ret;
2467 }
2468
2469 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2470 {
2471 struct hclge_vport *vport = hclge_get_vport(handle);
2472 struct hclge_dev *hdev = vport->back;
2473
2474 if (!hdev->hw.mac.support_autoneg) {
2475 if (enable) {
2476 dev_err(&hdev->pdev->dev,
2477 "autoneg is not supported by current port\n");
2478 return -EOPNOTSUPP;
2479 } else {
2480 return 0;
2481 }
2482 }
2483
2484 return hclge_set_autoneg_en(hdev, enable);
2485 }
2486
2487 static int hclge_get_autoneg(struct hnae3_handle *handle)
2488 {
2489 struct hclge_vport *vport = hclge_get_vport(handle);
2490 struct hclge_dev *hdev = vport->back;
2491 struct phy_device *phydev = hdev->hw.mac.phydev;
2492
2493 if (phydev)
2494 return phydev->autoneg;
2495
2496 return hdev->hw.mac.autoneg;
2497 }
2498
2499 static int hclge_restart_autoneg(struct hnae3_handle *handle)
2500 {
2501 struct hclge_vport *vport = hclge_get_vport(handle);
2502 struct hclge_dev *hdev = vport->back;
2503 int ret;
2504
2505 dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
2506
2507 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2508 if (ret)
2509 return ret;
2510 return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2511 }
2512
2513 static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt)
2514 {
2515 struct hclge_vport *vport = hclge_get_vport(handle);
2516 struct hclge_dev *hdev = vport->back;
2517
2518 if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg)
2519 return hclge_set_autoneg_en(hdev, !halt);
2520
2521 return 0;
2522 }
2523
2524 static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
2525 {
2526 struct hclge_config_fec_cmd *req;
2527 struct hclge_desc desc;
2528 int ret;
2529
2530 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);
2531
2532 req = (struct hclge_config_fec_cmd *)desc.data;
2533 if (fec_mode & BIT(HNAE3_FEC_AUTO))
2534 hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
2535 if (fec_mode & BIT(HNAE3_FEC_RS))
2536 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2537 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
2538 if (fec_mode & BIT(HNAE3_FEC_BASER))
2539 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2540 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
2541
2542 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2543 if (ret)
2544 dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
2545
2546 return ret;
2547 }
2548
2549 static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
2550 {
2551 struct hclge_vport *vport = hclge_get_vport(handle);
2552 struct hclge_dev *hdev = vport->back;
2553 struct hclge_mac *mac = &hdev->hw.mac;
2554 int ret;
2555
2556 if (fec_mode && !(mac->fec_ability & fec_mode)) {
2557 dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
2558 return -EINVAL;
2559 }
2560
2561 ret = hclge_set_fec_hw(hdev, fec_mode);
2562 if (ret)
2563 return ret;
2564
2565 mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
2566 return 0;
2567 }
2568
2569 static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
2570 u8 *fec_mode)
2571 {
2572 struct hclge_vport *vport = hclge_get_vport(handle);
2573 struct hclge_dev *hdev = vport->back;
2574 struct hclge_mac *mac = &hdev->hw.mac;
2575
2576 if (fec_ability)
2577 *fec_ability = mac->fec_ability;
2578 if (fec_mode)
2579 *fec_mode = mac->fec_mode;
2580 }
2581
2582 static int hclge_mac_init(struct hclge_dev *hdev)
2583 {
2584 struct hclge_mac *mac = &hdev->hw.mac;
2585 int ret;
2586
2587 hdev->support_sfp_query = true;
2588 hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2589 ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2590 hdev->hw.mac.duplex);
2591 if (ret) {
2592 dev_err(&hdev->pdev->dev,
2593 "Config mac speed dup fail ret=%d\n", ret);
2594 return ret;
2595 }
2596
2597 if (hdev->hw.mac.support_autoneg) {
2598 ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
2599 if (ret) {
2600 dev_err(&hdev->pdev->dev,
2601 "Config mac autoneg fail ret=%d\n", ret);
2602 return ret;
2603 }
2604 }
2605
2606 mac->link = 0;
2607
2608 if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
2609 ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
2610 if (ret) {
2611 dev_err(&hdev->pdev->dev,
2612 "Fec mode init fail, ret = %d\n", ret);
2613 return ret;
2614 }
2615 }
2616
2617 ret = hclge_set_mac_mtu(hdev, hdev->mps);
2618 if (ret) {
2619 dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2620 return ret;
2621 }
2622
2623 ret = hclge_set_default_loopback(hdev);
2624 if (ret)
2625 return ret;
2626
2627 ret = hclge_buffer_alloc(hdev);
2628 if (ret)
2629 dev_err(&hdev->pdev->dev,
2630 "allocate buffer fail, ret=%d\n", ret);
2631
2632 return ret;
2633 }
2634
2635 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2636 {
2637 if (!test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) &&
2638 !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2639 queue_work_on(cpumask_first(&hdev->affinity_mask), system_wq,
2640 &hdev->mbx_service_task);
2641 }
2642
2643 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2644 {
2645 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2646 !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2647 queue_work_on(cpumask_first(&hdev->affinity_mask), system_wq,
2648 &hdev->rst_service_task);
2649 }
2650
2651 void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
2652 {
2653 if (!test_bit(HCLGE_STATE_DOWN, &hdev->state) &&
2654 !test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2655 !test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state)) {
2656 hdev->hw_stats.stats_timer++;
2657 hdev->fd_arfs_expire_timer++;
2658 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2659 system_wq, &hdev->service_task,
2660 delay_time);
2661 }
2662 }
2663
2664 static int hclge_get_mac_link_status(struct hclge_dev *hdev)
2665 {
2666 struct hclge_link_status_cmd *req;
2667 struct hclge_desc desc;
2668 int link_status;
2669 int ret;
2670
2671 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2672 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2673 if (ret) {
2674 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2675 ret);
2676 return ret;
2677 }
2678
2679 req = (struct hclge_link_status_cmd *)desc.data;
2680 link_status = req->status & HCLGE_LINK_STATUS_UP_M;
2681
2682 return !!link_status;
2683 }
2684
2685 static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
2686 {
2687 unsigned int mac_state;
2688 int link_stat;
2689
2690 if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2691 return 0;
2692
2693 mac_state = hclge_get_mac_link_status(hdev);
2694
2695 if (hdev->hw.mac.phydev) {
2696 if (hdev->hw.mac.phydev->state == PHY_RUNNING)
2697 link_stat = mac_state &
2698 hdev->hw.mac.phydev->link;
2699 else
2700 link_stat = 0;
2701
2702 } else {
2703 link_stat = mac_state;
2704 }
2705
2706 return !!link_stat;
2707 }
2708
2709 static void hclge_update_link_status(struct hclge_dev *hdev)
2710 {
2711 struct hnae3_client *rclient = hdev->roce_client;
2712 struct hnae3_client *client = hdev->nic_client;
2713 struct hnae3_handle *rhandle;
2714 struct hnae3_handle *handle;
2715 int state;
2716 int i;
2717
2718 if (!client)
2719 return;
2720 state = hclge_get_mac_phy_link(hdev);
2721 if (state != hdev->hw.mac.link) {
2722 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2723 handle = &hdev->vport[i].nic;
2724 client->ops->link_status_change(handle, state);
2725 hclge_config_mac_tnl_int(hdev, state);
2726 rhandle = &hdev->vport[i].roce;
2727 if (rclient && rclient->ops->link_status_change)
2728 rclient->ops->link_status_change(rhandle,
2729 state);
2730 }
2731 hdev->hw.mac.link = state;
2732 }
2733 }
2734
2735 static void hclge_update_port_capability(struct hclge_mac *mac)
2736 {
2737 /* update the FEC ability according to the current speed */
2738 hclge_convert_setting_fec(mac);
2739
2740 /* the firmware cannot identify backplane modules, so derive the
2741  * module type from the configured media type
2742  */
2743 if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
2744 mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
2745 mac->module_type = HNAE3_MODULE_TYPE_KR;
2746 else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2747 mac->module_type = HNAE3_MODULE_TYPE_TP;
2748
2749 if (mac->support_autoneg) {
2750 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
2751 linkmode_copy(mac->advertising, mac->supported);
2752 } else {
2753 linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
2754 mac->supported);
2755 linkmode_zero(mac->advertising);
2756 }
2757 }
2758
2759 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
2760 {
2761 struct hclge_sfp_info_cmd *resp;
2762 struct hclge_desc desc;
2763 int ret;
2764
2765 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2766 resp = (struct hclge_sfp_info_cmd *)desc.data;
2767 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2768 if (ret == -EOPNOTSUPP) {
2769 dev_warn(&hdev->pdev->dev,
2770 "IMP does not support get SFP speed %d\n", ret);
2771 return ret;
2772 } else if (ret) {
2773 dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
2774 return ret;
2775 }
2776
2777 *speed = le32_to_cpu(resp->speed);
2778
2779 return 0;
2780 }
2781
2782 static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
2783 {
2784 struct hclge_sfp_info_cmd *resp;
2785 struct hclge_desc desc;
2786 int ret;
2787
2788 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2789 resp = (struct hclge_sfp_info_cmd *)desc.data;
2790
2791 resp->query_type = QUERY_ACTIVE_SPEED;
2792
2793 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2794 if (ret == -EOPNOTSUPP) {
2795 dev_warn(&hdev->pdev->dev,
2796 "IMP does not support get SFP info %d\n", ret);
2797 return ret;
2798 } else if (ret) {
2799 dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
2800 return ret;
2801 }
2802
2803 mac->speed = le32_to_cpu(resp->speed);
2804
2805 /* a zero speed_ability indicates an old firmware that does not report
2806  * the fields below; fall back to the plain SFP speed query in that case */
2807 if (resp->speed_ability) {
2808 mac->module_type = le32_to_cpu(resp->module_type);
2809 mac->speed_ability = le32_to_cpu(resp->speed_ability);
2810 mac->autoneg = resp->autoneg;
2811 mac->support_autoneg = resp->autoneg_ability;
2812 mac->speed_type = QUERY_ACTIVE_SPEED;
2813 if (!resp->active_fec)
2814 mac->fec_mode = 0;
2815 else
2816 mac->fec_mode = BIT(resp->active_fec);
2817 } else {
2818 mac->speed_type = QUERY_SFP_SPEED;
2819 }
2820
2821 return 0;
2822 }
2823
2824 static int hclge_update_port_info(struct hclge_dev *hdev)
2825 {
2826 struct hclge_mac *mac = &hdev->hw.mac;
2827 int speed = HCLGE_MAC_SPEED_UNKNOWN;
2828 int ret;
2829
2830 /* copper ports do not need the SFP query */
2831 if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2832 return 0;
2833
2834 /* skip the query if the firmware does not support it */
2835 if (!hdev->support_sfp_query)
2836 return 0;
2837
2838 if (hdev->pdev->revision >= 0x21)
2839 ret = hclge_get_sfp_info(hdev, mac);
2840 else
2841 ret = hclge_get_sfp_speed(hdev, &speed);
2842
2843 if (ret == -EOPNOTSUPP) {
2844 hdev->support_sfp_query = false;
2845 return ret;
2846 } else if (ret) {
2847 return ret;
2848 }
2849
2850 if (hdev->pdev->revision >= 0x21) {
2851 if (mac->speed_type == QUERY_ACTIVE_SPEED) {
2852 hclge_update_port_capability(mac);
2853 return 0;
2854 }
2855 return hclge_cfg_mac_speed_dup(hdev, mac->speed,
2856 HCLGE_MAC_FULL);
2857 } else {
2858 if (speed == HCLGE_MAC_SPEED_UNKNOWN)
2859 return 0;
2860
2861 /* SFP ports must be configured with full duplex */
2862 return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
2863 }
2864 }
2865
2866 static int hclge_get_status(struct hnae3_handle *handle)
2867 {
2868 struct hclge_vport *vport = hclge_get_vport(handle);
2869 struct hclge_dev *hdev = vport->back;
2870
2871 hclge_update_link_status(hdev);
2872
2873 return hdev->hw.mac.link;
2874 }
2875
2876 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
2877 {
2878 u32 rst_src_reg, cmdq_src_reg, msix_src_reg;
2879
2880 /* fetch the pending events from the reset, msix and cmdq status registers */
2881 rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
2882 cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
2883 msix_src_reg = hclge_read_dev(&hdev->hw,
2884 HCLGE_VECTOR0_PF_OTHER_INT_STS_REG);
2885
2886 /* Assumption: if reset and mailbox events are reported together,
2887  * only the reset event is handled in this pass and the mailbox event
2888  * is deferred. Since the RX CMDQ event is not cleared here, hardware
2889  * will raise another interrupt for the mailbox afterwards.
2890  *
2891  * Check for vector0 reset event sources first.
2892  */
2893
2894 if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
2895 dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
2896 set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
2897 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2898 *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
2899 hdev->rst_stats.imp_rst_cnt++;
2900 return HCLGE_VECTOR0_EVENT_RST;
2901 }
2902
2903 if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
2904 dev_info(&hdev->pdev->dev, "global reset interrupt\n");
2905 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2906 set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
2907 *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
2908 hdev->rst_stats.global_rst_cnt++;
2909 return HCLGE_VECTOR0_EVENT_RST;
2910 }
2911
2912 /* check for vector0 msix event source */
2913 if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
2914 dev_info(&hdev->pdev->dev, "received event 0x%x\n",
2915 msix_src_reg);
2916 *clearval = msix_src_reg;
2917 return HCLGE_VECTOR0_EVENT_ERR;
2918 }
2919
2920 /* check for vector0 mailbox (CMDQ RX) event source */
2921 if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
2922 cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
2923 *clearval = cmdq_src_reg;
2924 return HCLGE_VECTOR0_EVENT_MBX;
2925 }
2926
2927 /* print other vector0 event sources */
2928 dev_info(&hdev->pdev->dev,
2929 "CMDQ INT status:0x%x, other INT status:0x%x\n",
2930 cmdq_src_reg, msix_src_reg);
2931 *clearval = msix_src_reg;
2932
2933 return HCLGE_VECTOR0_EVENT_OTHER;
2934 }
2935
2936 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
2937 u32 regclr)
2938 {
2939 switch (event_type) {
2940 case HCLGE_VECTOR0_EVENT_RST:
2941 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
2942 break;
2943 case HCLGE_VECTOR0_EVENT_MBX:
2944 hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
2945 break;
2946 default:
2947 break;
2948 }
2949 }
2950
2951 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
2952 {
2953 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
2954 BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
2955 BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
2956 BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
2957 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
2958 }
2959
2960 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
2961 {
2962 writel(enable ? 1 : 0, vector->addr);
2963 }
2964
2965 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
2966 {
2967 struct hclge_dev *hdev = data;
2968 u32 clearval = 0;
2969 u32 event_cause;
2970
2971 hclge_enable_vector(&hdev->misc_vector, false);
2972 event_cause = hclge_check_event_cause(hdev, &clearval);
2973
2974 /* vector 0 is shared by reset, mailbox and error event sources */
2975 switch (event_cause) {
2976 case HCLGE_VECTOR0_EVENT_ERR:
2977 /* The required reset level is not known at this point. It can only
2978  * be decided after the type of error that caused this event has been
2979  * queried. Therefore, for now:
2980  * 1. assert an HNAE3_UNKNOWN_RESET request, which marks the level as
2981  *    still to be decided, and
2982  * 2. schedule the reset service task; when it sees the
2983  *    HNAE3_UNKNOWN_RESET request it will fetch and decode the error
2984  *    type and pick the correct reset level.
2985  */
2986
2987 set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
2988 /* fall through */
2989 case HCLGE_VECTOR0_EVENT_RST:
2990 hclge_reset_task_schedule(hdev);
2991 break;
2992 case HCLGE_VECTOR0_EVENT_MBX:
2993 /* If we are here then either:
2994  * 1. no mailbox task is being handled and none is scheduled, or
2995  * 2. a mailbox task is being handled but nothing more is scheduled.
2996  * In both cases the mailbox task should be scheduled again, because
2997  * this interrupt means more mailbox messages have been reported by
2998  * the hardware.
2999  */
3000
3001
3002 hclge_mbx_task_schedule(hdev);
3003 break;
3004 default:
3005 dev_warn(&hdev->pdev->dev,
3006 "received unknown or unhandled event of vector0\n");
3007 break;
3008 }
3009
3010 hclge_clear_event_cause(hdev, event_cause, clearval);
3011
3012 /* Re-enable the interrupt only if it was not caused by a reset. A
3013  * clearval of 0 means the status may already have been cleared by
3014  * hardware before the driver read the status register; in that case
3015  * vector0 must be re-enabled as well.
3016  */
3017 if (!clearval ||
3018 event_cause == HCLGE_VECTOR0_EVENT_MBX) {
3019 hclge_enable_vector(&hdev->misc_vector, true);
3020 }
3021
3022 return IRQ_HANDLED;
3023 }
3024
3025 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
3026 {
3027 if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
3028 dev_warn(&hdev->pdev->dev,
3029 "vector(vector_id %d) has been freed.\n", vector_id);
3030 return;
3031 }
3032
3033 hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
3034 hdev->num_msi_left += 1;
3035 hdev->num_msi_used -= 1;
3036 }
3037
3038 static void hclge_get_misc_vector(struct hclge_dev *hdev)
3039 {
3040 struct hclge_misc_vector *vector = &hdev->misc_vector;
3041
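/* MSI-X vector 0 is reserved for the misc (reset, mailbox and error) interrupt */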
3042 vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
3043
3044 vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
3045 hdev->vector_status[0] = 0;
3046
3047 hdev->num_msi_left -= 1;
3048 hdev->num_msi_used += 1;
3049 }
3050
3051 static void hclge_irq_affinity_notify(struct irq_affinity_notify *notify,
3052 const cpumask_t *mask)
3053 {
3054 struct hclge_dev *hdev = container_of(notify, struct hclge_dev,
3055 affinity_notify);
3056
3057 cpumask_copy(&hdev->affinity_mask, mask);
3058 }
3059
3060 static void hclge_irq_affinity_release(struct kref *ref)
3061 {
3062 }
3063
3064 static void hclge_misc_affinity_setup(struct hclge_dev *hdev)
3065 {
3066 irq_set_affinity_hint(hdev->misc_vector.vector_irq,
3067 &hdev->affinity_mask);
3068
3069 hdev->affinity_notify.notify = hclge_irq_affinity_notify;
3070 hdev->affinity_notify.release = hclge_irq_affinity_release;
3071 irq_set_affinity_notifier(hdev->misc_vector.vector_irq,
3072 &hdev->affinity_notify);
3073 }
3074
3075 static void hclge_misc_affinity_teardown(struct hclge_dev *hdev)
3076 {
3077 irq_set_affinity_notifier(hdev->misc_vector.vector_irq, NULL);
3078 irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL);
3079 }
3080
3081 static int hclge_misc_irq_init(struct hclge_dev *hdev)
3082 {
3083 int ret;
3084
3085 hclge_get_misc_vector(hdev);
3086
3087 /* this irq is freed explicitly in hclge_misc_irq_uninit() */
3088 ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
3089 0, "hclge_misc", hdev);
3090 if (ret) {
3091 hclge_free_vector(hdev, 0);
3092 dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
3093 hdev->misc_vector.vector_irq);
3094 }
3095
3096 return ret;
3097 }
3098
3099 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
3100 {
3101 free_irq(hdev->misc_vector.vector_irq, hdev);
3102 hclge_free_vector(hdev, 0);
3103 }
3104
3105 int hclge_notify_client(struct hclge_dev *hdev,
3106 enum hnae3_reset_notify_type type)
3107 {
3108 struct hnae3_client *client = hdev->nic_client;
3109 u16 i;
3110
3111 if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client)
3112 return 0;
3113
3114 if (!client->ops->reset_notify)
3115 return -EOPNOTSUPP;
3116
3117 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3118 struct hnae3_handle *handle = &hdev->vport[i].nic;
3119 int ret;
3120
3121 ret = client->ops->reset_notify(handle, type);
3122 if (ret) {
3123 dev_err(&hdev->pdev->dev,
3124 "notify nic client failed %d(%d)\n", type, ret);
3125 return ret;
3126 }
3127 }
3128
3129 return 0;
3130 }
3131
3132 static int hclge_notify_roce_client(struct hclge_dev *hdev,
3133 enum hnae3_reset_notify_type type)
3134 {
3135 struct hnae3_client *client = hdev->roce_client;
3136 int ret = 0;
3137 u16 i;
3138
3139 if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client)
3140 return 0;
3141
3142 if (!client->ops->reset_notify)
3143 return -EOPNOTSUPP;
3144
3145 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3146 struct hnae3_handle *handle = &hdev->vport[i].roce;
3147
3148 ret = client->ops->reset_notify(handle, type);
3149 if (ret) {
3150 dev_err(&hdev->pdev->dev,
3151 "notify roce client failed %d(%d)",
3152 type, ret);
3153 return ret;
3154 }
3155 }
3156
3157 return ret;
3158 }
3159
3160 static int hclge_reset_wait(struct hclge_dev *hdev)
3161 {
3162 #define HCLGE_RESET_WAIT_MS 100
3163 #define HCLGE_RESET_WAIT_CNT 200
3164 u32 val, reg, reg_bit;
3165 u32 cnt = 0;
3166
3167 switch (hdev->reset_type) {
3168 case HNAE3_IMP_RESET:
3169 reg = HCLGE_GLOBAL_RESET_REG;
3170 reg_bit = HCLGE_IMP_RESET_BIT;
3171 break;
3172 case HNAE3_GLOBAL_RESET:
3173 reg = HCLGE_GLOBAL_RESET_REG;
3174 reg_bit = HCLGE_GLOBAL_RESET_BIT;
3175 break;
3176 case HNAE3_FUNC_RESET:
3177 reg = HCLGE_FUN_RST_ING;
3178 reg_bit = HCLGE_FUN_RST_ING_B;
3179 break;
3180 case HNAE3_FLR_RESET:
3181 break;
3182 default:
3183 dev_err(&hdev->pdev->dev,
3184 "Wait for unsupported reset type: %d\n",
3185 hdev->reset_type);
3186 return -EINVAL;
3187 }
3188
3189 if (hdev->reset_type == HNAE3_FLR_RESET) {
3190 while (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state) &&
3191 cnt++ < HCLGE_RESET_WAIT_CNT)
3192 msleep(HCLGE_RESET_WAIT_MS);
3193
3194 if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) {
3195 dev_err(&hdev->pdev->dev,
3196 "flr wait timeout: %d\n", cnt);
3197 return -EBUSY;
3198 }
3199
3200 return 0;
3201 }
3202
3203 val = hclge_read_dev(&hdev->hw, reg);
3204 while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
3205 msleep(HCLGE_RESET_WAIT_MS);
3206 val = hclge_read_dev(&hdev->hw, reg);
3207 cnt++;
3208 }
3209
3210 if (cnt >= HCLGE_RESET_WAIT_CNT) {
3211 dev_warn(&hdev->pdev->dev,
3212 "Wait for reset timeout: %d\n", hdev->reset_type);
3213 return -EBUSY;
3214 }
3215
3216 return 0;
3217 }
3218
3219 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
3220 {
3221 struct hclge_vf_rst_cmd *req;
3222 struct hclge_desc desc;
3223
3224 req = (struct hclge_vf_rst_cmd *)desc.data;
3225 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
3226 req->dest_vfid = func_id;
3227
3228 if (reset)
3229 req->vf_rst = 0x1;
3230
3231 return hclge_cmd_send(&hdev->hw, &desc, 1);
3232 }
3233
3234 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
3235 {
3236 int i;
3237
3238 for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
3239 struct hclge_vport *vport = &hdev->vport[i];
3240 int ret;
3241
3242 /* send the command to set or clear the VF's reset-pending state */
3243 ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
3244 if (ret) {
3245 dev_err(&hdev->pdev->dev,
3246 "set vf(%d) rst failed %d!\n",
3247 vport->vport_id, ret);
3248 return ret;
3249 }
3250
3251 if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3252 continue;
3253
3254 /* Inform the VF that a reset has been asserted. This may fail, for
3255  * example when the VF driver is not loaded, in which case only a
3256  * warning is printed.
3257  */
3258 ret = hclge_inform_reset_assert_to_vf(vport);
3259 if (ret)
3260 dev_warn(&hdev->pdev->dev,
3261 "inform reset to vf(%d) failed %d!\n",
3262 vport->vport_id, ret);
3263 }
3264
3265 return 0;
3266 }
3267
3268 static int hclge_func_reset_sync_vf(struct hclge_dev *hdev)
3269 {
3270 struct hclge_pf_rst_sync_cmd *req;
3271 struct hclge_desc desc;
3272 int cnt = 0;
3273 int ret;
3274
3275 req = (struct hclge_pf_rst_sync_cmd *)desc.data;
3276 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RST_RDY, true);
3277
3278 do {
3279 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3280
3281 /* old firmware does not support this query; in that case just wait
3282  * a fixed time for the VFs to stop their queues and report success */
3283 if (ret == -EOPNOTSUPP) {
3284 msleep(HCLGE_RESET_SYNC_TIME);
3285 return 0;
3286 } else if (ret) {
3287 dev_err(&hdev->pdev->dev, "sync with VF fail %d!\n",
3288 ret);
3289 return ret;
3290 } else if (req->all_vf_ready) {
3291 return 0;
3292 }
3293 msleep(HCLGE_PF_RESET_SYNC_TIME);
3294 hclge_cmd_reuse_desc(&desc, true);
3295 } while (cnt++ < HCLGE_PF_RESET_SYNC_CNT);
3296
3297 dev_err(&hdev->pdev->dev, "sync with VF timeout!\n");
3298 return -ETIME;
3299 }
3300
3301 void hclge_report_hw_error(struct hclge_dev *hdev,
3302 enum hnae3_hw_error_type type)
3303 {
3304 struct hnae3_client *client = hdev->nic_client;
3305 u16 i;
3306
3307 if (!client || !client->ops->process_hw_error ||
3308 !test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state))
3309 return;
3310
3311 for (i = 0; i < hdev->num_vmdq_vport + 1; i++)
3312 client->ops->process_hw_error(&hdev->vport[i].nic, type);
3313 }
3314
3315 static void hclge_handle_imp_error(struct hclge_dev *hdev)
3316 {
3317 u32 reg_val;
3318
3319 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3320 if (reg_val & BIT(HCLGE_VECTOR0_IMP_RD_POISON_B)) {
3321 hclge_report_hw_error(hdev, HNAE3_IMP_RD_POISON_ERROR);
3322 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_RD_POISON_B);
3323 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3324 }
3325
3326 if (reg_val & BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B)) {
3327 hclge_report_hw_error(hdev, HNAE3_CMDQ_ECC_ERROR);
3328 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B);
3329 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3330 }
3331 }
3332
3333 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
3334 {
3335 struct hclge_desc desc;
3336 struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
3337 int ret;
3338
3339 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
3340 hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
3341 req->fun_reset_vfid = func_id;
3342
3343 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3344 if (ret)
3345 dev_err(&hdev->pdev->dev,
3346 "send function reset cmd fail, status =%d\n", ret);
3347
3348 return ret;
3349 }
3350
3351 static void hclge_do_reset(struct hclge_dev *hdev)
3352 {
3353 struct hnae3_handle *handle = &hdev->vport[0].nic;
3354 struct pci_dev *pdev = hdev->pdev;
3355 u32 val;
3356
3357 if (hclge_get_hw_reset_stat(handle)) {
3358 dev_info(&pdev->dev, "Hardware reset not finished\n");
3359 dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
3360 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
3361 hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
3362 return;
3363 }
3364
3365 switch (hdev->reset_type) {
3366 case HNAE3_GLOBAL_RESET:
3367 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
3368 hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
3369 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
3370 dev_info(&pdev->dev, "Global Reset requested\n");
3371 break;
3372 case HNAE3_FUNC_RESET:
3373 dev_info(&pdev->dev, "PF Reset requested\n");
3374 /* mark the function reset as pending and let the reset task handle it */
3375 set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
3376 hclge_reset_task_schedule(hdev);
3377 break;
3378 case HNAE3_FLR_RESET:
3379 dev_info(&pdev->dev, "FLR requested\n");
3380 /* mark the FLR as pending and let the reset task handle it */
3381 set_bit(HNAE3_FLR_RESET, &hdev->reset_pending);
3382 hclge_reset_task_schedule(hdev);
3383 break;
3384 default:
3385 dev_warn(&pdev->dev,
3386 "Unsupported reset type: %d\n", hdev->reset_type);
3387 break;
3388 }
3389 }
3390
3391 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
3392 unsigned long *addr)
3393 {
3394 enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
3395 struct hclge_dev *hdev = ae_dev->priv;
3396
3397 /* first, resolve any unknown reset type to a known type */
3398 if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
3399 /* the return value of this call is intentionally ignored, since a
3400  * reset request of some level will be raised in any case
3401  */
3402 hclge_handle_hw_msix_error(hdev, addr);
3403 clear_bit(HNAE3_UNKNOWN_RESET, addr);
3404
3405 /* Clearing of the error event that raised the interrupt was deferred
3406  * because it could not be done in interrupt context (which is why the
3407  * UNKNOWN reset type exists). Now that the errors have been handled
3408  * and cleared in hardware, it is safe to re-enable the misc interrupt
3409  * vector again. This is an exception to the normal flow.
3410  */
3411 hclge_enable_vector(&hdev->misc_vector, true);
3412 }
3413
3414 /* return the highest priority reset level amongst all pending requests */
3415 if (test_bit(HNAE3_IMP_RESET, addr)) {
3416 rst_level = HNAE3_IMP_RESET;
3417 clear_bit(HNAE3_IMP_RESET, addr);
3418 clear_bit(HNAE3_GLOBAL_RESET, addr);
3419 clear_bit(HNAE3_FUNC_RESET, addr);
3420 } else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
3421 rst_level = HNAE3_GLOBAL_RESET;
3422 clear_bit(HNAE3_GLOBAL_RESET, addr);
3423 clear_bit(HNAE3_FUNC_RESET, addr);
3424 } else if (test_bit(HNAE3_FUNC_RESET, addr)) {
3425 rst_level = HNAE3_FUNC_RESET;
3426 clear_bit(HNAE3_FUNC_RESET, addr);
3427 } else if (test_bit(HNAE3_FLR_RESET, addr)) {
3428 rst_level = HNAE3_FLR_RESET;
3429 clear_bit(HNAE3_FLR_RESET, addr);
3430 }
3431
3432 if (hdev->reset_type != HNAE3_NONE_RESET &&
3433 rst_level < hdev->reset_type)
3434 return HNAE3_NONE_RESET;
3435
3436 return rst_level;
3437 }
3438
3439 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
3440 {
3441 u32 clearval = 0;
3442
3443 switch (hdev->reset_type) {
3444 case HNAE3_IMP_RESET:
3445 clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3446 break;
3447 case HNAE3_GLOBAL_RESET:
3448 clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3449 break;
3450 default:
3451 break;
3452 }
3453
3454 if (!clearval)
3455 return;
3456
3457 /* for revision 0x20 hardware, the reset interrupt source can only be
3458  * cleared after the hardware reset is done
3459  */
3460 if (hdev->pdev->revision == 0x20)
3461 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG,
3462 clearval);
3463
3464 hclge_enable_vector(&hdev->misc_vector, true);
3465 }
3466
3467 static int hclge_reset_prepare_down(struct hclge_dev *hdev)
3468 {
3469 int ret = 0;
3470
3471 switch (hdev->reset_type) {
3472 case HNAE3_FUNC_RESET:
3473 /* fall through */
3474 case HNAE3_FLR_RESET:
3475 ret = hclge_set_all_vf_rst(hdev, true);
3476 break;
3477 default:
3478 break;
3479 }
3480
3481 return ret;
3482 }
3483
3484 static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable)
3485 {
3486 u32 reg_val;
3487
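/* toggle the software-ready bit used to handshake with the firmware around a reset */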
3488 reg_val = hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG);
3489 if (enable)
3490 reg_val |= HCLGE_NIC_SW_RST_RDY;
3491 else
3492 reg_val &= ~HCLGE_NIC_SW_RST_RDY;
3493
3494 hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val);
3495 }
3496
3497 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
3498 {
3499 u32 reg_val;
3500 int ret = 0;
3501
3502 switch (hdev->reset_type) {
3503 case HNAE3_FUNC_RESET:
3504 /* make sure all running VFs are ready before asserting the PF
3505  * function reset
3506  */
3507 ret = hclge_func_reset_sync_vf(hdev);
3508 if (ret)
3509 return ret;
3510
3511 ret = hclge_func_reset_cmd(hdev, 0);
3512 if (ret) {
3513 dev_err(&hdev->pdev->dev,
3514 "asserting function reset fail %d!\n", ret);
3515 return ret;
3516 }
3517
3518 /* After the function reset is asserted, no mailbox handling or
3519  * command to the firmware is valid until the command queue has been
3520  * re-initialized, so disable the command queue here.
3521  */
3522
3523 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3524 hdev->rst_stats.pf_rst_cnt++;
3525 break;
3526 case HNAE3_FLR_RESET:
3527 /* make sure all running VFs are ready before letting the FLR
3528  * continue
3529  */
3530 ret = hclge_func_reset_sync_vf(hdev);
3531 if (ret)
3532 return ret;
3533
3534 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3535 set_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
3536 hdev->rst_stats.flr_rst_cnt++;
3537 break;
3538 case HNAE3_IMP_RESET:
3539 hclge_handle_imp_error(hdev);
3540 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3541 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
3542 BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
3543 break;
3544 default:
3545 break;
3546 }
3547
3548 /* inform hardware that the preparatory work is done */
3549 msleep(HCLGE_RESET_SYNC_TIME);
3550 hclge_reset_handshake(hdev, true);
3551 dev_info(&hdev->pdev->dev, "prepare wait ok\n");
3552
3553 return ret;
3554 }
3555
3556 static bool hclge_reset_err_handle(struct hclge_dev *hdev)
3557 {
3558 #define MAX_RESET_FAIL_CNT 5
3559
3560 if (hdev->reset_pending) {
3561 dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
3562 hdev->reset_pending);
3563 return true;
3564 } else if (hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS) &
3565 HCLGE_RESET_INT_M) {
3566 dev_info(&hdev->pdev->dev,
3567 "reset failed because of a new reset interrupt\n");
3568 hclge_clear_reset_cause(hdev);
3569 return false;
3570 } else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) {
3571 hdev->rst_stats.reset_fail_cnt++;
3572 set_bit(hdev->reset_type, &hdev->reset_pending);
3573 dev_info(&hdev->pdev->dev,
3574 "re-schedule reset task(%d)\n",
3575 hdev->rst_stats.reset_fail_cnt);
3576 return true;
3577 }
3578
3579 hclge_clear_reset_cause(hdev);
3580
3581 /* recover the handshake status when the reset fails */
3582 hclge_reset_handshake(hdev, true);
3583
3584 dev_err(&hdev->pdev->dev, "Reset fail!\n");
3585 return false;
3586 }
3587
3588 static int hclge_set_rst_done(struct hclge_dev *hdev)
3589 {
3590 struct hclge_pf_rst_done_cmd *req;
3591 struct hclge_desc desc;
3592 int ret;
3593
3594 req = (struct hclge_pf_rst_done_cmd *)desc.data;
3595 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PF_RST_DONE, false);
3596 req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT;
3597
3598 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3599
3600 /* to remain compatible with old firmware that does not support the
3601  * HCLGE_OPC_PF_RST_DONE command, only print a warning and report
3602  * success */
3603 if (ret == -EOPNOTSUPP) {
3604 dev_warn(&hdev->pdev->dev,
3605 "current firmware does not support command(0x%x)!\n",
3606 HCLGE_OPC_PF_RST_DONE);
3607 return 0;
3608 } else if (ret) {
3609 dev_err(&hdev->pdev->dev, "assert PF reset done fail %d!\n",
3610 ret);
3611 }
3612
3613 return ret;
3614 }
3615
3616 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
3617 {
3618 int ret = 0;
3619
3620 switch (hdev->reset_type) {
3621 case HNAE3_FUNC_RESET:
3622 /* fall through */
3623 case HNAE3_FLR_RESET:
3624 ret = hclge_set_all_vf_rst(hdev, false);
3625 break;
3626 case HNAE3_GLOBAL_RESET:
3627 /* fall through */
3628 case HNAE3_IMP_RESET:
3629 ret = hclge_set_rst_done(hdev);
3630 break;
3631 default:
3632 break;
3633 }
3634
3635 /* clear the handshake status after the re-initialization is done */
3636 hclge_reset_handshake(hdev, false);
3637
3638 return ret;
3639 }
3640
3641 static int hclge_reset_stack(struct hclge_dev *hdev)
3642 {
3643 int ret;
3644
3645 ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
3646 if (ret)
3647 return ret;
3648
3649 ret = hclge_reset_ae_dev(hdev->ae_dev);
3650 if (ret)
3651 return ret;
3652
3653 ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
3654 if (ret)
3655 return ret;
3656
3657 return hclge_notify_client(hdev, HNAE3_RESTORE_CLIENT);
3658 }
3659
3660 static void hclge_reset(struct hclge_dev *hdev)
3661 {
3662 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3663 enum hnae3_reset_type reset_level;
3664 int ret;
3665
3666 /* record the reset type in ae_dev as well, so that the enet layer
3667  * can tell that the device is undergoing a reset
3668  */
3669 ae_dev->reset_type = hdev->reset_type;
3670 hdev->rst_stats.reset_cnt++;
3671
3672 ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
3673 if (ret)
3674 goto err_reset;
3675
3676 ret = hclge_reset_prepare_down(hdev);
3677 if (ret)
3678 goto err_reset;
3679
3680 rtnl_lock();
3681 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
3682 if (ret)
3683 goto err_reset_lock;
3684
3685 rtnl_unlock();
3686
3687 ret = hclge_reset_prepare_wait(hdev);
3688 if (ret)
3689 goto err_reset;
3690
3691 if (hclge_reset_wait(hdev))
3692 goto err_reset;
3693
3694 hdev->rst_stats.hw_reset_done_cnt++;
3695
3696 ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
3697 if (ret)
3698 goto err_reset;
3699
3700 rtnl_lock();
3701
3702 ret = hclge_reset_stack(hdev);
3703 if (ret)
3704 goto err_reset_lock;
3705
3706 hclge_clear_reset_cause(hdev);
3707
3708 ret = hclge_reset_prepare_up(hdev);
3709 if (ret)
3710 goto err_reset_lock;
3711
3712 rtnl_unlock();
3713
3714 ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
3715 /* an error from the RoCE init notification is only ignored once the
3716  * reset has already failed HCLGE_RESET_MAX_FAIL_CNT - 1 times
3717  */
3718 if (ret &&
3719 hdev->rst_stats.reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1)
3720 goto err_reset;
3721
3722 rtnl_lock();
3723
3724 ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
3725 if (ret)
3726 goto err_reset_lock;
3727
3728 rtnl_unlock();
3729
3730 ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
3731 if (ret)
3732 goto err_reset;
3733
3734 hdev->last_reset_time = jiffies;
3735 hdev->rst_stats.reset_fail_cnt = 0;
3736 hdev->rst_stats.reset_done_cnt++;
3737 ae_dev->reset_type = HNAE3_NONE_RESET;
3738
3739 /* if default_reset_request still holds a reset request (usually a
3740  * higher level one recorded by the error handling code), schedule it
3741  * now, since some errors can only be fixed by that kind of reset.
3742  */
3743 reset_level = hclge_get_reset_level(ae_dev,
3744 &hdev->default_reset_request);
3745 if (reset_level != HNAE3_NONE_RESET)
3746 set_bit(reset_level, &hdev->reset_request);
3747
3748 return;
3749
3750 err_reset_lock:
3751 rtnl_unlock();
3752 err_reset:
3753 if (hclge_reset_err_handle(hdev))
3754 hclge_reset_task_schedule(hdev);
3755 }
3756
3757 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
3758 {
3759 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
3760 struct hclge_dev *hdev = ae_dev->priv;
3761
3762 /* Reset requests may arrive here from several paths: from the stack
3763  * (e.g. a TX watchdog timeout) and from the error handling code via
3764  * the reset timer when a default reset level has been recorded.
3765  *
3766  * The handling below works as follows:
3767  * 1. Requests that arrive within HCLGE_RESET_INTERVAL of the previous
3768  *    reset are not acted on immediately; the reset timer is re-armed
3769  *    instead so the request is revisited later.
3770  * 2. If a default reset level was recorded (typically by the error
3771  *    handling code), that level is used for this request.
3772  * 3. Otherwise, after a long quiet period the level falls back to a
3773  *    function level reset.
3774  * The level is escalated after each request (up to a global reset) so
3775  * that repeated failures eventually trigger a more thorough reset.
3776  */
3777 if (!handle)
3778 handle = &hdev->vport[0].nic;
3779
3780 if (time_before(jiffies, (hdev->last_reset_time +
3781 HCLGE_RESET_INTERVAL))) {
3782 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
3783 return;
3784 } else if (hdev->default_reset_request)
3785 hdev->reset_level =
3786 hclge_get_reset_level(ae_dev,
3787 &hdev->default_reset_request);
3788 else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ)))
3789 hdev->reset_level = HNAE3_FUNC_RESET;
3790
3791 dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
3792 hdev->reset_level);
3793
3794 /* request the reset and schedule the reset task */
3795 set_bit(hdev->reset_level, &hdev->reset_request);
3796 hclge_reset_task_schedule(hdev);
3797
3798 if (hdev->reset_level < HNAE3_GLOBAL_RESET)
3799 hdev->reset_level++;
3800 }
3801
3802 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
3803 enum hnae3_reset_type rst_type)
3804 {
3805 struct hclge_dev *hdev = ae_dev->priv;
3806
3807 set_bit(rst_type, &hdev->default_reset_request);
3808 }
3809
3810 static void hclge_reset_timer(struct timer_list *t)
3811 {
3812 struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
3813
3814 /* if default_reset_request has no bit set, the request has already
3815  * been handled, so there is nothing left to do here
3816  */
3817 if (!hdev->default_reset_request)
3818 return;
3819
3820 dev_info(&hdev->pdev->dev,
3821 "triggering reset in reset timer\n");
3822 hclge_reset_event(hdev->pdev, NULL);
3823 }
3824
3825 static void hclge_reset_subtask(struct hclge_dev *hdev)
3826 {
3827 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3828
3829 /* Handle any reset the hardware already has pending first: wait for
3830  * the hardware to finish it and then rebuild the stack on top of it
3831  * (hclge_reset()). Only afterwards look at newly requested resets,
3832  * which still need to be triggered in hardware (hclge_do_reset()).
3833  *
3834  * reset_pending holds resets that are already in progress in hardware,
3835  * while reset_request holds resets that have only been asked for by
3836  * the driver or the stack and have not been started yet.
3837  */
3838 hdev->last_reset_time = jiffies;
3839 hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending);
3840 if (hdev->reset_type != HNAE3_NONE_RESET)
3841 hclge_reset(hdev);
3842
3843 /* check if there are any new reset requests that need to be honored */
3844 hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request);
3845 if (hdev->reset_type != HNAE3_NONE_RESET)
3846 hclge_do_reset(hdev);
3847
3848 hdev->reset_type = HNAE3_NONE_RESET;
3849 }
3850
3851 static void hclge_reset_service_task(struct work_struct *work)
3852 {
3853 struct hclge_dev *hdev =
3854 container_of(work, struct hclge_dev, rst_service_task);
3855
3856 if (test_and_set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
3857 return;
3858
3859 clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
3860
3861 hclge_reset_subtask(hdev);
3862
3863 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
3864 }
3865
3866 static void hclge_mailbox_service_task(struct work_struct *work)
3867 {
3868 struct hclge_dev *hdev =
3869 container_of(work, struct hclge_dev, mbx_service_task);
3870
3871 if (test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3872 return;
3873
3874 clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
3875
3876 hclge_mbx_handler(hdev);
3877
3878 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3879 }
3880
3881 static void hclge_update_vport_alive(struct hclge_dev *hdev)
3882 {
3883 int i;
3884
3885 /* start from vport 1, since vport 0 is the PF and is always alive */
3886 for (i = 1; i < hdev->num_alloc_vport; i++) {
3887 struct hclge_vport *vport = &hdev->vport[i];
3888
3889 if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
3890 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
3891
3892 /* if the VF is no longer alive, fall back to the default frame size */
3893 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3894 vport->mps = HCLGE_MAC_DEFAULT_FRAME;
3895 }
3896 }
3897
3898 static void hclge_service_task(struct work_struct *work)
3899 {
3900 struct hclge_dev *hdev =
3901 container_of(work, struct hclge_dev, service_task.work);
3902
3903 clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
3904
3905 if (hdev->hw_stats.stats_timer >= HCLGE_STATS_TIMER_INTERVAL) {
3906 hclge_update_stats_for_all(hdev);
3907 hdev->hw_stats.stats_timer = 0;
3908 }
3909
3910 hclge_update_port_info(hdev);
3911 hclge_update_link_status(hdev);
3912 hclge_update_vport_alive(hdev);
3913 hclge_sync_vlan_filter(hdev);
3914 if (hdev->fd_arfs_expire_timer >= HCLGE_FD_ARFS_EXPIRE_TIMER_INTERVAL) {
3915 hclge_rfs_filter_expire(hdev);
3916 hdev->fd_arfs_expire_timer = 0;
3917 }
3918
3919 hclge_task_schedule(hdev, round_jiffies_relative(HZ));
3920 }
3921
3922 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
3923 {
3924 /* a handle with no client attached can only be the nic handle */
3925 if (!handle->client)
3926 return container_of(handle, struct hclge_vport, nic);
3927 else if (handle->client->type == HNAE3_CLIENT_ROCE)
3928 return container_of(handle, struct hclge_vport, roce);
3929 else
3930 return container_of(handle, struct hclge_vport, nic);
3931 }
3932
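/* Vector 0 is reserved for the misc/IMP interrupt, so the search for a free
 * entry in vector_status[] starts at index 1. The address reported in
 * vector_info->io_addr follows the layout used below:
 *   io_base + HCLGE_VECTOR_REG_BASE
 *           + (i - 1) * HCLGE_VECTOR_REG_OFFSET
 *           + vport_id * HCLGE_VECTOR_VF_OFFSET
 * which presumably mirrors the per-vector register spacing of the hardware.
 */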
3933 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
3934 struct hnae3_vector_info *vector_info)
3935 {
3936 struct hclge_vport *vport = hclge_get_vport(handle);
3937 struct hnae3_vector_info *vector = vector_info;
3938 struct hclge_dev *hdev = vport->back;
3939 int alloc = 0;
3940 int i, j;
3941
3942 vector_num = min_t(u16, hdev->num_nic_msi - 1, vector_num);
3943 vector_num = min(hdev->num_msi_left, vector_num);
3944
3945 for (j = 0; j < vector_num; j++) {
3946 for (i = 1; i < hdev->num_msi; i++) {
3947 if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
3948 vector->vector = pci_irq_vector(hdev->pdev, i);
3949 vector->io_addr = hdev->hw.io_base +
3950 HCLGE_VECTOR_REG_BASE +
3951 (i - 1) * HCLGE_VECTOR_REG_OFFSET +
3952 vport->vport_id *
3953 HCLGE_VECTOR_VF_OFFSET;
3954 hdev->vector_status[i] = vport->vport_id;
3955 hdev->vector_irq[i] = vector->vector;
3956
3957 vector++;
3958 alloc++;
3959
3960 break;
3961 }
3962 }
3963 }
3964 hdev->num_msi_left -= alloc;
3965 hdev->num_msi_used += alloc;
3966
3967 return alloc;
3968 }
3969
3970 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
3971 {
3972 int i;
3973
3974 for (i = 0; i < hdev->num_msi; i++)
3975 if (vector == hdev->vector_irq[i])
3976 return i;
3977
3978 return -EINVAL;
3979 }
3980
3981 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
3982 {
3983 struct hclge_vport *vport = hclge_get_vport(handle);
3984 struct hclge_dev *hdev = vport->back;
3985 int vector_id;
3986
3987 vector_id = hclge_get_vector_index(hdev, vector);
3988 if (vector_id < 0) {
3989 dev_err(&hdev->pdev->dev,
3990 "Get vector index fail. vector_id =%d\n", vector_id);
3991 return vector_id;
3992 }
3993
3994 hclge_free_vector(hdev, vector_id);
3995
3996 return 0;
3997 }
3998
3999 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
4000 {
4001 return HCLGE_RSS_KEY_SIZE;
4002 }
4003
4004 static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
4005 {
4006 return HCLGE_RSS_IND_TBL_SIZE;
4007 }
4008
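/* The RSS hash key cannot be written with a single command descriptor, so
 * it is programmed in chunks of HCLGE_RSS_HASH_KEY_NUM bytes; hash_config
 * carries the selected hash algorithm plus the chunk offset of each command
 * sent below.
 */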
4009 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
4010 const u8 hfunc, const u8 *key)
4011 {
4012 struct hclge_rss_config_cmd *req;
4013 unsigned int key_offset = 0;
4014 struct hclge_desc desc;
4015 int key_counts;
4016 int key_size;
4017 int ret;
4018
4019 key_counts = HCLGE_RSS_KEY_SIZE;
4020 req = (struct hclge_rss_config_cmd *)desc.data;
4021
4022 while (key_counts) {
4023 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
4024 false);
4025
4026 req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
4027 req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
4028
4029 key_size = min(HCLGE_RSS_HASH_KEY_NUM, key_counts);
4030 memcpy(req->hash_key,
4031 key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
4032
4033 key_counts -= key_size;
4034 key_offset++;
4035 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4036 if (ret) {
4037 dev_err(&hdev->pdev->dev,
4038 "Configure RSS config fail, status = %d\n",
4039 ret);
4040 return ret;
4041 }
4042 }
4043 return 0;
4044 }
4045
4046 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir)
4047 {
4048 struct hclge_rss_indirection_table_cmd *req;
4049 struct hclge_desc desc;
4050 int i, j;
4051 int ret;
4052
4053 req = (struct hclge_rss_indirection_table_cmd *)desc.data;
4054
4055 for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
4056 hclge_cmd_setup_basic_desc
4057 (&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
4058
4059 req->start_table_index =
4060 cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
4061 req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
4062
4063 for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
4064 req->rss_result[j] =
4065 indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
4066
4067 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4068 if (ret) {
4069 dev_err(&hdev->pdev->dev,
4070 "Configure rss indir table fail,status = %d\n",
4071 ret);
4072 return ret;
4073 }
4074 }
4075 return 0;
4076 }
4077
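/* Program the per-TC RSS mode: each TC gets a 16 bit word carrying a valid
 * bit, the TC queue size (expressed as a log2 value, see hclge_rss_init_hw())
 * and the queue offset of that TC.
 */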
4078 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
4079 u16 *tc_size, u16 *tc_offset)
4080 {
4081 struct hclge_rss_tc_mode_cmd *req;
4082 struct hclge_desc desc;
4083 int ret;
4084 int i;
4085
4086 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
4087 req = (struct hclge_rss_tc_mode_cmd *)desc.data;
4088
4089 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4090 u16 mode = 0;
4091
4092 hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
4093 hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
4094 HCLGE_RSS_TC_SIZE_S, tc_size[i]);
4095 hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
4096 HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
4097
4098 req->rss_tc_mode[i] = cpu_to_le16(mode);
4099 }
4100
4101 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4102 if (ret)
4103 dev_err(&hdev->pdev->dev,
4104 "Configure rss tc mode fail, status = %d\n", ret);
4105
4106 return ret;
4107 }
4108
4109 static void hclge_get_rss_type(struct hclge_vport *vport)
4110 {
4111 if (vport->rss_tuple_sets.ipv4_tcp_en ||
4112 vport->rss_tuple_sets.ipv4_udp_en ||
4113 vport->rss_tuple_sets.ipv4_sctp_en ||
4114 vport->rss_tuple_sets.ipv6_tcp_en ||
4115 vport->rss_tuple_sets.ipv6_udp_en ||
4116 vport->rss_tuple_sets.ipv6_sctp_en)
4117 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
4118 else if (vport->rss_tuple_sets.ipv4_fragment_en ||
4119 vport->rss_tuple_sets.ipv6_fragment_en)
4120 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
4121 else
4122 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
4123 }
4124
4125 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
4126 {
4127 struct hclge_rss_input_tuple_cmd *req;
4128 struct hclge_desc desc;
4129 int ret;
4130
4131 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4132
4133 req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4134
4135 /* take the tuple configuration of the PF (vport 0) */
4136 req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
4137 req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
4138 req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
4139 req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
4140 req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
4141 req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
4142 req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
4143 req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
4144 hclge_get_rss_type(&hdev->vport[0]);
4145 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4146 if (ret)
4147 dev_err(&hdev->pdev->dev,
4148 "Configure rss input fail, status = %d\n", ret);
4149 return ret;
4150 }
4151
4152 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
4153 u8 *key, u8 *hfunc)
4154 {
4155 struct hclge_vport *vport = hclge_get_vport(handle);
4156 int i;
4157
4158 /* Get hash algorithm */
4159 if (hfunc) {
4160 switch (vport->rss_algo) {
4161 case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
4162 *hfunc = ETH_RSS_HASH_TOP;
4163 break;
4164 case HCLGE_RSS_HASH_ALGO_SIMPLE:
4165 *hfunc = ETH_RSS_HASH_XOR;
4166 break;
4167 default:
4168 *hfunc = ETH_RSS_HASH_UNKNOWN;
4169 break;
4170 }
4171 }
4172
4173 /* Get the RSS key requested by the user */
4174 if (key)
4175 memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
4176
4177 /* Get the indirection table */
4178 if (indir)
4179 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4180 indir[i] = vport->rss_indirection_tbl[i];
4181
4182 return 0;
4183 }
4184
4185 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
4186 const u8 *key, const u8 hfunc)
4187 {
4188 struct hclge_vport *vport = hclge_get_vport(handle);
4189 struct hclge_dev *hdev = vport->back;
4190 u8 hash_algo;
4191 int ret, i;
4192
4193 /* Set the RSS hash key if specified by the user */
4194 if (key) {
4195 switch (hfunc) {
4196 case ETH_RSS_HASH_TOP:
4197 hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4198 break;
4199 case ETH_RSS_HASH_XOR:
4200 hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4201 break;
4202 case ETH_RSS_HASH_NO_CHANGE:
4203 hash_algo = vport->rss_algo;
4204 break;
4205 default:
4206 return -EINVAL;
4207 }
4208
4209 ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
4210 if (ret)
4211 return ret;
4212
4213 /* update the shadow RSS key and algorithm kept in the vport */
4214 memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
4215 vport->rss_algo = hash_algo;
4216 }
4217
4218 /* update the shadow RSS indirection table with the user specified queues */
4219 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4220 vport->rss_indirection_tbl[i] = indir[i];
4221
4222 /* update the hardware */
4223 return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
4224 }
4225
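/* Translate the ethtool RXH_* flags in nfc->data into the hardware tuple
 * bits: L4 source/destination port, IP source/destination address and, for
 * SCTP flows, the verification tag bit.
 */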
4226 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
4227 {
4228 u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
4229
4230 if (nfc->data & RXH_L4_B_2_3)
4231 hash_sets |= HCLGE_D_PORT_BIT;
4232 else
4233 hash_sets &= ~HCLGE_D_PORT_BIT;
4234
4235 if (nfc->data & RXH_IP_SRC)
4236 hash_sets |= HCLGE_S_IP_BIT;
4237 else
4238 hash_sets &= ~HCLGE_S_IP_BIT;
4239
4240 if (nfc->data & RXH_IP_DST)
4241 hash_sets |= HCLGE_D_IP_BIT;
4242 else
4243 hash_sets &= ~HCLGE_D_IP_BIT;
4244
4245 if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
4246 hash_sets |= HCLGE_V_TAG_BIT;
4247
4248 return hash_sets;
4249 }
4250
4251 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
4252 struct ethtool_rxnfc *nfc)
4253 {
4254 struct hclge_vport *vport = hclge_get_vport(handle);
4255 struct hclge_dev *hdev = vport->back;
4256 struct hclge_rss_input_tuple_cmd *req;
4257 struct hclge_desc desc;
4258 u8 tuple_sets;
4259 int ret;
4260
4261 if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
4262 RXH_L4_B_0_1 | RXH_L4_B_2_3))
4263 return -EINVAL;
4264
4265 req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4266 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4267
4268 req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
4269 req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
4270 req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
4271 req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
4272 req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
4273 req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
4274 req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
4275 req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
4276
4277 tuple_sets = hclge_get_rss_hash_bits(nfc);
4278 switch (nfc->flow_type) {
4279 case TCP_V4_FLOW:
4280 req->ipv4_tcp_en = tuple_sets;
4281 break;
4282 case TCP_V6_FLOW:
4283 req->ipv6_tcp_en = tuple_sets;
4284 break;
4285 case UDP_V4_FLOW:
4286 req->ipv4_udp_en = tuple_sets;
4287 break;
4288 case UDP_V6_FLOW:
4289 req->ipv6_udp_en = tuple_sets;
4290 break;
4291 case SCTP_V4_FLOW:
4292 req->ipv4_sctp_en = tuple_sets;
4293 break;
4294 case SCTP_V6_FLOW:
4295 if ((nfc->data & RXH_L4_B_0_1) ||
4296 (nfc->data & RXH_L4_B_2_3))
4297 return -EINVAL;
4298
4299 req->ipv6_sctp_en = tuple_sets;
4300 break;
4301 case IPV4_FLOW:
4302 req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4303 break;
4304 case IPV6_FLOW:
4305 req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4306 break;
4307 default:
4308 return -EINVAL;
4309 }
4310
4311 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4312 if (ret) {
4313 dev_err(&hdev->pdev->dev,
4314 "Set rss tuple fail, status = %d\n", ret);
4315 return ret;
4316 }
4317
4318 vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
4319 vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
4320 vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
4321 vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
4322 vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
4323 vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
4324 vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
4325 vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
4326 hclge_get_rss_type(vport);
4327 return 0;
4328 }
4329
4330 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
4331 struct ethtool_rxnfc *nfc)
4332 {
4333 struct hclge_vport *vport = hclge_get_vport(handle);
4334 u8 tuple_sets;
4335
4336 nfc->data = 0;
4337
4338 switch (nfc->flow_type) {
4339 case TCP_V4_FLOW:
4340 tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
4341 break;
4342 case UDP_V4_FLOW:
4343 tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
4344 break;
4345 case TCP_V6_FLOW:
4346 tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
4347 break;
4348 case UDP_V6_FLOW:
4349 tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
4350 break;
4351 case SCTP_V4_FLOW:
4352 tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
4353 break;
4354 case SCTP_V6_FLOW:
4355 tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
4356 break;
4357 case IPV4_FLOW:
4358 case IPV6_FLOW:
4359 tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
4360 break;
4361 default:
4362 return -EINVAL;
4363 }
4364
4365 if (!tuple_sets)
4366 return 0;
4367
4368 if (tuple_sets & HCLGE_D_PORT_BIT)
4369 nfc->data |= RXH_L4_B_2_3;
4370 if (tuple_sets & HCLGE_S_PORT_BIT)
4371 nfc->data |= RXH_L4_B_0_1;
4372 if (tuple_sets & HCLGE_D_IP_BIT)
4373 nfc->data |= RXH_IP_DST;
4374 if (tuple_sets & HCLGE_S_IP_BIT)
4375 nfc->data |= RXH_IP_SRC;
4376
4377 return 0;
4378 }
4379
4380 static int hclge_get_tc_size(struct hnae3_handle *handle)
4381 {
4382 struct hclge_vport *vport = hclge_get_vport(handle);
4383 struct hclge_dev *hdev = vport->back;
4384
4385 return hdev->rss_size_max;
4386 }
4387
4388 int hclge_rss_init_hw(struct hclge_dev *hdev)
4389 {
4390 struct hclge_vport *vport = hdev->vport;
4391 u8 *rss_indir = vport[0].rss_indirection_tbl;
4392 u16 rss_size = vport[0].alloc_rss_size;
4393 u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
4394 u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
4395 u8 *key = vport[0].rss_hash_key;
4396 u8 hfunc = vport[0].rss_algo;
4397 u16 tc_valid[HCLGE_MAX_TC_NUM];
4398 u16 roundup_size;
4399 unsigned int i;
4400 int ret;
4401
4402 ret = hclge_set_rss_indir_table(hdev, rss_indir);
4403 if (ret)
4404 return ret;
4405
4406 ret = hclge_set_rss_algo_key(hdev, hfunc, key);
4407 if (ret)
4408 return ret;
4409
4410 ret = hclge_set_rss_input_tuple(hdev);
4411 if (ret)
4412 return ret;
4413
4414 /* Each TC has the same queue size, and the tc_size written to the
4415  * hardware is the log2 of the rounded-up power of two of rss_size;
4416  * the actual queue size is limited by the indirection table.
4417  */
4418 if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
4419 dev_err(&hdev->pdev->dev,
4420 "Configure rss tc size failed, invalid TC_SIZE = %d\n",
4421 rss_size);
4422 return -EINVAL;
4423 }
4424
4425 roundup_size = roundup_pow_of_two(rss_size);
4426 roundup_size = ilog2(roundup_size);
4427
4428 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4429 tc_valid[i] = 0;
4430
4431 if (!(hdev->hw_tc_map & BIT(i)))
4432 continue;
4433
4434 tc_valid[i] = 1;
4435 tc_size[i] = roundup_size;
4436 tc_offset[i] = rss_size * i;
4437 }
4438
4439 return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
4440 }
4441
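/* Spread the RSS indirection table evenly over the allocated RSS queues:
 * entry i of the table points at queue (i % alloc_rss_size), so with e.g.
 * alloc_rss_size = 4 the table repeats the pattern 0, 1, 2, 3, ...
 */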
4442 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
4443 {
4444 struct hclge_vport *vport = hdev->vport;
4445 int i, j;
4446
4447 for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
4448 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4449 vport[j].rss_indirection_tbl[i] =
4450 i % vport[j].alloc_rss_size;
4451 }
4452 }
4453
4454 static void hclge_rss_init_cfg(struct hclge_dev *hdev)
4455 {
4456 int i, rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4457 struct hclge_vport *vport = hdev->vport;
4458
4459 if (hdev->pdev->revision >= 0x21)
4460 rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4461
4462 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
4463 vport[i].rss_tuple_sets.ipv4_tcp_en =
4464 HCLGE_RSS_INPUT_TUPLE_OTHER;
4465 vport[i].rss_tuple_sets.ipv4_udp_en =
4466 HCLGE_RSS_INPUT_TUPLE_OTHER;
4467 vport[i].rss_tuple_sets.ipv4_sctp_en =
4468 HCLGE_RSS_INPUT_TUPLE_SCTP;
4469 vport[i].rss_tuple_sets.ipv4_fragment_en =
4470 HCLGE_RSS_INPUT_TUPLE_OTHER;
4471 vport[i].rss_tuple_sets.ipv6_tcp_en =
4472 HCLGE_RSS_INPUT_TUPLE_OTHER;
4473 vport[i].rss_tuple_sets.ipv6_udp_en =
4474 HCLGE_RSS_INPUT_TUPLE_OTHER;
4475 vport[i].rss_tuple_sets.ipv6_sctp_en =
4476 HCLGE_RSS_INPUT_TUPLE_SCTP;
4477 vport[i].rss_tuple_sets.ipv6_fragment_en =
4478 HCLGE_RSS_INPUT_TUPLE_OTHER;
4479
4480 vport[i].rss_algo = rss_algo;
4481
4482 memcpy(vport[i].rss_hash_key, hclge_hash_key,
4483 HCLGE_RSS_KEY_SIZE);
4484 }
4485
4486 hclge_rss_indir_init_cfg(hdev);
4487 }
4488
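/* Walk the ring chain and map (or unmap) each ring to the given vector.
 * A single command descriptor holds at most HCLGE_VECTOR_ELEMENTS_PER_CMD
 * ring entries, so the chain is flushed to the firmware in batches and any
 * remainder is sent after the loop.
 */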
4489 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
4490 int vector_id, bool en,
4491 struct hnae3_ring_chain_node *ring_chain)
4492 {
4493 struct hclge_dev *hdev = vport->back;
4494 struct hnae3_ring_chain_node *node;
4495 struct hclge_desc desc;
4496 struct hclge_ctrl_vector_chain_cmd *req =
4497 (struct hclge_ctrl_vector_chain_cmd *)desc.data;
4498 enum hclge_cmd_status status;
4499 enum hclge_opcode_type op;
4500 u16 tqp_type_and_id;
4501 int i;
4502
4503 op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
4504 hclge_cmd_setup_basic_desc(&desc, op, false);
4505 req->int_vector_id = vector_id;
4506
4507 i = 0;
4508 for (node = ring_chain; node; node = node->next) {
4509 tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
4510 hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M,
4511 HCLGE_INT_TYPE_S,
4512 hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
4513 hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
4514 HCLGE_TQP_ID_S, node->tqp_index);
4515 hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
4516 HCLGE_INT_GL_IDX_S,
4517 hnae3_get_field(node->int_gl_idx,
4518 HNAE3_RING_GL_IDX_M,
4519 HNAE3_RING_GL_IDX_S));
4520 req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
4521 if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
4522 req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
4523 req->vfid = vport->vport_id;
4524
4525 status = hclge_cmd_send(&hdev->hw, &desc, 1);
4526 if (status) {
4527 dev_err(&hdev->pdev->dev,
4528 "Map TQP fail, status is %d.\n",
4529 status);
4530 return -EIO;
4531 }
4532 i = 0;
4533
4534 hclge_cmd_setup_basic_desc(&desc,
4535 op,
4536 false);
4537 req->int_vector_id = vector_id;
4538 }
4539 }
4540
4541 if (i > 0) {
4542 req->int_cause_num = i;
4543 req->vfid = vport->vport_id;
4544 status = hclge_cmd_send(&hdev->hw, &desc, 1);
4545 if (status) {
4546 dev_err(&hdev->pdev->dev,
4547 "Map TQP fail, status is %d.\n", status);
4548 return -EIO;
4549 }
4550 }
4551
4552 return 0;
4553 }
4554
4555 static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector,
4556 struct hnae3_ring_chain_node *ring_chain)
4557 {
4558 struct hclge_vport *vport = hclge_get_vport(handle);
4559 struct hclge_dev *hdev = vport->back;
4560 int vector_id;
4561
4562 vector_id = hclge_get_vector_index(hdev, vector);
4563 if (vector_id < 0) {
4564 dev_err(&hdev->pdev->dev,
4565 "Get vector index fail. vector_id =%d\n", vector_id);
4566 return vector_id;
4567 }
4568
4569 return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
4570 }
4571
4572 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector,
4573 struct hnae3_ring_chain_node *ring_chain)
4574 {
4575 struct hclge_vport *vport = hclge_get_vport(handle);
4576 struct hclge_dev *hdev = vport->back;
4577 int vector_id, ret;
4578
4579 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
4580 return 0;
4581
4582 vector_id = hclge_get_vector_index(hdev, vector);
4583 if (vector_id < 0) {
4584 dev_err(&handle->pdev->dev,
4585 "Get vector index fail. ret =%d\n", vector_id);
4586 return vector_id;
4587 }
4588
4589 ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
4590 if (ret)
4591 dev_err(&handle->pdev->dev,
4592 "Unmap ring from vector fail. vectorid=%d, ret =%d\n",
4593 vector_id, ret);
4594
4595 return ret;
4596 }
4597
4598 int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
4599 struct hclge_promisc_param *param)
4600 {
4601 struct hclge_promisc_cfg_cmd *req;
4602 struct hclge_desc desc;
4603 int ret;
4604
4605 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
4606
4607 req = (struct hclge_promisc_cfg_cmd *)desc.data;
4608 req->vf_id = param->vf_id;
4609
4610 /* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported
4611  * on revision 0x20 hardware, but newer revisions do support them.
4612  * Setting these two bits does not cause an error on revision 0x20
4613  * when the command is sent to the firmware, so they are always set.
4614  */
4615 req->flag = (param->enable << HCLGE_PROMISC_EN_B) |
4616 HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B;
4617
4618 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4619 if (ret)
4620 dev_err(&hdev->pdev->dev,
4621 "Set promisc mode fail, status is %d.\n", ret);
4622
4623 return ret;
4624 }
4625
4626 void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
4627 bool en_mc, bool en_bc, int vport_id)
4628 {
4629 if (!param)
4630 return;
4631
4632 memset(param, 0, sizeof(struct hclge_promisc_param));
4633 if (en_uc)
4634 param->enable = HCLGE_PROMISC_EN_UC;
4635 if (en_mc)
4636 param->enable |= HCLGE_PROMISC_EN_MC;
4637 if (en_bc)
4638 param->enable |= HCLGE_PROMISC_EN_BC;
4639 param->vf_id = vport_id;
4640 }
4641
4642 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
4643 bool en_mc_pmc)
4644 {
4645 struct hclge_vport *vport = hclge_get_vport(handle);
4646 struct hclge_dev *hdev = vport->back;
4647 struct hclge_promisc_param param;
4648 bool en_bc_pmc = true;
4649
4650 /* On revision 0x20, the VLAN filter is always bypassed when broadcast
4651  * promiscuous is enabled, so broadcast promiscuous is only enabled
4652  * there when the user has explicitly turned promiscuous mode on.
4653  */
4654 if (handle->pdev->revision == 0x20)
4655 en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
4656
4657 hclge_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, en_bc_pmc,
4658 vport->vport_id);
4659 return hclge_cmd_set_promisc_mode(hdev, &param);
4660 }
4661
4662 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
4663 {
4664 struct hclge_get_fd_mode_cmd *req;
4665 struct hclge_desc desc;
4666 int ret;
4667
4668 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
4669
4670 req = (struct hclge_get_fd_mode_cmd *)desc.data;
4671
4672 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4673 if (ret) {
4674 dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
4675 return ret;
4676 }
4677
4678 *fd_mode = req->mode;
4679
4680 return ret;
4681 }
4682
4683 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
4684 u32 *stage1_entry_num,
4685 u32 *stage2_entry_num,
4686 u16 *stage1_counter_num,
4687 u16 *stage2_counter_num)
4688 {
4689 struct hclge_get_fd_allocation_cmd *req;
4690 struct hclge_desc desc;
4691 int ret;
4692
4693 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
4694
4695 req = (struct hclge_get_fd_allocation_cmd *)desc.data;
4696
4697 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4698 if (ret) {
4699 dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
4700 ret);
4701 return ret;
4702 }
4703
4704 *stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
4705 *stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
4706 *stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
4707 *stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
4708
4709 return ret;
4710 }
4711
4712 static int hclge_set_fd_key_config(struct hclge_dev *hdev, int stage_num)
4713 {
4714 struct hclge_set_fd_key_config_cmd *req;
4715 struct hclge_fd_key_cfg *stage;
4716 struct hclge_desc desc;
4717 int ret;
4718
4719 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
4720
4721 req = (struct hclge_set_fd_key_config_cmd *)desc.data;
4722 stage = &hdev->fd_cfg.key_cfg[stage_num];
4723 req->stage = stage_num;
4724 req->key_select = stage->key_sel;
4725 req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
4726 req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
4727 req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
4728 req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
4729 req->tuple_mask = cpu_to_le32(~stage->tuple_active);
4730 req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
4731
4732 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4733 if (ret)
4734 dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
4735
4736 return ret;
4737 }
4738
4739 static int hclge_init_fd_config(struct hclge_dev *hdev)
4740 {
4741 #define LOW_2_WORDS 0x03
4742 struct hclge_fd_key_cfg *key_cfg;
4743 int ret;
4744
4745 if (!hnae3_dev_fd_supported(hdev))
4746 return 0;
4747
4748 ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
4749 if (ret)
4750 return ret;
4751
4752 switch (hdev->fd_cfg.fd_mode) {
4753 case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
4754 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
4755 break;
4756 case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
4757 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
4758 break;
4759 default:
4760 dev_err(&hdev->pdev->dev,
4761 "Unsupported flow director mode %d\n",
4762 hdev->fd_cfg.fd_mode);
4763 return -EOPNOTSUPP;
4764 }
4765
4766 hdev->fd_cfg.proto_support =
4767 TCP_V4_FLOW | UDP_V4_FLOW | SCTP_V4_FLOW | TCP_V6_FLOW |
4768 UDP_V6_FLOW | SCTP_V6_FLOW | IPV4_USER_FLOW | IPV6_USER_FLOW;
4769 key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
4770 key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
4771 key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
4772 key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
4773 key_cfg->outer_sipv6_word_en = 0;
4774 key_cfg->outer_dipv6_word_en = 0;
4775
4776 key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
4777 BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
4778 BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
4779 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
4780
4781 /* with the maximum 400 bit key, MAC address tuples can also be supported */
4782 if (hdev->fd_cfg.max_key_length == MAX_KEY_LENGTH) {
4783 hdev->fd_cfg.proto_support |= ETHER_FLOW;
4784 key_cfg->tuple_active |=
4785 BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
4786 }
4787
4788 /* ROCE_TYPE is used to distinguish RoCE frames from NIC frames,
4789  * DST_VPORT identifies the destination vport of the rule
4790  */
4791 key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
4792
4793 ret = hclge_get_fd_allocation(hdev,
4794 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
4795 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
4796 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
4797 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
4798 if (ret)
4799 return ret;
4800
4801 return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
4802 }
4803
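/* A flow director TCAM entry does not fit into one command descriptor, so
 * the key is split across three descriptors chained with
 * HCLGE_CMD_FLAG_NEXT; the stage, x/y select, entry index and valid bit
 * travel in the first descriptor.
 */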
4804 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
4805 int loc, u8 *key, bool is_add)
4806 {
4807 struct hclge_fd_tcam_config_1_cmd *req1;
4808 struct hclge_fd_tcam_config_2_cmd *req2;
4809 struct hclge_fd_tcam_config_3_cmd *req3;
4810 struct hclge_desc desc[3];
4811 int ret;
4812
4813 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
4814 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4815 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
4816 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4817 hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
4818
4819 req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
4820 req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
4821 req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
4822
4823 req1->stage = stage;
4824 req1->xy_sel = sel_x ? 1 : 0;
4825 hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
4826 req1->index = cpu_to_le32(loc);
4827 req1->entry_vld = sel_x ? is_add : 0;
4828
4829 if (key) {
4830 memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
4831 memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
4832 sizeof(req2->tcam_data));
4833 memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
4834 sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
4835 }
4836
4837 ret = hclge_cmd_send(&hdev->hw, desc, 3);
4838 if (ret)
4839 dev_err(&hdev->pdev->dev,
4840 "config tcam key fail, ret=%d\n",
4841 ret);
4842
4843 return ret;
4844 }
4845
4846 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
4847 struct hclge_fd_ad_data *action)
4848 {
4849 struct hclge_fd_ad_config_cmd *req;
4850 struct hclge_desc desc;
4851 u64 ad_data = 0;
4852 int ret;
4853
4854 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
4855
4856 req = (struct hclge_fd_ad_config_cmd *)desc.data;
4857 req->index = cpu_to_le32(loc);
4858 req->stage = stage;
4859
4860 hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
4861 action->write_rule_id_to_bd);
4862 hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
4863 action->rule_id);
4864 ad_data <<= 32;
4865 hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
4866 hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
4867 action->forward_to_direct_queue);
4868 hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
4869 action->queue_id);
4870 hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
4871 hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
4872 HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
4873 hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
4874 hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
4875 action->counter_id);
4876
4877 req->ad_data = cpu_to_le64(ad_data);
4878 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4879 if (ret)
4880 dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
4881
4882 return ret;
4883 }
4884
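/* Convert one tuple of the rule into the x/y pattern pair used by the TCAM.
 * calc_x()/calc_y() derive the two patterns from the (value, mask) pair so
 * that bits cleared in the mask effectively become don't-care bits; tuples
 * marked as unused are skipped and left as zero, i.e. match anything.
 */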
4885 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
4886 struct hclge_fd_rule *rule)
4887 {
4888 u16 tmp_x_s, tmp_y_s;
4889 u32 tmp_x_l, tmp_y_l;
4890 int i;
4891
4892 if (rule->unused_tuple & tuple_bit)
4893 return true;
4894
4895 switch (tuple_bit) {
4896 case 0:
4897 return false;
4898 case BIT(INNER_DST_MAC):
4899 for (i = 0; i < ETH_ALEN; i++) {
4900 calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
4901 rule->tuples_mask.dst_mac[i]);
4902 calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
4903 rule->tuples_mask.dst_mac[i]);
4904 }
4905
4906 return true;
4907 case BIT(INNER_SRC_MAC):
4908 for (i = 0; i < ETH_ALEN; i++) {
4909 calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
4910 rule->tuples_mask.src_mac[i]);
4911 calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
4912 rule->tuples_mask.src_mac[i]);
4913 }
4914
4915 return true;
4916 case BIT(INNER_VLAN_TAG_FST):
4917 calc_x(tmp_x_s, rule->tuples.vlan_tag1,
4918 rule->tuples_mask.vlan_tag1);
4919 calc_y(tmp_y_s, rule->tuples.vlan_tag1,
4920 rule->tuples_mask.vlan_tag1);
4921 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4922 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4923
4924 return true;
4925 case BIT(INNER_ETH_TYPE):
4926 calc_x(tmp_x_s, rule->tuples.ether_proto,
4927 rule->tuples_mask.ether_proto);
4928 calc_y(tmp_y_s, rule->tuples.ether_proto,
4929 rule->tuples_mask.ether_proto);
4930 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4931 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4932
4933 return true;
4934 case BIT(INNER_IP_TOS):
4935 calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
4936 calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
4937
4938 return true;
4939 case BIT(INNER_IP_PROTO):
4940 calc_x(*key_x, rule->tuples.ip_proto,
4941 rule->tuples_mask.ip_proto);
4942 calc_y(*key_y, rule->tuples.ip_proto,
4943 rule->tuples_mask.ip_proto);
4944
4945 return true;
4946 case BIT(INNER_SRC_IP):
4947 calc_x(tmp_x_l, rule->tuples.src_ip[IPV4_INDEX],
4948 rule->tuples_mask.src_ip[IPV4_INDEX]);
4949 calc_y(tmp_y_l, rule->tuples.src_ip[IPV4_INDEX],
4950 rule->tuples_mask.src_ip[IPV4_INDEX]);
4951 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
4952 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
4953
4954 return true;
4955 case BIT(INNER_DST_IP):
4956 calc_x(tmp_x_l, rule->tuples.dst_ip[IPV4_INDEX],
4957 rule->tuples_mask.dst_ip[IPV4_INDEX]);
4958 calc_y(tmp_y_l, rule->tuples.dst_ip[IPV4_INDEX],
4959 rule->tuples_mask.dst_ip[IPV4_INDEX]);
4960 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
4961 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
4962
4963 return true;
4964 case BIT(INNER_SRC_PORT):
4965 calc_x(tmp_x_s, rule->tuples.src_port,
4966 rule->tuples_mask.src_port);
4967 calc_y(tmp_y_s, rule->tuples.src_port,
4968 rule->tuples_mask.src_port);
4969 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4970 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4971
4972 return true;
4973 case BIT(INNER_DST_PORT):
4974 calc_x(tmp_x_s, rule->tuples.dst_port,
4975 rule->tuples_mask.dst_port);
4976 calc_y(tmp_y_s, rule->tuples.dst_port,
4977 rule->tuples_mask.dst_port);
4978 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4979 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4980
4981 return true;
4982 default:
4983 return false;
4984 }
4985 }
4986
4987 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
4988 u8 vf_id, u8 network_port_id)
4989 {
4990 u32 port_number = 0;
4991
4992 if (port_type == HOST_PORT) {
4993 hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
4994 pf_id);
4995 hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
4996 vf_id);
4997 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
4998 } else {
4999 hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
5000 HCLGE_NETWORK_PORT_ID_S, network_port_id);
5001 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
5002 }
5003
5004 return port_number;
5005 }
5006
5007 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
5008 __le32 *key_x, __le32 *key_y,
5009 struct hclge_fd_rule *rule)
5010 {
5011 u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
5012 u8 cur_pos = 0, tuple_size, shift_bits;
5013 unsigned int i;
5014
5015 for (i = 0; i < MAX_META_DATA; i++) {
5016 tuple_size = meta_data_key_info[i].key_length;
5017 tuple_bit = key_cfg->meta_data_active & BIT(i);
5018
5019 switch (tuple_bit) {
5020 case BIT(ROCE_TYPE):
5021 hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
5022 cur_pos += tuple_size;
5023 break;
5024 case BIT(DST_VPORT):
5025 port_number = hclge_get_port_number(HOST_PORT, 0,
5026 rule->vf_id, 0);
5027 hnae3_set_field(meta_data,
5028 GENMASK(cur_pos + tuple_size, cur_pos),
5029 cur_pos, port_number);
5030 cur_pos += tuple_size;
5031 break;
5032 default:
5033 break;
5034 }
5035 }
5036
5037 calc_x(tmp_x, meta_data, 0xFFFFFFFF);
5038 calc_y(tmp_y, meta_data, 0xFFFFFFFF);
5039 shift_bits = sizeof(meta_data) * 8 - cur_pos;
5040
5041 *key_x = cpu_to_le32(tmp_x << shift_bits);
5042 *key_y = cpu_to_le32(tmp_y << shift_bits);
5043 }
5044
5045 /* A complete key consists of the meta data key and the tuple key.
5046  * The meta data key is stored at the MSB region of the key, the tuple
5047  * key at the LSB region, and unused bits are filled with zero.
5048  */
5049 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
5050 struct hclge_fd_rule *rule)
5051 {
5052 struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
5053 u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
5054 u8 *cur_key_x, *cur_key_y;
5055 unsigned int i;
5056 int ret, tuple_size;
5057 u8 meta_data_region;
5058
5059 memset(key_x, 0, sizeof(key_x));
5060 memset(key_y, 0, sizeof(key_y));
5061 cur_key_x = key_x;
5062 cur_key_y = key_y;
5063
5064 for (i = 0; i < MAX_TUPLE; i++) {
5065 bool tuple_valid;
5066 u32 check_tuple;
5067
5068 tuple_size = tuple_key_info[i].key_length / 8;
5069 check_tuple = key_cfg->tuple_active & BIT(i);
5070
5071 tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
5072 cur_key_y, rule);
5073 if (tuple_valid) {
5074 cur_key_x += tuple_size;
5075 cur_key_y += tuple_size;
5076 }
5077 }
5078
5079 meta_data_region = hdev->fd_cfg.max_key_length / 8 -
5080 MAX_META_DATA_LENGTH / 8;
5081
5082 hclge_fd_convert_meta_data(key_cfg,
5083 (__le32 *)(key_x + meta_data_region),
5084 (__le32 *)(key_y + meta_data_region),
5085 rule);
5086
5087 ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
5088 true);
5089 if (ret) {
5090 dev_err(&hdev->pdev->dev,
5091 "fd key_y config fail, loc=%d, ret=%d\n",
5092 rule->location, ret);
5093 return ret;
5094 }
5095
5096 ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
5097 true);
5098 if (ret)
5099 dev_err(&hdev->pdev->dev,
5100 "fd key_x config fail, loc=%d, ret=%d\n",
5101 rule->location, ret);
5102 return ret;
5103 }
5104
5105 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
5106 struct hclge_fd_rule *rule)
5107 {
5108 struct hclge_fd_ad_data ad_data;
5109
5110 ad_data.ad_id = rule->location;
5111
5112 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5113 ad_data.drop_packet = true;
5114 ad_data.forward_to_direct_queue = false;
5115 ad_data.queue_id = 0;
5116 } else {
5117 ad_data.drop_packet = false;
5118 ad_data.forward_to_direct_queue = true;
5119 ad_data.queue_id = rule->queue_id;
5120 }
5121
5122 ad_data.use_counter = false;
5123 ad_data.counter_id = 0;
5124
5125 ad_data.use_next_stage = false;
5126 ad_data.next_input_key = 0;
5127
5128 ad_data.write_rule_id_to_bd = true;
5129 ad_data.rule_id = rule->location;
5130
5131 return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
5132 }
5133
5134 static int hclge_fd_check_spec(struct hclge_dev *hdev,
5135 struct ethtool_rx_flow_spec *fs, u32 *unused)
5136 {
5137 struct ethtool_tcpip4_spec *tcp_ip4_spec;
5138 struct ethtool_usrip4_spec *usr_ip4_spec;
5139 struct ethtool_tcpip6_spec *tcp_ip6_spec;
5140 struct ethtool_usrip6_spec *usr_ip6_spec;
5141 struct ethhdr *ether_spec;
5142
5143 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5144 return -EINVAL;
5145
5146 if (!(fs->flow_type & hdev->fd_cfg.proto_support))
5147 return -EOPNOTSUPP;
5148
5149 if ((fs->flow_type & FLOW_EXT) &&
5150 (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
5151 dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
5152 return -EOPNOTSUPP;
5153 }
5154
5155 switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
5156 case SCTP_V4_FLOW:
5157 case TCP_V4_FLOW:
5158 case UDP_V4_FLOW:
5159 tcp_ip4_spec = &fs->h_u.tcp_ip4_spec;
5160 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
5161
5162 if (!tcp_ip4_spec->ip4src)
5163 *unused |= BIT(INNER_SRC_IP);
5164
5165 if (!tcp_ip4_spec->ip4dst)
5166 *unused |= BIT(INNER_DST_IP);
5167
5168 if (!tcp_ip4_spec->psrc)
5169 *unused |= BIT(INNER_SRC_PORT);
5170
5171 if (!tcp_ip4_spec->pdst)
5172 *unused |= BIT(INNER_DST_PORT);
5173
5174 if (!tcp_ip4_spec->tos)
5175 *unused |= BIT(INNER_IP_TOS);
5176
5177 break;
5178 case IP_USER_FLOW:
5179 usr_ip4_spec = &fs->h_u.usr_ip4_spec;
5180 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5181 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5182
5183 if (!usr_ip4_spec->ip4src)
5184 *unused |= BIT(INNER_SRC_IP);
5185
5186 if (!usr_ip4_spec->ip4dst)
5187 *unused |= BIT(INNER_DST_IP);
5188
5189 if (!usr_ip4_spec->tos)
5190 *unused |= BIT(INNER_IP_TOS);
5191
5192 if (!usr_ip4_spec->proto)
5193 *unused |= BIT(INNER_IP_PROTO);
5194
5195 if (usr_ip4_spec->l4_4_bytes)
5196 return -EOPNOTSUPP;
5197
5198 if (usr_ip4_spec->ip_ver != ETH_RX_NFC_IP4)
5199 return -EOPNOTSUPP;
5200
5201 break;
5202 case SCTP_V6_FLOW:
5203 case TCP_V6_FLOW:
5204 case UDP_V6_FLOW:
5205 tcp_ip6_spec = &fs->h_u.tcp_ip6_spec;
5206 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5207 BIT(INNER_IP_TOS);
5208
5209 /* an all-zero ipv6 src/dst address means that tuple is not used */
5210 if (!tcp_ip6_spec->ip6src[0] && !tcp_ip6_spec->ip6src[1] &&
5211 !tcp_ip6_spec->ip6src[2] && !tcp_ip6_spec->ip6src[3])
5212 *unused |= BIT(INNER_SRC_IP);
5213
5214 if (!tcp_ip6_spec->ip6dst[0] && !tcp_ip6_spec->ip6dst[1] &&
5215 !tcp_ip6_spec->ip6dst[2] && !tcp_ip6_spec->ip6dst[3])
5216 *unused |= BIT(INNER_DST_IP);
5217
5218 if (!tcp_ip6_spec->psrc)
5219 *unused |= BIT(INNER_SRC_PORT);
5220
5221 if (!tcp_ip6_spec->pdst)
5222 *unused |= BIT(INNER_DST_PORT);
5223
5224 if (tcp_ip6_spec->tclass)
5225 return -EOPNOTSUPP;
5226
5227 break;
5228 case IPV6_USER_FLOW:
5229 usr_ip6_spec = &fs->h_u.usr_ip6_spec;
5230 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5231 BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) |
5232 BIT(INNER_DST_PORT);
5233
5234 /* an all-zero ipv6 src/dst address means that tuple is not used */
5235 if (!usr_ip6_spec->ip6src[0] && !usr_ip6_spec->ip6src[1] &&
5236 !usr_ip6_spec->ip6src[2] && !usr_ip6_spec->ip6src[3])
5237 *unused |= BIT(INNER_SRC_IP);
5238
5239 if (!usr_ip6_spec->ip6dst[0] && !usr_ip6_spec->ip6dst[1] &&
5240 !usr_ip6_spec->ip6dst[2] && !usr_ip6_spec->ip6dst[3])
5241 *unused |= BIT(INNER_DST_IP);
5242
5243 if (!usr_ip6_spec->l4_proto)
5244 *unused |= BIT(INNER_IP_PROTO);
5245
5246 if (usr_ip6_spec->tclass)
5247 return -EOPNOTSUPP;
5248
5249 if (usr_ip6_spec->l4_4_bytes)
5250 return -EOPNOTSUPP;
5251
5252 break;
5253 case ETHER_FLOW:
5254 ether_spec = &fs->h_u.ether_spec;
5255 *unused |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
5256 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
5257 BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
5258
5259 if (is_zero_ether_addr(ether_spec->h_source))
5260 *unused |= BIT(INNER_SRC_MAC);
5261
5262 if (is_zero_ether_addr(ether_spec->h_dest))
5263 *unused |= BIT(INNER_DST_MAC);
5264
5265 if (!ether_spec->h_proto)
5266 *unused |= BIT(INNER_ETH_TYPE);
5267
5268 break;
5269 default:
5270 return -EOPNOTSUPP;
5271 }
5272
5273 if ((fs->flow_type & FLOW_EXT)) {
5274 if (fs->h_ext.vlan_etype)
5275 return -EOPNOTSUPP;
5276 if (!fs->h_ext.vlan_tci)
5277 *unused |= BIT(INNER_VLAN_TAG_FST);
5278
5279 if (fs->m_ext.vlan_tci) {
5280 if (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID)
5281 return -EINVAL;
5282 }
5283 } else {
5284 *unused |= BIT(INNER_VLAN_TAG_FST);
5285 }
5286
5287 if (fs->flow_type & FLOW_MAC_EXT) {
5288 if (!(hdev->fd_cfg.proto_support & ETHER_FLOW))
5289 return -EOPNOTSUPP;
5290
5291 if (is_zero_ether_addr(fs->h_ext.h_dest))
5292 *unused |= BIT(INNER_DST_MAC);
5293 else
5294 *unused &= ~(BIT(INNER_DST_MAC));
5295 }
5296
5297 return 0;
5298 }
5299
5300 static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
5301 {
5302 struct hclge_fd_rule *rule = NULL;
5303 struct hlist_node *node2;
5304
5305 spin_lock_bh(&hdev->fd_rule_lock);
5306 hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
5307 if (rule->location >= location)
5308 break;
5309 }
5310
5311 spin_unlock_bh(&hdev->fd_rule_lock);
5312
5313 return rule && rule->location == location;
5314 }
5315
5316
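/* Keep hdev->fd_rule_list sorted by rule location. When adding, an existing
 * rule at the same location is replaced; when deleting, the rule must exist
 * or -EINVAL is returned. fd_bmap and the rule counter are kept in sync
 * with the list.
 */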
5317 static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
5318 struct hclge_fd_rule *new_rule,
5319 u16 location,
5320 bool is_add)
5321 {
5322 struct hclge_fd_rule *rule = NULL, *parent = NULL;
5323 struct hlist_node *node2;
5324
5325 if (is_add && !new_rule)
5326 return -EINVAL;
5327
5328 hlist_for_each_entry_safe(rule, node2,
5329 &hdev->fd_rule_list, rule_node) {
5330 if (rule->location >= location)
5331 break;
5332 parent = rule;
5333 }
5334
5335 if (rule && rule->location == location) {
5336 hlist_del(&rule->rule_node);
5337 kfree(rule);
5338 hdev->hclge_fd_rule_num--;
5339
5340 if (!is_add) {
5341 if (!hdev->hclge_fd_rule_num)
5342 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5343 clear_bit(location, hdev->fd_bmap);
5344
5345 return 0;
5346 }
5347 } else if (!is_add) {
5348 dev_err(&hdev->pdev->dev,
5349 "delete fail, rule %d is inexistent\n",
5350 location);
5351 return -EINVAL;
5352 }
5353
5354 INIT_HLIST_NODE(&new_rule->rule_node);
5355
5356 if (parent)
5357 hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
5358 else
5359 hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);
5360
5361 set_bit(location, hdev->fd_bmap);
5362 hdev->hclge_fd_rule_num++;
5363 hdev->fd_active_type = new_rule->rule_type;
5364
5365 return 0;
5366 }
5367
5368 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
5369 struct ethtool_rx_flow_spec *fs,
5370 struct hclge_fd_rule *rule)
5371 {
5372 u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
5373
5374 switch (flow_type) {
5375 case SCTP_V4_FLOW:
5376 case TCP_V4_FLOW:
5377 case UDP_V4_FLOW:
5378 rule->tuples.src_ip[IPV4_INDEX] =
5379 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
5380 rule->tuples_mask.src_ip[IPV4_INDEX] =
5381 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
5382
5383 rule->tuples.dst_ip[IPV4_INDEX] =
5384 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
5385 rule->tuples_mask.dst_ip[IPV4_INDEX] =
5386 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
5387
5388 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
5389 rule->tuples_mask.src_port =
5390 be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
5391
5392 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
5393 rule->tuples_mask.dst_port =
5394 be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
5395
5396 rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
5397 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
5398
5399 rule->tuples.ether_proto = ETH_P_IP;
5400 rule->tuples_mask.ether_proto = 0xFFFF;
5401
5402 break;
5403 case IP_USER_FLOW:
5404 rule->tuples.src_ip[IPV4_INDEX] =
5405 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
5406 rule->tuples_mask.src_ip[IPV4_INDEX] =
5407 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
5408
5409 rule->tuples.dst_ip[IPV4_INDEX] =
5410 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
5411 rule->tuples_mask.dst_ip[IPV4_INDEX] =
5412 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
5413
5414 rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
5415 rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
5416
5417 rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
5418 rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
5419
5420 rule->tuples.ether_proto = ETH_P_IP;
5421 rule->tuples_mask.ether_proto = 0xFFFF;
5422
5423 break;
5424 case SCTP_V6_FLOW:
5425 case TCP_V6_FLOW:
5426 case UDP_V6_FLOW:
5427 be32_to_cpu_array(rule->tuples.src_ip,
5428 fs->h_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
5429 be32_to_cpu_array(rule->tuples_mask.src_ip,
5430 fs->m_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
5431
5432 be32_to_cpu_array(rule->tuples.dst_ip,
5433 fs->h_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
5434 be32_to_cpu_array(rule->tuples_mask.dst_ip,
5435 fs->m_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
5436
5437 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
5438 rule->tuples_mask.src_port =
5439 be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
5440
5441 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
5442 rule->tuples_mask.dst_port =
5443 be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
5444
5445 rule->tuples.ether_proto = ETH_P_IPV6;
5446 rule->tuples_mask.ether_proto = 0xFFFF;
5447
5448 break;
5449 case IPV6_USER_FLOW:
5450 be32_to_cpu_array(rule->tuples.src_ip,
5451 fs->h_u.usr_ip6_spec.ip6src, IPV6_SIZE);
5452 be32_to_cpu_array(rule->tuples_mask.src_ip,
5453 fs->m_u.usr_ip6_spec.ip6src, IPV6_SIZE);
5454
5455 be32_to_cpu_array(rule->tuples.dst_ip,
5456 fs->h_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
5457 be32_to_cpu_array(rule->tuples_mask.dst_ip,
5458 fs->m_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
5459
5460 rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
5461 rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
5462
5463 rule->tuples.ether_proto = ETH_P_IPV6;
5464 rule->tuples_mask.ether_proto = 0xFFFF;
5465
5466 break;
5467 case ETHER_FLOW:
5468 ether_addr_copy(rule->tuples.src_mac,
5469 fs->h_u.ether_spec.h_source);
5470 ether_addr_copy(rule->tuples_mask.src_mac,
5471 fs->m_u.ether_spec.h_source);
5472
5473 ether_addr_copy(rule->tuples.dst_mac,
5474 fs->h_u.ether_spec.h_dest);
5475 ether_addr_copy(rule->tuples_mask.dst_mac,
5476 fs->m_u.ether_spec.h_dest);
5477
5478 rule->tuples.ether_proto =
5479 be16_to_cpu(fs->h_u.ether_spec.h_proto);
5480 rule->tuples_mask.ether_proto =
5481 be16_to_cpu(fs->m_u.ether_spec.h_proto);
5482
5483 break;
5484 default:
5485 return -EOPNOTSUPP;
5486 }
5487
5488 switch (flow_type) {
5489 case SCTP_V4_FLOW:
5490 case SCTP_V6_FLOW:
5491 rule->tuples.ip_proto = IPPROTO_SCTP;
5492 rule->tuples_mask.ip_proto = 0xFF;
5493 break;
5494 case TCP_V4_FLOW:
5495 case TCP_V6_FLOW:
5496 rule->tuples.ip_proto = IPPROTO_TCP;
5497 rule->tuples_mask.ip_proto = 0xFF;
5498 break;
5499 case UDP_V4_FLOW:
5500 case UDP_V6_FLOW:
5501 rule->tuples.ip_proto = IPPROTO_UDP;
5502 rule->tuples_mask.ip_proto = 0xFF;
5503 break;
5504 default:
5505 break;
5506 }
5507
5508 if ((fs->flow_type & FLOW_EXT)) {
5509 rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
5510 rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
5511 }
5512
5513 if (fs->flow_type & FLOW_MAC_EXT) {
5514 ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
5515 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
5516 }
5517
5518 return 0;
5519 }
5520
5521
5522 static int hclge_fd_config_rule(struct hclge_dev *hdev,
5523 struct hclge_fd_rule *rule)
5524 {
5525 int ret;
5526
5527 if (!rule) {
5528 dev_err(&hdev->pdev->dev,
5529 "The flow director rule is NULL\n");
5530 return -EINVAL;
5531 }
5532
5533 /* adding to the rule list cannot fail here, so the return value is not checked */
5534 hclge_fd_update_rule_list(hdev, rule, rule->location, true);
5535
5536 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5537 if (ret)
5538 goto clear_rule;
5539
5540 ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5541 if (ret)
5542 goto clear_rule;
5543
5544 return 0;
5545
5546 clear_rule:
5547 hclge_fd_update_rule_list(hdev, rule, rule->location, false);
5548 return ret;
5549 }
5550
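/* ethtool entry point for adding a flow director rule. fs->ring_cookie
 * either requests dropping the packet (RX_CLS_FLOW_DISC) or encodes the
 * target VF and ring, which are decoded with the ethtool_get_flow_spec_ring*
 * helpers and validated against the allocated queues before the rule is
 * written to hardware.
 */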
5551 static int hclge_add_fd_entry(struct hnae3_handle *handle,
5552 struct ethtool_rxnfc *cmd)
5553 {
5554 struct hclge_vport *vport = hclge_get_vport(handle);
5555 struct hclge_dev *hdev = vport->back;
5556 u16 dst_vport_id = 0, q_index = 0;
5557 struct ethtool_rx_flow_spec *fs;
5558 struct hclge_fd_rule *rule;
5559 u32 unused = 0;
5560 u8 action;
5561 int ret;
5562
5563 if (!hnae3_dev_fd_supported(hdev))
5564 return -EOPNOTSUPP;
5565
5566 if (!hdev->fd_en) {
5567 dev_warn(&hdev->pdev->dev,
5568 "Please enable flow director first\n");
5569 return -EOPNOTSUPP;
5570 }
5571
5572 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5573
5574 ret = hclge_fd_check_spec(hdev, fs, &unused);
5575 if (ret) {
5576 dev_err(&hdev->pdev->dev, "Check fd spec failed\n");
5577 return ret;
5578 }
5579
5580 if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
5581 action = HCLGE_FD_ACTION_DROP_PACKET;
5582 } else {
5583 u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
5584 u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
5585 u16 tqps;
5586
5587 if (vf > hdev->num_req_vfs) {
5588 dev_err(&hdev->pdev->dev,
5589 "Error: vf id (%d) > max vf num (%d)\n",
5590 vf, hdev->num_req_vfs);
5591 return -EINVAL;
5592 }
5593
5594 dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
5595 tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;
5596
5597 if (ring >= tqps) {
5598 dev_err(&hdev->pdev->dev,
5599 "Error: queue id (%d) > max tqp num (%d)\n",
5600 ring, tqps - 1);
5601 return -EINVAL;
5602 }
5603
5604 action = HCLGE_FD_ACTION_ACCEPT_PACKET;
5605 q_index = ring;
5606 }
5607
5608 rule = kzalloc(sizeof(*rule), GFP_KERNEL);
5609 if (!rule)
5610 return -ENOMEM;
5611
5612 ret = hclge_fd_get_tuple(hdev, fs, rule);
5613 if (ret) {
5614 kfree(rule);
5615 return ret;
5616 }
5617
5618 rule->flow_type = fs->flow_type;
5619
5620 rule->location = fs->location;
5621 rule->unused_tuple = unused;
5622 rule->vf_id = dst_vport_id;
5623 rule->queue_id = q_index;
5624 rule->action = action;
5625 rule->rule_type = HCLGE_FD_EP_ACTIVE;
5626
5627 /* to avoid rule conflicts, clear all aRFS rules before a rule is
5628  * configured manually through ethtool
5629  */
5630 hclge_clear_arfs_rules(handle);
5631
5632 spin_lock_bh(&hdev->fd_rule_lock);
5633 ret = hclge_fd_config_rule(hdev, rule);
5634
5635 spin_unlock_bh(&hdev->fd_rule_lock);
5636
5637 return ret;
5638 }
5639
5640 static int hclge_del_fd_entry(struct hnae3_handle *handle,
5641 struct ethtool_rxnfc *cmd)
5642 {
5643 struct hclge_vport *vport = hclge_get_vport(handle);
5644 struct hclge_dev *hdev = vport->back;
5645 struct ethtool_rx_flow_spec *fs;
5646 int ret;
5647
5648 if (!hnae3_dev_fd_supported(hdev))
5649 return -EOPNOTSUPP;
5650
5651 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5652
5653 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5654 return -EINVAL;
5655
5656 if (!hclge_fd_rule_exist(hdev, fs->location)) {
5657 dev_err(&hdev->pdev->dev,
5658 "Delete fail, rule %d is inexistent\n", fs->location);
5659 return -ENOENT;
5660 }
5661
5662 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location,
5663 NULL, false);
5664 if (ret)
5665 return ret;
5666
5667 spin_lock_bh(&hdev->fd_rule_lock);
5668 ret = hclge_fd_update_rule_list(hdev, NULL, fs->location, false);
5669
5670 spin_unlock_bh(&hdev->fd_rule_lock);
5671
5672 return ret;
5673 }
5674
5675 static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
5676 bool clear_list)
5677 {
5678 struct hclge_vport *vport = hclge_get_vport(handle);
5679 struct hclge_dev *hdev = vport->back;
5680 struct hclge_fd_rule *rule;
5681 struct hlist_node *node;
5682 u16 location;
5683
5684 if (!hnae3_dev_fd_supported(hdev))
5685 return;
5686
5687 spin_lock_bh(&hdev->fd_rule_lock);
5688 for_each_set_bit(location, hdev->fd_bmap,
5689 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5690 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
5691 NULL, false);
5692
5693 if (clear_list) {
5694 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
5695 rule_node) {
5696 hlist_del(&rule->rule_node);
5697 kfree(rule);
5698 }
5699 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5700 hdev->hclge_fd_rule_num = 0;
5701 bitmap_zero(hdev->fd_bmap,
5702 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
5703 }
5704
5705 spin_unlock_bh(&hdev->fd_rule_lock);
5706 }
5707
5708 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
5709 {
5710 struct hclge_vport *vport = hclge_get_vport(handle);
5711 struct hclge_dev *hdev = vport->back;
5712 struct hclge_fd_rule *rule;
5713 struct hlist_node *node;
5714 int ret;
5715
5716 /* Return ok here, because reset error handling will check this
5717  * return value. If an error is returned here, the reset process
5718  * will fail.
5719  */
5720 if (!hnae3_dev_fd_supported(hdev))
5721 return 0;
5722
5723 /* if fd is disabled, it should not be restored during reset */
5724 if (!hdev->fd_en)
5725 return 0;
5726
5727 spin_lock_bh(&hdev->fd_rule_lock);
5728 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
5729 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5730 if (!ret)
5731 ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5732
5733 if (ret) {
5734 dev_warn(&hdev->pdev->dev,
5735 "Restore rule %d failed, remove it\n",
5736 rule->location);
5737 clear_bit(rule->location, hdev->fd_bmap);
5738 hlist_del(&rule->rule_node);
5739 kfree(rule);
5740 hdev->hclge_fd_rule_num--;
5741 }
5742 }
5743
5744 if (hdev->hclge_fd_rule_num)
5745 hdev->fd_active_type = HCLGE_FD_EP_ACTIVE;
5746
5747 spin_unlock_bh(&hdev->fd_rule_lock);
5748
5749 return 0;
5750 }
5751
5752 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
5753 struct ethtool_rxnfc *cmd)
5754 {
5755 struct hclge_vport *vport = hclge_get_vport(handle);
5756 struct hclge_dev *hdev = vport->back;
5757
5758 if (!hnae3_dev_fd_supported(hdev))
5759 return -EOPNOTSUPP;
5760
5761 cmd->rule_cnt = hdev->hclge_fd_rule_num;
5762 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
5763
5764 return 0;
5765 }
5766
5767 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
5768 struct ethtool_rxnfc *cmd)
5769 {
5770 struct hclge_vport *vport = hclge_get_vport(handle);
5771 struct hclge_fd_rule *rule = NULL;
5772 struct hclge_dev *hdev = vport->back;
5773 struct ethtool_rx_flow_spec *fs;
5774 struct hlist_node *node2;
5775
5776 if (!hnae3_dev_fd_supported(hdev))
5777 return -EOPNOTSUPP;
5778
5779 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5780
5781 spin_lock_bh(&hdev->fd_rule_lock);
5782
5783 hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
5784 if (rule->location >= fs->location)
5785 break;
5786 }
5787
5788 if (!rule || fs->location != rule->location) {
5789 spin_unlock_bh(&hdev->fd_rule_lock);
5790
5791 return -ENOENT;
5792 }
5793
5794 fs->flow_type = rule->flow_type;
5795 switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
5796 case SCTP_V4_FLOW:
5797 case TCP_V4_FLOW:
5798 case UDP_V4_FLOW:
5799 fs->h_u.tcp_ip4_spec.ip4src =
5800 cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
5801 fs->m_u.tcp_ip4_spec.ip4src =
5802 rule->unused_tuple & BIT(INNER_SRC_IP) ?
5803 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
5804
5805 fs->h_u.tcp_ip4_spec.ip4dst =
5806 cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
5807 fs->m_u.tcp_ip4_spec.ip4dst =
5808 rule->unused_tuple & BIT(INNER_DST_IP) ?
5809 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
5810
5811 fs->h_u.tcp_ip4_spec.psrc = cpu_to_be16(rule->tuples.src_port);
5812 fs->m_u.tcp_ip4_spec.psrc =
5813 rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5814 0 : cpu_to_be16(rule->tuples_mask.src_port);
5815
5816 fs->h_u.tcp_ip4_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
5817 fs->m_u.tcp_ip4_spec.pdst =
5818 rule->unused_tuple & BIT(INNER_DST_PORT) ?
5819 0 : cpu_to_be16(rule->tuples_mask.dst_port);
5820
5821 fs->h_u.tcp_ip4_spec.tos = rule->tuples.ip_tos;
5822 fs->m_u.tcp_ip4_spec.tos =
5823 rule->unused_tuple & BIT(INNER_IP_TOS) ?
5824 0 : rule->tuples_mask.ip_tos;
5825
5826 break;
5827 case IP_USER_FLOW:
5828 fs->h_u.usr_ip4_spec.ip4src =
5829 cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
5830 fs->m_u.usr_ip4_spec.ip4src =
5831 rule->unused_tuple & BIT(INNER_SRC_IP) ?
5832 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
5833
5834 fs->h_u.usr_ip4_spec.ip4dst =
5835 cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
5836 fs->m_u.usr_ip4_spec.ip4dst =
5837 rule->unused_tuple & BIT(INNER_DST_IP) ?
5838 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
5839
5840 fs->h_u.usr_ip4_spec.tos = rule->tuples.ip_tos;
5841 fs->m_u.usr_ip4_spec.tos =
5842 rule->unused_tuple & BIT(INNER_IP_TOS) ?
5843 0 : rule->tuples_mask.ip_tos;
5844
5845 fs->h_u.usr_ip4_spec.proto = rule->tuples.ip_proto;
5846 fs->m_u.usr_ip4_spec.proto =
5847 rule->unused_tuple & BIT(INNER_IP_PROTO) ?
5848 0 : rule->tuples_mask.ip_proto;
5849
5850 fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
5851
5852 break;
5853 case SCTP_V6_FLOW:
5854 case TCP_V6_FLOW:
5855 case UDP_V6_FLOW:
5856 cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6src,
5857 rule->tuples.src_ip, IPV6_SIZE);
5858 if (rule->unused_tuple & BIT(INNER_SRC_IP))
5859 memset(fs->m_u.tcp_ip6_spec.ip6src, 0,
5860 sizeof(int) * IPV6_SIZE);
5861 else
5862 cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6src,
5863 rule->tuples_mask.src_ip, IPV6_SIZE);
5864
5865 cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6dst,
5866 rule->tuples.dst_ip, IPV6_SIZE);
5867 if (rule->unused_tuple & BIT(INNER_DST_IP))
5868 memset(fs->m_u.tcp_ip6_spec.ip6dst, 0,
5869 sizeof(int) * IPV6_SIZE);
5870 else
5871 cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6dst,
5872 rule->tuples_mask.dst_ip, IPV6_SIZE);
5873
5874 fs->h_u.tcp_ip6_spec.psrc = cpu_to_be16(rule->tuples.src_port);
5875 fs->m_u.tcp_ip6_spec.psrc =
5876 rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5877 0 : cpu_to_be16(rule->tuples_mask.src_port);
5878
5879 fs->h_u.tcp_ip6_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
5880 fs->m_u.tcp_ip6_spec.pdst =
5881 rule->unused_tuple & BIT(INNER_DST_PORT) ?
5882 0 : cpu_to_be16(rule->tuples_mask.dst_port);
5883
5884 break;
5885 case IPV6_USER_FLOW:
5886 cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6src,
5887 rule->tuples.src_ip, IPV6_SIZE);
5888 if (rule->unused_tuple & BIT(INNER_SRC_IP))
5889 memset(fs->m_u.usr_ip6_spec.ip6src, 0,
5890 sizeof(int) * IPV6_SIZE);
5891 else
5892 cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6src,
5893 rule->tuples_mask.src_ip, IPV6_SIZE);
5894
5895 cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6dst,
5896 rule->tuples.dst_ip, IPV6_SIZE);
5897 if (rule->unused_tuple & BIT(INNER_DST_IP))
5898 memset(fs->m_u.usr_ip6_spec.ip6dst, 0,
5899 sizeof(int) * IPV6_SIZE);
5900 else
5901 cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6dst,
5902 rule->tuples_mask.dst_ip, IPV6_SIZE);
5903
5904 fs->h_u.usr_ip6_spec.l4_proto = rule->tuples.ip_proto;
5905 fs->m_u.usr_ip6_spec.l4_proto =
5906 rule->unused_tuple & BIT(INNER_IP_PROTO) ?
5907 0 : rule->tuples_mask.ip_proto;
5908
5909 break;
5910 case ETHER_FLOW:
5911 ether_addr_copy(fs->h_u.ether_spec.h_source,
5912 rule->tuples.src_mac);
5913 if (rule->unused_tuple & BIT(INNER_SRC_MAC))
5914 eth_zero_addr(fs->m_u.ether_spec.h_source);
5915 else
5916 ether_addr_copy(fs->m_u.ether_spec.h_source,
5917 rule->tuples_mask.src_mac);
5918
5919 ether_addr_copy(fs->h_u.ether_spec.h_dest,
5920 rule->tuples.dst_mac);
5921 if (rule->unused_tuple & BIT(INNER_DST_MAC))
5922 eth_zero_addr(fs->m_u.ether_spec.h_dest);
5923 else
5924 ether_addr_copy(fs->m_u.ether_spec.h_dest,
5925 rule->tuples_mask.dst_mac);
5926
5927 fs->h_u.ether_spec.h_proto =
5928 cpu_to_be16(rule->tuples.ether_proto);
5929 fs->m_u.ether_spec.h_proto =
5930 rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
5931 0 : cpu_to_be16(rule->tuples_mask.ether_proto);
5932
5933 break;
5934 default:
5935 spin_unlock_bh(&hdev->fd_rule_lock);
5936 return -EOPNOTSUPP;
5937 }
5938
5939 if (fs->flow_type & FLOW_EXT) {
5940 fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
5941 fs->m_ext.vlan_tci =
5942 rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
5943 cpu_to_be16(VLAN_VID_MASK) :
5944 cpu_to_be16(rule->tuples_mask.vlan_tag1);
5945 }
5946
5947 if (fs->flow_type & FLOW_MAC_EXT) {
5948 ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
5949 if (rule->unused_tuple & BIT(INNER_DST_MAC))
5950 eth_zero_addr(fs->m_ext.h_dest);
5951 else
5952 ether_addr_copy(fs->m_ext.h_dest,
5953 rule->tuples_mask.dst_mac);
5954 }
5955
5956 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5957 fs->ring_cookie = RX_CLS_FLOW_DISC;
5958 } else {
5959 u64 vf_id;
5960
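/* encode the destination as ethtool expects: queue id in the low
 * bits, vf id shifted into the upper 32 bits of the ring cookie
 */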
5961 fs->ring_cookie = rule->queue_id;
5962 vf_id = rule->vf_id;
5963 vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
5964 fs->ring_cookie |= vf_id;
5965 }
5966
5967 spin_unlock_bh(&hdev->fd_rule_lock);
5968
5969 return 0;
5970 }
5971
5972 static int hclge_get_all_rules(struct hnae3_handle *handle,
5973 struct ethtool_rxnfc *cmd, u32 *rule_locs)
5974 {
5975 struct hclge_vport *vport = hclge_get_vport(handle);
5976 struct hclge_dev *hdev = vport->back;
5977 struct hclge_fd_rule *rule;
5978 struct hlist_node *node2;
5979 int cnt = 0;
5980
5981 if (!hnae3_dev_fd_supported(hdev))
5982 return -EOPNOTSUPP;
5983
5984 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
5985
5986 spin_lock_bh(&hdev->fd_rule_lock);
5987 hlist_for_each_entry_safe(rule, node2,
5988 &hdev->fd_rule_list, rule_node) {
5989 if (cnt == cmd->rule_cnt) {
5990 spin_unlock_bh(&hdev->fd_rule_lock);
5991 return -EMSGSIZE;
5992 }
5993
5994 rule_locs[cnt] = rule->location;
5995 cnt++;
5996 }
5997
5998 spin_unlock_bh(&hdev->fd_rule_lock);
5999
6000 cmd->rule_cnt = cnt;
6001
6002 return 0;
6003 }
6004
6005 static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
6006 struct hclge_fd_rule_tuples *tuples)
6007 {
6008 #define flow_ip6_src fkeys->addrs.v6addrs.src.in6_u.u6_addr32
6009 #define flow_ip6_dst fkeys->addrs.v6addrs.dst.in6_u.u6_addr32
6010
6011 tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
6012 tuples->ip_proto = fkeys->basic.ip_proto;
6013 tuples->dst_port = be16_to_cpu(fkeys->ports.dst);
6014
6015 if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
6016 tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
6017 tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
6018 } else {
6019 int i;
6020
6021 for (i = 0; i < IPV6_SIZE; i++) {
6022 tuples->src_ip[i] = be32_to_cpu(flow_ip6_src[i]);
6023 tuples->dst_ip[i] = be32_to_cpu(flow_ip6_dst[i]);
6024 }
6025 }
6026 }
6027
6028 /* traverse all rules, check whether an existing rule has the same tuples */
6029 static struct hclge_fd_rule *
6030 hclge_fd_search_flow_keys(struct hclge_dev *hdev,
6031 const struct hclge_fd_rule_tuples *tuples)
6032 {
6033 struct hclge_fd_rule *rule = NULL;
6034 struct hlist_node *node;
6035
6036 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6037 if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
6038 return rule;
6039 }
6040
6041 return NULL;
6042 }
6043
6044 static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
6045 struct hclge_fd_rule *rule)
6046 {
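/* arfs rules only match on the IP addresses, destination port and
 * protocol; MAC, VLAN, TOS and source port tuples are marked unused
 */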
6047 rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
6048 BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
6049 BIT(INNER_SRC_PORT);
6050 rule->action = 0;
6051 rule->vf_id = 0;
6052 rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
6053 if (tuples->ether_proto == ETH_P_IP) {
6054 if (tuples->ip_proto == IPPROTO_TCP)
6055 rule->flow_type = TCP_V4_FLOW;
6056 else
6057 rule->flow_type = UDP_V4_FLOW;
6058 } else {
6059 if (tuples->ip_proto == IPPROTO_TCP)
6060 rule->flow_type = TCP_V6_FLOW;
6061 else
6062 rule->flow_type = UDP_V6_FLOW;
6063 }
6064 memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
6065 memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
6066 }
6067
6068 static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
6069 u16 flow_id, struct flow_keys *fkeys)
6070 {
6071 struct hclge_vport *vport = hclge_get_vport(handle);
6072 struct hclge_fd_rule_tuples new_tuples;
6073 struct hclge_dev *hdev = vport->back;
6074 struct hclge_fd_rule *rule;
6075 u16 tmp_queue_id;
6076 u16 bit_id;
6077 int ret;
6078
6079 if (!hnae3_dev_fd_supported(hdev))
6080 return -EOPNOTSUPP;
6081
6082 memset(&new_tuples, 0, sizeof(new_tuples));
6083 hclge_fd_get_flow_tuples(fkeys, &new_tuples);
6084
6085 spin_lock_bh(&hdev->fd_rule_lock);
6086
6087 /* when a fd rule added by the user already exists,
6088  * arfs should not work
6089  */
6090 if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) {
6091 spin_unlock_bh(&hdev->fd_rule_lock);
6092
6093 return -EOPNOTSUPP;
6094 }
6095
6096 /* check whether a flow director filter already exists for this flow:
6097  * if not, create a new filter for it;
6098  * if a filter exists with a different queue id, modify the filter;
6099  * if a filter exists with the same queue id, do nothing
6100  */
6101 rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
6102 if (!rule) {
6103 bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
6104 if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
6105 spin_unlock_bh(&hdev->fd_rule_lock);
6106
6107 return -ENOSPC;
6108 }
6109
6110 rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
6111 if (!rule) {
6112 spin_unlock_bh(&hdev->fd_rule_lock);
6113
6114 return -ENOMEM;
6115 }
6116
6117 set_bit(bit_id, hdev->fd_bmap);
6118 rule->location = bit_id;
6119 rule->flow_id = flow_id;
6120 rule->queue_id = queue_id;
6121 hclge_fd_build_arfs_rule(&new_tuples, rule);
6122 ret = hclge_fd_config_rule(hdev, rule);
6123
6124 spin_unlock_bh(&hdev->fd_rule_lock);
6125
6126 if (ret)
6127 return ret;
6128
6129 return rule->location;
6130 }
6131
6132 spin_unlock_bh(&hdev->fd_rule_lock);
6133
6134 if (rule->queue_id == queue_id)
6135 return rule->location;
6136
6137 tmp_queue_id = rule->queue_id;
6138 rule->queue_id = queue_id;
6139 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
6140 if (ret) {
6141 rule->queue_id = tmp_queue_id;
6142 return ret;
6143 }
6144
6145 return rule->location;
6146 }
6147
6148 static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
6149 {
6150 #ifdef CONFIG_RFS_ACCEL
6151 struct hnae3_handle *handle = &hdev->vport[0].nic;
6152 struct hclge_fd_rule *rule;
6153 struct hlist_node *node;
6154 HLIST_HEAD(del_list);
6155
6156 spin_lock_bh(&hdev->fd_rule_lock);
6157 if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
6158 spin_unlock_bh(&hdev->fd_rule_lock);
6159 return;
6160 }
6161 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6162 if (rps_may_expire_flow(handle->netdev, rule->queue_id,
6163 rule->flow_id, rule->location)) {
6164 hlist_del_init(&rule->rule_node);
6165 hlist_add_head(&rule->rule_node, &del_list);
6166 hdev->hclge_fd_rule_num--;
6167 clear_bit(rule->location, hdev->fd_bmap);
6168 }
6169 }
6170 spin_unlock_bh(&hdev->fd_rule_lock);
6171
6172 hlist_for_each_entry_safe(rule, node, &del_list, rule_node) {
6173 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
6174 rule->location, NULL, false);
6175 kfree(rule);
6176 }
6177 #endif
6178 }
6179
6180 static void hclge_clear_arfs_rules(struct hnae3_handle *handle)
6181 {
6182 #ifdef CONFIG_RFS_ACCEL
6183 struct hclge_vport *vport = hclge_get_vport(handle);
6184 struct hclge_dev *hdev = vport->back;
6185
6186 if (hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE)
6187 hclge_del_all_fd_entries(handle, true);
6188 #endif
6189 }
6190
6191 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
6192 {
6193 struct hclge_vport *vport = hclge_get_vport(handle);
6194 struct hclge_dev *hdev = vport->back;
6195
6196 return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
6197 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
6198 }
6199
6200 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
6201 {
6202 struct hclge_vport *vport = hclge_get_vport(handle);
6203 struct hclge_dev *hdev = vport->back;
6204
6205 return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
6206 }
6207
6208 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
6209 {
6210 struct hclge_vport *vport = hclge_get_vport(handle);
6211 struct hclge_dev *hdev = vport->back;
6212
6213 return hdev->rst_stats.hw_reset_done_cnt;
6214 }
6215
6216 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
6217 {
6218 struct hclge_vport *vport = hclge_get_vport(handle);
6219 struct hclge_dev *hdev = vport->back;
6220 bool clear;
6221
6222 hdev->fd_en = enable;
6223 clear = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
6224 if (!enable)
6225 hclge_del_all_fd_entries(handle, clear);
6226 else
6227 hclge_restore_fd_entries(handle);
6228 }
6229
6230 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
6231 {
6232 struct hclge_desc desc;
6233 struct hclge_config_mac_mode_cmd *req =
6234 (struct hclge_config_mac_mode_cmd *)desc.data;
6235 u32 loop_en = 0;
6236 int ret;
6237
6238 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
6239
6240 if (enable) {
6241 hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, 1U);
6242 hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, 1U);
6243 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, 1U);
6244 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, 1U);
6245 hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, 1U);
6246 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, 1U);
6247 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, 1U);
6248 hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, 1U);
6249 hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, 1U);
6250 hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, 1U);
6251 }
6252
6253 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
6254
6255 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6256 if (ret)
6257 dev_err(&hdev->pdev->dev,
6258 "mac enable fail, ret =%d.\n", ret);
6259 }
6260
6261 static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid,
6262 u8 switch_param, u8 param_mask)
6263 {
6264 struct hclge_mac_vlan_switch_cmd *req;
6265 struct hclge_desc desc;
6266 u32 func_id;
6267 int ret;
6268
6269 func_id = hclge_get_port_number(HOST_PORT, 0, vfid, 0);
6270 req = (struct hclge_mac_vlan_switch_cmd *)desc.data;
6271
6272 /* read the current switch parameter configuration */
6273 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_SWITCH_PARAM,
6274 true);
6275 req->roce_sel = HCLGE_MAC_VLAN_NIC_SEL;
6276 req->func_id = cpu_to_le32(func_id);
6277
6278 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6279 if (ret) {
6280 dev_err(&hdev->pdev->dev,
6281 "read mac vlan switch parameter fail, ret = %d\n", ret);
6282 return ret;
6283 }
6284
6285 /* modify and write the new switch parameter configuration */
6286 hclge_cmd_reuse_desc(&desc, false);
6287 req->switch_param = (req->switch_param & param_mask) | switch_param;
6288 req->param_mask = param_mask;
6289
6290 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6291 if (ret)
6292 dev_err(&hdev->pdev->dev,
6293 "set mac vlan switch parameter fail, ret = %d\n", ret);
6294 return ret;
6295 }
6296
6297 static void hclge_phy_link_status_wait(struct hclge_dev *hdev,
6298 int link_ret)
6299 {
6300 #define HCLGE_PHY_LINK_STATUS_NUM 200
6301
6302 struct phy_device *phydev = hdev->hw.mac.phydev;
6303 int i = 0;
6304 int ret;
6305
6306 do {
6307 ret = phy_read_status(phydev);
6308 if (ret) {
6309 dev_err(&hdev->pdev->dev,
6310 "phy update link status fail, ret = %d\n", ret);
6311 return;
6312 }
6313
6314 if (phydev->link == link_ret)
6315 break;
6316
6317 msleep(HCLGE_LINK_STATUS_MS);
6318 } while (++i < HCLGE_PHY_LINK_STATUS_NUM);
6319 }
6320
6321 static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret)
6322 {
6323 #define HCLGE_MAC_LINK_STATUS_NUM 100
6324
6325 int i = 0;
6326 int ret;
6327
6328 do {
6329 ret = hclge_get_mac_link_status(hdev);
6330 if (ret < 0)
6331 return ret;
6332 else if (ret == link_ret)
6333 return 0;
6334
6335 msleep(HCLGE_LINK_STATUS_MS);
6336 } while (++i < HCLGE_MAC_LINK_STATUS_NUM);
6337 return -EBUSY;
6338 }
6339
6340 static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en,
6341 bool is_phy)
6342 {
6343 #define HCLGE_LINK_STATUS_DOWN 0
6344 #define HCLGE_LINK_STATUS_UP 1
6345
6346 int link_ret;
6347
6348 link_ret = en ? HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
6349
6350 if (is_phy)
6351 hclge_phy_link_status_wait(hdev, link_ret);
6352
6353 return hclge_mac_link_status_wait(hdev, link_ret);
6354 }
6355
6356 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
6357 {
6358 struct hclge_config_mac_mode_cmd *req;
6359 struct hclge_desc desc;
6360 u32 loop_en;
6361 int ret;
6362
6363 req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
6364 /* 1 Read out the MAC mode config first */
6365 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
6366 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6367 if (ret) {
6368 dev_err(&hdev->pdev->dev,
6369 "mac loopback get fail, ret =%d.\n", ret);
6370 return ret;
6371 }
6372
6373 /* 2 Then set up the loopback flag */
6374 loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
6375 hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
6376 hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, en ? 1 : 0);
6377 hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, en ? 1 : 0);
6378
6379 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
6380
6381 /* 3 Config mac work mode with the loopback flag
6382  * and its original configuration parameters
6383  */
6384 hclge_cmd_reuse_desc(&desc, false);
6385 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6386 if (ret)
6387 dev_err(&hdev->pdev->dev,
6388 "mac loopback set fail, ret =%d.\n", ret);
6389 return ret;
6390 }
6391
6392 static int hclge_cfg_serdes_loopback(struct hclge_dev *hdev, bool en,
6393 enum hnae3_loop loop_mode)
6394 {
6395 #define HCLGE_SERDES_RETRY_MS 10
6396 #define HCLGE_SERDES_RETRY_NUM 100
6397
6398 struct hclge_serdes_lb_cmd *req;
6399 struct hclge_desc desc;
6400 int ret, i = 0;
6401 u8 loop_mode_b;
6402
6403 req = (struct hclge_serdes_lb_cmd *)desc.data;
6404 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);
6405
6406 switch (loop_mode) {
6407 case HNAE3_LOOP_SERIAL_SERDES:
6408 loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
6409 break;
6410 case HNAE3_LOOP_PARALLEL_SERDES:
6411 loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
6412 break;
6413 default:
6414 dev_err(&hdev->pdev->dev,
6415 "unsupported serdes loopback mode %d\n", loop_mode);
6416 return -ENOTSUPP;
6417 }
6418
6419 if (en) {
6420 req->enable = loop_mode_b;
6421 req->mask = loop_mode_b;
6422 } else {
6423 req->mask = loop_mode_b;
6424 }
6425
6426 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6427 if (ret) {
6428 dev_err(&hdev->pdev->dev,
6429 "serdes loopback set fail, ret = %d\n", ret);
6430 return ret;
6431 }
6432
6433 do {
6434 msleep(HCLGE_SERDES_RETRY_MS);
6435 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK,
6436 true);
6437 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6438 if (ret) {
6439 dev_err(&hdev->pdev->dev,
6440 "serdes loopback get, ret = %d\n", ret);
6441 return ret;
6442 }
6443 } while (++i < HCLGE_SERDES_RETRY_NUM &&
6444 !(req->result & HCLGE_CMD_SERDES_DONE_B));
6445
6446 if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
6447 dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n");
6448 return -EBUSY;
6449 } else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
6450 dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
6451 return -EIO;
6452 }
6453 return ret;
6454 }
6455
6456 static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
6457 enum hnae3_loop loop_mode)
6458 {
6459 int ret;
6460
6461 ret = hclge_cfg_serdes_loopback(hdev, en, loop_mode);
6462 if (ret)
6463 return ret;
6464
6465 hclge_cfg_mac_mode(hdev, en);
6466
6467 ret = hclge_mac_phy_link_status_wait(hdev, en, false);
6468 if (ret)
6469 dev_err(&hdev->pdev->dev,
6470 "serdes loopback config mac mode timeout\n");
6471
6472 return ret;
6473 }
6474
6475 static int hclge_enable_phy_loopback(struct hclge_dev *hdev,
6476 struct phy_device *phydev)
6477 {
6478 int ret;
6479
6480 if (!phydev->suspended) {
6481 ret = phy_suspend(phydev);
6482 if (ret)
6483 return ret;
6484 }
6485
6486 ret = phy_resume(phydev);
6487 if (ret)
6488 return ret;
6489
6490 return phy_loopback(phydev, true);
6491 }
6492
6493 static int hclge_disable_phy_loopback(struct hclge_dev *hdev,
6494 struct phy_device *phydev)
6495 {
6496 int ret;
6497
6498 ret = phy_loopback(phydev, false);
6499 if (ret)
6500 return ret;
6501
6502 return phy_suspend(phydev);
6503 }
6504
6505 static int hclge_set_phy_loopback(struct hclge_dev *hdev, bool en)
6506 {
6507 struct phy_device *phydev = hdev->hw.mac.phydev;
6508 int ret;
6509
6510 if (!phydev)
6511 return -ENOTSUPP;
6512
6513 if (en)
6514 ret = hclge_enable_phy_loopback(hdev, phydev);
6515 else
6516 ret = hclge_disable_phy_loopback(hdev, phydev);
6517 if (ret) {
6518 dev_err(&hdev->pdev->dev,
6519 "set phy loopback fail, ret = %d\n", ret);
6520 return ret;
6521 }
6522
6523 hclge_cfg_mac_mode(hdev, en);
6524
6525 ret = hclge_mac_phy_link_status_wait(hdev, en, true);
6526 if (ret)
6527 dev_err(&hdev->pdev->dev,
6528 "phy loopback config mac mode timeout\n");
6529
6530 return ret;
6531 }
6532
6533 static int hclge_tqp_enable(struct hclge_dev *hdev, unsigned int tqp_id,
6534 int stream_id, bool enable)
6535 {
6536 struct hclge_desc desc;
6537 struct hclge_cfg_com_tqp_queue_cmd *req =
6538 (struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
6539 int ret;
6540
6541 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
6542 req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
6543 req->stream_id = cpu_to_le16(stream_id);
6544 if (enable)
6545 req->enable |= 1U << HCLGE_TQP_ENABLE_B;
6546
6547 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6548 if (ret)
6549 dev_err(&hdev->pdev->dev,
6550 "Tqp enable fail, status =%d.\n", ret);
6551 return ret;
6552 }
6553
6554 static int hclge_set_loopback(struct hnae3_handle *handle,
6555 enum hnae3_loop loop_mode, bool en)
6556 {
6557 struct hclge_vport *vport = hclge_get_vport(handle);
6558 struct hnae3_knic_private_info *kinfo;
6559 struct hclge_dev *hdev = vport->back;
6560 int i, ret;
6561
6562 /* Loopback can be enabled in three places: SSU, MAC and serdes. By
6563  * default, SSU loopback is enabled, so if the SMAC and the DMAC are
6564  * the same, the packets are looped back in the SSU. If SSU loopback
6565  * is disabled, packets can reach the MAC even if SMAC is the same as DMAC.
6566  */
6567 if (hdev->pdev->revision >= 0x21) {
6568 u8 switch_param = en ? 0 : BIT(HCLGE_SWITCH_ALW_LPBK_B);
6569
6570 ret = hclge_config_switch_param(hdev, PF_VPORT_ID, switch_param,
6571 HCLGE_SWITCH_ALW_LPBK_MASK);
6572 if (ret)
6573 return ret;
6574 }
6575
6576 switch (loop_mode) {
6577 case HNAE3_LOOP_APP:
6578 ret = hclge_set_app_loopback(hdev, en);
6579 break;
6580 case HNAE3_LOOP_SERIAL_SERDES:
6581 case HNAE3_LOOP_PARALLEL_SERDES:
6582 ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
6583 break;
6584 case HNAE3_LOOP_PHY:
6585 ret = hclge_set_phy_loopback(hdev, en);
6586 break;
6587 default:
6588 ret = -ENOTSUPP;
6589 dev_err(&hdev->pdev->dev,
6590 "loop_mode %d is not supported\n", loop_mode);
6591 break;
6592 }
6593
6594 if (ret)
6595 return ret;
6596
6597 kinfo = &vport->nic.kinfo;
6598 for (i = 0; i < kinfo->num_tqps; i++) {
6599 ret = hclge_tqp_enable(hdev, i, 0, en);
6600 if (ret)
6601 return ret;
6602 }
6603
6604 return 0;
6605 }
6606
6607 static int hclge_set_default_loopback(struct hclge_dev *hdev)
6608 {
6609 int ret;
6610
6611 ret = hclge_set_app_loopback(hdev, false);
6612 if (ret)
6613 return ret;
6614
6615 ret = hclge_cfg_serdes_loopback(hdev, false, HNAE3_LOOP_SERIAL_SERDES);
6616 if (ret)
6617 return ret;
6618
6619 return hclge_cfg_serdes_loopback(hdev, false,
6620 HNAE3_LOOP_PARALLEL_SERDES);
6621 }
6622
6623 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
6624 {
6625 struct hclge_vport *vport = hclge_get_vport(handle);
6626 struct hnae3_knic_private_info *kinfo;
6627 struct hnae3_queue *queue;
6628 struct hclge_tqp *tqp;
6629 int i;
6630
6631 kinfo = &vport->nic.kinfo;
6632 for (i = 0; i < kinfo->num_tqps; i++) {
6633 queue = handle->kinfo.tqp[i];
6634 tqp = container_of(queue, struct hclge_tqp, q);
6635 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
6636 }
6637 }
6638
6639 static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
6640 {
6641 struct hclge_vport *vport = hclge_get_vport(handle);
6642 struct hclge_dev *hdev = vport->back;
6643
6644 if (enable) {
6645 hclge_task_schedule(hdev, round_jiffies_relative(HZ));
6646 } else {
6647 /* Set the DOWN flag here to prevent the service task from being
6648  * scheduled again
6649  */
6650 set_bit(HCLGE_STATE_DOWN, &hdev->state);
6651 cancel_delayed_work_sync(&hdev->service_task);
6652 clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
6653 }
6654 }
6655
6656 static int hclge_ae_start(struct hnae3_handle *handle)
6657 {
6658 struct hclge_vport *vport = hclge_get_vport(handle);
6659 struct hclge_dev *hdev = vport->back;
6660
6661 /* mac enable */
6662 hclge_cfg_mac_mode(hdev, true);
6663 clear_bit(HCLGE_STATE_DOWN, &hdev->state);
6664 hdev->hw.mac.link = 0;
6665
6666 /* reset tqp stats */
6667 hclge_reset_tqp_stats(handle);
6668
6669 hclge_mac_start_phy(hdev);
6670
6671 return 0;
6672 }
6673
6674 static void hclge_ae_stop(struct hnae3_handle *handle)
6675 {
6676 struct hclge_vport *vport = hclge_get_vport(handle);
6677 struct hclge_dev *hdev = vport->back;
6678 int i;
6679
6680 set_bit(HCLGE_STATE_DOWN, &hdev->state);
6681
6682 hclge_clear_arfs_rules(handle);
6683
6684 /* If it is not a PF reset, the firmware will disable the MAC,
6685  * so it only needs to stop the phy here.
6686  */
6687 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
6688 hdev->reset_type != HNAE3_FUNC_RESET) {
6689 hclge_mac_stop_phy(hdev);
6690 hclge_update_link_status(hdev);
6691 return;
6692 }
6693
6694 for (i = 0; i < handle->kinfo.num_tqps; i++)
6695 hclge_reset_tqp(handle, i);
6696
6697 hclge_config_mac_tnl_int(hdev, false);
6698
6699 /* Mac disable */
6700 hclge_cfg_mac_mode(hdev, false);
6701
6702 hclge_mac_stop_phy(hdev);
6703
6704 /* reset tqp stats */
6705 hclge_reset_tqp_stats(handle);
6706 hclge_update_link_status(hdev);
6707 }
6708
6709 int hclge_vport_start(struct hclge_vport *vport)
6710 {
6711 set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
6712 vport->last_active_jiffies = jiffies;
6713 return 0;
6714 }
6715
6716 void hclge_vport_stop(struct hclge_vport *vport)
6717 {
6718 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
6719 }
6720
6721 static int hclge_client_start(struct hnae3_handle *handle)
6722 {
6723 struct hclge_vport *vport = hclge_get_vport(handle);
6724
6725 return hclge_vport_start(vport);
6726 }
6727
6728 static void hclge_client_stop(struct hnae3_handle *handle)
6729 {
6730 struct hclge_vport *vport = hclge_get_vport(handle);
6731
6732 hclge_vport_stop(vport);
6733 }
6734
6735 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
6736 u16 cmdq_resp, u8 resp_code,
6737 enum hclge_mac_vlan_tbl_opcode op)
6738 {
6739 struct hclge_dev *hdev = vport->back;
6740
6741 if (cmdq_resp) {
6742 dev_err(&hdev->pdev->dev,
6743 "cmdq execute failed for get_mac_vlan_cmd_status,status=%d.\n",
6744 cmdq_resp);
6745 return -EIO;
6746 }
6747
6748 if (op == HCLGE_MAC_VLAN_ADD) {
6749 if ((!resp_code) || (resp_code == 1)) {
6750 return 0;
6751 } else if (resp_code == HCLGE_ADD_UC_OVERFLOW) {
6752 dev_err(&hdev->pdev->dev,
6753 "add mac addr failed for uc_overflow.\n");
6754 return -ENOSPC;
6755 } else if (resp_code == HCLGE_ADD_MC_OVERFLOW) {
6756 dev_err(&hdev->pdev->dev,
6757 "add mac addr failed for mc_overflow.\n");
6758 return -ENOSPC;
6759 }
6760
6761 dev_err(&hdev->pdev->dev,
6762 "add mac addr failed for undefined, code=%u.\n",
6763 resp_code);
6764 return -EIO;
6765 } else if (op == HCLGE_MAC_VLAN_REMOVE) {
6766 if (!resp_code) {
6767 return 0;
6768 } else if (resp_code == 1) {
6769 dev_dbg(&hdev->pdev->dev,
6770 "remove mac addr failed for miss.\n");
6771 return -ENOENT;
6772 }
6773
6774 dev_err(&hdev->pdev->dev,
6775 "remove mac addr failed for undefined, code=%u.\n",
6776 resp_code);
6777 return -EIO;
6778 } else if (op == HCLGE_MAC_VLAN_LKUP) {
6779 if (!resp_code) {
6780 return 0;
6781 } else if (resp_code == 1) {
6782 dev_dbg(&hdev->pdev->dev,
6783 "lookup mac addr failed for miss.\n");
6784 return -ENOENT;
6785 }
6786
6787 dev_err(&hdev->pdev->dev,
6788 "lookup mac addr failed for undefined, code=%u.\n",
6789 resp_code);
6790 return -EIO;
6791 }
6792
6793 dev_err(&hdev->pdev->dev,
6794 "unknown opcode for get_mac_vlan_cmd_status, opcode=%d.\n", op);
6795
6796 return -EINVAL;
6797 }
6798
6799 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
6800 {
6801 #define HCLGE_VF_NUM_IN_FIRST_DESC 192
6802
6803 unsigned int word_num;
6804 unsigned int bit_num;
6805
6806 if (vfid > 255 || vfid < 0)
6807 return -EIO;
6808
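/* the function id bitmap spans two descriptors: the first 192
 * function ids live in desc[1].data, the remaining ones in desc[2].data
 */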
6809 if (vfid >= 0 && vfid < HCLGE_VF_NUM_IN_FIRST_DESC) {
6810 word_num = vfid / 32;
6811 bit_num = vfid % 32;
6812 if (clr)
6813 desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
6814 else
6815 desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
6816 } else {
6817 word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32;
6818 bit_num = vfid % 32;
6819 if (clr)
6820 desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
6821 else
6822 desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
6823 }
6824
6825 return 0;
6826 }
6827
6828 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
6829 {
6830 #define HCLGE_DESC_NUMBER 3
6831 #define HCLGE_FUNC_NUMBER_PER_DESC 6
6832 int i, j;
6833
6834 for (i = 1; i < HCLGE_DESC_NUMBER; i++)
6835 for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
6836 if (desc[i].data[j])
6837 return false;
6838
6839 return true;
6840 }
6841
6842 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
6843 const u8 *addr, bool is_mc)
6844 {
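/* pack the 6-byte MAC address into a 32-bit high word (bytes 0-3)
 * and a 16-bit low word (bytes 4-5) as the mac_vlan table expects
 */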
6845 const unsigned char *mac_addr = addr;
6846 u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
6847 (mac_addr[0]) | (mac_addr[1] << 8);
6848 u32 low_val = mac_addr[4] | (mac_addr[5] << 8);
6849
6850 hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
6851 if (is_mc) {
6852 hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
6853 hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
6854 }
6855
6856 new_req->mac_addr_hi32 = cpu_to_le32(high_val);
6857 new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
6858 }
6859
6860 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
6861 struct hclge_mac_vlan_tbl_entry_cmd *req)
6862 {
6863 struct hclge_dev *hdev = vport->back;
6864 struct hclge_desc desc;
6865 u8 resp_code;
6866 u16 retval;
6867 int ret;
6868
6869 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
6870
6871 memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6872
6873 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6874 if (ret) {
6875 dev_err(&hdev->pdev->dev,
6876 "del mac addr failed for cmd_send, ret =%d.\n",
6877 ret);
6878 return ret;
6879 }
6880 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
6881 retval = le16_to_cpu(desc.retval);
6882
6883 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
6884 HCLGE_MAC_VLAN_REMOVE);
6885 }
6886
6887 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
6888 struct hclge_mac_vlan_tbl_entry_cmd *req,
6889 struct hclge_desc *desc,
6890 bool is_mc)
6891 {
6892 struct hclge_dev *hdev = vport->back;
6893 u8 resp_code;
6894 u16 retval;
6895 int ret;
6896
6897 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
6898 if (is_mc) {
6899 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6900 memcpy(desc[0].data,
6901 req,
6902 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6903 hclge_cmd_setup_basic_desc(&desc[1],
6904 HCLGE_OPC_MAC_VLAN_ADD,
6905 true);
6906 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6907 hclge_cmd_setup_basic_desc(&desc[2],
6908 HCLGE_OPC_MAC_VLAN_ADD,
6909 true);
6910 ret = hclge_cmd_send(&hdev->hw, desc, 3);
6911 } else {
6912 memcpy(desc[0].data,
6913 req,
6914 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6915 ret = hclge_cmd_send(&hdev->hw, desc, 1);
6916 }
6917 if (ret) {
6918 dev_err(&hdev->pdev->dev,
6919 "lookup mac addr failed for cmd_send, ret =%d.\n",
6920 ret);
6921 return ret;
6922 }
6923 resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
6924 retval = le16_to_cpu(desc[0].retval);
6925
6926 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
6927 HCLGE_MAC_VLAN_LKUP);
6928 }
6929
6930 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
6931 struct hclge_mac_vlan_tbl_entry_cmd *req,
6932 struct hclge_desc *mc_desc)
6933 {
6934 struct hclge_dev *hdev = vport->back;
6935 int cfg_status;
6936 u8 resp_code;
6937 u16 retval;
6938 int ret;
6939
6940 if (!mc_desc) {
6941 struct hclge_desc desc;
6942
6943 hclge_cmd_setup_basic_desc(&desc,
6944 HCLGE_OPC_MAC_VLAN_ADD,
6945 false);
6946 memcpy(desc.data, req,
6947 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6948 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6949 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
6950 retval = le16_to_cpu(desc.retval);
6951
6952 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
6953 resp_code,
6954 HCLGE_MAC_VLAN_ADD);
6955 } else {
6956 hclge_cmd_reuse_desc(&mc_desc[0], false);
6957 mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6958 hclge_cmd_reuse_desc(&mc_desc[1], false);
6959 mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6960 hclge_cmd_reuse_desc(&mc_desc[2], false);
6961 mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
6962 memcpy(mc_desc[0].data, req,
6963 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6964 ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
6965 resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
6966 retval = le16_to_cpu(mc_desc[0].retval);
6967
6968 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
6969 resp_code,
6970 HCLGE_MAC_VLAN_ADD);
6971 }
6972
6973 if (ret) {
6974 dev_err(&hdev->pdev->dev,
6975 "add mac addr failed for cmd_send, ret =%d.\n",
6976 ret);
6977 return ret;
6978 }
6979
6980 return cfg_status;
6981 }
6982
6983 static int hclge_init_umv_space(struct hclge_dev *hdev)
6984 {
6985 u16 allocated_size = 0;
6986 int ret;
6987
6988 ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size,
6989 true);
6990 if (ret)
6991 return ret;
6992
6993 if (allocated_size < hdev->wanted_umv_size)
6994 dev_warn(&hdev->pdev->dev,
6995 "Alloc umv space failed, want %d, get %d\n",
6996 hdev->wanted_umv_size, allocated_size);
6997
6998 mutex_init(&hdev->umv_mutex);
6999 hdev->max_umv_size = allocated_size;
7000
7001 /* divide max_umv_size by (hdev->num_req_vfs + 2), in order to
7002  * preserve some unicast mac vlan table entries shared by the pf and its vfs
7003  */
7004 hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_req_vfs + 2);
7005 hdev->share_umv_size = hdev->priv_umv_size +
7006 hdev->max_umv_size % (hdev->num_req_vfs + 2);
7007
7008 return 0;
7009 }
7010
7011 static int hclge_uninit_umv_space(struct hclge_dev *hdev)
7012 {
7013 int ret;
7014
7015 if (hdev->max_umv_size > 0) {
7016 ret = hclge_set_umv_space(hdev, hdev->max_umv_size, NULL,
7017 false);
7018 if (ret)
7019 return ret;
7020 hdev->max_umv_size = 0;
7021 }
7022 mutex_destroy(&hdev->umv_mutex);
7023
7024 return 0;
7025 }
7026
7027 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
7028 u16 *allocated_size, bool is_alloc)
7029 {
7030 struct hclge_umv_spc_alc_cmd *req;
7031 struct hclge_desc desc;
7032 int ret;
7033
7034 req = (struct hclge_umv_spc_alc_cmd *)desc.data;
7035 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
7036 if (!is_alloc)
7037 hnae3_set_bit(req->allocate, HCLGE_UMV_SPC_ALC_B, 1);
7038
7039 req->space_size = cpu_to_le32(space_size);
7040
7041 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7042 if (ret) {
7043 dev_err(&hdev->pdev->dev,
7044 "%s umv space failed for cmd_send, ret =%d\n",
7045 is_alloc ? "allocate" : "free", ret);
7046 return ret;
7047 }
7048
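/* on allocation the firmware returns the actually granted entry
 * count in the second data word of the response descriptor
 */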
7049 if (is_alloc && allocated_size)
7050 *allocated_size = le32_to_cpu(desc.data[1]);
7051
7052 return 0;
7053 }
7054
7055 static void hclge_reset_umv_space(struct hclge_dev *hdev)
7056 {
7057 struct hclge_vport *vport;
7058 int i;
7059
7060 for (i = 0; i < hdev->num_alloc_vport; i++) {
7061 vport = &hdev->vport[i];
7062 vport->used_umv_num = 0;
7063 }
7064
7065 mutex_lock(&hdev->umv_mutex);
7066 hdev->share_umv_size = hdev->priv_umv_size +
7067 hdev->max_umv_size % (hdev->num_req_vfs + 2);
7068 mutex_unlock(&hdev->umv_mutex);
7069 }
7070
7071 static bool hclge_is_umv_space_full(struct hclge_vport *vport)
7072 {
7073 struct hclge_dev *hdev = vport->back;
7074 bool is_full;
7075
7076 mutex_lock(&hdev->umv_mutex);
7077 is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
7078 hdev->share_umv_size == 0);
7079 mutex_unlock(&hdev->umv_mutex);
7080
7081 return is_full;
7082 }
7083
7084 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
7085 {
7086 struct hclge_dev *hdev = vport->back;
7087
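/* entries above the per-function private quota are accounted
 * against the space shared by the PF and all of its VFs
 */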
7088 mutex_lock(&hdev->umv_mutex);
7089 if (is_free) {
7090 if (vport->used_umv_num > hdev->priv_umv_size)
7091 hdev->share_umv_size++;
7092
7093 if (vport->used_umv_num > 0)
7094 vport->used_umv_num--;
7095 } else {
7096 if (vport->used_umv_num >= hdev->priv_umv_size &&
7097 hdev->share_umv_size > 0)
7098 hdev->share_umv_size--;
7099 vport->used_umv_num++;
7100 }
7101 mutex_unlock(&hdev->umv_mutex);
7102 }
7103
7104 static int hclge_add_uc_addr(struct hnae3_handle *handle,
7105 const unsigned char *addr)
7106 {
7107 struct hclge_vport *vport = hclge_get_vport(handle);
7108
7109 return hclge_add_uc_addr_common(vport, addr);
7110 }
7111
7112 int hclge_add_uc_addr_common(struct hclge_vport *vport,
7113 const unsigned char *addr)
7114 {
7115 struct hclge_dev *hdev = vport->back;
7116 struct hclge_mac_vlan_tbl_entry_cmd req;
7117 struct hclge_desc desc;
7118 u16 egress_port = 0;
7119 int ret;
7120
7121
7122 if (is_zero_ether_addr(addr) ||
7123 is_broadcast_ether_addr(addr) ||
7124 is_multicast_ether_addr(addr)) {
7125 dev_err(&hdev->pdev->dev,
7126 "Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
7127 addr, is_zero_ether_addr(addr),
7128 is_broadcast_ether_addr(addr),
7129 is_multicast_ether_addr(addr));
7130 return -EINVAL;
7131 }
7132
7133 memset(&req, 0, sizeof(req));
7134
7135 hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
7136 HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
7137
7138 req.egress_port = cpu_to_le16(egress_port);
7139
7140 hclge_prepare_mac_addr(&req, addr, false);
7141
7142 /* Look up the mac address in the mac_vlan table, and add
7143  * it if the entry does not exist. Repeated unicast entries
7144  * are not allowed in the mac_vlan table.
7145  */
7146 ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
7147 if (ret == -ENOENT) {
7148 if (!hclge_is_umv_space_full(vport)) {
7149 ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
7150 if (!ret)
7151 hclge_update_umv_space(vport, false);
7152 return ret;
7153 }
7154
7155 dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
7156 hdev->priv_umv_size);
7157
7158 return -ENOSPC;
7159 }
7160
7161 /* check if we just hit the duplicate */
7162 if (!ret) {
7163 dev_warn(&hdev->pdev->dev, "VF %d mac(%pM) exists\n",
7164 vport->vport_id, addr);
7165 return 0;
7166 }
7167
7168 dev_err(&hdev->pdev->dev,
7169 "PF failed to add unicast entry(%pM) in the MAC table\n",
7170 addr);
7171
7172 return ret;
7173 }
7174
7175 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
7176 const unsigned char *addr)
7177 {
7178 struct hclge_vport *vport = hclge_get_vport(handle);
7179
7180 return hclge_rm_uc_addr_common(vport, addr);
7181 }
7182
7183 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
7184 const unsigned char *addr)
7185 {
7186 struct hclge_dev *hdev = vport->back;
7187 struct hclge_mac_vlan_tbl_entry_cmd req;
7188 int ret;
7189
7190
7191 if (is_zero_ether_addr(addr) ||
7192 is_broadcast_ether_addr(addr) ||
7193 is_multicast_ether_addr(addr)) {
7194 dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%pM.\n",
7195 addr);
7196 return -EINVAL;
7197 }
7198
7199 memset(&req, 0, sizeof(req));
7200 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
7201 hclge_prepare_mac_addr(&req, addr, false);
7202 ret = hclge_remove_mac_vlan_tbl(vport, &req);
7203 if (!ret)
7204 hclge_update_umv_space(vport, true);
7205
7206 return ret;
7207 }
7208
7209 static int hclge_add_mc_addr(struct hnae3_handle *handle,
7210 const unsigned char *addr)
7211 {
7212 struct hclge_vport *vport = hclge_get_vport(handle);
7213
7214 return hclge_add_mc_addr_common(vport, addr);
7215 }
7216
7217 int hclge_add_mc_addr_common(struct hclge_vport *vport,
7218 const unsigned char *addr)
7219 {
7220 struct hclge_dev *hdev = vport->back;
7221 struct hclge_mac_vlan_tbl_entry_cmd req;
7222 struct hclge_desc desc[3];
7223 int status;
7224
7225
7226 if (!is_multicast_ether_addr(addr)) {
7227 dev_err(&hdev->pdev->dev,
7228 "Add mc mac err! invalid mac:%pM.\n",
7229 addr);
7230 return -EINVAL;
7231 }
7232 memset(&req, 0, sizeof(req));
7233 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
7234 hclge_prepare_mac_addr(&req, addr, true);
7235 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
7236 if (status) {
7237 /* This mac addr does not exist, add a new entry for it */
7238 memset(desc[0].data, 0, sizeof(desc[0].data));
7239 memset(desc[1].data, 0, sizeof(desc[0].data));
7240 memset(desc[2].data, 0, sizeof(desc[0].data));
7241 }
7242 status = hclge_update_desc_vfid(desc, vport->vport_id, false);
7243 if (status)
7244 return status;
7245 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
7246
7247 if (status == -ENOSPC)
7248 dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
7249
7250 return status;
7251 }
7252
7253 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
7254 const unsigned char *addr)
7255 {
7256 struct hclge_vport *vport = hclge_get_vport(handle);
7257
7258 return hclge_rm_mc_addr_common(vport, addr);
7259 }
7260
7261 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
7262 const unsigned char *addr)
7263 {
7264 struct hclge_dev *hdev = vport->back;
7265 struct hclge_mac_vlan_tbl_entry_cmd req;
7266 enum hclge_cmd_status status;
7267 struct hclge_desc desc[3];
7268
7269
7270 if (!is_multicast_ether_addr(addr)) {
7271 dev_dbg(&hdev->pdev->dev,
7272 "Remove mc mac err! invalid mac:%pM.\n",
7273 addr);
7274 return -EINVAL;
7275 }
7276
7277 memset(&req, 0, sizeof(req));
7278 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
7279 hclge_prepare_mac_addr(&req, addr, true);
7280 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
7281 if (!status) {
7282 /* This mac addr exists, remove this handle's VFID from it */
7283 status = hclge_update_desc_vfid(desc, vport->vport_id, true);
7284 if (status)
7285 return status;
7286
7287 if (hclge_is_all_function_id_zero(desc))
7288 /* All the vfids are zero, so this entry needs to be deleted */
7289 status = hclge_remove_mac_vlan_tbl(vport, &req);
7290 else
7291 /* Not all the vfids are zero, update the vfids */
7292 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
7293
7294 } else {
7295 /* Maybe this mac address is in the mta table, but it cannot be
7296  * deleted here because an mta entry represents an address range
7297  * rather than a specific address. The delete action for all
7298  * entries will take effect in update_mta_status called by
7299  * set_rx_mode.
7300  */
7301 status = 0;
7302 }
7303
7304 return status;
7305 }
7306
7307 void hclge_add_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
7308 enum HCLGE_MAC_ADDR_TYPE mac_type)
7309 {
7310 struct hclge_vport_mac_addr_cfg *mac_cfg;
7311 struct list_head *list;
7312
7313 if (!vport->vport_id)
7314 return;
7315
7316 mac_cfg = kzalloc(sizeof(*mac_cfg), GFP_KERNEL);
7317 if (!mac_cfg)
7318 return;
7319
7320 mac_cfg->hd_tbl_status = true;
7321 memcpy(mac_cfg->mac_addr, mac_addr, ETH_ALEN);
7322
7323 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7324 &vport->uc_mac_list : &vport->mc_mac_list;
7325
7326 list_add_tail(&mac_cfg->node, list);
7327 }
7328
7329 void hclge_rm_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
7330 bool is_write_tbl,
7331 enum HCLGE_MAC_ADDR_TYPE mac_type)
7332 {
7333 struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
7334 struct list_head *list;
7335 bool uc_flag, mc_flag;
7336
7337 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7338 &vport->uc_mac_list : &vport->mc_mac_list;
7339
7340 uc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_UC;
7341 mc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_MC;
7342
7343 list_for_each_entry_safe(mac_cfg, tmp, list, node) {
7344 if (ether_addr_equal(mac_cfg->mac_addr, mac_addr)) {
7345 if (uc_flag && mac_cfg->hd_tbl_status)
7346 hclge_rm_uc_addr_common(vport, mac_addr);
7347
7348 if (mc_flag && mac_cfg->hd_tbl_status)
7349 hclge_rm_mc_addr_common(vport, mac_addr);
7350
7351 list_del(&mac_cfg->node);
7352 kfree(mac_cfg);
7353 break;
7354 }
7355 }
7356 }
7357
7358 void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
7359 enum HCLGE_MAC_ADDR_TYPE mac_type)
7360 {
7361 struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
7362 struct list_head *list;
7363
7364 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7365 &vport->uc_mac_list : &vport->mc_mac_list;
7366
7367 list_for_each_entry_safe(mac_cfg, tmp, list, node) {
7368 if (mac_type == HCLGE_MAC_ADDR_UC && mac_cfg->hd_tbl_status)
7369 hclge_rm_uc_addr_common(vport, mac_cfg->mac_addr);
7370
7371 if (mac_type == HCLGE_MAC_ADDR_MC && mac_cfg->hd_tbl_status)
7372 hclge_rm_mc_addr_common(vport, mac_cfg->mac_addr);
7373
7374 mac_cfg->hd_tbl_status = false;
7375 if (is_del_list) {
7376 list_del(&mac_cfg->node);
7377 kfree(mac_cfg);
7378 }
7379 }
7380 }
7381
7382 void hclge_uninit_vport_mac_table(struct hclge_dev *hdev)
7383 {
7384 struct hclge_vport_mac_addr_cfg *mac, *tmp;
7385 struct hclge_vport *vport;
7386 int i;
7387
7388 mutex_lock(&hdev->vport_cfg_mutex);
7389 for (i = 0; i < hdev->num_alloc_vport; i++) {
7390 vport = &hdev->vport[i];
7391 list_for_each_entry_safe(mac, tmp, &vport->uc_mac_list, node) {
7392 list_del(&mac->node);
7393 kfree(mac);
7394 }
7395
7396 list_for_each_entry_safe(mac, tmp, &vport->mc_mac_list, node) {
7397 list_del(&mac->node);
7398 kfree(mac);
7399 }
7400 }
7401 mutex_unlock(&hdev->vport_cfg_mutex);
7402 }
7403
7404 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
7405 u16 cmdq_resp, u8 resp_code)
7406 {
7407 #define HCLGE_ETHERTYPE_SUCCESS_ADD 0
7408 #define HCLGE_ETHERTYPE_ALREADY_ADD 1
7409 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW 2
7410 #define HCLGE_ETHERTYPE_KEY_CONFLICT 3
7411
7412 int return_status;
7413
7414 if (cmdq_resp) {
7415 dev_err(&hdev->pdev->dev,
7416 "cmdq execute failed for get_mac_ethertype_cmd_status, status=%d.\n",
7417 cmdq_resp);
7418 return -EIO;
7419 }
7420
7421 switch (resp_code) {
7422 case HCLGE_ETHERTYPE_SUCCESS_ADD:
7423 case HCLGE_ETHERTYPE_ALREADY_ADD:
7424 return_status = 0;
7425 break;
7426 case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
7427 dev_err(&hdev->pdev->dev,
7428 "add mac ethertype failed for manager table overflow.\n");
7429 return_status = -EIO;
7430 break;
7431 case HCLGE_ETHERTYPE_KEY_CONFLICT:
7432 dev_err(&hdev->pdev->dev,
7433 "add mac ethertype failed for key conflict.\n");
7434 return_status = -EIO;
7435 break;
7436 default:
7437 dev_err(&hdev->pdev->dev,
7438 "add mac ethertype failed for undefined, code=%d.\n",
7439 resp_code);
7440 return_status = -EIO;
7441 }
7442
7443 return return_status;
7444 }
7445
7446 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
7447 const struct hclge_mac_mgr_tbl_entry_cmd *req)
7448 {
7449 struct hclge_desc desc;
7450 u8 resp_code;
7451 u16 retval;
7452 int ret;
7453
7454 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
7455 memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
7456
7457 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7458 if (ret) {
7459 dev_err(&hdev->pdev->dev,
7460 "add mac ethertype failed for cmd_send, ret =%d.\n",
7461 ret);
7462 return ret;
7463 }
7464
7465 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
7466 retval = le16_to_cpu(desc.retval);
7467
7468 return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
7469 }
7470
7471 static int init_mgr_tbl(struct hclge_dev *hdev)
7472 {
7473 int ret;
7474 int i;
7475
7476 for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
7477 ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
7478 if (ret) {
7479 dev_err(&hdev->pdev->dev,
7480 "add mac ethertype failed, ret =%d.\n",
7481 ret);
7482 return ret;
7483 }
7484 }
7485
7486 return 0;
7487 }
7488
7489 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
7490 {
7491 struct hclge_vport *vport = hclge_get_vport(handle);
7492 struct hclge_dev *hdev = vport->back;
7493
7494 ether_addr_copy(p, hdev->hw.mac.mac_addr);
7495 }
7496
7497 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
7498 bool is_first)
7499 {
7500 const unsigned char *new_addr = (const unsigned char *)p;
7501 struct hclge_vport *vport = hclge_get_vport(handle);
7502 struct hclge_dev *hdev = vport->back;
7503 int ret;
7504
7505
7506 if (is_zero_ether_addr(new_addr) ||
7507 is_broadcast_ether_addr(new_addr) ||
7508 is_multicast_ether_addr(new_addr)) {
7509 dev_err(&hdev->pdev->dev,
7510 "Change uc mac err! invalid mac:%pM.\n",
7511 new_addr);
7512 return -EINVAL;
7513 }
7514
7515 if ((!is_first || is_kdump_kernel()) &&
7516 hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr))
7517 dev_warn(&hdev->pdev->dev,
7518 "remove old uc mac address fail.\n");
7519
7520 ret = hclge_add_uc_addr(handle, new_addr);
7521 if (ret) {
7522 dev_err(&hdev->pdev->dev,
7523 "add uc mac address fail, ret =%d.\n",
7524 ret);
7525
7526 if (!is_first &&
7527 hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr))
7528 dev_err(&hdev->pdev->dev,
7529 "restore uc mac address fail.\n");
7530
7531 return -EIO;
7532 }
7533
7534 ret = hclge_pause_addr_cfg(hdev, new_addr);
7535 if (ret) {
7536 dev_err(&hdev->pdev->dev,
7537 "configure mac pause address fail, ret =%d.\n",
7538 ret);
7539 return -EIO;
7540 }
7541
7542 ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
7543
7544 return 0;
7545 }
7546
7547 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
7548 int cmd)
7549 {
7550 struct hclge_vport *vport = hclge_get_vport(handle);
7551 struct hclge_dev *hdev = vport->back;
7552
7553 if (!hdev->hw.mac.phydev)
7554 return -EOPNOTSUPP;
7555
7556 return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
7557 }
7558
7559 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
7560 u8 fe_type, bool filter_en, u8 vf_id)
7561 {
7562 struct hclge_vlan_filter_ctrl_cmd *req;
7563 struct hclge_desc desc;
7564 int ret;
7565
7566 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false);
7567
7568 req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
7569 req->vlan_type = vlan_type;
7570 req->vlan_fe = filter_en ? fe_type : 0;
7571 req->vf_id = vf_id;
7572
7573 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7574 if (ret)
7575 dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n",
7576 ret);
7577
7578 return ret;
7579 }
7580
7581 #define HCLGE_FILTER_TYPE_VF 0
7582 #define HCLGE_FILTER_TYPE_PORT 1
7583 #define HCLGE_FILTER_FE_EGRESS_V1_B BIT(0)
7584 #define HCLGE_FILTER_FE_NIC_INGRESS_B BIT(0)
7585 #define HCLGE_FILTER_FE_NIC_EGRESS_B BIT(1)
7586 #define HCLGE_FILTER_FE_ROCE_INGRESS_B BIT(2)
7587 #define HCLGE_FILTER_FE_ROCE_EGRESS_B BIT(3)
7588 #define HCLGE_FILTER_FE_EGRESS (HCLGE_FILTER_FE_NIC_EGRESS_B \
7589 | HCLGE_FILTER_FE_ROCE_EGRESS_B)
7590 #define HCLGE_FILTER_FE_INGRESS (HCLGE_FILTER_FE_NIC_INGRESS_B \
7591 | HCLGE_FILTER_FE_ROCE_INGRESS_B)
7592
7593 static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
7594 {
7595 struct hclge_vport *vport = hclge_get_vport(handle);
7596 struct hclge_dev *hdev = vport->back;
7597
7598 if (hdev->pdev->revision >= 0x21) {
7599 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
7600 HCLGE_FILTER_FE_EGRESS, enable, 0);
7601 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
7602 HCLGE_FILTER_FE_INGRESS, enable, 0);
7603 } else {
7604 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
7605 HCLGE_FILTER_FE_EGRESS_V1_B, enable,
7606 0);
7607 }
7608 if (enable)
7609 handle->netdev_flags |= HNAE3_VLAN_FLTR;
7610 else
7611 handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
7612 }
7613
7614 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
7615 bool is_kill, u16 vlan,
7616 __be16 proto)
7617 {
7618 #define HCLGE_MAX_VF_BYTES 16
7619 struct hclge_vlan_filter_vf_cfg_cmd *req0;
7620 struct hclge_vlan_filter_vf_cfg_cmd *req1;
7621 struct hclge_desc desc[2];
7622 u8 vf_byte_val;
7623 u8 vf_byte_off;
7624 int ret;
7625
7626 /* if the vf vlan table is full, the firmware closes the vf vlan
7627  * filter, so it is pointless to add a new vlan id to it
7628  */
7629 if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill)
7630 return 0;
7631
7632 hclge_cmd_setup_basic_desc(&desc[0],
7633 HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
7634 hclge_cmd_setup_basic_desc(&desc[1],
7635 HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
7636
7637 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7638
7639 vf_byte_off = vfid / 8;
7640 vf_byte_val = 1 << (vfid % 8);
7641
7642 req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
7643 req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
7644
7645 req0->vlan_id = cpu_to_le16(vlan);
7646 req0->vlan_cfg = is_kill;
7647
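/* the vf bitmap covers up to 256 functions and is split across the
 * two descriptors, HCLGE_MAX_VF_BYTES (16) bytes in each
 */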
7648 if (vf_byte_off < HCLGE_MAX_VF_BYTES)
7649 req0->vf_bitmap[vf_byte_off] = vf_byte_val;
7650 else
7651 req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
7652
7653 ret = hclge_cmd_send(&hdev->hw, desc, 2);
7654 if (ret) {
7655 dev_err(&hdev->pdev->dev,
7656 "Send vf vlan command fail, ret =%d.\n",
7657 ret);
7658 return ret;
7659 }
7660
7661 if (!is_kill) {
7662 #define HCLGE_VF_VLAN_NO_ENTRY 2
7663 if (!req0->resp_code || req0->resp_code == 1)
7664 return 0;
7665
7666 if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
7667 set_bit(vfid, hdev->vf_vlan_full);
7668 dev_warn(&hdev->pdev->dev,
7669 "vf vlan table is full, vf vlan filter is disabled\n");
7670 return 0;
7671 }
7672
7673 dev_err(&hdev->pdev->dev,
7674 "Add vf vlan filter fail, ret =%d.\n",
7675 req0->resp_code);
7676 } else {
7677 #define HCLGE_VF_VLAN_DEL_NO_FOUND 1
7678 if (!req0->resp_code)
7679 return 0;
7680
7681 /* vf vlan filter is disabled when the vf vlan table is full,
7682  * so a new vlan id will not be added into the vf vlan table.
7683  * Just return 0 without warning, to avoid massive verbose
7684  * print logs when unloading.
7685  */
7686 if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND)
7687 return 0;
7688
7689 dev_err(&hdev->pdev->dev,
7690 "Kill vf vlan filter fail, ret =%d.\n",
7691 req0->resp_code);
7692 }
7693
7694 return -EIO;
7695 }
7696
7697 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
7698 u16 vlan_id, bool is_kill)
7699 {
7700 struct hclge_vlan_filter_pf_cfg_cmd *req;
7701 struct hclge_desc desc;
7702 u8 vlan_offset_byte_val;
7703 u8 vlan_offset_byte;
7704 u8 vlan_offset_160;
7705 int ret;
7706
7707 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
7708
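/* the 4096 vlan ids are handled 160 per command: locate the
 * 160-id group, then the byte and bit for this vlan id within it
 */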
7709 vlan_offset_160 = vlan_id / 160;
7710 vlan_offset_byte = (vlan_id % 160) / 8;
7711 vlan_offset_byte_val = 1 << (vlan_id % 8);
7712
7713 req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
7714 req->vlan_offset = vlan_offset_160;
7715 req->vlan_cfg = is_kill;
7716 req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
7717
7718 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7719 if (ret)
7720 dev_err(&hdev->pdev->dev,
7721 "port vlan command, send fail, ret =%d.\n", ret);
7722 return ret;
7723 }
7724
7725 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
7726 u16 vport_id, u16 vlan_id,
7727 bool is_kill)
7728 {
7729 u16 vport_idx, vport_num = 0;
7730 int ret;
7731
7732 if (is_kill && !vlan_id)
7733 return 0;
7734
7735 ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
7736 proto);
7737 if (ret) {
7738 dev_err(&hdev->pdev->dev,
7739 "Set %d vport vlan filter config fail, ret =%d.\n",
7740 vport_id, ret);
7741 return ret;
7742 }
7743
7744 /* vlan 0 may be added twice when the 8021q module is enabled */
7745 if (!is_kill && !vlan_id &&
7746 test_bit(vport_id, hdev->vlan_table[vlan_id]))
7747 return 0;
7748
7749 if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
7750 dev_err(&hdev->pdev->dev,
7751 "Add port vlan failed, vport %d is already in vlan %d\n",
7752 vport_id, vlan_id);
7753 return -EINVAL;
7754 }
7755
7756 if (is_kill &&
7757 !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
7758 dev_err(&hdev->pdev->dev,
7759 "Delete port vlan failed, vport %d is not in vlan %d\n",
7760 vport_id, vlan_id);
7761 return -EINVAL;
7762 }
7763
7764 for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
7765 vport_num++;
7766
7767 if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
7768 ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
7769 is_kill);
7770
7771 return ret;
7772 }
7773
7774 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
7775 {
7776 struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
7777 struct hclge_vport_vtag_tx_cfg_cmd *req;
7778 struct hclge_dev *hdev = vport->back;
7779 struct hclge_desc desc;
7780 u16 bmap_index;
7781 int status;
7782
7783 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
7784
7785 req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
7786 req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
7787 req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
7788 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
7789 vcfg->accept_tag1 ? 1 : 0);
7790 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
7791 vcfg->accept_untag1 ? 1 : 0);
7792 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
7793 vcfg->accept_tag2 ? 1 : 0);
7794 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
7795 vcfg->accept_untag2 ? 1 : 0);
7796 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
7797 vcfg->insert_tag1_en ? 1 : 0);
7798 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
7799 vcfg->insert_tag2_en ? 1 : 0);
7800 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
7801
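/* select the target vport: vf_offset picks the group of vports this
 * command covers, and the bitmap bit identifies the vport in the group
 */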
7802 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
7803 bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
7804 HCLGE_VF_NUM_PER_BYTE;
7805 req->vf_bitmap[bmap_index] =
7806 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
7807
7808 status = hclge_cmd_send(&hdev->hw, &desc, 1);
7809 if (status)
7810 dev_err(&hdev->pdev->dev,
7811 "Send port txvlan cfg command fail, ret =%d\n",
7812 status);
7813
7814 return status;
7815 }
7816
7817 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
7818 {
7819 struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
7820 struct hclge_vport_vtag_rx_cfg_cmd *req;
7821 struct hclge_dev *hdev = vport->back;
7822 struct hclge_desc desc;
7823 u16 bmap_index;
7824 int status;
7825
7826 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
7827
7828 req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
7829 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
7830 vcfg->strip_tag1_en ? 1 : 0);
7831 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
7832 vcfg->strip_tag2_en ? 1 : 0);
7833 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
7834 vcfg->vlan1_vlan_prionly ? 1 : 0);
7835 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
7836 vcfg->vlan2_vlan_prionly ? 1 : 0);
7837
7838 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
7839 bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
7840 HCLGE_VF_NUM_PER_BYTE;
7841 req->vf_bitmap[bmap_index] =
7842 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
7843
7844 status = hclge_cmd_send(&hdev->hw, &desc, 1);
7845 if (status)
7846 dev_err(&hdev->pdev->dev,
7847 "Send port rxvlan cfg command fail, ret =%d\n",
7848 status);
7849
7850 return status;
7851 }
7852
7853 static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
7854 u16 port_base_vlan_state,
7855 u16 vlan_tag)
7856 {
7857 int ret;
7858
7859 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7860 vport->txvlan_cfg.accept_tag1 = true;
7861 vport->txvlan_cfg.insert_tag1_en = false;
7862 vport->txvlan_cfg.default_tag1 = 0;
7863 } else {
7864 vport->txvlan_cfg.accept_tag1 = false;
7865 vport->txvlan_cfg.insert_tag1_en = true;
7866 vport->txvlan_cfg.default_tag1 = vlan_tag;
7867 }
7868
7869 vport->txvlan_cfg.accept_untag1 = true;
7870
7871 /* tag2 packets, tagged or untagged, are always accepted and no
7872  * tag2 insertion is performed; these fields are fixed here and
7873  * are not configurable by the user
7874  */
7875 vport->txvlan_cfg.accept_tag2 = true;
7876 vport->txvlan_cfg.accept_untag2 = true;
7877 vport->txvlan_cfg.insert_tag2_en = false;
7878 vport->txvlan_cfg.default_tag2 = 0;
7879
7880 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7881 vport->rxvlan_cfg.strip_tag1_en = false;
7882 vport->rxvlan_cfg.strip_tag2_en =
7883 vport->rxvlan_cfg.rx_vlan_offload_en;
7884 } else {
7885 vport->rxvlan_cfg.strip_tag1_en =
7886 vport->rxvlan_cfg.rx_vlan_offload_en;
7887 vport->rxvlan_cfg.strip_tag2_en = true;
7888 }
7889 vport->rxvlan_cfg.vlan1_vlan_prionly = false;
7890 vport->rxvlan_cfg.vlan2_vlan_prionly = false;
7891
7892 ret = hclge_set_vlan_tx_offload_cfg(vport);
7893 if (ret)
7894 return ret;
7895
7896 return hclge_set_vlan_rx_offload_cfg(vport);
7897 }
7898
7899 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
7900 {
7901 struct hclge_rx_vlan_type_cfg_cmd *rx_req;
7902 struct hclge_tx_vlan_type_cfg_cmd *tx_req;
7903 struct hclge_desc desc;
7904 int status;
7905
7906 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
7907 rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
7908 rx_req->ot_fst_vlan_type =
7909 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
7910 rx_req->ot_sec_vlan_type =
7911 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
7912 rx_req->in_fst_vlan_type =
7913 cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
7914 rx_req->in_sec_vlan_type =
7915 cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
7916
7917 status = hclge_cmd_send(&hdev->hw, &desc, 1);
7918 if (status) {
7919 dev_err(&hdev->pdev->dev,
7920 "Send rxvlan protocol type command fail, ret =%d\n",
7921 status);
7922 return status;
7923 }
7924
7925 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
7926
7927 tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
7928 tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
7929 tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
7930
7931 status = hclge_cmd_send(&hdev->hw, &desc, 1);
7932 if (status)
7933 dev_err(&hdev->pdev->dev,
7934 "Send txvlan protocol type command fail, ret =%d\n",
7935 status);
7936
7937 return status;
7938 }
7939
7940 static int hclge_init_vlan_config(struct hclge_dev *hdev)
7941 {
7942 #define HCLGE_DEF_VLAN_TYPE 0x8100
7943
7944 struct hnae3_handle *handle = &hdev->vport[0].nic;
7945 struct hclge_vport *vport;
7946 int ret;
7947 int i;
7948
7949 if (hdev->pdev->revision >= 0x21) {
7950 /* for revision 0x21, the vf vlan filter is enabled per function */
7951 for (i = 0; i < hdev->num_alloc_vport; i++) {
7952 vport = &hdev->vport[i];
7953 ret = hclge_set_vlan_filter_ctrl(hdev,
7954 HCLGE_FILTER_TYPE_VF,
7955 HCLGE_FILTER_FE_EGRESS,
7956 true,
7957 vport->vport_id);
7958 if (ret)
7959 return ret;
7960 }
7961
7962 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
7963 HCLGE_FILTER_FE_INGRESS, true,
7964 0);
7965 if (ret)
7966 return ret;
7967 } else {
7968 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
7969 HCLGE_FILTER_FE_EGRESS_V1_B,
7970 true, 0);
7971 if (ret)
7972 return ret;
7973 }
7974
7975 handle->netdev_flags |= HNAE3_VLAN_FLTR;
7976
7977 hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
7978 hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
7979 hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
7980 hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
7981 hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
7982 hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
7983
7984 ret = hclge_set_vlan_protocol_type(hdev);
7985 if (ret)
7986 return ret;
7987
7988 for (i = 0; i < hdev->num_alloc_vport; i++) {
7989 u16 vlan_tag;
7990
7991 vport = &hdev->vport[i];
7992 vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
7993
7994 ret = hclge_vlan_offload_cfg(vport,
7995 vport->port_base_vlan_cfg.state,
7996 vlan_tag);
7997 if (ret)
7998 return ret;
7999 }
8000
8001 return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
8002 }
8003
8004 static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
8005 bool writen_to_tbl)
8006 {
8007 struct hclge_vport_vlan_cfg *vlan;
8008
8009 vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
8010 if (!vlan)
8011 return;
8012
8013 vlan->hd_tbl_status = writen_to_tbl;
8014 vlan->vlan_id = vlan_id;
8015
8016 list_add_tail(&vlan->node, &vport->vlan_list);
8017 }
8018
8019 static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
8020 {
8021 struct hclge_vport_vlan_cfg *vlan, *tmp;
8022 struct hclge_dev *hdev = vport->back;
8023 int ret;
8024
8025 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8026 if (!vlan->hd_tbl_status) {
8027 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
8028 vport->vport_id,
8029 vlan->vlan_id, false);
8030 if (ret) {
8031 dev_err(&hdev->pdev->dev,
8032 "restore vport vlan list failed, ret=%d\n",
8033 ret);
8034 return ret;
8035 }
8036 }
8037 vlan->hd_tbl_status = true;
8038 }
8039
8040 return 0;
8041 }
8042
8043 static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
8044 bool is_write_tbl)
8045 {
8046 struct hclge_vport_vlan_cfg *vlan, *tmp;
8047 struct hclge_dev *hdev = vport->back;
8048
8049 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8050 if (vlan->vlan_id == vlan_id) {
8051 if (is_write_tbl && vlan->hd_tbl_status)
8052 hclge_set_vlan_filter_hw(hdev,
8053 htons(ETH_P_8021Q),
8054 vport->vport_id,
8055 vlan_id,
8056 true);
8057
8058 list_del(&vlan->node);
8059 kfree(vlan);
8060 break;
8061 }
8062 }
8063 }
8064
8065 void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
8066 {
8067 struct hclge_vport_vlan_cfg *vlan, *tmp;
8068 struct hclge_dev *hdev = vport->back;
8069
8070 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8071 if (vlan->hd_tbl_status)
8072 hclge_set_vlan_filter_hw(hdev,
8073 htons(ETH_P_8021Q),
8074 vport->vport_id,
8075 vlan->vlan_id,
8076 true);
8077
8078 vlan->hd_tbl_status = false;
8079 if (is_del_list) {
8080 list_del(&vlan->node);
8081 kfree(vlan);
8082 }
8083 }
8084 }
8085
8086 void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
8087 {
8088 struct hclge_vport_vlan_cfg *vlan, *tmp;
8089 struct hclge_vport *vport;
8090 int i;
8091
8092 mutex_lock(&hdev->vport_cfg_mutex);
8093 for (i = 0; i < hdev->num_alloc_vport; i++) {
8094 vport = &hdev->vport[i];
8095 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8096 list_del(&vlan->node);
8097 kfree(vlan);
8098 }
8099 }
8100 mutex_unlock(&hdev->vport_cfg_mutex);
8101 }
8102
8103 static void hclge_restore_vlan_table(struct hnae3_handle *handle)
8104 {
8105 struct hclge_vport *vport = hclge_get_vport(handle);
8106 struct hclge_vport_vlan_cfg *vlan, *tmp;
8107 struct hclge_dev *hdev = vport->back;
8108 u16 vlan_proto;
8109 u16 state, vlan_id;
8110 int i;
8111
8112 mutex_lock(&hdev->vport_cfg_mutex);
8113 for (i = 0; i < hdev->num_alloc_vport; i++) {
8114 vport = &hdev->vport[i];
8115 vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
8116 vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
8117 state = vport->port_base_vlan_cfg.state;
8118
8119 if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
8120 hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
8121 vport->vport_id, vlan_id,
8122 false);
8123 continue;
8124 }
8125
8126 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8127 if (vlan->hd_tbl_status)
8128 hclge_set_vlan_filter_hw(hdev,
8129 htons(ETH_P_8021Q),
8130 vport->vport_id,
8131 vlan->vlan_id,
8132 false);
8133 }
8134 }
8135
8136 mutex_unlock(&hdev->vport_cfg_mutex);
8137 }
8138
8139 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
8140 {
8141 struct hclge_vport *vport = hclge_get_vport(handle);
8142
8143 if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8144 vport->rxvlan_cfg.strip_tag1_en = false;
8145 vport->rxvlan_cfg.strip_tag2_en = enable;
8146 } else {
8147 vport->rxvlan_cfg.strip_tag1_en = enable;
8148 vport->rxvlan_cfg.strip_tag2_en = true;
8149 }
8150 vport->rxvlan_cfg.vlan1_vlan_prionly = false;
8151 vport->rxvlan_cfg.vlan2_vlan_prionly = false;
8152 vport->rxvlan_cfg.rx_vlan_offload_en = enable;
8153
8154 return hclge_set_vlan_rx_offload_cfg(vport);
8155 }
8156
8157 static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
8158 u16 port_base_vlan_state,
8159 struct hclge_vlan_info *new_info,
8160 struct hclge_vlan_info *old_info)
8161 {
8162 struct hclge_dev *hdev = vport->back;
8163 int ret;
8164
8165 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
8166 hclge_rm_vport_all_vlan_table(vport, false);
8167 return hclge_set_vlan_filter_hw(hdev,
8168 htons(new_info->vlan_proto),
8169 vport->vport_id,
8170 new_info->vlan_tag,
8171 false);
8172 }
8173
8174 ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
8175 vport->vport_id, old_info->vlan_tag,
8176 true);
8177 if (ret)
8178 return ret;
8179
8180 return hclge_add_vport_all_vlan_table(vport);
8181 }
8182
8183 int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
8184 struct hclge_vlan_info *vlan_info)
8185 {
8186 struct hnae3_handle *nic = &vport->nic;
8187 struct hclge_vlan_info *old_vlan_info;
8188 struct hclge_dev *hdev = vport->back;
8189 int ret;
8190
8191 old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
8192
8193 ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag);
8194 if (ret)
8195 return ret;
8196
8197 if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
8198 /* add the new vlan filter entry before removing the old one */
8199 ret = hclge_set_vlan_filter_hw(hdev,
8200 htons(vlan_info->vlan_proto),
8201 vport->vport_id,
8202 vlan_info->vlan_tag,
8203 false);
8204 if (ret)
8205 return ret;
8206
8207 /* remove the old vlan filter entry */
8208 ret = hclge_set_vlan_filter_hw(hdev,
8209 htons(old_vlan_info->vlan_proto),
8210 vport->vport_id,
8211 old_vlan_info->vlan_tag,
8212 true);
8213 if (ret)
8214 return ret;
8215
8216 goto update;
8217 }
8218
8219 ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
8220 old_vlan_info);
8221 if (ret)
8222 return ret;
8223
8224 /* update the state only when port based vlan is enabled or disabled */
8225 vport->port_base_vlan_cfg.state = state;
8226 if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
8227 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
8228 else
8229 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
8230
8231 update:
8232 vport->port_base_vlan_cfg.vlan_info.vlan_tag = vlan_info->vlan_tag;
8233 vport->port_base_vlan_cfg.vlan_info.qos = vlan_info->qos;
8234 vport->port_base_vlan_cfg.vlan_info.vlan_proto = vlan_info->vlan_proto;
8235
8236 return 0;
8237 }
8238
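/* map the (current state, requested vlan) pair to the required action:
 * disabled + vlan 0 -> no change, disabled + non-zero vlan -> enable,
 * enabled + vlan 0 -> disable, enabled + same vlan -> no change,
 * enabled + different vlan -> modify
 */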
8239 static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
8240 enum hnae3_port_base_vlan_state state,
8241 u16 vlan)
8242 {
8243 if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8244 if (!vlan)
8245 return HNAE3_PORT_BASE_VLAN_NOCHANGE;
8246 else
8247 return HNAE3_PORT_BASE_VLAN_ENABLE;
8248 } else {
8249 if (!vlan)
8250 return HNAE3_PORT_BASE_VLAN_DISABLE;
8251 else if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan)
8252 return HNAE3_PORT_BASE_VLAN_NOCHANGE;
8253 else
8254 return HNAE3_PORT_BASE_VLAN_MODIFY;
8255 }
8256 }
8257
8258 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
8259 u16 vlan, u8 qos, __be16 proto)
8260 {
8261 struct hclge_vport *vport = hclge_get_vport(handle);
8262 struct hclge_dev *hdev = vport->back;
8263 struct hclge_vlan_info vlan_info;
8264 u16 state;
8265 int ret;
8266
8267 if (hdev->pdev->revision == 0x20)
8268 return -EOPNOTSUPP;
8269
8270 /* vlan id must be a valid 802.1Q id and qos must fit in 3 bits */
8271 if (vfid >= hdev->num_alloc_vfs || vlan > VLAN_N_VID - 1 || qos > 7)
8272 return -EINVAL;
8273 if (proto != htons(ETH_P_8021Q))
8274 return -EPROTONOSUPPORT;
8275
8276 vport = &hdev->vport[vfid];
8277 state = hclge_get_port_base_vlan_state(vport,
8278 vport->port_base_vlan_cfg.state,
8279 vlan);
8280 if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
8281 return 0;
8282
8283 vlan_info.vlan_tag = vlan;
8284 vlan_info.qos = qos;
8285 vlan_info.vlan_proto = ntohs(proto);
8286
8287 /* for the PF itself, stop the client, update the port based vlan, then restart it */
8288 if (!vfid) {
8289 hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
8290 ret = hclge_update_port_base_vlan_cfg(vport, state, &vlan_info);
8291 hclge_notify_client(hdev, HNAE3_UP_CLIENT);
8292
8293 return ret;
8294 }
8295
8296 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
8297 return hclge_update_port_base_vlan_cfg(vport, state,
8298 &vlan_info);
8299 } else {
8300 ret = hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
8301 (u8)vfid, state,
8302 vlan, qos,
8303 ntohs(proto));
8304 return ret;
8305 }
8306 }
8307
8308 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
8309 u16 vlan_id, bool is_kill)
8310 {
8311 struct hclge_vport *vport = hclge_get_vport(handle);
8312 struct hclge_dev *hdev = vport->back;
8313 bool writen_to_tbl = false;
8314 int ret = 0;
8315
8316 /* when the device is resetting, the vlan filter command cannot be
8317  * handled; just record the vlan id to be deleted and remove it from
8318  * hardware after the reset completes (see hclge_sync_vlan_filter)
8319  */
8320 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) && is_kill) {
8321 set_bit(vlan_id, vport->vlan_del_fail_bmap);
8322 return -EBUSY;
8323 }
8324
8325 /* when port based vlan is enabled, the port based vlan tag is used as
8326  * the vlan filter entry. In that case the hardware vlan filter table
8327  * is not updated when the user adds or removes a vlan; only the vport
8328  * vlan list is maintained. The vlan ids in that list are written to
8329  * the vlan filter table once port based vlan is disabled again
8330  */
8331 if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8332 ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
8333 vlan_id, is_kill);
8334 writen_to_tbl = true;
8335 }
8336
8337 if (!ret) {
8338 if (is_kill)
8339 hclge_rm_vport_vlan_table(vport, vlan_id, false);
8340 else
8341 hclge_add_vport_vlan_table(vport, vlan_id,
8342 writen_to_tbl);
8343 } else if (is_kill) {
8344 /* when removing the hardware vlan filter entry failed, record the
8345  * vlan id and retry the removal later, so the hardware stays
8346  * consistent with the stack
8347  */
8348 set_bit(vlan_id, vport->vlan_del_fail_bmap);
8349 }
8350 return ret;
8351 }
8352
8353 static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
8354 {
8355 #define HCLGE_MAX_SYNC_COUNT 60
8356
8357 int i, ret, sync_cnt = 0;
8358 u16 vlan_id;
8359
8360 /* retry the vlan deletions that previously failed (e.g. during reset) */
8361 for (i = 0; i < hdev->num_alloc_vport; i++) {
8362 struct hclge_vport *vport = &hdev->vport[i];
8363
8364 vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
8365 VLAN_N_VID);
8366 while (vlan_id != VLAN_N_VID) {
8367 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
8368 vport->vport_id, vlan_id,
8369 true);
8370 if (ret && ret != -EINVAL)
8371 return;
8372
8373 clear_bit(vlan_id, vport->vlan_del_fail_bmap);
8374 hclge_rm_vport_vlan_table(vport, vlan_id, false);
8375
8376 sync_cnt++;
8377 if (sync_cnt >= HCLGE_MAX_SYNC_COUNT)
8378 return;
8379
8380 vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
8381 VLAN_N_VID);
8382 }
8383 }
8384 }
8385
8386 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
8387 {
8388 struct hclge_config_max_frm_size_cmd *req;
8389 struct hclge_desc desc;
8390
8391 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
8392
8393 req = (struct hclge_config_max_frm_size_cmd *)desc.data;
8394 req->max_frm_size = cpu_to_le16(new_mps);
8395 req->min_frm_size = HCLGE_MAC_MIN_FRAME;
8396
8397 return hclge_cmd_send(&hdev->hw, &desc, 1);
8398 }
8399
8400 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
8401 {
8402 struct hclge_vport *vport = hclge_get_vport(handle);
8403
8404 return hclge_set_vport_mtu(vport, new_mtu);
8405 }
8406
8407 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
8408 {
8409 struct hclge_dev *hdev = vport->back;
8410 int i, max_frm_size, ret;
8411
8412 max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
8413 if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
8414 max_frm_size > HCLGE_MAC_MAX_FRAME)
8415 return -EINVAL;
8416
8417 max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
8418 mutex_lock(&hdev->vport_lock);
8419
8420 if (vport->vport_id && max_frm_size > hdev->mps) {
8421 mutex_unlock(&hdev->vport_lock);
8422 return -EINVAL;
8423 } else if (vport->vport_id) {
8424 vport->mps = max_frm_size;
8425 mutex_unlock(&hdev->vport_lock);
8426 return 0;
8427 }
8428
8429 /* the PF's mps must be greater than or equal to every VF's mps */
8430 for (i = 1; i < hdev->num_alloc_vport; i++)
8431 if (max_frm_size < hdev->vport[i].mps) {
8432 mutex_unlock(&hdev->vport_lock);
8433 return -EINVAL;
8434 }
8435
8436 hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
8437
8438 ret = hclge_set_mac_mtu(hdev, max_frm_size);
8439 if (ret) {
8440 dev_err(&hdev->pdev->dev,
8441 "Change mtu fail, ret =%d\n", ret);
8442 goto out;
8443 }
8444
8445 hdev->mps = max_frm_size;
8446 vport->mps = max_frm_size;
8447
8448 ret = hclge_buffer_alloc(hdev);
8449 if (ret)
8450 dev_err(&hdev->pdev->dev,
8451 "Allocate buffer fail, ret =%d\n", ret);
8452
8453 out:
8454 hclge_notify_client(hdev, HNAE3_UP_CLIENT);
8455 mutex_unlock(&hdev->vport_lock);
8456 return ret;
8457 }
8458
8459 static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
8460 bool enable)
8461 {
8462 struct hclge_reset_tqp_queue_cmd *req;
8463 struct hclge_desc desc;
8464 int ret;
8465
8466 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
8467
8468 req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
8469 req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
8470 if (enable)
8471 hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, 1U);
8472
8473 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8474 if (ret) {
8475 dev_err(&hdev->pdev->dev,
8476 "Send tqp reset cmd error, status =%d\n", ret);
8477 return ret;
8478 }
8479
8480 return 0;
8481 }
8482
8483 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
8484 {
8485 struct hclge_reset_tqp_queue_cmd *req;
8486 struct hclge_desc desc;
8487 int ret;
8488
8489 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
8490
8491 req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
8492 req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
8493
8494 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8495 if (ret) {
8496 dev_err(&hdev->pdev->dev,
8497 "Get reset status error, status =%d\n", ret);
8498 return ret;
8499 }
8500
8501 return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
8502 }
8503
8504 u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
8505 {
8506 struct hnae3_queue *queue;
8507 struct hclge_tqp *tqp;
8508
8509 queue = handle->kinfo.tqp[queue_id];
8510 tqp = container_of(queue, struct hclge_tqp, q);
8511
8512 return tqp->index;
8513 }
8514
8515 int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
8516 {
8517 struct hclge_vport *vport = hclge_get_vport(handle);
8518 struct hclge_dev *hdev = vport->back;
8519 int reset_try_times = 0;
8520 int reset_status;
8521 u16 queue_gid;
8522 int ret;
8523
8524 queue_gid = hclge_covert_handle_qid_global(handle, queue_id);
8525
8526 ret = hclge_tqp_enable(hdev, queue_id, 0, false);
8527 if (ret) {
8528 dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
8529 return ret;
8530 }
8531
8532 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
8533 if (ret) {
8534 dev_err(&hdev->pdev->dev,
8535 "Send reset tqp cmd fail, ret = %d\n", ret);
8536 return ret;
8537 }
8538
8539 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
8540 reset_status = hclge_get_reset_status(hdev, queue_gid);
8541 if (reset_status)
8542 break;
8543
8544 /* wait for the tqp hardware reset to complete */
8545 usleep_range(1000, 1200);
8546 }
8547
8548 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
8549 dev_err(&hdev->pdev->dev, "Reset TQP fail\n");
8550 return ret;
8551 }
8552
8553 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
8554 if (ret)
8555 dev_err(&hdev->pdev->dev,
8556 "Deassert the soft reset fail, ret = %d\n", ret);
8557
8558 return ret;
8559 }
8560
8561 void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
8562 {
8563 struct hclge_dev *hdev = vport->back;
8564 int reset_try_times = 0;
8565 int reset_status;
8566 u16 queue_gid;
8567 int ret;
8568
8569 queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);
8570
8571 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
8572 if (ret) {
8573 dev_warn(&hdev->pdev->dev,
8574 "Send reset tqp cmd fail, ret = %d\n", ret);
8575 return;
8576 }
8577
8578 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
8579 reset_status = hclge_get_reset_status(hdev, queue_gid);
8580 if (reset_status)
8581 break;
8582
8583 /* wait for the tqp hardware reset to complete */
8584 usleep_range(1000, 1200);
8585 }
8586
8587 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
8588 dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
8589 return;
8590 }
8591
8592 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
8593 if (ret)
8594 dev_warn(&hdev->pdev->dev,
8595 "Deassert the soft reset fail, ret = %d\n", ret);
8596 }
8597
8598 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
8599 {
8600 struct hclge_vport *vport = hclge_get_vport(handle);
8601 struct hclge_dev *hdev = vport->back;
8602
8603 return hdev->fw_version;
8604 }
8605
8606 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
8607 {
8608 struct phy_device *phydev = hdev->hw.mac.phydev;
8609
8610 if (!phydev)
8611 return;
8612
8613 phy_set_asym_pause(phydev, rx_en, tx_en);
8614 }
8615
8616 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
8617 {
8618 int ret;
8619
8620 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
8621 return 0;
8622
8623 ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
8624 if (ret)
8625 dev_err(&hdev->pdev->dev,
8626 "configure pauseparam error, ret = %d.\n", ret);
8627
8628 return ret;
8629 }
8630
8631 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
8632 {
8633 struct phy_device *phydev = hdev->hw.mac.phydev;
8634 u16 remote_advertising = 0;
8635 u16 local_advertising;
8636 u32 rx_pause, tx_pause;
8637 u8 flowctl;
8638
8639 if (!phydev->link || !phydev->autoneg)
8640 return 0;
8641
8642 local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
8643
8644 if (phydev->pause)
8645 remote_advertising = LPA_PAUSE_CAP;
8646
8647 if (phydev->asym_pause)
8648 remote_advertising |= LPA_PAUSE_ASYM;
8649
8650 flowctl = mii_resolve_flowctrl_fdx(local_advertising,
8651 remote_advertising);
8652 tx_pause = flowctl & FLOW_CTRL_TX;
8653 rx_pause = flowctl & FLOW_CTRL_RX;
8654
8655 if (phydev->duplex == HCLGE_MAC_HALF) {
8656 tx_pause = 0;
8657 rx_pause = 0;
8658 }
8659
8660 return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
8661 }
8662
8663 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
8664 u32 *rx_en, u32 *tx_en)
8665 {
8666 struct hclge_vport *vport = hclge_get_vport(handle);
8667 struct hclge_dev *hdev = vport->back;
8668 struct phy_device *phydev = hdev->hw.mac.phydev;
8669
8670 *auto_neg = phydev ? hclge_get_autoneg(handle) : 0;
8671
8672 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
8673 *rx_en = 0;
8674 *tx_en = 0;
8675 return;
8676 }
8677
8678 if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
8679 *rx_en = 1;
8680 *tx_en = 0;
8681 } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
8682 *tx_en = 1;
8683 *rx_en = 0;
8684 } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
8685 *rx_en = 1;
8686 *tx_en = 1;
8687 } else {
8688 *rx_en = 0;
8689 *tx_en = 0;
8690 }
8691 }
8692
8693 static void hclge_record_user_pauseparam(struct hclge_dev *hdev,
8694 u32 rx_en, u32 tx_en)
8695 {
8696 if (rx_en && tx_en)
8697 hdev->fc_mode_last_time = HCLGE_FC_FULL;
8698 else if (rx_en && !tx_en)
8699 hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
8700 else if (!rx_en && tx_en)
8701 hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
8702 else
8703 hdev->fc_mode_last_time = HCLGE_FC_NONE;
8704
8705 hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
8706 }
8707
8708 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
8709 u32 rx_en, u32 tx_en)
8710 {
8711 struct hclge_vport *vport = hclge_get_vport(handle);
8712 struct hclge_dev *hdev = vport->back;
8713 struct phy_device *phydev = hdev->hw.mac.phydev;
8714 u32 fc_autoneg;
8715
8716 if (phydev) {
8717 fc_autoneg = hclge_get_autoneg(handle);
8718 if (auto_neg != fc_autoneg) {
8719 dev_info(&hdev->pdev->dev,
8720 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
8721 return -EOPNOTSUPP;
8722 }
8723 }
8724
8725 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
8726 dev_info(&hdev->pdev->dev,
8727 "Priority flow control enabled. Cannot set link flow control.\n");
8728 return -EOPNOTSUPP;
8729 }
8730
8731 hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
8732
8733 hclge_record_user_pauseparam(hdev, rx_en, tx_en);
8734
8735 if (!auto_neg)
8736 return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
8737
8738 if (phydev)
8739 return phy_start_aneg(phydev);
8740
8741 return -EOPNOTSUPP;
8742 }
8743
8744 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
8745 u8 *auto_neg, u32 *speed, u8 *duplex)
8746 {
8747 struct hclge_vport *vport = hclge_get_vport(handle);
8748 struct hclge_dev *hdev = vport->back;
8749
8750 if (speed)
8751 *speed = hdev->hw.mac.speed;
8752 if (duplex)
8753 *duplex = hdev->hw.mac.duplex;
8754 if (auto_neg)
8755 *auto_neg = hdev->hw.mac.autoneg;
8756 }
8757
8758 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
8759 u8 *module_type)
8760 {
8761 struct hclge_vport *vport = hclge_get_vport(handle);
8762 struct hclge_dev *hdev = vport->back;
8763
8764 if (media_type)
8765 *media_type = hdev->hw.mac.media_type;
8766
8767 if (module_type)
8768 *module_type = hdev->hw.mac.module_type;
8769 }
8770
8771 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
8772 u8 *tp_mdix_ctrl, u8 *tp_mdix)
8773 {
8774 struct hclge_vport *vport = hclge_get_vport(handle);
8775 struct hclge_dev *hdev = vport->back;
8776 struct phy_device *phydev = hdev->hw.mac.phydev;
8777 int mdix_ctrl, mdix, is_resolved;
8778 unsigned int retval;
8779
8780 if (!phydev) {
8781 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
8782 *tp_mdix = ETH_TP_MDI_INVALID;
8783 return;
8784 }
8785
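/* switch the PHY to the MDI/MDI-X page, read the control and status
 * registers, then switch back to the copper page before decoding
 */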
8786 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
8787
8788 retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
8789 mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
8790 HCLGE_PHY_MDIX_CTRL_S);
8791
8792 retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
8793 mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
8794 is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
8795
8796 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
8797
8798 switch (mdix_ctrl) {
8799 case 0x0:
8800 *tp_mdix_ctrl = ETH_TP_MDI;
8801 break;
8802 case 0x1:
8803 *tp_mdix_ctrl = ETH_TP_MDI_X;
8804 break;
8805 case 0x3:
8806 *tp_mdix_ctrl = ETH_TP_MDI_AUTO;
8807 break;
8808 default:
8809 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
8810 break;
8811 }
8812
8813 if (!is_resolved)
8814 *tp_mdix = ETH_TP_MDI_INVALID;
8815 else if (mdix)
8816 *tp_mdix = ETH_TP_MDI_X;
8817 else
8818 *tp_mdix = ETH_TP_MDI;
8819 }
8820
8821 static void hclge_info_show(struct hclge_dev *hdev)
8822 {
8823 struct device *dev = &hdev->pdev->dev;
8824
8825 dev_info(dev, "PF info begin:\n");
8826
8827 dev_info(dev, "Task queue pairs numbers: %d\n", hdev->num_tqps);
8828 dev_info(dev, "Desc num per TX queue: %d\n", hdev->num_tx_desc);
8829 dev_info(dev, "Desc num per RX queue: %d\n", hdev->num_rx_desc);
8830 dev_info(dev, "Numbers of vports: %d\n", hdev->num_alloc_vport);
8831 dev_info(dev, "Numbers of vmdp vports: %d\n", hdev->num_vmdq_vport);
8832 dev_info(dev, "Numbers of VF for this PF: %d\n", hdev->num_req_vfs);
8833 dev_info(dev, "HW tc map: %d\n", hdev->hw_tc_map);
8834 dev_info(dev, "Total buffer size for TX/RX: %d\n", hdev->pkt_buf_size);
8835 dev_info(dev, "TX buffer size for each TC: %d\n", hdev->tx_buf_size);
8836 dev_info(dev, "DV buffer size for each TC: %d\n", hdev->dv_buf_size);
8837 dev_info(dev, "This is %s PF\n",
8838 hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
8839 dev_info(dev, "DCB %s\n",
8840 hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
8841 dev_info(dev, "MQPRIO %s\n",
8842 hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
8843
8844 dev_info(dev, "PF info end.\n");
8845 }
8846
8847 static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
8848 struct hclge_vport *vport)
8849 {
8850 struct hnae3_client *client = vport->nic.client;
8851 struct hclge_dev *hdev = ae_dev->priv;
8852 int rst_cnt;
8853 int ret;
8854
8855 rst_cnt = hdev->rst_stats.reset_cnt;
8856 ret = client->ops->init_instance(&vport->nic);
8857 if (ret)
8858 return ret;
8859
8860 set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
8861 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
8862 rst_cnt != hdev->rst_stats.reset_cnt) {
8863 ret = -EBUSY;
8864 goto init_nic_err;
8865 }
8866
8867 /* enable NIC hardware error interrupts */
8868 ret = hclge_config_nic_hw_error(hdev, true);
8869 if (ret) {
8870 dev_err(&ae_dev->pdev->dev,
8871 "fail(%d) to enable hw error interrupts\n", ret);
8872 goto init_nic_err;
8873 }
8874
8875 hnae3_set_client_init_flag(client, ae_dev, 1);
8876
8877 if (netif_msg_drv(&hdev->vport->nic))
8878 hclge_info_show(hdev);
8879
8880 return ret;
8881
8882 init_nic_err:
8883 clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
8884 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
8885 msleep(HCLGE_WAIT_RESET_DONE);
8886
8887 client->ops->uninit_instance(&vport->nic, 0);
8888
8889 return ret;
8890 }
8891
8892 static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
8893 struct hclge_vport *vport)
8894 {
8895 struct hnae3_client *client = vport->roce.client;
8896 struct hclge_dev *hdev = ae_dev->priv;
8897 int rst_cnt;
8898 int ret;
8899
8900 if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
8901 !hdev->nic_client)
8902 return 0;
8903
8904 client = hdev->roce_client;
8905 ret = hclge_init_roce_base_info(vport);
8906 if (ret)
8907 return ret;
8908
8909 rst_cnt = hdev->rst_stats.reset_cnt;
8910 ret = client->ops->init_instance(&vport->roce);
8911 if (ret)
8912 return ret;
8913
8914 set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
8915 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
8916 rst_cnt != hdev->rst_stats.reset_cnt) {
8917 ret = -EBUSY;
8918 goto init_roce_err;
8919 }
8920
8921 /* enable RoCE RAS interrupts */
8922 ret = hclge_config_rocee_ras_interrupt(hdev, true);
8923 if (ret) {
8924 dev_err(&ae_dev->pdev->dev,
8925 "fail(%d) to enable roce ras interrupts\n", ret);
8926 goto init_roce_err;
8927 }
8928
8929 hnae3_set_client_init_flag(client, ae_dev, 1);
8930
8931 return 0;
8932
8933 init_roce_err:
8934 clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
8935 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
8936 msleep(HCLGE_WAIT_RESET_DONE);
8937
8938 hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
8939
8940 return ret;
8941 }
8942
8943 static int hclge_init_client_instance(struct hnae3_client *client,
8944 struct hnae3_ae_dev *ae_dev)
8945 {
8946 struct hclge_dev *hdev = ae_dev->priv;
8947 struct hclge_vport *vport;
8948 int i, ret;
8949
8950 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
8951 vport = &hdev->vport[i];
8952
8953 switch (client->type) {
8954 case HNAE3_CLIENT_KNIC:
8955
8956 hdev->nic_client = client;
8957 vport->nic.client = client;
8958 ret = hclge_init_nic_client_instance(ae_dev, vport);
8959 if (ret)
8960 goto clear_nic;
8961
8962 ret = hclge_init_roce_client_instance(ae_dev, vport);
8963 if (ret)
8964 goto clear_roce;
8965
8966 break;
8967 case HNAE3_CLIENT_ROCE:
8968 if (hnae3_dev_roce_supported(hdev)) {
8969 hdev->roce_client = client;
8970 vport->roce.client = client;
8971 }
8972
8973 ret = hclge_init_roce_client_instance(ae_dev, vport);
8974 if (ret)
8975 goto clear_roce;
8976
8977 break;
8978 default:
8979 return -EINVAL;
8980 }
8981 }
8982
8983 return 0;
8984
8985 clear_nic:
8986 hdev->nic_client = NULL;
8987 vport->nic.client = NULL;
8988 return ret;
8989 clear_roce:
8990 hdev->roce_client = NULL;
8991 vport->roce.client = NULL;
8992 return ret;
8993 }
8994
8995 static void hclge_uninit_client_instance(struct hnae3_client *client,
8996 struct hnae3_ae_dev *ae_dev)
8997 {
8998 struct hclge_dev *hdev = ae_dev->priv;
8999 struct hclge_vport *vport;
9000 int i;
9001
9002 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
9003 vport = &hdev->vport[i];
9004 if (hdev->roce_client) {
9005 clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
9006 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9007 msleep(HCLGE_WAIT_RESET_DONE);
9008
9009 hdev->roce_client->ops->uninit_instance(&vport->roce,
9010 0);
9011 hdev->roce_client = NULL;
9012 vport->roce.client = NULL;
9013 }
9014 if (client->type == HNAE3_CLIENT_ROCE)
9015 return;
9016 if (hdev->nic_client && client->ops->uninit_instance) {
9017 clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
9018 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9019 msleep(HCLGE_WAIT_RESET_DONE);
9020
9021 client->ops->uninit_instance(&vport->nic, 0);
9022 hdev->nic_client = NULL;
9023 vport->nic.client = NULL;
9024 }
9025 }
9026 }
9027
9028 static int hclge_pci_init(struct hclge_dev *hdev)
9029 {
9030 struct pci_dev *pdev = hdev->pdev;
9031 struct hclge_hw *hw;
9032 int ret;
9033
9034 ret = pci_enable_device(pdev);
9035 if (ret) {
9036 dev_err(&pdev->dev, "failed to enable PCI device\n");
9037 return ret;
9038 }
9039
9040 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
9041 if (ret) {
9042 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
9043 if (ret) {
9044 dev_err(&pdev->dev,
9045 "can't set consistent PCI DMA");
9046 goto err_disable_device;
9047 }
9048 dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
9049 }
9050
9051 ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
9052 if (ret) {
9053 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
9054 goto err_disable_device;
9055 }
9056
9057 pci_set_master(pdev);
9058 hw = &hdev->hw;
9059 hw->io_base = pcim_iomap(pdev, 2, 0);
9060 if (!hw->io_base) {
9061 dev_err(&pdev->dev, "Can't map configuration register space\n");
9062 ret = -ENOMEM;
9063 goto err_clr_master;
9064 }
9065
9066 hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
9067
9068 return 0;
9069 err_clr_master:
9070 pci_clear_master(pdev);
9071 pci_release_regions(pdev);
9072 err_disable_device:
9073 pci_disable_device(pdev);
9074
9075 return ret;
9076 }
9077
9078 static void hclge_pci_uninit(struct hclge_dev *hdev)
9079 {
9080 struct pci_dev *pdev = hdev->pdev;
9081
9082 pcim_iounmap(pdev, hdev->hw.io_base);
9083 pci_free_irq_vectors(pdev);
9084 pci_clear_master(pdev);
9085 pci_release_mem_regions(pdev);
9086 pci_disable_device(pdev);
9087 }
9088
9089 static void hclge_state_init(struct hclge_dev *hdev)
9090 {
9091 set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
9092 set_bit(HCLGE_STATE_DOWN, &hdev->state);
9093 clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
9094 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
9095 clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
9096 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
9097 }
9098
9099 static void hclge_state_uninit(struct hclge_dev *hdev)
9100 {
9101 set_bit(HCLGE_STATE_DOWN, &hdev->state);
9102 set_bit(HCLGE_STATE_REMOVING, &hdev->state);
9103
9104 if (hdev->reset_timer.function)
9105 del_timer_sync(&hdev->reset_timer);
9106 if (hdev->service_task.work.func)
9107 cancel_delayed_work_sync(&hdev->service_task);
9108 if (hdev->rst_service_task.func)
9109 cancel_work_sync(&hdev->rst_service_task);
9110 if (hdev->mbx_service_task.func)
9111 cancel_work_sync(&hdev->mbx_service_task);
9112 }
9113
9114 static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
9115 {
9116 #define HCLGE_FLR_WAIT_MS 100
9117 #define HCLGE_FLR_WAIT_CNT 50
9118 struct hclge_dev *hdev = ae_dev->priv;
9119 int cnt = 0;
9120
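/* request a function level reset and poll until the reset task has
 * brought the function down, or give up after
 * HCLGE_FLR_WAIT_CNT * HCLGE_FLR_WAIT_MS milliseconds
 */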
9121 clear_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
9122 clear_bit(HNAE3_FLR_DONE, &hdev->flr_state);
9123 set_bit(HNAE3_FLR_RESET, &hdev->default_reset_request);
9124 hclge_reset_event(hdev->pdev, NULL);
9125
9126 while (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state) &&
9127 cnt++ < HCLGE_FLR_WAIT_CNT)
9128 msleep(HCLGE_FLR_WAIT_MS);
9129
9130 if (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state))
9131 dev_err(&hdev->pdev->dev,
9132 "flr wait down timeout: %d\n", cnt);
9133 }
9134
9135 static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
9136 {
9137 struct hclge_dev *hdev = ae_dev->priv;
9138
9139 set_bit(HNAE3_FLR_DONE, &hdev->flr_state);
9140 }
9141
9142 static void hclge_clear_resetting_state(struct hclge_dev *hdev)
9143 {
9144 u16 i;
9145
9146 for (i = 0; i < hdev->num_alloc_vport; i++) {
9147 struct hclge_vport *vport = &hdev->vport[i];
9148 int ret;
9149
9150 /* clear the VF's function-reset-pending state */
9151 ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
9152 if (ret)
9153 dev_warn(&hdev->pdev->dev,
9154 "clear vf(%d) rst failed %d!\n",
9155 vport->vport_id, ret);
9156 }
9157 }
9158
9159 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
9160 {
9161 struct pci_dev *pdev = ae_dev->pdev;
9162 struct hclge_dev *hdev;
9163 int ret;
9164
9165 hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
9166 if (!hdev) {
9167 ret = -ENOMEM;
9168 goto out;
9169 }
9170
9171 hdev->pdev = pdev;
9172 hdev->ae_dev = ae_dev;
9173 hdev->reset_type = HNAE3_NONE_RESET;
9174 hdev->reset_level = HNAE3_FUNC_RESET;
9175 ae_dev->priv = hdev;
9176 hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
9177
9178 mutex_init(&hdev->vport_lock);
9179 mutex_init(&hdev->vport_cfg_mutex);
9180 spin_lock_init(&hdev->fd_rule_lock);
9181
9182 ret = hclge_pci_init(hdev);
9183 if (ret) {
9184 dev_err(&pdev->dev, "PCI init failed\n");
9185 goto out;
9186 }
9187
9188 /* initialize the firmware command queue */
9189 ret = hclge_cmd_queue_init(hdev);
9190 if (ret) {
9191 dev_err(&pdev->dev, "Cmd queue init failed, ret = %d.\n", ret);
9192 goto err_pci_uninit;
9193 }
9194
9195 /* initialize the firmware command interface */
9196 ret = hclge_cmd_init(hdev);
9197 if (ret)
9198 goto err_cmd_uninit;
9199
9200 ret = hclge_get_cap(hdev);
9201 if (ret) {
9202 dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
9203 ret);
9204 goto err_cmd_uninit;
9205 }
9206
9207 ret = hclge_configure(hdev);
9208 if (ret) {
9209 dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
9210 goto err_cmd_uninit;
9211 }
9212
9213 ret = hclge_init_msi(hdev);
9214 if (ret) {
9215 dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
9216 goto err_cmd_uninit;
9217 }
9218
9219 ret = hclge_misc_irq_init(hdev);
9220 if (ret) {
9221 dev_err(&pdev->dev,
9222 "Misc IRQ(vector0) init error, ret = %d.\n",
9223 ret);
9224 goto err_msi_uninit;
9225 }
9226
9227 ret = hclge_alloc_tqps(hdev);
9228 if (ret) {
9229 dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
9230 goto err_msi_irq_uninit;
9231 }
9232
9233 ret = hclge_alloc_vport(hdev);
9234 if (ret) {
9235 dev_err(&pdev->dev, "Allocate vport error, ret = %d.\n", ret);
9236 goto err_msi_irq_uninit;
9237 }
9238
9239 ret = hclge_map_tqp(hdev);
9240 if (ret) {
9241 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
9242 goto err_msi_irq_uninit;
9243 }
9244
9245 if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
9246 ret = hclge_mac_mdio_config(hdev);
9247 if (ret) {
9248 dev_err(&hdev->pdev->dev,
9249 "mdio config fail ret=%d\n", ret);
9250 goto err_msi_irq_uninit;
9251 }
9252 }
9253
9254 ret = hclge_init_umv_space(hdev);
9255 if (ret) {
9256 dev_err(&pdev->dev, "umv space init error, ret=%d.\n", ret);
9257 goto err_mdiobus_unreg;
9258 }
9259
9260 ret = hclge_mac_init(hdev);
9261 if (ret) {
9262 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
9263 goto err_mdiobus_unreg;
9264 }
9265
9266 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
9267 if (ret) {
9268 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
9269 goto err_mdiobus_unreg;
9270 }
9271
9272 ret = hclge_config_gro(hdev, true);
9273 if (ret)
9274 goto err_mdiobus_unreg;
9275
9276 ret = hclge_init_vlan_config(hdev);
9277 if (ret) {
9278 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
9279 goto err_mdiobus_unreg;
9280 }
9281
9282 ret = hclge_tm_schd_init(hdev);
9283 if (ret) {
9284 dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
9285 goto err_mdiobus_unreg;
9286 }
9287
9288 hclge_rss_init_cfg(hdev);
9289 ret = hclge_rss_init_hw(hdev);
9290 if (ret) {
9291 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
9292 goto err_mdiobus_unreg;
9293 }
9294
9295 ret = init_mgr_tbl(hdev);
9296 if (ret) {
9297 dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
9298 goto err_mdiobus_unreg;
9299 }
9300
9301 ret = hclge_init_fd_config(hdev);
9302 if (ret) {
9303 dev_err(&pdev->dev,
9304 "fd table init fail, ret=%d\n", ret);
9305 goto err_mdiobus_unreg;
9306 }
9307
9308 INIT_KFIFO(hdev->mac_tnl_log);
9309
9310 hclge_dcb_ops_set(hdev);
9311
9312 timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
9313 INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task);
9314 INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task);
9315 INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task);
9316
9317
9318 /* set up cpu affinity handling for the misc (vector0) interrupt,
9319  * now that the service tasks and timers are initialized */
9320 hclge_misc_affinity_setup(hdev);
9321
9322 hclge_clear_all_event_cause(hdev);
9323 hclge_clear_resetting_state(hdev);
9324
9325 /* log and clear any hardware errors that have already occurred */
9326 hclge_handle_all_hns_hw_errors(ae_dev);
9327
9328 /* if handling the existing errors requires a reset, request it via the
9329  * delayed reset timer rather than resetting immediately during init
9330  */
9331 if (ae_dev->hw_err_reset_req) {
9332 enum hnae3_reset_type reset_level;
9333
9334 reset_level = hclge_get_reset_level(ae_dev,
9335 &ae_dev->hw_err_reset_req);
9336 hclge_set_def_reset_request(ae_dev, reset_level);
9337 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
9338 }
9339
9340 /* enable the misc vector (vector0) */
9341 hclge_enable_vector(&hdev->misc_vector, true);
9342
9343 hclge_state_init(hdev);
9344 hdev->last_reset_time = jiffies;
9345
9346 dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n",
9347 HCLGE_DRIVER_NAME);
9348
9349 return 0;
9350
9351 err_mdiobus_unreg:
9352 if (hdev->hw.mac.phydev)
9353 mdiobus_unregister(hdev->hw.mac.mdio_bus);
9354 err_msi_irq_uninit:
9355 hclge_misc_irq_uninit(hdev);
9356 err_msi_uninit:
9357 pci_free_irq_vectors(pdev);
9358 err_cmd_uninit:
9359 hclge_cmd_uninit(hdev);
9360 err_pci_uninit:
9361 pcim_iounmap(pdev, hdev->hw.io_base);
9362 pci_clear_master(pdev);
9363 pci_release_regions(pdev);
9364 pci_disable_device(pdev);
9365 out:
9366 return ret;
9367 }
9368
9369 static void hclge_stats_clear(struct hclge_dev *hdev)
9370 {
9371 memset(&hdev->hw_stats, 0, sizeof(hdev->hw_stats));
9372 }
9373
9374 static void hclge_reset_vport_state(struct hclge_dev *hdev)
9375 {
9376 struct hclge_vport *vport = hdev->vport;
9377 int i;
9378
9379 for (i = 0; i < hdev->num_alloc_vport; i++) {
9380 hclge_vport_stop(vport);
9381 vport++;
9382 }
9383 }
9384
9385 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
9386 {
9387 struct hclge_dev *hdev = ae_dev->priv;
9388 struct pci_dev *pdev = ae_dev->pdev;
9389 int ret;
9390
9391 set_bit(HCLGE_STATE_DOWN, &hdev->state);
9392
9393 hclge_stats_clear(hdev);
9394 memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
9395 memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));
9396
9397 ret = hclge_cmd_init(hdev);
9398 if (ret) {
9399 dev_err(&pdev->dev, "Cmd queue init failed\n");
9400 return ret;
9401 }
9402
9403 ret = hclge_map_tqp(hdev);
9404 if (ret) {
9405 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
9406 return ret;
9407 }
9408
9409 hclge_reset_umv_space(hdev);
9410
9411 ret = hclge_mac_init(hdev);
9412 if (ret) {
9413 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
9414 return ret;
9415 }
9416
9417 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
9418 if (ret) {
9419 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
9420 return ret;
9421 }
9422
9423 ret = hclge_config_gro(hdev, true);
9424 if (ret)
9425 return ret;
9426
9427 ret = hclge_init_vlan_config(hdev);
9428 if (ret) {
9429 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
9430 return ret;
9431 }
9432
9433 ret = hclge_tm_init_hw(hdev, true);
9434 if (ret) {
9435 dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
9436 return ret;
9437 }
9438
9439 ret = hclge_rss_init_hw(hdev);
9440 if (ret) {
9441 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
9442 return ret;
9443 }
9444
9445 ret = init_mgr_tbl(hdev);
9446 if (ret) {
9447 dev_err(&pdev->dev,
9448 "failed to reinit manager table, ret = %d\n", ret);
9449 return ret;
9450 }
9451
9452 ret = hclge_init_fd_config(hdev);
9453 if (ret) {
9454 dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret);
9455 return ret;
9456 }
9457
9458 /* log and clear any hardware errors that have already occurred */
9459 hclge_handle_all_hns_hw_errors(ae_dev);
9460
9461 /* re-enable the NIC hardware error interrupts, which are disabled
9462  * by the reset
9463  */
9464 ret = hclge_config_nic_hw_error(hdev, true);
9465 if (ret) {
9466 dev_err(&pdev->dev,
9467 "fail(%d) to re-enable NIC hw error interrupts\n",
9468 ret);
9469 return ret;
9470 }
9471
9472 if (hdev->roce_client) {
9473 ret = hclge_config_rocee_ras_interrupt(hdev, true);
9474 if (ret) {
9475 dev_err(&pdev->dev,
9476 "fail(%d) to re-enable roce ras interrupts\n",
9477 ret);
9478 return ret;
9479 }
9480 }
9481
9482 hclge_reset_vport_state(hdev);
9483
9484 dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
9485 HCLGE_DRIVER_NAME);
9486
9487 return 0;
9488 }
9489
9490 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
9491 {
9492 struct hclge_dev *hdev = ae_dev->priv;
9493 struct hclge_mac *mac = &hdev->hw.mac;
9494
9495 hclge_misc_affinity_teardown(hdev);
9496 hclge_state_uninit(hdev);
9497
9498 if (mac->phydev)
9499 mdiobus_unregister(mac->mdio_bus);
9500
9501 hclge_uninit_umv_space(hdev);
9502
9503 /* disable the misc vector (vector0) */
9504 hclge_enable_vector(&hdev->misc_vector, false);
9505 synchronize_irq(hdev->misc_vector.vector_irq);
9506
9507 /* disable the remaining hardware error interrupts */
9508 hclge_config_mac_tnl_int(hdev, false);
9509 hclge_config_nic_hw_error(hdev, false);
9510 hclge_config_rocee_ras_interrupt(hdev, false);
9511
9512 hclge_cmd_uninit(hdev);
9513 hclge_misc_irq_uninit(hdev);
9514 hclge_pci_uninit(hdev);
9515 mutex_destroy(&hdev->vport_lock);
9516 hclge_uninit_vport_mac_table(hdev);
9517 hclge_uninit_vport_vlan_table(hdev);
9518 mutex_destroy(&hdev->vport_cfg_mutex);
9519 ae_dev->priv = NULL;
9520 }
9521
9522 static u32 hclge_get_max_channels(struct hnae3_handle *handle)
9523 {
9524 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
9525 struct hclge_vport *vport = hclge_get_vport(handle);
9526 struct hclge_dev *hdev = vport->back;
9527
9528 return min_t(u32, hdev->rss_size_max,
9529 vport->alloc_tqps / kinfo->num_tc);
9530 }
9531
9532 static void hclge_get_channels(struct hnae3_handle *handle,
9533 struct ethtool_channels *ch)
9534 {
9535 ch->max_combined = hclge_get_max_channels(handle);
9536 ch->other_count = 1;
9537 ch->max_other = 1;
9538 ch->combined_count = handle->kinfo.rss_size;
9539 }
9540
9541 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
9542 u16 *alloc_tqps, u16 *max_rss_size)
9543 {
9544 struct hclge_vport *vport = hclge_get_vport(handle);
9545 struct hclge_dev *hdev = vport->back;
9546
9547 *alloc_tqps = vport->alloc_tqps;
9548 *max_rss_size = hdev->rss_size_max;
9549 }
9550
9551 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
9552 bool rxfh_configured)
9553 {
9554 struct hclge_vport *vport = hclge_get_vport(handle);
9555 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
9556 u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
9557 struct hclge_dev *hdev = vport->back;
9558 u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
9559 int cur_rss_size = kinfo->rss_size;
9560 int cur_tqps = kinfo->num_tqps;
9561 u16 tc_valid[HCLGE_MAX_TC_NUM];
9562 u16 roundup_size;
9563 u32 *rss_indir;
9564 unsigned int i;
9565 int ret;
9566
9567 kinfo->req_rss_size = new_tqps_num;
9568
9569 ret = hclge_tm_vport_map_update(hdev);
9570 if (ret) {
9571 dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
9572 return ret;
9573 }
9574
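/* tc_size is programmed as log2 of the per-TC queue count, so round
 * rss_size up to a power of two first; tc_offset is the first queue
 * of each TC
 */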
9575 roundup_size = roundup_pow_of_two(kinfo->rss_size);
9576 roundup_size = ilog2(roundup_size);
9577
9578 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
9579 tc_valid[i] = 0;
9580
9581 if (!(hdev->hw_tc_map & BIT(i)))
9582 continue;
9583
9584 tc_valid[i] = 1;
9585 tc_size[i] = roundup_size;
9586 tc_offset[i] = kinfo->rss_size * i;
9587 }
9588 ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
9589 if (ret)
9590 return ret;
9591
9592 /* the RSS indirection table has been configured by the user; keep it */
9593 if (rxfh_configured)
9594 goto out;
9595
9596 /* reinitialize the RSS indirection table for the new rss_size */
9597 rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
9598 if (!rss_indir)
9599 return -ENOMEM;
9600
9601 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
9602 rss_indir[i] = i % kinfo->rss_size;
9603
9604 ret = hclge_set_rss(handle, rss_indir, NULL, 0);
9605 if (ret)
9606 dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
9607 ret);
9608
9609 kfree(rss_indir);
9610
9611 out:
9612 if (!ret)
9613 dev_info(&hdev->pdev->dev,
9614 "Channels changed, rss_size from %d to %d, tqps from %d to %d",
9615 cur_rss_size, kinfo->rss_size,
9616 cur_tqps, kinfo->rss_size * kinfo->num_tc);
9617
9618 return ret;
9619 }
9620
9621 static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
9622 u32 *regs_num_64_bit)
9623 {
9624 struct hclge_desc desc;
9625 u32 total_num;
9626 int ret;
9627
9628 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
9629 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9630 if (ret) {
9631 dev_err(&hdev->pdev->dev,
9632 "Query register number cmd failed, ret = %d.\n", ret);
9633 return ret;
9634 }
9635
9636 *regs_num_32_bit = le32_to_cpu(desc.data[0]);
9637 *regs_num_64_bit = le32_to_cpu(desc.data[1]);
9638
9639 total_num = *regs_num_32_bit + *regs_num_64_bit;
9640 if (!total_num)
9641 return -EINVAL;
9642
9643 return 0;
9644 }
9645
9646 static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
9647 void *data)
9648 {
9649 #define HCLGE_32_BIT_REG_RTN_DATANUM 8
9650 #define HCLGE_32_BIT_DESC_NODATA_LEN 2
9651
9652 struct hclge_desc *desc;
9653 u32 *reg_val = data;
9654 __le32 *desc_data;
9655 int nodata_num;
9656 int cmd_num;
9657 int i, k, n;
9658 int ret;
9659
9660 if (regs_num == 0)
9661 return 0;
9662
9663 nodata_num = HCLGE_32_BIT_DESC_NODATA_LEN;
9664 cmd_num = DIV_ROUND_UP(regs_num + nodata_num,
9665 HCLGE_32_BIT_REG_RTN_DATANUM);
9666 desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
9667 if (!desc)
9668 return -ENOMEM;
9669
9670 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
9671 ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
9672 if (ret) {
9673 dev_err(&hdev->pdev->dev,
9674 "Query 32 bit register cmd failed, ret = %d.\n", ret);
9675 kfree(desc);
9676 return ret;
9677 }
9678
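/* the first descriptor still contains the command header, so only
 * DATANUM - nodata_num of its words hold register values; continuation
 * descriptors are filled with register values over their entire length
 */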
9679 for (i = 0; i < cmd_num; i++) {
9680 if (i == 0) {
9681 desc_data = (__le32 *)(&desc[i].data[0]);
9682 n = HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num;
9683 } else {
9684 desc_data = (__le32 *)(&desc[i]);
9685 n = HCLGE_32_BIT_REG_RTN_DATANUM;
9686 }
9687 for (k = 0; k < n; k++) {
9688 *reg_val++ = le32_to_cpu(*desc_data++);
9689
9690 regs_num--;
9691 if (!regs_num)
9692 break;
9693 }
9694 }
9695
9696 kfree(desc);
9697 return 0;
9698 }
9699
9700 static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
9701 void *data)
9702 {
9703 #define HCLGE_64_BIT_REG_RTN_DATANUM 4
9704 #define HCLGE_64_BIT_DESC_NODATA_LEN 1
9705
9706 struct hclge_desc *desc;
9707 u64 *reg_val = data;
9708 __le64 *desc_data;
9709 int nodata_len;
9710 int cmd_num;
9711 int i, k, n;
9712 int ret;
9713
9714 if (regs_num == 0)
9715 return 0;
9716
9717 nodata_len = HCLGE_64_BIT_DESC_NODATA_LEN;
9718 cmd_num = DIV_ROUND_UP(regs_num + nodata_len,
9719 HCLGE_64_BIT_REG_RTN_DATANUM);
9720 desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
9721 if (!desc)
9722 return -ENOMEM;
9723
9724 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
9725 ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
9726 if (ret) {
9727 dev_err(&hdev->pdev->dev,
9728 "Query 64 bit register cmd failed, ret = %d.\n", ret);
9729 kfree(desc);
9730 return ret;
9731 }
9732
9733 for (i = 0; i < cmd_num; i++) {
9734 if (i == 0) {
9735 desc_data = (__le64 *)(&desc[i].data[0]);
9736 n = HCLGE_64_BIT_REG_RTN_DATANUM - nodata_len;
9737 } else {
9738 desc_data = (__le64 *)(&desc[i]);
9739 n = HCLGE_64_BIT_REG_RTN_DATANUM;
9740 }
9741 for (k = 0; k < n; k++) {
9742 *reg_val++ = le64_to_cpu(*desc_data++);
9743
9744 regs_num--;
9745 if (!regs_num)
9746 break;
9747 }
9748 }
9749
9750 kfree(desc);
9751 return 0;
9752 }
9753
9754 #define MAX_SEPARATE_NUM 4
9755 #define SEPARATOR_VALUE 0xFDFCFBFA
9756 #define REG_NUM_PER_LINE 4
9757 #define REG_LEN_PER_LINE (REG_NUM_PER_LINE * sizeof(u32))
9758 #define REG_SEPARATOR_LINE 1
9759 #define REG_NUM_REMAIN_MASK 3
9760 #define BD_LIST_MAX_NUM 30
9761
9762 int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc)
9763 {
9764 /* the DFX BD number query uses four chained descriptors */
9765 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_DFX_BD_NUM, true);
9766 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
9767 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_DFX_BD_NUM, true);
9768 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
9769 hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_DFX_BD_NUM, true);
9770 desc[2].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
9771 hclge_cmd_setup_basic_desc(&desc[3], HCLGE_OPC_DFX_BD_NUM, true);
9772
9773 return hclge_cmd_send(&hdev->hw, desc, 4);
9774 }
9775
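/* Convert the BD-number query result into a per-type BD count, using
 * hclge_dfx_bd_offset_list to locate each register type's entry in the
 * returned descriptors.
 */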
9776 static int hclge_get_dfx_reg_bd_num(struct hclge_dev *hdev,
9777 int *bd_num_list,
9778 u32 type_num)
9779 {
9780 #define HCLGE_DFX_REG_BD_NUM 4
9781
9782 u32 entries_per_desc, desc_index, index, offset, i;
9783 struct hclge_desc desc[HCLGE_DFX_REG_BD_NUM];
9784 int ret;
9785
9786 ret = hclge_query_bd_num_cmd_send(hdev, desc);
9787 if (ret) {
9788 dev_err(&hdev->pdev->dev,
9789 "Get dfx bd num fail, status is %d.\n", ret);
9790 return ret;
9791 }
9792
9793 entries_per_desc = ARRAY_SIZE(desc[0].data);
9794 for (i = 0; i < type_num; i++) {
9795 offset = hclge_dfx_bd_offset_list[i];
9796 index = offset % entries_per_desc;
9797 desc_index = offset / entries_per_desc;
9798 bd_num_list[i] = le32_to_cpu(desc[desc_index].data[index]);
9799 }
9800
9801 return ret;
9802 }
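/* Chain @bd_num descriptors for @cmd (HCLGE_CMD_FLAG_NEXT on all but the
 * last) and send the DFX register query as a single command.
 */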
9803
9804 static int hclge_dfx_reg_cmd_send(struct hclge_dev *hdev,
9805 struct hclge_desc *desc_src, int bd_num,
9806 enum hclge_opcode_type cmd)
9807 {
9808 struct hclge_desc *desc = desc_src;
9809 int i, ret;
9810
9811 hclge_cmd_setup_basic_desc(desc, cmd, true);
9812 for (i = 0; i < bd_num - 1; i++) {
9813 desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
9814 desc++;
9815 hclge_cmd_setup_basic_desc(desc, cmd, true);
9816 }
9817
9818 desc = desc_src;
9819 ret = hclge_cmd_send(&hdev->hw, desc, bd_num);
9820 if (ret)
9821 dev_err(&hdev->pdev->dev,
9822 "Query dfx reg cmd(0x%x) send fail, status is %d.\n",
9823 cmd, ret);
9824
9825 return ret;
9826 }
9827
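/* Copy register values out of the descriptors into @data and append
 * SEPARATOR_VALUE padding words; returns the number of 32-bit words
 * written so the caller can advance its buffer pointer.
 */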
9828 static int hclge_dfx_reg_fetch_data(struct hclge_desc *desc_src, int bd_num,
9829 void *data)
9830 {
9831 int entries_per_desc, reg_num, separator_num, desc_index, index, i;
9832 struct hclge_desc *desc = desc_src;
9833 u32 *reg = data;
9834
9835 entries_per_desc = ARRAY_SIZE(desc->data);
9836 reg_num = entries_per_desc * bd_num;
9837 separator_num = REG_NUM_PER_LINE - (reg_num & REG_NUM_REMAIN_MASK);
9838 for (i = 0; i < reg_num; i++) {
9839 index = i % entries_per_desc;
9840 desc_index = i / entries_per_desc;
9841 *reg++ = le32_to_cpu(desc[desc_index].data[index]);
9842 }
9843 for (i = 0; i < separator_num; i++)
9844 *reg++ = SEPARATOR_VALUE;
9845
9846 return reg_num + separator_num;
9847 }
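/* Work out how many bytes the DFX register dump will occupy: one
 * REG_LEN_PER_LINE-aligned block per register type, with room for the
 * SEPARATOR_VALUE padding.
 */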
9848
9849 static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
9850 {
9851 u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
9852 int data_len_per_desc, data_len, bd_num, i;
9853 int bd_num_list[BD_LIST_MAX_NUM];
9854 int ret;
9855
9856 ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
9857 if (ret) {
9858 dev_err(&hdev->pdev->dev,
9859 "Get dfx reg bd num fail, status is %d.\n", ret);
9860 return ret;
9861 }
9862
9863 data_len_per_desc = FIELD_SIZEOF(struct hclge_desc, data);
9864 *len = 0;
9865 for (i = 0; i < dfx_reg_type_num; i++) {
9866 bd_num = bd_num_list[i];
9867 data_len = data_len_per_desc * bd_num;
9868 *len += (data_len / REG_LEN_PER_LINE + 1) * REG_LEN_PER_LINE;
9869 }
9870
9871 return ret;
9872 }
9873
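/* Dump every DFX register group into @data, issuing one command from
 * hclge_dfx_reg_opcode_list per group and reusing a descriptor buffer
 * sized for the largest group.
 */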
9874 static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
9875 {
9876 u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
9877 int bd_num, bd_num_max, buf_len, i;
9878 int bd_num_list[BD_LIST_MAX_NUM];
9879 struct hclge_desc *desc_src;
9880 u32 *reg = data;
9881 int ret;
9882
9883 ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
9884 if (ret) {
9885 dev_err(&hdev->pdev->dev,
9886 "Get dfx reg bd num fail, status is %d.\n", ret);
9887 return ret;
9888 }
9889
9890 bd_num_max = bd_num_list[0];
9891 for (i = 1; i < dfx_reg_type_num; i++)
9892 bd_num_max = max_t(int, bd_num_max, bd_num_list[i]);
9893
9894 buf_len = sizeof(*desc_src) * bd_num_max;
9895 desc_src = kzalloc(buf_len, GFP_KERNEL);
9896 if (!desc_src) {
9897 dev_err(&hdev->pdev->dev, "%s kzalloc failed\n", __func__);
9898 return -ENOMEM;
9899 }
9900
9901 for (i = 0; i < dfx_reg_type_num; i++) {
9902 bd_num = bd_num_list[i];
9903 ret = hclge_dfx_reg_cmd_send(hdev, desc_src, bd_num,
9904 hclge_dfx_reg_opcode_list[i]);
9905 if (ret) {
9906 dev_err(&hdev->pdev->dev,
9907 "Get dfx reg fail, status is %d.\n", ret);
9908 break;
9909 }
9910
9911 reg += hclge_dfx_reg_fetch_data(desc_src, bd_num, reg);
9912 }
9913
9914 kfree(desc_src);
9915 return ret;
9916 }
9917
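/* Read the directly accessible PF registers: command-queue registers,
 * common registers, per-ring registers for every TQP and per-vector
 * interrupt registers, each block padded with SEPARATOR_VALUE words.
 * Returns the number of 32-bit words written to @data.
 */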
9918 static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
9919 struct hnae3_knic_private_info *kinfo)
9920 {
9921 #define HCLGE_RING_REG_OFFSET 0x200
9922 #define HCLGE_RING_INT_REG_OFFSET 0x4
9923
9924 int i, j, reg_num, separator_num;
9925 int data_num_sum;
9926 u32 *reg = data;
9927
9928 /* fetch the per-PF registers from the PF PCIe register space */
9929 reg_num = ARRAY_SIZE(cmdq_reg_addr_list);
9930 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
9931 for (i = 0; i < reg_num; i++)
9932 *reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
9933 for (i = 0; i < separator_num; i++)
9934 *reg++ = SEPARATOR_VALUE;
9935 data_num_sum = reg_num + separator_num;
9936
9937 reg_num = ARRAY_SIZE(common_reg_addr_list);
9938 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
9939 for (i = 0; i < reg_num; i++)
9940 *reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
9941 for (i = 0; i < separator_num; i++)
9942 *reg++ = SEPARATOR_VALUE;
9943 data_num_sum += reg_num + separator_num;
9944
9945 reg_num = ARRAY_SIZE(ring_reg_addr_list);
9946 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
9947 for (j = 0; j < kinfo->num_tqps; j++) {
9948 for (i = 0; i < reg_num; i++)
9949 *reg++ = hclge_read_dev(&hdev->hw,
9950 ring_reg_addr_list[i] +
9951 HCLGE_RING_REG_OFFSET * j);
9952 for (i = 0; i < separator_num; i++)
9953 *reg++ = SEPARATOR_VALUE;
9954 }
9955 data_num_sum += (reg_num + separator_num) * kinfo->num_tqps;
9956
9957 reg_num = ARRAY_SIZE(tqp_intr_reg_addr_list);
9958 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
9959 for (j = 0; j < hdev->num_msi_used - 1; j++) {
9960 for (i = 0; i < reg_num; i++)
9961 *reg++ = hclge_read_dev(&hdev->hw,
9962 tqp_intr_reg_addr_list[i] +
9963 HCLGE_RING_INT_REG_OFFSET * j);
9964 for (i = 0; i < separator_num; i++)
9965 *reg++ = SEPARATOR_VALUE;
9966 }
9967 data_num_sum += (reg_num + separator_num) * (hdev->num_msi_used - 1);
9968
9969 return data_num_sum;
9970 }
9971
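/* Total size, in bytes, of the dump produced by hclge_get_regs(); exposed
 * through the hnae3 ops so the ethtool layer can size its buffer.
 */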
9972 static int hclge_get_regs_len(struct hnae3_handle *handle)
9973 {
9974 int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
9975 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
9976 struct hclge_vport *vport = hclge_get_vport(handle);
9977 struct hclge_dev *hdev = vport->back;
9978 int regs_num_32_bit, regs_num_64_bit, dfx_regs_len;
9979 int regs_lines_32_bit, regs_lines_64_bit;
9980 int ret;
9981
9982 ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
9983 if (ret) {
9984 dev_err(&hdev->pdev->dev,
9985 "Get register number failed, ret = %d.\n", ret);
9986 return ret;
9987 }
9988
9989 ret = hclge_get_dfx_reg_len(hdev, &dfx_regs_len);
9990 if (ret) {
9991 dev_err(&hdev->pdev->dev,
9992 "Get dfx reg len failed, ret = %d.\n", ret);
9993 return ret;
9994 }
9995
9996 cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE +
9997 REG_SEPARATOR_LINE;
9998 common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE +
9999 REG_SEPARATOR_LINE;
10000 ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE +
10001 REG_SEPARATOR_LINE;
10002 tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE +
10003 REG_SEPARATOR_LINE;
10004 regs_lines_32_bit = regs_num_32_bit * sizeof(u32) / REG_LEN_PER_LINE +
10005 REG_SEPARATOR_LINE;
10006 regs_lines_64_bit = regs_num_64_bit * sizeof(u64) / REG_LEN_PER_LINE +
10007 REG_SEPARATOR_LINE;
10008
10009 return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
10010 tqp_intr_lines * (hdev->num_msi_used - 1) + regs_lines_32_bit +
10011 regs_lines_64_bit) * REG_LEN_PER_LINE + dfx_regs_len;
10012 }
10013
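/* Fill @data with the full register dump: direct PF registers first, then
 * the firmware-provided 32-bit, 64-bit and DFX register sets, each section
 * padded with SEPARATOR_VALUE words.
 */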
10014 static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
10015 void *data)
10016 {
10017 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
10018 struct hclge_vport *vport = hclge_get_vport(handle);
10019 struct hclge_dev *hdev = vport->back;
10020 u32 regs_num_32_bit, regs_num_64_bit;
10021 int i, reg_num, separator_num, ret;
10022 u32 *reg = data;
10023
10024 *version = hdev->fw_version;
10025
10026 ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
10027 if (ret) {
10028 dev_err(&hdev->pdev->dev,
10029 "Get register number failed, ret = %d.\n", ret);
10030 return;
10031 }
10032
10033 reg += hclge_fetch_pf_reg(hdev, reg, kinfo);
10034
10035 ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
10036 if (ret) {
10037 dev_err(&hdev->pdev->dev,
10038 "Get 32 bit register failed, ret = %d.\n", ret);
10039 return;
10040 }
10041 reg_num = regs_num_32_bit;
10042 reg += reg_num;
10043 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10044 for (i = 0; i < separator_num; i++)
10045 *reg++ = SEPARATOR_VALUE;
10046
10047 ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
10048 if (ret) {
10049 dev_err(&hdev->pdev->dev,
10050 "Get 64 bit register failed, ret = %d.\n", ret);
10051 return;
10052 }
10053 reg_num = regs_num_64_bit * 2;
10054 reg += reg_num;
10055 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10056 for (i = 0; i < separator_num; i++)
10057 *reg++ = SEPARATOR_VALUE;
10058
10059 ret = hclge_get_dfx_reg(hdev, reg);
10060 if (ret)
10061 dev_err(&hdev->pdev->dev,
10062 "Get dfx register failed, ret = %d.\n", ret);
10063 }
10064
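/* Drive the locate LED through the HCLGE_OPC_LED_STATUS_CFG command; used
 * by the ethtool physical-identification handler below.
 */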
10065 static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
10066 {
10067 struct hclge_set_led_state_cmd *req;
10068 struct hclge_desc desc;
10069 int ret;
10070
10071 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
10072
10073 req = (struct hclge_set_led_state_cmd *)desc.data;
10074 hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
10075 HCLGE_LED_LOCATE_STATE_S, locate_led_status);
10076
10077 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10078 if (ret)
10079 dev_err(&hdev->pdev->dev,
10080 "Send set led state cmd error, ret =%d\n", ret);
10081
10082 return ret;
10083 }
10084
10085 enum hclge_led_status {
10086 HCLGE_LED_OFF,
10087 HCLGE_LED_ON,
10088 HCLGE_LED_NO_CHANGE = 0xFF,
10089 };
10090
10091 static int hclge_set_led_id(struct hnae3_handle *handle,
10092 enum ethtool_phys_id_state status)
10093 {
10094 struct hclge_vport *vport = hclge_get_vport(handle);
10095 struct hclge_dev *hdev = vport->back;
10096
10097 switch (status) {
10098 case ETHTOOL_ID_ACTIVE:
10099 return hclge_set_led_status(hdev, HCLGE_LED_ON);
10100 case ETHTOOL_ID_INACTIVE:
10101 return hclge_set_led_status(hdev, HCLGE_LED_OFF);
10102 default:
10103 return -EINVAL;
10104 }
10105 }
10106
10107 static void hclge_get_link_mode(struct hnae3_handle *handle,
10108 unsigned long *supported,
10109 unsigned long *advertising)
10110 {
10111 unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
10112 struct hclge_vport *vport = hclge_get_vport(handle);
10113 struct hclge_dev *hdev = vport->back;
10114 unsigned int idx = 0;
10115
10116 for (; idx < size; idx++) {
10117 supported[idx] = hdev->hw.mac.supported[idx];
10118 advertising[idx] = hdev->hw.mac.advertising[idx];
10119 }
10120 }
10121
10122 static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
10123 {
10124 struct hclge_vport *vport = hclge_get_vport(handle);
10125 struct hclge_dev *hdev = vport->back;
10126
10127 return hclge_config_gro(hdev, enable);
10128 }
10129
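/* Operations exported to the hnae3 framework; this table binds the PF
 * (hclge) hardware layer to the common HNS3 netdev driver.
 */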
10130 static const struct hnae3_ae_ops hclge_ops = {
10131 .init_ae_dev = hclge_init_ae_dev,
10132 .uninit_ae_dev = hclge_uninit_ae_dev,
10133 .flr_prepare = hclge_flr_prepare,
10134 .flr_done = hclge_flr_done,
10135 .init_client_instance = hclge_init_client_instance,
10136 .uninit_client_instance = hclge_uninit_client_instance,
10137 .map_ring_to_vector = hclge_map_ring_to_vector,
10138 .unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
10139 .get_vector = hclge_get_vector,
10140 .put_vector = hclge_put_vector,
10141 .set_promisc_mode = hclge_set_promisc_mode,
10142 .set_loopback = hclge_set_loopback,
10143 .start = hclge_ae_start,
10144 .stop = hclge_ae_stop,
10145 .client_start = hclge_client_start,
10146 .client_stop = hclge_client_stop,
10147 .get_status = hclge_get_status,
10148 .get_ksettings_an_result = hclge_get_ksettings_an_result,
10149 .cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
10150 .get_media_type = hclge_get_media_type,
10151 .check_port_speed = hclge_check_port_speed,
10152 .get_fec = hclge_get_fec,
10153 .set_fec = hclge_set_fec,
10154 .get_rss_key_size = hclge_get_rss_key_size,
10155 .get_rss_indir_size = hclge_get_rss_indir_size,
10156 .get_rss = hclge_get_rss,
10157 .set_rss = hclge_set_rss,
10158 .set_rss_tuple = hclge_set_rss_tuple,
10159 .get_rss_tuple = hclge_get_rss_tuple,
10160 .get_tc_size = hclge_get_tc_size,
10161 .get_mac_addr = hclge_get_mac_addr,
10162 .set_mac_addr = hclge_set_mac_addr,
10163 .do_ioctl = hclge_do_ioctl,
10164 .add_uc_addr = hclge_add_uc_addr,
10165 .rm_uc_addr = hclge_rm_uc_addr,
10166 .add_mc_addr = hclge_add_mc_addr,
10167 .rm_mc_addr = hclge_rm_mc_addr,
10168 .set_autoneg = hclge_set_autoneg,
10169 .get_autoneg = hclge_get_autoneg,
10170 .restart_autoneg = hclge_restart_autoneg,
10171 .halt_autoneg = hclge_halt_autoneg,
10172 .get_pauseparam = hclge_get_pauseparam,
10173 .set_pauseparam = hclge_set_pauseparam,
10174 .set_mtu = hclge_set_mtu,
10175 .reset_queue = hclge_reset_tqp,
10176 .get_stats = hclge_get_stats,
10177 .get_mac_stats = hclge_get_mac_stat,
10178 .update_stats = hclge_update_stats,
10179 .get_strings = hclge_get_strings,
10180 .get_sset_count = hclge_get_sset_count,
10181 .get_fw_version = hclge_get_fw_version,
10182 .get_mdix_mode = hclge_get_mdix_mode,
10183 .enable_vlan_filter = hclge_enable_vlan_filter,
10184 .set_vlan_filter = hclge_set_vlan_filter,
10185 .set_vf_vlan_filter = hclge_set_vf_vlan_filter,
10186 .enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
10187 .reset_event = hclge_reset_event,
10188 .get_reset_level = hclge_get_reset_level,
10189 .set_default_reset_request = hclge_set_def_reset_request,
10190 .get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
10191 .set_channels = hclge_set_channels,
10192 .get_channels = hclge_get_channels,
10193 .get_regs_len = hclge_get_regs_len,
10194 .get_regs = hclge_get_regs,
10195 .set_led_id = hclge_set_led_id,
10196 .get_link_mode = hclge_get_link_mode,
10197 .add_fd_entry = hclge_add_fd_entry,
10198 .del_fd_entry = hclge_del_fd_entry,
10199 .del_all_fd_entries = hclge_del_all_fd_entries,
10200 .get_fd_rule_cnt = hclge_get_fd_rule_cnt,
10201 .get_fd_rule_info = hclge_get_fd_rule_info,
10202 .get_fd_all_rules = hclge_get_all_rules,
10203 .restore_fd_rules = hclge_restore_fd_entries,
10204 .enable_fd = hclge_enable_fd,
10205 .add_arfs_entry = hclge_add_fd_entry_by_arfs,
10206 .dbg_run_cmd = hclge_dbg_run_cmd,
10207 .handle_hw_ras_error = hclge_handle_hw_ras_error,
10208 .get_hw_reset_stat = hclge_get_hw_reset_stat,
10209 .ae_dev_resetting = hclge_ae_dev_resetting,
10210 .ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
10211 .set_gro_en = hclge_gro_en,
10212 .get_global_queue_id = hclge_covert_handle_qid_global,
10213 .set_timer_task = hclge_set_timer_task,
10214 .mac_connect_phy = hclge_mac_connect_phy,
10215 .mac_disconnect_phy = hclge_mac_disconnect_phy,
10216 .restore_vlan_table = hclge_restore_vlan_table,
10217 };
10218
10219 static struct hnae3_ae_algo ae_algo = {
10220 .ops = &hclge_ops,
10221 .pdev_id_table = ae_algo_pci_tbl,
10222 };
10223
10224 static int hclge_init(void)
10225 {
10226 pr_info("%s is initializing\n", HCLGE_NAME);
10227
10228 hnae3_register_ae_algo(&ae_algo);
10229
10230 return 0;
10231 }
10232
10233 static void hclge_exit(void)
10234 {
10235 hnae3_unregister_ae_algo(&ae_algo);
10236 }
10237 module_init(hclge_init);
10238 module_exit(hclge_exit);
10239
10240 MODULE_LICENSE("GPL");
10241 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
10242 MODULE_DESCRIPTION("HCLGE Driver");
10243 MODULE_VERSION(HCLGE_MOD_VERSION);