This source file includes the following definitions:
- __storm_memset_dma_mapping
- storm_memset_spq_addr
- storm_memset_vf_to_pf
- storm_memset_func_en
- storm_memset_eq_data
- storm_memset_eq_prod
- bnx2x_reg_wr_ind
- bnx2x_reg_rd_ind
- bnx2x_dp_dmae
- bnx2x_post_dmae
- bnx2x_dmae_opcode_add_comp
- bnx2x_dmae_opcode_clr_src_reset
- bnx2x_dmae_opcode
- bnx2x_prep_dmae_with_comp
- bnx2x_issue_dmae_with_comp
- bnx2x_write_dmae
- bnx2x_read_dmae
- bnx2x_write_dmae_phys_len
- bnx2x_get_assert_list_entry
- bnx2x_mc_assert
- bnx2x_fw_dump_lvl
- bnx2x_fw_dump
- bnx2x_hc_int_disable
- bnx2x_igu_int_disable
- bnx2x_int_disable
- bnx2x_panic_dump
- bnx2x_pbf_pN_buf_flushed
- bnx2x_pbf_pN_cmd_flushed
- bnx2x_flr_clnup_reg_poll
- bnx2x_flr_clnup_poll_hw_counter
- bnx2x_flr_clnup_poll_count
- bnx2x_tx_hw_flushed
- bnx2x_send_final_clnup
- bnx2x_is_pcie_pending
- bnx2x_poll_hw_usage_counters
- bnx2x_hw_enable_status
- bnx2x_pf_flr_clnup
- bnx2x_hc_int_enable
- bnx2x_igu_int_enable
- bnx2x_int_enable
- bnx2x_int_disable_sync
- bnx2x_trylock_hw_lock
- bnx2x_get_leader_lock_resource
- bnx2x_trylock_leader_lock
- bnx2x_schedule_sp_task
- bnx2x_sp_event
- bnx2x_interrupt
- bnx2x_acquire_hw_lock
- bnx2x_release_leader_lock
- bnx2x_release_hw_lock
- bnx2x_get_gpio
- bnx2x_set_gpio
- bnx2x_set_mult_gpio
- bnx2x_set_gpio_int
- bnx2x_set_spio
- bnx2x_calc_fc_adv
- bnx2x_set_requested_fc
- bnx2x_init_dropless_fc
- bnx2x_initial_phy_init
- bnx2x_link_set
- bnx2x__link_reset
- bnx2x_force_link_reset
- bnx2x_link_test
- bnx2x_calc_vn_min
- bnx2x_calc_vn_max
- bnx2x_get_cmng_fns_mode
- bnx2x_read_mf_cfg
- bnx2x_cmng_fns_init
- storm_memset_cmng
- bnx2x_set_local_cmng
- bnx2x_link_attn
- bnx2x__link_status_update
- bnx2x_afex_func_update
- bnx2x_afex_handle_vif_list_cmd
- bnx2x_handle_afex_cmd
- bnx2x_handle_update_svid_cmd
- bnx2x_pmf_update
- bnx2x_fw_command
- storm_memset_func_cfg
- bnx2x_func_init
- bnx2x_get_common_flags
- bnx2x_get_q_flags
- bnx2x_pf_q_prep_general
- bnx2x_pf_rx_q_prep
- bnx2x_pf_tx_q_prep
- bnx2x_pf_init
- bnx2x_e1h_disable
- bnx2x_e1h_enable
- bnx2x_drv_info_ether_stat
- bnx2x_drv_info_fcoe_stat
- bnx2x_drv_info_iscsi_stat
- bnx2x_config_mf_bw
- bnx2x_set_mf_bw
- bnx2x_handle_eee_event
- bnx2x_handle_drv_info_req
- bnx2x_update_mng_version_utility
- bnx2x_update_mng_version
- bnx2x_update_mfw_dump
- bnx2x_oem_event
- bnx2x_sp_get_next
- bnx2x_sp_prod_update
- bnx2x_is_contextless_ramrod
- bnx2x_sp_post
- bnx2x_acquire_alr
- bnx2x_release_alr
- bnx2x_update_dsb_idx
- bnx2x_attn_int_asserted
- bnx2x_fan_failure
- bnx2x_attn_int_deasserted0
- bnx2x_attn_int_deasserted1
- bnx2x_attn_int_deasserted2
- bnx2x_attn_int_deasserted3
- bnx2x_set_reset_global
- bnx2x_clear_reset_global
- bnx2x_reset_is_global
- bnx2x_set_reset_done
- bnx2x_set_reset_in_progress
- bnx2x_reset_is_done
- bnx2x_set_pf_load
- bnx2x_clear_pf_load
- bnx2x_get_load_status
- _print_parity
- _print_next_block
- bnx2x_check_blocks_with_parity0
- bnx2x_check_blocks_with_parity1
- bnx2x_check_blocks_with_parity2
- bnx2x_check_blocks_with_parity3
- bnx2x_check_blocks_with_parity4
- bnx2x_parity_attn
- bnx2x_chk_parity_attn
- bnx2x_attn_int_deasserted4
- bnx2x_attn_int_deasserted
- bnx2x_attn_int
- bnx2x_igu_ack_sb
- bnx2x_update_eq_prod
- bnx2x_cnic_handle_cfc_del
- bnx2x_handle_mcast_eqe
- bnx2x_handle_classification_eqe
- bnx2x_handle_rx_mode_eqe
- bnx2x_after_afex_vif_lists
- bnx2x_after_function_update
- bnx2x_cid_to_q_obj
- bnx2x_eq_int
- bnx2x_sp_task
- bnx2x_msix_sp_int
- bnx2x_drv_pulse
- bnx2x_timer
- bnx2x_fill
- bnx2x_wr_fp_sb_data
- bnx2x_zero_fp_sb
- bnx2x_wr_sp_sb_data
- bnx2x_zero_sp_sb
- bnx2x_setup_ndsb_state_machine
- bnx2x_map_sb_state_machines
- bnx2x_init_sb
- bnx2x_update_coalesce_sb
- bnx2x_init_def_sb
- bnx2x_update_coalesce
- bnx2x_init_sp_ring
- bnx2x_init_eq_ring
- bnx2x_set_q_rx_mode
- bnx2x_fill_accept_flags
- bnx2x_set_storm_rx_mode
- bnx2x_init_internal_common
- bnx2x_init_internal
- bnx2x_fp_igu_sb_id
- bnx2x_fp_fw_sb_id
- bnx2x_fp_cl_id
- bnx2x_init_eth_fp
- bnx2x_init_tx_ring_one
- bnx2x_init_tx_rings_cnic
- bnx2x_init_tx_rings
- bnx2x_init_fcoe_fp
- bnx2x_nic_init_cnic
- bnx2x_pre_irq_nic_init
- bnx2x_post_irq_nic_init
- bnx2x_gunzip_init
- bnx2x_gunzip_end
- bnx2x_gunzip
- bnx2x_lb_pckt
- bnx2x_int_mem_test
- bnx2x_enable_blocks_attention
- bnx2x_reset_common
- bnx2x_setup_dmae
- bnx2x_init_pxp
- bnx2x_setup_fan_failure_detection
- bnx2x_pf_disable
- bnx2x__common_init_phy
- bnx2x_config_endianity
- bnx2x_set_endianity
- bnx2x_reset_endianity
- bnx2x_init_hw_common
- bnx2x_init_hw_common_chip
- bnx2x_init_hw_port
- bnx2x_ilt_wr
- bnx2x_igu_clear_sb_gen
- bnx2x_igu_clear_sb
- bnx2x_clear_func_ilt
- bnx2x_init_searcher
- bnx2x_func_switch_update
- bnx2x_reset_nic_mode
- bnx2x_init_hw_func_cnic
- bnx2x_clean_pglue_errors
- bnx2x_init_hw_func
- bnx2x_free_mem_cnic
- bnx2x_free_mem
- bnx2x_alloc_mem_cnic
- bnx2x_alloc_mem
- bnx2x_set_mac_one
- bnx2x_set_vlan_one
- bnx2x_clear_vlan_info
- bnx2x_del_all_vlans
- bnx2x_del_all_macs
- bnx2x_set_eth_mac
- bnx2x_setup_leading
- bnx2x_set_int_mode
- bnx2x_cid_ilt_lines
- bnx2x_ilt_set_info
- bnx2x_pf_q_prep_init
- bnx2x_setup_tx_only
- bnx2x_setup_queue
- bnx2x_stop_queue
- bnx2x_reset_func
- bnx2x_reset_port
- bnx2x_reset_hw
- bnx2x_func_stop
- bnx2x_send_unload_req
- bnx2x_send_unload_done
- bnx2x_func_wait_started
- bnx2x_disable_ptp
- bnx2x_stop_ptp
- bnx2x_chip_cleanup
- bnx2x_disable_close_the_gate
- bnx2x_set_234_gates
- bnx2x_clp_reset_prep
- bnx2x_clp_reset_done
- bnx2x_reset_mcp_prep
- bnx2x_mcp_wait_one
- bnx2x_init_shmem
- bnx2x_reset_mcp_comp
- bnx2x_pxp_prep
- bnx2x_process_kill_chip_reset
- bnx2x_er_poll_igu_vq
- bnx2x_process_kill
- bnx2x_leader_reset
- bnx2x_recovery_failed
- bnx2x_parity_recover
- bnx2x_udp_port_update
- __bnx2x_add_udp_port
- __bnx2x_del_udp_port
- bnx2x_udp_tunnel_add
- bnx2x_udp_tunnel_del
- bnx2x_sp_rtnl_task
- bnx2x_period_task
- bnx2x_get_pretend_reg
- bnx2x_prev_unload_close_umac
- bnx2x_prev_unload_close_mac
- bnx2x_prev_is_after_undi
- bnx2x_prev_unload_undi_inc
- bnx2x_prev_mcp_done
- bnx2x_prev_path_get_entry
- bnx2x_prev_path_mark_eeh
- bnx2x_prev_is_path_marked
- bnx2x_port_after_undi
- bnx2x_prev_mark_path
- bnx2x_do_flr
- bnx2x_prev_unload_uncommon
- bnx2x_prev_unload_common
- bnx2x_prev_unload
- bnx2x_get_common_hwinfo
- bnx2x_get_igu_cam_info
- bnx2x_link_settings_supported
- bnx2x_link_settings_requested
- bnx2x_set_mac_buf
- bnx2x_get_port_hwinfo
- bnx2x_get_iscsi_info
- bnx2x_get_ext_wwn_info
- bnx2x_shared_fcoe_funcs
- bnx2x_get_fcoe_info
- bnx2x_get_cnic_info
- bnx2x_get_cnic_mac_hwinfo
- bnx2x_get_mac_hwinfo
- bnx2x_get_dropless_info
- validate_set_si_mode
- bnx2x_get_hwinfo
- bnx2x_read_fwinfo
- bnx2x_set_modes_bitmap
- bnx2x_init_bp
- bnx2x_open
- bnx2x_close
- bnx2x_init_mcast_macs_list
- bnx2x_set_uc_list
- bnx2x_set_mc_list_e1x
- bnx2x_set_mc_list
- bnx2x_set_rx_mode
- bnx2x_set_rx_mode_inner
- bnx2x_mdio_read
- bnx2x_mdio_write
- bnx2x_ioctl
- bnx2x_validate_addr
- bnx2x_get_phys_port_id
- bnx2x_features_check
- __bnx2x_vlan_configure_vid
- bnx2x_vlan_configure_vid_list
- bnx2x_vlan_configure
- bnx2x_vlan_reconfigure_vid
- bnx2x_vlan_rx_add_vid
- bnx2x_vlan_rx_kill_vid
- bnx2x_set_coherency_mask
- bnx2x_disable_pcie_error_reporting
- bnx2x_init_dev
- bnx2x_check_firmware
- be32_to_cpu_n
- bnx2x_prep_ops
- bnx2x_prep_iro
- be16_to_cpu_n
- bnx2x_init_firmware
- bnx2x_release_firmware
- bnx2x__init_func_obj
- bnx2x_set_qm_cid_count
- bnx2x_get_num_non_def_sbs
- set_max_cos_est
- set_is_vf
- bnx2x_send_update_drift_ramrod
- bnx2x_ptp_adjfreq
- bnx2x_ptp_adjtime
- bnx2x_ptp_gettime
- bnx2x_ptp_settime
- bnx2x_ptp_enable
- bnx2x_register_phc
- bnx2x_init_one
- __bnx2x_remove
- bnx2x_remove_one
- bnx2x_eeh_nic_unload
- bnx2x_io_error_detected
- bnx2x_io_slot_reset
- bnx2x_io_resume
- bnx2x_shutdown
- bnx2x_init
- bnx2x_cleanup
- bnx2x_notify_link_changed
- bnx2x_set_iscsi_eth_mac_addr
- bnx2x_cnic_sp_post
- bnx2x_cnic_sp_queue
- bnx2x_cnic_ctl_send
- bnx2x_cnic_ctl_send_bh
- bnx2x_cnic_notify
- bnx2x_cnic_cfc_comp
- bnx2x_set_iscsi_eth_rx_mode
- bnx2x_drv_ctl
- bnx2x_get_fc_npiv
- bnx2x_setup_cnic_irq_info
- bnx2x_setup_cnic_info
- bnx2x_register_cnic
- bnx2x_unregister_cnic
- bnx2x_cnic_probe
- bnx2x_rx_ustorm_prods_offset
- bnx2x_pretend_func
- bnx2x_ptp_task
- bnx2x_set_rx_ts
- bnx2x_cyclecounter_read
- bnx2x_init_cyclecounter
- bnx2x_send_reset_timesync_ramrod
- bnx2x_enable_ptp_packets
- bnx2x_configure_ptp_filters
- bnx2x_hwtstamp_ioctl
- bnx2x_configure_ptp
- bnx2x_init_ptp
1 /* bnx2x_main.c: QLogic Everest network driver.
2  *
3  * Copyright (c) 2007-2013 Broadcom Corporation
4  * Copyright (c) 2014 QLogic Corporation
5  * All rights reserved
6  *
7  * This program is free software; you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License as published by
9  * the Free Software Foundation.
10  *
11  * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
12  * Written by: Eliezer Tamir
13  * Based on code from Michael Chan's bnx2 driver
14  * UDP CSUM errata workaround by Arik Gendelman
15  * Slowpath and fastpath rework by Vladislav Zolotarov
16  * Statistics and Link management by Yitchak Gertner
17  *
18  */
19
20 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
21
22 #include <linux/module.h>
23 #include <linux/moduleparam.h>
24 #include <linux/kernel.h>
25 #include <linux/device.h>
26 #include <linux/timer.h>
27 #include <linux/errno.h>
28 #include <linux/ioport.h>
29 #include <linux/slab.h>
30 #include <linux/interrupt.h>
31 #include <linux/pci.h>
32 #include <linux/aer.h>
33 #include <linux/init.h>
34 #include <linux/netdevice.h>
35 #include <linux/etherdevice.h>
36 #include <linux/skbuff.h>
37 #include <linux/dma-mapping.h>
38 #include <linux/bitops.h>
39 #include <linux/irq.h>
40 #include <linux/delay.h>
41 #include <asm/byteorder.h>
42 #include <linux/time.h>
43 #include <linux/ethtool.h>
44 #include <linux/mii.h>
45 #include <linux/if_vlan.h>
46 #include <linux/crash_dump.h>
47 #include <net/ip.h>
48 #include <net/ipv6.h>
49 #include <net/tcp.h>
50 #include <net/vxlan.h>
51 #include <net/checksum.h>
52 #include <net/ip6_checksum.h>
53 #include <linux/workqueue.h>
54 #include <linux/crc32.h>
55 #include <linux/crc32c.h>
56 #include <linux/prefetch.h>
57 #include <linux/zlib.h>
58 #include <linux/io.h>
59 #include <linux/semaphore.h>
60 #include <linux/stringify.h>
61 #include <linux/vmalloc.h>
62 #include "bnx2x.h"
63 #include "bnx2x_init.h"
64 #include "bnx2x_init_ops.h"
65 #include "bnx2x_cmn.h"
66 #include "bnx2x_vfpf.h"
67 #include "bnx2x_dcb.h"
68 #include "bnx2x_sp.h"
69 #include <linux/firmware.h>
70 #include "bnx2x_fw_file_hdr.h"
71
72 #define FW_FILE_VERSION \
73 __stringify(BCM_5710_FW_MAJOR_VERSION) "." \
74 __stringify(BCM_5710_FW_MINOR_VERSION) "." \
75 __stringify(BCM_5710_FW_REVISION_VERSION) "." \
76 __stringify(BCM_5710_FW_ENGINEERING_VERSION)
77 #define FW_FILE_NAME_E1 "bnx2x/bnx2x-e1-" FW_FILE_VERSION ".fw"
78 #define FW_FILE_NAME_E1H "bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw"
79 #define FW_FILE_NAME_E2 "bnx2x/bnx2x-e2-" FW_FILE_VERSION ".fw"
80
81 /* Time in jiffies before concluding the transmitter is hung */
82 #define TX_TIMEOUT (5*HZ)
83
84 static char version[] =
85 "QLogic 5771x/578xx 10/20-Gigabit Ethernet Driver "
86 DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
87
88 MODULE_AUTHOR("Eliezer Tamir");
89 MODULE_DESCRIPTION("QLogic "
90 "BCM57710/57711/57711E/"
91 "57712/57712_MF/57800/57800_MF/57810/57810_MF/"
92 "57840/57840_MF Driver");
93 MODULE_LICENSE("GPL");
94 MODULE_VERSION(DRV_MODULE_VERSION);
95 MODULE_FIRMWARE(FW_FILE_NAME_E1);
96 MODULE_FIRMWARE(FW_FILE_NAME_E1H);
97 MODULE_FIRMWARE(FW_FILE_NAME_E2);
98
99 int bnx2x_num_queues;
100 module_param_named(num_queues, bnx2x_num_queues, int, 0444);
101 MODULE_PARM_DESC(num_queues,
102 " Set number of queues (default is as a number of CPUs)");
103
104 static int disable_tpa;
105 module_param(disable_tpa, int, 0444);
106 MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");
107
108 static int int_mode;
109 module_param(int_mode, int, 0444);
110 MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X "
111 "(1 INT#x; 2 MSI)");
112
113 static int dropless_fc;
114 module_param(dropless_fc, int, 0444);
115 MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");
116
117 static int mrrs = -1;
118 module_param(mrrs, int, 0444);
119 MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");
120
121 static int debug;
122 module_param(debug, int, 0444);
123 MODULE_PARM_DESC(debug, " Default debug msglevel");
124
125 static struct workqueue_struct *bnx2x_wq;
126 struct workqueue_struct *bnx2x_iov_wq;
127
128 struct bnx2x_mac_vals {
129 u32 xmac_addr;
130 u32 xmac_val;
131 u32 emac_addr;
132 u32 emac_val;
133 u32 umac_addr[2];
134 u32 umac_val[2];
135 u32 bmac_addr;
136 u32 bmac_val[2];
137 };
138
139 enum bnx2x_board_type {
140 BCM57710 = 0,
141 BCM57711,
142 BCM57711E,
143 BCM57712,
144 BCM57712_MF,
145 BCM57712_VF,
146 BCM57800,
147 BCM57800_MF,
148 BCM57800_VF,
149 BCM57810,
150 BCM57810_MF,
151 BCM57810_VF,
152 BCM57840_4_10,
153 BCM57840_2_20,
154 BCM57840_MF,
155 BCM57840_VF,
156 BCM57811,
157 BCM57811_MF,
158 BCM57840_O,
159 BCM57840_MFO,
160 BCM57811_VF
161 };
162
163
164 static struct {
165 char *name;
166 } board_info[] = {
167 [BCM57710] = { "QLogic BCM57710 10 Gigabit PCIe [Everest]" },
168 [BCM57711] = { "QLogic BCM57711 10 Gigabit PCIe" },
169 [BCM57711E] = { "QLogic BCM57711E 10 Gigabit PCIe" },
170 [BCM57712] = { "QLogic BCM57712 10 Gigabit Ethernet" },
171 [BCM57712_MF] = { "QLogic BCM57712 10 Gigabit Ethernet Multi Function" },
172 [BCM57712_VF] = { "QLogic BCM57712 10 Gigabit Ethernet Virtual Function" },
173 [BCM57800] = { "QLogic BCM57800 10 Gigabit Ethernet" },
174 [BCM57800_MF] = { "QLogic BCM57800 10 Gigabit Ethernet Multi Function" },
175 [BCM57800_VF] = { "QLogic BCM57800 10 Gigabit Ethernet Virtual Function" },
176 [BCM57810] = { "QLogic BCM57810 10 Gigabit Ethernet" },
177 [BCM57810_MF] = { "QLogic BCM57810 10 Gigabit Ethernet Multi Function" },
178 [BCM57810_VF] = { "QLogic BCM57810 10 Gigabit Ethernet Virtual Function" },
179 [BCM57840_4_10] = { "QLogic BCM57840 10 Gigabit Ethernet" },
180 [BCM57840_2_20] = { "QLogic BCM57840 20 Gigabit Ethernet" },
181 [BCM57840_MF] = { "QLogic BCM57840 10/20 Gigabit Ethernet Multi Function" },
182 [BCM57840_VF] = { "QLogic BCM57840 10/20 Gigabit Ethernet Virtual Function" },
183 [BCM57811] = { "QLogic BCM57811 10 Gigabit Ethernet" },
184 [BCM57811_MF] = { "QLogic BCM57811 10 Gigabit Ethernet Multi Function" },
185 [BCM57840_O] = { "QLogic BCM57840 10/20 Gigabit Ethernet" },
186 [BCM57840_MFO] = { "QLogic BCM57840 10/20 Gigabit Ethernet Multi Function" },
187 [BCM57811_VF] = { "QLogic BCM57811 10 Gigabit Ethernet Virtual Function" }
188 };
189
190 #ifndef PCI_DEVICE_ID_NX2_57710
191 #define PCI_DEVICE_ID_NX2_57710 CHIP_NUM_57710
192 #endif
193 #ifndef PCI_DEVICE_ID_NX2_57711
194 #define PCI_DEVICE_ID_NX2_57711 CHIP_NUM_57711
195 #endif
196 #ifndef PCI_DEVICE_ID_NX2_57711E
197 #define PCI_DEVICE_ID_NX2_57711E CHIP_NUM_57711E
198 #endif
199 #ifndef PCI_DEVICE_ID_NX2_57712
200 #define PCI_DEVICE_ID_NX2_57712 CHIP_NUM_57712
201 #endif
202 #ifndef PCI_DEVICE_ID_NX2_57712_MF
203 #define PCI_DEVICE_ID_NX2_57712_MF CHIP_NUM_57712_MF
204 #endif
205 #ifndef PCI_DEVICE_ID_NX2_57712_VF
206 #define PCI_DEVICE_ID_NX2_57712_VF CHIP_NUM_57712_VF
207 #endif
208 #ifndef PCI_DEVICE_ID_NX2_57800
209 #define PCI_DEVICE_ID_NX2_57800 CHIP_NUM_57800
210 #endif
211 #ifndef PCI_DEVICE_ID_NX2_57800_MF
212 #define PCI_DEVICE_ID_NX2_57800_MF CHIP_NUM_57800_MF
213 #endif
214 #ifndef PCI_DEVICE_ID_NX2_57800_VF
215 #define PCI_DEVICE_ID_NX2_57800_VF CHIP_NUM_57800_VF
216 #endif
217 #ifndef PCI_DEVICE_ID_NX2_57810
218 #define PCI_DEVICE_ID_NX2_57810 CHIP_NUM_57810
219 #endif
220 #ifndef PCI_DEVICE_ID_NX2_57810_MF
221 #define PCI_DEVICE_ID_NX2_57810_MF CHIP_NUM_57810_MF
222 #endif
223 #ifndef PCI_DEVICE_ID_NX2_57840_O
224 #define PCI_DEVICE_ID_NX2_57840_O CHIP_NUM_57840_OBSOLETE
225 #endif
226 #ifndef PCI_DEVICE_ID_NX2_57810_VF
227 #define PCI_DEVICE_ID_NX2_57810_VF CHIP_NUM_57810_VF
228 #endif
229 #ifndef PCI_DEVICE_ID_NX2_57840_4_10
230 #define PCI_DEVICE_ID_NX2_57840_4_10 CHIP_NUM_57840_4_10
231 #endif
232 #ifndef PCI_DEVICE_ID_NX2_57840_2_20
233 #define PCI_DEVICE_ID_NX2_57840_2_20 CHIP_NUM_57840_2_20
234 #endif
235 #ifndef PCI_DEVICE_ID_NX2_57840_MFO
236 #define PCI_DEVICE_ID_NX2_57840_MFO CHIP_NUM_57840_MF_OBSOLETE
237 #endif
238 #ifndef PCI_DEVICE_ID_NX2_57840_MF
239 #define PCI_DEVICE_ID_NX2_57840_MF CHIP_NUM_57840_MF
240 #endif
241 #ifndef PCI_DEVICE_ID_NX2_57840_VF
242 #define PCI_DEVICE_ID_NX2_57840_VF CHIP_NUM_57840_VF
243 #endif
244 #ifndef PCI_DEVICE_ID_NX2_57811
245 #define PCI_DEVICE_ID_NX2_57811 CHIP_NUM_57811
246 #endif
247 #ifndef PCI_DEVICE_ID_NX2_57811_MF
248 #define PCI_DEVICE_ID_NX2_57811_MF CHIP_NUM_57811_MF
249 #endif
250 #ifndef PCI_DEVICE_ID_NX2_57811_VF
251 #define PCI_DEVICE_ID_NX2_57811_VF CHIP_NUM_57811_VF
252 #endif
253
254 static const struct pci_device_id bnx2x_pci_tbl[] = {
255 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
256 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
257 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
258 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712), BCM57712 },
259 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712_MF), BCM57712_MF },
260 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712_VF), BCM57712_VF },
261 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800), BCM57800 },
262 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800_MF), BCM57800_MF },
263 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800_VF), BCM57800_VF },
264 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810), BCM57810 },
265 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810_MF), BCM57810_MF },
266 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_O), BCM57840_O },
267 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_4_10), BCM57840_4_10 },
268 { PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_NX2_57840_4_10), BCM57840_4_10 },
269 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_2_20), BCM57840_2_20 },
270 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810_VF), BCM57810_VF },
271 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_MFO), BCM57840_MFO },
272 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_MF), BCM57840_MF },
273 { PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_NX2_57840_MF), BCM57840_MF },
274 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_VF), BCM57840_VF },
275 { PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_NX2_57840_VF), BCM57840_VF },
276 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811), BCM57811 },
277 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811_MF), BCM57811_MF },
278 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811_VF), BCM57811_VF },
279 { 0 }
280 };
281
282 MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
283
284 /* Global resources for unloading a previously loaded device */
285 #define BNX2X_PREV_WAIT_NEEDED 1
286 static DEFINE_SEMAPHORE(bnx2x_prev_sem);
287 static LIST_HEAD(bnx2x_prev_list);
288
289 /* Forward declarations */
290 static struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev);
291 static u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp);
292 static int bnx2x_set_storm_rx_mode(struct bnx2x *bp);
293
294 
295 /****************************************************************************
296 * General service functions
297 ****************************************************************************/
298 static int bnx2x_hwtstamp_ioctl(struct bnx2x *bp, struct ifreq *ifr);
299
300 static void __storm_memset_dma_mapping(struct bnx2x *bp,
301 u32 addr, dma_addr_t mapping)
302 {
303 REG_WR(bp, addr, U64_LO(mapping));
304 REG_WR(bp, addr + 4, U64_HI(mapping));
305 }
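/* Storm internal memory takes a 64-bit DMA address as two little-endian
 * dwords -- the low half at "addr" and the high half at "addr + 4" -- which
 * is the pattern the helper above implements and which the storm_memset_*()
 * helpers below build on.
 */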
306
307 static void storm_memset_spq_addr(struct bnx2x *bp,
308 dma_addr_t mapping, u16 abs_fid)
309 {
310 u32 addr = XSEM_REG_FAST_MEMORY +
311 XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid);
312
313 __storm_memset_dma_mapping(bp, addr, mapping);
314 }
315
316 static void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
317 u16 pf_id)
318 {
319 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
320 pf_id);
321 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
322 pf_id);
323 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
324 pf_id);
325 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
326 pf_id);
327 }
328
329 static void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
330 u8 enable)
331 {
332 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
333 enable);
334 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
335 enable);
336 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
337 enable);
338 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
339 enable);
340 }
341
342 static void storm_memset_eq_data(struct bnx2x *bp,
343 struct event_ring_data *eq_data,
344 u16 pfid)
345 {
346 size_t size = sizeof(struct event_ring_data);
347
348 u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_DATA_OFFSET(pfid);
349
350 __storm_memset_struct(bp, addr, size, (u32 *)eq_data);
351 }
352
353 static void storm_memset_eq_prod(struct bnx2x *bp, u16 eq_prod,
354 u16 pfid)
355 {
356 u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_PROD_OFFSET(pfid);
357 REG_WR16(bp, addr, eq_prod);
358 }
359
360 /* used only at init;
361  * locking is done by the MCP
362  */
363 static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
364 {
365 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
366 pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
367 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
368 PCICFG_VENDOR_ID_OFFSET);
369 }
370
371 static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
372 {
373 u32 val;
374
375 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
376 pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
377 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
378 PCICFG_VENDOR_ID_OFFSET);
379
380 return val;
381 }
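/* The pair of helpers above implements indirect GRC access through PCI
 * config space: program the window register (PCICFG_GRC_ADDRESS) with the
 * target offset, transfer a dword through PCICFG_GRC_DATA, then park the
 * window back at PCICFG_VENDOR_ID_OFFSET so unrelated config-space accesses
 * cannot alias a device register. Illustrative sketch, not part of the
 * driver ("some_reg" is a hypothetical register offset):
 *
 *	u32 val = bnx2x_reg_rd_ind(bp, some_reg);
 *	bnx2x_reg_wr_ind(bp, some_reg, val | BIT(0));
 */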
382
383 #define DMAE_DP_SRC_GRC "grc src_addr [%08x]"
384 #define DMAE_DP_SRC_PCI "pci src_addr [%x:%08x]"
385 #define DMAE_DP_DST_GRC "grc dst_addr [%08x]"
386 #define DMAE_DP_DST_PCI "pci dst_addr [%x:%08x]"
387 #define DMAE_DP_DST_NONE "dst_addr [none]"
388
389 static void bnx2x_dp_dmae(struct bnx2x *bp,
390 struct dmae_command *dmae, int msglvl)
391 {
392 u32 src_type = dmae->opcode & DMAE_COMMAND_SRC;
393 int i;
394
395 switch (dmae->opcode & DMAE_COMMAND_DST) {
396 case DMAE_CMD_DST_PCI:
397 if (src_type == DMAE_CMD_SRC_PCI)
398 DP(msglvl, "DMAE: opcode 0x%08x\n"
399 "src [%x:%08x], len [%d*4], dst [%x:%08x]\n"
400 "comp_addr [%x:%08x], comp_val 0x%08x\n",
401 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
402 dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
403 dmae->comp_addr_hi, dmae->comp_addr_lo,
404 dmae->comp_val);
405 else
406 DP(msglvl, "DMAE: opcode 0x%08x\n"
407 "src [%08x], len [%d*4], dst [%x:%08x]\n"
408 "comp_addr [%x:%08x], comp_val 0x%08x\n",
409 dmae->opcode, dmae->src_addr_lo >> 2,
410 dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
411 dmae->comp_addr_hi, dmae->comp_addr_lo,
412 dmae->comp_val);
413 break;
414 case DMAE_CMD_DST_GRC:
415 if (src_type == DMAE_CMD_SRC_PCI)
416 DP(msglvl, "DMAE: opcode 0x%08x\n"
417 "src [%x:%08x], len [%d*4], dst_addr [%08x]\n"
418 "comp_addr [%x:%08x], comp_val 0x%08x\n",
419 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
420 dmae->len, dmae->dst_addr_lo >> 2,
421 dmae->comp_addr_hi, dmae->comp_addr_lo,
422 dmae->comp_val);
423 else
424 DP(msglvl, "DMAE: opcode 0x%08x\n"
425 "src [%08x], len [%d*4], dst [%08x]\n"
426 "comp_addr [%x:%08x], comp_val 0x%08x\n",
427 dmae->opcode, dmae->src_addr_lo >> 2,
428 dmae->len, dmae->dst_addr_lo >> 2,
429 dmae->comp_addr_hi, dmae->comp_addr_lo,
430 dmae->comp_val);
431 break;
432 default:
433 if (src_type == DMAE_CMD_SRC_PCI)
434 DP(msglvl, "DMAE: opcode 0x%08x\n"
435 "src_addr [%x:%08x] len [%d * 4] dst_addr [none]\n"
436 "comp_addr [%x:%08x] comp_val 0x%08x\n",
437 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
438 dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
439 dmae->comp_val);
440 else
441 DP(msglvl, "DMAE: opcode 0x%08x\n"
442 "src_addr [%08x] len [%d * 4] dst_addr [none]\n"
443 "comp_addr [%x:%08x] comp_val 0x%08x\n",
444 dmae->opcode, dmae->src_addr_lo >> 2,
445 dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
446 dmae->comp_val);
447 break;
448 }
449
450 for (i = 0; i < (sizeof(struct dmae_command)/4); i++)
451 DP(msglvl, "DMAE RAW [%02d]: 0x%08x\n",
452 i, *(((u32 *)dmae) + i));
453 }
454
455 /* copy the command into DMAE command memory and set the DMAE command "go" */
456 void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx)
457 {
458 u32 cmd_offset;
459 int i;
460
461 cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
462 for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
463 REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
464 }
465 REG_WR(bp, dmae_reg_go_c[idx], 1);
466 }
467
468 u32 bnx2x_dmae_opcode_add_comp(u32 opcode, u8 comp_type)
469 {
470 return opcode | ((comp_type << DMAE_COMMAND_C_DST_SHIFT) |
471 DMAE_CMD_C_ENABLE);
472 }
473
474 u32 bnx2x_dmae_opcode_clr_src_reset(u32 opcode)
475 {
476 return opcode & ~DMAE_CMD_SRC_RESET;
477 }
478
479 u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
480 bool with_comp, u8 comp_type)
481 {
482 u32 opcode = 0;
483
484 opcode |= ((src_type << DMAE_COMMAND_SRC_SHIFT) |
485 (dst_type << DMAE_COMMAND_DST_SHIFT));
486
487 opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET);
488
489 opcode |= (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0);
490 opcode |= ((BP_VN(bp) << DMAE_CMD_E1HVN_SHIFT) |
491 (BP_VN(bp) << DMAE_COMMAND_DST_VN_SHIFT));
492 opcode |= (DMAE_COM_SET_ERR << DMAE_COMMAND_ERR_POLICY_SHIFT);
493
494 #ifdef __BIG_ENDIAN
495 opcode |= DMAE_CMD_ENDIANITY_B_DW_SWAP;
496 #else
497 opcode |= DMAE_CMD_ENDIANITY_DW_SWAP;
498 #endif
499 if (with_comp)
500 opcode = bnx2x_dmae_opcode_add_comp(opcode, comp_type);
501 return opcode;
502 }
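/* A sketch of how the opcode composes (using only constants already
 * referenced in this file): a PCI -> GRC transfer that completes to PCI
 * would be built as
 *
 *	u32 op = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
 *				   true, DMAE_COMP_PCI);
 *
 * i.e. source/destination type, per-port and per-VN routing, error policy
 * and endianity swap mode are OR-ed into one 32-bit command word, with the
 * completion bits appended last by bnx2x_dmae_opcode_add_comp().
 */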
503
504 void bnx2x_prep_dmae_with_comp(struct bnx2x *bp,
505 struct dmae_command *dmae,
506 u8 src_type, u8 dst_type)
507 {
508 memset(dmae, 0, sizeof(struct dmae_command));
509
510 /* set the opcode */
511 dmae->opcode = bnx2x_dmae_opcode(bp, src_type, dst_type,
512 true, DMAE_COMP_PCI);
513
514 /* fill in the completion parameters */
515 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
516 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
517 dmae->comp_val = DMAE_COMP_VAL;
518 }
519
520 /* issue a DMAE command over the init channel and wait for completion */
521 int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae,
522 u32 *comp)
523 {
524 int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 4000;
525 int rc = 0;
526
527 bnx2x_dp_dmae(bp, dmae, BNX2X_MSG_DMAE);
528
529 /*
530  * Lock the DMAE channel. Disable BHs to prevent a dead-lock,
531  * as this code is called both from syscall context and from
532  * the ndo_set_rx_mode() flow that may run in BH context.
533  */
534 spin_lock_bh(&bp->dmae_lock);
535
536 /* reset completion */
537 *comp = 0;
538
539 /* post the command on the channel used for initializations */
540 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
541
542 /* wait for completion */
543 udelay(5);
544 while ((*comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {
545
546 if (!cnt ||
547 (bp->recovery_state != BNX2X_RECOVERY_DONE &&
548 bp->recovery_state != BNX2X_RECOVERY_NIC_LOADING)) {
549 BNX2X_ERR("DMAE timeout!\n");
550 rc = DMAE_TIMEOUT;
551 goto unlock;
552 }
553 cnt--;
554 udelay(50);
555 }
556 if (*comp & DMAE_PCI_ERR_FLAG) {
557 BNX2X_ERR("DMAE PCI error!\n");
558 rc = DMAE_PCI_ERROR;
559 }
560
561 unlock:
562
563 spin_unlock_bh(&bp->dmae_lock);
564
565 return rc;
566 }
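/* Typical DMAE flow, mirrored by bnx2x_write_dmae() below (a sketch, not
 * additional driver code):
 *
 *	struct dmae_command dmae;
 *
 *	bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC);
 *	dmae.src_addr_lo = U64_LO(dma_addr);
 *	dmae.src_addr_hi = U64_HI(dma_addr);
 *	dmae.dst_addr_lo = dst_addr >> 2;	(GRC addresses are dword-based)
 *	dmae.dst_addr_hi = 0;
 *	dmae.len = len32;
 *	rc = bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp));
 */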
567
568 void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
569 u32 len32)
570 {
571 int rc;
572 struct dmae_command dmae;
573
574 if (!bp->dmae_ready) {
575 u32 *data = bnx2x_sp(bp, wb_data[0]);
576
577 if (CHIP_IS_E1(bp))
578 bnx2x_init_ind_wr(bp, dst_addr, data, len32);
579 else
580 bnx2x_init_str_wr(bp, dst_addr, data, len32);
581 return;
582 }
583
584 /* set the opcode and fixed command fields */
585 bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC);
586
587 /* fill in addresses and len */
588 dmae.src_addr_lo = U64_LO(dma_addr);
589 dmae.src_addr_hi = U64_HI(dma_addr);
590 dmae.dst_addr_lo = dst_addr >> 2;
591 dmae.dst_addr_hi = 0;
592 dmae.len = len32;
593
594 /* issue the command and wait for completion */
595 rc = bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp));
596 if (rc) {
597 BNX2X_ERR("DMAE returned failure %d\n", rc);
598 #ifdef BNX2X_STOP_ON_ERROR
599 bnx2x_panic();
600 #endif
601 }
602 }
603
604 void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
605 {
606 int rc;
607 struct dmae_command dmae;
608
609 if (!bp->dmae_ready) {
610 u32 *data = bnx2x_sp(bp, wb_data[0]);
611 int i;
612
613 if (CHIP_IS_E1(bp))
614 for (i = 0; i < len32; i++)
615 data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
616 else
617 for (i = 0; i < len32; i++)
618 data[i] = REG_RD(bp, src_addr + i*4);
619
620 return;
621 }
622
623 /* set the opcode and fixed command fields */
624 bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI);
625
626 /* fill in addresses and len */
627 dmae.src_addr_lo = src_addr >> 2;
628 dmae.src_addr_hi = 0;
629 dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
630 dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
631 dmae.len = len32;
632
633 /* issue the command and wait for completion */
634 rc = bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp));
635 if (rc) {
636 BNX2X_ERR("DMAE returned failure %d\n", rc);
637 #ifdef BNX2X_STOP_ON_ERROR
638 bnx2x_panic();
639 #endif
640 }
641 }
642
643 static void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
644 u32 addr, u32 len)
645 {
646 int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
647 int offset = 0;
648
649 while (len > dmae_wr_max) {
650 bnx2x_write_dmae(bp, phys_addr + offset,
651 addr + offset, dmae_wr_max);
652 offset += dmae_wr_max * 4;
653 len -= dmae_wr_max;
654 }
655
656 bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
657 }
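/* Units in the loop above: "len" counts 32-bit dwords (the DMAE length
 * field) while "offset" advances in bytes, hence the factor of 4. For
 * example, if DMAE_LEN32_WR_MAX(bp) were 0x400, a 0x500-dword write would
 * be issued as a 0x400-dword chunk at byte offset 0 followed by a
 * 0x100-dword chunk at byte offset 0x1000.
 */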
658
659 enum storms {
660 XSTORM,
661 TSTORM,
662 CSTORM,
663 USTORM,
664 MAX_STORMS
665 };
666
667 #define STORMS_NUM 4
668 #define REGS_IN_ENTRY 4
669
670 static inline int bnx2x_get_assert_list_entry(struct bnx2x *bp,
671 enum storms storm,
672 int entry)
673 {
674 switch (storm) {
675 case XSTORM:
676 return XSTORM_ASSERT_LIST_OFFSET(entry);
677 case TSTORM:
678 return TSTORM_ASSERT_LIST_OFFSET(entry);
679 case CSTORM:
680 return CSTORM_ASSERT_LIST_OFFSET(entry);
681 case USTORM:
682 return USTORM_ASSERT_LIST_OFFSET(entry);
683 case MAX_STORMS:
684 default:
685 BNX2X_ERR("unknown storm\n");
686 }
687 return -EINVAL;
688 }
689
690 static int bnx2x_mc_assert(struct bnx2x *bp)
691 {
692 char last_idx;
693 int i, j, rc = 0;
694 enum storms storm;
695 u32 regs[REGS_IN_ENTRY];
696 u32 bar_storm_intmem[STORMS_NUM] = {
697 BAR_XSTRORM_INTMEM,
698 BAR_TSTRORM_INTMEM,
699 BAR_CSTRORM_INTMEM,
700 BAR_USTRORM_INTMEM
701 };
702 u32 storm_assert_list_index[STORMS_NUM] = {
703 XSTORM_ASSERT_LIST_INDEX_OFFSET,
704 TSTORM_ASSERT_LIST_INDEX_OFFSET,
705 CSTORM_ASSERT_LIST_INDEX_OFFSET,
706 USTORM_ASSERT_LIST_INDEX_OFFSET
707 };
708 char *storms_string[STORMS_NUM] = {
709 "XSTORM",
710 "TSTORM",
711 "CSTORM",
712 "USTORM"
713 };
714
715 for (storm = XSTORM; storm < MAX_STORMS; storm++) {
716 last_idx = REG_RD8(bp, bar_storm_intmem[storm] +
717 storm_assert_list_index[storm]);
718 if (last_idx)
719 BNX2X_ERR("%s_ASSERT_LIST_INDEX 0x%x\n",
720 storms_string[storm], last_idx);
721
722
723 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
724
725 for (j = 0; j < REGS_IN_ENTRY; j++)
726 regs[j] = REG_RD(bp, bar_storm_intmem[storm] +
727 bnx2x_get_assert_list_entry(bp,
728 storm,
729 i) +
730 sizeof(u32) * j);
731
732
733 if (regs[0] != COMMON_ASM_INVALID_ASSERT_OPCODE) {
734 BNX2X_ERR("%s_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
735 storms_string[storm], i, regs[3],
736 regs[2], regs[1], regs[0]);
737 rc++;
738 } else {
739 break;
740 }
741 }
742 }
743
744 BNX2X_ERR("Chip Revision: %s, FW Version: %d_%d_%d\n",
745 CHIP_IS_E1(bp) ? "everest1" :
746 CHIP_IS_E1H(bp) ? "everest1h" :
747 CHIP_IS_E2(bp) ? "everest2" : "everest3",
748 BCM_5710_FW_MAJOR_VERSION,
749 BCM_5710_FW_MINOR_VERSION,
750 BCM_5710_FW_REVISION_VERSION);
751
752 return rc;
753 }
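/* Layout assumed by the walk above: each storm keeps a one-byte assert
 * list index plus STROM_ASSERT_ARRAY_SIZE entries of REGS_IN_ENTRY (4)
 * dwords in its internal memory. The scan stops at the first entry whose
 * leading dword still holds COMMON_ASM_INVALID_ASSERT_OPCODE (an unused
 * slot); the return value is the number of populated assert entries found
 * across all storms.
 */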
754
755 #define MCPR_TRACE_BUFFER_SIZE (0x800)
756 #define SCRATCH_BUFFER_SIZE(bp) \
757 (CHIP_IS_E1(bp) ? 0x10000 : (CHIP_IS_E1H(bp) ? 0x20000 : 0x28000))
758
759 void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl)
760 {
761 u32 addr, val;
762 u32 mark, offset;
763 __be32 data[9];
764 int word;
765 u32 trace_shmem_base;
766 if (BP_NOMCP(bp)) {
767 BNX2X_ERR("NO MCP - can not dump\n");
768 return;
769 }
770 netdev_printk(lvl, bp->dev, "bc %d.%d.%d\n",
771 (bp->common.bc_ver & 0xff0000) >> 16,
772 (bp->common.bc_ver & 0xff00) >> 8,
773 (bp->common.bc_ver & 0xff));
774
775 if (pci_channel_offline(bp->pdev)) {
776 BNX2X_ERR("Cannot dump MCP info while in PCI error\n");
777 return;
778 }
779
780 val = REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER);
781 if (val == REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER))
782 BNX2X_ERR("%s" "MCP PC at 0x%x\n", lvl, val);
783
784 if (BP_PATH(bp) == 0)
785 trace_shmem_base = bp->common.shmem_base;
786 else
787 trace_shmem_base = SHMEM2_RD(bp, other_shmem_base_addr);
788
789 /* sanity-check that the trace buffer lies within the scratchpad */
790 if (trace_shmem_base < MCPR_SCRATCH_BASE(bp) + MCPR_TRACE_BUFFER_SIZE ||
791 trace_shmem_base >= MCPR_SCRATCH_BASE(bp) +
792 SCRATCH_BUFFER_SIZE(bp)) {
793 BNX2X_ERR("Unable to dump trace buffer (mark %x)\n",
794 trace_shmem_base);
795 return;
796 }
797
798 addr = trace_shmem_base - MCPR_TRACE_BUFFER_SIZE;
799
800 /* validate the trace buffer signature */
801 mark = REG_RD(bp, addr);
802 if (mark != MFW_TRACE_SIGNATURE) {
803 BNX2X_ERR("Trace buffer signature is missing.\n");
804 return;
805 }
806
807 /* read the write pointer ("mark") that follows the signature */
808 addr += 4;
809 mark = REG_RD(bp, addr);
810 mark = MCPR_SCRATCH_BASE(bp) + ((mark + 0x3) & ~0x3) - 0x08000000;
811 if (mark >= trace_shmem_base || mark < addr + 4) {
812 BNX2X_ERR("Mark doesn't fall inside Trace Buffer\n");
813 return;
814 }
815 printk("%s" "begin fw dump (mark 0x%x)\n", lvl, mark);
816
817 printk("%s", lvl);
818
819 /* dump the part of the ring after the mark (oldest data first) */
820 for (offset = mark; offset < trace_shmem_base; offset += 0x8*4) {
821 for (word = 0; word < 8; word++)
822 data[word] = htonl(REG_RD(bp, offset + 4*word));
823 data[8] = 0x0;
824 pr_cont("%s", (char *)data);
825 }
826
827 /* then the part before the mark (most recent data) */
828 for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
829 for (word = 0; word < 8; word++)
830 data[word] = htonl(REG_RD(bp, offset + 4*word));
831 data[8] = 0x0;
832 pr_cont("%s", (char *)data);
833 }
834 printk("%s" "end of fw dump\n", lvl);
835 }
836
837 static void bnx2x_fw_dump(struct bnx2x *bp)
838 {
839 bnx2x_fw_dump_lvl(bp, KERN_ERR);
840 }
841
842 static void bnx2x_hc_int_disable(struct bnx2x *bp)
843 {
844 int port = BP_PORT(bp);
845 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
846 u32 val = REG_RD(bp, addr);
847
848
849
850
851
852 if (CHIP_IS_E1(bp)) {
853 /* Since the MSI/MSI-X enable bit cannot be cleared on E1, use the
854  * per-port mask register to stop the HC from raising further
855  * interrupts after we leave this function.
856  */
857 REG_WR(bp, HC_REG_INT_MASK + port*4, 0);
858
859 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
860 HC_CONFIG_0_REG_INT_LINE_EN_0 |
861 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
862 } else
863 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
864 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
865 HC_CONFIG_0_REG_INT_LINE_EN_0 |
866 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
867
868 DP(NETIF_MSG_IFDOWN,
869 "write %x to HC %d (addr 0x%x)\n",
870 val, port, addr);
871
872 REG_WR(bp, addr, val);
873 if (REG_RD(bp, addr) != val)
874 BNX2X_ERR("BUG! Proper val not read from IGU!\n");
875 }
876
877 static void bnx2x_igu_int_disable(struct bnx2x *bp)
878 {
879 u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
880
881 val &= ~(IGU_PF_CONF_MSI_MSIX_EN |
882 IGU_PF_CONF_INT_LINE_EN |
883 IGU_PF_CONF_ATTN_BIT_EN);
884
885 DP(NETIF_MSG_IFDOWN, "write %x to IGU\n", val);
886
887 REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
888 if (REG_RD(bp, IGU_REG_PF_CONFIGURATION) != val)
889 BNX2X_ERR("BUG! Proper val not read from IGU!\n");
890 }
891
892 static void bnx2x_int_disable(struct bnx2x *bp)
893 {
894 if (bp->common.int_block == INT_BLOCK_HC)
895 bnx2x_hc_int_disable(bp);
896 else
897 bnx2x_igu_int_disable(bp);
898 }
899
900 void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int)
901 {
902 int i;
903 u16 j;
904 struct hc_sp_status_block_data sp_sb_data;
905 int func = BP_FUNC(bp);
906 #ifdef BNX2X_STOP_ON_ERROR
907 u16 start = 0, end = 0;
908 u8 cos;
909 #endif
910 if (IS_PF(bp) && disable_int)
911 bnx2x_int_disable(bp);
912
913 bp->stats_state = STATS_STATE_DISABLED;
914 bp->eth_stats.unrecoverable_error++;
915 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
916
917 BNX2X_ERR("begin crash dump -----------------\n");
918
919 /* Indices */
920 /* Common */
921 if (IS_PF(bp)) {
922 struct host_sp_status_block *def_sb = bp->def_status_blk;
923 int data_size, cstorm_offset;
924
925 BNX2X_ERR("def_idx(0x%x) def_att_idx(0x%x) attn_state(0x%x) spq_prod_idx(0x%x) next_stats_cnt(0x%x)\n",
926 bp->def_idx, bp->def_att_idx, bp->attn_state,
927 bp->spq_prod_idx, bp->stats_counter);
928 BNX2X_ERR("DSB: attn bits(0x%x) ack(0x%x) id(0x%x) idx(0x%x)\n",
929 def_sb->atten_status_block.attn_bits,
930 def_sb->atten_status_block.attn_bits_ack,
931 def_sb->atten_status_block.status_block_id,
932 def_sb->atten_status_block.attn_bits_index);
933 BNX2X_ERR(" def (");
934 for (i = 0; i < HC_SP_SB_MAX_INDICES; i++)
935 pr_cont("0x%x%s",
936 def_sb->sp_sb.index_values[i],
937 (i == HC_SP_SB_MAX_INDICES - 1) ? ") " : " ");
938
939 data_size = sizeof(struct hc_sp_status_block_data) /
940 sizeof(u32);
941 cstorm_offset = CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func);
942 for (i = 0; i < data_size; i++)
943 *((u32 *)&sp_sb_data + i) =
944 REG_RD(bp, BAR_CSTRORM_INTMEM + cstorm_offset +
945 i * sizeof(u32));
946
947 pr_cont("igu_sb_id(0x%x) igu_seg_id(0x%x) pf_id(0x%x) vnic_id(0x%x) vf_id(0x%x) vf_valid (0x%x) state(0x%x)\n",
948 sp_sb_data.igu_sb_id,
949 sp_sb_data.igu_seg_id,
950 sp_sb_data.p_func.pf_id,
951 sp_sb_data.p_func.vnic_id,
952 sp_sb_data.p_func.vf_id,
953 sp_sb_data.p_func.vf_valid,
954 sp_sb_data.state);
955 }
956
957 for_each_eth_queue(bp, i) {
958 struct bnx2x_fastpath *fp = &bp->fp[i];
959 int loop;
960 struct hc_status_block_data_e2 sb_data_e2;
961 struct hc_status_block_data_e1x sb_data_e1x;
962 struct hc_status_block_sm *hc_sm_p =
963 CHIP_IS_E1x(bp) ?
964 sb_data_e1x.common.state_machine :
965 sb_data_e2.common.state_machine;
966 struct hc_index_data *hc_index_p =
967 CHIP_IS_E1x(bp) ?
968 sb_data_e1x.index_data :
969 sb_data_e2.index_data;
970 u8 data_size, cos;
971 u32 *sb_data_p;
972 struct bnx2x_fp_txdata txdata;
973
974 if (!bp->fp)
975 break;
976
977 if (!fp->rx_cons_sb)
978 continue;
979
980 /* Rx */
981 BNX2X_ERR("fp%d: rx_bd_prod(0x%x) rx_bd_cons(0x%x) rx_comp_prod(0x%x) rx_comp_cons(0x%x) *rx_cons_sb(0x%x)\n",
982 i, fp->rx_bd_prod, fp->rx_bd_cons,
983 fp->rx_comp_prod,
984 fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
985 BNX2X_ERR(" rx_sge_prod(0x%x) last_max_sge(0x%x) fp_hc_idx(0x%x)\n",
986 fp->rx_sge_prod, fp->last_max_sge,
987 le16_to_cpu(fp->fp_hc_idx));
988
989 /* Tx */
990 for_each_cos_in_tx_queue(fp, cos)
991 {
992 if (!fp->txdata_ptr[cos])
993 break;
994
995 txdata = *fp->txdata_ptr[cos];
996
997 if (!txdata.tx_cons_sb)
998 continue;
999
1000 BNX2X_ERR("fp%d: tx_pkt_prod(0x%x) tx_pkt_cons(0x%x) tx_bd_prod(0x%x) tx_bd_cons(0x%x) *tx_cons_sb(0x%x)\n",
1001 i, txdata.tx_pkt_prod,
1002 txdata.tx_pkt_cons, txdata.tx_bd_prod,
1003 txdata.tx_bd_cons,
1004 le16_to_cpu(*txdata.tx_cons_sb));
1005 }
1006
1007 loop = CHIP_IS_E1x(bp) ?
1008 HC_SB_MAX_INDICES_E1X : HC_SB_MAX_INDICES_E2;
1009
1010
1011
1012 if (IS_FCOE_FP(fp))
1013 continue;
1014
1015 BNX2X_ERR(" run indexes (");
1016 for (j = 0; j < HC_SB_MAX_SM; j++)
1017 pr_cont("0x%x%s",
1018 fp->sb_running_index[j],
1019 (j == HC_SB_MAX_SM - 1) ? ")" : " ");
1020
1021 BNX2X_ERR(" indexes (");
1022 for (j = 0; j < loop; j++)
1023 pr_cont("0x%x%s",
1024 fp->sb_index_values[j],
1025 (j == loop - 1) ? ")" : " ");
1026
1027
1028 if (IS_VF(bp))
1029 continue;
1030
1031
1032 data_size = CHIP_IS_E1x(bp) ?
1033 sizeof(struct hc_status_block_data_e1x) :
1034 sizeof(struct hc_status_block_data_e2);
1035 data_size /= sizeof(u32);
1036 sb_data_p = CHIP_IS_E1x(bp) ?
1037 (u32 *)&sb_data_e1x :
1038 (u32 *)&sb_data_e2;
1039
1040 for (j = 0; j < data_size; j++)
1041 *(sb_data_p + j) = REG_RD(bp, BAR_CSTRORM_INTMEM +
1042 CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id) +
1043 j * sizeof(u32));
1044
1045 if (!CHIP_IS_E1x(bp)) {
1046 pr_cont("pf_id(0x%x) vf_id(0x%x) vf_valid(0x%x) vnic_id(0x%x) same_igu_sb_1b(0x%x) state(0x%x)\n",
1047 sb_data_e2.common.p_func.pf_id,
1048 sb_data_e2.common.p_func.vf_id,
1049 sb_data_e2.common.p_func.vf_valid,
1050 sb_data_e2.common.p_func.vnic_id,
1051 sb_data_e2.common.same_igu_sb_1b,
1052 sb_data_e2.common.state);
1053 } else {
1054 pr_cont("pf_id(0x%x) vf_id(0x%x) vf_valid(0x%x) vnic_id(0x%x) same_igu_sb_1b(0x%x) state(0x%x)\n",
1055 sb_data_e1x.common.p_func.pf_id,
1056 sb_data_e1x.common.p_func.vf_id,
1057 sb_data_e1x.common.p_func.vf_valid,
1058 sb_data_e1x.common.p_func.vnic_id,
1059 sb_data_e1x.common.same_igu_sb_1b,
1060 sb_data_e1x.common.state);
1061 }
1062
1063
1064 for (j = 0; j < HC_SB_MAX_SM; j++) {
1065 pr_cont("SM[%d] __flags (0x%x) igu_sb_id (0x%x) igu_seg_id(0x%x) time_to_expire (0x%x) timer_value(0x%x)\n",
1066 j, hc_sm_p[j].__flags,
1067 hc_sm_p[j].igu_sb_id,
1068 hc_sm_p[j].igu_seg_id,
1069 hc_sm_p[j].time_to_expire,
1070 hc_sm_p[j].timer_value);
1071 }
1072
1073
1074 for (j = 0; j < loop; j++) {
1075 pr_cont("INDEX[%d] flags (0x%x) timeout (0x%x)\n", j,
1076 hc_index_p[j].flags,
1077 hc_index_p[j].timeout);
1078 }
1079 }
1080
1081 #ifdef BNX2X_STOP_ON_ERROR
1082 if (IS_PF(bp)) {
1083
1084 BNX2X_ERR("eq cons %x prod %x\n", bp->eq_cons, bp->eq_prod);
1085 for (i = 0; i < NUM_EQ_DESC; i++) {
1086 u32 *data = (u32 *)&bp->eq_ring[i].message.data;
1087
1088 BNX2X_ERR("event queue [%d]: header: opcode %d, error %d\n",
1089 i, bp->eq_ring[i].message.opcode,
1090 bp->eq_ring[i].message.error);
1091 BNX2X_ERR("data: %x %x %x\n",
1092 data[0], data[1], data[2]);
1093 }
1094 }
1095
1096 /* Rings */
1097 /* Rx */
1098 for_each_valid_rx_queue(bp, i) {
1099 struct bnx2x_fastpath *fp = &bp->fp[i];
1100
1101 if (!bp->fp)
1102 break;
1103
1104 if (!fp->rx_cons_sb)
1105 continue;
1106
1107 start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
1108 end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
1109 for (j = start; j != end; j = RX_BD(j + 1)) {
1110 u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
1111 struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];
1112
1113 BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
1114 i, j, rx_bd[1], rx_bd[0], sw_bd->data);
1115 }
1116
1117 start = RX_SGE(fp->rx_sge_prod);
1118 end = RX_SGE(fp->last_max_sge);
1119 for (j = start; j != end; j = RX_SGE(j + 1)) {
1120 u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
1121 struct sw_rx_page *sw_page = &fp->rx_page_ring[j];
1122
1123 BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
1124 i, j, rx_sge[1], rx_sge[0], sw_page->page);
1125 }
1126
1127 start = RCQ_BD(fp->rx_comp_cons - 10);
1128 end = RCQ_BD(fp->rx_comp_cons + 503);
1129 for (j = start; j != end; j = RCQ_BD(j + 1)) {
1130 u32 *cqe = (u32 *)&fp->rx_comp_ring[j];
1131
1132 BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
1133 i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
1134 }
1135 }
1136
1137 /* Tx */
1138 for_each_valid_tx_queue(bp, i) {
1139 struct bnx2x_fastpath *fp = &bp->fp[i];
1140
1141 if (!bp->fp)
1142 break;
1143
1144 for_each_cos_in_tx_queue(fp, cos) {
1145 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
1146
1147 if (!fp->txdata_ptr[cos])
1148 break;
1149
1150 if (!txdata->tx_cons_sb)
1151 continue;
1152
1153 start = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) - 10);
1154 end = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) + 245);
1155 for (j = start; j != end; j = TX_BD(j + 1)) {
1156 struct sw_tx_bd *sw_bd =
1157 &txdata->tx_buf_ring[j];
1158
1159 BNX2X_ERR("fp%d: txdata %d, packet[%x]=[%p,%x]\n",
1160 i, cos, j, sw_bd->skb,
1161 sw_bd->first_bd);
1162 }
1163
1164 start = TX_BD(txdata->tx_bd_cons - 10);
1165 end = TX_BD(txdata->tx_bd_cons + 254);
1166 for (j = start; j != end; j = TX_BD(j + 1)) {
1167 u32 *tx_bd = (u32 *)&txdata->tx_desc_ring[j];
1168
1169 BNX2X_ERR("fp%d: txdata %d, tx_bd[%x]=[%x:%x:%x:%x]\n",
1170 i, cos, j, tx_bd[0], tx_bd[1],
1171 tx_bd[2], tx_bd[3]);
1172 }
1173 }
1174 }
1175 #endif
1176 if (IS_PF(bp)) {
1177 bnx2x_fw_dump(bp);
1178 bnx2x_mc_assert(bp);
1179 }
1180 BNX2X_ERR("end crash dump -----------------\n");
1181 }
1182
1183 /*
1184  * FLR support (E2 and newer chips)
1185  *
1186  * bnx2x_pf_flr_clnup() is called during nic_load as part of the
1187  * per-function HW initialization.
1188  */
1189 #define FLR_WAIT_USEC 10000
1190 #define FLR_WAIT_INTERVAL 50
1191 #define FLR_POLL_CNT (FLR_WAIT_USEC/FLR_WAIT_INTERVAL)
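/* I.e. a default budget of 10000 / 50 = 200 polls, 50 usec apart (~10 ms
 * total); bnx2x_flr_clnup_poll_count() scales this by 120x on FPGA and
 * 2000x on emulation platforms, where the hardware runs far slower.
 */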
1192
1193 struct pbf_pN_buf_regs {
1194 int pN;
1195 u32 init_crd;
1196 u32 crd;
1197 u32 crd_freed;
1198 };
1199
1200 struct pbf_pN_cmd_regs {
1201 int pN;
1202 u32 lines_occup;
1203 u32 lines_freed;
1204 };
1205
1206 static void bnx2x_pbf_pN_buf_flushed(struct bnx2x *bp,
1207 struct pbf_pN_buf_regs *regs,
1208 u32 poll_count)
1209 {
1210 u32 init_crd, crd, crd_start, crd_freed, crd_freed_start;
1211 u32 cur_cnt = poll_count;
1212
1213 crd_freed = crd_freed_start = REG_RD(bp, regs->crd_freed);
1214 crd = crd_start = REG_RD(bp, regs->crd);
1215 init_crd = REG_RD(bp, regs->init_crd);
1216
1217 DP(BNX2X_MSG_SP, "INIT CREDIT[%d] : %x\n", regs->pN, init_crd);
1218 DP(BNX2X_MSG_SP, "CREDIT[%d] : s:%x\n", regs->pN, crd);
1219 DP(BNX2X_MSG_SP, "CREDIT_FREED[%d]: s:%x\n", regs->pN, crd_freed);
1220
1221 while ((crd != init_crd) && ((u32)SUB_S32(crd_freed, crd_freed_start) <
1222 (init_crd - crd_start))) {
1223 if (cur_cnt--) {
1224 udelay(FLR_WAIT_INTERVAL);
1225 crd = REG_RD(bp, regs->crd);
1226 crd_freed = REG_RD(bp, regs->crd_freed);
1227 } else {
1228 DP(BNX2X_MSG_SP, "PBF tx buffer[%d] timed out\n",
1229 regs->pN);
1230 DP(BNX2X_MSG_SP, "CREDIT[%d] : c:%x\n",
1231 regs->pN, crd);
1232 DP(BNX2X_MSG_SP, "CREDIT_FREED[%d]: c:%x\n",
1233 regs->pN, crd_freed);
1234 break;
1235 }
1236 }
1237 DP(BNX2X_MSG_SP, "Waited %d*%d usec for PBF tx buffer[%d]\n",
1238 poll_count-cur_cnt, FLR_WAIT_INTERVAL, regs->pN);
1239 }
1240
1241 static void bnx2x_pbf_pN_cmd_flushed(struct bnx2x *bp,
1242 struct pbf_pN_cmd_regs *regs,
1243 u32 poll_count)
1244 {
1245 u32 occup, to_free, freed, freed_start;
1246 u32 cur_cnt = poll_count;
1247
1248 occup = to_free = REG_RD(bp, regs->lines_occup);
1249 freed = freed_start = REG_RD(bp, regs->lines_freed);
1250
1251 DP(BNX2X_MSG_SP, "OCCUPANCY[%d] : s:%x\n", regs->pN, occup);
1252 DP(BNX2X_MSG_SP, "LINES_FREED[%d] : s:%x\n", regs->pN, freed);
1253
1254 while (occup && ((u32)SUB_S32(freed, freed_start) < to_free)) {
1255 if (cur_cnt--) {
1256 udelay(FLR_WAIT_INTERVAL);
1257 occup = REG_RD(bp, regs->lines_occup);
1258 freed = REG_RD(bp, regs->lines_freed);
1259 } else {
1260 DP(BNX2X_MSG_SP, "PBF cmd queue[%d] timed out\n",
1261 regs->pN);
1262 DP(BNX2X_MSG_SP, "OCCUPANCY[%d] : s:%x\n",
1263 regs->pN, occup);
1264 DP(BNX2X_MSG_SP, "LINES_FREED[%d] : s:%x\n",
1265 regs->pN, freed);
1266 break;
1267 }
1268 }
1269 DP(BNX2X_MSG_SP, "Waited %d*%d usec for PBF cmd queue[%d]\n",
1270 poll_count-cur_cnt, FLR_WAIT_INTERVAL, regs->pN);
1271 }
1272
1273 static u32 bnx2x_flr_clnup_reg_poll(struct bnx2x *bp, u32 reg,
1274 u32 expected, u32 poll_count)
1275 {
1276 u32 cur_cnt = poll_count;
1277 u32 val;
1278
1279 while ((val = REG_RD(bp, reg)) != expected && cur_cnt--)
1280 udelay(FLR_WAIT_INTERVAL);
1281
1282 return val;
1283 }
1284
1285 int bnx2x_flr_clnup_poll_hw_counter(struct bnx2x *bp, u32 reg,
1286 char *msg, u32 poll_cnt)
1287 {
1288 u32 val = bnx2x_flr_clnup_reg_poll(bp, reg, 0, poll_cnt);
1289 if (val != 0) {
1290 BNX2X_ERR("%s usage count=%d\n", msg, val);
1291 return 1;
1292 }
1293 return 0;
1294 }
1295
1296 /* Common routines with the VF FLR cleanup flow */
1297 u32 bnx2x_flr_clnup_poll_count(struct bnx2x *bp)
1298 {
1299
1300 if (CHIP_REV_IS_EMUL(bp))
1301 return FLR_POLL_CNT * 2000;
1302
1303 if (CHIP_REV_IS_FPGA(bp))
1304 return FLR_POLL_CNT * 120;
1305
1306 return FLR_POLL_CNT;
1307 }
1308
1309 void bnx2x_tx_hw_flushed(struct bnx2x *bp, u32 poll_count)
1310 {
1311 struct pbf_pN_cmd_regs cmd_regs[] = {
1312 {0, (CHIP_IS_E3B0(bp)) ?
1313 PBF_REG_TQ_OCCUPANCY_Q0 :
1314 PBF_REG_P0_TQ_OCCUPANCY,
1315 (CHIP_IS_E3B0(bp)) ?
1316 PBF_REG_TQ_LINES_FREED_CNT_Q0 :
1317 PBF_REG_P0_TQ_LINES_FREED_CNT},
1318 {1, (CHIP_IS_E3B0(bp)) ?
1319 PBF_REG_TQ_OCCUPANCY_Q1 :
1320 PBF_REG_P1_TQ_OCCUPANCY,
1321 (CHIP_IS_E3B0(bp)) ?
1322 PBF_REG_TQ_LINES_FREED_CNT_Q1 :
1323 PBF_REG_P1_TQ_LINES_FREED_CNT},
1324 {4, (CHIP_IS_E3B0(bp)) ?
1325 PBF_REG_TQ_OCCUPANCY_LB_Q :
1326 PBF_REG_P4_TQ_OCCUPANCY,
1327 (CHIP_IS_E3B0(bp)) ?
1328 PBF_REG_TQ_LINES_FREED_CNT_LB_Q :
1329 PBF_REG_P4_TQ_LINES_FREED_CNT}
1330 };
1331
1332 struct pbf_pN_buf_regs buf_regs[] = {
1333 {0, (CHIP_IS_E3B0(bp)) ?
1334 PBF_REG_INIT_CRD_Q0 :
1335 PBF_REG_P0_INIT_CRD ,
1336 (CHIP_IS_E3B0(bp)) ?
1337 PBF_REG_CREDIT_Q0 :
1338 PBF_REG_P0_CREDIT,
1339 (CHIP_IS_E3B0(bp)) ?
1340 PBF_REG_INTERNAL_CRD_FREED_CNT_Q0 :
1341 PBF_REG_P0_INTERNAL_CRD_FREED_CNT},
1342 {1, (CHIP_IS_E3B0(bp)) ?
1343 PBF_REG_INIT_CRD_Q1 :
1344 PBF_REG_P1_INIT_CRD,
1345 (CHIP_IS_E3B0(bp)) ?
1346 PBF_REG_CREDIT_Q1 :
1347 PBF_REG_P1_CREDIT,
1348 (CHIP_IS_E3B0(bp)) ?
1349 PBF_REG_INTERNAL_CRD_FREED_CNT_Q1 :
1350 PBF_REG_P1_INTERNAL_CRD_FREED_CNT},
1351 {4, (CHIP_IS_E3B0(bp)) ?
1352 PBF_REG_INIT_CRD_LB_Q :
1353 PBF_REG_P4_INIT_CRD,
1354 (CHIP_IS_E3B0(bp)) ?
1355 PBF_REG_CREDIT_LB_Q :
1356 PBF_REG_P4_CREDIT,
1357 (CHIP_IS_E3B0(bp)) ?
1358 PBF_REG_INTERNAL_CRD_FREED_CNT_LB_Q :
1359 PBF_REG_P4_INTERNAL_CRD_FREED_CNT},
1360 };
1361
1362 int i;
1363
1364
1365 for (i = 0; i < ARRAY_SIZE(cmd_regs); i++)
1366 bnx2x_pbf_pN_cmd_flushed(bp, &cmd_regs[i], poll_count);
1367
1368
1369 for (i = 0; i < ARRAY_SIZE(buf_regs); i++)
1370 bnx2x_pbf_pN_buf_flushed(bp, &buf_regs[i], poll_count);
1371 }
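/* The pN values 0, 1 and 4 above are the two network ports plus the
 * loopback queue; on E3B0 the per-queue register names differ, hence the
 * CHIP_IS_E3B0() selection inside every table entry.
 */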
1372
1373 #define OP_GEN_PARAM(param) \
1374 (((param) << SDM_OP_GEN_COMP_PARAM_SHIFT) & SDM_OP_GEN_COMP_PARAM)
1375
1376 #define OP_GEN_TYPE(type) \
1377 (((type) << SDM_OP_GEN_COMP_TYPE_SHIFT) & SDM_OP_GEN_COMP_TYPE)
1378
1379 #define OP_GEN_AGG_VECT(index) \
1380 (((index) << SDM_OP_GEN_AGG_VECT_IDX_SHIFT) & SDM_OP_GEN_AGG_VECT_IDX)
1381
1382 int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func, u32 poll_cnt)
1383 {
1384 u32 op_gen_command = 0;
1385 u32 comp_addr = BAR_CSTRORM_INTMEM +
1386 CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(clnup_func);
1387 int ret = 0;
1388
1389 if (REG_RD(bp, comp_addr)) {
1390 BNX2X_ERR("Cleanup complete was not 0 before sending\n");
1391 return 1;
1392 }
1393
1394 op_gen_command |= OP_GEN_PARAM(XSTORM_AGG_INT_FINAL_CLEANUP_INDEX);
1395 op_gen_command |= OP_GEN_TYPE(XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE);
1396 op_gen_command |= OP_GEN_AGG_VECT(clnup_func);
1397 op_gen_command |= 1 << SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT;
1398
1399 DP(BNX2X_MSG_SP, "sending FW Final cleanup\n");
1400 REG_WR(bp, XSDM_REG_OPERATION_GEN, op_gen_command);
1401
1402 if (bnx2x_flr_clnup_reg_poll(bp, comp_addr, 1, poll_cnt) != 1) {
1403 BNX2X_ERR("FW final cleanup did not succeed\n");
1404 DP(BNX2X_MSG_SP, "At timeout completion address contained %x\n",
1405 (REG_RD(bp, comp_addr)));
1406 bnx2x_panic();
1407 return 1;
1408 }
1409 /* Zero the completion word for the next FLR */
1410 REG_WR(bp, comp_addr, 0);
1411
1412 return ret;
1413 }
1414
1415 u8 bnx2x_is_pcie_pending(struct pci_dev *dev)
1416 {
1417 u16 status;
1418
1419 pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &status);
1420 return status & PCI_EXP_DEVSTA_TRPND;
1421 }
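/* PCI_EXP_DEVSTA_TRPND is the "Transactions Pending" bit of the PCIe
 * Device Status register; bnx2x_pf_flr_clnup() below polls it after the
 * cleanup sequence to confirm no non-posted requests are still in flight.
 */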
1422
1423
1424
1425 static int bnx2x_poll_hw_usage_counters(struct bnx2x *bp, u32 poll_cnt)
1426 {
1427 /* wait for the CFC PF usage-counter to reach zero (covers all VFs) */
1428 if (bnx2x_flr_clnup_poll_hw_counter(bp,
1429 CFC_REG_NUM_LCIDS_INSIDE_PF,
1430 "CFC PF usage counter timed out",
1431 poll_cnt))
1432 return 1;
1433
1434 /* Wait for the DQ (doorbell queue) PF usage-counter to reach zero */
1435 if (bnx2x_flr_clnup_poll_hw_counter(bp,
1436 DORQ_REG_PF_USAGE_CNT,
1437 "DQ PF usage counter timed out",
1438 poll_cnt))
1439 return 1;
1440
1441 /* Wait for the QM PF usage-counter to reach zero */
1442 if (bnx2x_flr_clnup_poll_hw_counter(bp,
1443 QM_REG_PF_USG_CNT_0 + 4*BP_FUNC(bp),
1444 "QM PF usage counter timed out",
1445 poll_cnt))
1446 return 1;
1447
1448 /* Wait for the Timers VNIC and NUM_SCANS usage-counters to reach zero */
1449 if (bnx2x_flr_clnup_poll_hw_counter(bp,
1450 TM_REG_LIN0_VNIC_UC + 4*BP_PORT(bp),
1451 "Timers VNIC usage counter timed out",
1452 poll_cnt))
1453 return 1;
1454 if (bnx2x_flr_clnup_poll_hw_counter(bp,
1455 TM_REG_LIN0_NUM_SCANS + 4*BP_PORT(bp),
1456 "Timers NUM_SCANS usage counter timed out",
1457 poll_cnt))
1458 return 1;
1459
1460 /* Wait for the DMAE "go" register to clear */
1461 if (bnx2x_flr_clnup_poll_hw_counter(bp,
1462 dmae_reg_go_c[INIT_DMAE_C(bp)],
1463 "DMAE command register timed out",
1464 poll_cnt))
1465 return 1;
1466
1467 return 0;
1468 }
1469
1470 static void bnx2x_hw_enable_status(struct bnx2x *bp)
1471 {
1472 u32 val;
1473
1474 val = REG_RD(bp, CFC_REG_WEAK_ENABLE_PF);
1475 DP(BNX2X_MSG_SP, "CFC_REG_WEAK_ENABLE_PF is 0x%x\n", val);
1476
1477 val = REG_RD(bp, PBF_REG_DISABLE_PF);
1478 DP(BNX2X_MSG_SP, "PBF_REG_DISABLE_PF is 0x%x\n", val);
1479
1480 val = REG_RD(bp, IGU_REG_PCI_PF_MSI_EN);
1481 DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSI_EN is 0x%x\n", val);
1482
1483 val = REG_RD(bp, IGU_REG_PCI_PF_MSIX_EN);
1484 DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSIX_EN is 0x%x\n", val);
1485
1486 val = REG_RD(bp, IGU_REG_PCI_PF_MSIX_FUNC_MASK);
1487 DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSIX_FUNC_MASK is 0x%x\n", val);
1488
1489 val = REG_RD(bp, PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR);
1490 DP(BNX2X_MSG_SP, "PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR is 0x%x\n", val);
1491
1492 val = REG_RD(bp, PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR);
1493 DP(BNX2X_MSG_SP, "PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR is 0x%x\n", val);
1494
1495 val = REG_RD(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER);
1496 DP(BNX2X_MSG_SP, "PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER is 0x%x\n",
1497 val);
1498 }
1499
1500 static int bnx2x_pf_flr_clnup(struct bnx2x *bp)
1501 {
1502 u32 poll_cnt = bnx2x_flr_clnup_poll_count(bp);
1503
1504 DP(BNX2X_MSG_SP, "Cleanup after FLR PF[%d]\n", BP_ABS_FUNC(bp));
1505
1506 /* Re-enable PF target read access */
1507 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
1508
1509
1510 DP(BNX2X_MSG_SP, "Polling usage counters\n");
1511 if (bnx2x_poll_hw_usage_counters(bp, poll_cnt))
1512 return -EBUSY;
1513
1514
1515
1516 /* Send the FW cleanup command */
1517 if (bnx2x_send_final_clnup(bp, (u8)BP_FUNC(bp), poll_cnt))
1518 return -EBUSY;
1519
1520
1521
1522 /* Verify TX hw is flushed */
1523 bnx2x_tx_hw_flushed(bp, poll_cnt);
1524
1525 /* Wait 100ms (not adjusted according to platform) */
1526 msleep(100);
1527
1528 /* Verify no pending pci transactions */
1529 if (bnx2x_is_pcie_pending(bp->pdev))
1530 BNX2X_ERR("PCIE Transactions still pending\n");
1531
1532
1533 bnx2x_hw_enable_status(bp);
1534
1535 /* Re-open the PGLUE master enable for this PF; it is otherwise
1536  * re-initialized only later, during the regular function load flow,
1537  * and the write-back DMAE accesses above depend on it.
1538  */
1539 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
1540
1541 return 0;
1542 }
1543
1544 static void bnx2x_hc_int_enable(struct bnx2x *bp)
1545 {
1546 int port = BP_PORT(bp);
1547 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
1548 u32 val = REG_RD(bp, addr);
1549 bool msix = (bp->flags & USING_MSIX_FLAG) ? true : false;
1550 bool single_msix = (bp->flags & USING_SINGLE_MSIX_FLAG) ? true : false;
1551 bool msi = (bp->flags & USING_MSI_FLAG) ? true : false;
1552
1553 if (msix) {
1554 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
1555 HC_CONFIG_0_REG_INT_LINE_EN_0);
1556 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
1557 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
1558 if (single_msix)
1559 val |= HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
1560 } else if (msi) {
1561 val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
1562 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
1563 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
1564 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
1565 } else {
1566 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
1567 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
1568 HC_CONFIG_0_REG_INT_LINE_EN_0 |
1569 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
1570
1571 if (!CHIP_IS_E1(bp)) {
1572 DP(NETIF_MSG_IFUP,
1573 "write %x to HC %d (addr 0x%x)\n", val, port, addr);
1574
1575 REG_WR(bp, addr, val);
1576 /* the final INTx configuration written below must have the MSI/MSI-X enable bit cleared */
1577 val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
1578 }
1579 }
1580
1581 if (CHIP_IS_E1(bp))
1582 REG_WR(bp, HC_REG_INT_MASK + port*4, 0x1FFFF);
1583
1584 DP(NETIF_MSG_IFUP,
1585 "write %x to HC %d (addr 0x%x) mode %s\n", val, port, addr,
1586 (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
1587
1588 REG_WR(bp, addr, val);
1589
1590 /* ensure that HC_CONFIG is written before the
1591  * leading/trailing edge configuration below */
1592 barrier();
1593
1594 if (!CHIP_IS_E1(bp)) {
1595 /* init leading/trailing edge */
1596 if (IS_MF(bp)) {
1597 val = (0xee0f | (1 << (BP_VN(bp) + 4)));
1598 if (bp->port.pmf)
1599 /* enable nig and gpio3 attention */
1600 val |= 0x1100;
1601 } else
1602 val = 0xffff;
1603
1604 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
1605 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
1606 }
1607 }
1608
1609 static void bnx2x_igu_int_enable(struct bnx2x *bp)
1610 {
1611 u32 val;
1612 bool msix = (bp->flags & USING_MSIX_FLAG) ? true : false;
1613 bool single_msix = (bp->flags & USING_SINGLE_MSIX_FLAG) ? true : false;
1614 bool msi = (bp->flags & USING_MSI_FLAG) ? true : false;
1615
1616 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
1617
1618 if (msix) {
1619 val &= ~(IGU_PF_CONF_INT_LINE_EN |
1620 IGU_PF_CONF_SINGLE_ISR_EN);
1621 val |= (IGU_PF_CONF_MSI_MSIX_EN |
1622 IGU_PF_CONF_ATTN_BIT_EN);
1623
1624 if (single_msix)
1625 val |= IGU_PF_CONF_SINGLE_ISR_EN;
1626 } else if (msi) {
1627 val &= ~IGU_PF_CONF_INT_LINE_EN;
1628 val |= (IGU_PF_CONF_MSI_MSIX_EN |
1629 IGU_PF_CONF_ATTN_BIT_EN |
1630 IGU_PF_CONF_SINGLE_ISR_EN);
1631 } else {
1632 val &= ~IGU_PF_CONF_MSI_MSIX_EN;
1633 val |= (IGU_PF_CONF_INT_LINE_EN |
1634 IGU_PF_CONF_ATTN_BIT_EN |
1635 IGU_PF_CONF_SINGLE_ISR_EN);
1636 }
1637
1638 /* Clean the previous interrupt status - the IGU must be configured before acking */
1639 if ((!msix) || single_msix) {
1640 REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
1641 bnx2x_ack_int(bp);
1642 }
1643
1644 val |= IGU_PF_CONF_FUNC_EN;
1645
1646 DP(NETIF_MSG_IFUP, "write 0x%x to IGU mode %s\n",
1647 val, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
1648
1649 REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
1650
1651 if (val & IGU_PF_CONF_INT_LINE_EN)
1652 pci_intx(bp->pdev, true);
1653
1654 barrier();
1655
1656 /* init leading/trailing edge */
1657 if (IS_MF(bp)) {
1658 val = (0xee0f | (1 << (BP_VN(bp) + 4)));
1659 if (bp->port.pmf)
1660 /* enable nig and gpio3 attention */
1661 val |= 0x1100;
1662 } else
1663 val = 0xffff;
1664
1665 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
1666 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);
1667 }
1668
1669 void bnx2x_int_enable(struct bnx2x *bp)
1670 {
1671 if (bp->common.int_block == INT_BLOCK_HC)
1672 bnx2x_hc_int_enable(bp);
1673 else
1674 bnx2x_igu_int_enable(bp);
1675 }
1676
1677 void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
1678 {
1679 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
1680 int i, offset;
1681
1682 if (disable_hw)
1683 /* prevent the HW from sending interrupts */
1684 bnx2x_int_disable(bp);
1685
1686 /* make sure all ISRs are done */
1687 if (msix) {
1688 synchronize_irq(bp->msix_table[0].vector);
1689 offset = 1;
1690 if (CNIC_SUPPORT(bp))
1691 offset++;
1692 for_each_eth_queue(bp, i)
1693 synchronize_irq(bp->msix_table[offset++].vector);
1694 } else
1695 synchronize_irq(bp->pdev->irq);
1696
1697
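/* make sure sp_task is not running */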
1698 cancel_delayed_work(&bp->sp_task);
1699 cancel_delayed_work(&bp->period_task);
1700 flush_workqueue(bnx2x_wq);
1701 }
1702
/**
 * bnx2x_trylock_hw_lock - try to take a HW lock without waiting
 *
 * @bp:		driver handle
 * @resource:	resource bit which was locked
 *
 * Returns true if the lock was taken, false otherwise.
 */
1710 static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
1711 {
1712 u32 lock_status;
1713 u32 resource_bit = (1 << resource);
1714 int func = BP_FUNC(bp);
1715 u32 hw_lock_control_reg;
1716
1717 DP(NETIF_MSG_HW | NETIF_MSG_IFUP,
1718 "Trying to take a lock on resource %d\n", resource);
1719
1720
1721 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1722 DP(NETIF_MSG_HW | NETIF_MSG_IFUP,
1723 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1724 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1725 return false;
1726 }
1727
1728 if (func <= 5)
1729 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1730 else
1731 hw_lock_control_reg =
1732 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1733
1734
1735 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1736 lock_status = REG_RD(bp, hw_lock_control_reg);
1737 if (lock_status & resource_bit)
1738 return true;
1739
1740 DP(NETIF_MSG_HW | NETIF_MSG_IFUP,
1741 "Failed to get a lock on resource %d\n", resource);
1742 return false;
1743 }
1744
/**
 * bnx2x_get_leader_lock_resource - get the recovery leader resource id
 *
 * @bp:		driver handle
 *
 * Returns the recovery leader resource id according to the engine this
 * function belongs to. Currently only two engines are supported.
 */
1753 static int bnx2x_get_leader_lock_resource(struct bnx2x *bp)
1754 {
1755 if (BP_PATH(bp))
1756 return HW_LOCK_RESOURCE_RECOVERY_LEADER_1;
1757 else
1758 return HW_LOCK_RESOURCE_RECOVERY_LEADER_0;
1759 }
1760
/**
 * bnx2x_trylock_leader_lock - try to acquire a leader lock
 *
 * @bp:		driver handle
 *
 * Tries to acquire the leader lock for the current engine.
 */
1768 static bool bnx2x_trylock_leader_lock(struct bnx2x *bp)
1769 {
1770 return bnx2x_trylock_hw_lock(bp, bnx2x_get_leader_lock_resource(bp));
1771 }
1772
1773 static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid, u8 err);
1774
1775
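/* called from the ISR flow: mark that an interrupt occurred and schedule the slowpath task */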
1776 static int bnx2x_schedule_sp_task(struct bnx2x *bp)
1777 {
/* mark that an interrupt occurred so bnx2x_sp_task() knows it
 * needs to acknowledge and handle it
 */
1782 atomic_set(&bp->interrupt_occurred, 1);
1783
/* make sure interrupt_occurred is visible before the sp task runs */
1788 smp_wmb();
1789
1790
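/* schedule sp_task to workqueue */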
1791 return queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
1792 }
1793
1794 void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
1795 {
1796 struct bnx2x *bp = fp->bp;
1797 int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
1798 int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
1799 enum bnx2x_queue_cmd drv_cmd = BNX2X_Q_CMD_MAX;
1800 struct bnx2x_queue_sp_obj *q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
1801
1802 DP(BNX2X_MSG_SP,
1803 "fp %d cid %d got ramrod #%d state is %x type is %d\n",
1804 fp->index, cid, command, bp->state,
1805 rr_cqe->ramrod_cqe.ramrod_type);
1806
/* If cid is within VF range, replace the slowpath object with the
 * one corresponding to this VF
 */
1810 if (cid >= BNX2X_FIRST_VF_CID &&
1811 cid < BNX2X_FIRST_VF_CID + BNX2X_VF_CIDS)
1812 bnx2x_iov_set_queue_sp_obj(bp, cid, &q_obj);
1813
1814 switch (command) {
1815 case (RAMROD_CMD_ID_ETH_CLIENT_UPDATE):
1816 DP(BNX2X_MSG_SP, "got UPDATE ramrod. CID %d\n", cid);
1817 drv_cmd = BNX2X_Q_CMD_UPDATE;
1818 break;
1819
1820 case (RAMROD_CMD_ID_ETH_CLIENT_SETUP):
1821 DP(BNX2X_MSG_SP, "got MULTI[%d] setup ramrod\n", cid);
1822 drv_cmd = BNX2X_Q_CMD_SETUP;
1823 break;
1824
1825 case (RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP):
1826 DP(BNX2X_MSG_SP, "got MULTI[%d] tx-only setup ramrod\n", cid);
1827 drv_cmd = BNX2X_Q_CMD_SETUP_TX_ONLY;
1828 break;
1829
1830 case (RAMROD_CMD_ID_ETH_HALT):
1831 DP(BNX2X_MSG_SP, "got MULTI[%d] halt ramrod\n", cid);
1832 drv_cmd = BNX2X_Q_CMD_HALT;
1833 break;
1834
1835 case (RAMROD_CMD_ID_ETH_TERMINATE):
1836 DP(BNX2X_MSG_SP, "got MULTI[%d] terminate ramrod\n", cid);
1837 drv_cmd = BNX2X_Q_CMD_TERMINATE;
1838 break;
1839
1840 case (RAMROD_CMD_ID_ETH_EMPTY):
1841 DP(BNX2X_MSG_SP, "got MULTI[%d] empty ramrod\n", cid);
1842 drv_cmd = BNX2X_Q_CMD_EMPTY;
1843 break;
1844
1845 case (RAMROD_CMD_ID_ETH_TPA_UPDATE):
1846 DP(BNX2X_MSG_SP, "got tpa update ramrod CID=%d\n", cid);
1847 drv_cmd = BNX2X_Q_CMD_UPDATE_TPA;
1848 break;
1849
1850 default:
1851 BNX2X_ERR("unexpected MC reply (%d) on fp[%d]\n",
1852 command, fp->index);
1853 return;
1854 }
1855
1856 if ((drv_cmd != BNX2X_Q_CMD_MAX) &&
1857 q_obj->complete_cmd(bp, q_obj, drv_cmd))
/* q_obj->complete_cmd() failure means that this was
 * an unexpected completion.
 *
 * In this case we don't want to increase the bp->cq_spq_left
 * because apparently we haven't sent this command in the
 * first place.
 */
1865 #ifdef BNX2X_STOP_ON_ERROR
1866 bnx2x_panic();
1867 #else
1868 return;
1869 #endif
1870
1871 smp_mb__before_atomic();
1872 atomic_inc(&bp->cq_spq_left);
1873
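/* push the change in bp->cq_spq_left towards the memory */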
1874 smp_mb__after_atomic();
1875
1876 DP(BNX2X_MSG_SP, "bp->cq_spq_left %x\n", atomic_read(&bp->cq_spq_left));
1877
1878 if ((drv_cmd == BNX2X_Q_CMD_UPDATE) && (IS_FCOE_FP(fp)) &&
1879 (!!test_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state))) {
/* if Q update ramrod is completed for last Q in AFEX VIF set
 * flow, then ACK MCP at the end
 *
 * mark pending ACK to MCP bit; prevent the case where both
 * bits are cleared: at the end of load/unload the driver
 * checks that sp_state is cleared, and this ordering
 * prevents races
 */
1889 smp_mb__before_atomic();
1890 set_bit(BNX2X_AFEX_PENDING_VIFSET_MCP_ACK, &bp->sp_state);
1891 wmb();
1892 clear_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state);
1893 smp_mb__after_atomic();
1894
1895
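/* schedule the sp task as MCP ack is required */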
1896 bnx2x_schedule_sp_task(bp);
1897 }
1898
1899 return;
1900 }
1901
1902 irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1903 {
1904 struct bnx2x *bp = netdev_priv(dev_instance);
1905 u16 status = bnx2x_ack_int(bp);
1906 u16 mask;
1907 int i;
1908 u8 cos;
1909
1910
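/* Return here if interrupt is shared and it's not for us */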
1911 if (unlikely(status == 0)) {
1912 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1913 return IRQ_NONE;
1914 }
1915 DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);
1916
1917 #ifdef BNX2X_STOP_ON_ERROR
1918 if (unlikely(bp->panic))
1919 return IRQ_HANDLED;
1920 #endif
1921
1922 for_each_eth_queue(bp, i) {
1923 struct bnx2x_fastpath *fp = &bp->fp[i];
1924
1925 mask = 0x2 << (fp->index + CNIC_SUPPORT(bp));
1926 if (status & mask) {
1927
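/* Handle Rx and Tx according to SB id */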
1928 for_each_cos_in_tx_queue(fp, cos)
1929 prefetch(fp->txdata_ptr[cos]->tx_cons_sb);
1930 prefetch(&fp->sb_running_index[SM_RX_ID]);
1931 napi_schedule_irqoff(&bnx2x_fp(bp, fp->index, napi));
1932 status &= ~mask;
1933 }
1934 }
1935
1936 if (CNIC_SUPPORT(bp)) {
1937 mask = 0x2;
1938 if (status & (mask | 0x1)) {
1939 struct cnic_ops *c_ops = NULL;
1940
1941 rcu_read_lock();
1942 c_ops = rcu_dereference(bp->cnic_ops);
1943 if (c_ops && (bp->cnic_eth_dev.drv_state &
1944 CNIC_DRV_STATE_HANDLES_IRQ))
1945 c_ops->cnic_handler(bp->cnic_data, NULL);
1946 rcu_read_unlock();
1947
1948 status &= ~mask;
1949 }
1950 }
1951
1952 if (unlikely(status & 0x1)) {
/* schedule sp task to perform default status block work, ack
 * attentions and enable interrupts.
 */
1957 bnx2x_schedule_sp_task(bp);
1958
1959 status &= ~0x1;
1960 if (!status)
1961 return IRQ_HANDLED;
1962 }
1963
1964 if (unlikely(status))
1965 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
1966 status);
1967
1968 return IRQ_HANDLED;
1969 }
1970
/* end of fast path */

/* General service functions */
1977 int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1978 {
1979 u32 lock_status;
1980 u32 resource_bit = (1 << resource);
1981 int func = BP_FUNC(bp);
1982 u32 hw_lock_control_reg;
1983 int cnt;
1984
1985
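/* Validating that the resource is within range */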
1986 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1987 BNX2X_ERR("resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1988 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1989 return -EINVAL;
1990 }
1991
1992 if (func <= 5) {
1993 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1994 } else {
1995 hw_lock_control_reg =
1996 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1997 }
1998
1999
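/* Validating that the resource is not already taken */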
2000 lock_status = REG_RD(bp, hw_lock_control_reg);
2001 if (lock_status & resource_bit) {
2002 BNX2X_ERR("lock_status 0x%x resource_bit 0x%x\n",
2003 lock_status, resource_bit);
2004 return -EEXIST;
2005 }
2006
2007
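/* Try for up to ~5 seconds, polling every 5ms */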
2008 for (cnt = 0; cnt < 1000; cnt++) {
2009
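/* Try to acquire the lock */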
2010 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
2011 lock_status = REG_RD(bp, hw_lock_control_reg);
2012 if (lock_status & resource_bit)
2013 return 0;
2014
2015 usleep_range(5000, 10000);
2016 }
2017 BNX2X_ERR("Timeout\n");
2018 return -EAGAIN;
2019 }
2020
2021 int bnx2x_release_leader_lock(struct bnx2x *bp)
2022 {
2023 return bnx2x_release_hw_lock(bp, bnx2x_get_leader_lock_resource(bp));
2024 }
2025
2026 int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
2027 {
2028 u32 lock_status;
2029 u32 resource_bit = (1 << resource);
2030 int func = BP_FUNC(bp);
2031 u32 hw_lock_control_reg;
2032
2033
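/* Validating that the resource is within range */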
2034 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
2035 BNX2X_ERR("resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
2036 resource, HW_LOCK_MAX_RESOURCE_VALUE);
2037 return -EINVAL;
2038 }
2039
2040 if (func <= 5) {
2041 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
2042 } else {
2043 hw_lock_control_reg =
2044 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
2045 }
2046
2047
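/* Validating that the resource is currently taken */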
2048 lock_status = REG_RD(bp, hw_lock_control_reg);
2049 if (!(lock_status & resource_bit)) {
2050 BNX2X_ERR("lock_status 0x%x resource_bit 0x%x. Unlock was called but lock wasn't taken!\n",
2051 lock_status, resource_bit);
2052 return -EFAULT;
2053 }
2054
2055 REG_WR(bp, hw_lock_control_reg, resource_bit);
2056 return 0;
2057 }
2058
2059 int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
2060 {
2061
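/* The GPIO should be swapped if swap register is set and active */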
2062 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2063 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2064 int gpio_shift = gpio_num +
2065 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2066 u32 gpio_mask = (1 << gpio_shift);
2067 u32 gpio_reg;
2068 int value;
2069
2070 if (gpio_num > MISC_REGISTERS_GPIO_3) {
2071 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2072 return -EINVAL;
2073 }
2074
2075
2076 gpio_reg = REG_RD(bp, MISC_REG_GPIO);
2077
2078
2079 if ((gpio_reg & gpio_mask) == gpio_mask)
2080 value = 1;
2081 else
2082 value = 0;
2083
2084 return value;
2085 }
2086
2087 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
2088 {
2089
2090 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2091 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2092 int gpio_shift = gpio_num +
2093 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2094 u32 gpio_mask = (1 << gpio_shift);
2095 u32 gpio_reg;
2096
2097 if (gpio_num > MISC_REGISTERS_GPIO_3) {
2098 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2099 return -EINVAL;
2100 }
2101
2102 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2103
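/* read GPIO and mask except the float bits */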
2104 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
2105
2106 switch (mode) {
2107 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
2108 DP(NETIF_MSG_LINK,
2109 "Set GPIO %d (shift %d) -> output low\n",
2110 gpio_num, gpio_shift);
2111
2112 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2113 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
2114 break;
2115
2116 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
2117 DP(NETIF_MSG_LINK,
2118 "Set GPIO %d (shift %d) -> output high\n",
2119 gpio_num, gpio_shift);
2120
2121 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2122 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
2123 break;
2124
2125 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
2126 DP(NETIF_MSG_LINK,
2127 "Set GPIO %d (shift %d) -> input\n",
2128 gpio_num, gpio_shift);
2129
2130 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2131 break;
2132
2133 default:
2134 break;
2135 }
2136
2137 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
2138 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2139
2140 return 0;
2141 }
2142
2143 int bnx2x_set_mult_gpio(struct bnx2x *bp, u8 pins, u32 mode)
2144 {
2145 u32 gpio_reg = 0;
2146 int rc = 0;
2147
2148
2149
2150 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2151
2152 gpio_reg = REG_RD(bp, MISC_REG_GPIO);
2153 gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_FLOAT_POS);
2154 gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_CLR_POS);
2155 gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_SET_POS);
2156
2157 switch (mode) {
2158 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
2159 DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> output low\n", pins);
2160
2161 gpio_reg |= (pins << MISC_REGISTERS_GPIO_CLR_POS);
2162 break;
2163
2164 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
2165 DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> output high\n", pins);
2166
2167 gpio_reg |= (pins << MISC_REGISTERS_GPIO_SET_POS);
2168 break;
2169
2170 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
2171 DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> input\n", pins);
2172
2173 gpio_reg |= (pins << MISC_REGISTERS_GPIO_FLOAT_POS);
2174 break;
2175
2176 default:
2177 BNX2X_ERR("Invalid GPIO mode assignment %d\n", mode);
2178 rc = -EINVAL;
2179 break;
2180 }
2181
2182 if (rc == 0)
2183 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
2184
2185 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2186
2187 return rc;
2188 }
2189
2190 int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
2191 {
2192
2193 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2194 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2195 int gpio_shift = gpio_num +
2196 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2197 u32 gpio_mask = (1 << gpio_shift);
2198 u32 gpio_reg;
2199
2200 if (gpio_num > MISC_REGISTERS_GPIO_3) {
2201 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2202 return -EINVAL;
2203 }
2204
2205 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2206
2207 gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
2208
2209 switch (mode) {
2210 case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
2211 DP(NETIF_MSG_LINK,
2212 "Clear GPIO INT %d (shift %d) -> output low\n",
2213 gpio_num, gpio_shift);
2214
2215 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2216 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2217 break;
2218
2219 case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
2220 DP(NETIF_MSG_LINK,
2221 "Set GPIO INT %d (shift %d) -> output high\n",
2222 gpio_num, gpio_shift);
2223
2224 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2225 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2226 break;
2227
2228 default:
2229 break;
2230 }
2231
2232 REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
2233 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2234
2235 return 0;
2236 }
2237
2238 static int bnx2x_set_spio(struct bnx2x *bp, int spio, u32 mode)
2239 {
2240 u32 spio_reg;
2241
2242
2243 if ((spio != MISC_SPIO_SPIO4) && (spio != MISC_SPIO_SPIO5)) {
2244 BNX2X_ERR("Invalid SPIO 0x%x\n", spio);
2245 return -EINVAL;
2246 }
2247
2248 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2249
2250 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_SPIO_FLOAT);
2251
2252 switch (mode) {
2253 case MISC_SPIO_OUTPUT_LOW:
2254 DP(NETIF_MSG_HW, "Set SPIO 0x%x -> output low\n", spio);
2255
2256 spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS);
2257 spio_reg |= (spio << MISC_SPIO_CLR_POS);
2258 break;
2259
2260 case MISC_SPIO_OUTPUT_HIGH:
2261 DP(NETIF_MSG_HW, "Set SPIO 0x%x -> output high\n", spio);
2262
2263 spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS);
2264 spio_reg |= (spio << MISC_SPIO_SET_POS);
2265 break;
2266
2267 case MISC_SPIO_INPUT_HI_Z:
2268 DP(NETIF_MSG_HW, "Set SPIO 0x%x -> input\n", spio);
2269
2270 spio_reg |= (spio << MISC_SPIO_FLOAT_POS);
2271 break;
2272
2273 default:
2274 break;
2275 }
2276
2277 REG_WR(bp, MISC_REG_SPIO, spio_reg);
2278 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2279
2280 return 0;
2281 }
2282
2283 void bnx2x_calc_fc_adv(struct bnx2x *bp)
2284 {
2285 u8 cfg_idx = bnx2x_get_link_cfg_idx(bp);
2286
2287 bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
2288 ADVERTISED_Pause);
2289 switch (bp->link_vars.ieee_fc &
2290 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
2291 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
2292 bp->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause |
2293 ADVERTISED_Pause);
2294 break;
2295
2296 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
2297 bp->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause;
2298 break;
2299
2300 default:
2301 break;
2302 }
2303 }
2304
2305 static void bnx2x_set_requested_fc(struct bnx2x *bp)
2306 {
/* Initialize link parameters structure variables
 * It is recommended to turn off RX FC for jumbo frames
 * for better performance
 */
2311 if (CHIP_IS_E1x(bp) && (bp->dev->mtu > 5000))
2312 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
2313 else
2314 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
2315 }
2316
2317 static void bnx2x_init_dropless_fc(struct bnx2x *bp)
2318 {
2319 u32 pause_enabled = 0;
2320
2321 if (!CHIP_IS_E1(bp) && bp->dropless_fc && bp->link_vars.link_up) {
2322 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2323 pause_enabled = 1;
2324
2325 REG_WR(bp, BAR_USTRORM_INTMEM +
2326 USTORM_ETH_PAUSE_ENABLED_OFFSET(BP_PORT(bp)),
2327 pause_enabled);
2328 }
2329
2330 DP(NETIF_MSG_IFUP | NETIF_MSG_LINK, "dropless_fc is %s\n",
2331 pause_enabled ? "enabled" : "disabled");
2332 }
2333
2334 int bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
2335 {
2336 int rc, cfx_idx = bnx2x_get_link_cfg_idx(bp);
2337 u16 req_line_speed = bp->link_params.req_line_speed[cfx_idx];
2338
2339 if (!BP_NOMCP(bp)) {
2340 bnx2x_set_requested_fc(bp);
2341 bnx2x_acquire_phy_lock(bp);
2342
2343 if (load_mode == LOAD_DIAG) {
2344 struct link_params *lp = &bp->link_params;
2345 lp->loopback_mode = LOOPBACK_XGXS;
2346
2347 if (lp->req_line_speed[cfx_idx] < SPEED_20000) {
2348 if (lp->speed_cap_mask[cfx_idx] &
2349 PORT_HW_CFG_SPEED_CAPABILITY_D0_20G)
2350 lp->req_line_speed[cfx_idx] =
2351 SPEED_20000;
2352 else if (lp->speed_cap_mask[cfx_idx] &
2353 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
2354 lp->req_line_speed[cfx_idx] =
2355 SPEED_10000;
2356 else
2357 lp->req_line_speed[cfx_idx] =
2358 SPEED_1000;
2359 }
2360 }
2361
2362 if (load_mode == LOAD_LOOPBACK_EXT) {
2363 struct link_params *lp = &bp->link_params;
2364 lp->loopback_mode = LOOPBACK_EXT;
2365 }
2366
2367 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2368
2369 bnx2x_release_phy_lock(bp);
2370
2371 bnx2x_init_dropless_fc(bp);
2372
2373 bnx2x_calc_fc_adv(bp);
2374
2375 if (bp->link_vars.link_up) {
2376 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2377 bnx2x_link_report(bp);
2378 }
2379 queue_delayed_work(bnx2x_wq, &bp->period_task, 0);
2380 bp->link_params.req_line_speed[cfx_idx] = req_line_speed;
2381 return rc;
2382 }
2383 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
2384 return -EINVAL;
2385 }
2386
2387 void bnx2x_link_set(struct bnx2x *bp)
2388 {
2389 if (!BP_NOMCP(bp)) {
2390 bnx2x_acquire_phy_lock(bp);
2391 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2392 bnx2x_release_phy_lock(bp);
2393
2394 bnx2x_init_dropless_fc(bp);
2395
2396 bnx2x_calc_fc_adv(bp);
2397 } else
2398 BNX2X_ERR("Bootcode is missing - can not set link\n");
2399 }
2400
2401 static void bnx2x__link_reset(struct bnx2x *bp)
2402 {
2403 if (!BP_NOMCP(bp)) {
2404 bnx2x_acquire_phy_lock(bp);
2405 bnx2x_lfa_reset(&bp->link_params, &bp->link_vars);
2406 bnx2x_release_phy_lock(bp);
2407 } else
2408 BNX2X_ERR("Bootcode is missing - can not reset link\n");
2409 }
2410
2411 void bnx2x_force_link_reset(struct bnx2x *bp)
2412 {
2413 bnx2x_acquire_phy_lock(bp);
2414 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
2415 bnx2x_release_phy_lock(bp);
2416 }
2417
2418 u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes)
2419 {
2420 u8 rc = 0;
2421
2422 if (!BP_NOMCP(bp)) {
2423 bnx2x_acquire_phy_lock(bp);
2424 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars,
2425 is_serdes);
2426 bnx2x_release_phy_lock(bp);
2427 } else
2428 BNX2X_ERR("Bootcode is missing - can not test link\n");
2429
2430 return rc;
2431 }
2432
/* Calculates the per-vnic MIN rates used by the fairness algorithm.
 * Hidden vnics get a MIN rate of 0; a configured rate of 0 is bumped
 * to DEF_MIN_RATE. If all MIN values are zero, or if ETS is enabled,
 * fairness is disabled.
 */
2442 static void bnx2x_calc_vn_min(struct bnx2x *bp,
2443 struct cmng_init_input *input)
2444 {
2445 int all_zero = 1;
2446 int vn;
2447
2448 for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
2449 u32 vn_cfg = bp->mf_config[vn];
2450 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2451 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2452
2453
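/* Skip hidden vns */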
2454 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
2455 vn_min_rate = 0;
2456
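/* If min rate is zero - set it to DEF_MIN_RATE */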
2457 else if (!vn_min_rate)
2458 vn_min_rate = DEF_MIN_RATE;
2459 else
2460 all_zero = 0;
2461
2462 input->vnic_min_rate[vn] = vn_min_rate;
2463 }
2464
2465
2466 if (BNX2X_IS_ETS_ENABLED(bp)) {
2467 input->flags.cmng_enables &=
2468 ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2469 DP(NETIF_MSG_IFUP, "Fairness will be disabled due to ETS\n");
2470 } else if (all_zero) {
2471 input->flags.cmng_enables &=
2472 ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2473 DP(NETIF_MSG_IFUP,
2474 "All MIN values are zeroes, fairness will be disabled\n");
2475 } else
2476 input->flags.cmng_enables |=
2477 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2478 }
2479
2480 static void bnx2x_calc_vn_max(struct bnx2x *bp, int vn,
2481 struct cmng_init_input *input)
2482 {
2483 u16 vn_max_rate;
2484 u32 vn_cfg = bp->mf_config[vn];
2485
2486 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
2487 vn_max_rate = 0;
2488 else {
2489 u32 maxCfg = bnx2x_extract_max_cfg(bp, vn_cfg);
2490
2491 if (IS_MF_PERCENT_BW(bp)) {
2492
2493 vn_max_rate = (bp->link_vars.line_speed * maxCfg) / 100;
2494 } else
2495
2496 vn_max_rate = maxCfg * 100;
2497 }
2498
2499 DP(NETIF_MSG_IFUP, "vn %d: vn_max_rate %d\n", vn, vn_max_rate);
2500
2501 input->vnic_max_rate[vn] = vn_max_rate;
2502 }
2503
2504 static int bnx2x_get_cmng_fns_mode(struct bnx2x *bp)
2505 {
2506 if (CHIP_REV_IS_SLOW(bp))
2507 return CMNG_FNS_NONE;
2508 if (IS_MF(bp))
2509 return CMNG_FNS_MINMAX;
2510
2511 return CMNG_FNS_NONE;
2512 }
2513
2514 void bnx2x_read_mf_cfg(struct bnx2x *bp)
2515 {
2516 int vn, n = (CHIP_MODE_IS_4_PORT(bp) ? 2 : 1);
2517
2518 if (BP_NOMCP(bp))
2519 return;
2520
/* For 2 port configuration the absolute function number formula
 * is:
 *      abs_func = 2 * vn + BP_PORT + BP_PATH
 *
 *      and there are 4 functions per port
 *
 * For 4 port configuration it is
 *      abs_func = 4 * vn + 2 * BP_PORT + BP_PATH
 *
 *      and there are 2 functions per port
 */
2532 for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
2533 int func = n * (2 * vn + BP_PORT(bp)) + BP_PATH(bp);
2534
2535 if (func >= E1H_FUNC_MAX)
2536 break;
2537
2538 bp->mf_config[vn] =
2539 MF_CFG_RD(bp, func_mf_config[func].config);
2540 }
2541 if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) {
2542 DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
2543 bp->flags |= MF_FUNC_DIS;
2544 } else {
2545 DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
2546 bp->flags &= ~MF_FUNC_DIS;
2547 }
2548 }
2549
2550 static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
2551 {
2552 struct cmng_init_input input;
2553 memset(&input, 0, sizeof(struct cmng_init_input));
2554
2555 input.port_rate = bp->link_vars.line_speed;
2556
2557 if (cmng_type == CMNG_FNS_MINMAX && input.port_rate) {
2558 int vn;
2559
2560
2561 if (read_cfg)
2562 bnx2x_read_mf_cfg(bp);
2563
2564
2565 bnx2x_calc_vn_min(bp, &input);
2566
2567
2568 if (bp->port.pmf)
2569 for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++)
2570 bnx2x_calc_vn_max(bp, vn, &input);
2571
2572
2573 input.flags.cmng_enables |=
2574 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
2575
2576 bnx2x_init_cmng(&input, &bp->cmng);
2577 return;
2578 }
2579
2580
2581 DP(NETIF_MSG_IFUP,
2582 "rate shaping and fairness are disabled\n");
2583 }
2584
2585 static void storm_memset_cmng(struct bnx2x *bp,
2586 struct cmng_init *cmng,
2587 u8 port)
2588 {
2589 int vn;
2590 size_t size = sizeof(struct cmng_struct_per_port);
2591
2592 u32 addr = BAR_XSTRORM_INTMEM +
2593 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port);
2594
2595 __storm_memset_struct(bp, addr, size, (u32 *)&cmng->port);
2596
2597 for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
2598 int func = func_by_vn(bp, vn);
2599
2600 addr = BAR_XSTRORM_INTMEM +
2601 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func);
2602 size = sizeof(struct rate_shaping_vars_per_vn);
2603 __storm_memset_struct(bp, addr, size,
2604 (u32 *)&cmng->vnic.vnic_max_rate[vn]);
2605
2606 addr = BAR_XSTRORM_INTMEM +
2607 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func);
2608 size = sizeof(struct fairness_vars_per_vn);
2609 __storm_memset_struct(bp, addr, size,
2610 (u32 *)&cmng->vnic.vnic_min_rate[vn]);
2611 }
2612 }
2613
2614
2615 void bnx2x_set_local_cmng(struct bnx2x *bp)
2616 {
2617 int cmng_fns = bnx2x_get_cmng_fns_mode(bp);
2618
2619 if (cmng_fns != CMNG_FNS_NONE) {
2620 bnx2x_cmng_fns_init(bp, false, cmng_fns);
2621 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
2622 } else {
2623
2624 DP(NETIF_MSG_IFUP,
2625 "single function mode without fairness\n");
2626 }
2627 }
2628
2629
2630 static void bnx2x_link_attn(struct bnx2x *bp)
2631 {
2632
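/* Make sure that we are synced with the current statistics */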
2633 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2634
2635 bnx2x_link_update(&bp->link_params, &bp->link_vars);
2636
2637 bnx2x_init_dropless_fc(bp);
2638
2639 if (bp->link_vars.link_up) {
2640
2641 if (bp->link_vars.mac_type != MAC_TYPE_EMAC) {
2642 struct host_port_stats *pstats;
2643
2644 pstats = bnx2x_sp(bp, port_stats);
2645
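/* reset old mac stats */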
2646 memset(&(pstats->mac_stx[0]), 0,
2647 sizeof(struct mac_stx));
2648 }
2649 if (bp->state == BNX2X_STATE_OPEN)
2650 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2651 }
2652
2653 if (bp->link_vars.link_up && bp->link_vars.line_speed)
2654 bnx2x_set_local_cmng(bp);
2655
2656 __bnx2x_link_report(bp);
2657
2658 if (IS_MF(bp))
2659 bnx2x_link_sync_notify(bp);
2660 }
2661
2662 void bnx2x__link_status_update(struct bnx2x *bp)
2663 {
2664 if (bp->state != BNX2X_STATE_OPEN)
2665 return;
2666
2667
2668 if (IS_PF(bp)) {
2669 bnx2x_dcbx_pmf_update(bp);
2670 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2671 if (bp->link_vars.link_up)
2672 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2673 else
2674 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2675
2676 bnx2x_link_report(bp);
2677
2678 } else {
2679 bp->port.supported[0] |= (SUPPORTED_10baseT_Half |
2680 SUPPORTED_10baseT_Full |
2681 SUPPORTED_100baseT_Half |
2682 SUPPORTED_100baseT_Full |
2683 SUPPORTED_1000baseT_Full |
2684 SUPPORTED_2500baseX_Full |
2685 SUPPORTED_10000baseT_Full |
2686 SUPPORTED_TP |
2687 SUPPORTED_FIBRE |
2688 SUPPORTED_Autoneg |
2689 SUPPORTED_Pause |
2690 SUPPORTED_Asym_Pause);
2691 bp->port.advertising[0] = bp->port.supported[0];
2692
2693 bp->link_params.bp = bp;
2694 bp->link_params.port = BP_PORT(bp);
2695 bp->link_params.req_duplex[0] = DUPLEX_FULL;
2696 bp->link_params.req_flow_ctrl[0] = BNX2X_FLOW_CTRL_NONE;
2697 bp->link_params.req_line_speed[0] = SPEED_10000;
2698 bp->link_params.speed_cap_mask[0] = 0x7f0000;
2699 bp->link_params.switch_cfg = SWITCH_CFG_10G;
2700 bp->link_vars.mac_type = MAC_TYPE_BMAC;
2701 bp->link_vars.line_speed = SPEED_10000;
2702 bp->link_vars.link_status =
2703 (LINK_STATUS_LINK_UP |
2704 LINK_STATUS_SPEED_AND_DUPLEX_10GTFD);
2705 bp->link_vars.link_up = 1;
2706 bp->link_vars.duplex = DUPLEX_FULL;
2707 bp->link_vars.flow_ctrl = BNX2X_FLOW_CTRL_NONE;
2708 __bnx2x_link_report(bp);
2709
2710 bnx2x_sample_bulletin(bp);
2711
2712
2713
2714
2715
2716
2717 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2718 }
2719 }
2720
2721 static int bnx2x_afex_func_update(struct bnx2x *bp, u16 vifid,
2722 u16 vlan_val, u8 allowed_prio)
2723 {
2724 struct bnx2x_func_state_params func_params = {NULL};
2725 struct bnx2x_func_afex_update_params *f_update_params =
2726 &func_params.params.afex_update;
2727
2728 func_params.f_obj = &bp->func_obj;
2729 func_params.cmd = BNX2X_F_CMD_AFEX_UPDATE;
2730
/* no need to wait for RAMROD completion, so don't
 * set RAMROD_COMP_WAIT flag
 */
2735 f_update_params->vif_id = vifid;
2736 f_update_params->afex_default_vlan = vlan_val;
2737 f_update_params->allowed_priorities = allowed_prio;
2738
2739
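/* if ramrod can not be sent, respond to MCP immediately */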
2740 if (bnx2x_func_state_change(bp, &func_params) < 0)
2741 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);
2742
2743 return 0;
2744 }
2745
2746 static int bnx2x_afex_handle_vif_list_cmd(struct bnx2x *bp, u8 cmd_type,
2747 u16 vif_index, u8 func_bit_map)
2748 {
2749 struct bnx2x_func_state_params func_params = {NULL};
2750 struct bnx2x_func_afex_viflists_params *update_params =
2751 &func_params.params.afex_viflists;
2752 int rc;
2753 u32 drv_msg_code;
2754
2755
2756 if ((cmd_type != VIF_LIST_RULE_GET) && (cmd_type != VIF_LIST_RULE_SET))
2757 BNX2X_ERR("BUG! afex_handle_vif_list_cmd invalid type 0x%x\n",
2758 cmd_type);
2759
2760 func_params.f_obj = &bp->func_obj;
2761 func_params.cmd = BNX2X_F_CMD_AFEX_VIFLISTS;
2762
2763
2764 update_params->afex_vif_list_command = cmd_type;
2765 update_params->vif_list_index = vif_index;
2766 update_params->func_bit_map =
2767 (cmd_type == VIF_LIST_RULE_GET) ? 0 : func_bit_map;
2768 update_params->func_to_clear = 0;
2769 drv_msg_code =
2770 (cmd_type == VIF_LIST_RULE_GET) ?
2771 DRV_MSG_CODE_AFEX_LISTGET_ACK :
2772 DRV_MSG_CODE_AFEX_LISTSET_ACK;
2773
2774
2775
2776
2777 rc = bnx2x_func_state_change(bp, &func_params);
2778 if (rc < 0)
2779 bnx2x_fw_command(bp, drv_msg_code, 0);
2780
2781 return 0;
2782 }
2783
2784 static void bnx2x_handle_afex_cmd(struct bnx2x *bp, u32 cmd)
2785 {
2786 struct afex_stats afex_stats;
2787 u32 func = BP_ABS_FUNC(bp);
2788 u32 mf_config;
2789 u16 vlan_val;
2790 u32 vlan_prio;
2791 u16 vif_id;
2792 u8 allowed_prio;
2793 u8 vlan_mode;
2794 u32 addr_to_write, vifid, addrs, stats_type, i;
2795
2796 if (cmd & DRV_STATUS_AFEX_LISTGET_REQ) {
2797 vifid = SHMEM2_RD(bp, afex_param1_to_driver[BP_FW_MB_IDX(bp)]);
2798 DP(BNX2X_MSG_MCP,
2799 "afex: got MCP req LISTGET_REQ for vifid 0x%x\n", vifid);
2800 bnx2x_afex_handle_vif_list_cmd(bp, VIF_LIST_RULE_GET, vifid, 0);
2801 }
2802
2803 if (cmd & DRV_STATUS_AFEX_LISTSET_REQ) {
2804 vifid = SHMEM2_RD(bp, afex_param1_to_driver[BP_FW_MB_IDX(bp)]);
2805 addrs = SHMEM2_RD(bp, afex_param2_to_driver[BP_FW_MB_IDX(bp)]);
2806 DP(BNX2X_MSG_MCP,
2807 "afex: got MCP req LISTSET_REQ for vifid 0x%x addrs 0x%x\n",
2808 vifid, addrs);
2809 bnx2x_afex_handle_vif_list_cmd(bp, VIF_LIST_RULE_SET, vifid,
2810 addrs);
2811 }
2812
2813 if (cmd & DRV_STATUS_AFEX_STATSGET_REQ) {
2814 addr_to_write = SHMEM2_RD(bp,
2815 afex_scratchpad_addr_to_write[BP_FW_MB_IDX(bp)]);
2816 stats_type = SHMEM2_RD(bp,
2817 afex_param1_to_driver[BP_FW_MB_IDX(bp)]);
2818
2819 DP(BNX2X_MSG_MCP,
2820 "afex: got MCP req STATSGET_REQ, write to addr 0x%x\n",
2821 addr_to_write);
2822
2823 bnx2x_afex_collect_stats(bp, (void *)&afex_stats, stats_type);
2824
2825
2826 for (i = 0; i < (sizeof(struct afex_stats)/sizeof(u32)); i++)
2827 REG_WR(bp, addr_to_write + i*sizeof(u32),
2828 *(((u32 *)(&afex_stats))+i));
2829
2830
2831 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_STATSGET_ACK, 0);
2832 }
2833
2834 if (cmd & DRV_STATUS_AFEX_VIFSET_REQ) {
2835 mf_config = MF_CFG_RD(bp, func_mf_config[func].config);
2836 bp->mf_config[BP_VN(bp)] = mf_config;
2837 DP(BNX2X_MSG_MCP,
2838 "afex: got MCP req VIFSET_REQ, mf_config 0x%x\n",
2839 mf_config);
2840
2841
2842 if (!(mf_config & FUNC_MF_CFG_FUNC_DISABLED)) {
2843
2844 struct cmng_init_input cmng_input;
2845 struct rate_shaping_vars_per_vn m_rs_vn;
2846 size_t size = sizeof(struct rate_shaping_vars_per_vn);
2847 u32 addr = BAR_XSTRORM_INTMEM +
2848 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(BP_FUNC(bp));
2849
2850 bp->mf_config[BP_VN(bp)] = mf_config;
2851
2852 bnx2x_calc_vn_max(bp, BP_VN(bp), &cmng_input);
2853 m_rs_vn.vn_counter.rate =
2854 cmng_input.vnic_max_rate[BP_VN(bp)];
2855 m_rs_vn.vn_counter.quota =
2856 (m_rs_vn.vn_counter.rate *
2857 RS_PERIODIC_TIMEOUT_USEC) / 8;
2858
2859 __storm_memset_struct(bp, addr, size, (u32 *)&m_rs_vn);
2860
2861
2862 vif_id =
2863 (MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
2864 FUNC_MF_CFG_E1HOV_TAG_MASK) >>
2865 FUNC_MF_CFG_E1HOV_TAG_SHIFT;
2866 vlan_val =
2867 (MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
2868 FUNC_MF_CFG_AFEX_VLAN_MASK) >>
2869 FUNC_MF_CFG_AFEX_VLAN_SHIFT;
2870 vlan_prio = (mf_config &
2871 FUNC_MF_CFG_TRANSMIT_PRIORITY_MASK) >>
2872 FUNC_MF_CFG_TRANSMIT_PRIORITY_SHIFT;
2873 vlan_val |= (vlan_prio << VLAN_PRIO_SHIFT);
2874 vlan_mode =
2875 (MF_CFG_RD(bp,
2876 func_mf_config[func].afex_config) &
2877 FUNC_MF_CFG_AFEX_VLAN_MODE_MASK) >>
2878 FUNC_MF_CFG_AFEX_VLAN_MODE_SHIFT;
2879 allowed_prio =
2880 (MF_CFG_RD(bp,
2881 func_mf_config[func].afex_config) &
2882 FUNC_MF_CFG_AFEX_COS_FILTER_MASK) >>
2883 FUNC_MF_CFG_AFEX_COS_FILTER_SHIFT;
2884
2885
2886 if (bnx2x_afex_func_update(bp, vif_id, vlan_val,
2887 allowed_prio))
2888 return;
2889
2890 bp->afex_def_vlan_tag = vlan_val;
2891 bp->afex_vlan_mode = vlan_mode;
2892 } else {
2893
2894 bnx2x_link_report(bp);
2895
2896
2897 bnx2x_afex_func_update(bp, 0xFFFF, 0, 0);
2898
2899
2900 bp->afex_def_vlan_tag = -1;
2901 }
2902 }
2903 }
2904
2905 static void bnx2x_handle_update_svid_cmd(struct bnx2x *bp)
2906 {
2907 struct bnx2x_func_switch_update_params *switch_update_params;
2908 struct bnx2x_func_state_params func_params;
2909
2910 memset(&func_params, 0, sizeof(struct bnx2x_func_state_params));
2911 switch_update_params = &func_params.params.switch_update;
2912 func_params.f_obj = &bp->func_obj;
2913 func_params.cmd = BNX2X_F_CMD_SWITCH_UPDATE;
2914
2915
2916 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
2917 __set_bit(RAMROD_RETRY, &func_params.ramrod_flags);
2918
2919 if (IS_MF_UFP(bp) || IS_MF_BD(bp)) {
2920 int func = BP_ABS_FUNC(bp);
2921 u32 val;
2922
2923
2924 val = MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
2925 FUNC_MF_CFG_E1HOV_TAG_MASK;
2926 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
2927 bp->mf_ov = val;
2928 } else {
2929 BNX2X_ERR("Got an SVID event, but no tag is configured in shmem\n");
2930 goto fail;
2931 }
2932
2933
2934 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + BP_PORT(bp) * 8,
2935 bp->mf_ov);
2936
2937
2938 __set_bit(BNX2X_F_UPDATE_SD_VLAN_TAG_CHNG,
2939 &switch_update_params->changes);
2940 switch_update_params->vlan = bp->mf_ov;
2941
2942 if (bnx2x_func_state_change(bp, &func_params) < 0) {
2943 BNX2X_ERR("Failed to configure FW of S-tag Change to %02x\n",
2944 bp->mf_ov);
2945 goto fail;
2946 } else {
2947 DP(BNX2X_MSG_MCP, "Configured S-tag %02x\n",
2948 bp->mf_ov);
2949 }
2950 } else {
2951 goto fail;
2952 }
2953
2954 bnx2x_fw_command(bp, DRV_MSG_CODE_OEM_UPDATE_SVID_OK, 0);
2955 return;
2956 fail:
2957 bnx2x_fw_command(bp, DRV_MSG_CODE_OEM_UPDATE_SVID_FAILURE, 0);
2958 }
2959
2960 static void bnx2x_pmf_update(struct bnx2x *bp)
2961 {
2962 int port = BP_PORT(bp);
2963 u32 val;
2964
2965 bp->port.pmf = 1;
2966 DP(BNX2X_MSG_MCP, "pmf %d\n", bp->port.pmf);
2967
/* We need the mb() to ensure the ordering between the writing to
 * bp->port.pmf here and reading it from the bnx2x_periodic_task().
 */
2972 smp_mb();
2973
2974
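/* queue a periodic task */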
2975 queue_delayed_work(bnx2x_wq, &bp->period_task, 0);
2976
2977 bnx2x_dcbx_pmf_update(bp);
2978
2979
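/* enable nig attention */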
2980 val = (0xff0f | (1 << (BP_VN(bp) + 4)));
2981 if (bp->common.int_block == INT_BLOCK_HC) {
2982 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2983 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2984 } else if (!CHIP_IS_E1x(bp)) {
2985 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
2986 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);
2987 }
2988
2989 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2990 }
2991
/**
 * bnx2x_fw_command - post a request in the MCP mailbox and poll for a reply
 *
 * @bp:		driver handle
 * @command:	command to post
 * @param:	command parameter
 *
 * Returns the FW response code on success, or 0 if the FW failed to
 * respond within the polling period.
 */
3001 u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param)
3002 {
3003 int mb_idx = BP_FW_MB_IDX(bp);
3004 u32 seq;
3005 u32 rc = 0;
3006 u32 cnt = 1;
3007 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
3008
3009 mutex_lock(&bp->fw_mb_mutex);
3010 seq = ++bp->fw_seq;
3011 SHMEM_WR(bp, func_mb[mb_idx].drv_mb_param, param);
3012 SHMEM_WR(bp, func_mb[mb_idx].drv_mb_header, (command | seq));
3013
3014 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB param 0x%08x\n",
3015 (command | seq), param);
3016
3017 do {
3018
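/* let the FW do its magic ... */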
3019 msleep(delay);
3020
3021 rc = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_header);
3022
3023
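/* Give the FW up to 5 seconds (500 * 10ms) */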
3024 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
3025
3026 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
3027 cnt*delay, rc, seq);
3028
3029
3030 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
3031 rc &= FW_MSG_CODE_MASK;
3032 else {
3033
3034 BNX2X_ERR("FW failed to respond!\n");
3035 bnx2x_fw_dump(bp);
3036 rc = 0;
3037 }
3038 mutex_unlock(&bp->fw_mb_mutex);
3039
3040 return rc;
3041 }
3042
3043 static void storm_memset_func_cfg(struct bnx2x *bp,
3044 struct tstorm_eth_function_common_config *tcfg,
3045 u16 abs_fid)
3046 {
3047 size_t size = sizeof(struct tstorm_eth_function_common_config);
3048
3049 u32 addr = BAR_TSTRORM_INTMEM +
3050 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid);
3051
3052 __storm_memset_struct(bp, addr, size, (u32 *)tcfg);
3053 }
3054
3055 void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
3056 {
3057 if (CHIP_IS_E1x(bp)) {
3058 struct tstorm_eth_function_common_config tcfg = {0};
3059
3060 storm_memset_func_cfg(bp, &tcfg, p->func_id);
3061 }
3062
3063
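/* Enable the function in the FW */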
3064 storm_memset_vf_to_pf(bp, p->func_id, p->pf_id);
3065 storm_memset_func_en(bp, p->func_id, 1);
3066
3067
3068 if (p->spq_active) {
3069 storm_memset_spq_addr(bp, p->spq_map, p->func_id);
3070 REG_WR(bp, XSEM_REG_FAST_MEMORY +
3071 XSTORM_SPQ_PROD_OFFSET(p->func_id), p->spq_prod);
3072 }
3073 }
3074
/**
 * bnx2x_get_common_flags - Return common flags
 *
 * @bp:		device handle
 * @fp:		queue handle
 * @zero_stats:	TRUE if statistics zeroing is needed
 *
 * Return the flags that are common for the Tx-only and not normal connections.
 */
3084 static unsigned long bnx2x_get_common_flags(struct bnx2x *bp,
3085 struct bnx2x_fastpath *fp,
3086 bool zero_stats)
3087 {
3088 unsigned long flags = 0;
3089
3090
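/* PF driver will always initialize the Queue to an ACTIVE state */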
3091 __set_bit(BNX2X_Q_FLG_ACTIVE, &flags);
3092
/* tx only connections collect statistics (on the same index as the
 * parent connection). The statistics are zeroed when the parent
 * connection is initialized.
 */
3098 __set_bit(BNX2X_Q_FLG_STATS, &flags);
3099 if (zero_stats)
3100 __set_bit(BNX2X_Q_FLG_ZERO_STATS, &flags);
3101
3102 if (bp->flags & TX_SWITCHING)
3103 __set_bit(BNX2X_Q_FLG_TX_SWITCH, &flags);
3104
3105 __set_bit(BNX2X_Q_FLG_PCSUM_ON_PKT, &flags);
3106 __set_bit(BNX2X_Q_FLG_TUN_INC_INNER_IP_ID, &flags);
3107
3108 #ifdef BNX2X_STOP_ON_ERROR
3109 __set_bit(BNX2X_Q_FLG_TX_SEC, &flags);
3110 #endif
3111
3112 return flags;
3113 }
3114
3115 static unsigned long bnx2x_get_q_flags(struct bnx2x *bp,
3116 struct bnx2x_fastpath *fp,
3117 bool leading)
3118 {
3119 unsigned long flags = 0;
3120
3121
3122 if (IS_MF_SD(bp))
3123 __set_bit(BNX2X_Q_FLG_OV, &flags);
3124
3125 if (IS_FCOE_FP(fp)) {
3126 __set_bit(BNX2X_Q_FLG_FCOE, &flags);
3127
3128 __set_bit(BNX2X_Q_FLG_FORCE_DEFAULT_PRI, &flags);
3129 }
3130
3131 if (fp->mode != TPA_MODE_DISABLED) {
3132 __set_bit(BNX2X_Q_FLG_TPA, &flags);
3133 __set_bit(BNX2X_Q_FLG_TPA_IPV6, &flags);
3134 if (fp->mode == TPA_MODE_GRO)
3135 __set_bit(BNX2X_Q_FLG_TPA_GRO, &flags);
3136 }
3137
3138 if (leading) {
3139 __set_bit(BNX2X_Q_FLG_LEADING_RSS, &flags);
3140 __set_bit(BNX2X_Q_FLG_MCAST, &flags);
3141 }
3142
3143
3144 __set_bit(BNX2X_Q_FLG_VLAN, &flags);
3145
3146
3147 if (IS_MF_AFEX(bp))
3148 __set_bit(BNX2X_Q_FLG_SILENT_VLAN_REM, &flags);
3149
3150 return flags | bnx2x_get_common_flags(bp, fp, true);
3151 }
3152
3153 static void bnx2x_pf_q_prep_general(struct bnx2x *bp,
3154 struct bnx2x_fastpath *fp, struct bnx2x_general_setup_params *gen_init,
3155 u8 cos)
3156 {
3157 gen_init->stat_id = bnx2x_stats_id(fp);
3158 gen_init->spcl_id = fp->cl_id;
3159
3160
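/* Always use mini-jumbo MTU for FCoE L2 ring */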
3161 if (IS_FCOE_FP(fp))
3162 gen_init->mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
3163 else
3164 gen_init->mtu = bp->dev->mtu;
3165
3166 gen_init->cos = cos;
3167
3168 gen_init->fp_hsi = ETH_FP_HSI_VERSION;
3169 }
3170
3171 static void bnx2x_pf_rx_q_prep(struct bnx2x *bp,
3172 struct bnx2x_fastpath *fp, struct rxq_pause_params *pause,
3173 struct bnx2x_rxq_setup_params *rxq_init)
3174 {
3175 u8 max_sge = 0;
3176 u16 sge_sz = 0;
3177 u16 tpa_agg_size = 0;
3178
3179 if (fp->mode != TPA_MODE_DISABLED) {
3180 pause->sge_th_lo = SGE_TH_LO(bp);
3181 pause->sge_th_hi = SGE_TH_HI(bp);
3182
3183
3184 WARN_ON(bp->dropless_fc &&
3185 pause->sge_th_hi + FW_PREFETCH_CNT >
3186 MAX_RX_SGE_CNT * NUM_RX_SGE_PAGES);
3187
3188 tpa_agg_size = TPA_AGG_SIZE;
3189 max_sge = SGE_PAGE_ALIGN(bp->dev->mtu) >>
3190 SGE_PAGE_SHIFT;
3191 max_sge = ((max_sge + PAGES_PER_SGE - 1) &
3192 (~(PAGES_PER_SGE-1))) >> PAGES_PER_SGE_SHIFT;
3193 sge_sz = (u16)min_t(u32, SGE_PAGES, 0xffff);
3194 }
3195
3196
3197 if (!CHIP_IS_E1(bp)) {
3198 pause->bd_th_lo = BD_TH_LO(bp);
3199 pause->bd_th_hi = BD_TH_HI(bp);
3200
3201 pause->rcq_th_lo = RCQ_TH_LO(bp);
3202 pause->rcq_th_hi = RCQ_TH_HI(bp);
3203
/* validate rings have enough entries to cross
 * high thresholds
 */
3207 WARN_ON(bp->dropless_fc &&
3208 pause->bd_th_hi + FW_PREFETCH_CNT >
3209 bp->rx_ring_size);
3210 WARN_ON(bp->dropless_fc &&
3211 pause->rcq_th_hi + FW_PREFETCH_CNT >
3212 NUM_RCQ_RINGS * MAX_RCQ_DESC_CNT);
3213
3214 pause->pri_map = 1;
3215 }
3216
3217
3218 rxq_init->dscr_map = fp->rx_desc_mapping;
3219 rxq_init->sge_map = fp->rx_sge_mapping;
3220 rxq_init->rcq_map = fp->rx_comp_mapping;
3221 rxq_init->rcq_np_map = fp->rx_comp_mapping + BCM_PAGE_SIZE;
3222
/* This should be a maximum number of data bytes that may be
 * placed on the BD (not including paddings).
 */
3226 rxq_init->buf_sz = fp->rx_buf_size - BNX2X_FW_RX_ALIGN_START -
3227 BNX2X_FW_RX_ALIGN_END - IP_HEADER_ALIGNMENT_PADDING;
3228
3229 rxq_init->cl_qzone_id = fp->cl_qzone_id;
3230 rxq_init->tpa_agg_sz = tpa_agg_size;
3231 rxq_init->sge_buf_sz = sge_sz;
3232 rxq_init->max_sges_pkt = max_sge;
3233 rxq_init->rss_engine_id = BP_FUNC(bp);
3234 rxq_init->mcast_engine_id = BP_FUNC(bp);
3235
/* Maximum number of simultaneous TPA aggregations for this Queue.
 *
 * For PF Clients it should be the maximum available number.
 * VF driver(s) may want to define it to a smaller value.
 */
3241 rxq_init->max_tpa_queues = MAX_AGG_QS(bp);
3242
3243 rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT;
3244 rxq_init->fw_sb_id = fp->fw_sb_id;
3245
3246 if (IS_FCOE_FP(fp))
3247 rxq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS;
3248 else
3249 rxq_init->sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;
3250
/* configure silent vlan removal
 * if multi function mode is afex, then mask default vlan
 */
3253 if (IS_MF_AFEX(bp)) {
3254 rxq_init->silent_removal_value = bp->afex_def_vlan_tag;
3255 rxq_init->silent_removal_mask = VLAN_VID_MASK;
3256 }
3257 }
3258
3259 static void bnx2x_pf_tx_q_prep(struct bnx2x *bp,
3260 struct bnx2x_fastpath *fp, struct bnx2x_txq_setup_params *txq_init,
3261 u8 cos)
3262 {
3263 txq_init->dscr_map = fp->txdata_ptr[cos]->tx_desc_mapping;
3264 txq_init->sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS + cos;
3265 txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
3266 txq_init->fw_sb_id = fp->fw_sb_id;
/* set the tss leading client id for TX classification ==
 * leading RSS client id
 */
3272 txq_init->tss_leading_cl_id = bnx2x_fp(bp, 0, cl_id);
3273
3274 if (IS_FCOE_FP(fp)) {
3275 txq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_TX_CQ_CONS;
3276 txq_init->traffic_type = LLFC_TRAFFIC_TYPE_FCOE;
3277 }
3278 }
3279
3280 static void bnx2x_pf_init(struct bnx2x *bp)
3281 {
3282 struct bnx2x_func_init_params func_init = {0};
3283 struct event_ring_data eq_data = { {0} };
3284
3285 if (!CHIP_IS_E1x(bp)) {
3286
3287
3288 REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
3289 BNX2X_IGU_STAS_MSG_VF_CNT*4 +
3290 (CHIP_MODE_IS_4_PORT(bp) ?
3291 BP_FUNC(bp) : BP_VN(bp))*4, 0);
3292
3293 REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
3294 BNX2X_IGU_STAS_MSG_VF_CNT*4 +
3295 BNX2X_IGU_STAS_MSG_PF_CNT*4 +
3296 (CHIP_MODE_IS_4_PORT(bp) ?
3297 BP_FUNC(bp) : BP_VN(bp))*4, 0);
3298 }
3299
3300 func_init.spq_active = true;
3301 func_init.pf_id = BP_FUNC(bp);
3302 func_init.func_id = BP_FUNC(bp);
3303 func_init.spq_map = bp->spq_mapping;
3304 func_init.spq_prod = bp->spq_prod_idx;
3305
3306 bnx2x_func_init(bp, &func_init);
3307
3308 memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
/* Congestion management values depend on the link rate.
 * There is no active link so initial link rate is set to 10Gbps.
 * When the link comes up the congestion management values are
 * re-calculated according to the actual link rate.
 */
3316 bp->link_vars.line_speed = SPEED_10000;
3317 bnx2x_cmng_fns_init(bp, true, bnx2x_get_cmng_fns_mode(bp));
3318
3319
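/* Only the PMF sets the HW */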
3320 if (bp->port.pmf)
3321 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
3322
3323
3324 eq_data.base_addr.hi = U64_HI(bp->eq_mapping);
3325 eq_data.base_addr.lo = U64_LO(bp->eq_mapping);
3326 eq_data.producer = bp->eq_prod;
3327 eq_data.index_id = HC_SP_INDEX_EQ_CONS;
3328 eq_data.sb_id = DEF_SB_ID;
3329 storm_memset_eq_data(bp, &eq_data, BP_FUNC(bp));
3330 }
3331
3332 static void bnx2x_e1h_disable(struct bnx2x *bp)
3333 {
3334 int port = BP_PORT(bp);
3335
3336 bnx2x_tx_disable(bp);
3337
3338 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
3339 }
3340
3341 static void bnx2x_e1h_enable(struct bnx2x *bp)
3342 {
3343 int port = BP_PORT(bp);
3344
3345 if (!(IS_MF_UFP(bp) && BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp)))
3346 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port * 8, 1);
3347
3348
3349 netif_tx_wake_all_queues(bp->dev);
3350
3351
3352
3353
3354
3355 }
3356
3357 #define DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED 3
3358
3359 static void bnx2x_drv_info_ether_stat(struct bnx2x *bp)
3360 {
3361 struct eth_stats_info *ether_stat =
3362 &bp->slowpath->drv_info_to_mcp.ether_stat;
3363 struct bnx2x_vlan_mac_obj *mac_obj =
3364 &bp->sp_objs->mac_obj;
3365 int i;
3366
3367 strlcpy(ether_stat->version, DRV_MODULE_VERSION,
3368 ETH_STAT_INFO_VERSION_LEN);
/* get DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED macs, placing them in the
 * mac_local field in ether_stat struct. The base address is offset by 2
 * bytes to account for the field being 8 bytes but a mac address is
 * only 6 bytes. Likewise, the stride for the get_n_elements function is
 * 2 bytes to compensate from the 6 bytes of a mac to the 8 bytes
 * allocated by the ether_stat struct, so the macs will land in their
 * proper positions.
 */
3378 for (i = 0; i < DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED; i++)
3379 memset(ether_stat->mac_local + i, 0,
3380 sizeof(ether_stat->mac_local[0]));
3381 mac_obj->get_n_elements(bp, &bp->sp_objs[0].mac_obj,
3382 DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED,
3383 ether_stat->mac_local + MAC_PAD, MAC_PAD,
3384 ETH_ALEN);
3385 ether_stat->mtu_size = bp->dev->mtu;
3386 if (bp->dev->features & NETIF_F_RXCSUM)
3387 ether_stat->feature_flags |= FEATURE_ETH_CHKSUM_OFFLOAD_MASK;
3388 if (bp->dev->features & NETIF_F_TSO)
3389 ether_stat->feature_flags |= FEATURE_ETH_LSO_MASK;
3390 ether_stat->feature_flags |= bp->common.boot_mode;
3391
3392 ether_stat->promiscuous_mode = (bp->dev->flags & IFF_PROMISC) ? 1 : 0;
3393
3394 ether_stat->txq_size = bp->tx_ring_size;
3395 ether_stat->rxq_size = bp->rx_ring_size;
3396
3397 #ifdef CONFIG_BNX2X_SRIOV
3398 ether_stat->vf_cnt = IS_SRIOV(bp) ? bp->vfdb->sriov.nr_virtfn : 0;
3399 #endif
3400 }
3401
3402 static void bnx2x_drv_info_fcoe_stat(struct bnx2x *bp)
3403 {
3404 struct bnx2x_dcbx_app_params *app = &bp->dcbx_port_params.app;
3405 struct fcoe_stats_info *fcoe_stat =
3406 &bp->slowpath->drv_info_to_mcp.fcoe_stat;
3407
3408 if (!CNIC_LOADED(bp))
3409 return;
3410
3411 memcpy(fcoe_stat->mac_local + MAC_PAD, bp->fip_mac, ETH_ALEN);
3412
3413 fcoe_stat->qos_priority =
3414 app->traffic_type_priority[LLFC_TRAFFIC_TYPE_FCOE];
3415
3416
3417 if (!NO_FCOE(bp)) {
3418 struct tstorm_per_queue_stats *fcoe_q_tstorm_stats =
3419 &bp->fw_stats_data->queue_stats[FCOE_IDX(bp)].
3420 tstorm_queue_statistics;
3421
3422 struct xstorm_per_queue_stats *fcoe_q_xstorm_stats =
3423 &bp->fw_stats_data->queue_stats[FCOE_IDX(bp)].
3424 xstorm_queue_statistics;
3425
3426 struct fcoe_statistics_params *fw_fcoe_stat =
3427 &bp->fw_stats_data->fcoe;
3428
3429 ADD_64_LE(fcoe_stat->rx_bytes_hi, LE32_0,
3430 fcoe_stat->rx_bytes_lo,
3431 fw_fcoe_stat->rx_stat0.fcoe_rx_byte_cnt);
3432
3433 ADD_64_LE(fcoe_stat->rx_bytes_hi,
3434 fcoe_q_tstorm_stats->rcv_ucast_bytes.hi,
3435 fcoe_stat->rx_bytes_lo,
3436 fcoe_q_tstorm_stats->rcv_ucast_bytes.lo);
3437
3438 ADD_64_LE(fcoe_stat->rx_bytes_hi,
3439 fcoe_q_tstorm_stats->rcv_bcast_bytes.hi,
3440 fcoe_stat->rx_bytes_lo,
3441 fcoe_q_tstorm_stats->rcv_bcast_bytes.lo);
3442
3443 ADD_64_LE(fcoe_stat->rx_bytes_hi,
3444 fcoe_q_tstorm_stats->rcv_mcast_bytes.hi,
3445 fcoe_stat->rx_bytes_lo,
3446 fcoe_q_tstorm_stats->rcv_mcast_bytes.lo);
3447
3448 ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0,
3449 fcoe_stat->rx_frames_lo,
3450 fw_fcoe_stat->rx_stat0.fcoe_rx_pkt_cnt);
3451
3452 ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0,
3453 fcoe_stat->rx_frames_lo,
3454 fcoe_q_tstorm_stats->rcv_ucast_pkts);
3455
3456 ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0,
3457 fcoe_stat->rx_frames_lo,
3458 fcoe_q_tstorm_stats->rcv_bcast_pkts);
3459
3460 ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0,
3461 fcoe_stat->rx_frames_lo,
3462 fcoe_q_tstorm_stats->rcv_mcast_pkts);
3463
3464 ADD_64_LE(fcoe_stat->tx_bytes_hi, LE32_0,
3465 fcoe_stat->tx_bytes_lo,
3466 fw_fcoe_stat->tx_stat.fcoe_tx_byte_cnt);
3467
3468 ADD_64_LE(fcoe_stat->tx_bytes_hi,
3469 fcoe_q_xstorm_stats->ucast_bytes_sent.hi,
3470 fcoe_stat->tx_bytes_lo,
3471 fcoe_q_xstorm_stats->ucast_bytes_sent.lo);
3472
3473 ADD_64_LE(fcoe_stat->tx_bytes_hi,
3474 fcoe_q_xstorm_stats->bcast_bytes_sent.hi,
3475 fcoe_stat->tx_bytes_lo,
3476 fcoe_q_xstorm_stats->bcast_bytes_sent.lo);
3477
3478 ADD_64_LE(fcoe_stat->tx_bytes_hi,
3479 fcoe_q_xstorm_stats->mcast_bytes_sent.hi,
3480 fcoe_stat->tx_bytes_lo,
3481 fcoe_q_xstorm_stats->mcast_bytes_sent.lo);
3482
3483 ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0,
3484 fcoe_stat->tx_frames_lo,
3485 fw_fcoe_stat->tx_stat.fcoe_tx_pkt_cnt);
3486
3487 ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0,
3488 fcoe_stat->tx_frames_lo,
3489 fcoe_q_xstorm_stats->ucast_pkts_sent);
3490
3491 ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0,
3492 fcoe_stat->tx_frames_lo,
3493 fcoe_q_xstorm_stats->bcast_pkts_sent);
3494
3495 ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0,
3496 fcoe_stat->tx_frames_lo,
3497 fcoe_q_xstorm_stats->mcast_pkts_sent);
3498 }
3499
3500
3501 bnx2x_cnic_notify(bp, CNIC_CTL_FCOE_STATS_GET_CMD);
3502 }
3503
3504 static void bnx2x_drv_info_iscsi_stat(struct bnx2x *bp)
3505 {
3506 struct bnx2x_dcbx_app_params *app = &bp->dcbx_port_params.app;
3507 struct iscsi_stats_info *iscsi_stat =
3508 &bp->slowpath->drv_info_to_mcp.iscsi_stat;
3509
3510 if (!CNIC_LOADED(bp))
3511 return;
3512
3513 memcpy(iscsi_stat->mac_local + MAC_PAD, bp->cnic_eth_dev.iscsi_mac,
3514 ETH_ALEN);
3515
3516 iscsi_stat->qos_priority =
3517 app->traffic_type_priority[LLFC_TRAFFIC_TYPE_ISCSI];
3518
3519
3520 bnx2x_cnic_notify(bp, CNIC_CTL_ISCSI_STATS_GET_CMD);
3521 }
3522
3523
3524
3525
3526
3527
3528 static void bnx2x_config_mf_bw(struct bnx2x *bp)
3529 {
/* Workaround for MFW bug.
 * MFW is not supposed to generate BW attention in
 * single function mode.
 */
3534 if (!IS_MF(bp)) {
3535 DP(BNX2X_MSG_MCP,
3536 "Ignoring MF BW config in single function mode\n");
3537 return;
3538 }
3539
3540 if (bp->link_vars.link_up) {
3541 bnx2x_cmng_fns_init(bp, true, CMNG_FNS_MINMAX);
3542 bnx2x_link_sync_notify(bp);
3543 }
3544 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
3545 }
3546
3547 static void bnx2x_set_mf_bw(struct bnx2x *bp)
3548 {
3549 bnx2x_config_mf_bw(bp);
3550 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW_ACK, 0);
3551 }
3552
3553 static void bnx2x_handle_eee_event(struct bnx2x *bp)
3554 {
3555 DP(BNX2X_MSG_MCP, "EEE - LLDP event\n");
3556 bnx2x_fw_command(bp, DRV_MSG_CODE_EEE_RESULTS_ACK, 0);
3557 }
3558
3559 #define BNX2X_UPDATE_DRV_INFO_IND_LENGTH (20)
3560 #define BNX2X_UPDATE_DRV_INFO_IND_COUNT (25)
3561
3562 static void bnx2x_handle_drv_info_req(struct bnx2x *bp)
3563 {
3564 enum drv_info_opcode op_code;
3565 u32 drv_info_ctl = SHMEM2_RD(bp, drv_info_control);
3566 bool release = false;
3567 int wait;
3568
3569
3570 if ((drv_info_ctl & DRV_INFO_CONTROL_VER_MASK) != DRV_INFO_CUR_VER) {
3571 bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_NACK, 0);
3572 return;
3573 }
3574
3575 op_code = (drv_info_ctl & DRV_INFO_CONTROL_OP_CODE_MASK) >>
3576 DRV_INFO_CONTROL_OP_CODE_SHIFT;
3577
3578
3579 mutex_lock(&bp->drv_info_mutex);
3580
3581 memset(&bp->slowpath->drv_info_to_mcp, 0,
3582 sizeof(union drv_info_to_mcp));
3583
3584 switch (op_code) {
3585 case ETH_STATS_OPCODE:
3586 bnx2x_drv_info_ether_stat(bp);
3587 break;
3588 case FCOE_STATS_OPCODE:
3589 bnx2x_drv_info_fcoe_stat(bp);
3590 break;
3591 case ISCSI_STATS_OPCODE:
3592 bnx2x_drv_info_iscsi_stat(bp);
3593 break;
3594 default:
3595
3596 bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_NACK, 0);
3597 goto out;
3598 }
3599
3600
3601
3602
3603 SHMEM2_WR(bp, drv_info_host_addr_lo,
3604 U64_LO(bnx2x_sp_mapping(bp, drv_info_to_mcp)));
3605 SHMEM2_WR(bp, drv_info_host_addr_hi,
3606 U64_HI(bnx2x_sp_mapping(bp, drv_info_to_mcp)));
3607
3608 bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_ACK, 0);
3609
3610
3611
3612
3613
3614 if (!SHMEM2_HAS(bp, mfw_drv_indication)) {
3615 DP(BNX2X_MSG_MCP, "Management does not support indication\n");
3616 } else if (!bp->drv_info_mng_owner) {
3617 u32 bit = MFW_DRV_IND_READ_DONE_OFFSET((BP_ABS_FUNC(bp) >> 1));
3618
3619 for (wait = 0; wait < BNX2X_UPDATE_DRV_INFO_IND_COUNT; wait++) {
3620 u32 indication = SHMEM2_RD(bp, mfw_drv_indication);
3621
3622
3623 if (indication & bit) {
3624 SHMEM2_WR(bp, mfw_drv_indication,
3625 indication & ~bit);
3626 release = true;
3627 break;
3628 }
3629
3630 msleep(BNX2X_UPDATE_DRV_INFO_IND_LENGTH);
3631 }
3632 }
3633 if (!release) {
3634 DP(BNX2X_MSG_MCP, "Management did not release indication\n");
3635 bp->drv_info_mng_owner = true;
3636 }
3637
3638 out:
3639 mutex_unlock(&bp->drv_info_mutex);
3640 }
3641
3642 static u32 bnx2x_update_mng_version_utility(u8 *version, bool bnx2x_format)
3643 {
3644 u8 vals[4];
3645 int i = 0;
3646
3647 if (bnx2x_format) {
3648 i = sscanf(version, "1.%c%hhd.%hhd.%hhd",
3649 &vals[0], &vals[1], &vals[2], &vals[3]);
3650 if (i > 0)
3651 vals[0] -= '0';
3652 } else {
3653 i = sscanf(version, "%hhd.%hhd.%hhd.%hhd",
3654 &vals[0], &vals[1], &vals[2], &vals[3]);
3655 }
3656
3657 while (i < 4)
3658 vals[i++] = 0;
3659
3660 return (vals[0] << 24) | (vals[1] << 16) | (vals[2] << 8) | vals[3];
3661 }
3662
3663 void bnx2x_update_mng_version(struct bnx2x *bp)
3664 {
3665 u32 iscsiver = DRV_VER_NOT_LOADED;
3666 u32 fcoever = DRV_VER_NOT_LOADED;
3667 u32 ethver = DRV_VER_NOT_LOADED;
3668 int idx = BP_FW_MB_IDX(bp);
3669 u8 *version;
3670
3671 if (!SHMEM2_HAS(bp, func_os_drv_ver))
3672 return;
3673
3674 mutex_lock(&bp->drv_info_mutex);
3675
3676 if (bp->drv_info_mng_owner)
3677 goto out;
3678
3679 if (bp->state != BNX2X_STATE_OPEN)
3680 goto out;
3681
3682
3683 ethver = bnx2x_update_mng_version_utility(DRV_MODULE_VERSION, true);
3684 if (!CNIC_LOADED(bp))
3685 goto out;
3686
3687
3688 memset(&bp->slowpath->drv_info_to_mcp, 0,
3689 sizeof(union drv_info_to_mcp));
3690 bnx2x_drv_info_iscsi_stat(bp);
3691 version = bp->slowpath->drv_info_to_mcp.iscsi_stat.version;
3692 iscsiver = bnx2x_update_mng_version_utility(version, false);
3693
3694 memset(&bp->slowpath->drv_info_to_mcp, 0,
3695 sizeof(union drv_info_to_mcp));
3696 bnx2x_drv_info_fcoe_stat(bp);
3697 version = bp->slowpath->drv_info_to_mcp.fcoe_stat.version;
3698 fcoever = bnx2x_update_mng_version_utility(version, false);
3699
3700 out:
3701 SHMEM2_WR(bp, func_os_drv_ver[idx].versions[DRV_PERS_ETHERNET], ethver);
3702 SHMEM2_WR(bp, func_os_drv_ver[idx].versions[DRV_PERS_ISCSI], iscsiver);
3703 SHMEM2_WR(bp, func_os_drv_ver[idx].versions[DRV_PERS_FCOE], fcoever);
3704
3705 mutex_unlock(&bp->drv_info_mutex);
3706
3707 DP(BNX2X_MSG_MCP, "Setting driver version: ETH [%08x] iSCSI [%08x] FCoE [%08x]\n",
3708 ethver, iscsiver, fcoever);
3709 }
3710
3711 void bnx2x_update_mfw_dump(struct bnx2x *bp)
3712 {
3713 u32 drv_ver;
3714 u32 valid_dump;
3715
3716 if (!SHMEM2_HAS(bp, drv_info))
3717 return;
3718
3719
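/* Update shmem data each time this function is called */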
3720 SHMEM2_WR(bp, drv_info.epoc, (u32)ktime_get_real_seconds());
3721
3722 drv_ver = bnx2x_update_mng_version_utility(DRV_MODULE_VERSION, true);
3723 SHMEM2_WR(bp, drv_info.drv_ver, drv_ver);
3724
3725 SHMEM2_WR(bp, drv_info.fw_ver, REG_RD(bp, XSEM_REG_PRAM));
3726
3727
3728 valid_dump = SHMEM2_RD(bp, drv_info.valid_dump);
3729
3730 if (valid_dump & FIRST_DUMP_VALID)
3731 DP(NETIF_MSG_IFUP, "A valid On-Chip MFW dump found on 1st partition\n");
3732
3733 if (valid_dump & SECOND_DUMP_VALID)
3734 DP(NETIF_MSG_IFUP, "A valid On-Chip MFW dump found on 2nd partition\n");
3735 }
3736
3737 static void bnx2x_oem_event(struct bnx2x *bp, u32 event)
3738 {
3739 u32 cmd_ok, cmd_fail;
3740
3741
3742 if (event & DRV_STATUS_DCC_EVENT_MASK &&
3743 event & DRV_STATUS_OEM_EVENT_MASK) {
3744 BNX2X_ERR("Received simultaneous events %08x\n", event);
3745 return;
3746 }
3747
3748 if (event & DRV_STATUS_DCC_EVENT_MASK) {
3749 cmd_fail = DRV_MSG_CODE_DCC_FAILURE;
3750 cmd_ok = DRV_MSG_CODE_DCC_OK;
3751 } else {
3752 cmd_fail = DRV_MSG_CODE_OEM_FAILURE;
3753 cmd_ok = DRV_MSG_CODE_OEM_OK;
3754 }
3755
3756 DP(BNX2X_MSG_MCP, "oem_event 0x%x\n", event);
3757
3758 if (event & (DRV_STATUS_DCC_DISABLE_ENABLE_PF |
3759 DRV_STATUS_OEM_DISABLE_ENABLE_PF)) {
/* This is the only place besides the function initialization
 * where the bp->flags can change so it is done without any
 * locks
 */
3764 if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) {
3765 DP(BNX2X_MSG_MCP, "mf_cfg function disabled\n");
3766 bp->flags |= MF_FUNC_DIS;
3767
3768 bnx2x_e1h_disable(bp);
3769 } else {
3770 DP(BNX2X_MSG_MCP, "mf_cfg function enabled\n");
3771 bp->flags &= ~MF_FUNC_DIS;
3772
3773 bnx2x_e1h_enable(bp);
3774 }
3775 event &= ~(DRV_STATUS_DCC_DISABLE_ENABLE_PF |
3776 DRV_STATUS_OEM_DISABLE_ENABLE_PF);
3777 }
3778
3779 if (event & (DRV_STATUS_DCC_BANDWIDTH_ALLOCATION |
3780 DRV_STATUS_OEM_BANDWIDTH_ALLOCATION)) {
3781 bnx2x_config_mf_bw(bp);
3782 event &= ~(DRV_STATUS_DCC_BANDWIDTH_ALLOCATION |
3783 DRV_STATUS_OEM_BANDWIDTH_ALLOCATION);
3784 }
3785
3786
3787 if (event)
3788 bnx2x_fw_command(bp, cmd_fail, 0);
3789 else
3790 bnx2x_fw_command(bp, cmd_ok, 0);
3791 }
3792
3793
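/* must be called under the spq lock */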
3794 static struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
3795 {
3796 struct eth_spe *next_spe = bp->spq_prod_bd;
3797
3798 if (bp->spq_prod_bd == bp->spq_last_bd) {
3799 bp->spq_prod_bd = bp->spq;
3800 bp->spq_prod_idx = 0;
3801 DP(BNX2X_MSG_SP, "end of spq\n");
3802 } else {
3803 bp->spq_prod_bd++;
3804 bp->spq_prod_idx++;
3805 }
3806 return next_spe;
3807 }
3808
3809
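/* must be called under the spq lock */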
3810 static void bnx2x_sp_prod_update(struct bnx2x *bp)
3811 {
3812 int func = BP_FUNC(bp);
3813
/* Make sure that BD data is updated before writing the producer:
 * BD data is written to the memory, the producer is read from the
 * memory, thus we need a full memory barrier to ensure the ordering.
 */
3819 mb();
3820
3821 REG_WR16_RELAXED(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
3822 bp->spq_prod_idx);
3823 }
3824
/**
 * bnx2x_is_contextless_ramrod - check if the current command ends on EQ
 *
 * @cmd:	command to check
 * @cmd_type:	command's type
 */
3831 static bool bnx2x_is_contextless_ramrod(int cmd, int cmd_type)
3832 {
3833 if ((cmd_type == NONE_CONNECTION_TYPE) ||
3834 (cmd == RAMROD_CMD_ID_ETH_FORWARD_SETUP) ||
3835 (cmd == RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES) ||
3836 (cmd == RAMROD_CMD_ID_ETH_FILTER_RULES) ||
3837 (cmd == RAMROD_CMD_ID_ETH_MULTICAST_RULES) ||
3838 (cmd == RAMROD_CMD_ID_ETH_SET_MAC) ||
3839 (cmd == RAMROD_CMD_ID_ETH_RSS_UPDATE))
3840 return true;
3841 else
3842 return false;
3843 }
3844
/**
 * bnx2x_sp_post - place a single command on an SP ring
 *
 * @bp:		driver handle
 * @command:	command to place (e.g. SETUP, FILTER_RULES, etc.)
 * @cid:	SW CID the command is related to
 * @data_hi:	command private data address (high 32 bits)
 * @data_lo:	command private data address (low 32 bits)
 * @cmd_type:	command type (e.g. NONE, ETH)
 *
 * SP data is handled as if it's always an address pair, thus data fields are
 * not swapped to little endian in upper functions. Instead this function swaps
 * data as if it's two u32 fields.
 */
3859 int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
3860 u32 data_hi, u32 data_lo, int cmd_type)
3861 {
3862 struct eth_spe *spe;
3863 u16 type;
3864 bool common = bnx2x_is_contextless_ramrod(command, cmd_type);
3865
3866 #ifdef BNX2X_STOP_ON_ERROR
3867 if (unlikely(bp->panic)) {
3868 BNX2X_ERR("Can't post SP when there is panic\n");
3869 return -EIO;
3870 }
3871 #endif
3872
3873 spin_lock_bh(&bp->spq_lock);
3874
3875 if (common) {
3876 if (!atomic_read(&bp->eq_spq_left)) {
3877 BNX2X_ERR("BUG! EQ ring full!\n");
3878 spin_unlock_bh(&bp->spq_lock);
3879 bnx2x_panic();
3880 return -EBUSY;
3881 }
3882 } else if (!atomic_read(&bp->cq_spq_left)) {
3883 BNX2X_ERR("BUG! SPQ ring full!\n");
3884 spin_unlock_bh(&bp->spq_lock);
3885 bnx2x_panic();
3886 return -EBUSY;
3887 }
3888
3889 spe = bnx2x_sp_get_next(bp);
3890
3891 /* CID needs the port number to be encoded in it */
3892 spe->hdr.conn_and_cmd_data =
3893 cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
3894 HW_CID(bp, cid));
3895
3896 /* In some cases the type may already contain the function-id
3897  * (SPE_HDR_FUNCTION_ID set by the caller); otherwise build it here
3898  * from the connection type and our own function number.
3899  */
3900 if (!(cmd_type & SPE_HDR_FUNCTION_ID)) {
3901 type = (cmd_type << SPE_HDR_CONN_TYPE_SHIFT) &
3902 SPE_HDR_CONN_TYPE;
3903 type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) &
3904 SPE_HDR_FUNCTION_ID);
3905 } else {
3906 type = cmd_type;
3907 }
3908
3909 spe->hdr.type = cpu_to_le16(type);
3910
3911 spe->data.update_data_addr.hi = cpu_to_le32(data_hi);
3912 spe->data.update_data_addr.lo = cpu_to_le32(data_lo);
3913
3914 /*
3915  * It's ok if the actual decrement is issued towards the memory
3916  * somewhere between the spin_lock and spin_unlock. Thus no
3917  * more explicit memory barrier is needed.
3918  */
3919 if (common)
3920 atomic_dec(&bp->eq_spq_left);
3921 else
3922 atomic_dec(&bp->cq_spq_left);
3923
3924 DP(BNX2X_MSG_SP,
3925 "SPQE[%x] (%x:%x) (cmd, common?) (%d,%d) hw_cid %x data (%x:%x) type(0x%x) left (CQ, EQ) (%x,%x)\n",
3926 bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
3927 (u32)(U64_LO(bp->spq_mapping) +
3928 (void *)bp->spq_prod_bd - (void *)bp->spq), command, common,
3929 HW_CID(bp, cid), data_hi, data_lo, type,
3930 atomic_read(&bp->cq_spq_left), atomic_read(&bp->eq_spq_left));
3931
3932 bnx2x_sp_prod_update(bp);
3933 spin_unlock_bh(&bp->spq_lock);
3934 return 0;
3935 }
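/* Editor's usage sketch (illustrative, not part of this file): callers hand
 * bnx2x_sp_post() the DMA address of a command-specific buffer split into
 * two u32 halves; the ramrod ID, CID and connection type here are
 * hypothetical for this call site:
 *
 *	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, fp->cid,
 *			   U64_HI(data_mapping), U64_LO(data_mapping),
 *			   ETH_CONNECTION_TYPE);
 *	if (rc)
 *		return rc;
 */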
3936
3937 /* acquire split MCP access lock register */
3938 static int bnx2x_acquire_alr(struct bnx2x *bp)
3939 {
3940 u32 j, val;
3941 int rc = 0;
3942
3943 might_sleep();
3944 for (j = 0; j < 1000; j++) {
3945 REG_WR(bp, MCP_REG_MCPR_ACCESS_LOCK, MCPR_ACCESS_LOCK_LOCK);
3946 val = REG_RD(bp, MCP_REG_MCPR_ACCESS_LOCK);
3947 if (val & MCPR_ACCESS_LOCK_LOCK)
3948 break;
3949
3950 usleep_range(5000, 10000);
3951 }
3952 if (!(val & MCPR_ACCESS_LOCK_LOCK)) {
3953 BNX2X_ERR("Cannot acquire MCP access lock register\n");
3954 rc = -EBUSY;
3955 }
3956
3957 return rc;
3958 }
3959
3960 /* release split MCP access lock register */
3961 static void bnx2x_release_alr(struct bnx2x *bp)
3962 {
3963 REG_WR(bp, MCP_REG_MCPR_ACCESS_LOCK, 0);
3964 }
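/* Pairing sketch (illustrative): the ALR serializes access to MCP-shared
 * registers across functions, so a successful acquire must be matched by a
 * release on every path:
 *
 *	if (bnx2x_acquire_alr(bp))
 *		return;		// lock not obtained within the ~5-10s poll
 *	... access shared AEU/MCP state ...
 *	bnx2x_release_alr(bp);
 */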
3965
3966 #define BNX2X_DEF_SB_ATT_IDX 0x0001
3967 #define BNX2X_DEF_SB_IDX 0x0002
3968
3969 static u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
3970 {
3971 struct host_sp_status_block *def_sb = bp->def_status_blk;
3972 u16 rc = 0;
3973
3974 barrier();
3975 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
3976 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
3977 rc |= BNX2X_DEF_SB_ATT_IDX;
3978 }
3979
3980 if (bp->def_idx != def_sb->sp_sb.running_index) {
3981 bp->def_idx = def_sb->sp_sb.running_index;
3982 rc |= BNX2X_DEF_SB_IDX;
3983 }
3984
3985 /* compiler barrier: keep the index reads above ordered before use */
3986 barrier();
3987 return rc;
3988 }
3989
3990 /*
3991  * slow path service functions
3992  */
3993
3994 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
3995 {
3996 int port = BP_PORT(bp);
3997 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
3998 MISC_REG_AEU_MASK_ATTN_FUNC_0;
3999 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
4000 NIG_REG_MASK_INTERRUPT_PORT0;
4001 u32 aeu_mask;
4002 u32 nig_mask = 0;
4003 u32 reg_addr;
4004
4005 if (bp->attn_state & asserted)
4006 BNX2X_ERR("IGU ERROR\n");
4007
4008 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
4009 aeu_mask = REG_RD(bp, aeu_addr);
4010
4011 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
4012 aeu_mask, asserted);
4013 aeu_mask &= ~(asserted & 0x3ff);
4014 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
4015
4016 REG_WR(bp, aeu_addr, aeu_mask);
4017 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
4018
4019 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
4020 bp->attn_state |= asserted;
4021 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
4022
4023 if (asserted & ATTN_HARD_WIRED_MASK) {
4024 if (asserted & ATTN_NIG_FOR_FUNC) {
4025
4026 bnx2x_acquire_phy_lock(bp);
4027
4028 /* save nig interrupt mask */
4029 nig_mask = REG_RD(bp, nig_int_mask_addr);
4030
4031 /* If nig_mask is not set, no need to call the update
4032  * function.
4033  */
4034 if (nig_mask) {
4035 REG_WR(bp, nig_int_mask_addr, 0);
4036
4037 bnx2x_link_attn(bp);
4038 }
4039
4040 /* handle unicore attn? */
4041 }
4042 if (asserted & ATTN_SW_TIMER_4_FUNC)
4043 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
4044
4045 if (asserted & GPIO_2_FUNC)
4046 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
4047
4048 if (asserted & GPIO_3_FUNC)
4049 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
4050
4051 if (asserted & GPIO_4_FUNC)
4052 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
4053
4054 if (port == 0) {
4055 if (asserted & ATTN_GENERAL_ATTN_1) {
4056 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
4057 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
4058 }
4059 if (asserted & ATTN_GENERAL_ATTN_2) {
4060 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
4061 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
4062 }
4063 if (asserted & ATTN_GENERAL_ATTN_3) {
4064 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
4065 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
4066 }
4067 } else {
4068 if (asserted & ATTN_GENERAL_ATTN_4) {
4069 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
4070 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
4071 }
4072 if (asserted & ATTN_GENERAL_ATTN_5) {
4073 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
4074 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
4075 }
4076 if (asserted & ATTN_GENERAL_ATTN_6) {
4077 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
4078 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
4079 }
4080 }
4081
4082 }
4083
4084 if (bp->common.int_block == INT_BLOCK_HC)
4085 reg_addr = (HC_REG_COMMAND_REG + port*32 +
4086 COMMAND_REG_ATTN_BITS_SET);
4087 else
4088 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_SET_UPPER*8);
4089
4090 DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", asserted,
4091 (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
4092 REG_WR(bp, reg_addr, asserted);
4093
4094 /* now set back the mask */
4095 if (asserted & ATTN_NIG_FOR_FUNC) {
4096 /* Verify that IGU ack through BAR was written before restoring
4097  * NIG mask. This loop should exit after 2-3 iterations max.
4098  */
4099 if (bp->common.int_block != INT_BLOCK_HC) {
4100 u32 cnt = 0, igu_acked;
4101 do {
4102 igu_acked = REG_RD(bp,
4103 IGU_REG_ATTENTION_ACK_BITS);
4104 } while (((igu_acked & ATTN_NIG_FOR_FUNC) == 0) &&
4105 (++cnt < MAX_IGU_ATTN_ACK_TO));
4106 if (!igu_acked)
4107 DP(NETIF_MSG_HW,
4108 "Failed to verify IGU ack on time\n");
4109 barrier();
4110 }
4111 REG_WR(bp, nig_int_mask_addr, nig_mask);
4112 bnx2x_release_phy_lock(bp);
4113 }
4114 }
4115
4116 static void bnx2x_fan_failure(struct bnx2x *bp)
4117 {
4118 int port = BP_PORT(bp);
4119 u32 ext_phy_config;
4120
4121 ext_phy_config =
4122 SHMEM_RD(bp,
4123 dev_info.port_hw_config[port].external_phy_config);
4124
4125 ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
4126 ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
4127 SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
4128 ext_phy_config);
4129
4130 /* log the failure */
4131 netdev_err(bp->dev, "Fan Failure on Network Controller has caused the driver to shutdown the card to prevent permanent damage.\n"
4132 "Please contact OEM Support for assistance\n");
4133
4134 /* Schedule device reset (unload)
4135  * This is due to some boards consuming sufficient power when driver is
4136  * up to overheat if fan fails.
4137  */
4138 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_FAN_FAILURE, 0);
4139 }
4140
4141 static void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
4142 {
4143 int port = BP_PORT(bp);
4144 int reg_offset;
4145 u32 val;
4146
4147 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4148 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4149
4150 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
4151
4152 val = REG_RD(bp, reg_offset);
4153 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
4154 REG_WR(bp, reg_offset, val);
4155
4156 BNX2X_ERR("SPIO5 hw attention\n");
4157
4158 /* Fan failure attention */
4159 bnx2x_hw_reset_phy(&bp->link_params);
4160 bnx2x_fan_failure(bp);
4161 }
4162
4163 if ((attn & bp->link_vars.aeu_int_mask) && bp->port.pmf) {
4164 bnx2x_acquire_phy_lock(bp);
4165 bnx2x_handle_module_detect_int(&bp->link_params);
4166 bnx2x_release_phy_lock(bp);
4167 }
4168
4169 if (attn & HW_INTERRUPT_ASSERT_SET_0) {
4170
4171 val = REG_RD(bp, reg_offset);
4172 val &= ~(attn & HW_INTERRUPT_ASSERT_SET_0);
4173 REG_WR(bp, reg_offset, val);
4174
4175 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
4176 (u32)(attn & HW_INTERRUPT_ASSERT_SET_0));
4177 bnx2x_panic();
4178 }
4179 }
4180
4181 static void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
4182 {
4183 u32 val;
4184
4185 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
4186
4187 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
4188 BNX2X_ERR("DB hw attention 0x%x\n", val);
4189
4190 if (val & 0x2)
4191 BNX2X_ERR("FATAL error from DORQ\n");
4192 }
4193
4194 if (attn & HW_INTERRUPT_ASSERT_SET_1) {
4195
4196 int port = BP_PORT(bp);
4197 int reg_offset;
4198
4199 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
4200 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
4201
4202 val = REG_RD(bp, reg_offset);
4203 val &= ~(attn & HW_INTERRUPT_ASSERT_SET_1);
4204 REG_WR(bp, reg_offset, val);
4205
4206 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
4207 (u32)(attn & HW_INTERRUPT_ASSERT_SET_1));
4208 bnx2x_panic();
4209 }
4210 }
4211
4212 static void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
4213 {
4214 u32 val;
4215
4216 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
4217
4218 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
4219 BNX2X_ERR("CFC hw attention 0x%x\n", val);
4220
4221 if (val & 0x2)
4222 BNX2X_ERR("FATAL error from CFC\n");
4223 }
4224
4225 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
4226 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
4227 BNX2X_ERR("PXP hw attention-0 0x%x\n", val);
4228
4229 if (val & 0x18000)
4230 BNX2X_ERR("FATAL error from PXP\n");
4231
4232 if (!CHIP_IS_E1x(bp)) {
4233 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_1);
4234 BNX2X_ERR("PXP hw attention-1 0x%x\n", val);
4235 }
4236 }
4237
4238 if (attn & HW_INTERRUPT_ASSERT_SET_2) {
4239
4240 int port = BP_PORT(bp);
4241 int reg_offset;
4242
4243 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
4244 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
4245
4246 val = REG_RD(bp, reg_offset);
4247 val &= ~(attn & HW_INTERRUPT_ASSERT_SET_2);
4248 REG_WR(bp, reg_offset, val);
4249
4250 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
4251 (u32)(attn & HW_INTERRUPT_ASSERT_SET_2));
4252 bnx2x_panic();
4253 }
4254 }
4255
4256 static void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
4257 {
4258 u32 val;
4259
4260 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
4261
4262 if (attn & BNX2X_PMF_LINK_ASSERT) {
4263 int func = BP_FUNC(bp);
4264
4265 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
4266 bnx2x_read_mf_cfg(bp);
4267 bp->mf_config[BP_VN(bp)] = MF_CFG_RD(bp,
4268 func_mf_config[BP_ABS_FUNC(bp)].config);
4269 val = SHMEM_RD(bp,
4270 func_mb[BP_FW_MB_IDX(bp)].drv_status);
4271
4272 if (val & (DRV_STATUS_DCC_EVENT_MASK |
4273 DRV_STATUS_OEM_EVENT_MASK))
4274 bnx2x_oem_event(bp,
4275 (val & (DRV_STATUS_DCC_EVENT_MASK |
4276 DRV_STATUS_OEM_EVENT_MASK)));
4277
4278 if (val & DRV_STATUS_SET_MF_BW)
4279 bnx2x_set_mf_bw(bp);
4280
4281 if (val & DRV_STATUS_DRV_INFO_REQ)
4282 bnx2x_handle_drv_info_req(bp);
4283
4284 if (val & DRV_STATUS_VF_DISABLED)
4285 bnx2x_schedule_iov_task(bp,
4286 BNX2X_IOV_HANDLE_FLR);
4287
4288 if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
4289 bnx2x_pmf_update(bp);
4290
4291 if (bp->port.pmf &&
4292 (val & DRV_STATUS_DCBX_NEGOTIATION_RESULTS) &&
4293 bp->dcbx_enabled > 0)
4294 /* start dcbx state machine */
4295 bnx2x_dcbx_set_params(bp,
4296 BNX2X_DCBX_STATE_NEG_RECEIVED);
4297 if (val & DRV_STATUS_AFEX_EVENT_MASK)
4298 bnx2x_handle_afex_cmd(bp,
4299 val & DRV_STATUS_AFEX_EVENT_MASK);
4300 if (val & DRV_STATUS_EEE_NEGOTIATION_RESULTS)
4301 bnx2x_handle_eee_event(bp);
4302
4303 if (val & DRV_STATUS_OEM_UPDATE_SVID)
4304 bnx2x_schedule_sp_rtnl(bp,
4305 BNX2X_SP_RTNL_UPDATE_SVID, 0);
4306
4307 if (bp->link_vars.periodic_flags &
4308 PERIODIC_FLAGS_LINK_EVENT) {
4309 /* sync with link */
4310 bnx2x_acquire_phy_lock(bp);
4311 bp->link_vars.periodic_flags &=
4312 ~PERIODIC_FLAGS_LINK_EVENT;
4313 bnx2x_release_phy_lock(bp);
4314 if (IS_MF(bp))
4315 bnx2x_link_sync_notify(bp);
4316 bnx2x_link_report(bp);
4317 }
4318 /* Always call it here: bnx2x_link_report() will
4319  * prevent the link indication duplication.
4320  */
4321 bnx2x__link_status_update(bp);
4322 } else if (attn & BNX2X_MC_ASSERT_BITS) {
4323
4324 BNX2X_ERR("MC assert!\n");
4325 bnx2x_mc_assert(bp);
4326 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
4327 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
4328 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
4329 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
4330 bnx2x_panic();
4331
4332 } else if (attn & BNX2X_MCP_ASSERT) {
4333
4334 BNX2X_ERR("MCP assert!\n");
4335 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
4336 bnx2x_fw_dump(bp);
4337
4338 } else
4339 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
4340 }
4341
4342 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
4343 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
4344 if (attn & BNX2X_GRC_TIMEOUT) {
4345 val = CHIP_IS_E1(bp) ? 0 :
4346 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN);
4347 BNX2X_ERR("GRC time-out 0x%08x\n", val);
4348 }
4349 if (attn & BNX2X_GRC_RSV) {
4350 val = CHIP_IS_E1(bp) ? 0 :
4351 REG_RD(bp, MISC_REG_GRC_RSV_ATTN);
4352 BNX2X_ERR("GRC reserved 0x%08x\n", val);
4353 }
4354 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
4355 }
4356 }
4357
4358 /*
4359  * Bits map:
4360  * 0-7   - Engine0 load counter.
4361  * 8-15  - Engine1 load counter.
4362  * 16    - Engine0 RESET_IN_PROGRESS bit.
4363  * 17    - Engine1 RESET_IN_PROGRESS bit.
4364  * 18    - Engine0 ONE_IS_LOADED. Set when there is at least one active
4365  *         function on the engine
4366  * 19    - Engine1 ONE_IS_LOADED.
4367  * 20    - Chip reset flow bit. When set, a non-leader must wait for both
4368  *         engines' leaders to complete (check for both RESET_IN_PROGRESS
4369  *         bits, not just the one belonging to its engine).
4370  * NOTE: RESET_IN_PROGRESS bit in this code equals the HOLD-resources bit.
4371  */
4372 #define BNX2X_RECOVERY_GLOB_REG MISC_REG_GENERIC_POR_1
4373
4374 #define BNX2X_PATH0_LOAD_CNT_MASK 0x000000ff
4375 #define BNX2X_PATH0_LOAD_CNT_SHIFT 0
4376 #define BNX2X_PATH1_LOAD_CNT_MASK 0x0000ff00
4377 #define BNX2X_PATH1_LOAD_CNT_SHIFT 8
4378 #define BNX2X_PATH0_RST_IN_PROG_BIT 0x00010000
4379 #define BNX2X_PATH1_RST_IN_PROG_BIT 0x00020000
4380 #define BNX2X_GLOBAL_RESET_BIT 0x00040000
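/* Worked example (using the bit layout documented above): a PF with
 * pf_num 2 on path 1 marks itself loaded by setting bit 2 of the path-1
 * load byte, i.e. bit 10 of the register:
 *
 *	val1 = (val & BNX2X_PATH1_LOAD_CNT_MASK) >> BNX2X_PATH1_LOAD_CNT_SHIFT;
 *	val1 |= (1 << 2);				// 0x04
 *	val  = (val & ~BNX2X_PATH1_LOAD_CNT_MASK) |
 *	       (val1 << BNX2X_PATH1_LOAD_CNT_SHIFT);	// sets 0x00000400
 */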
4381
4382 /*
4383  * Set the GLOBAL_RESET bit.
4384  *
4385  * Should be run under rtnl lock
4386  */
4387 void bnx2x_set_reset_global(struct bnx2x *bp)
4388 {
4389 u32 val;
4390 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4391 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4392 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val | BNX2X_GLOBAL_RESET_BIT);
4393 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4394 }
4395
4396 /*
4397  * Clear the GLOBAL_RESET bit.
4398  *
4399  * Should be run under rtnl lock
4400  */
4401 static void bnx2x_clear_reset_global(struct bnx2x *bp)
4402 {
4403 u32 val;
4404 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4405 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4406 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val & (~BNX2X_GLOBAL_RESET_BIT));
4407 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4408 }
4409
4410 /*
4411  * Checks the GLOBAL_RESET bit.
4412  *
4413  * should be run under rtnl lock
4414  */
4415 static bool bnx2x_reset_is_global(struct bnx2x *bp)
4416 {
4417 u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4418
4419 DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
4420 return (val & BNX2X_GLOBAL_RESET_BIT) ? true : false;
4421 }
4422
4423 /*
4424  * Clear RESET_IN_PROGRESS bit for the current engine.
4425  *
4426  * Should be run under rtnl lock
4427  */
4428 static void bnx2x_set_reset_done(struct bnx2x *bp)
4429 {
4430 u32 val;
4431 u32 bit = BP_PATH(bp) ?
4432 BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT;
4433 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4434 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4435
4436 /* Clear the bit */
4437 val &= ~bit;
4438 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
4439
4440 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4441 }
4442
4443 /*
4444  * Set RESET_IN_PROGRESS for the current engine.
4445  *
4446  * should be run under rtnl lock
4447  */
4448 void bnx2x_set_reset_in_progress(struct bnx2x *bp)
4449 {
4450 u32 val;
4451 u32 bit = BP_PATH(bp) ?
4452 BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT;
4453 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4454 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4455
4456 /* Set the bit */
4457 val |= bit;
4458 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
4459 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4460 }
4461
4462 /*
4463  * Checks the RESET_IN_PROGRESS bit for the given engine.
4464  * should be run under rtnl lock
4465  */
4466 bool bnx2x_reset_is_done(struct bnx2x *bp, int engine)
4467 {
4468 u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4469 u32 bit = engine ?
4470 BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT;
4471
4472 /* return false if bit is set */
4473 return (val & bit) ? false : true;
4474 }
4475
4476 /*
4477  * set pf load for the current pf.
4478  *
4479  * should be run under rtnl lock
4480  */
4481 void bnx2x_set_pf_load(struct bnx2x *bp)
4482 {
4483 u32 val1, val;
4484 u32 mask = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK :
4485 BNX2X_PATH0_LOAD_CNT_MASK;
4486 u32 shift = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_SHIFT :
4487 BNX2X_PATH0_LOAD_CNT_SHIFT;
4488
4489 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4490 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4491
4492 DP(NETIF_MSG_IFUP, "Old GEN_REG_VAL=0x%08x\n", val);
4493
4494 /* get the current counter value */
4495 val1 = (val & mask) >> shift;
4496
4497 /* set bit of that PF */
4498 val1 |= (1 << bp->pf_num);
4499
4500 /* clear the old value */
4501 val &= ~mask;
4502
4503 /* set the new one */
4504 val |= ((val1 << shift) & mask);
4505
4506 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
4507 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4508 }
4509
4510 /**
4511  * bnx2x_clear_pf_load - clear pf load mark
4512  *
4513  * @bp:		driver handle
4514  *
4515  * Should be run under rtnl lock.
4516  * Decrements the load counter for the current engine. Returns
4517  * whether other functions are still loaded
4518  */
4519 bool bnx2x_clear_pf_load(struct bnx2x *bp)
4520 {
4521 u32 val1, val;
4522 u32 mask = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK :
4523 BNX2X_PATH0_LOAD_CNT_MASK;
4524 u32 shift = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_SHIFT :
4525 BNX2X_PATH0_LOAD_CNT_SHIFT;
4526
4527 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4528 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4529 DP(NETIF_MSG_IFDOWN, "Old GEN_REG_VAL=0x%08x\n", val);
4530
4531 /* get the current counter value */
4532 val1 = (val & mask) >> shift;
4533
4534 /* clear bit of that PF */
4535 val1 &= ~(1 << bp->pf_num);
4536
4537
4538 /* clear the old value */
4539
4540
4541 /* set the new one */
4542
4543 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
4544 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4545 return val1 != 0;
4546 }
4547
4548 /*
4549  * Read the load status for the current engine.
4550  *
4551  * should be run under rtnl lock
4552  */
4553 static bool bnx2x_get_load_status(struct bnx2x *bp, int engine)
4554 {
4555 u32 mask = (engine ? BNX2X_PATH1_LOAD_CNT_MASK :
4556 BNX2X_PATH0_LOAD_CNT_MASK);
4557 u32 shift = (engine ? BNX2X_PATH1_LOAD_CNT_SHIFT :
4558 BNX2X_PATH0_LOAD_CNT_SHIFT);
4559 u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4560
4561 DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "GLOB_REG=0x%08x\n", val);
4562
4563 val = (val & mask) >> shift;
4564
4565 DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "load mask for engine %d = 0x%x\n",
4566 engine, val);
4567
4568 return val != 0;
4569 }
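/* Flow sketch (illustrative): during error recovery, a would-be leader can
 * combine these helpers to decide whether an engine may be reset:
 *
 *	if (!bnx2x_get_load_status(bp, engine) &&
 *	    bnx2x_reset_is_done(bp, engine)) {
 *		... no PF is loaded and no reset is in progress ...
 *	}
 */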
4570
4571 static void _print_parity(struct bnx2x *bp, u32 reg)
4572 {
4573 pr_cont(" [0x%08x] ", REG_RD(bp, reg));
4574 }
4575
4576 static void _print_next_block(int idx, const char *blk)
4577 {
4578 pr_cont("%s%s", idx ? ", " : "", blk);
4579 }
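/* Output sketch (illustrative): together with the netdev_err() header printed
 * by bnx2x_parity_attn(), these helpers emit one continued line such as:
 *
 *	Parity errors detected in blocks: BRB [0x00000010] , PARSER [0x00000001]
 *
 * _print_next_block() inserts the ", " separator before every block after the
 * first; _print_parity() appends the raw parity status register value.
 */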
4580
4581 static bool bnx2x_check_blocks_with_parity0(struct bnx2x *bp, u32 sig,
4582 int *par_num, bool print)
4583 {
4584 u32 cur_bit;
4585 bool res;
4586 int i;
4587
4588 res = false;
4589
4590 for (i = 0; sig; i++) {
4591 cur_bit = (0x1UL << i);
4592 if (sig & cur_bit) {
4593 res |= true;
4594
4595 if (print) {
4596 switch (cur_bit) {
4597 case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
4598 _print_next_block((*par_num)++, "BRB");
4599 _print_parity(bp,
4600 BRB1_REG_BRB1_PRTY_STS);
4601 break;
4602 case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
4603 _print_next_block((*par_num)++,
4604 "PARSER");
4605 _print_parity(bp, PRS_REG_PRS_PRTY_STS);
4606 break;
4607 case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
4608 _print_next_block((*par_num)++, "TSDM");
4609 _print_parity(bp,
4610 TSDM_REG_TSDM_PRTY_STS);
4611 break;
4612 case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
4613 _print_next_block((*par_num)++,
4614 "SEARCHER");
4615 _print_parity(bp, SRC_REG_SRC_PRTY_STS);
4616 break;
4617 case AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR:
4618 _print_next_block((*par_num)++, "TCM");
4619 _print_parity(bp, TCM_REG_TCM_PRTY_STS);
4620 break;
4621 case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
4622 _print_next_block((*par_num)++,
4623 "TSEMI");
4624 _print_parity(bp,
4625 TSEM_REG_TSEM_PRTY_STS_0);
4626 _print_parity(bp,
4627 TSEM_REG_TSEM_PRTY_STS_1);
4628 break;
4629 case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
4630 _print_next_block((*par_num)++, "XPB");
4631 _print_parity(bp, GRCBASE_XPB +
4632 PB_REG_PB_PRTY_STS);
4633 break;
4634 }
4635 }
4636
4637 /* Clear the bit */
4638 sig &= ~cur_bit;
4639 }
4640 }
4641
4642 return res;
4643 }
4644
4645 static bool bnx2x_check_blocks_with_parity1(struct bnx2x *bp, u32 sig,
4646 int *par_num, bool *global,
4647 bool print)
4648 {
4649 u32 cur_bit;
4650 bool res;
4651 int i;
4652
4653 res = false;
4654
4655 for (i = 0; sig; i++) {
4656 cur_bit = (0x1UL << i);
4657 if (sig & cur_bit) {
4658 res |= true;
4659 switch (cur_bit) {
4660 case AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR:
4661 if (print) {
4662 _print_next_block((*par_num)++, "PBF");
4663 _print_parity(bp, PBF_REG_PBF_PRTY_STS);
4664 }
4665 break;
4666 case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
4667 if (print) {
4668 _print_next_block((*par_num)++, "QM");
4669 _print_parity(bp, QM_REG_QM_PRTY_STS);
4670 }
4671 break;
4672 case AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR:
4673 if (print) {
4674 _print_next_block((*par_num)++, "TM");
4675 _print_parity(bp, TM_REG_TM_PRTY_STS);
4676 }
4677 break;
4678 case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
4679 if (print) {
4680 _print_next_block((*par_num)++, "XSDM");
4681 _print_parity(bp,
4682 XSDM_REG_XSDM_PRTY_STS);
4683 }
4684 break;
4685 case AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR:
4686 if (print) {
4687 _print_next_block((*par_num)++, "XCM");
4688 _print_parity(bp, XCM_REG_XCM_PRTY_STS);
4689 }
4690 break;
4691 case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
4692 if (print) {
4693 _print_next_block((*par_num)++,
4694 "XSEMI");
4695 _print_parity(bp,
4696 XSEM_REG_XSEM_PRTY_STS_0);
4697 _print_parity(bp,
4698 XSEM_REG_XSEM_PRTY_STS_1);
4699 }
4700 break;
4701 case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
4702 if (print) {
4703 _print_next_block((*par_num)++,
4704 "DOORBELLQ");
4705 _print_parity(bp,
4706 DORQ_REG_DORQ_PRTY_STS);
4707 }
4708 break;
4709 case AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR:
4710 if (print) {
4711 _print_next_block((*par_num)++, "NIG");
4712 if (CHIP_IS_E1x(bp)) {
4713 _print_parity(bp,
4714 NIG_REG_NIG_PRTY_STS);
4715 } else {
4716 _print_parity(bp,
4717 NIG_REG_NIG_PRTY_STS_0);
4718 _print_parity(bp,
4719 NIG_REG_NIG_PRTY_STS_1);
4720 }
4721 }
4722 break;
4723 case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
4724 if (print)
4725 _print_next_block((*par_num)++,
4726 "VAUX PCI CORE");
4727 *global = true;
4728 break;
4729 case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
4730 if (print) {
4731 _print_next_block((*par_num)++,
4732 "DEBUG");
4733 _print_parity(bp, DBG_REG_DBG_PRTY_STS);
4734 }
4735 break;
4736 case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
4737 if (print) {
4738 _print_next_block((*par_num)++, "USDM");
4739 _print_parity(bp,
4740 USDM_REG_USDM_PRTY_STS);
4741 }
4742 break;
4743 case AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR:
4744 if (print) {
4745 _print_next_block((*par_num)++, "UCM");
4746 _print_parity(bp, UCM_REG_UCM_PRTY_STS);
4747 }
4748 break;
4749 case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
4750 if (print) {
4751 _print_next_block((*par_num)++,
4752 "USEMI");
4753 _print_parity(bp,
4754 USEM_REG_USEM_PRTY_STS_0);
4755 _print_parity(bp,
4756 USEM_REG_USEM_PRTY_STS_1);
4757 }
4758 break;
4759 case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
4760 if (print) {
4761 _print_next_block((*par_num)++, "UPB");
4762 _print_parity(bp, GRCBASE_UPB +
4763 PB_REG_PB_PRTY_STS);
4764 }
4765 break;
4766 case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
4767 if (print) {
4768 _print_next_block((*par_num)++, "CSDM");
4769 _print_parity(bp,
4770 CSDM_REG_CSDM_PRTY_STS);
4771 }
4772 break;
4773 case AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR:
4774 if (print) {
4775 _print_next_block((*par_num)++, "CCM");
4776 _print_parity(bp, CCM_REG_CCM_PRTY_STS);
4777 }
4778 break;
4779 }
4780
4781 /* Clear the bit */
4782 sig &= ~cur_bit;
4783 }
4784 }
4785
4786 return res;
4787 }
4788
4789 static bool bnx2x_check_blocks_with_parity2(struct bnx2x *bp, u32 sig,
4790 int *par_num, bool print)
4791 {
4792 u32 cur_bit;
4793 bool res;
4794 int i;
4795
4796 res = false;
4797
4798 for (i = 0; sig; i++) {
4799 cur_bit = (0x1UL << i);
4800 if (sig & cur_bit) {
4801 res = true;
4802 if (print) {
4803 switch (cur_bit) {
4804 case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
4805 _print_next_block((*par_num)++,
4806 "CSEMI");
4807 _print_parity(bp,
4808 CSEM_REG_CSEM_PRTY_STS_0);
4809 _print_parity(bp,
4810 CSEM_REG_CSEM_PRTY_STS_1);
4811 break;
4812 case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
4813 _print_next_block((*par_num)++, "PXP");
4814 _print_parity(bp, PXP_REG_PXP_PRTY_STS);
4815 _print_parity(bp,
4816 PXP2_REG_PXP2_PRTY_STS_0);
4817 _print_parity(bp,
4818 PXP2_REG_PXP2_PRTY_STS_1);
4819 break;
4820 case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
4821 _print_next_block((*par_num)++,
4822 "PXPPCICLOCKCLIENT");
4823 break;
4824 case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
4825 _print_next_block((*par_num)++, "CFC");
4826 _print_parity(bp,
4827 CFC_REG_CFC_PRTY_STS);
4828 break;
4829 case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
4830 _print_next_block((*par_num)++, "CDU");
4831 _print_parity(bp, CDU_REG_CDU_PRTY_STS);
4832 break;
4833 case AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR:
4834 _print_next_block((*par_num)++, "DMAE");
4835 _print_parity(bp,
4836 DMAE_REG_DMAE_PRTY_STS);
4837 break;
4838 case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
4839 _print_next_block((*par_num)++, "IGU");
4840 if (CHIP_IS_E1x(bp))
4841 _print_parity(bp,
4842 HC_REG_HC_PRTY_STS);
4843 else
4844 _print_parity(bp,
4845 IGU_REG_IGU_PRTY_STS);
4846 break;
4847 case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
4848 _print_next_block((*par_num)++, "MISC");
4849 _print_parity(bp,
4850 MISC_REG_MISC_PRTY_STS);
4851 break;
4852 }
4853 }
4854
4855 /* Clear the bit */
4856 sig &= ~cur_bit;
4857 }
4858 }
4859
4860 return res;
4861 }
4862
4863 static bool bnx2x_check_blocks_with_parity3(struct bnx2x *bp, u32 sig,
4864 int *par_num, bool *global,
4865 bool print)
4866 {
4867 bool res = false;
4868 u32 cur_bit;
4869 int i;
4870
4871 for (i = 0; sig; i++) {
4872 cur_bit = (0x1UL << i);
4873 if (sig & cur_bit) {
4874 switch (cur_bit) {
4875 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
4876 if (print)
4877 _print_next_block((*par_num)++,
4878 "MCP ROM");
4879 *global = true;
4880 res = true;
4881 break;
4882 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
4883 if (print)
4884 _print_next_block((*par_num)++,
4885 "MCP UMP RX");
4886 *global = true;
4887 res = true;
4888 break;
4889 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
4890 if (print)
4891 _print_next_block((*par_num)++,
4892 "MCP UMP TX");
4893 *global = true;
4894 res = true;
4895 break;
4896 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
4897 (*par_num)++;
4898 /* clear latched SCPAD PARITY from MCP */
4899 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL,
4900 1UL << 10);
4901 break;
4902 }
4903
4904
4905 sig &= ~cur_bit;
4906 }
4907 }
4908
4909 return res;
4910 }
4911
4912 static bool bnx2x_check_blocks_with_parity4(struct bnx2x *bp, u32 sig,
4913 int *par_num, bool print)
4914 {
4915 u32 cur_bit;
4916 bool res;
4917 int i;
4918
4919 res = false;
4920
4921 for (i = 0; sig; i++) {
4922 cur_bit = (0x1UL << i);
4923 if (sig & cur_bit) {
4924 res = true;
4925 if (print) {
4926 switch (cur_bit) {
4927 case AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR:
4928 _print_next_block((*par_num)++,
4929 "PGLUE_B");
4930 _print_parity(bp,
4931 PGLUE_B_REG_PGLUE_B_PRTY_STS);
4932 break;
4933 case AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR:
4934 _print_next_block((*par_num)++, "ATC");
4935 _print_parity(bp,
4936 ATC_REG_ATC_PRTY_STS);
4937 break;
4938 }
4939 }
4940 /* Clear the bit */
4941 sig &= ~cur_bit;
4942 }
4943 }
4944
4945 return res;
4946 }
4947
4948 static bool bnx2x_parity_attn(struct bnx2x *bp, bool *global, bool print,
4949 u32 *sig)
4950 {
4951 bool res = false;
4952
4953 if ((sig[0] & HW_PRTY_ASSERT_SET_0) ||
4954 (sig[1] & HW_PRTY_ASSERT_SET_1) ||
4955 (sig[2] & HW_PRTY_ASSERT_SET_2) ||
4956 (sig[3] & HW_PRTY_ASSERT_SET_3) ||
4957 (sig[4] & HW_PRTY_ASSERT_SET_4)) {
4958 int par_num = 0;
4959
4960 DP(NETIF_MSG_HW, "Was parity error: HW block parity attention:\n"
4961 "[0]:0x%08x [1]:0x%08x [2]:0x%08x [3]:0x%08x [4]:0x%08x\n",
4962 sig[0] & HW_PRTY_ASSERT_SET_0,
4963 sig[1] & HW_PRTY_ASSERT_SET_1,
4964 sig[2] & HW_PRTY_ASSERT_SET_2,
4965 sig[3] & HW_PRTY_ASSERT_SET_3,
4966 sig[4] & HW_PRTY_ASSERT_SET_4);
4967 if (print) {
4968 if (((sig[0] & HW_PRTY_ASSERT_SET_0) ||
4969 (sig[1] & HW_PRTY_ASSERT_SET_1) ||
4970 (sig[2] & HW_PRTY_ASSERT_SET_2) ||
4971 (sig[4] & HW_PRTY_ASSERT_SET_4)) ||
4972 (sig[3] & HW_PRTY_ASSERT_SET_3_WITHOUT_SCPAD)) {
4973 netdev_err(bp->dev,
4974 "Parity errors detected in blocks: ");
4975 } else {
4976 print = false;
4977 }
4978 }
4979 res |= bnx2x_check_blocks_with_parity0(bp,
4980 sig[0] & HW_PRTY_ASSERT_SET_0, &par_num, print);
4981 res |= bnx2x_check_blocks_with_parity1(bp,
4982 sig[1] & HW_PRTY_ASSERT_SET_1, &par_num, global, print);
4983 res |= bnx2x_check_blocks_with_parity2(bp,
4984 sig[2] & HW_PRTY_ASSERT_SET_2, &par_num, print);
4985 res |= bnx2x_check_blocks_with_parity3(bp,
4986 sig[3] & HW_PRTY_ASSERT_SET_3, &par_num, global, print);
4987 res |= bnx2x_check_blocks_with_parity4(bp,
4988 sig[4] & HW_PRTY_ASSERT_SET_4, &par_num, print);
4989
4990 if (print)
4991 pr_cont("\n");
4992 }
4993
4994 return res;
4995 }
4996
4997 /**
4998  * bnx2x_chk_parity_attn - checks for parity attentions or prints them
4999  *
5000  * @bp:		driver handle
5001  * @global:	true if there was a global attention
5002  * @print:	show parity attention in syslog
5003  */
5004 bool bnx2x_chk_parity_attn(struct bnx2x *bp, bool *global, bool print)
5005 {
5006 struct attn_route attn = { {0} };
5007 int port = BP_PORT(bp);
5008
5009 attn.sig[0] = REG_RD(bp,
5010 MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
5011 port*4);
5012 attn.sig[1] = REG_RD(bp,
5013 MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 +
5014 port*4);
5015 attn.sig[2] = REG_RD(bp,
5016 MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 +
5017 port*4);
5018 attn.sig[3] = REG_RD(bp,
5019 MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
5020 port*4);
5021
5022 /* Since MCP attentions can't be disabled inside the block, we need to
5023  * read AEU registers to see whether they're currently disabled */
5024 attn.sig[3] &= ((REG_RD(bp,
5025 !port ? MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0
5026 : MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0) &
5027 MISC_AEU_ENABLE_MCP_PRTY_BITS) |
5028 ~MISC_AEU_ENABLE_MCP_PRTY_BITS);
5029
5030 if (!CHIP_IS_E1x(bp))
5031 attn.sig[4] = REG_RD(bp,
5032 MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 +
5033 port*4);
5034
5035 return bnx2x_parity_attn(bp, global, print, attn.sig);
5036 }
5037
5038 static void bnx2x_attn_int_deasserted4(struct bnx2x *bp, u32 attn)
5039 {
5040 u32 val;
5041 if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) {
5042
5043 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS_CLR);
5044 BNX2X_ERR("PGLUE hw attention 0x%x\n", val);
5045 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR)
5046 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR\n");
5047 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR)
5048 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR\n");
5049 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN)
5050 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN\n");
5051 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN)
5052 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN\n");
5053 if (val &
5054 PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN)
5055 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN\n");
5056 if (val &
5057 PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN)
5058 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN\n");
5059 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN)
5060 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN\n");
5061 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN)
5062 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN\n");
5063 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW)
5064 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW\n");
5065 }
5066 if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) {
5067 val = REG_RD(bp, ATC_REG_ATC_INT_STS_CLR);
5068 BNX2X_ERR("ATC hw attention 0x%x\n", val);
5069 if (val & ATC_ATC_INT_STS_REG_ADDRESS_ERROR)
5070 BNX2X_ERR("ATC_ATC_INT_STS_REG_ADDRESS_ERROR\n");
5071 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND)
5072 BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND\n");
5073 if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS)
5074 BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS\n");
5075 if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT)
5076 BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT\n");
5077 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR)
5078 BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR\n");
5079 if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU)
5080 BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU\n");
5081 }
5082
5083 if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
5084 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)) {
5085 BNX2X_ERR("FATAL parity attention set4 0x%x\n",
5086 (u32)(attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
5087 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)));
5088 }
5089 }
5090
5091 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
5092 {
5093 struct attn_route attn, *group_mask;
5094 int port = BP_PORT(bp);
5095 int index;
5096 u32 reg_addr;
5097 u32 val;
5098 u32 aeu_mask;
5099 bool global = false;
5100
5101 /* need to take HW lock because MCP or other port might also
5102    try to handle this event */
5103 bnx2x_acquire_alr(bp);
5104
5105 if (bnx2x_chk_parity_attn(bp, &global, true)) {
5106 #ifndef BNX2X_STOP_ON_ERROR
5107 bp->recovery_state = BNX2X_RECOVERY_INIT;
5108 schedule_delayed_work(&bp->sp_rtnl_task, 0);
5109 /* Disable HW interrupts */
5110 bnx2x_int_disable(bp);
5111 /* In case of parity errors don't handle attentions so that
5112  * other functions would "see" the parity errors.
5113  */
5114 #else
5115 bnx2x_panic();
5116 #endif
5117 bnx2x_release_alr(bp);
5118 return;
5119 }
5120
5121 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
5122 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
5123 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
5124 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
5125 if (!CHIP_IS_E1x(bp))
5126 attn.sig[4] =
5127 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4);
5128 else
5129 attn.sig[4] = 0;
5130
5131 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x %08x\n",
5132 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3], attn.sig[4]);
5133
5134 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
5135 if (deasserted & (1 << index)) {
5136 group_mask = &bp->attn_group[index];
5137
5138 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x %08x\n",
5139 index,
5140 group_mask->sig[0], group_mask->sig[1],
5141 group_mask->sig[2], group_mask->sig[3],
5142 group_mask->sig[4]);
5143
5144 bnx2x_attn_int_deasserted4(bp,
5145 attn.sig[4] & group_mask->sig[4]);
5146 bnx2x_attn_int_deasserted3(bp,
5147 attn.sig[3] & group_mask->sig[3]);
5148 bnx2x_attn_int_deasserted1(bp,
5149 attn.sig[1] & group_mask->sig[1]);
5150 bnx2x_attn_int_deasserted2(bp,
5151 attn.sig[2] & group_mask->sig[2]);
5152 bnx2x_attn_int_deasserted0(bp,
5153 attn.sig[0] & group_mask->sig[0]);
5154 }
5155 }
5156
5157 bnx2x_release_alr(bp);
5158
5159 if (bp->common.int_block == INT_BLOCK_HC)
5160 reg_addr = (HC_REG_COMMAND_REG + port*32 +
5161 COMMAND_REG_ATTN_BITS_CLR);
5162 else
5163 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_CLR_UPPER*8);
5164
5165 val = ~deasserted;
5166 DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", val,
5167 (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
5168 REG_WR(bp, reg_addr, val);
5169
5170 if (~bp->attn_state & deasserted)
5171 BNX2X_ERR("IGU ERROR\n");
5172
5173 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
5174 MISC_REG_AEU_MASK_ATTN_FUNC_0;
5175
5176 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
5177 aeu_mask = REG_RD(bp, reg_addr);
5178
5179 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
5180 aeu_mask, deasserted);
5181 aeu_mask |= (deasserted & 0x3ff);
5182 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
5183
5184 REG_WR(bp, reg_addr, aeu_mask);
5185 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
5186
5187 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
5188 bp->attn_state &= ~deasserted;
5189 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
5190 }
5191
5192 static void bnx2x_attn_int(struct bnx2x *bp)
5193 {
5194 /* read local copy of bits */
5195 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
5196 attn_bits);
5197 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
5198 attn_bits_ack);
5199 u32 attn_state = bp->attn_state;
5200
5201 /* look for changed bits */
5202 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
5203 u32 deasserted = ~attn_bits & attn_ack & attn_state;
5204
5205 DP(NETIF_MSG_HW,
5206 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
5207 attn_bits, attn_ack, asserted, deasserted);
5208
5209 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
5210 BNX2X_ERR("BAD attention state\n");
5211
5212 /* handle bits that were raised */
5213 if (asserted)
5214 bnx2x_attn_int_asserted(bp, asserted);
5215
5216 if (deasserted)
5217 bnx2x_attn_int_deasserted(bp, deasserted);
5218 }
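/* Worked example (hypothetical values): with attn_bits = 0x6,
 * attn_ack = 0x2 and attn_state = 0x2, bit 2 was newly raised and nothing
 * was deasserted:
 *
 *	asserted   =  0x6 & ~0x2 & ~0x2 = 0x4
 *	deasserted = ~0x6 &  0x2 &  0x2 = 0x0
 */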
5219
5220 void bnx2x_igu_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 segment,
5221 u16 index, u8 op, u8 update)
5222 {
5223 u32 igu_addr = bp->igu_base_addr;
5224 igu_addr += (IGU_CMD_INT_ACK_BASE + igu_sb_id)*8;
5225 bnx2x_igu_ack_sb_gen(bp, igu_sb_id, segment, index, op, update,
5226 igu_addr);
5227 }
5228
5229 static void bnx2x_update_eq_prod(struct bnx2x *bp, u16 prod)
5230 {
5231 /* No memory barriers */
5232 storm_memset_eq_prod(bp, prod, BP_FUNC(bp));
5233 }
5234
5235 static int bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid,
5236 union event_ring_elem *elem)
5237 {
5238 u8 err = elem->message.error;
5239
5240 if (!bp->cnic_eth_dev.starting_cid ||
5241 (cid < bp->cnic_eth_dev.starting_cid &&
5242 cid != bp->cnic_eth_dev.iscsi_l2_cid))
5243 return 1;
5244
5245 DP(BNX2X_MSG_SP, "got delete ramrod for CNIC CID %d\n", cid);
5246
5247 if (unlikely(err)) {
5248
5249 BNX2X_ERR("got delete ramrod for CNIC CID %d with error!\n",
5250 cid);
5251 bnx2x_panic_dump(bp, false);
5252 }
5253 bnx2x_cnic_cfc_comp(bp, cid, err);
5254 return 0;
5255 }
5256
5257 static void bnx2x_handle_mcast_eqe(struct bnx2x *bp)
5258 {
5259 struct bnx2x_mcast_ramrod_params rparam;
5260 int rc;
5261
5262 memset(&rparam, 0, sizeof(rparam));
5263
5264 rparam.mcast_obj = &bp->mcast_obj;
5265
5266 netif_addr_lock_bh(bp->dev);
5267
5268 /* Clear pending state for the last command */
5269 bp->mcast_obj.raw.clear_pending(&bp->mcast_obj.raw);
5270
5271 /* If there are pending mcast commands - send them */
5272 if (bp->mcast_obj.check_pending(&bp->mcast_obj)) {
5273 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
5274 if (rc < 0)
5275 BNX2X_ERR("Failed to send pending mcast commands: %d\n",
5276 rc);
5277 }
5278
5279 netif_addr_unlock_bh(bp->dev);
5280 }
5281
5282 static void bnx2x_handle_classification_eqe(struct bnx2x *bp,
5283 union event_ring_elem *elem)
5284 {
5285 unsigned long ramrod_flags = 0;
5286 int rc = 0;
5287 u32 echo = le32_to_cpu(elem->message.data.eth_event.echo);
5288 u32 cid = echo & BNX2X_SWCID_MASK;
5289 struct bnx2x_vlan_mac_obj *vlan_mac_obj;
5290
5291
5292 /* Always push next commands out, don't wait here */
5293
5294 switch (echo >> BNX2X_SWCID_SHIFT) {
5295 case BNX2X_FILTER_MAC_PENDING:
5296 DP(BNX2X_MSG_SP, "Got SETUP_MAC completions\n");
5297 if (CNIC_LOADED(bp) && (cid == BNX2X_ISCSI_ETH_CID(bp)))
5298 vlan_mac_obj = &bp->iscsi_l2_mac_obj;
5299 else
5300 vlan_mac_obj = &bp->sp_objs[cid].mac_obj;
5301
5302 break;
5303 case BNX2X_FILTER_VLAN_PENDING:
5304 DP(BNX2X_MSG_SP, "Got SETUP_VLAN completions\n");
5305 vlan_mac_obj = &bp->sp_objs[cid].vlan_obj;
5306 break;
5307 case BNX2X_FILTER_MCAST_PENDING:
5308 DP(BNX2X_MSG_SP, "Got SETUP_MCAST completions\n");
5309 /* This is only relevant for 57710 where multicast MACs are
5310  * configured as unicast MACs using the same ramrod.
5311  */
5312 bnx2x_handle_mcast_eqe(bp);
5313 return;
5314 default:
5315 BNX2X_ERR("Unsupported classification command: 0x%x\n", echo);
5316 return;
5317 }
5318
5319 rc = vlan_mac_obj->complete(bp, vlan_mac_obj, elem, &ramrod_flags);
5320
5321 if (rc < 0)
5322 BNX2X_ERR("Failed to schedule new commands: %d\n", rc);
5323 else if (rc > 0)
5324 DP(BNX2X_MSG_SP, "Scheduled next pending commands...\n");
5325 }
5326
5327 static void bnx2x_set_iscsi_eth_rx_mode(struct bnx2x *bp, bool start);
5328
5329 static void bnx2x_handle_rx_mode_eqe(struct bnx2x *bp)
5330 {
5331 netif_addr_lock_bh(bp->dev);
5332
5333 clear_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state);
5334
5335 /* Send rx_mode command again if it was requested */
5336 if (test_and_clear_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state))
5337 bnx2x_set_storm_rx_mode(bp);
5338 else if (test_and_clear_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED,
5339 &bp->sp_state))
5340 bnx2x_set_iscsi_eth_rx_mode(bp, true);
5341 else if (test_and_clear_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED,
5342 &bp->sp_state))
5343 bnx2x_set_iscsi_eth_rx_mode(bp, false);
5344
5345 netif_addr_unlock_bh(bp->dev);
5346 }
5347
5348 static void bnx2x_after_afex_vif_lists(struct bnx2x *bp,
5349 union event_ring_elem *elem)
5350 {
5351 if (elem->message.data.vif_list_event.echo == VIF_LIST_RULE_GET) {
5352 DP(BNX2X_MSG_SP,
5353 "afex: ramrod completed VIF LIST_GET, addrs 0x%x\n",
5354 elem->message.data.vif_list_event.func_bit_map);
5355 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_LISTGET_ACK,
5356 elem->message.data.vif_list_event.func_bit_map);
5357 } else if (elem->message.data.vif_list_event.echo ==
5358 VIF_LIST_RULE_SET) {
5359 DP(BNX2X_MSG_SP, "afex: ramrod completed VIF LIST_SET\n");
5360 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_LISTSET_ACK, 0);
5361 }
5362 }
5363
5364
5365 static void bnx2x_after_function_update(struct bnx2x *bp)
5366 {
5367 int q, rc;
5368 struct bnx2x_fastpath *fp;
5369 struct bnx2x_queue_state_params queue_params = {NULL};
5370 struct bnx2x_queue_update_params *q_update_params =
5371 &queue_params.params.update;
5372
5373 /* Send Q update command with afex vlan removal values for all Qs */
5374 queue_params.cmd = BNX2X_Q_CMD_UPDATE;
5375
5376 /* set silent vlan removal values according to vlan mode */
5377 __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
5378 &q_update_params->update_flags);
5379 __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
5380 &q_update_params->update_flags);
5381 __set_bit(RAMROD_COMP_WAIT, &queue_params.ramrod_flags);
5382
5383 /* in access mode, mask and value are 0 to strip all vlans */
5384 if (bp->afex_vlan_mode == FUNC_MF_CFG_AFEX_VLAN_ACCESS_MODE) {
5385 q_update_params->silent_removal_value = 0;
5386 q_update_params->silent_removal_mask = 0;
5387 } else {
5388 q_update_params->silent_removal_value =
5389 (bp->afex_def_vlan_tag & VLAN_VID_MASK);
5390 q_update_params->silent_removal_mask = VLAN_VID_MASK;
5391 }
5392
5393 for_each_eth_queue(bp, q) {
5394 /* Set the appropriate Queue object */
5395 fp = &bp->fp[q];
5396 queue_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
5397
5398 /* send the ramrod */
5399 rc = bnx2x_queue_state_change(bp, &queue_params);
5400 if (rc < 0)
5401 BNX2X_ERR("Failed to config silent vlan rem for Q %d\n",
5402 q);
5403 }
5404
5405 if (!NO_FCOE(bp) && CNIC_ENABLED(bp)) {
5406 fp = &bp->fp[FCOE_IDX(bp)];
5407 queue_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
5408
5409 /* clear pending completion bit */
5410 __clear_bit(RAMROD_COMP_WAIT, &queue_params.ramrod_flags);
5411
5412 /* mark latest Q bit */
5413 smp_mb__before_atomic();
5414 set_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state);
5415 smp_mb__after_atomic();
5416
5417 /* send Q update ramrod for FCoE Q */
5418 rc = bnx2x_queue_state_change(bp, &queue_params);
5419 if (rc < 0)
5420 BNX2X_ERR("Failed to config silent vlan rem for Q %d\n",
5421 q);
5422 } else {
5423 /* If no FCoE ring - ACK MCP now */
5424 bnx2x_link_report(bp);
5425 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);
5426 }
5427 }
5428
5429 static struct bnx2x_queue_sp_obj *bnx2x_cid_to_q_obj(
5430 struct bnx2x *bp, u32 cid)
5431 {
5432 DP(BNX2X_MSG_SP, "retrieving fp from cid %d\n", cid);
5433
5434 if (CNIC_LOADED(bp) && (cid == BNX2X_FCOE_ETH_CID(bp)))
5435 return &bnx2x_fcoe_sp_obj(bp, q_obj);
5436 else
5437 return &bp->sp_objs[CID_TO_FP(cid, bp)].q_obj;
5438 }
5439
5440 static void bnx2x_eq_int(struct bnx2x *bp)
5441 {
5442 u16 hw_cons, sw_cons, sw_prod;
5443 union event_ring_elem *elem;
5444 u8 echo;
5445 u32 cid;
5446 u8 opcode;
5447 int rc, spqe_cnt = 0;
5448 struct bnx2x_queue_sp_obj *q_obj;
5449 struct bnx2x_func_sp_obj *f_obj = &bp->func_obj;
5450 struct bnx2x_raw_obj *rss_raw = &bp->rss_conf_obj.raw;
5451
5452 hw_cons = le16_to_cpu(*bp->eq_cons_sb);
5453
5454 /* The hw_cons range is 1-255, 257 - the sw_cons range is 0-254, 256.
5455  * When we get the next-page we need to adjust so the loop
5456  * condition below will be met. The next element is the size of a
5457  * regular element and hence incrementing by 1
5458  */
5459 if ((hw_cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE)
5460 hw_cons++;
5461
5462 /* This function may never run in parallel with itself for a
5463  * specific bp, thus there is no need in a "paired" read memory
5464  * barrier here.
5465  */
5466 sw_cons = bp->eq_cons;
5467 sw_prod = bp->eq_prod;
5468
5469 DP(BNX2X_MSG_SP, "EQ: hw_cons %u sw_cons %u bp->eq_spq_left %x\n",
5470 hw_cons, sw_cons, atomic_read(&bp->eq_spq_left));
5471
5472 for (; sw_cons != hw_cons;
5473 sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) {
5474
5475 elem = &bp->eq_ring[EQ_DESC(sw_cons)];
5476
5477 rc = bnx2x_iov_eq_sp_event(bp, elem);
5478 if (!rc) {
5479 DP(BNX2X_MSG_IOV, "bnx2x_iov_eq_sp_event returned %d\n",
5480 rc);
5481 goto next_spqe;
5482 }
5483
5484 opcode = elem->message.opcode;
5485
5486 /* handle eq element */
5487 switch (opcode) {
5488 case EVENT_RING_OPCODE_VF_PF_CHANNEL:
5489 bnx2x_vf_mbx_schedule(bp,
5490 &elem->message.data.vf_pf_event);
5491 continue;
5492
5493 case EVENT_RING_OPCODE_STAT_QUERY:
5494 DP_AND((BNX2X_MSG_SP | BNX2X_MSG_STATS),
5495 "got statistics comp event %d\n",
5496 bp->stats_comp++);
5497 /* nothing to do with stats comp */
5498 goto next_spqe;
5499
5500 case EVENT_RING_OPCODE_CFC_DEL:
5501 /* handle according to cid range */
5502 /*
5503  * we may want to verify here that the bp state is
5504  * HALTING
5505  */
5506
5507
5508 cid = SW_CID(elem->message.data.cfc_del_event.cid);
5509
5510 DP(BNX2X_MSG_SP,
5511 "got delete ramrod for MULTI[%d]\n", cid);
5512
5513 if (CNIC_LOADED(bp) &&
5514 !bnx2x_cnic_handle_cfc_del(bp, cid, elem))
5515 goto next_spqe;
5516
5517 q_obj = bnx2x_cid_to_q_obj(bp, cid);
5518
5519 if (q_obj->complete_cmd(bp, q_obj, BNX2X_Q_CMD_CFC_DEL))
5520 break;
5521
5522 goto next_spqe;
5523
5524 case EVENT_RING_OPCODE_STOP_TRAFFIC:
5525 DP(BNX2X_MSG_SP | BNX2X_MSG_DCB, "got STOP TRAFFIC\n");
5526 bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_PAUSED);
5527 if (f_obj->complete_cmd(bp, f_obj,
5528 BNX2X_F_CMD_TX_STOP))
5529 break;
5530 goto next_spqe;
5531
5532 case EVENT_RING_OPCODE_START_TRAFFIC:
5533 DP(BNX2X_MSG_SP | BNX2X_MSG_DCB, "got START TRAFFIC\n");
5534 bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_RELEASED);
5535 if (f_obj->complete_cmd(bp, f_obj,
5536 BNX2X_F_CMD_TX_START))
5537 break;
5538 goto next_spqe;
5539
5540 case EVENT_RING_OPCODE_FUNCTION_UPDATE:
5541 echo = elem->message.data.function_update_event.echo;
5542 if (echo == SWITCH_UPDATE) {
5543 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
5544 "got FUNC_SWITCH_UPDATE ramrod\n");
5545 if (f_obj->complete_cmd(
5546 bp, f_obj, BNX2X_F_CMD_SWITCH_UPDATE))
5547 break;
5548
5549 } else {
5550 int cmd = BNX2X_SP_RTNL_AFEX_F_UPDATE;
5551
5552 DP(BNX2X_MSG_SP | BNX2X_MSG_MCP,
5553 "AFEX: ramrod completed FUNCTION_UPDATE\n");
5554 f_obj->complete_cmd(bp, f_obj,
5555 BNX2X_F_CMD_AFEX_UPDATE);
5556
5557 /* We will perform the Queues update from
5558  * the sp_rtnl task as all Queue SP operations
5559  * should run under rtnl_lock.
5560  */
5561 bnx2x_schedule_sp_rtnl(bp, cmd, 0);
5562 }
5563
5564 goto next_spqe;
5565
5566 case EVENT_RING_OPCODE_AFEX_VIF_LISTS:
5567 f_obj->complete_cmd(bp, f_obj,
5568 BNX2X_F_CMD_AFEX_VIFLISTS);
5569 bnx2x_after_afex_vif_lists(bp, elem);
5570 goto next_spqe;
5571 case EVENT_RING_OPCODE_FUNCTION_START:
5572 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
5573 "got FUNC_START ramrod\n");
5574 if (f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_START))
5575 break;
5576
5577 goto next_spqe;
5578
5579 case EVENT_RING_OPCODE_FUNCTION_STOP:
5580 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
5581 "got FUNC_STOP ramrod\n");
5582 if (f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_STOP))
5583 break;
5584
5585 goto next_spqe;
5586
5587 case EVENT_RING_OPCODE_SET_TIMESYNC:
5588 DP(BNX2X_MSG_SP | BNX2X_MSG_PTP,
5589 "got set_timesync ramrod completion\n");
5590 if (f_obj->complete_cmd(bp, f_obj,
5591 BNX2X_F_CMD_SET_TIMESYNC))
5592 break;
5593 goto next_spqe;
5594 }
5595
5596 switch (opcode | bp->state) {
5597 case (EVENT_RING_OPCODE_RSS_UPDATE_RULES |
5598 BNX2X_STATE_OPEN):
5599 case (EVENT_RING_OPCODE_RSS_UPDATE_RULES |
5600 BNX2X_STATE_OPENING_WAIT4_PORT):
5601 case (EVENT_RING_OPCODE_RSS_UPDATE_RULES |
5602 BNX2X_STATE_CLOSING_WAIT4_HALT):
5603 DP(BNX2X_MSG_SP, "got RSS_UPDATE ramrod. CID %d\n",
5604 SW_CID(elem->message.data.eth_event.echo));
5605 rss_raw->clear_pending(rss_raw);
5606 break;
5607
5608 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_OPEN):
5609 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_DIAG):
5610 case (EVENT_RING_OPCODE_SET_MAC |
5611 BNX2X_STATE_CLOSING_WAIT4_HALT):
5612 case (EVENT_RING_OPCODE_CLASSIFICATION_RULES |
5613 BNX2X_STATE_OPEN):
5614 case (EVENT_RING_OPCODE_CLASSIFICATION_RULES |
5615 BNX2X_STATE_DIAG):
5616 case (EVENT_RING_OPCODE_CLASSIFICATION_RULES |
5617 BNX2X_STATE_CLOSING_WAIT4_HALT):
5618 DP(BNX2X_MSG_SP, "got (un)set vlan/mac ramrod\n");
5619 bnx2x_handle_classification_eqe(bp, elem);
5620 break;
5621
5622 case (EVENT_RING_OPCODE_MULTICAST_RULES |
5623 BNX2X_STATE_OPEN):
5624 case (EVENT_RING_OPCODE_MULTICAST_RULES |
5625 BNX2X_STATE_DIAG):
5626 case (EVENT_RING_OPCODE_MULTICAST_RULES |
5627 BNX2X_STATE_CLOSING_WAIT4_HALT):
5628 DP(BNX2X_MSG_SP, "got mcast ramrod\n");
5629 bnx2x_handle_mcast_eqe(bp);
5630 break;
5631
5632 case (EVENT_RING_OPCODE_FILTERS_RULES |
5633 BNX2X_STATE_OPEN):
5634 case (EVENT_RING_OPCODE_FILTERS_RULES |
5635 BNX2X_STATE_DIAG):
5636 case (EVENT_RING_OPCODE_FILTERS_RULES |
5637 BNX2X_STATE_CLOSING_WAIT4_HALT):
5638 DP(BNX2X_MSG_SP, "got rx_mode ramrod\n");
5639 bnx2x_handle_rx_mode_eqe(bp);
5640 break;
5641 default:
5642 /* unknown event: log error and continue */
5643 BNX2X_ERR("Unknown EQ event %d, bp->state 0x%x\n",
5644 elem->message.opcode, bp->state);
5645 }
5646 next_spqe:
5647 spqe_cnt++;
5648 }
5649
5650 smp_mb__before_atomic();
5651 atomic_add(spqe_cnt, &bp->eq_spq_left);
5652
5653 bp->eq_cons = sw_cons;
5654 bp->eq_prod = sw_prod;
5655 /* Make sure that the above mem writes were issued towards the memory */
5656 smp_wmb();
5657
5658 /* update producer */
5659 bnx2x_update_eq_prod(bp, bp->eq_prod);
5660 }
5661
5662 static void bnx2x_sp_task(struct work_struct *work)
5663 {
5664 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
5665
5666 DP(BNX2X_MSG_SP, "sp task invoked\n");
5667
5668 /* make sure the atomic interrupt_occurred has been written */
5669 smp_rmb();
5670 if (atomic_read(&bp->interrupt_occurred)) {
5671
5672 /* what work needs to be performed? */
5673 u16 status = bnx2x_update_dsb_idx(bp);
5674
5675 DP(BNX2X_MSG_SP, "status %x\n", status);
5676 DP(BNX2X_MSG_SP, "setting interrupt_occurred to 0\n");
5677 atomic_set(&bp->interrupt_occurred, 0);
5678
5679 /* HW attentions */
5680 if (status & BNX2X_DEF_SB_ATT_IDX) {
5681 bnx2x_attn_int(bp);
5682 status &= ~BNX2X_DEF_SB_ATT_IDX;
5683 }
5684
5685 /* SP events: STAT_QUERY and others */
5686 if (status & BNX2X_DEF_SB_IDX) {
5687 struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
5688
5689 if (FCOE_INIT(bp) &&
5690 (bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
5691 /* Prevent local bottom-halves from running as
5692  * we are going to change the local NAPI list.
5693  */
5694 local_bh_disable();
5695 napi_schedule(&bnx2x_fcoe(bp, napi));
5696 local_bh_enable();
5697 }
5698
5699 /* Handle EQ completions */
5700 bnx2x_eq_int(bp);
5701 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID,
5702 le16_to_cpu(bp->def_idx), IGU_INT_NOP, 1);
5703
5704 status &= ~BNX2X_DEF_SB_IDX;
5705 }
5706
5707
5708 if (unlikely(status))
5709 DP(BNX2X_MSG_SP,
5710 "got an unknown interrupt! (status 0x%x)\n", status);
5711
5712 /* ack the attention status block index to re-enable attentions */
5713 bnx2x_ack_sb(bp, bp->igu_dsb_id, ATTENTION_ID,
5714 le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1);
5715 }
5716
5717 /* afex - poll to check if VIFSET_ACK should be sent to MFW */
5718 if (test_and_clear_bit(BNX2X_AFEX_PENDING_VIFSET_MCP_ACK,
5719 &bp->sp_state)) {
5720 bnx2x_link_report(bp);
5721 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);
5722 }
5723 }
5724
5725 irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
5726 {
5727 struct net_device *dev = dev_instance;
5728 struct bnx2x *bp = netdev_priv(dev);
5729
5730 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0,
5731 IGU_INT_DISABLE, 0);
5732
5733 #ifdef BNX2X_STOP_ON_ERROR
5734 if (unlikely(bp->panic))
5735 return IRQ_HANDLED;
5736 #endif
5737
5738 if (CNIC_LOADED(bp)) {
5739 struct cnic_ops *c_ops;
5740
5741 rcu_read_lock();
5742 c_ops = rcu_dereference(bp->cnic_ops);
5743 if (c_ops)
5744 c_ops->cnic_handler(bp->cnic_data, NULL);
5745 rcu_read_unlock();
5746 }
5747
5748 /* schedule sp task to perform default status block work, ack
5749  * attention and enable interrupts.
5750  */
5751 bnx2x_schedule_sp_task(bp);
5752
5753 return IRQ_HANDLED;
5754 }
5755
5756 /* end of slow path */
5757
5758 void bnx2x_drv_pulse(struct bnx2x *bp)
5759 {
5760 SHMEM_WR(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb,
5761 bp->fw_drv_pulse_wr_seq);
5762 }
5763
5764 static void bnx2x_timer(struct timer_list *t)
5765 {
5766 struct bnx2x *bp = from_timer(bp, t, timer);
5767
5768 if (!netif_running(bp->dev))
5769 return;
5770
5771 if (IS_PF(bp) &&
5772 !BP_NOMCP(bp)) {
5773 int mb_idx = BP_FW_MB_IDX(bp);
5774 u16 drv_pulse;
5775 u16 mcp_pulse;
5776
5777 ++bp->fw_drv_pulse_wr_seq;
5778 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
5779 drv_pulse = bp->fw_drv_pulse_wr_seq;
5780 bnx2x_drv_pulse(bp);
5781
5782 mcp_pulse = (SHMEM_RD(bp, func_mb[mb_idx].mcp_pulse_mb) &
5783 MCP_PULSE_SEQ_MASK);
5784
5785 /* The delta between driver pulse and mcp response
5786  * should not get too big. If the MFW is more than 5 pulses
5787  * behind, we should worry about it enough to warn.
5788  */
5789 if (((drv_pulse - mcp_pulse) & MCP_PULSE_SEQ_MASK) > 5)
5790 BNX2X_ERR("MFW seems hanged: drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
5791 drv_pulse, mcp_pulse);
5792 }
5793
5794 if (bp->state == BNX2X_STATE_OPEN)
5795 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
5796
5797 /* sample pf vf bulletin board for new posts from pf */
5798 if (IS_VF(bp))
5799 bnx2x_timer_sriov(bp);
5800
5801 mod_timer(&bp->timer, jiffies + bp->current_interval);
5802 }
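/* Pulse arithmetic sketch (hypothetical values, assuming a 0x7fff sequence
 * mask): the subtraction is evaluated modulo the mask, so wraparound is
 * harmless:
 *
 *	drv_pulse = 0x0002, mcp_pulse = 0x7ffe
 *	(0x0002 - 0x7ffe) & 0x7fff = 0x0004	// 4 pulses behind: no warning
 */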
5803
5804 /* end of Statistics */
5805
5806 /* nic init */
5807
5808 /*
5809  * nic init service functions
5810  */
5811
5812 static void bnx2x_fill(struct bnx2x *bp, u32 addr, int fill, u32 len)
5813 {
5814 u32 i;
5815 if (!(len%4) && !(addr%4))
5816 for (i = 0; i < len; i += 4)
5817 REG_WR(bp, addr + i, fill);
5818 else
5819 for (i = 0; i < len; i++)
5820 REG_WR8(bp, addr + i, fill);
5821 }
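/* Note (editorial): the dword fast path above runs only when both addr and
 * len are 4-byte aligned; e.g. bnx2x_fill(bp, base, 0, 120) issues 30
 * REG_WR()s, while len = 121 falls back to 121 single-byte REG_WR8()s.
 */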
5822
5823
5824 static void bnx2x_wr_fp_sb_data(struct bnx2x *bp,
5825 int fw_sb_id,
5826 u32 *sb_data_p,
5827 u32 data_size)
5828 {
5829 int index;
5830 for (index = 0; index < data_size; index++)
5831 REG_WR(bp, BAR_CSTRORM_INTMEM +
5832 CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
5833 sizeof(u32)*index,
5834 *(sb_data_p + index));
5835 }
5836
5837 static void bnx2x_zero_fp_sb(struct bnx2x *bp, int fw_sb_id)
5838 {
5839 u32 *sb_data_p;
5840 u32 data_size = 0;
5841 struct hc_status_block_data_e2 sb_data_e2;
5842 struct hc_status_block_data_e1x sb_data_e1x;
5843
5844 /* disable the function first */
5845 if (!CHIP_IS_E1x(bp)) {
5846 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
5847 sb_data_e2.common.state = SB_DISABLED;
5848 sb_data_e2.common.p_func.vf_valid = false;
5849 sb_data_p = (u32 *)&sb_data_e2;
5850 data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
5851 } else {
5852 memset(&sb_data_e1x, 0,
5853 sizeof(struct hc_status_block_data_e1x));
5854 sb_data_e1x.common.state = SB_DISABLED;
5855 sb_data_e1x.common.p_func.vf_valid = false;
5856 sb_data_p = (u32 *)&sb_data_e1x;
5857 data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
5858 }
5859 bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
5860
5861 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
5862 CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id), 0,
5863 CSTORM_STATUS_BLOCK_SIZE);
5864 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
5865 CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id), 0,
5866 CSTORM_SYNC_BLOCK_SIZE);
5867 }
5868
5869 /* helper: writes SP SB data to FW */
5870 static void bnx2x_wr_sp_sb_data(struct bnx2x *bp,
5871 struct hc_sp_status_block_data *sp_sb_data)
5872 {
5873 int func = BP_FUNC(bp);
5874 int i;
5875 for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
5876 REG_WR(bp, BAR_CSTRORM_INTMEM +
5877 CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
5878 i*sizeof(u32),
5879 *((u32 *)sp_sb_data + i));
5880 }
5881
5882 static void bnx2x_zero_sp_sb(struct bnx2x *bp)
5883 {
5884 int func = BP_FUNC(bp);
5885 struct hc_sp_status_block_data sp_sb_data;
5886 memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
5887
5888 sp_sb_data.state = SB_DISABLED;
5889 sp_sb_data.p_func.vf_valid = false;
5890
5891 bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
5892
5893 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
5894 CSTORM_SP_STATUS_BLOCK_OFFSET(func), 0,
5895 CSTORM_SP_STATUS_BLOCK_SIZE);
5896 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
5897 CSTORM_SP_SYNC_BLOCK_OFFSET(func), 0,
5898 CSTORM_SP_SYNC_BLOCK_SIZE);
5899 }
5900
5901 static void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
5902 int igu_sb_id, int igu_seg_id)
5903 {
5904 hc_sm->igu_sb_id = igu_sb_id;
5905 hc_sm->igu_seg_id = igu_seg_id;
5906 hc_sm->timer_value = 0xFF;
5907 hc_sm->time_to_expire = 0xFFFFFFFF;
5908 }
5909
5910
5911 static void bnx2x_map_sb_state_machines(struct hc_index_data *index_data)
5912 {
5913 /* zero out state machine indices */
5914 /* rx indices */
5915 index_data[HC_INDEX_ETH_RX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID;
5916
5917 /* tx indices */
5918 index_data[HC_INDEX_OOO_TX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID;
5919 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags &= ~HC_INDEX_DATA_SM_ID;
5920 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags &= ~HC_INDEX_DATA_SM_ID;
5921 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags &= ~HC_INDEX_DATA_SM_ID;
5922
5923 /* map indices */
5924 /* rx indices */
5925 index_data[HC_INDEX_ETH_RX_CQ_CONS].flags |=
5926 SM_RX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
5927
5928 /* tx indices */
5929 index_data[HC_INDEX_OOO_TX_CQ_CONS].flags |=
5930 SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
5931 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags |=
5932 SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
5933 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags |=
5934 SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
5935 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags |=
5936 SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
5937 }
5938
5939 void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
5940 u8 vf_valid, int fw_sb_id, int igu_sb_id)
5941 {
5942 int igu_seg_id;
5943
5944 struct hc_status_block_data_e2 sb_data_e2;
5945 struct hc_status_block_data_e1x sb_data_e1x;
5946 struct hc_status_block_sm *hc_sm_p;
5947 int data_size;
5948 u32 *sb_data_p;
5949
5950 if (CHIP_INT_MODE_IS_BC(bp))
5951 igu_seg_id = HC_SEG_ACCESS_NORM;
5952 else
5953 igu_seg_id = IGU_SEG_ACCESS_NORM;
5954
5955 bnx2x_zero_fp_sb(bp, fw_sb_id);
5956
5957 if (!CHIP_IS_E1x(bp)) {
5958 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
5959 sb_data_e2.common.state = SB_ENABLED;
5960 sb_data_e2.common.p_func.pf_id = BP_FUNC(bp);
5961 sb_data_e2.common.p_func.vf_id = vfid;
5962 sb_data_e2.common.p_func.vf_valid = vf_valid;
5963 sb_data_e2.common.p_func.vnic_id = BP_VN(bp);
5964 sb_data_e2.common.same_igu_sb_1b = true;
5965 sb_data_e2.common.host_sb_addr.hi = U64_HI(mapping);
5966 sb_data_e2.common.host_sb_addr.lo = U64_LO(mapping);
5967 hc_sm_p = sb_data_e2.common.state_machine;
5968 sb_data_p = (u32 *)&sb_data_e2;
5969 data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
5970 bnx2x_map_sb_state_machines(sb_data_e2.index_data);
5971 } else {
5972 memset(&sb_data_e1x, 0,
5973 sizeof(struct hc_status_block_data_e1x));
5974 sb_data_e1x.common.state = SB_ENABLED;
5975 sb_data_e1x.common.p_func.pf_id = BP_FUNC(bp);
5976 sb_data_e1x.common.p_func.vf_id = 0xff;
5977 sb_data_e1x.common.p_func.vf_valid = false;
5978 sb_data_e1x.common.p_func.vnic_id = BP_VN(bp);
5979 sb_data_e1x.common.same_igu_sb_1b = true;
5980 sb_data_e1x.common.host_sb_addr.hi = U64_HI(mapping);
5981 sb_data_e1x.common.host_sb_addr.lo = U64_LO(mapping);
5982 hc_sm_p = sb_data_e1x.common.state_machine;
5983 sb_data_p = (u32 *)&sb_data_e1x;
5984 data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
5985 bnx2x_map_sb_state_machines(sb_data_e1x.index_data);
5986 }
5987
5988 bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID],
5989 igu_sb_id, igu_seg_id);
5990 bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID],
5991 igu_sb_id, igu_seg_id);
5992
5993 DP(NETIF_MSG_IFUP, "Init FW SB %d\n", fw_sb_id);
5994
5995 /* write indices to HW - PCI guarantees endianity of regpairs */
5996 bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
5997 }
5998
5999 static void bnx2x_update_coalesce_sb(struct bnx2x *bp, u8 fw_sb_id,
6000 u16 tx_usec, u16 rx_usec)
6001 {
6002 bnx2x_update_coalesce_sb_index(bp, fw_sb_id, HC_INDEX_ETH_RX_CQ_CONS,
6003 false, rx_usec);
6004 bnx2x_update_coalesce_sb_index(bp, fw_sb_id,
6005 HC_INDEX_ETH_TX_CQ_CONS_COS0, false,
6006 tx_usec);
6007 bnx2x_update_coalesce_sb_index(bp, fw_sb_id,
6008 HC_INDEX_ETH_TX_CQ_CONS_COS1, false,
6009 tx_usec);
6010 bnx2x_update_coalesce_sb_index(bp, fw_sb_id,
6011 HC_INDEX_ETH_TX_CQ_CONS_COS2, false,
6012 tx_usec);
6013 }
6014
6015 static void bnx2x_init_def_sb(struct bnx2x *bp)
6016 {
6017 struct host_sp_status_block *def_sb = bp->def_status_blk;
6018 dma_addr_t mapping = bp->def_status_blk_mapping;
6019 int igu_sp_sb_index;
6020 int igu_seg_id;
6021 int port = BP_PORT(bp);
6022 int func = BP_FUNC(bp);
6023 int reg_offset, reg_offset_en5;
6024 u64 section;
6025 int index;
6026 struct hc_sp_status_block_data sp_sb_data;
6027 memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
6028
6029 if (CHIP_INT_MODE_IS_BC(bp)) {
6030 igu_sp_sb_index = DEF_SB_IGU_ID;
6031 igu_seg_id = HC_SEG_ACCESS_DEF;
6032 } else {
6033 igu_sp_sb_index = bp->igu_dsb_id;
6034 igu_seg_id = IGU_SEG_ACCESS_DEF;
6035 }
6036
6037 /* ATTN */
6038 section = ((u64)mapping) + offsetof(struct host_sp_status_block,
6039 atten_status_block);
6040 def_sb->atten_status_block.status_block_id = igu_sp_sb_index;
6041
6042 bp->attn_state = 0;
6043
6044 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
6045 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
6046 reg_offset_en5 = (port ? MISC_REG_AEU_ENABLE5_FUNC_1_OUT_0 :
6047 MISC_REG_AEU_ENABLE5_FUNC_0_OUT_0);
6048 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
6049 int sindex;
6050
6051 for (sindex = 0; sindex < 4; sindex++)
6052 bp->attn_group[index].sig[sindex] =
6053 REG_RD(bp, reg_offset + sindex*0x4 + 0x10*index);
6054
6055 if (!CHIP_IS_E1x(bp))
6056 /*
6057  * enable5 is separate from the rest of the registers,
6058  * and therefore the address skip is 4
6059  * and not 16 between the different groups
6060  */
6061 bp->attn_group[index].sig[4] = REG_RD(bp,
6062 reg_offset_en5 + 0x4*index);
6063 else
6064 bp->attn_group[index].sig[4] = 0;
6065 }
6066
6067 if (bp->common.int_block == INT_BLOCK_HC) {
6068 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
6069 HC_REG_ATTN_MSG0_ADDR_L);
6070
6071 REG_WR(bp, reg_offset, U64_LO(section));
6072 REG_WR(bp, reg_offset + 4, U64_HI(section));
6073 } else if (!CHIP_IS_E1x(bp)) {
6074 REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section));
6075 REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section));
6076 }
6077
6078 section = ((u64)mapping) + offsetof(struct host_sp_status_block,
6079 sp_sb);
6080
6081 bnx2x_zero_sp_sb(bp);
6082
6083 /* PCI guarantees endianity of regpairs */
6084 sp_sb_data.state = SB_ENABLED;
6085 sp_sb_data.host_sb_addr.lo = U64_LO(section);
6086 sp_sb_data.host_sb_addr.hi = U64_HI(section);
6087 sp_sb_data.igu_sb_id = igu_sp_sb_index;
6088 sp_sb_data.igu_seg_id = igu_seg_id;
6089 sp_sb_data.p_func.pf_id = func;
6090 sp_sb_data.p_func.vnic_id = BP_VN(bp);
6091 sp_sb_data.p_func.vf_id = 0xff;
6092
6093 bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
6094
6095 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
6096 }
6097
6098 void bnx2x_update_coalesce(struct bnx2x *bp)
6099 {
6100 int i;
6101
6102 for_each_eth_queue(bp, i)
6103 bnx2x_update_coalesce_sb(bp, bp->fp[i].fw_sb_id,
6104 bp->tx_ticks, bp->rx_ticks);
6105 }
6106
6107 static void bnx2x_init_sp_ring(struct bnx2x *bp)
6108 {
6109 spin_lock_init(&bp->spq_lock);
6110 atomic_set(&bp->cq_spq_left, MAX_SPQ_PENDING);
6111
6112 bp->spq_prod_idx = 0;
6113 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
6114 bp->spq_prod_bd = bp->spq;
6115 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
6116 }
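/* Added note: the slow-path queue (SPQ) holds MAX_SP_DESC_CNT BDs;
 * spq_last_bd marks the wrap point for spq_prod_bd, and cq_spq_left starts
 * at MAX_SPQ_PENDING, the credit of ramrods that may be outstanding on
 * connection queues. The producer-side accounting that consumes and returns
 * these credits lives in bnx2x_sp_post()/bnx2x_sp_get_next().
 */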
6117
6118 static void bnx2x_init_eq_ring(struct bnx2x *bp)
6119 {
6120 int i;
6121 for (i = 1; i <= NUM_EQ_PAGES; i++) {
6122 union event_ring_elem *elem =
6123 &bp->eq_ring[EQ_DESC_CNT_PAGE * i - 1];
6124
6125 elem->next_page.addr.hi =
6126 cpu_to_le32(U64_HI(bp->eq_mapping +
6127 BCM_PAGE_SIZE * (i % NUM_EQ_PAGES)));
6128 elem->next_page.addr.lo =
6129 cpu_to_le32(U64_LO(bp->eq_mapping +
6130 BCM_PAGE_SIZE*(i % NUM_EQ_PAGES)));
6131 }
6132 bp->eq_cons = 0;
6133 bp->eq_prod = NUM_EQ_DESC;
6134 bp->eq_cons_sb = BNX2X_EQ_INDEX;
6135
6136 atomic_set(&bp->eq_spq_left,
6137 min_t(int, MAX_SP_DESC_CNT - MAX_SPQ_PENDING, NUM_EQ_DESC) - 1);
6138 }
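/* Worked example (added): with page-chained rings, the last event_ring_elem
 * of page i (index EQ_DESC_CNT_PAGE * i - 1) is not a real descriptor but a
 * next_page pointer. The loop above makes it point at page (i % NUM_EQ_PAGES),
 * so the final page links back to page 0 and the ring is circular. The
 * initial credit
 *
 *     eq_spq_left = min(MAX_SP_DESC_CNT - MAX_SPQ_PENDING, NUM_EQ_DESC) - 1
 *
 * bounds outstanding slow-path events by both the EQ size and the SPQ room
 * that is not reserved for connection-queue ramrods.
 */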
6139
6140 /* called with netif_addr_lock_bh() */
6141 static int bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id,
6142 unsigned long rx_mode_flags,
6143 unsigned long rx_accept_flags,
6144 unsigned long tx_accept_flags,
6145 unsigned long ramrod_flags)
6146 {
6147 struct bnx2x_rx_mode_ramrod_params ramrod_param;
6148 int rc;
6149
6150 memset(&ramrod_param, 0, sizeof(ramrod_param));
6151
6152 /* Prepare ramrod parameters */
6153 ramrod_param.cid = 0;
6154 ramrod_param.cl_id = cl_id;
6155 ramrod_param.rx_mode_obj = &bp->rx_mode_obj;
6156 ramrod_param.func_id = BP_FUNC(bp);
6157
6158 ramrod_param.pstate = &bp->sp_state;
6159 ramrod_param.state = BNX2X_FILTER_RX_MODE_PENDING;
6160
6161 ramrod_param.rdata = bnx2x_sp(bp, rx_mode_rdata);
6162 ramrod_param.rdata_mapping = bnx2x_sp_mapping(bp, rx_mode_rdata);
6163
6164 set_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state);
6165
6166 ramrod_param.ramrod_flags = ramrod_flags;
6167 ramrod_param.rx_mode_flags = rx_mode_flags;
6168
6169 ramrod_param.rx_accept_flags = rx_accept_flags;
6170 ramrod_param.tx_accept_flags = tx_accept_flags;
6171
6172 rc = bnx2x_config_rx_mode(bp, &ramrod_param);
6173 if (rc < 0) {
6174 BNX2X_ERR("Set rx_mode %d failed\n", bp->rx_mode);
6175 return rc;
6176 }
6177
6178 return 0;
6179 }
6180
6181 static int bnx2x_fill_accept_flags(struct bnx2x *bp, u32 rx_mode,
6182 unsigned long *rx_accept_flags,
6183 unsigned long *tx_accept_flags)
6184 {
6185 /* Clear the flags first */
6186 *rx_accept_flags = 0;
6187 *tx_accept_flags = 0;
6188
6189 switch (rx_mode) {
6190 case BNX2X_RX_MODE_NONE:
6191 /* 'drop all' supersedes any accept flags that may have been
6192  * passed to the function.
6193  */
6194
6195 break;
6196 case BNX2X_RX_MODE_NORMAL:
6197 __set_bit(BNX2X_ACCEPT_UNICAST, rx_accept_flags);
6198 __set_bit(BNX2X_ACCEPT_MULTICAST, rx_accept_flags);
6199 __set_bit(BNX2X_ACCEPT_BROADCAST, rx_accept_flags);
6200
6201 /* internal switching mode */
6202 __set_bit(BNX2X_ACCEPT_UNICAST, tx_accept_flags);
6203 __set_bit(BNX2X_ACCEPT_MULTICAST, tx_accept_flags);
6204 __set_bit(BNX2X_ACCEPT_BROADCAST, tx_accept_flags);
6205
6206 if (bp->accept_any_vlan) {
6207 __set_bit(BNX2X_ACCEPT_ANY_VLAN, rx_accept_flags);
6208 __set_bit(BNX2X_ACCEPT_ANY_VLAN, tx_accept_flags);
6209 }
6210
6211 break;
6212 case BNX2X_RX_MODE_ALLMULTI:
6213 __set_bit(BNX2X_ACCEPT_UNICAST, rx_accept_flags);
6214 __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, rx_accept_flags);
6215 __set_bit(BNX2X_ACCEPT_BROADCAST, rx_accept_flags);
6216
6217 /* internal switching mode */
6218 __set_bit(BNX2X_ACCEPT_UNICAST, tx_accept_flags);
6219 __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, tx_accept_flags);
6220 __set_bit(BNX2X_ACCEPT_BROADCAST, tx_accept_flags);
6221
6222 if (bp->accept_any_vlan) {
6223 __set_bit(BNX2X_ACCEPT_ANY_VLAN, rx_accept_flags);
6224 __set_bit(BNX2X_ACCEPT_ANY_VLAN, tx_accept_flags);
6225 }
6226
6227 break;
6228 case BNX2X_RX_MODE_PROMISC:
6229 /* According to definition of SI mode, iface in promisc mode
6230  * should receive matched and unmatched (in resolution of port)
6231  * unicast packets.
6232  */
6233 __set_bit(BNX2X_ACCEPT_UNMATCHED, rx_accept_flags);
6234 __set_bit(BNX2X_ACCEPT_UNICAST, rx_accept_flags);
6235 __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, rx_accept_flags);
6236 __set_bit(BNX2X_ACCEPT_BROADCAST, rx_accept_flags);
6237
6238 /* internal switching mode */
6239 __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, tx_accept_flags);
6240 __set_bit(BNX2X_ACCEPT_BROADCAST, tx_accept_flags);
6241
6242 if (IS_MF_SI(bp))
6243 __set_bit(BNX2X_ACCEPT_ALL_UNICAST, tx_accept_flags);
6244 else
6245 __set_bit(BNX2X_ACCEPT_UNICAST, tx_accept_flags);
6246
6247 __set_bit(BNX2X_ACCEPT_ANY_VLAN, rx_accept_flags);
6248 __set_bit(BNX2X_ACCEPT_ANY_VLAN, tx_accept_flags);
6249
6250 break;
6251 default:
6252 BNX2X_ERR("Unknown rx_mode: %d\n", rx_mode);
6253 return -EINVAL;
6254 }
6255
6256 return 0;
6257 }
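/* Summary of the mapping above (added; values follow directly from the code):
 *
 *   rx_mode     rx_accept                        tx_accept
 *   NONE        (none - drop all)                (none)
 *   NORMAL      UCAST | MCAST | BCAST            UCAST | MCAST | BCAST
 *   ALLMULTI    UCAST | ALL_MCAST | BCAST        UCAST | ALL_MCAST | BCAST
 *   PROMISC     UNMATCHED | UCAST | ALL_MCAST    ALL_MCAST | BCAST | UCAST
 *               | BCAST | ANY_VLAN               (ALL_UCAST in MF-SI) | ANY_VLAN
 *
 * ANY_VLAN is also added to NORMAL/ALLMULTI (rx and tx) when
 * bp->accept_any_vlan is set; for PROMISC it is set unconditionally.
 */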
6258
6259 /* called with netif_addr_lock_bh() */
6260 static int bnx2x_set_storm_rx_mode(struct bnx2x *bp)
6261 {
6262 unsigned long rx_mode_flags = 0, ramrod_flags = 0;
6263 unsigned long rx_accept_flags = 0, tx_accept_flags = 0;
6264 int rc;
6265
6266 if (!NO_FCOE(bp))
6267 /* Configure rx_mode of FCoE Queue */
6268 __set_bit(BNX2X_RX_MODE_FCOE_ETH, &rx_mode_flags);
6269
6270 rc = bnx2x_fill_accept_flags(bp, bp->rx_mode, &rx_accept_flags,
6271 &tx_accept_flags);
6272 if (rc)
6273 return rc;
6274
6275 __set_bit(RAMROD_RX, &ramrod_flags);
6276 __set_bit(RAMROD_TX, &ramrod_flags);
6277
6278 return bnx2x_set_q_rx_mode(bp, bp->fp->cl_id, rx_mode_flags,
6279 rx_accept_flags, tx_accept_flags,
6280 ramrod_flags);
6281 }
6282
6283 static void bnx2x_init_internal_common(struct bnx2x *bp)
6284 {
6285 int i;
6286
6287 /* Zero this manually as its initialization is
6288    currently missing in the initTool */
6289 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
6290 REG_WR(bp, BAR_USTRORM_INTMEM +
6291 USTORM_AGG_DATA_OFFSET + i * 4, 0);
6292 if (!CHIP_IS_E1x(bp)) {
6293 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET,
6294 CHIP_INT_MODE_IS_BC(bp) ?
6295 HC_IGU_BC_MODE : HC_IGU_NBC_MODE);
6296 }
6297 }
6298
6299 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
6300 {
6301 switch (load_code) {
6302 case FW_MSG_CODE_DRV_LOAD_COMMON:
6303 case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
6304 bnx2x_init_internal_common(bp);
6305
6306 /* fall through */
6307 case FW_MSG_CODE_DRV_LOAD_PORT:
6308 /* nothing to do */
6309
6310 /* fall through */
6311 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
6312 /* internal memory per function is
6313    initialized inside bnx2x_pf_init */
6314 break;
6315
6316 default:
6317 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
6318 break;
6319 }
6320 }
6321
6322 static inline u8 bnx2x_fp_igu_sb_id(struct bnx2x_fastpath *fp)
6323 {
6324 return fp->bp->igu_base_sb + fp->index + CNIC_SUPPORT(fp->bp);
6325 }
6326
6327 static inline u8 bnx2x_fp_fw_sb_id(struct bnx2x_fastpath *fp)
6328 {
6329 return fp->bp->base_fw_ndsb + fp->index + CNIC_SUPPORT(fp->bp);
6330 }
6331
6332 static u8 bnx2x_fp_cl_id(struct bnx2x_fastpath *fp)
6333 {
6334 if (CHIP_IS_E1x(fp->bp))
6335 return BP_L_ID(fp->bp) + fp->index;
6336 else
6337 return bnx2x_fp_igu_sb_id(fp);
6338 }
6339
6340 static void bnx2x_init_eth_fp(struct bnx2x *bp, int fp_idx)
6341 {
6342 struct bnx2x_fastpath *fp = &bp->fp[fp_idx];
6343 u8 cos;
6344 unsigned long q_type = 0;
6345 u32 cids[BNX2X_MULTI_TX_COS] = { 0 };
6346 fp->rx_queue = fp_idx;
6347 fp->cid = fp_idx;
6348 fp->cl_id = bnx2x_fp_cl_id(fp);
6349 fp->fw_sb_id = bnx2x_fp_fw_sb_id(fp);
6350 fp->igu_sb_id = bnx2x_fp_igu_sb_id(fp);
6351 /* qZone id equals to FW (per path) client id */
6352 fp->cl_qzone_id = bnx2x_fp_qzone_id(fp);
6353
6354 /* init shortcut */
6355 fp->ustorm_rx_prods_offset = bnx2x_rx_ustorm_prods_offset(fp);
6356
6357 /* Setup SB indices */
6358 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
6359
6360 /* Configure Queue State object */
6361 __set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
6362 __set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);
6363
6364 BUG_ON(fp->max_cos > BNX2X_MULTI_TX_COS);
6365
6366 /* init tx data */
6367 for_each_cos_in_tx_queue(fp, cos) {
6368 bnx2x_init_txdata(bp, fp->txdata_ptr[cos],
6369 CID_COS_TO_TX_ONLY_CID(fp->cid, cos, bp),
6370 FP_COS_TO_TXQ(fp, cos, bp),
6371 BNX2X_TX_SB_INDEX_BASE + cos, fp);
6372 cids[cos] = fp->txdata_ptr[cos]->cid;
6373 }
6374
6375 /* nothing more for vf to do here */
6376 if (IS_VF(bp))
6377 return;
6378
6379 bnx2x_init_sb(bp, fp->status_blk_mapping, BNX2X_VF_ID_INVALID, false,
6380 fp->fw_sb_id, fp->igu_sb_id);
6381 bnx2x_update_fpsb_idx(fp);
6382 bnx2x_init_queue_obj(bp, &bnx2x_sp_obj(bp, fp).q_obj, fp->cl_id, cids,
6383 fp->max_cos, BP_FUNC(bp), bnx2x_sp(bp, q_rdata),
6384 bnx2x_sp_mapping(bp, q_rdata), q_type);
6385
6386
6387
6388 /* Configure classification DBs */
6389 bnx2x_init_vlan_mac_fp_objs(fp, BNX2X_OBJ_TYPE_RX_TX);
6390
6391 DP(NETIF_MSG_IFUP,
6392 "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d fw_sb %d igu_sb %d\n",
6393 fp_idx, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id,
6394 fp->igu_sb_id);
6395 }
6396
6397 static void bnx2x_init_tx_ring_one(struct bnx2x_fp_txdata *txdata)
6398 {
6399 int i;
6400
6401 for (i = 1; i <= NUM_TX_RINGS; i++) {
6402 struct eth_tx_next_bd *tx_next_bd =
6403 &txdata->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;
6404
6405 tx_next_bd->addr_hi =
6406 cpu_to_le32(U64_HI(txdata->tx_desc_mapping +
6407 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
6408 tx_next_bd->addr_lo =
6409 cpu_to_le32(U64_LO(txdata->tx_desc_mapping +
6410 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
6411 }
6412
6413 *txdata->tx_cons_sb = cpu_to_le16(0);
6414
6415 SET_FLAG(txdata->tx_db.data.header.header, DOORBELL_HDR_DB_TYPE, 1);
6416 txdata->tx_db.data.zero_fill1 = 0;
6417 txdata->tx_db.data.prod = 0;
6418
6419 txdata->tx_pkt_prod = 0;
6420 txdata->tx_pkt_cons = 0;
6421 txdata->tx_bd_prod = 0;
6422 txdata->tx_bd_cons = 0;
6423 txdata->tx_pkt = 0;
6424 }
6425
6426 static void bnx2x_init_tx_rings_cnic(struct bnx2x *bp)
6427 {
6428 int i;
6429
6430 for_each_tx_queue_cnic(bp, i)
6431 bnx2x_init_tx_ring_one(bp->fp[i].txdata_ptr[0]);
6432 }
6433
6434 static void bnx2x_init_tx_rings(struct bnx2x *bp)
6435 {
6436 int i;
6437 u8 cos;
6438
6439 for_each_eth_queue(bp, i)
6440 for_each_cos_in_tx_queue(&bp->fp[i], cos)
6441 bnx2x_init_tx_ring_one(bp->fp[i].txdata_ptr[cos]);
6442 }
6443
6444 static void bnx2x_init_fcoe_fp(struct bnx2x *bp)
6445 {
6446 struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
6447 unsigned long q_type = 0;
6448
6449 bnx2x_fcoe(bp, rx_queue) = BNX2X_NUM_ETH_QUEUES(bp);
6450 bnx2x_fcoe(bp, cl_id) = bnx2x_cnic_eth_cl_id(bp,
6451 BNX2X_FCOE_ETH_CL_ID_IDX);
6452 bnx2x_fcoe(bp, cid) = BNX2X_FCOE_ETH_CID(bp);
6453 bnx2x_fcoe(bp, fw_sb_id) = DEF_SB_ID;
6454 bnx2x_fcoe(bp, igu_sb_id) = bp->igu_dsb_id;
6455 bnx2x_fcoe(bp, rx_cons_sb) = BNX2X_FCOE_L2_RX_INDEX;
6456 bnx2x_init_txdata(bp, bnx2x_fcoe(bp, txdata_ptr[0]),
6457 fp->cid, FCOE_TXQ_IDX(bp), BNX2X_FCOE_L2_TX_INDEX,
6458 fp);
6459
6460 DP(NETIF_MSG_IFUP, "created fcoe tx data (fp index %d)\n", fp->index);
6461
6462 /* qZone id equals to FW (per path) client id */
6463 bnx2x_fcoe(bp, cl_qzone_id) = bnx2x_fp_qzone_id(fp);
6464 /* init shortcut */
6465 bnx2x_fcoe(bp, ustorm_rx_prods_offset) =
6466 bnx2x_rx_ustorm_prods_offset(fp);
6467
6468 /* Configure Queue State object */
6469 __set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
6470 __set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);
6471
6472 /* No multi-CoS for FCoE L2 client */
6473 BUG_ON(fp->max_cos != 1);
6474
6475 bnx2x_init_queue_obj(bp, &bnx2x_sp_obj(bp, fp).q_obj, fp->cl_id,
6476 &fp->cid, 1, BP_FUNC(bp), bnx2x_sp(bp, q_rdata),
6477 bnx2x_sp_mapping(bp, q_rdata), q_type);
6478
6479 DP(NETIF_MSG_IFUP,
6480 "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d fw_sb %d igu_sb %d\n",
6481 fp->index, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id,
6482 fp->igu_sb_id);
6483 }
6484
6485 void bnx2x_nic_init_cnic(struct bnx2x *bp)
6486 {
6487 if (!NO_FCOE(bp))
6488 bnx2x_init_fcoe_fp(bp);
6489
6490 bnx2x_init_sb(bp, bp->cnic_sb_mapping,
6491 BNX2X_VF_ID_INVALID, false,
6492 bnx2x_cnic_fw_sb_id(bp), bnx2x_cnic_igu_sb_id(bp));
6493
6494 /* ensure status block indices were read */
6495 rmb();
6496 bnx2x_init_rx_rings_cnic(bp);
6497 bnx2x_init_tx_rings_cnic(bp);
6498
6499 /* flush all */
6500 mb();
6501 }
6502
6503 void bnx2x_pre_irq_nic_init(struct bnx2x *bp)
6504 {
6505 int i;
6506
6507 /* Setup NIC internals and enable interrupts */
6508 for_each_eth_queue(bp, i)
6509 bnx2x_init_eth_fp(bp, i);
6510
6511 /* ensure status block indices were read */
6512 rmb();
6513 bnx2x_init_rx_rings(bp);
6514 bnx2x_init_tx_rings(bp);
6515
6516 if (IS_PF(bp)) {
6517 /* Initialize MOD_ABS interrupts */
6518 bnx2x_init_mod_abs_int(bp, &bp->link_vars, bp->common.chip_id,
6519 bp->common.shmem_base,
6520 bp->common.shmem2_base, BP_PORT(bp));
6521
6522 /* initialize the default status block and sp ring */
6523 bnx2x_init_def_sb(bp);
6524 bnx2x_update_dsb_idx(bp);
6525 bnx2x_init_sp_ring(bp);
6526 } else {
6527 bnx2x_memset_stats(bp);
6528 }
6529 }
6530
6531 void bnx2x_post_irq_nic_init(struct bnx2x *bp, u32 load_code)
6532 {
6533 bnx2x_init_eq_ring(bp);
6534 bnx2x_init_internal(bp, load_code);
6535 bnx2x_pf_init(bp);
6536 bnx2x_stats_init(bp);
6537
6538 /* flush all before enabling interrupts */
6539 mb();
6540
6541 bnx2x_int_enable(bp);
6542
6543 /* Check for SPIO5 */
6544 bnx2x_attn_int_deasserted0(bp,
6545 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
6546 AEU_INPUTS_ATTN_BITS_SPIO5);
6547 }
6548
6549
6550 static int bnx2x_gunzip_init(struct bnx2x *bp)
6551 {
6552 bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
6553 &bp->gunzip_mapping, GFP_KERNEL);
6554 if (bp->gunzip_buf == NULL)
6555 goto gunzip_nomem1;
6556
6557 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
6558 if (bp->strm == NULL)
6559 goto gunzip_nomem2;
6560
6561 bp->strm->workspace = vmalloc(zlib_inflate_workspacesize());
6562 if (bp->strm->workspace == NULL)
6563 goto gunzip_nomem3;
6564
6565 return 0;
6566
6567 gunzip_nomem3:
6568 kfree(bp->strm);
6569 bp->strm = NULL;
6570
6571 gunzip_nomem2:
6572 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
6573 bp->gunzip_mapping);
6574 bp->gunzip_buf = NULL;
6575
6576 gunzip_nomem1:
6577 BNX2X_ERR("Cannot allocate firmware buffer for decompression\n");
6578 return -ENOMEM;
6579 }
6580
6581 static void bnx2x_gunzip_end(struct bnx2x *bp)
6582 {
6583 if (bp->strm) {
6584 vfree(bp->strm->workspace);
6585 kfree(bp->strm);
6586 bp->strm = NULL;
6587 }
6588
6589 if (bp->gunzip_buf) {
6590 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
6591 bp->gunzip_mapping);
6592 bp->gunzip_buf = NULL;
6593 }
6594 }
6595
6596 static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
6597 {
6598 int n, rc;
6599
6600 /* check gzip header */
6601 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
6602 BNX2X_ERR("Bad gzip header\n");
6603 return -EINVAL;
6604 }
6605
6606 n = 10;
6607
6608 #define FNAME 0x8
6609
6610 if (zbuf[3] & FNAME)
6611 while ((zbuf[n++] != 0) && (n < len));
6612
6613 bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
6614 bp->strm->avail_in = len - n;
6615 bp->strm->next_out = bp->gunzip_buf;
6616 bp->strm->avail_out = FW_BUF_SIZE;
6617
6618 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
6619 if (rc != Z_OK)
6620 return rc;
6621
6622 rc = zlib_inflate(bp->strm, Z_FINISH);
6623 if ((rc != Z_OK) && (rc != Z_STREAM_END))
6624 netdev_err(bp->dev, "Firmware decompression error: %s\n",
6625 bp->strm->msg);
6626
6627 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
6628 if (bp->gunzip_outlen & 0x3)
6629 netdev_err(bp->dev,
6630 "Firmware decompression error: gunzip_outlen (%d) not aligned\n",
6631 bp->gunzip_outlen);
6632 bp->gunzip_outlen >>= 2;
6633
6634 zlib_inflateEnd(bp->strm);
6635
6636 if (rc == Z_STREAM_END)
6637 return 0;
6638
6639 return rc;
6640 }
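/* Added note: the firmware blobs are gzip members, but zlib_inflateInit2()
 * is called with a negative windowBits (-MAX_WBITS), i.e. raw deflate with
 * no zlib/gzip framing, so the 10-byte gzip header must be skipped by hand:
 *
 *   byte 0   0x1f  ID1
 *   byte 1   0x8b  ID2
 *   byte 2   0x08  CM (Z_DEFLATED)
 *   byte 3   FLG - bit 3 (FNAME, 0x8) means a NUL-terminated
 *                  original file name follows the fixed header
 *   byte 4-9       mtime/XFL/OS, ignored here
 *
 * which is exactly what the "n = 10" skip plus the FNAME loop implement.
 */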
6641
6642 /* nic load/unload */
6643
6644 /*
6645  * General service functions
6646  */
6647
6648 /* send a NIG loopback debug packet */
6649 static void bnx2x_lb_pckt(struct bnx2x *bp)
6650 {
6651 u32 wb_write[3];
6652
6653 /* Ethernet source and destination addresses */
6654 wb_write[0] = 0x55555555;
6655 wb_write[1] = 0x55555555;
6656 wb_write[2] = 0x20; /* SOP */
6657 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
6658
6659 /* NON-IP protocol */
6660 wb_write[0] = 0x09000000;
6661 wb_write[1] = 0x55555555;
6662 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
6663 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
6664 }
6665
6666 /* some of the internal memories
6667  * are not directly readable from the driver
6668  * to test them we send debug packets
6669  */
6670 static int bnx2x_int_mem_test(struct bnx2x *bp)
6671 {
6672 int factor;
6673 int count, i;
6674 u32 val = 0;
6675
6676 if (CHIP_REV_IS_FPGA(bp))
6677 factor = 120;
6678 else if (CHIP_REV_IS_EMUL(bp))
6679 factor = 200;
6680 else
6681 factor = 1;
6682
6683 /* Disable inputs of parser neighbor blocks */
6684 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
6685 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
6686 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
6687 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
6688
6689 /* Write 0 to parser credits for CFC search request */
6690 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
6691
6692 /* send Ethernet packet */
6693 bnx2x_lb_pckt(bp);
6694
6695 /* TODO do i reset NIG statistic? */
6696 /* Wait until NIG register shows 1 packet of size 0x10 */
6697 count = 1000 * factor;
6698 while (count) {
6699
6700 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6701 val = *bnx2x_sp(bp, wb_data[0]);
6702 if (val == 0x10)
6703 break;
6704
6705 usleep_range(10000, 20000);
6706 count--;
6707 }
6708 if (val != 0x10) {
6709 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
6710 return -1;
6711 }
6712
6713 /* Wait until PRS register shows 1 packet */
6714 count = 1000 * factor;
6715 while (count) {
6716 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
6717 if (val == 1)
6718 break;
6719
6720 usleep_range(10000, 20000);
6721 count--;
6722 }
6723 if (val != 0x1) {
6724 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
6725 return -2;
6726 }
6727
6728 /* Reset and init BRB, PRS */
6729 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
6730 msleep(50);
6731 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
6732 msleep(50);
6733 bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON);
6734 bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON);
6735
6736 DP(NETIF_MSG_HW, "part2\n");
6737
6738 /* Disable inputs of parser neighbor blocks */
6739 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
6740 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
6741 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
6742 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
6743
6744 /* Write 0 to parser credits for CFC search request */
6745 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
6746
6747 /* send 10 Ethernet packets */
6748 for (i = 0; i < 10; i++)
6749 bnx2x_lb_pckt(bp);
6750
6751 /* Wait until NIG register shows 10 + 1
6752    packets of size 11*0x10 = 0xb0 */
6753 count = 1000 * factor;
6754 while (count) {
6755
6756 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6757 val = *bnx2x_sp(bp, wb_data[0]);
6758 if (val == 0xb0)
6759 break;
6760
6761 usleep_range(10000, 20000);
6762 count--;
6763 }
6764 if (val != 0xb0) {
6765 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
6766 return -3;
6767 }
6768
6769 /* Wait until PRS register shows 2 packets */
6770 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
6771 if (val != 2)
6772 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
6773
6774 /* Write 1 to parser credits for CFC search request */
6775 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
6776
6777 /* Wait until PRS register shows 3 packets */
6778 msleep(10 * factor);
6779
6780 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
6781 if (val != 3)
6782 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
6783
6784 /* clear NIG EOP FIFO */
6785 for (i = 0; i < 11; i++)
6786 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
6787 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
6788 if (val != 1) {
6789 BNX2X_ERR("clear of NIG failed\n");
6790 return -4;
6791 }
6792
6793 /* Reset and init BRB, PRS, NIG */
6794 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
6795 msleep(50);
6796 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
6797 msleep(50);
6798 bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON);
6799 bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON);
6800 if (!CNIC_SUPPORT(bp))
6801
6802 REG_WR(bp, PRS_REG_NIC_MODE, 1);
6803
6804 /* Enable inputs of parser neighbor blocks */
6805 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
6806 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
6807 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
6808 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
6809
6810 DP(NETIF_MSG_HW, "done\n");
6811
6812 return 0;
6813 }
6814
6815 static void bnx2x_enable_blocks_attention(struct bnx2x *bp)
6816 {
6817 u32 val;
6818
6819 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
6820 if (!CHIP_IS_E1x(bp))
6821 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0x40);
6822 else
6823 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
6824 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
6825 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
6826
6827 /* mask read length error interrupts in brb for parser
6828  * (parsing unit and 'checksum and crc' unit)
6829  * these errors are legal (PU reads fixed length and CAC can cause
6830  * read length error on truncated packets)
6831  */
6832 REG_WR(bp, BRB1_REG_BRB1_INT_MASK, 0xFC00);
6833 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
6834 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
6835 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
6836 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
6837 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
6838
6839
6840 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
6841 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
6842 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
6843
6844
6845 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
6846 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
6847 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
6848 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
6849
6850
6851
6852 val = PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT |
6853 PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF |
6854 PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN;
6855 if (!CHIP_IS_E1x(bp))
6856 val |= PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED |
6857 PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED;
6858 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, val);
6859
6860 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
6861 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
6862 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
6863
6864
6865 if (!CHIP_IS_E1x(bp))
6866 /* enable VFC attentions: bits 11 and 12, bits 31:13 reserved */
6867 REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0x07ff);
6868
6869 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
6870 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
6871
6872 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);
6873 }
6874
6875 static void bnx2x_reset_common(struct bnx2x *bp)
6876 {
6877 u32 val = 0x1400;
6878
6879 /* reset_common */
6880 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6881 0xd3ffff7f);
6882
6883 if (CHIP_IS_E3(bp)) {
6884 val |= MISC_REGISTERS_RESET_REG_2_MSTAT0;
6885 val |= MISC_REGISTERS_RESET_REG_2_MSTAT1;
6886 }
6887
6888 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, val);
6889 }
6890
6891 static void bnx2x_setup_dmae(struct bnx2x *bp)
6892 {
6893 bp->dmae_ready = 0;
6894 spin_lock_init(&bp->dmae_lock);
6895 }
6896
6897 static void bnx2x_init_pxp(struct bnx2x *bp)
6898 {
6899 u16 devctl;
6900 int r_order, w_order;
6901
6902 pcie_capability_read_word(bp->pdev, PCI_EXP_DEVCTL, &devctl);
6903 DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
6904 w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
6905 if (bp->mrrs == -1)
6906 r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
6907 else {
6908 DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
6909 r_order = bp->mrrs;
6910 }
6911
6912 bnx2x_init_pxp_arb(bp, r_order, w_order);
6913 }
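/* Added illustration of the devctl decoding above, per the PCIe spec:
 * PCI_EXP_DEVCTL_PAYLOAD occupies bits 7:5 (hence the >> 5) and
 * PCI_EXP_DEVCTL_READRQ bits 14:12 (hence the >> 12); an encoded value v
 * means 128 << v bytes. E.g. (hypothetical value) devctl = 0x2850 gives
 * w_order = 2 (512-byte max payload) and r_order = 2 (512-byte max read
 * request), unless the mrrs module parameter overrides r_order.
 */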
6914
6915 static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
6916 {
6917 int is_required;
6918 u32 val;
6919 int port;
6920
6921 if (BP_NOMCP(bp))
6922 return;
6923
6924 is_required = 0;
6925 val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
6926 SHARED_HW_CFG_FAN_FAILURE_MASK;
6927
6928 if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
6929 is_required = 1;
6930
6931 /*
6932  * The fan failure mechanism is usually related to the PHY type since
6933  * the power consumption of the board is affected by the PHY. Currently,
6934  * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
6935  */
6936 else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
6937 for (port = PORT_0; port < PORT_MAX; port++) {
6938 is_required |=
6939 bnx2x_fan_failure_det_req(
6940 bp,
6941 bp->common.shmem_base,
6942 bp->common.shmem2_base,
6943 port);
6944 }
6945
6946 DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
6947
6948 if (is_required == 0)
6949 return;
6950
6951 /* Fan failure is indicated by SPIO 5 */
6952 bnx2x_set_spio(bp, MISC_SPIO_SPIO5, MISC_SPIO_INPUT_HI_Z);
6953
6954 /* set to active low mode */
6955 val = REG_RD(bp, MISC_REG_SPIO_INT);
6956 val |= (MISC_SPIO_SPIO5 << MISC_SPIO_INT_OLD_SET_POS);
6957 REG_WR(bp, MISC_REG_SPIO_INT, val);
6958
6959 /* enable interrupt to signal the IGU */
6960 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
6961 val |= MISC_SPIO_SPIO5;
6962 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
6963 }
6964
6965 void bnx2x_pf_disable(struct bnx2x *bp)
6966 {
6967 u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
6968 val &= ~IGU_PF_CONF_FUNC_EN;
6969
6970 REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
6971 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
6972 REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 0);
6973 }
6974
6975 static void bnx2x__common_init_phy(struct bnx2x *bp)
6976 {
6977 u32 shmem_base[2], shmem2_base[2];
6978 /* Avoid common init in case MFW supports LFA */
6979 if (SHMEM2_RD(bp, size) >
6980 (u32)offsetof(struct shmem2_region, lfa_host_addr[BP_PORT(bp)]))
6981 return;
6982 shmem_base[0] = bp->common.shmem_base;
6983 shmem2_base[0] = bp->common.shmem2_base;
6984 if (!CHIP_IS_E1x(bp)) {
6985 shmem_base[1] =
6986 SHMEM2_RD(bp, other_shmem_base_addr);
6987 shmem2_base[1] =
6988 SHMEM2_RD(bp, other_shmem2_base_addr);
6989 }
6990 bnx2x_acquire_phy_lock(bp);
6991 bnx2x_common_init_phy(bp, shmem_base, shmem2_base,
6992 bp->common.chip_id);
6993 bnx2x_release_phy_lock(bp);
6994 }
6995
6996 static void bnx2x_config_endianity(struct bnx2x *bp, u32 val)
6997 {
6998 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, val);
6999 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, val);
7000 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, val);
7001 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, val);
7002 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, val);
7003
7004 /* make sure this value is 0 */
7005 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
7006
7007 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, val);
7008 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, val);
7009 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, val);
7010 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, val);
7011 }
7012
7013 static void bnx2x_set_endianity(struct bnx2x *bp)
7014 {
7015 #ifdef __BIG_ENDIAN
7016 bnx2x_config_endianity(bp, 1);
7017 #else
7018 bnx2x_config_endianity(bp, 0);
7019 #endif
7020 }
7021
7022 static void bnx2x_reset_endianity(struct bnx2x *bp)
7023 {
7024 bnx2x_config_endianity(bp, 0);
7025 }
7026
7027
7028
7029
7030
7031
7032 static int bnx2x_init_hw_common(struct bnx2x *bp)
7033 {
7034 u32 val;
7035
7036 DP(NETIF_MSG_HW, "starting common init func %d\n", BP_ABS_FUNC(bp));
7037
7038 /*
7039  * take the RESET lock to protect undi_unload flow from accessing
7040  * registers while we're resetting the chip
7041  */
7042 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
7043
7044 bnx2x_reset_common(bp);
7045 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
7046
7047 val = 0xfffc;
7048 if (CHIP_IS_E3(bp)) {
7049 val |= MISC_REGISTERS_RESET_REG_2_MSTAT0;
7050 val |= MISC_REGISTERS_RESET_REG_2_MSTAT1;
7051 }
7052 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, val);
7053
7054 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
7055
7056 bnx2x_init_block(bp, BLOCK_MISC, PHASE_COMMON);
7057
7058 if (!CHIP_IS_E1x(bp)) {
7059 u8 abs_func_id;
7060
7061 /* 4-port mode or 2-port mode we need to turn off master-enable
7062  * for everyone, after that, turn it back on for self.
7063  * so, we disregard multi-function or not, and always disable
7064  * for all functions on the given path, this means 0,2,4,6 for
7065  * path 0 and 1,3,5,7 for path 1
7066  */
7067
7068 for (abs_func_id = BP_PATH(bp);
7069 abs_func_id < E2_FUNC_MAX*2; abs_func_id += 2) {
7070 if (abs_func_id == BP_ABS_FUNC(bp)) {
7071 REG_WR(bp,
7072 PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER,
7073 1);
7074 continue;
7075 }
7076
7077 bnx2x_pretend_func(bp, abs_func_id);
7078
7079 bnx2x_pf_disable(bp);
7080 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
7081 }
7082 }
7083
7084 bnx2x_init_block(bp, BLOCK_PXP, PHASE_COMMON);
7085 if (CHIP_IS_E1(bp)) {
7086 /* enable HW interrupt from PXP on USDM overflow
7087    bit 16 on INT_MASK_0 */
7088 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
7089 }
7090
7091 bnx2x_init_block(bp, BLOCK_PXP2, PHASE_COMMON);
7092 bnx2x_init_pxp(bp);
7093 bnx2x_set_endianity(bp);
7094 bnx2x_ilt_init_page_size(bp, INITOP_SET);
7095
7096 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
7097 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
7098
7099 /* let the HW do it's magic ... */
7100 msleep(100);
7101 /* finish PXP init */
7102 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
7103 if (val != 1) {
7104 BNX2X_ERR("PXP2 CFG failed\n");
7105 return -EBUSY;
7106 }
7107 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
7108 if (val != 1) {
7109 BNX2X_ERR("PXP2 RD_INIT failed\n");
7110 return -EBUSY;
7111 }
7112
7113 /* Timers bug workaround E2 only. We need to set the entire ILT to
7114  * have entries with value "0" and valid bit on.
7115  * This needs to be done by the first PF that is loaded in a path
7116  * (i.e. common phase)
7117  */
7118 if (!CHIP_IS_E1x(bp)) {
7119 /* In E2 there is a bug in the timers block that can cause function
7120  * 6 / 7 (i.e. vnic3) to start even if it is marked as "scan-off".
7121  * This occurs when a different function (func2,3) is being marked
7122  * as "scan-off". Real-life scenario for example: if a driver is being
7123  * load-unloaded while func6,7 are down. This will cause the timer to access
7124  * the ilt, translate to a logical address and send a request to read/write.
7125  * Since the ilt for the function that is down is not valid, this will cause
7126  * a translation error which is unrecoverable.
7127  * The Workaround is intended to make sure that when this happens nothing
7128  * fatal will occur. The workaround:
7129  *	1.  First PF driver which loads on a path will:
7130  *		a.  After taking the chip out of reset, by using pretend,
7131  *		    it will write "0" to the following registers of
7132  *		    the other vnics.
7133  *		    REG_WR(pdev, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
7134  *		    REG_WR(pdev, CFC_REG_WEAK_ENABLE_PF, 0);
7135  *		    REG_WR(pdev, CFC_REG_STRONG_ENABLE_PF, 0);
7136  *		b.  Write zero+valid to the entire ILT.
7137  *		c.  Init the first_timers_ilt_entry, last_timers_ilt_entry of
7138  *		    VNIC3 (of that port). The range allocated will be the
7139  *		    entire ILT. This is needed to prevent ILT range error.
7140  *	2.  Any PF driver load flow:
7141  *		a.  ILT update with the physical addresses of the allocated
7142  *		    logical pages.
7143  *		b.  Wait 20msec. - note that this timeout is needed to make
7144  *		    sure there are no requests in one of the PXP internal
7145  *		    queues with "old" ILT addresses.
7146  *		c.  PF enable in the PGLC.
7147  *		d.  Clear the was_error of the PF in the PGLC. (could have
7148  *		    occurred while driver was down)
7149  *		e.  PF enable in the CFC (WEAK + STRONG)
7150  *		f.  Timers scan enable
7151  *	3.  PF driver unload flow:
7152  *		a.  Clear the Timers scan_en.
7153  *		b.  Polling for scan_on=0 for that PF.
7154  *		c.  Disable ILT range.
7155  *
7156  * Notes:
7157  * - PF driver will always initialize all the ilt entries.
7158  * - Currently BC initializes those entries.
7159  */
7160
7161
7162
7163
7164
7165
7166
7167
7168
7169
7170
7171
7172
7173
7174
7175
7176
7177
7178
7179
7180
7181 struct ilt_client_info ilt_cli;
7182 struct bnx2x_ilt ilt;
7183 memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
7184 memset(&ilt, 0, sizeof(struct bnx2x_ilt));
7185
7186 /* initialize dummy TM client */
7187 ilt_cli.start = 0;
7188 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
7189 ilt_cli.client_num = ILT_CLIENT_TM;
7190
7191 /* Step 1: set zeroes to all ilt page entries with valid bit on
7192  * Step 2: set the timers first/last ilt entry to point
7193  * to the entire range to prevent ILT range error for 3rd/4th
7194  * vnic (this code assumes existence of the vnic)
7195  *
7196  * both steps performed by call to bnx2x_ilt_client_init_op()
7197  * with dummy TM client
7198  *
7199  * we must use pretend since PXP2_REG_RQ_##blk##_FIRST_ILT
7200  * and his brother are split registers
7201  */
7202 bnx2x_pretend_func(bp, (BP_PATH(bp) + 6));
7203 bnx2x_ilt_client_init_op_ilt(bp, &ilt, &ilt_cli, INITOP_CLEAR);
7204 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
7205
7206 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN, BNX2X_PXP_DRAM_ALIGN);
7207 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_RD, BNX2X_PXP_DRAM_ALIGN);
7208 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1);
7209 }
7210
7211 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
7212 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
7213
7214 if (!CHIP_IS_E1x(bp)) {
7215 int factor = CHIP_REV_IS_EMUL(bp) ? 1000 :
7216 (CHIP_REV_IS_FPGA(bp) ? 400 : 0);
7217 bnx2x_init_block(bp, BLOCK_PGLUE_B, PHASE_COMMON);
7218
7219 bnx2x_init_block(bp, BLOCK_ATC, PHASE_COMMON);
7220
7221
7222 do {
7223 msleep(200);
7224 val = REG_RD(bp, ATC_REG_ATC_INIT_DONE);
7225 } while (factor-- && (val != 1));
7226
7227 if (val != 1) {
7228 BNX2X_ERR("ATC_INIT failed\n");
7229 return -EBUSY;
7230 }
7231 }
7232
7233 bnx2x_init_block(bp, BLOCK_DMAE, PHASE_COMMON);
7234
7235 bnx2x_iov_init_dmae(bp);
7236
7237 /* clean the DMAE memory */
7238 bp->dmae_ready = 1;
7239 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8, 1);
7240
7241 bnx2x_init_block(bp, BLOCK_TCM, PHASE_COMMON);
7242
7243 bnx2x_init_block(bp, BLOCK_UCM, PHASE_COMMON);
7244
7245 bnx2x_init_block(bp, BLOCK_CCM, PHASE_COMMON);
7246
7247 bnx2x_init_block(bp, BLOCK_XCM, PHASE_COMMON);
7248
7249 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
7250 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
7251 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
7252 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
7253
7254 bnx2x_init_block(bp, BLOCK_QM, PHASE_COMMON);
7255
7256 /* QM queues pointers table */
7257 bnx2x_qm_init_ptr_table(bp, bp->qm_cid_count, INITOP_SET);
7258
7259 /* soft reset pulse */
7260 REG_WR(bp, QM_REG_SOFT_RESET, 1);
7261 REG_WR(bp, QM_REG_SOFT_RESET, 0);
7262
7263 if (CNIC_SUPPORT(bp))
7264 bnx2x_init_block(bp, BLOCK_TM, PHASE_COMMON);
7265
7266 bnx2x_init_block(bp, BLOCK_DORQ, PHASE_COMMON);
7267
7268 if (!CHIP_REV_IS_SLOW(bp))
7269 /* enable hw interrupt from doorbell Q */
7270 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
7271
7272 bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON);
7273
7274 bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON);
7275 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
7276
7277 if (!CHIP_IS_E1(bp))
7278 REG_WR(bp, PRS_REG_E1HOV_MODE, bp->path_has_ovlan);
7279
7280 if (!CHIP_IS_E1x(bp) && !CHIP_IS_E3B0(bp)) {
7281 if (IS_MF_AFEX(bp)) {
7282 /* configure that VNTag and VLAN headers must be
7283  * received in afex mode
7284  */
7285 REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC, 0xE);
7286 REG_WR(bp, PRS_REG_MUST_HAVE_HDRS, 0xA);
7287 REG_WR(bp, PRS_REG_HDRS_AFTER_TAG_0, 0x6);
7288 REG_WR(bp, PRS_REG_TAG_ETHERTYPE_0, 0x8926);
7289 REG_WR(bp, PRS_REG_TAG_LEN_0, 0x4);
7290 } else {
7291 /* Bit-map indicating which L2 hdrs may appear
7292  * after the basic Ethernet header
7293  */
7294 REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC,
7295 bp->path_has_ovlan ? 7 : 6);
7296 }
7297 }
7298
7299 bnx2x_init_block(bp, BLOCK_TSDM, PHASE_COMMON);
7300 bnx2x_init_block(bp, BLOCK_CSDM, PHASE_COMMON);
7301 bnx2x_init_block(bp, BLOCK_USDM, PHASE_COMMON);
7302 bnx2x_init_block(bp, BLOCK_XSDM, PHASE_COMMON);
7303
7304 if (!CHIP_IS_E1x(bp)) {
7305 /* reset VFC memories */
7306 REG_WR(bp, TSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST,
7307 VFC_MEMORIES_RST_REG_CAM_RST |
7308 VFC_MEMORIES_RST_REG_RAM_RST);
7309 REG_WR(bp, XSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST,
7310 VFC_MEMORIES_RST_REG_CAM_RST |
7311 VFC_MEMORIES_RST_REG_RAM_RST);
7312
7313 msleep(20);
7314 }
7315
7316 bnx2x_init_block(bp, BLOCK_TSEM, PHASE_COMMON);
7317 bnx2x_init_block(bp, BLOCK_USEM, PHASE_COMMON);
7318 bnx2x_init_block(bp, BLOCK_CSEM, PHASE_COMMON);
7319 bnx2x_init_block(bp, BLOCK_XSEM, PHASE_COMMON);
7320
7321 /* sync semi rtc */
7322 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
7323 0x80000000);
7324 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
7325 0x80000000);
7326
7327 bnx2x_init_block(bp, BLOCK_UPB, PHASE_COMMON);
7328 bnx2x_init_block(bp, BLOCK_XPB, PHASE_COMMON);
7329 bnx2x_init_block(bp, BLOCK_PBF, PHASE_COMMON);
7330
7331 if (!CHIP_IS_E1x(bp)) {
7332 if (IS_MF_AFEX(bp)) {
7333 /* configure that VNTag and VLAN headers must be
7334  * sent in afex mode
7335  */
7336 REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC, 0xE);
7337 REG_WR(bp, PBF_REG_MUST_HAVE_HDRS, 0xA);
7338 REG_WR(bp, PBF_REG_HDRS_AFTER_TAG_0, 0x6);
7339 REG_WR(bp, PBF_REG_TAG_ETHERTYPE_0, 0x8926);
7340 REG_WR(bp, PBF_REG_TAG_LEN_0, 0x4);
7341 } else {
7342 REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC,
7343 bp->path_has_ovlan ? 7 : 6);
7344 }
7345 }
7346
7347 REG_WR(bp, SRC_REG_SOFT_RST, 1);
7348
7349 bnx2x_init_block(bp, BLOCK_SRC, PHASE_COMMON);
7350
7351 if (CNIC_SUPPORT(bp)) {
7352 REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
7353 REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
7354 REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
7355 REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
7356 REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
7357 REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
7358 REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
7359 REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
7360 REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
7361 REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
7362 }
7363 REG_WR(bp, SRC_REG_SOFT_RST, 0);
7364
7365 if (sizeof(union cdu_context) != 1024)
7366 /* we currently assume that a context is 1024 bytes */
7367 dev_alert(&bp->pdev->dev,
7368 "please adjust the size of cdu_context(%ld)\n",
7369 (long)sizeof(union cdu_context));
7370
7371 bnx2x_init_block(bp, BLOCK_CDU, PHASE_COMMON);
7372 val = (4 << 24) + (0 << 12) + 1024;
7373 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
7374
7375 bnx2x_init_block(bp, BLOCK_CFC, PHASE_COMMON);
7376 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
7377
7378 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
7379
7380 /* set the thresholds to prevent CFC/CDU race */
7381 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
7382
7383 bnx2x_init_block(bp, BLOCK_HC, PHASE_COMMON);
7384
7385 if (!CHIP_IS_E1x(bp) && BP_NOMCP(bp))
7386 REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x36);
7387
7388 bnx2x_init_block(bp, BLOCK_IGU, PHASE_COMMON);
7389 bnx2x_init_block(bp, BLOCK_MISC_AEU, PHASE_COMMON);
7390
7391 /* Reset PCIE errors for debug */
7392 REG_WR(bp, 0x2814, 0xffffffff);
7393 REG_WR(bp, 0x3820, 0xffffffff);
7394
7395 if (!CHIP_IS_E1x(bp)) {
7396 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_CONTROL_5,
7397 (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 |
7398 PXPCS_TL_CONTROL_5_ERR_UNSPPORT));
7399 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC345_STAT,
7400 (PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4 |
7401 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3 |
7402 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2));
7403 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC678_STAT,
7404 (PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7 |
7405 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6 |
7406 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5));
7407 }
7408
7409 bnx2x_init_block(bp, BLOCK_NIG, PHASE_COMMON);
7410 if (!CHIP_IS_E1(bp)) {
7411 /* in E3 this done in per-port section */
7412 if (!CHIP_IS_E3(bp))
7413 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_MF(bp));
7414 }
7415 if (CHIP_IS_E1H(bp))
7416 /* not applicable for E2 (and above ...) */
7417 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_MF_SD(bp));
7418
7419 if (CHIP_REV_IS_SLOW(bp))
7420 msleep(200);
7421
7422 /* finish CFC init */
7423 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
7424 if (val != 1) {
7425 BNX2X_ERR("CFC LL_INIT failed\n");
7426 return -EBUSY;
7427 }
7428 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
7429 if (val != 1) {
7430 BNX2X_ERR("CFC AC_INIT failed\n");
7431 return -EBUSY;
7432 }
7433 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
7434 if (val != 1) {
7435 BNX2X_ERR("CFC CAM_INIT failed\n");
7436 return -EBUSY;
7437 }
7438 REG_WR(bp, CFC_REG_DEBUG0, 0);
7439
7440 if (CHIP_IS_E1(bp)) {
7441 /* read NIG statistic
7442    to see if this is our first up since powerup */
7443 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
7444 val = *bnx2x_sp(bp, wb_data[0]);
7445
7446 /* do internal memory self test */
7447 if ((val == 0) && bnx2x_int_mem_test(bp)) {
7448 BNX2X_ERR("internal mem self test failed\n");
7449 return -EBUSY;
7450 }
7451 }
7452
7453 bnx2x_setup_fan_failure_detection(bp);
7454
7455 /* clear PXP2 attentions */
7456 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
7457
7458 bnx2x_enable_blocks_attention(bp);
7459 bnx2x_enable_blocks_parity(bp);
7460
7461 if (!BP_NOMCP(bp)) {
7462 if (CHIP_IS_E1x(bp))
7463 bnx2x__common_init_phy(bp);
7464 } else
7465 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
7466
7467 if (SHMEM2_HAS(bp, netproc_fw_ver))
7468 SHMEM2_WR(bp, netproc_fw_ver, REG_RD(bp, XSEM_REG_PRAM));
7469
7470 return 0;
7471 }
7472
7473 /**
7474  * bnx2x_init_hw_common_chip - init HW at the COMMON_CHIP phase.
7475  *
7476  * @bp:		driver handle
7477  */
7478 static int bnx2x_init_hw_common_chip(struct bnx2x *bp)
7479 {
7480 int rc = bnx2x_init_hw_common(bp);
7481
7482 if (rc)
7483 return rc;
7484
7485 /* In E2 2-PORT mode, same ext phy is used for the two paths */
7486 if (!BP_NOMCP(bp))
7487 bnx2x__common_init_phy(bp);
7488
7489 return 0;
7490 }
7491
7492 static int bnx2x_init_hw_port(struct bnx2x *bp)
7493 {
7494 int port = BP_PORT(bp);
7495 int init_phase = port ? PHASE_PORT1 : PHASE_PORT0;
7496 u32 low, high;
7497 u32 val, reg;
7498
7499 DP(NETIF_MSG_HW, "starting port init port %d\n", port);
7500
7501 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
7502
7503 bnx2x_init_block(bp, BLOCK_MISC, init_phase);
7504 bnx2x_init_block(bp, BLOCK_PXP, init_phase);
7505 bnx2x_init_block(bp, BLOCK_PXP2, init_phase);
7506
7507 /* Timers bug workaround: disables the pf_master bit in pglue at
7508  * common phase, we need to enable it here before any dmae access are
7509  * attempted. Therefore we manually added the enable-master to the
7510  * port phase (it also happens in the function phase)
7511  */
7512 if (!CHIP_IS_E1x(bp))
7513 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
7514
7515 bnx2x_init_block(bp, BLOCK_ATC, init_phase);
7516 bnx2x_init_block(bp, BLOCK_DMAE, init_phase);
7517 bnx2x_init_block(bp, BLOCK_PGLUE_B, init_phase);
7518 bnx2x_init_block(bp, BLOCK_QM, init_phase);
7519
7520 bnx2x_init_block(bp, BLOCK_TCM, init_phase);
7521 bnx2x_init_block(bp, BLOCK_UCM, init_phase);
7522 bnx2x_init_block(bp, BLOCK_CCM, init_phase);
7523 bnx2x_init_block(bp, BLOCK_XCM, init_phase);
7524
7525 /* QM cid (connection) count */
7526 bnx2x_qm_init_cid_count(bp, bp->qm_cid_count, INITOP_SET);
7527
7528 if (CNIC_SUPPORT(bp)) {
7529 bnx2x_init_block(bp, BLOCK_TM, init_phase);
7530 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
7531 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
7532 }
7533
7534 bnx2x_init_block(bp, BLOCK_DORQ, init_phase);
7535
7536 bnx2x_init_block(bp, BLOCK_BRB1, init_phase);
7537
7538 if (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) {
7539
7540 if (IS_MF(bp))
7541 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
7542 else if (bp->dev->mtu > 4096) {
7543 if (bp->flags & ONE_PORT_FLAG)
7544 low = 160;
7545 else {
7546 val = bp->dev->mtu;
7547 /* (24*1024 + val*4)/256 */
7548 low = 96 + (val/64) +
7549 ((val % 64) ? 1 : 0);
7550 }
7551 } else
7552 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
7553 high = low + 56; /* 14*1024/256 */
7554 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
7555 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
7556 }
7557
7558 if (CHIP_MODE_IS_4_PORT(bp))
7559 REG_WR(bp, (BP_PORT(bp) ?
7560 BRB1_REG_MAC_GUARANTIED_1 :
7561 BRB1_REG_MAC_GUARANTIED_0), 40);
7562
7563 bnx2x_init_block(bp, BLOCK_PRS, init_phase);
7564 if (CHIP_IS_E3B0(bp)) {
7565 if (IS_MF_AFEX(bp)) {
7566 /* configure headers for AFEX mode */
7567 REG_WR(bp, BP_PORT(bp) ?
7568 PRS_REG_HDRS_AFTER_BASIC_PORT_1 :
7569 PRS_REG_HDRS_AFTER_BASIC_PORT_0, 0xE);
7570 REG_WR(bp, BP_PORT(bp) ?
7571 PRS_REG_HDRS_AFTER_TAG_0_PORT_1 :
7572 PRS_REG_HDRS_AFTER_TAG_0_PORT_0, 0x6);
7573 REG_WR(bp, BP_PORT(bp) ?
7574 PRS_REG_MUST_HAVE_HDRS_PORT_1 :
7575 PRS_REG_MUST_HAVE_HDRS_PORT_0, 0xA);
7576 } else {
7577 /* Ovlan exists only if we are in multi-function +
7578  * switch-dependent mode, in switch-independent there
7579  * is no ovlan headers
7580  */
7581 REG_WR(bp, BP_PORT(bp) ?
7582 PRS_REG_HDRS_AFTER_BASIC_PORT_1 :
7583 PRS_REG_HDRS_AFTER_BASIC_PORT_0,
7584 (bp->path_has_ovlan ? 7 : 6));
7585 }
7586 }
7587
7588 bnx2x_init_block(bp, BLOCK_TSDM, init_phase);
7589 bnx2x_init_block(bp, BLOCK_CSDM, init_phase);
7590 bnx2x_init_block(bp, BLOCK_USDM, init_phase);
7591 bnx2x_init_block(bp, BLOCK_XSDM, init_phase);
7592
7593 bnx2x_init_block(bp, BLOCK_TSEM, init_phase);
7594 bnx2x_init_block(bp, BLOCK_USEM, init_phase);
7595 bnx2x_init_block(bp, BLOCK_CSEM, init_phase);
7596 bnx2x_init_block(bp, BLOCK_XSEM, init_phase);
7597
7598 bnx2x_init_block(bp, BLOCK_UPB, init_phase);
7599 bnx2x_init_block(bp, BLOCK_XPB, init_phase);
7600
7601 bnx2x_init_block(bp, BLOCK_PBF, init_phase);
7602
7603 if (CHIP_IS_E1x(bp)) {
7604 /* configure PBF to work without PAUSE mtu 9000 */
7605 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
7606
7607 /* update threshold */
7608 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
7609 /* update init credit */
7610 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
7611
7612 /* probe changes */
7613 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
7614 udelay(50);
7615 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
7616 }
7617
7618 if (CNIC_SUPPORT(bp))
7619 bnx2x_init_block(bp, BLOCK_SRC, init_phase);
7620
7621 bnx2x_init_block(bp, BLOCK_CDU, init_phase);
7622 bnx2x_init_block(bp, BLOCK_CFC, init_phase);
7623
7624 if (CHIP_IS_E1(bp)) {
7625 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
7626 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
7627 }
7628 bnx2x_init_block(bp, BLOCK_HC, init_phase);
7629
7630 bnx2x_init_block(bp, BLOCK_IGU, init_phase);
7631
7632 bnx2x_init_block(bp, BLOCK_MISC_AEU, init_phase);
7633 /* init aeu_mask_attn_func_0/1:
7634  *  - SF mode: bits 3-7 are masked. Only bits 0-2 are in use
7635  *  - MF mode: bit 3 is masked. Bits 0-2 are in use as in SF
7636  *             bits 4-7 are used for "per vn group attention" */
7637 val = IS_MF(bp) ? 0xF7 : 0x7;
7638 /* Enable DCBX attention for all but E1 */
7639 val |= CHIP_IS_E1(bp) ? 0 : 0x10;
7640 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, val);
7641
7642 /* SCPAD_PARITY should NOT trigger close the gates */
7643 reg = port ? MISC_REG_AEU_ENABLE4_NIG_1 : MISC_REG_AEU_ENABLE4_NIG_0;
7644 REG_WR(bp, reg,
7645 REG_RD(bp, reg) &
7646 ~AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY);
7647
7648 reg = port ? MISC_REG_AEU_ENABLE4_PXP_1 : MISC_REG_AEU_ENABLE4_PXP_0;
7649 REG_WR(bp, reg,
7650 REG_RD(bp, reg) &
7651 ~AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY);
7652
7653 bnx2x_init_block(bp, BLOCK_NIG, init_phase);
7654
7655 if (!CHIP_IS_E1x(bp)) {
7656 /* Bit-map indicating which L2 hdrs may appear after the
7657  * basic Ethernet header
7658  */
7659 if (IS_MF_AFEX(bp))
7660 REG_WR(bp, BP_PORT(bp) ?
7661 NIG_REG_P1_HDRS_AFTER_BASIC :
7662 NIG_REG_P0_HDRS_AFTER_BASIC, 0xE);
7663 else
7664 REG_WR(bp, BP_PORT(bp) ?
7665 NIG_REG_P1_HDRS_AFTER_BASIC :
7666 NIG_REG_P0_HDRS_AFTER_BASIC,
7667 IS_MF_SD(bp) ? 7 : 6);
7668
7669 if (CHIP_IS_E3(bp))
7670 REG_WR(bp, BP_PORT(bp) ?
7671 NIG_REG_LLH1_MF_MODE :
7672 NIG_REG_LLH_MF_MODE, IS_MF(bp));
7673 }
7674 if (!CHIP_IS_E3(bp))
7675 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
7676
7677 if (!CHIP_IS_E1(bp)) {
7678 /* 0x2 disable mf_ov, 0x1 enable */
7679 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
7680 (IS_MF_SD(bp) ? 0x1 : 0x2));
7681
7682 if (!CHIP_IS_E1x(bp)) {
7683 val = 0;
7684 switch (bp->mf_mode) {
7685 case MULTI_FUNCTION_SD:
7686 val = 1;
7687 break;
7688 case MULTI_FUNCTION_SI:
7689 case MULTI_FUNCTION_AFEX:
7690 val = 2;
7691 break;
7692 }
7693
7694 REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_CLS_TYPE :
7695 NIG_REG_LLH0_CLS_TYPE), val);
7696 }
7697 {
7698 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
7699 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
7700 REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
7701 }
7702 }
7703
7704 /* If SPIO5 is set to generate interrupts, enable it for this port */
7705 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
7706 if (val & MISC_SPIO_SPIO5) {
7707 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
7708 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
7709 val = REG_RD(bp, reg_addr);
7710 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
7711 REG_WR(bp, reg_addr, val);
7712 }
7713
7714 if (CHIP_IS_E3B0(bp))
7715 bp->flags |= PTP_SUPPORTED;
7716
7717 return 0;
7718 }
7719
7720 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
7721 {
7722 int reg;
7723 u32 wb_write[2];
7724
7725 if (CHIP_IS_E1(bp))
7726 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
7727 else
7728 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
7729
7730 wb_write[0] = ONCHIP_ADDR1(addr);
7731 wb_write[1] = ONCHIP_ADDR2(addr);
7732 REG_WR_DMAE(bp, reg, wb_write, 2);
7733 }
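/* Added note: an ILT entry is a 64-bit value, so it is written as a single
 * 2-dword wide-bus transaction via REG_WR_DMAE rather than two REG_WR calls.
 * ONCHIP_ADDR1()/ONCHIP_ADDR2() split the DMA address into the low and high
 * halves of the entry; the exact bit layout (including the valid bit) is
 * defined by those macros in the driver headers.
 */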
7734
7735 void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id, bool is_pf)
7736 {
7737 u32 data, ctl, cnt = 100;
7738 u32 igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA;
7739 u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL;
7740 u32 igu_addr_ack = IGU_REG_CSTORM_TYPE_0_SB_CLEANUP + (idu_sb_id/32)*4;
7741 u32 sb_bit = 1 << (idu_sb_id%32);
7742 u32 func_encode = func | (is_pf ? 1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT;
7743 u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + idu_sb_id;
7744
7745 /* Not supported in BC mode */
7746 if (CHIP_INT_MODE_IS_BC(bp))
7747 return;
7748
7749 data = (IGU_USE_REGISTER_cstorm_type_0_sb_cleanup
7750 << IGU_REGULAR_CLEANUP_TYPE_SHIFT) |
7751 IGU_REGULAR_CLEANUP_SET |
7752 IGU_REGULAR_BCLEANUP;
7753
7754 ctl = addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT |
7755 func_encode << IGU_CTRL_REG_FID_SHIFT |
7756 IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT;
7757
7758 DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
7759 data, igu_addr_data);
7760 REG_WR(bp, igu_addr_data, data);
7761 barrier();
7762 DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
7763 ctl, igu_addr_ctl);
7764 REG_WR(bp, igu_addr_ctl, ctl);
7765 barrier();
7766
7767 /* wait for clean up to finish */
7768 while (!(REG_RD(bp, igu_addr_ack) & sb_bit) && --cnt)
7769 msleep(20);
7770
7771 if (!(REG_RD(bp, igu_addr_ack) & sb_bit)) {
7772 DP(NETIF_MSG_HW,
7773 "Unable to finish IGU cleanup: idu_sb_id %d offset %d bit %d (cnt %d)\n",
7774 idu_sb_id, idu_sb_id/32, idu_sb_id%32, cnt);
7775 }
7776 }
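/* Added walk-through: the two GRC writes above form a single IGU command -
 * 'data' requests a type-0 status-block cleanup (CLEANUP_SET + BCLEANUP) and
 * 'ctl' encodes the target address (prod-update base + SB id), the function
 * id and a write opcode. Completion is reported per SB as one bit in the
 * CSTORM cleanup bitmap, which is polled above for at most
 * 100 * 20 ms = 2 seconds before logging a failure.
 */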
7777
7778 static void bnx2x_igu_clear_sb(struct bnx2x *bp, u8 idu_sb_id)
7779 {
7780 bnx2x_igu_clear_sb_gen(bp, BP_FUNC(bp), idu_sb_id, true /*PF*/);
7781 }
7782
7783 static void bnx2x_clear_func_ilt(struct bnx2x *bp, u32 func)
7784 {
7785 u32 i, base = FUNC_ILT_BASE(func);
7786 for (i = base; i < base + ILT_PER_FUNC; i++)
7787 bnx2x_ilt_wr(bp, i, 0);
7788 }
7789
7790 static void bnx2x_init_searcher(struct bnx2x *bp)
7791 {
7792 int port = BP_PORT(bp);
7793 bnx2x_src_init_t2(bp, bp->t2, bp->t2_mapping, SRC_CONN_NUM);
7794
7795 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, SRC_HASH_BITS);
7796 }
7797
7798 static inline int bnx2x_func_switch_update(struct bnx2x *bp, int suspend)
7799 {
7800 int rc;
7801 struct bnx2x_func_state_params func_params = {NULL};
7802 struct bnx2x_func_switch_update_params *switch_update_params =
7803 &func_params.params.switch_update;
7804
7805 /* Prepare parameters for function state transitions */
7806 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
7807 __set_bit(RAMROD_RETRY, &func_params.ramrod_flags);
7808
7809 func_params.f_obj = &bp->func_obj;
7810 func_params.cmd = BNX2X_F_CMD_SWITCH_UPDATE;
7811
7812 /* Function parameters */
7813 __set_bit(BNX2X_F_UPDATE_TX_SWITCH_SUSPEND_CHNG,
7814 &switch_update_params->changes);
7815 if (suspend)
7816 __set_bit(BNX2X_F_UPDATE_TX_SWITCH_SUSPEND,
7817 &switch_update_params->changes);
7818
7819 rc = bnx2x_func_state_change(bp, &func_params);
7820
7821 return rc;
7822 }
7823
7824 static int bnx2x_reset_nic_mode(struct bnx2x *bp)
7825 {
7826 int rc, i, port = BP_PORT(bp);
7827 int vlan_en = 0, mac_en[NUM_MACS];
7828
7829 /* Close the MAC Door */
7830 if (bp->mf_mode == SINGLE_FUNCTION) {
7831 bnx2x_set_rx_filter(&bp->link_params, 0);
7832 } else {
7833 vlan_en = REG_RD(bp, port ? NIG_REG_LLH1_FUNC_EN :
7834 NIG_REG_LLH0_FUNC_EN);
7835 REG_WR(bp, port ? NIG_REG_LLH1_FUNC_EN :
7836 NIG_REG_LLH0_FUNC_EN, 0);
7837 for (i = 0; i < NUM_MACS; i++) {
7838 mac_en[i] = REG_RD(bp, port ?
7839 (NIG_REG_LLH1_FUNC_MEM_ENABLE +
7840 4 * i) :
7841 (NIG_REG_LLH0_FUNC_MEM_ENABLE +
7842 4 * i));
7843 REG_WR(bp, port ? (NIG_REG_LLH1_FUNC_MEM_ENABLE +
7844 4 * i) :
7845 (NIG_REG_LLH0_FUNC_MEM_ENABLE + 4 * i), 0);
7846 }
7847 }
7848
7849
7850 REG_WR(bp, port ? NIG_REG_P0_TX_MNG_HOST_ENABLE :
7851 NIG_REG_P1_TX_MNG_HOST_ENABLE, 0);
7852
7853 /* Suspend Tx switching to the PF. Completion of this ramrod
7854  * further guarantees that all the packets of that PF / child
7855  * VFs in BRB were processed by the Parser, so it is safe to
7856  * change the NIC_MODE register.
7857  */
7858 rc = bnx2x_func_switch_update(bp, 1);
7859 if (rc) {
7860 BNX2X_ERR("Can't suspend tx-switching!\n");
7861 return rc;
7862 }
7863
7864 /* Change NIC_MODE register */
7865 REG_WR(bp, PRS_REG_NIC_MODE, 0);
7866
7867 /* Open the MAC Door */
7868 if (bp->mf_mode == SINGLE_FUNCTION) {
7869 bnx2x_set_rx_filter(&bp->link_params, 1);
7870 } else {
7871 REG_WR(bp, port ? NIG_REG_LLH1_FUNC_EN :
7872 NIG_REG_LLH0_FUNC_EN, vlan_en);
7873 for (i = 0; i < NUM_MACS; i++) {
7874 REG_WR(bp, port ? (NIG_REG_LLH1_FUNC_MEM_ENABLE +
7875 4 * i) :
7876 (NIG_REG_LLH0_FUNC_MEM_ENABLE + 4 * i),
7877 mac_en[i]);
7878 }
7879 }
7880
7881 /* Enable BMC to host */
7882 REG_WR(bp, port ? NIG_REG_P0_TX_MNG_HOST_ENABLE :
7883 NIG_REG_P1_TX_MNG_HOST_ENABLE, 1);
7884
7885 /* Resume Tx switching to the PF */
7886 rc = bnx2x_func_switch_update(bp, 0);
7887 if (rc) {
7888 BNX2X_ERR("Can't resume tx-switching!\n");
7889 return rc;
7890 }
7891
7892 DP(NETIF_MSG_IFUP, "NIC MODE disabled\n");
7893 return 0;
7894 }
7895
7896 int bnx2x_init_hw_func_cnic(struct bnx2x *bp)
7897 {
7898 int rc;
7899
7900 bnx2x_ilt_init_op_cnic(bp, INITOP_SET);
7901
7902 if (CONFIGURE_NIC_MODE(bp)) {
7903 /* Configure searcher as part of function hw init */
7904 bnx2x_init_searcher(bp);
7905
7906 /* Reset NIC mode */
7907 rc = bnx2x_reset_nic_mode(bp);
7908 if (rc)
7909 BNX2X_ERR("Can't change NIC mode!\n");
7910 return rc;
7911 }
7912
7913 return 0;
7914 }
7915
7916 /* previous driver DMAE transaction may have occurred when pre-boot stage
7917  * ended and boot began, or when kdump kernel was loaded. Either case would
7918  * invalidate the addresses of the transaction, resulting in was-error bit
7919  * set in the pci causing all hw-to-host pcie transactions to timeout. If
7920  * this happened we want to clear the interrupt which detected this from the
7921  * pglueb and the was-done bit
7922  */
7923 static void bnx2x_clean_pglue_errors(struct bnx2x *bp)
7924 {
7925 if (!CHIP_IS_E1x(bp))
7926 REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR,
7927 1 << BP_ABS_FUNC(bp));
7928 }
7929
7930 static int bnx2x_init_hw_func(struct bnx2x *bp)
7931 {
7932 int port = BP_PORT(bp);
7933 int func = BP_FUNC(bp);
7934 int init_phase = PHASE_PF0 + func;
7935 struct bnx2x_ilt *ilt = BP_ILT(bp);
7936 u16 cdu_ilt_start;
7937 u32 addr, val;
7938 u32 main_mem_base, main_mem_size, main_mem_prty_clr;
7939 int i, main_mem_width, rc;
7940
7941 DP(NETIF_MSG_HW, "starting func init func %d\n", func);
7942
7943 /* FLR cleanup - hmmm */
7944 if (!CHIP_IS_E1x(bp)) {
7945 rc = bnx2x_pf_flr_clnup(bp);
7946 if (rc) {
7947 bnx2x_fw_dump(bp);
7948 return rc;
7949 }
7950 }
7951
7952 /* set MSI reconfigure capability */
7953 if (bp->common.int_block == INT_BLOCK_HC) {
7954 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
7955 val = REG_RD(bp, addr);
7956 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
7957 REG_WR(bp, addr, val);
7958 }
7959
7960 bnx2x_init_block(bp, BLOCK_PXP, init_phase);
7961 bnx2x_init_block(bp, BLOCK_PXP2, init_phase);
7962
7963 ilt = BP_ILT(bp);
7964 cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;
7965
7966 if (IS_SRIOV(bp))
7967 cdu_ilt_start += BNX2X_FIRST_VF_CID/ILT_PAGE_CIDS;
7968 cdu_ilt_start = bnx2x_iov_init_ilt(bp, cdu_ilt_start);
7969
7970 /* since BNX2X_FIRST_VF_CID > 0 the PF L2 cids precedes
7971  * those of the VFs, so start line should be reset
7972  */
7973 cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;
7974 for (i = 0; i < L2_ILT_LINES(bp); i++) {
7975 ilt->lines[cdu_ilt_start + i].page = bp->context[i].vcxt;
7976 ilt->lines[cdu_ilt_start + i].page_mapping =
7977 bp->context[i].cxt_mapping;
7978 ilt->lines[cdu_ilt_start + i].size = bp->context[i].size;
7979 }
7980
7981 bnx2x_ilt_init_op(bp, INITOP_SET);
7982
7983 if (!CONFIGURE_NIC_MODE(bp)) {
7984 bnx2x_init_searcher(bp);
7985 REG_WR(bp, PRS_REG_NIC_MODE, 0);
7986 DP(NETIF_MSG_IFUP, "NIC MODE disabled\n");
7987 } else {
7988 /* Set NIC mode */
7989 REG_WR(bp, PRS_REG_NIC_MODE, 1);
7990 DP(NETIF_MSG_IFUP, "NIC MODE configured\n");
7991 }
7992
7993 if (!CHIP_IS_E1x(bp)) {
7994 u32 pf_conf = IGU_PF_CONF_FUNC_EN;
7995
7996 /* Turn on a single ISR mode in IGU if driver is going to use
7997  * INT#x or MSI
7998  */
7999 if (!(bp->flags & USING_MSIX_FLAG))
8000 pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
8001
8002 /* Timers workaround bug: function init part.
8003  * Need to wait 20msec after initializing ILT,
8004  * needed to make sure there are no requests in
8005  * one of the PXP internal queues with "old" ILT addresses
8006  */
8007 msleep(20);
8008
8009 /* Master enable - Due to WB DMAE writes performed before this
8010  * register is re-initialized as part of the regular function
8011  * init
8012  */
8013 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
8014
8015 REG_WR(bp, IGU_REG_PF_CONFIGURATION, pf_conf);
8016 }
8017
8018 bp->dmae_ready = 1;
8019
8020 bnx2x_init_block(bp, BLOCK_PGLUE_B, init_phase);
8021
8022 bnx2x_clean_pglue_errors(bp);
8023
8024 bnx2x_init_block(bp, BLOCK_ATC, init_phase);
8025 bnx2x_init_block(bp, BLOCK_DMAE, init_phase);
8026 bnx2x_init_block(bp, BLOCK_NIG, init_phase);
8027 bnx2x_init_block(bp, BLOCK_SRC, init_phase);
8028 bnx2x_init_block(bp, BLOCK_MISC, init_phase);
8029 bnx2x_init_block(bp, BLOCK_TCM, init_phase);
8030 bnx2x_init_block(bp, BLOCK_UCM, init_phase);
8031 bnx2x_init_block(bp, BLOCK_CCM, init_phase);
8032 bnx2x_init_block(bp, BLOCK_XCM, init_phase);
8033 bnx2x_init_block(bp, BLOCK_TSEM, init_phase);
8034 bnx2x_init_block(bp, BLOCK_USEM, init_phase);
8035 bnx2x_init_block(bp, BLOCK_CSEM, init_phase);
8036 bnx2x_init_block(bp, BLOCK_XSEM, init_phase);
8037
8038 if (!CHIP_IS_E1x(bp))
8039 REG_WR(bp, QM_REG_PF_EN, 1);
8040
8041 if (!CHIP_IS_E1x(bp)) {
8042 REG_WR(bp, TSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
8043 REG_WR(bp, USEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
8044 REG_WR(bp, CSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
8045 REG_WR(bp, XSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
8046 }
8047 bnx2x_init_block(bp, BLOCK_QM, init_phase);
8048
8049 bnx2x_init_block(bp, BLOCK_TM, init_phase);
8050 bnx2x_init_block(bp, BLOCK_DORQ, init_phase);
8051 REG_WR(bp, DORQ_REG_MODE_ACT, 1);
8052
8053 bnx2x_iov_init_dq(bp);
8054
8055 bnx2x_init_block(bp, BLOCK_BRB1, init_phase);
8056 bnx2x_init_block(bp, BLOCK_PRS, init_phase);
8057 bnx2x_init_block(bp, BLOCK_TSDM, init_phase);
8058 bnx2x_init_block(bp, BLOCK_CSDM, init_phase);
8059 bnx2x_init_block(bp, BLOCK_USDM, init_phase);
8060 bnx2x_init_block(bp, BLOCK_XSDM, init_phase);
8061 bnx2x_init_block(bp, BLOCK_UPB, init_phase);
8062 bnx2x_init_block(bp, BLOCK_XPB, init_phase);
8063 bnx2x_init_block(bp, BLOCK_PBF, init_phase);
8064 if (!CHIP_IS_E1x(bp))
8065 REG_WR(bp, PBF_REG_DISABLE_PF, 0);
8066
8067 bnx2x_init_block(bp, BLOCK_CDU, init_phase);
8068
8069 bnx2x_init_block(bp, BLOCK_CFC, init_phase);
8070
8071 if (!CHIP_IS_E1x(bp))
8072 REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 1);
8073
8074 if (IS_MF(bp)) {
8075 if (!(IS_MF_UFP(bp) && BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp))) {
8076 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port * 8, 1);
8077 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port * 8,
8078 bp->mf_ov);
8079 }
8080 }
8081
8082 bnx2x_init_block(bp, BLOCK_MISC_AEU, init_phase);
8083
8084 /* HC init per function */
8085 if (bp->common.int_block == INT_BLOCK_HC) {
8086 if (CHIP_IS_E1H(bp)) {
8087 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
8088
8089 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
8090 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
8091 }
8092 bnx2x_init_block(bp, BLOCK_HC, init_phase);
8093
8094 } else {
8095 int num_segs, sb_idx, prod_offset;
8096
8097 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
8098
8099 if (!CHIP_IS_E1x(bp)) {
8100 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
8101 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
8102 }
8103
8104 bnx2x_init_block(bp, BLOCK_IGU, init_phase);
8105
8106 if (!CHIP_IS_E1x(bp)) {
8107 int dsb_idx = 0;
8108 /**
8109  * Producer memory:
8110  * E2 mode: address 0-135 match to the mapping memory;
8111  * 136 - PF0 default prod; 137 - PF1 default prod;
8112  * 138 - PF2 default prod; 139 - PF3 default prod;
8113  * 140 - PF0 attn prod;    141 - PF1 attn prod;
8114  * 142 - PF2 attn prod;    143 - PF3 attn prod;
8115  * 144-147 reserved.
8116  *
8117  * E1.5 mode - In backward compatible mode;
8118  * for non default SB; each even line in the memory
8119  * holds the U producer and each odd line hold
8120  * the C producer. The first 128 producers are for
8121  * NDSB (PF0 - 0-31; PF1 - 32-63 and so on). The last 20
8122  * producers are for the DSB for each PF.
8123  * Each PF has five segments: (the order inside each
8124  * segment is PF0; PF1; PF2; PF3) - 128-131 U prods;
8125  * 132-135 C prods; 136-139 X prods; 140-143 T prods;
8126  * 144-147 attn prods;
8127  */
8128 /* non-default-status-blocks */
8129 num_segs = CHIP_INT_MODE_IS_BC(bp) ?
8130 IGU_BC_NDSB_NUM_SEGS : IGU_NORM_NDSB_NUM_SEGS;
8131 for (sb_idx = 0; sb_idx < bp->igu_sb_cnt; sb_idx++) {
8132 prod_offset = (bp->igu_base_sb + sb_idx) *
8133 num_segs;
8134
8135 for (i = 0; i < num_segs; i++) {
8136 addr = IGU_REG_PROD_CONS_MEMORY +
8137 (prod_offset + i) * 4;
8138 REG_WR(bp, addr, 0);
8139 }
8140 /* send consumer update with value 0 */
8141 bnx2x_ack_sb(bp, bp->igu_base_sb + sb_idx,
8142 USTORM_ID, 0, IGU_INT_NOP, 1);
8143 bnx2x_igu_clear_sb(bp,
8144 bp->igu_base_sb + sb_idx);
8145 }
8146
8147 /* default-status-blocks */
8148 num_segs = CHIP_INT_MODE_IS_BC(bp) ?
8149 IGU_BC_DSB_NUM_SEGS : IGU_NORM_DSB_NUM_SEGS;
8150
8151 if (CHIP_MODE_IS_4_PORT(bp))
8152 dsb_idx = BP_FUNC(bp);
8153 else
8154 dsb_idx = BP_VN(bp);
8155
8156 prod_offset = (CHIP_INT_MODE_IS_BC(bp) ?
8157 IGU_BC_BASE_DSB_PROD + dsb_idx :
8158 IGU_NORM_BASE_DSB_PROD + dsb_idx);
8159
8160 			/* IGU producers come in chunks of E1HVN_MAX (4),
8161 			 * regardless of the current chip mode.
8162 			 */
8163 
8164 for (i = 0; i < (num_segs * E1HVN_MAX);
8165 i += E1HVN_MAX) {
8166 addr = IGU_REG_PROD_CONS_MEMORY +
8167 (prod_offset + i)*4;
8168 REG_WR(bp, addr, 0);
8169 }
8170
8171 if (CHIP_INT_MODE_IS_BC(bp)) {
8172 bnx2x_ack_sb(bp, bp->igu_dsb_id,
8173 USTORM_ID, 0, IGU_INT_NOP, 1);
8174 bnx2x_ack_sb(bp, bp->igu_dsb_id,
8175 CSTORM_ID, 0, IGU_INT_NOP, 1);
8176 bnx2x_ack_sb(bp, bp->igu_dsb_id,
8177 XSTORM_ID, 0, IGU_INT_NOP, 1);
8178 bnx2x_ack_sb(bp, bp->igu_dsb_id,
8179 TSTORM_ID, 0, IGU_INT_NOP, 1);
8180 bnx2x_ack_sb(bp, bp->igu_dsb_id,
8181 ATTENTION_ID, 0, IGU_INT_NOP, 1);
8182 } else {
8183 bnx2x_ack_sb(bp, bp->igu_dsb_id,
8184 USTORM_ID, 0, IGU_INT_NOP, 1);
8185 bnx2x_ack_sb(bp, bp->igu_dsb_id,
8186 ATTENTION_ID, 0, IGU_INT_NOP, 1);
8187 }
8188 bnx2x_igu_clear_sb(bp, bp->igu_dsb_id);
8189
8190
8191 			/* Clear the IGU's per-SB masks and pending (PBA) bits */
8192 REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
8193 REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
8194 REG_WR(bp, IGU_REG_SB_MASK_LSB, 0);
8195 REG_WR(bp, IGU_REG_SB_MASK_MSB, 0);
8196 REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0);
8197 REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0);
8198 }
8199 }
8200
8201 	/* Reset PCIE errors for debug */
8202 REG_WR(bp, 0x2114, 0xffffffff);
8203 REG_WR(bp, 0x2120, 0xffffffff);
8204
8205 if (CHIP_IS_E1x(bp)) {
8206 main_mem_size = HC_REG_MAIN_MEMORY_SIZE / 2;
8207 main_mem_base = HC_REG_MAIN_MEMORY +
8208 BP_PORT(bp) * (main_mem_size * 4);
8209 main_mem_prty_clr = HC_REG_HC_PRTY_STS_CLR;
8210 main_mem_width = 8;
8211
8212 val = REG_RD(bp, main_mem_prty_clr);
8213 if (val)
8214 DP(NETIF_MSG_HW,
8215 "Hmmm... Parity errors in HC block during function init (0x%x)!\n",
8216 val);
8217
8218 		/* Clear "false" parity errors in the MSI-X table */
8219 for (i = main_mem_base;
8220 i < main_mem_base + main_mem_size * 4;
8221 i += main_mem_width) {
8222 bnx2x_read_dmae(bp, i, main_mem_width / 4);
8223 bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data),
8224 i, main_mem_width / 4);
8225 }
8226
8227 REG_RD(bp, main_mem_prty_clr);
8228 }
8229
8230 #ifdef BNX2X_STOP_ON_ERROR
8231 	/* Enable STORMs SP logging */
8232 REG_WR8(bp, BAR_USTRORM_INTMEM +
8233 USTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
8234 REG_WR8(bp, BAR_TSTRORM_INTMEM +
8235 TSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
8236 REG_WR8(bp, BAR_CSTRORM_INTMEM +
8237 CSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
8238 REG_WR8(bp, BAR_XSTRORM_INTMEM +
8239 XSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
8240 #endif
8241
8242 bnx2x_phy_probe(&bp->link_params);
8243
8244 return 0;
8245 }
8246
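/* Free the CNIC auxiliary DMA memory: the CNIC ILT lines, the CNIC
 * status block (E2 or E1x layout) and the searcher T2 table.
 */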
8247 void bnx2x_free_mem_cnic(struct bnx2x *bp)
8248 {
8249 bnx2x_ilt_mem_op_cnic(bp, ILT_MEMOP_FREE);
8250
8251 if (!CHIP_IS_E1x(bp))
8252 BNX2X_PCI_FREE(bp->cnic_sb.e2_sb, bp->cnic_sb_mapping,
8253 sizeof(struct host_hc_status_block_e2));
8254 else
8255 BNX2X_PCI_FREE(bp->cnic_sb.e1x_sb, bp->cnic_sb_mapping,
8256 sizeof(struct host_hc_status_block_e1x));
8257
8258 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ);
8259 }
8260
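/* Free all driver DMA memory. A VF only owns the FW statistics buffer;
 * for a PF this also releases the default status block, slowpath buffer,
 * CDU context pages, ILT lines, SPQ, event queue ring and searcher T2.
 */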
8261 void bnx2x_free_mem(struct bnx2x *bp)
8262 {
8263 int i;
8264
8265 BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
8266 bp->fw_stats_data_sz + bp->fw_stats_req_sz);
8267
8268 if (IS_VF(bp))
8269 return;
8270
8271 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
8272 sizeof(struct host_sp_status_block));
8273
8274 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
8275 sizeof(struct bnx2x_slowpath));
8276
8277 for (i = 0; i < L2_ILT_LINES(bp); i++)
8278 BNX2X_PCI_FREE(bp->context[i].vcxt, bp->context[i].cxt_mapping,
8279 bp->context[i].size);
8280 bnx2x_ilt_mem_op(bp, ILT_MEMOP_FREE);
8281
8282 BNX2X_FREE(bp->ilt->lines);
8283
8284 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
8285
8286 BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping,
8287 BCM_PAGE_SIZE * NUM_EQ_PAGES);
8288
8289 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ);
8290
8291 bnx2x_iov_free_mem(bp);
8292 }
8293
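/* Allocate the CNIC auxiliary memory: the CNIC status block (E2 or E1x
 * layout), the searcher T2 table (if not already allocated) and the
 * CNIC ILT lines. On failure, bnx2x_free_mem_cnic() releases whatever
 * was allocated.
 */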
8294 int bnx2x_alloc_mem_cnic(struct bnx2x *bp)
8295 {
8296 if (!CHIP_IS_E1x(bp)) {
8297 		/* size = the status block + ramrod buffers */
8298 bp->cnic_sb.e2_sb = BNX2X_PCI_ALLOC(&bp->cnic_sb_mapping,
8299 sizeof(struct host_hc_status_block_e2));
8300 if (!bp->cnic_sb.e2_sb)
8301 goto alloc_mem_err;
8302 } else {
8303 bp->cnic_sb.e1x_sb = BNX2X_PCI_ALLOC(&bp->cnic_sb_mapping,
8304 sizeof(struct host_hc_status_block_e1x));
8305 if (!bp->cnic_sb.e1x_sb)
8306 goto alloc_mem_err;
8307 }
8308
8309 if (CONFIGURE_NIC_MODE(bp) && !bp->t2) {
8310 		/* allocate searcher T2 table, as it wasn't allocated before */
8311 bp->t2 = BNX2X_PCI_ALLOC(&bp->t2_mapping, SRC_T2_SZ);
8312 if (!bp->t2)
8313 goto alloc_mem_err;
8314 }
8315
8316 	/* write address to which the L5 driver should insert its values */
8317 bp->cnic_eth_dev.addr_drv_info_to_mcp =
8318 &bp->slowpath->drv_info_to_mcp;
8319
8320 if (bnx2x_ilt_mem_op_cnic(bp, ILT_MEMOP_ALLOC))
8321 goto alloc_mem_err;
8322
8323 return 0;
8324
8325 alloc_mem_err:
8326 bnx2x_free_mem_cnic(bp);
8327 BNX2X_ERR("Can't allocate memory\n");
8328 return -ENOMEM;
8329 }
8330
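/* Allocate all PF DMA memory: searcher T2 (when the SRC block is used),
 * default status block, slowpath buffer, CDU context pages, ILT lines,
 * SRIOV memory, the slow path queue and the event queue ring. On any
 * failure everything allocated so far is freed and -ENOMEM is returned.
 */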
8331 int bnx2x_alloc_mem(struct bnx2x *bp)
8332 {
8333 int i, allocated, context_size;
8334
8335 if (!CONFIGURE_NIC_MODE(bp) && !bp->t2) {
8336 		/* allocate searcher T2 table */
8337 bp->t2 = BNX2X_PCI_ALLOC(&bp->t2_mapping, SRC_T2_SZ);
8338 if (!bp->t2)
8339 goto alloc_mem_err;
8340 }
8341
8342 bp->def_status_blk = BNX2X_PCI_ALLOC(&bp->def_status_blk_mapping,
8343 sizeof(struct host_sp_status_block));
8344 if (!bp->def_status_blk)
8345 goto alloc_mem_err;
8346
8347 bp->slowpath = BNX2X_PCI_ALLOC(&bp->slowpath_mapping,
8348 sizeof(struct bnx2x_slowpath));
8349 if (!bp->slowpath)
8350 goto alloc_mem_err;
8351
8352 	/* Allocate memory for CDU context:
8353 	 * This memory is allocated separately and not in the generic ILT
8354 	 * functions because CDU differs in few aspects:
8355 	 * 1. There are multiple entities allocating memory for context -
8356 	 * the regular L2 driver, CNIC and SRIOV; each separately controls
8357 	 * its own ILT lines.
8358 	 * 2. The CDU page size is not a constant (it depends on
8359 	 * BCM_PAGE_SIZE and on whether CNIC SRC elements are in use).
8360 	 * The total context size is the per-connection CDU context size
8361 	 * times the number of L2 CIDs this function supports
8362 	 * (BNX2X_L2_CID_COUNT), and it is allocated below in chunks of
8363 	 * at most CDU_ILT_PAGE_SZ bytes each.
8364 	 */
8365 context_size = sizeof(union cdu_context) * BNX2X_L2_CID_COUNT(bp);
8366
8367 for (i = 0, allocated = 0; allocated < context_size; i++) {
8368 bp->context[i].size = min(CDU_ILT_PAGE_SZ,
8369 (context_size - allocated));
8370 bp->context[i].vcxt = BNX2X_PCI_ALLOC(&bp->context[i].cxt_mapping,
8371 bp->context[i].size);
8372 if (!bp->context[i].vcxt)
8373 goto alloc_mem_err;
8374 allocated += bp->context[i].size;
8375 }
8376 bp->ilt->lines = kcalloc(ILT_MAX_LINES, sizeof(struct ilt_line),
8377 GFP_KERNEL);
8378 if (!bp->ilt->lines)
8379 goto alloc_mem_err;
8380
8381 if (bnx2x_ilt_mem_op(bp, ILT_MEMOP_ALLOC))
8382 goto alloc_mem_err;
8383
8384 if (bnx2x_iov_alloc_mem(bp))
8385 goto alloc_mem_err;
8386
8387 	/* Slow path ring */
8388 bp->spq = BNX2X_PCI_ALLOC(&bp->spq_mapping, BCM_PAGE_SIZE);
8389 if (!bp->spq)
8390 goto alloc_mem_err;
8391
8392 	/* EQ */
8393 bp->eq_ring = BNX2X_PCI_ALLOC(&bp->eq_mapping,
8394 BCM_PAGE_SIZE * NUM_EQ_PAGES);
8395 if (!bp->eq_ring)
8396 goto alloc_mem_err;
8397
8398 return 0;
8399
8400 alloc_mem_err:
8401 bnx2x_free_mem(bp);
8402 BNX2X_ERR("Can't allocate memory\n");
8403 return -ENOMEM;
8404 }
8405
8406 
8407 /* Configure (add or delete) a single MAC address via the vlan_mac
8408  * framework, waiting or continuing according to *ramrod_flags.
8409  */
8410 int bnx2x_set_mac_one(struct bnx2x *bp, u8 *mac,
8411 struct bnx2x_vlan_mac_obj *obj, bool set,
8412 int mac_type, unsigned long *ramrod_flags)
8413 {
8414 int rc;
8415 struct bnx2x_vlan_mac_ramrod_params ramrod_param;
8416
8417 memset(&ramrod_param, 0, sizeof(ramrod_param));
8418
8419
8420 ramrod_param.vlan_mac_obj = obj;
8421 ramrod_param.ramrod_flags = *ramrod_flags;
8422
8423
8424 if (!test_bit(RAMROD_CONT, ramrod_flags)) {
8425 memcpy(ramrod_param.user_req.u.mac.mac, mac, ETH_ALEN);
8426
8427 __set_bit(mac_type, &ramrod_param.user_req.vlan_mac_flags);
8428
8429
8430 if (set)
8431 ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD;
8432 else
8433 ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_DEL;
8434 }
8435
8436 rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
8437
8438 if (rc == -EEXIST) {
8439 DP(BNX2X_MSG_SP, "Failed to schedule ADD operations: %d\n", rc);
8440 		/* do not treat adding the same MAC as an error */
8441 rc = 0;
8442 } else if (rc < 0)
8443 BNX2X_ERR("%s MAC failed\n", (set ? "Set" : "Del"));
8444
8445 return rc;
8446 }
8447
8448 int bnx2x_set_vlan_one(struct bnx2x *bp, u16 vlan,
8449 struct bnx2x_vlan_mac_obj *obj, bool set,
8450 unsigned long *ramrod_flags)
8451 {
8452 int rc;
8453 struct bnx2x_vlan_mac_ramrod_params ramrod_param;
8454
8455 memset(&ramrod_param, 0, sizeof(ramrod_param));
8456
8457
8458 ramrod_param.vlan_mac_obj = obj;
8459 ramrod_param.ramrod_flags = *ramrod_flags;
8460
8461
8462 if (!test_bit(RAMROD_CONT, ramrod_flags)) {
8463 ramrod_param.user_req.u.vlan.vlan = vlan;
8464 __set_bit(BNX2X_VLAN, &ramrod_param.user_req.vlan_mac_flags);
8465
8466 if (set)
8467 ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD;
8468 else
8469 ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_DEL;
8470 }
8471
8472 rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
8473
8474 if (rc == -EEXIST) {
8475
8476 DP(BNX2X_MSG_SP, "Failed to schedule ADD operations: %d\n", rc);
8477 rc = 0;
8478 } else if (rc < 0) {
8479 BNX2X_ERR("%s VLAN failed\n", (set ? "Set" : "Del"));
8480 }
8481
8482 return rc;
8483 }
8484
8485 void bnx2x_clear_vlan_info(struct bnx2x *bp)
8486 {
8487 struct bnx2x_vlan_entry *vlan;
8488
8489 	/* Mark that hw forgot all entries */
8490 list_for_each_entry(vlan, &bp->vlan_reg, link)
8491 vlan->hw = false;
8492
8493 bp->vlan_cnt = 0;
8494 }
8495
8496 static int bnx2x_del_all_vlans(struct bnx2x *bp)
8497 {
8498 struct bnx2x_vlan_mac_obj *vlan_obj = &bp->sp_objs[0].vlan_obj;
8499 unsigned long ramrod_flags = 0, vlan_flags = 0;
8500 int rc;
8501
8502 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
8503 __set_bit(BNX2X_VLAN, &vlan_flags);
8504 rc = vlan_obj->delete_all(bp, vlan_obj, &vlan_flags, &ramrod_flags);
8505 if (rc)
8506 return rc;
8507
8508 bnx2x_clear_vlan_info(bp);
8509
8510 return 0;
8511 }
8512
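/* Delete all MAC addresses of the given type from a vlan_mac object,
 * optionally waiting for the ramrod completions. Used e.g. during
 * unload:
 *
 *	rc = bnx2x_del_all_macs(bp, &bp->sp_objs[0].mac_obj,
 *				BNX2X_ETH_MAC, false);
 */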
8513 int bnx2x_del_all_macs(struct bnx2x *bp,
8514 struct bnx2x_vlan_mac_obj *mac_obj,
8515 int mac_type, bool wait_for_comp)
8516 {
8517 int rc;
8518 unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
8519
8520
8521 if (wait_for_comp)
8522 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
8523
8524
8525 __set_bit(mac_type, &vlan_mac_flags);
8526
8527 rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags, &ramrod_flags);
8528 if (rc < 0)
8529 BNX2X_ERR("Failed to delete MACs: %d\n", rc);
8530
8531 return rc;
8532 }
8533
8534 int bnx2x_set_eth_mac(struct bnx2x *bp, bool set)
8535 {
8536 if (IS_PF(bp)) {
8537 unsigned long ramrod_flags = 0;
8538
8539 DP(NETIF_MSG_IFUP, "Adding Eth MAC\n");
8540 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
8541 return bnx2x_set_mac_one(bp, bp->dev->dev_addr,
8542 &bp->sp_objs->mac_obj, set,
8543 BNX2X_ETH_MAC, &ramrod_flags);
8544 } else {
8545 return bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr,
8546 bp->fp->index, set);
8547 }
8548 }
8549
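/* Set up the leading (index 0) queue: directly for a PF, or via the
 * VF-PF channel for a VF.
 */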
8550 int bnx2x_setup_leading(struct bnx2x *bp)
8551 {
8552 if (IS_PF(bp))
8553 return bnx2x_setup_queue(bp, &bp->fp[0], true);
8554 else
8555 return bnx2x_vfpf_setup_q(bp, &bp->fp[0], true);
8556 }
8557
8558 
8559 /**
8560  * bnx2x_set_int_mode - configure interrupt mode
8561  *
8562  * @bp:		driver handle
8563  * In case of MSI-X it will also try to enable MSI-X.
8564  */
8565 int bnx2x_set_int_mode(struct bnx2x *bp)
8566 {
8567 int rc = 0;
8568
8569 if (IS_VF(bp) && int_mode != BNX2X_INT_MODE_MSIX) {
8570 BNX2X_ERR("VF not loaded since interrupt mode not msix\n");
8571 return -EINVAL;
8572 }
8573
8574 	switch (int_mode) {
8575 	case BNX2X_INT_MODE_MSIX:
8576 		/* attempt to enable msix */
8577 		rc = bnx2x_enable_msix(bp);
8578 
8579 		/* msix attained */
8580 		if (!rc)
8581 			return 0;
8582 
8583 		/* VFs use only MSI-X */
8584 		if (rc && IS_VF(bp))
8585 			return rc;
8586 
8587 		/* failed to enable multiple MSI-X */
8588 		BNX2X_DEV_INFO("Failed to enable multiple MSI-X (%d), set number of queues to %d\n",
8589 			       bp->num_queues,
8590 			       1 + bp->num_cnic_queues);
8591 
8592 		/* fall through */
8593 	case BNX2X_INT_MODE_MSI:
8594 		bnx2x_enable_msi(bp);
8595 
8596 		/* fall through */
8597 	case BNX2X_INT_MODE_INTX:
8598 bp->num_ethernet_queues = 1;
8599 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
8600 BNX2X_DEV_INFO("set number of queues to 1\n");
8601 break;
8602 default:
8603 BNX2X_DEV_INFO("unknown value in int_mode module parameter\n");
8604 return -EINVAL;
8605 }
8606 return 0;
8607 }
8608
8609
8610 static inline u16 bnx2x_cid_ilt_lines(struct bnx2x *bp)
8611 {
8612 if (IS_SRIOV(bp))
8613 return (BNX2X_FIRST_VF_CID + BNX2X_VF_CIDS)/ILT_PAGE_CIDS;
8614 return L2_ILT_LINES(bp);
8615 }
8616
8617 void bnx2x_ilt_set_info(struct bnx2x *bp)
8618 {
8619 struct ilt_client_info *ilt_client;
8620 struct bnx2x_ilt *ilt = BP_ILT(bp);
8621 u16 line = 0;
8622
8623 ilt->start_line = FUNC_ILT_BASE(BP_FUNC(bp));
8624 DP(BNX2X_MSG_SP, "ilt starts at line %d\n", ilt->start_line);
8625
8626
8627 ilt_client = &ilt->clients[ILT_CLIENT_CDU];
8628 ilt_client->client_num = ILT_CLIENT_CDU;
8629 ilt_client->page_size = CDU_ILT_PAGE_SZ;
8630 ilt_client->flags = ILT_CLIENT_SKIP_MEM;
8631 ilt_client->start = line;
8632 line += bnx2x_cid_ilt_lines(bp);
8633
8634 if (CNIC_SUPPORT(bp))
8635 line += CNIC_ILT_LINES;
8636 ilt_client->end = line - 1;
8637
8638 DP(NETIF_MSG_IFUP, "ilt client[CDU]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
8639 ilt_client->start,
8640 ilt_client->end,
8641 ilt_client->page_size,
8642 ilt_client->flags,
8643 ilog2(ilt_client->page_size >> 12));
8644
8645
8646 if (QM_INIT(bp->qm_cid_count)) {
8647 ilt_client = &ilt->clients[ILT_CLIENT_QM];
8648 ilt_client->client_num = ILT_CLIENT_QM;
8649 ilt_client->page_size = QM_ILT_PAGE_SZ;
8650 ilt_client->flags = 0;
8651 ilt_client->start = line;
8652
8653 		/* 4 bytes for each cid */
8654 line += DIV_ROUND_UP(bp->qm_cid_count * QM_QUEUES_PER_FUNC * 4,
8655 QM_ILT_PAGE_SZ);
8656
8657 ilt_client->end = line - 1;
8658
8659 DP(NETIF_MSG_IFUP,
8660 "ilt client[QM]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
8661 ilt_client->start,
8662 ilt_client->end,
8663 ilt_client->page_size,
8664 ilt_client->flags,
8665 ilog2(ilt_client->page_size >> 12));
8666 }
8667
8668 if (CNIC_SUPPORT(bp)) {
8669
8670 ilt_client = &ilt->clients[ILT_CLIENT_SRC];
8671 ilt_client->client_num = ILT_CLIENT_SRC;
8672 ilt_client->page_size = SRC_ILT_PAGE_SZ;
8673 ilt_client->flags = 0;
8674 ilt_client->start = line;
8675 line += SRC_ILT_LINES;
8676 ilt_client->end = line - 1;
8677
8678 DP(NETIF_MSG_IFUP,
8679 "ilt client[SRC]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
8680 ilt_client->start,
8681 ilt_client->end,
8682 ilt_client->page_size,
8683 ilt_client->flags,
8684 ilog2(ilt_client->page_size >> 12));
8685
8686
8687 ilt_client = &ilt->clients[ILT_CLIENT_TM];
8688 ilt_client->client_num = ILT_CLIENT_TM;
8689 ilt_client->page_size = TM_ILT_PAGE_SZ;
8690 ilt_client->flags = 0;
8691 ilt_client->start = line;
8692 line += TM_ILT_LINES;
8693 ilt_client->end = line - 1;
8694
8695 DP(NETIF_MSG_IFUP,
8696 "ilt client[TM]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
8697 ilt_client->start,
8698 ilt_client->end,
8699 ilt_client->page_size,
8700 ilt_client->flags,
8701 ilog2(ilt_client->page_size >> 12));
8702 }
8703
8704 BUG_ON(line > ILT_MAX_LINES);
8705 }
8706
8707 
8708 /**
8709  * bnx2x_pf_q_prep_init - prepare INIT transition parameters
8710  *
8711  * @bp:		driver handle
8712  * @fp:		pointer to fastpath
8713  * @init_params:	pointer to parameters structure
8714  *
8715  * Configures the host-coalescing (HC) parameters and the queue's CDU
8716  * context pointers for the RESET->INIT transition.
8717  */
8718 static void bnx2x_pf_q_prep_init(struct bnx2x *bp,
8719 struct bnx2x_fastpath *fp, struct bnx2x_queue_init_params *init_params)
8720 {
8721 u8 cos;
8722 int cxt_index, cxt_offset;
8723
8724
8725 if (!IS_FCOE_FP(fp)) {
8726 __set_bit(BNX2X_Q_FLG_HC, &init_params->rx.flags);
8727 __set_bit(BNX2X_Q_FLG_HC, &init_params->tx.flags);
8728
8729 
8730 		/* If HC is supported, enable host coalescing in the
8731 		 * transition to the INIT state */
8732 __set_bit(BNX2X_Q_FLG_HC_EN, &init_params->rx.flags);
8733 __set_bit(BNX2X_Q_FLG_HC_EN, &init_params->tx.flags);
8734
8735
8736 init_params->rx.hc_rate = bp->rx_ticks ?
8737 (1000000 / bp->rx_ticks) : 0;
8738 init_params->tx.hc_rate = bp->tx_ticks ?
8739 (1000000 / bp->tx_ticks) : 0;
8740
8741
8742 init_params->rx.fw_sb_id = init_params->tx.fw_sb_id =
8743 fp->fw_sb_id;
8744
8745 
8746 		/* CQ index among the SB indices: FCoE clients use the
8747 		 * default SB, therefore it's different.
8748 		 */
8749 init_params->rx.sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;
8750 init_params->tx.sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS;
8751 }
8752
8753
8754 init_params->max_cos = fp->max_cos;
8755
8756 DP(NETIF_MSG_IFUP, "fp: %d setting queue params max cos to: %d\n",
8757 fp->index, init_params->max_cos);
8758
8759
8760 for (cos = FIRST_TX_COS_INDEX; cos < init_params->max_cos; cos++) {
8761 cxt_index = fp->txdata_ptr[cos]->cid / ILT_PAGE_CIDS;
8762 cxt_offset = fp->txdata_ptr[cos]->cid - (cxt_index *
8763 ILT_PAGE_CIDS);
8764 init_params->cxts[cos] =
8765 &bp->context[cxt_index].vcxt[cxt_offset].eth;
8766 }
8767 }
8768
8769 static int bnx2x_setup_tx_only(struct bnx2x *bp, struct bnx2x_fastpath *fp,
8770 struct bnx2x_queue_state_params *q_params,
8771 struct bnx2x_queue_setup_tx_only_params *tx_only_params,
8772 int tx_index, bool leading)
8773 {
8774 memset(tx_only_params, 0, sizeof(*tx_only_params));
8775
8776
8777 q_params->cmd = BNX2X_Q_CMD_SETUP_TX_ONLY;
8778
8779
8780 tx_only_params->flags = bnx2x_get_common_flags(bp, fp, false);
8781
8782
8783 tx_only_params->cid_index = tx_index;
8784
8785
8786 bnx2x_pf_q_prep_general(bp, fp, &tx_only_params->gen_params, tx_index);
8787
8788
8789 bnx2x_pf_tx_q_prep(bp, fp, &tx_only_params->txq_params, tx_index);
8790
8791 DP(NETIF_MSG_IFUP,
8792 "preparing to send tx-only ramrod for connection: cos %d, primary cid %d, cid %d, client id %d, sp-client id %d, flags %lx\n",
8793 tx_index, q_params->q_obj->cids[FIRST_TX_COS_INDEX],
8794 q_params->q_obj->cids[tx_index], q_params->q_obj->cl_id,
8795 tx_only_params->gen_params.spcl_id, tx_only_params->flags);
8796
8797
8798 return bnx2x_queue_state_change(bp, q_params);
8799 }
8800
8801 
8802 /**
8803  * bnx2x_setup_queue - setup eth queue.
8804  *
8805  * @bp:		driver handle
8806  * @fp:		pointer to fastpath
8807  * @leading:	is leading
8808  *
8809  * This function performs 2 steps in a Queue state machine:
8810  *      1) RESET->INIT  2) INIT->SETUP
8811  */
8812 int bnx2x_setup_queue(struct bnx2x *bp, struct bnx2x_fastpath *fp,
8813 bool leading)
8814 {
8815 struct bnx2x_queue_state_params q_params = {NULL};
8816 struct bnx2x_queue_setup_params *setup_params =
8817 &q_params.params.setup;
8818 struct bnx2x_queue_setup_tx_only_params *tx_only_params =
8819 &q_params.params.tx_only;
8820 int rc;
8821 u8 tx_index;
8822
8823 DP(NETIF_MSG_IFUP, "setting up queue %d\n", fp->index);
8824
8825 	/* reset IGU state, skip FCoE L2 queue */
8826 if (!IS_FCOE_FP(fp))
8827 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0,
8828 IGU_INT_ENABLE, 0);
8829
8830 q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
8831
8832 __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
8833
8834
8835 bnx2x_pf_q_prep_init(bp, fp, &q_params.params.init);
8836
8837
8838 q_params.cmd = BNX2X_Q_CMD_INIT;
8839
8840
8841 rc = bnx2x_queue_state_change(bp, &q_params);
8842 if (rc) {
8843 BNX2X_ERR("Queue(%d) INIT failed\n", fp->index);
8844 return rc;
8845 }
8846
8847 DP(NETIF_MSG_IFUP, "init complete\n");
8848
8849
8850 memset(setup_params, 0, sizeof(*setup_params));
8851
8852
8853 setup_params->flags = bnx2x_get_q_flags(bp, fp, leading);
8854
8855
8856 bnx2x_pf_q_prep_general(bp, fp, &setup_params->gen_params,
8857 FIRST_TX_COS_INDEX);
8858
8859 bnx2x_pf_rx_q_prep(bp, fp, &setup_params->pause_params,
8860 &setup_params->rxq_params);
8861
8862 bnx2x_pf_tx_q_prep(bp, fp, &setup_params->txq_params,
8863 FIRST_TX_COS_INDEX);
8864
8865
8866 q_params.cmd = BNX2X_Q_CMD_SETUP;
8867
8868 if (IS_FCOE_FP(fp))
8869 bp->fcoe_init = true;
8870
8871
8872 rc = bnx2x_queue_state_change(bp, &q_params);
8873 if (rc) {
8874 BNX2X_ERR("Queue(%d) SETUP failed\n", fp->index);
8875 return rc;
8876 }
8877
8878
8879 for (tx_index = FIRST_TX_ONLY_COS_INDEX;
8880 tx_index < fp->max_cos;
8881 tx_index++) {
8882
8883
8884 rc = bnx2x_setup_tx_only(bp, fp, &q_params,
8885 tx_only_params, tx_index, leading);
8886 if (rc) {
8887 BNX2X_ERR("Queue(%d.%d) TX_ONLY_SETUP failed\n",
8888 fp->index, tx_index);
8889 return rc;
8890 }
8891 }
8892
8893 return rc;
8894 }
8895
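/* Tear down a queue: TERMINATE + CFC_DEL each tx-only connection first,
 * then HALT, TERMINATE and CFC_DEL the primary connection. All ramrods
 * are sent with RAMROD_COMP_WAIT, i.e. completion is awaited here.
 */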
8896 static int bnx2x_stop_queue(struct bnx2x *bp, int index)
8897 {
8898 struct bnx2x_fastpath *fp = &bp->fp[index];
8899 struct bnx2x_fp_txdata *txdata;
8900 struct bnx2x_queue_state_params q_params = {NULL};
8901 int rc, tx_index;
8902
8903 DP(NETIF_MSG_IFDOWN, "stopping queue %d cid %d\n", index, fp->cid);
8904
8905 q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
8906
8907 __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
8908
8909 	/* close tx-only connections */
8910 for (tx_index = FIRST_TX_ONLY_COS_INDEX;
8911 tx_index < fp->max_cos;
8912 tx_index++){
8913
8914
8915 txdata = fp->txdata_ptr[tx_index];
8916
8917 DP(NETIF_MSG_IFDOWN, "stopping tx-only queue %d\n",
8918 txdata->txq_index);
8919
8920
8921 q_params.cmd = BNX2X_Q_CMD_TERMINATE;
8922 memset(&q_params.params.terminate, 0,
8923 sizeof(q_params.params.terminate));
8924 q_params.params.terminate.cid_index = tx_index;
8925
8926 rc = bnx2x_queue_state_change(bp, &q_params);
8927 if (rc)
8928 return rc;
8929
8930
8931 q_params.cmd = BNX2X_Q_CMD_CFC_DEL;
8932 memset(&q_params.params.cfc_del, 0,
8933 sizeof(q_params.params.cfc_del));
8934 q_params.params.cfc_del.cid_index = tx_index;
8935 rc = bnx2x_queue_state_change(bp, &q_params);
8936 if (rc)
8937 return rc;
8938 }
8939 	/* Stop the primary connection: */
8940 	/* ...halt the connection */
8941 q_params.cmd = BNX2X_Q_CMD_HALT;
8942 rc = bnx2x_queue_state_change(bp, &q_params);
8943 if (rc)
8944 return rc;
8945
8946
8947 q_params.cmd = BNX2X_Q_CMD_TERMINATE;
8948 memset(&q_params.params.terminate, 0,
8949 sizeof(q_params.params.terminate));
8950 q_params.params.terminate.cid_index = FIRST_TX_COS_INDEX;
8951 rc = bnx2x_queue_state_change(bp, &q_params);
8952 if (rc)
8953 return rc;
8954
8955 q_params.cmd = BNX2X_Q_CMD_CFC_DEL;
8956 memset(&q_params.params.cfc_del, 0,
8957 sizeof(q_params.params.cfc_del));
8958 q_params.params.cfc_del.cid_index = FIRST_TX_COS_INDEX;
8959 return bnx2x_queue_state_change(bp, &q_params);
8960 }
8961
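/* Reset the function: disable it in the FW and in the interrupt
 * controller (HC or IGU), mark its status blocks as disabled, stop the
 * CNIC timer scan and clear the function's ILT mapping.
 */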
8962 static void bnx2x_reset_func(struct bnx2x *bp)
8963 {
8964 int port = BP_PORT(bp);
8965 int func = BP_FUNC(bp);
8966 int i;
8967
8968 	/* Disable the function in the FW */
8969 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0);
8970 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(func), 0);
8971 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(func), 0);
8972 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0);
8973
8974
8975 for_each_eth_queue(bp, i) {
8976 struct bnx2x_fastpath *fp = &bp->fp[i];
8977 REG_WR8(bp, BAR_CSTRORM_INTMEM +
8978 CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(fp->fw_sb_id),
8979 SB_DISABLED);
8980 }
8981
8982 if (CNIC_LOADED(bp))
8983
8984 REG_WR8(bp, BAR_CSTRORM_INTMEM +
8985 CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET
8986 (bnx2x_cnic_fw_sb_id(bp)), SB_DISABLED);
8987
8988
8989 REG_WR8(bp, BAR_CSTRORM_INTMEM +
8990 CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(func),
8991 SB_DISABLED);
8992
8993 for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++)
8994 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func),
8995 0);
8996
8997
8998 if (bp->common.int_block == INT_BLOCK_HC) {
8999 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
9000 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
9001 } else {
9002 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
9003 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
9004 }
9005
9006 if (CNIC_LOADED(bp)) {
9007 		/* Disable Timer scan */
9008 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
9009
9010 		/* Wait for at least 10ms and up to 2 seconds for the timers
9011 		 * scan to complete
9012 		 */
9013 for (i = 0; i < 200; i++) {
9014 usleep_range(10000, 20000);
9015 if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
9016 break;
9017 }
9018 }
9019
9020 bnx2x_clear_func_ilt(bp, func);
9021 
9022 	/* Timers workaround bug for E2: if this is vnic-3,
9023 	 * we need to set the entire ILT range for the timers.
9024 	 */
9025 if (!CHIP_IS_E1x(bp) && BP_VN(bp) == 3) {
9026 struct ilt_client_info ilt_cli;
9027
9028 memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
9029 ilt_cli.start = 0;
9030 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
9031 ilt_cli.client_num = ILT_CLIENT_TM;
9032
9033 bnx2x_ilt_boundry_init_op(bp, &ilt_cli, 0, INITOP_CLEAR);
9034 }
9035
9036 	/* this assumes that reset_port() was called before reset_func() */
9037 if (!CHIP_IS_E1x(bp))
9038 bnx2x_pf_disable(bp);
9039
9040 bp->dmae_ready = 0;
9041 }
9042
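/* Reset the port: bring the PHY link down, mask NIG and AEU attentions
 * and stop accepting packets into the BRB; warns if the BRB is still
 * occupied afterwards.
 */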
9043 static void bnx2x_reset_port(struct bnx2x *bp)
9044 {
9045 int port = BP_PORT(bp);
9046 u32 val;
9047
9048 	/* Reset physical Link */
9049 bnx2x__link_reset(bp);
9050
9051 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
9052
9053
9054 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
9055
9056 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
9057 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
9058
9059
9060 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
9061
9062 msleep(100);
9063
9064 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
9065 if (val)
9066 DP(NETIF_MSG_IFDOWN,
9067 "BRB1 is not empty %d blocks are occupied\n", val);
9068
9069
9070 }
9071
9072 static int bnx2x_reset_hw(struct bnx2x *bp, u32 load_code)
9073 {
9074 struct bnx2x_func_state_params func_params = {NULL};
9075
9076
9077 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
9078
9079 func_params.f_obj = &bp->func_obj;
9080 func_params.cmd = BNX2X_F_CMD_HW_RESET;
9081
9082 func_params.params.hw_init.load_phase = load_code;
9083
9084 return bnx2x_func_state_change(bp, &func_params);
9085 }
9086
9087 static int bnx2x_func_stop(struct bnx2x *bp)
9088 {
9089 struct bnx2x_func_state_params func_params = {NULL};
9090 int rc;
9091
9092
9093 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
9094 func_params.f_obj = &bp->func_obj;
9095 func_params.cmd = BNX2X_F_CMD_STOP;
9096
9097 
9098 	/* Try to stop the function the 'good way'. If it fails (in case
9099 	 * of a parity error during bnx2x_chip_cleanup()) and we are
9100 	 * not in a debug mode, perform a state transaction in order to
9101 	 * enable a further HW_RESET transaction.
9102 	 */
9103 rc = bnx2x_func_state_change(bp, &func_params);
9104 if (rc) {
9105 #ifdef BNX2X_STOP_ON_ERROR
9106 return rc;
9107 #else
9108 BNX2X_ERR("FUNC_STOP ramrod failed. Running a dry transaction\n");
9109 __set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags);
9110 return bnx2x_func_state_change(bp, &func_params);
9111 #endif
9112 }
9113
9114 return 0;
9115 }
9116
9117 
9118 /**
9119  * bnx2x_send_unload_req - request unload mode from the MCP.
9120  *
9121  * @bp:			driver handle
9122  * @unload_mode:	requested function's unload mode
9123  * Returns the unload mode returned by the MCP: COMMON, PORT or FUNCTION.
9124  */
9125 u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode)
9126 {
9127 u32 reset_code = 0;
9128 int port = BP_PORT(bp);
9129
9130
9131 if (unload_mode == UNLOAD_NORMAL)
9132 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
9133
9134 else if (bp->flags & NO_WOL_FLAG)
9135 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
9136
9137 else if (bp->wol) {
9138 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
9139 u8 *mac_addr = bp->dev->dev_addr;
9140 struct pci_dev *pdev = bp->pdev;
9141 u32 val;
9142 u16 pmc;
9143
9144 		/* The mac address is written to entries 1-4 to
9145 		 * preserve entry 0 which is used by the PMF
9146 		 */
9147 u8 entry = (BP_VN(bp) + 1)*8;
9148
9149 val = (mac_addr[0] << 8) | mac_addr[1];
9150 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
9151
9152 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
9153 (mac_addr[4] << 8) | mac_addr[5];
9154 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
9155
9156
9157 pci_read_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, &pmc);
9158 pmc |= PCI_PM_CTRL_PME_ENABLE | PCI_PM_CTRL_PME_STATUS;
9159 pci_write_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, pmc);
9160
9161 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
9162
9163 } else
9164 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
9165
9166
9167 if (!BP_NOMCP(bp))
9168 reset_code = bnx2x_fw_command(bp, reset_code, 0);
9169 else {
9170 int path = BP_PATH(bp);
9171
9172 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts[%d] %d, %d, %d\n",
9173 path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
9174 bnx2x_load_count[path][2]);
9175 bnx2x_load_count[path][0]--;
9176 bnx2x_load_count[path][1 + port]--;
9177 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts[%d] %d, %d, %d\n",
9178 path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
9179 bnx2x_load_count[path][2]);
9180 if (bnx2x_load_count[path][0] == 0)
9181 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
9182 else if (bnx2x_load_count[path][1 + port] == 0)
9183 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
9184 else
9185 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
9186 }
9187
9188 return reset_code;
9189 }
9190
9191 
9192 /**
9193  * bnx2x_send_unload_done - send UNLOAD_DONE command to the MCP.
9194  * @bp:		driver handle
9195  * @keep_link:	true iff link should be kept up
9196  */
9197 void bnx2x_send_unload_done(struct bnx2x *bp, bool keep_link)
9198 {
9199 u32 reset_param = keep_link ? DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET : 0;
9200
9201
9202 if (!BP_NOMCP(bp))
9203 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, reset_param);
9204 }
9205
9206 static int bnx2x_func_wait_started(struct bnx2x *bp)
9207 {
9208 int tout = 50;
9209 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
9210
9211 if (!bp->port.pmf)
9212 return 0;
9213 
9214 	/*
9215 	 * (assumption: No Attention from MCP at this stage)
9216 	 * The PMF is probably in the middle of a TX disable/enable
9217 	 * transaction, so:
9218 	 * 1. Sync the IRQ for the default SB
9219 	 * 2. Sync the SP queue - this guarantees that attention handling
9220 	 *    has started
9221 	 * 3. Wait until the TX disable/enable transaction completes
9222 	 *
9223 	 * 1+2 guarantee that if a DCBx attention was scheduled, it has
9224 	 * already moved the state from STARTED to TX_STOPPED; once the
9225 	 * completion for that transaction arrives, the state returns to
9226 	 * STARTED via the TX_STOPPED-->STARTED transition below.
9227 	 */
9228 
9229 if (msix)
9230 synchronize_irq(bp->msix_table[0].vector);
9231 else
9232 synchronize_irq(bp->pdev->irq);
9233
9234 flush_workqueue(bnx2x_wq);
9235 flush_workqueue(bnx2x_iov_wq);
9236
9237 while (bnx2x_func_get_state(bp, &bp->func_obj) !=
9238 BNX2X_F_STATE_STARTED && tout--)
9239 msleep(20);
9240
9241 if (bnx2x_func_get_state(bp, &bp->func_obj) !=
9242 BNX2X_F_STATE_STARTED) {
9243 #ifdef BNX2X_STOP_ON_ERROR
9244 BNX2X_ERR("Wrong function state\n");
9245 return -EBUSY;
9246 #else
9247 		/*
9248 		 * Failed to complete the transaction in a "good way".
9249 		 * Force both transactions with the CLR bit.
9250 		 */
9251 struct bnx2x_func_state_params func_params = {NULL};
9252
9253 DP(NETIF_MSG_IFDOWN,
9254 "Hmmm... Unexpected function state! Forcing STARTED-->TX_STOPPED-->STARTED\n");
9255
9256 func_params.f_obj = &bp->func_obj;
9257 __set_bit(RAMROD_DRV_CLR_ONLY,
9258 &func_params.ramrod_flags);
9259
9260
9261 func_params.cmd = BNX2X_F_CMD_TX_STOP;
9262 bnx2x_func_state_change(bp, &func_params);
9263
9264
9265 func_params.cmd = BNX2X_F_CMD_TX_START;
9266 return bnx2x_func_state_change(bp, &func_params);
9267 #endif
9268 }
9269
9270 return 0;
9271 }
9272
9273 static void bnx2x_disable_ptp(struct bnx2x *bp)
9274 {
9275 int port = BP_PORT(bp);
9276
9277
9278 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_TO_HOST :
9279 NIG_REG_P0_LLH_PTP_TO_HOST, 0x0);
9280
9281
9282 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
9283 NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x7FF);
9284 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
9285 NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3FFF);
9286 REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_PARAM_MASK :
9287 NIG_REG_P0_TLLH_PTP_PARAM_MASK, 0x7FF);
9288 REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_RULE_MASK :
9289 NIG_REG_P0_TLLH_PTP_RULE_MASK, 0x3FFF);
9290
9291
9292 REG_WR(bp, port ? NIG_REG_P1_PTP_EN :
9293 NIG_REG_P0_PTP_EN, 0x0);
9294 }
9295
9296 /* Called during unload, to stop PTP-related stuff */
9297 static void bnx2x_stop_ptp(struct bnx2x *bp)
9298 {
9299 	/* Cancel the PTP work queue. Should be done after the Tx queues
9300 	 * are drained to prevent additional scheduling.
9301 	 */
9302 cancel_work_sync(&bp->ptp_task);
9303
9304 if (bp->ptp_tx_skb) {
9305 dev_kfree_skb_any(bp->ptp_tx_skb);
9306 bp->ptp_tx_skb = NULL;
9307 }
9308
9309
9310 bnx2x_disable_ptp(bp);
9311
9312 DP(BNX2X_MSG_PTP, "PTP stop ended successfully\n");
9313 }
9314
9315 void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode, bool keep_link)
9316 {
9317 int port = BP_PORT(bp);
9318 int i, rc = 0;
9319 u8 cos;
9320 struct bnx2x_mcast_ramrod_params rparam = {NULL};
9321 u32 reset_code;
9322
9323
9324 for_each_tx_queue(bp, i) {
9325 struct bnx2x_fastpath *fp = &bp->fp[i];
9326
9327 for_each_cos_in_tx_queue(fp, cos)
9328 rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]);
9329 #ifdef BNX2X_STOP_ON_ERROR
9330 if (rc)
9331 return;
9332 #endif
9333 }
9334
9335
9336 usleep_range(1000, 2000);
9337
9338
9339 rc = bnx2x_del_all_macs(bp, &bp->sp_objs[0].mac_obj, BNX2X_ETH_MAC,
9340 false);
9341 if (rc < 0)
9342 BNX2X_ERR("Failed to delete all ETH macs: %d\n", rc);
9343
9344
9345 rc = bnx2x_del_all_macs(bp, &bp->sp_objs[0].mac_obj, BNX2X_UC_LIST_MAC,
9346 true);
9347 if (rc < 0)
9348 BNX2X_ERR("Failed to schedule DEL commands for UC MACs list: %d\n",
9349 rc);
9350
9351 
9352 	/* The *vlan_obj structure may not be initialized if VLAN filtering
9353 	 * offload is not supported by the hardware; currently this is
9354 	 * true for all hardware covered by CHIP_IS_E1x(). */
9355 if (!CHIP_IS_E1x(bp)) {
9356 		/* Remove all currently configured VLANs */
9357 rc = bnx2x_del_all_vlans(bp);
9358 if (rc < 0)
9359 BNX2X_ERR("Failed to delete all VLANs\n");
9360 }
9361
9362
9363 if (!CHIP_IS_E1(bp))
9364 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
9365
9366 
9367 	/* Set "drop all" (stop Rx).
9368 	 * We need to take a netif_addr_lock() here in order to prevent
9369 	 * a race between the completion code and this code. */
9370 netif_addr_lock_bh(bp->dev);
9371
9372 if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state))
9373 set_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state);
9374 else if (bp->slowpath)
9375 bnx2x_set_storm_rx_mode(bp);
9376
9377
9378 rparam.mcast_obj = &bp->mcast_obj;
9379 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
9380 if (rc < 0)
9381 BNX2X_ERR("Failed to send DEL multicast command: %d\n", rc);
9382
9383 netif_addr_unlock_bh(bp->dev);
9384
9385 bnx2x_iov_chip_cleanup(bp);
9386
9387 
9388 	/*
9389 	 * Send the UNLOAD REQUEST to the MCP. This will return whether
9390 	 * this function should perform a FUNC, PORT or COMMON HW reset.
9391 	 */
9392 reset_code = bnx2x_send_unload_req(bp, unload_mode);
9393
9394
9395
9396
9397
9398 rc = bnx2x_func_wait_started(bp);
9399 if (rc) {
9400 BNX2X_ERR("bnx2x_func_wait_started failed\n");
9401 #ifdef BNX2X_STOP_ON_ERROR
9402 return;
9403 #endif
9404 }
9405
9406 
9407 	/* Close multi and leading connections.
9408 	 * Completions for ramrods are collected in a synchronous way */
9409 for_each_eth_queue(bp, i)
9410 if (bnx2x_stop_queue(bp, i))
9411 #ifdef BNX2X_STOP_ON_ERROR
9412 return;
9413 #else
9414 goto unload_error;
9415 #endif
9416
9417 if (CNIC_LOADED(bp)) {
9418 for_each_cnic_queue(bp, i)
9419 if (bnx2x_stop_queue(bp, i))
9420 #ifdef BNX2X_STOP_ON_ERROR
9421 return;
9422 #else
9423 goto unload_error;
9424 #endif
9425 }
9426
9427 
9428 	/* If SP settings didn't get completed so far - something
9429 	 * very wrong has happened */
9430 if (!bnx2x_wait_sp_comp(bp, ~0x0UL))
9431 BNX2X_ERR("Hmmm... Common slow path ramrods got stuck!\n");
9432
9433 #ifndef BNX2X_STOP_ON_ERROR
9434 unload_error:
9435 #endif
9436 rc = bnx2x_func_stop(bp);
9437 if (rc) {
9438 BNX2X_ERR("Function stop failed!\n");
9439 #ifdef BNX2X_STOP_ON_ERROR
9440 return;
9441 #endif
9442 }
9443
9444 
9445 	/* stop_ptp should be after the Tx queues are drained to prevent
9446 	 * scheduling to the cancelled PTP work queue. It should also be
9447 	 * after the function stop ramrod is sent, since as part of this
9448 	 * ramrod the FW accesses PTP registers. */
9449 if (bp->flags & PTP_SUPPORTED) {
9450 bnx2x_stop_ptp(bp);
9451 if (bp->ptp_clock) {
9452 ptp_clock_unregister(bp->ptp_clock);
9453 bp->ptp_clock = NULL;
9454 }
9455 }
9456
9457
9458 bnx2x_netif_stop(bp, 1);
9459
9460 bnx2x_del_all_napi(bp);
9461 if (CNIC_LOADED(bp))
9462 bnx2x_del_all_napi_cnic(bp);
9463
9464
9465 bnx2x_free_irq(bp);
9466
9467 
9468 	/* Reset the chip, unless the PCI function is offline. If we reach
9469 	 * this point following PCI error handling, the device is really in
9470 	 * a bad state and we're about to remove it, so resetting the chip
9471 	 * is not a good idea. */
9472 if (!pci_channel_offline(bp->pdev)) {
9473 rc = bnx2x_reset_hw(bp, reset_code);
9474 if (rc)
9475 BNX2X_ERR("HW_RESET failed\n");
9476 }
9477
9478
9479 bnx2x_send_unload_done(bp, keep_link);
9480 }
9481
9482 void bnx2x_disable_close_the_gate(struct bnx2x *bp)
9483 {
9484 u32 val;
9485
9486 DP(NETIF_MSG_IFDOWN, "Disabling \"close the gates\"\n");
9487
9488 if (CHIP_IS_E1(bp)) {
9489 int port = BP_PORT(bp);
9490 u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
9491 MISC_REG_AEU_MASK_ATTN_FUNC_0;
9492
9493 val = REG_RD(bp, addr);
9494 val &= ~(0x300);
9495 REG_WR(bp, addr, val);
9496 } else {
9497 val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
9498 val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
9499 MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
9500 REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val);
9501 }
9502 }
9503
9504 /* Close gates #2, #3 and #4 */
9505 static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
9506 {
9507 u32 val;
9508
9509 	/* Gates #2 and #4a are closed/opened for "not E1" only */
9510 if (!CHIP_IS_E1(bp)) {
9511
9512 REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS, !!close);
9513
9514 REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES, !!close);
9515 }
9516
9517
9518 if (CHIP_IS_E1x(bp)) {
9519
9520 val = REG_RD(bp, HC_REG_CONFIG_1);
9521 REG_WR(bp, HC_REG_CONFIG_1,
9522 (!close) ? (val | HC_CONFIG_1_REG_BLOCK_DISABLE_1) :
9523 (val & ~(u32)HC_CONFIG_1_REG_BLOCK_DISABLE_1));
9524
9525 val = REG_RD(bp, HC_REG_CONFIG_0);
9526 REG_WR(bp, HC_REG_CONFIG_0,
9527 (!close) ? (val | HC_CONFIG_0_REG_BLOCK_DISABLE_0) :
9528 (val & ~(u32)HC_CONFIG_0_REG_BLOCK_DISABLE_0));
9529 } else {
9530
9531 val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);
9532
9533 REG_WR(bp, IGU_REG_BLOCK_CONFIGURATION,
9534 (!close) ?
9535 (val | IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE) :
9536 (val & ~(u32)IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE));
9537 }
9538
9539 DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "%s gates #2, #3 and #4\n",
9540 close ? "closing" : "opening");
9541 }
9542
9543 #define SHARED_MF_CLP_MAGIC 0x80000000
9544
9545 static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
9546 {
9547
9548 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
9549 *magic_val = val & SHARED_MF_CLP_MAGIC;
9550 MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
9551 }
9552
9553 
9554 /**
9555  * bnx2x_clp_reset_done - restore the value of the `magic' bit.
9556  * @bp:		driver handle
9557  * @magic_val:	old value of the `magic' bit.
9558  */
9559 static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
9560 {
9561
9562 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
9563 MF_CFG_WR(bp, shared_mf_config.clp_mb,
9564 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
9565 }
9566
9567 
9568 /**
9569  * bnx2x_reset_mcp_prep - prepare for MCP reset.
9570  *
9571  * @bp:		driver handle
9572  * @magic_val:	old value of the 'magic' bit.
9573  * Takes care of CLP configurations.
9574  */
9575 static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
9576 {
9577 u32 shmem;
9578 u32 validity_offset;
9579
9580 DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "Starting\n");
9581
9582
9583 if (!CHIP_IS_E1(bp))
9584 bnx2x_clp_reset_prep(bp, magic_val);
9585
9586
9587 shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
9588 validity_offset =
9589 offsetof(struct shmem_region, validity_map[BP_PORT(bp)]);
9590
9591
9592 if (shmem > 0)
9593 REG_WR(bp, shmem + validity_offset, 0);
9594 }
9595
9596 #define MCP_TIMEOUT 5000
9597 #define MCP_ONE_TIMEOUT 100
9598
9599 /**
9600  * bnx2x_mcp_wait_one - wait for MCP_ONE_TIMEOUT
9601  *
9602  * @bp:	driver handle
9603  */
9604 static void bnx2x_mcp_wait_one(struct bnx2x *bp)
9605 {
9606 	/* special handling for emulation and FPGA,
9607 	 * wait 10 times longer */
9608 if (CHIP_REV_IS_SLOW(bp))
9609 msleep(MCP_ONE_TIMEOUT*10);
9610 else
9611 msleep(MCP_ONE_TIMEOUT);
9612 }
9613
9614 
9615 
9616 /* initialize shmem_base and wait for the validity signature to appear */
9617 static int bnx2x_init_shmem(struct bnx2x *bp)
9618 {
9619 int cnt = 0;
9620 u32 val = 0;
9621
9622 do {
9623 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
9624
9625 		/* If we read all 0xFFs, it means we are in a PCI error state
9626 		 * and should bail out to avoid crashes on adapter FW reads
9627 		 */
9628 if (bp->common.shmem_base == 0xFFFFFFFF) {
9629 bp->flags |= NO_MCP_FLAG;
9630 return -ENODEV;
9631 }
9632
9633 if (bp->common.shmem_base) {
9634 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
9635 if (val & SHR_MEM_VALIDITY_MB)
9636 return 0;
9637 }
9638
9639 bnx2x_mcp_wait_one(bp);
9640
9641 } while (cnt++ < (MCP_TIMEOUT / MCP_ONE_TIMEOUT));
9642
9643 BNX2X_ERR("BAD MCP validity signature\n");
9644
9645 return -ENODEV;
9646 }
9647
9648 static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
9649 {
9650 int rc = bnx2x_init_shmem(bp);
9651
9652
9653 if (!CHIP_IS_E1(bp))
9654 bnx2x_clp_reset_done(bp, magic_val);
9655
9656 return rc;
9657 }
9658
9659 static void bnx2x_pxp_prep(struct bnx2x *bp)
9660 {
9661 if (!CHIP_IS_E1(bp)) {
9662 REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
9663 REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
9664 }
9665 }
9666
9667 
9668 /*
9669  * Reset the whole chip except for:
9670  *      - PCIE core
9671  *      - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by one reset bit)
9672  *      - IGU
9673  *      - MISC (including AEU)
9674  *      - GRC
9675  *      - RBCN, RBCP
9676  */
9677 static void bnx2x_process_kill_chip_reset(struct bnx2x *bp, bool global)
9678 {
9679 u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
9680 u32 global_bits2, stay_reset2;
9681
9682 	/*
9683 	 * Bits that have to be set in reset_mask2 if we want to reset
9684 	 * 'global' (per chip) blocks.
9685 	 */
9686 global_bits2 =
9687 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CPU |
9688 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CORE;
9689
9690
9691
9692
9693
9694
9695 not_reset_mask1 =
9696 MISC_REGISTERS_RESET_REG_1_RST_HC |
9697 MISC_REGISTERS_RESET_REG_1_RST_PXPV |
9698 MISC_REGISTERS_RESET_REG_1_RST_PXP;
9699
9700 not_reset_mask2 =
9701 MISC_REGISTERS_RESET_REG_2_RST_PCI_MDIO |
9702 MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
9703 MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
9704 MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
9705 MISC_REGISTERS_RESET_REG_2_RST_RBCN |
9706 MISC_REGISTERS_RESET_REG_2_RST_GRC |
9707 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
9708 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B |
9709 MISC_REGISTERS_RESET_REG_2_RST_ATC |
9710 MISC_REGISTERS_RESET_REG_2_PGLC |
9711 MISC_REGISTERS_RESET_REG_2_RST_BMAC0 |
9712 MISC_REGISTERS_RESET_REG_2_RST_BMAC1 |
9713 MISC_REGISTERS_RESET_REG_2_RST_EMAC0 |
9714 MISC_REGISTERS_RESET_REG_2_RST_EMAC1 |
9715 MISC_REGISTERS_RESET_REG_2_UMAC0 |
9716 MISC_REGISTERS_RESET_REG_2_UMAC1;
9717
9718
9719 	/* Keep the following blocks in reset:
9720 	 *  - all xxMACs are handled by the bnx2x_link code.
9721 	 */
9722 stay_reset2 =
9723 MISC_REGISTERS_RESET_REG_2_XMAC |
9724 MISC_REGISTERS_RESET_REG_2_XMAC_SOFT;
9725
9726
9727 reset_mask1 = 0xffffffff;
9728
9729 if (CHIP_IS_E1(bp))
9730 reset_mask2 = 0xffff;
9731 else if (CHIP_IS_E1H(bp))
9732 reset_mask2 = 0x1ffff;
9733 else if (CHIP_IS_E2(bp))
9734 reset_mask2 = 0xfffff;
9735 else
9736 reset_mask2 = 0x3ffffff;
9737
9738
9739 if (!global)
9740 reset_mask2 &= ~global_bits2;
9741
9742 
9743 	/*
9744 	 * In case of attention in the QM, we need to reset PXP
9745 	 * (MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR) before QM,
9746 	 * because otherwise the QM reset would release 'close the gates'
9747 	 * shortly before resetting the PXP, and the PSWRQ would then send
9748 	 * a write request to PGLUE. When PXP is then reset, PGLUE would
9749 	 * try to read the payload data from PSWWR, but PSWWR would not
9750 	 * respond. The write queue in PGLUE would get stuck and DMAE
9751 	 * commands would never return. It is therefore important to reset
9752 	 * the second reset register (containing the RST_PXP_RQ_RD_WR bit)
9753 	 * before the first one (containing the
9754 	 * MISC_REGISTERS_RESET_REG_1_RST_QM bit).
9755 	 */
9756 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
9757 reset_mask2 & (~not_reset_mask2));
9758
9759 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
9760 reset_mask1 & (~not_reset_mask1));
9761
9762 barrier();
9763
9764 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
9765 reset_mask2 & (~stay_reset2));
9766
9767 barrier();
9768
9769 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
9770 }
9771
9772 
9773 /**
9774  * bnx2x_er_poll_igu_vq - poll for the pending writes bit.
9775  *
9776  * @bp:	driver handle
9777  *
9778  * The pending-writes bit should get cleared in no more than 1s (it
9779  * might take a bit longer with a serious parity error).
9780  */
9781 static int bnx2x_er_poll_igu_vq(struct bnx2x *bp)
9782 {
9783 u32 cnt = 1000;
9784 u32 pend_bits = 0;
9785
9786 do {
9787 pend_bits = REG_RD(bp, IGU_REG_PENDING_BITS_STATUS);
9788
9789 if (pend_bits == 0)
9790 break;
9791
9792 usleep_range(1000, 2000);
9793 } while (cnt-- > 0);
9794
9795 if (cnt <= 0) {
9796 BNX2X_ERR("Still pending IGU requests pend_bits=%x!\n",
9797 pend_bits);
9798 return -EBUSY;
9799 }
9800
9801 return 0;
9802 }
9803
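/* The "process kill" flow: wait for the HW queues to drain, close gates
 * #2-#4, optionally prepare the MCP for reset, reset the chip (keeping
 * or resetting global blocks according to 'global') and re-open the
 * gates.
 */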
9804 static int bnx2x_process_kill(struct bnx2x *bp, bool global)
9805 {
9806 int cnt = 1000;
9807 u32 val = 0;
9808 u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
9809 u32 tags_63_32 = 0;
9810
9811 	/* Empty the Tetris buffer, wait for 1s */
9812 do {
9813 sr_cnt = REG_RD(bp, PXP2_REG_RD_SR_CNT);
9814 blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
9815 port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
9816 port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
9817 pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
9818 if (CHIP_IS_E3(bp))
9819 tags_63_32 = REG_RD(bp, PGLUE_B_REG_TAGS_63_32);
9820
9821 if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
9822 ((port_is_idle_0 & 0x1) == 0x1) &&
9823 ((port_is_idle_1 & 0x1) == 0x1) &&
9824 (pgl_exp_rom2 == 0xffffffff) &&
9825 (!CHIP_IS_E3(bp) || (tags_63_32 == 0xffffffff)))
9826 break;
9827 usleep_range(1000, 2000);
9828 } while (cnt-- > 0);
9829
9830 if (cnt <= 0) {
9831 BNX2X_ERR("Tetris buffer didn't get empty or there are still outstanding read requests after 1s!\n");
9832 BNX2X_ERR("sr_cnt=0x%08x, blk_cnt=0x%08x, port_is_idle_0=0x%08x, port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
9833 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
9834 pgl_exp_rom2);
9835 return -EAGAIN;
9836 }
9837
9838 barrier();
9839
9840 	/* Close gates #2, #3 and #4 */
9841 bnx2x_set_234_gates(bp, true);
9842
9843
9844 if (!CHIP_IS_E1x(bp) && bnx2x_er_poll_igu_vq(bp))
9845 return -EAGAIN;
9846
9847 
9848 	/* TBD: Indicate that "process kill" is in progress to MCP */
9849 	/* Clear "unprepared" bit */
9850 REG_WR(bp, MISC_REG_UNPREPARED, 0);
9851 barrier();
9852
9853
9854
9855
9856 usleep_range(1000, 2000);
9857
9858 	/* Prepare to chip reset: */
9859 	/* MCP */
9860 if (global)
9861 bnx2x_reset_mcp_prep(bp, &val);
9862
9863
9864 bnx2x_pxp_prep(bp);
9865 barrier();
9866
9867
9868 bnx2x_process_kill_chip_reset(bp, global);
9869 barrier();
9870
9871
9872 if (!CHIP_IS_E1x(bp))
9873 REG_WR(bp, PGLUE_B_REG_LATCHED_ERRORS_CLR, 0x7f);
9874
9875 	/* Recover after reset: */
9876 	/* MCP */
9877 if (global && bnx2x_reset_mcp_comp(bp, val))
9878 return -EAGAIN;
9879
9880 
9881 	/* TBD: Add resetting the NO_MCP mode DB here */
9882 	/* Open the gates #2, #3 and #4 */
9883 bnx2x_set_234_gates(bp, false);
9884
9885
9886
9887
9888 return 0;
9889 }
9890
9891 static int bnx2x_leader_reset(struct bnx2x *bp)
9892 {
9893 int rc = 0;
9894 bool global = bnx2x_reset_is_global(bp);
9895 u32 load_code;
9896
9897 
9898 	/* If not going to reset the MCP, load a "fake" driver to reset the
9899 	 * HW while the driver is owner of the HW. */
9900 if (!global && !BP_NOMCP(bp)) {
9901 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ,
9902 DRV_MSG_CODE_LOAD_REQ_WITH_LFA);
9903 if (!load_code) {
9904 BNX2X_ERR("MCP response failure, aborting\n");
9905 rc = -EAGAIN;
9906 goto exit_leader_reset;
9907 }
9908 if ((load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) &&
9909 (load_code != FW_MSG_CODE_DRV_LOAD_COMMON)) {
9910 BNX2X_ERR("MCP unexpected resp, aborting\n");
9911 rc = -EAGAIN;
9912 goto exit_leader_reset2;
9913 }
9914 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
9915 if (!load_code) {
9916 BNX2X_ERR("MCP response failure, aborting\n");
9917 rc = -EAGAIN;
9918 goto exit_leader_reset2;
9919 }
9920 }
9921
9922
9923 if (bnx2x_process_kill(bp, global)) {
9924 		BNX2X_ERR("Something bad happened on engine %d! Aii!\n",
9925 BP_PATH(bp));
9926 rc = -EAGAIN;
9927 goto exit_leader_reset2;
9928 }
9929
9930 	/*
9931 	 * Clear the RESET_IN_PROGRESS and RESET_GLOBAL bits and update the
9932 	 * driver state.
9933 	 */
9934 bnx2x_set_reset_done(bp);
9935 if (global)
9936 bnx2x_clear_reset_global(bp);
9937
9938 exit_leader_reset2:
9939
9940 if (!global && !BP_NOMCP(bp)) {
9941 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
9942 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
9943 }
9944 exit_leader_reset:
9945 bp->is_leader = 0;
9946 bnx2x_release_leader_lock(bp);
9947 smp_mb();
9948 return rc;
9949 }
9950
9951 static void bnx2x_recovery_failed(struct bnx2x *bp)
9952 {
9953 netdev_err(bp->dev, "Recovery has failed. Power cycle is needed.\n");
9954
9955
9956 netif_device_detach(bp->dev);
9957
9958 
9959 	/* Block ifup for all functions on this engine until
9960 	 * "process kill" or power cycle.
9961 	 */
9962 bnx2x_set_reset_in_progress(bp);
9963
9964
9965 bnx2x_set_power_state(bp, PCI_D3hot);
9966
9967 bp->recovery_state = BNX2X_RECOVERY_FAILED;
9968
9969 smp_mb();
9970 }
9971
9972 
9973 /* Assumption: runs under rtnl lock. This together with the fact
9974  * that it's called only from bnx2x_sp_rtnl() ensures that it
9975  * will never be called when netif_running(bp->dev) is false.
9976  */
9977 static void bnx2x_parity_recover(struct bnx2x *bp)
9978 {
9979 u32 error_recovered, error_unrecovered;
9980 bool is_parity, global = false;
9981 #ifdef CONFIG_BNX2X_SRIOV
9982 int vf_idx;
9983
9984 for (vf_idx = 0; vf_idx < bp->requested_nr_virtfn; vf_idx++) {
9985 struct bnx2x_virtf *vf = BP_VF(bp, vf_idx);
9986
9987 if (vf)
9988 vf->state = VF_LOST;
9989 }
9990 #endif
9991 DP(NETIF_MSG_HW, "Handling parity\n");
9992 while (1) {
9993 switch (bp->recovery_state) {
9994 case BNX2X_RECOVERY_INIT:
9995 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
9996 is_parity = bnx2x_chk_parity_attn(bp, &global, false);
9997 WARN_ON(!is_parity);
9998
9999
10000 if (bnx2x_trylock_leader_lock(bp)) {
10001 bnx2x_set_reset_in_progress(bp);
10002
10003 
10004 				/* Check if there is a global attention, and
10005 				 * if there was, set the global reset bit.
10006 				 */
10007 
10008 if (global)
10009 bnx2x_set_reset_global(bp);
10010
10011 bp->is_leader = 1;
10012 }
10013
10014
10015
10016 if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY, false))
10017 return;
10018
10019 bp->recovery_state = BNX2X_RECOVERY_WAIT;
10020
10021
10022
10023
10024
10025 smp_mb();
10026 break;
10027
10028 case BNX2X_RECOVERY_WAIT:
10029 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
10030 if (bp->is_leader) {
10031 int other_engine = BP_PATH(bp) ? 0 : 1;
10032 bool other_load_status =
10033 bnx2x_get_load_status(bp, other_engine);
10034 bool load_status =
10035 bnx2x_get_load_status(bp, BP_PATH(bp));
10036 global = bnx2x_reset_is_global(bp);
10037
10038 
10039 				/* In case of a parity in a global block, let
10040 				 * the first leader that performs a
10041 				 * leader_reset() reset the global blocks in
10042 				 * order to clear global attentions. Otherwise
10043 				 * the gates will remain closed for that
10044 				 * engine.
10045 				 */
10046 if (load_status ||
10047 (global && other_load_status)) {
10048
10049
10050
10051 schedule_delayed_work(&bp->sp_rtnl_task,
10052 HZ/10);
10053 return;
10054 } else {
10055
10056
10057
10058
10059
10060 if (bnx2x_leader_reset(bp)) {
10061 bnx2x_recovery_failed(bp);
10062 return;
10063 }
10064
10065
10066
10067
10068
10069
10070 break;
10071 }
10072 } else {
10073 if (!bnx2x_reset_is_done(bp, BP_PATH(bp))) {
10074 
10075 					/* Try to get a LEADER_LOCK HW lock as
10076 					 * long as a former leader may have
10077 					 * been unloaded by the user or
10078 					 * released leadership for another
10079 					 * reason. */
10080 if (bnx2x_trylock_leader_lock(bp)) {
10081
10082
10083
10084 bp->is_leader = 1;
10085 break;
10086 }
10087
10088 schedule_delayed_work(&bp->sp_rtnl_task,
10089 HZ/10);
10090 return;
10091
10092 } else {
10093
10094
10095
10096
10097 if (bnx2x_reset_is_global(bp)) {
10098 schedule_delayed_work(
10099 &bp->sp_rtnl_task,
10100 HZ/10);
10101 return;
10102 }
10103
10104 error_recovered =
10105 bp->eth_stats.recoverable_error;
10106 error_unrecovered =
10107 bp->eth_stats.unrecoverable_error;
10108 bp->recovery_state =
10109 BNX2X_RECOVERY_NIC_LOADING;
10110 if (bnx2x_nic_load(bp, LOAD_NORMAL)) {
10111 error_unrecovered++;
10112 netdev_err(bp->dev,
10113 "Recovery failed. Power cycle needed\n");
10114
10115 netif_device_detach(bp->dev);
10116
10117 bnx2x_set_power_state(
10118 bp, PCI_D3hot);
10119 smp_mb();
10120 } else {
10121 bp->recovery_state =
10122 BNX2X_RECOVERY_DONE;
10123 error_recovered++;
10124 smp_mb();
10125 }
10126 bp->eth_stats.recoverable_error =
10127 error_recovered;
10128 bp->eth_stats.unrecoverable_error =
10129 error_unrecovered;
10130
10131 return;
10132 }
10133 }
10134 default:
10135 return;
10136 }
10137 }
10138 }
10139
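/* Push the currently recorded VXLAN/Geneve UDP destination ports to the
 * FW through a SWITCH_UPDATE ramrod; returns the ramrod status.
 */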
10140 static int bnx2x_udp_port_update(struct bnx2x *bp)
10141 {
10142 struct bnx2x_func_switch_update_params *switch_update_params;
10143 struct bnx2x_func_state_params func_params = {NULL};
10144 struct bnx2x_udp_tunnel *udp_tunnel;
10145 u16 vxlan_port = 0, geneve_port = 0;
10146 int rc;
10147
10148 switch_update_params = &func_params.params.switch_update;
10149
10150
10151 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
10152 __set_bit(RAMROD_RETRY, &func_params.ramrod_flags);
10153
10154 func_params.f_obj = &bp->func_obj;
10155 func_params.cmd = BNX2X_F_CMD_SWITCH_UPDATE;
10156
10157
10158 __set_bit(BNX2X_F_UPDATE_TUNNEL_CFG_CHNG,
10159 &switch_update_params->changes);
10160
10161 if (bp->udp_tunnel_ports[BNX2X_UDP_PORT_GENEVE].count) {
10162 udp_tunnel = &bp->udp_tunnel_ports[BNX2X_UDP_PORT_GENEVE];
10163 geneve_port = udp_tunnel->dst_port;
10164 switch_update_params->geneve_dst_port = geneve_port;
10165 }
10166
10167 if (bp->udp_tunnel_ports[BNX2X_UDP_PORT_VXLAN].count) {
10168 udp_tunnel = &bp->udp_tunnel_ports[BNX2X_UDP_PORT_VXLAN];
10169 vxlan_port = udp_tunnel->dst_port;
10170 switch_update_params->vxlan_dst_port = vxlan_port;
10171 }
10172
10173
10174 __set_bit(BNX2X_F_UPDATE_TUNNEL_INNER_RSS,
10175 &switch_update_params->changes);
10176
10177 rc = bnx2x_func_state_change(bp, &func_params);
10178 if (rc)
10179 BNX2X_ERR("failed to set UDP dst port to %04x %04x (rc = 0x%x)\n",
10180 vxlan_port, geneve_port, rc);
10181 else
10182 DP(BNX2X_MSG_SP,
10183 "Configured UDP ports: Vxlan [%04x] Geneve [%04x]\n",
10184 vxlan_port, geneve_port);
10185
10186 return rc;
10187 }
10188
10189 static void __bnx2x_add_udp_port(struct bnx2x *bp, u16 port,
10190 enum bnx2x_udp_port_type type)
10191 {
10192 struct bnx2x_udp_tunnel *udp_port = &bp->udp_tunnel_ports[type];
10193
10194 if (!netif_running(bp->dev) || !IS_PF(bp) || CHIP_IS_E1x(bp))
10195 return;
10196
10197 if (udp_port->count && udp_port->dst_port == port) {
10198 udp_port->count++;
10199 return;
10200 }
10201
10202 if (udp_port->count) {
10203 DP(BNX2X_MSG_SP,
10204 "UDP tunnel [%d] - destination port limit reached\n",
10205 type);
10206 return;
10207 }
10208
10209 udp_port->dst_port = port;
10210 udp_port->count = 1;
10211 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_CHANGE_UDP_PORT, 0);
10212 }
10213
10214 static void __bnx2x_del_udp_port(struct bnx2x *bp, u16 port,
10215 enum bnx2x_udp_port_type type)
10216 {
10217 struct bnx2x_udp_tunnel *udp_port = &bp->udp_tunnel_ports[type];
10218
10219 if (!IS_PF(bp) || CHIP_IS_E1x(bp))
10220 return;
10221
10222 if (!udp_port->count || udp_port->dst_port != port) {
10223 DP(BNX2X_MSG_SP, "Invalid UDP tunnel [%d] port\n",
10224 type);
10225 return;
10226 }
10227
10228
10229 udp_port->count--;
10230 if (udp_port->count)
10231 return;
10232 udp_port->dst_port = 0;
10233
10234 if (netif_running(bp->dev))
10235 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_CHANGE_UDP_PORT, 0);
10236 else
10237 DP(BNX2X_MSG_SP, "Deleted UDP tunnel [%d] port %d\n",
10238 type, port);
10239 }
10240
10241 static void bnx2x_udp_tunnel_add(struct net_device *netdev,
10242 struct udp_tunnel_info *ti)
10243 {
10244 struct bnx2x *bp = netdev_priv(netdev);
10245 u16 t_port = ntohs(ti->port);
10246
10247 switch (ti->type) {
10248 case UDP_TUNNEL_TYPE_VXLAN:
10249 __bnx2x_add_udp_port(bp, t_port, BNX2X_UDP_PORT_VXLAN);
10250 break;
10251 case UDP_TUNNEL_TYPE_GENEVE:
10252 __bnx2x_add_udp_port(bp, t_port, BNX2X_UDP_PORT_GENEVE);
10253 break;
10254 default:
10255 break;
10256 }
10257 }
10258
10259 static void bnx2x_udp_tunnel_del(struct net_device *netdev,
10260 struct udp_tunnel_info *ti)
10261 {
10262 struct bnx2x *bp = netdev_priv(netdev);
10263 u16 t_port = ntohs(ti->port);
10264
10265 switch (ti->type) {
10266 case UDP_TUNNEL_TYPE_VXLAN:
10267 __bnx2x_del_udp_port(bp, t_port, BNX2X_UDP_PORT_VXLAN);
10268 break;
10269 case UDP_TUNNEL_TYPE_GENEVE:
10270 __bnx2x_del_udp_port(bp, t_port, BNX2X_UDP_PORT_GENEVE);
10271 break;
10272 default:
10273 break;
10274 }
10275 }
10276
10277 static int bnx2x_close(struct net_device *dev);
10278
10279 
10280 /* bnx2x_nic_unload() flushes the bnx2x_wq, thus the reset task is
10281  * scheduled on a general queue in order to prevent a dead lock. */
10282 static void bnx2x_sp_rtnl_task(struct work_struct *work)
10283 {
10284 struct bnx2x *bp = container_of(work, struct bnx2x, sp_rtnl_task.work);
10285
10286 rtnl_lock();
10287
10288 if (!netif_running(bp->dev)) {
10289 rtnl_unlock();
10290 return;
10291 }
10292
10293 if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE)) {
10294 #ifdef BNX2X_STOP_ON_ERROR
10295 BNX2X_ERR("recovery flow called but STOP_ON_ERROR defined so reset not done to allow debug dump,\n"
10296 "you will need to reboot when done\n");
10297 goto sp_rtnl_not_reset;
10298 #endif
10299
10300 		/* Clear all pending SP commands as we are going to reset
10301 		 * the function anyway.
10302 		 */
10303 bp->sp_rtnl_state = 0;
10304 smp_mb();
10305
10306 bnx2x_parity_recover(bp);
10307
10308 rtnl_unlock();
10309 return;
10310 }
10311
10312 if (test_and_clear_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state)) {
10313 #ifdef BNX2X_STOP_ON_ERROR
10314 BNX2X_ERR("recovery flow called but STOP_ON_ERROR defined so reset not done to allow debug dump,\n"
10315 "you will need to reboot when done\n");
10316 goto sp_rtnl_not_reset;
10317 #endif
10318
10319
10320
10321
10322
10323 bp->sp_rtnl_state = 0;
10324 smp_mb();
10325
10326
10327 bp->link_vars.link_up = 0;
10328 bp->force_link_down = true;
10329 netif_carrier_off(bp->dev);
10330 BNX2X_ERR("Indicating link is down due to Tx-timeout\n");
10331
10332 bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
10333
10334 
10335 		/* If load fails with -ENOMEM, unload and retry once more;
10336 		 * if it still fails, report it to the user. */
10337 if (bnx2x_nic_load(bp, LOAD_NORMAL) == -ENOMEM) {
10338 bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
10339 if (bnx2x_nic_load(bp, LOAD_NORMAL))
10340 				BNX2X_ERR("Opening the NIC failed again!\n");
10341 }
10342 rtnl_unlock();
10343 return;
10344 }
10345 #ifdef BNX2X_STOP_ON_ERROR
10346 sp_rtnl_not_reset:
10347 #endif
10348 if (test_and_clear_bit(BNX2X_SP_RTNL_SETUP_TC, &bp->sp_rtnl_state))
10349 bnx2x_setup_tc(bp->dev, bp->dcbx_port_params.ets.num_of_cos);
10350 if (test_and_clear_bit(BNX2X_SP_RTNL_AFEX_F_UPDATE, &bp->sp_rtnl_state))
10351 bnx2x_after_function_update(bp);
10352
10353 
10354 	/* In case of fan failure we need to unload the driver, since we
10355 	 * are trying to prevent permanent overheating damage.
10356 	 */
10357 if (test_and_clear_bit(BNX2X_SP_RTNL_FAN_FAILURE, &bp->sp_rtnl_state)) {
10358 DP(NETIF_MSG_HW, "fan failure detected. Unloading driver\n");
10359 netif_device_detach(bp->dev);
10360 bnx2x_close(bp->dev);
10361 rtnl_unlock();
10362 return;
10363 }
10364
10365 if (test_and_clear_bit(BNX2X_SP_RTNL_VFPF_MCAST, &bp->sp_rtnl_state)) {
10366 DP(BNX2X_MSG_SP,
10367 "sending set mcast vf pf channel message from rtnl sp-task\n");
10368 bnx2x_vfpf_set_mcast(bp->dev);
10369 }
10370 if (test_and_clear_bit(BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN,
10371 &bp->sp_rtnl_state)){
10372 if (netif_carrier_ok(bp->dev)) {
10373 bnx2x_tx_disable(bp);
10374 			BNX2X_ERR("PF indicated channel is not serviceable anymore. This means this VF device is no longer operational\n");
10375 }
10376 }
10377
10378 if (test_and_clear_bit(BNX2X_SP_RTNL_RX_MODE, &bp->sp_rtnl_state)) {
10379 DP(BNX2X_MSG_SP, "Handling Rx Mode setting\n");
10380 bnx2x_set_rx_mode_inner(bp);
10381 }
10382
10383 if (test_and_clear_bit(BNX2X_SP_RTNL_HYPERVISOR_VLAN,
10384 &bp->sp_rtnl_state))
10385 bnx2x_pf_set_vfs_vlan(bp);
10386
10387 if (test_and_clear_bit(BNX2X_SP_RTNL_TX_STOP, &bp->sp_rtnl_state)) {
10388 bnx2x_dcbx_stop_hw_tx(bp);
10389 bnx2x_dcbx_resume_hw_tx(bp);
10390 }
10391
10392 if (test_and_clear_bit(BNX2X_SP_RTNL_GET_DRV_VERSION,
10393 &bp->sp_rtnl_state))
10394 bnx2x_update_mng_version(bp);
10395
10396 if (test_and_clear_bit(BNX2X_SP_RTNL_UPDATE_SVID, &bp->sp_rtnl_state))
10397 bnx2x_handle_update_svid_cmd(bp);
10398
10399 if (test_and_clear_bit(BNX2X_SP_RTNL_CHANGE_UDP_PORT,
10400 &bp->sp_rtnl_state)) {
10401 if (bnx2x_udp_port_update(bp)) {
10402
10403 memset(bp->udp_tunnel_ports, 0,
10404 sizeof(struct bnx2x_udp_tunnel) *
10405 BNX2X_UDP_PORT_MAX);
10406 } else {
10407
10408
10409
10410
10411 if (!bp->udp_tunnel_ports[BNX2X_UDP_PORT_VXLAN].count &&
10412 !bp->udp_tunnel_ports[BNX2X_UDP_PORT_GENEVE].count)
10413 udp_tunnel_get_rx_info(bp->dev);
10414 }
10415 }
10416
10417 
10418 	/* work which needs rtnl lock not-taken (as it takes the lock itself
10419 	 * and can be called from other contexts as well) */
10420 rtnl_unlock();
10421
10422
10423 if (IS_SRIOV(bp) && test_and_clear_bit(BNX2X_SP_RTNL_ENABLE_SRIOV,
10424 &bp->sp_rtnl_state)) {
10425 bnx2x_disable_sriov(bp);
10426 bnx2x_enable_sriov(bp);
10427 }
10428 }
10429
10430 static void bnx2x_period_task(struct work_struct *work)
10431 {
10432 struct bnx2x *bp = container_of(work, struct bnx2x, period_task.work);
10433
10434 if (!netif_running(bp->dev))
10435 goto period_task_exit;
10436
10437 if (CHIP_REV_IS_SLOW(bp)) {
10438 BNX2X_ERR("period task called on emulation, ignoring\n");
10439 goto period_task_exit;
10440 }
10441
10442 bnx2x_acquire_phy_lock(bp);
10443
10444 	/* The barrier is needed to ensure the ordering between the writing
10445 	 * to bp->port.pmf in bnx2x_nic_load()/bnx2x_pmf_update() and
10446 	 * the reading here.
10447 	 */
10448 smp_mb();
10449 if (bp->port.pmf) {
10450 bnx2x_period_func(&bp->link_params, &bp->link_vars);
10451
10452
10453 queue_delayed_work(bnx2x_wq, &bp->period_task, 1*HZ);
10454 }
10455
10456 bnx2x_release_phy_lock(bp);
10457 period_task_exit:
10458 return;
10459 }
10460
10461 
10462 /*
10463  * Init service functions
10464  */
10465 static u32 bnx2x_get_pretend_reg(struct bnx2x *bp)
10466 {
10467 u32 base = PXP2_REG_PGL_PRETEND_FUNC_F0;
10468 u32 stride = PXP2_REG_PGL_PRETEND_FUNC_F1 - base;
10469 return base + (BP_ABS_FUNC(bp)) * stride;
10470 }
10471
10472 static bool bnx2x_prev_unload_close_umac(struct bnx2x *bp,
10473 u8 port, u32 reset_reg,
10474 struct bnx2x_mac_vals *vals)
10475 {
10476 u32 mask = MISC_REGISTERS_RESET_REG_2_UMAC0 << port;
10477 u32 base_addr;
10478
10479 if (!(mask & reset_reg))
10480 return false;
10481
10482 BNX2X_DEV_INFO("Disable umac Rx %02x\n", port);
10483 base_addr = port ? GRCBASE_UMAC1 : GRCBASE_UMAC0;
10484 vals->umac_addr[port] = base_addr + UMAC_REG_COMMAND_CONFIG;
10485 vals->umac_val[port] = REG_RD(bp, vals->umac_addr[port]);
10486 REG_WR(bp, vals->umac_addr[port], 0);
10487
10488 return true;
10489 }
10490
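/* Stop MAC Rx of a previous driver instance (e.g. UNDI), saving the
 * original register values in *vals so they can be restored later.
 */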
10491 static void bnx2x_prev_unload_close_mac(struct bnx2x *bp,
10492 struct bnx2x_mac_vals *vals)
10493 {
10494 u32 val, base_addr, offset, mask, reset_reg;
10495 bool mac_stopped = false;
10496 u8 port = BP_PORT(bp);
10497
10498
10499 memset(vals, 0, sizeof(*vals));
10500
10501 reset_reg = REG_RD(bp, MISC_REG_RESET_REG_2);
10502
10503 if (!CHIP_IS_E3(bp)) {
10504 val = REG_RD(bp, NIG_REG_BMAC0_REGS_OUT_EN + port * 4);
10505 mask = MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port;
10506 if ((mask & reset_reg) && val) {
10507 u32 wb_data[2];
10508 BNX2X_DEV_INFO("Disable bmac Rx\n");
10509 base_addr = BP_PORT(bp) ? NIG_REG_INGRESS_BMAC1_MEM
10510 : NIG_REG_INGRESS_BMAC0_MEM;
10511 offset = CHIP_IS_E2(bp) ? BIGMAC2_REGISTER_BMAC_CONTROL
10512 : BIGMAC_REGISTER_BMAC_CONTROL;
10513
10514 /* Use rd/wr rather than DMAE. This is safe since the
10515  * MCP won't access the bus due to the request to
10516  * unload, and no function on the path can be loaded
10517  * at this time.
10518  */
10520 wb_data[0] = REG_RD(bp, base_addr + offset);
10521 wb_data[1] = REG_RD(bp, base_addr + offset + 0x4);
10522 vals->bmac_addr = base_addr + offset;
10523 vals->bmac_val[0] = wb_data[0];
10524 vals->bmac_val[1] = wb_data[1];
10525 wb_data[0] &= ~BMAC_CONTROL_RX_ENABLE;
10526 REG_WR(bp, vals->bmac_addr, wb_data[0]);
10527 REG_WR(bp, vals->bmac_addr + 0x4, wb_data[1]);
10528 }
10529 BNX2X_DEV_INFO("Disable emac Rx\n");
10530 vals->emac_addr = NIG_REG_NIG_EMAC0_EN + BP_PORT(bp)*4;
10531 vals->emac_val = REG_RD(bp, vals->emac_addr);
10532 REG_WR(bp, vals->emac_addr, 0);
10533 mac_stopped = true;
10534 } else {
10535 if (reset_reg & MISC_REGISTERS_RESET_REG_2_XMAC) {
10536 BNX2X_DEV_INFO("Disable xmac Rx\n");
10537 base_addr = BP_PORT(bp) ? GRCBASE_XMAC1 : GRCBASE_XMAC0;
10538 val = REG_RD(bp, base_addr + XMAC_REG_PFC_CTRL_HI);
10539 REG_WR(bp, base_addr + XMAC_REG_PFC_CTRL_HI,
10540 val & ~(1 << 1));
10541 REG_WR(bp, base_addr + XMAC_REG_PFC_CTRL_HI,
10542 val | (1 << 1));
10543 vals->xmac_addr = base_addr + XMAC_REG_CTRL;
10544 vals->xmac_val = REG_RD(bp, vals->xmac_addr);
10545 REG_WR(bp, vals->xmac_addr, 0);
10546 mac_stopped = true;
10547 }
10548
10549 mac_stopped |= bnx2x_prev_unload_close_umac(bp, 0,
10550 reset_reg, vals);
10551 mac_stopped |= bnx2x_prev_unload_close_umac(bp, 1,
10552 reset_reg, vals);
10553 }
10554
10555 if (mac_stopped)
10556 msleep(20);
10557 }
10558
10559 #define BNX2X_PREV_UNDI_PROD_ADDR(p) (BAR_TSTRORM_INTMEM + 0x1508 + ((p) << 4))
10560 #define BNX2X_PREV_UNDI_PROD_ADDR_H(f) (BAR_TSTRORM_INTMEM + \
10561 0x1848 + ((f) << 4))
10562 #define BNX2X_PREV_UNDI_RCQ(val) ((val) & 0xffff)
10563 #define BNX2X_PREV_UNDI_BD(val) ((val) >> 16 & 0xffff)
10564 #define BNX2X_PREV_UNDI_PROD(rcq, bd) ((bd) << 16 | (rcq))
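/* The T-storm keeps both UNDI producers in a single 32-bit word: the
 * RCQ producer in the low 16 bits and the BD producer in the high 16,
 * e.g. BNX2X_PREV_UNDI_PROD(0x0001, 0x0002) == 0x00020001.
 */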
10565
10566 #define BCM_5710_UNDI_FW_MF_MAJOR (0x07)
10567 #define BCM_5710_UNDI_FW_MF_MINOR (0x08)
10568 #define BCM_5710_UNDI_FW_MF_VERS (0x05)
10569
10570 static bool bnx2x_prev_is_after_undi(struct bnx2x *bp)
10571 {
10572 /* UNDI marks its presence in DORQ -
10573  * it initializes CID offset for normal bell to 0x7
10574  */
10575 if (!(REG_RD(bp, MISC_REG_RESET_REG_1) &
10576 MISC_REGISTERS_RESET_REG_1_RST_DORQ))
10577 return false;
10578
10579 if (REG_RD(bp, DORQ_REG_NORM_CID_OFST) == 0x7) {
10580 BNX2X_DEV_INFO("UNDI previously loaded\n");
10581 return true;
10582 }
10583
10584 return false;
10585 }
10586
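/* Bump both UNDI producers (BD and RCQ) by @inc so that rings left
 * behind by a boot-time UNDI driver keep draining while the BRB is
 * emptied in bnx2x_prev_unload_common().
 */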
10587 static void bnx2x_prev_unload_undi_inc(struct bnx2x *bp, u8 inc)
10588 {
10589 u16 rcq, bd;
10590 u32 addr, tmp_reg;
10591
10592 if (BP_FUNC(bp) < 2)
10593 addr = BNX2X_PREV_UNDI_PROD_ADDR(BP_PORT(bp));
10594 else
10595 addr = BNX2X_PREV_UNDI_PROD_ADDR_H(BP_FUNC(bp) - 2);
10596
10597 tmp_reg = REG_RD(bp, addr);
10598 rcq = BNX2X_PREV_UNDI_RCQ(tmp_reg) + inc;
10599 bd = BNX2X_PREV_UNDI_BD(tmp_reg) + inc;
10600
10601 tmp_reg = BNX2X_PREV_UNDI_PROD(rcq, bd);
10602 REG_WR(bp, addr, tmp_reg);
10603
10604 BNX2X_DEV_INFO("UNDI producer [%d/%d][%08x] rings bd -> 0x%04x, rcq -> 0x%04x\n",
10605 BP_PORT(bp), BP_FUNC(bp), addr, bd, rcq);
10606 }
10607
10608 static int bnx2x_prev_mcp_done(struct bnx2x *bp)
10609 {
10610 u32 rc = bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE,
10611 DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET);
10612 if (!rc) {
10613 BNX2X_ERR("MCP response failure, aborting\n");
10614 return -EBUSY;
10615 }
10616
10617 return 0;
10618 }
10619
10620 static struct bnx2x_prev_path_list *
10621 bnx2x_prev_path_get_entry(struct bnx2x *bp)
10622 {
10623 struct bnx2x_prev_path_list *tmp_list;
10624
10625 list_for_each_entry(tmp_list, &bnx2x_prev_list, list)
10626 if (PCI_SLOT(bp->pdev->devfn) == tmp_list->slot &&
10627 bp->pdev->bus->number == tmp_list->bus &&
10628 BP_PATH(bp) == tmp_list->path)
10629 return tmp_list;
10630
10631 return NULL;
10632 }
10633
10634 static int bnx2x_prev_path_mark_eeh(struct bnx2x *bp)
10635 {
10636 struct bnx2x_prev_path_list *tmp_list;
10637 int rc;
10638
10639 rc = down_interruptible(&bnx2x_prev_sem);
10640 if (rc) {
10641 BNX2X_ERR("Received %d when trying to take the lock\n", rc);
10642 return rc;
10643 }
10644
10645 tmp_list = bnx2x_prev_path_get_entry(bp);
10646 if (tmp_list) {
10647 tmp_list->aer = 1;
10648 rc = 0;
10649 } else {
10650 BNX2X_ERR("path %d: Entry does not exist for EEH; flow occurs before initial insmod is over?\n",
10651 BP_PATH(bp));
10652 }
10653
10654 up(&bnx2x_prev_sem);
10655
10656 return rc;
10657 }
10658
10659 static bool bnx2x_prev_is_path_marked(struct bnx2x *bp)
10660 {
10661 struct bnx2x_prev_path_list *tmp_list;
10662 bool rc = false;
10663
10664 if (down_trylock(&bnx2x_prev_sem))
10665 return false;
10666
10667 tmp_list = bnx2x_prev_path_get_entry(bp);
10668 if (tmp_list) {
10669 if (tmp_list->aer) {
10670 DP(NETIF_MSG_HW, "Path %d was marked by AER\n",
10671 BP_PATH(bp));
10672 } else {
10673 rc = true;
10674 BNX2X_DEV_INFO("Path %d was already cleaned from previous drivers\n",
10675 BP_PATH(bp));
10676 }
10677 }
10678
10679 up(&bnx2x_prev_sem);
10680
10681 return rc;
10682 }
10683
10684 bool bnx2x_port_after_undi(struct bnx2x *bp)
10685 {
10686 struct bnx2x_prev_path_list *entry;
10687 bool val;
10688
10689 down(&bnx2x_prev_sem);
10690
10691 entry = bnx2x_prev_path_get_entry(bp);
10692 val = !!(entry && (entry->undi & (1 << BP_PORT(bp))));
10693
10694 up(&bnx2x_prev_sem);
10695
10696 return val;
10697 }
10698
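/* Record in the global bnx2x_prev_list that this bus/slot/path has
 * finished its previous-unload flow; an existing entry only gets its
 * AER mark cleared, otherwise a new entry is allocated and linked in.
 */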
10699 static int bnx2x_prev_mark_path(struct bnx2x *bp, bool after_undi)
10700 {
10701 struct bnx2x_prev_path_list *tmp_list;
10702 int rc;
10703
10704 rc = down_interruptible(&bnx2x_prev_sem);
10705 if (rc) {
10706 BNX2X_ERR("Received %d when trying to take the lock\n", rc);
10707 return rc;
10708 }
10709
10710 /* Check whether the entry for this path already exists */
10711 tmp_list = bnx2x_prev_path_get_entry(bp);
10712 if (tmp_list) {
10713 if (!tmp_list->aer) {
10714 BNX2X_ERR("Re-marking the path\n");
10715 } else {
10716 DP(NETIF_MSG_HW, "Removing AER indication from path %d\n",
10717 BP_PATH(bp));
10718 tmp_list->aer = 0;
10719 }
10720 up(&bnx2x_prev_sem);
10721 return 0;
10722 }
10723 up(&bnx2x_prev_sem);
10724
10725 /* Create an entry for this path and add it */
10726 tmp_list = kmalloc(sizeof(struct bnx2x_prev_path_list), GFP_KERNEL);
10727 if (!tmp_list) {
10728 BNX2X_ERR("Failed to allocate 'bnx2x_prev_path_list'\n");
10729 return -ENOMEM;
10730 }
10731
10732 tmp_list->bus = bp->pdev->bus->number;
10733 tmp_list->slot = PCI_SLOT(bp->pdev->devfn);
10734 tmp_list->path = BP_PATH(bp);
10735 tmp_list->aer = 0;
10736 tmp_list->undi = after_undi ? (1 << BP_PORT(bp)) : 0;
10737
10738 rc = down_interruptible(&bnx2x_prev_sem);
10739 if (rc) {
10740 BNX2X_ERR("Received %d when trying to take the lock\n", rc);
10741 kfree(tmp_list);
10742 } else {
10743 DP(NETIF_MSG_HW, "Marked path [%d] - finished previous unload\n",
10744 BP_PATH(bp));
10745 list_add(&tmp_list->list, &bnx2x_prev_list);
10746 up(&bnx2x_prev_sem);
10747 }
10748
10749 return rc;
10750 }
10751
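/* Request a Function Level Reset from the MCP. Only E2 and newer
 * chips with a recent enough bootcode support this; pending PCIe
 * transactions are drained (best effort) before issuing the command.
 */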
10752 static int bnx2x_do_flr(struct bnx2x *bp)
10753 {
10754 struct pci_dev *dev = bp->pdev;
10755
10756 if (CHIP_IS_E1x(bp)) {
10757 BNX2X_DEV_INFO("FLR not supported in E1/E1H\n");
10758 return -EINVAL;
10759 }
10760
10761 /* only bootcode REQ_BC_VER_4_INITIATE_FLR and onwards support FLR */
10762 if (bp->common.bc_ver < REQ_BC_VER_4_INITIATE_FLR) {
10763 BNX2X_ERR("FLR not supported by BC_VER: 0x%x\n",
10764 bp->common.bc_ver);
10765 return -EINVAL;
10766 }
10767
10768 if (!pci_wait_for_pending_transaction(dev))
10769 dev_err(&dev->dev, "transaction is not cleared; proceeding with reset anyway\n");
10770
10771 BNX2X_DEV_INFO("Initiating FLR\n");
10772 bnx2x_fw_command(bp, DRV_MSG_CODE_INITIATE_FLR, 0);
10773
10774 return 0;
10775 }
10776
10777 static int bnx2x_prev_unload_uncommon(struct bnx2x *bp)
10778 {
10779 int rc;
10780
10781 BNX2X_DEV_INFO("Uncommon unload Flow\n");
10782
10783 /* Test if previous unload process was already finished for this path */
10784 if (bnx2x_prev_is_path_marked(bp))
10785 return bnx2x_prev_mcp_done(bp);
10786
10787 BNX2X_DEV_INFO("Path is unmarked\n");
10788
10789 /* Cannot proceed with FLR if UNDI is loaded, since FW does not match */
10790 if (bnx2x_prev_is_after_undi(bp))
10791 goto out;
10792
10793 /* If function has FLR capabilities, and existing FW version matches
10794  * the one required, then FLR will be sufficient to clean any residue
10795  * left by previous driver
10796  */
10797 rc = bnx2x_compare_fw_ver(bp, FW_MSG_CODE_DRV_LOAD_FUNCTION, false);
10798
10799 if (!rc) {
10800 /* fw version is good */
10801 BNX2X_DEV_INFO("FW version matches our own. Attempting FLR\n");
10802 rc = bnx2x_do_flr(bp);
10803 }
10804
10805 if (!rc) {
10806 /* FLR was performed */
10807 BNX2X_DEV_INFO("FLR successful\n");
10808 return 0;
10809 }
10810
10811 BNX2X_DEV_INFO("Could not FLR\n");
10812
10813 out:
10814 /* Close the MCP request, return failure */
10815 rc = bnx2x_prev_mcp_done(bp);
10816 if (!rc)
10817 rc = BNX2X_PREV_WAIT_NEEDED;
10818
10819 return rc;
10820 }
10821
10822 static int bnx2x_prev_unload_common(struct bnx2x *bp)
10823 {
10824 u32 reset_reg, tmp_reg = 0, rc;
10825 bool prev_undi = false;
10826 struct bnx2x_mac_vals mac_vals;
10827
10828 /* It is possible a previous function received 'common' answer,
10829  * but hasn't loaded yet, therefore creating a scenario of
10830  * multiple functions receiving 'common' on the same path.
10831  */
10832 BNX2X_DEV_INFO("Common unload Flow\n");
10833
10834 memset(&mac_vals, 0, sizeof(mac_vals));
10835
10836 if (bnx2x_prev_is_path_marked(bp))
10837 return bnx2x_prev_mcp_done(bp);
10838
10839 reset_reg = REG_RD(bp, MISC_REG_RESET_REG_1);
10840
10841 /* Reset should be performed after BRB is emptied */
10842 if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_BRB1) {
10843 u32 timer_count = 1000;
10844
10845 /* Close the MAC Rx to prevent BRB from filling up */
10846 bnx2x_prev_unload_close_mac(bp, &mac_vals);
10847
10848 /* close LLH filters for both ports towards the BRB */
10849 bnx2x_set_rx_filter(&bp->link_params, 0);
10850 bp->link_params.port ^= 1;
10851 bnx2x_set_rx_filter(&bp->link_params, 0);
10852 bp->link_params.port ^= 1;
10853
10854 /* Check if the UNDI driver was previously loaded */
10855 if (bnx2x_prev_is_after_undi(bp)) {
10856 prev_undi = true;
10857 /* clear the UNDI indication */
10858 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
10859 /* clear possible idle check errors */
10860 REG_RD(bp, NIG_REG_NIG_INT_STS_CLR_0);
10861 }
10862 if (!CHIP_IS_E1x(bp))
10863 /* block FW from writing to host */
10864 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
10865
10866 /* wait until BRB is empty */
10867 tmp_reg = REG_RD(bp, BRB1_REG_NUM_OF_FULL_BLOCKS);
10868 while (timer_count) {
10869 u32 prev_brb = tmp_reg;
10870
10871 tmp_reg = REG_RD(bp, BRB1_REG_NUM_OF_FULL_BLOCKS);
10872 if (!tmp_reg)
10873 break;
10874
10875 BNX2X_DEV_INFO("BRB still has 0x%08x\n", tmp_reg);
10876
10877 /* reset timer as long as BRB actually gets emptied */
10878 if (prev_brb > tmp_reg)
10879 timer_count = 1000;
10880 else
10881 timer_count--;
10882
10883 /* If UNDI resides in memory, manually increment it */
10884 if (prev_undi)
10885 bnx2x_prev_unload_undi_inc(bp, 1);
10886
10887 udelay(10);
10888 }
10889
10890 if (!timer_count)
10891 BNX2X_ERR("Failed to empty BRB, hope for the best\n");
10892 }
10893
10894 /* No packets are in the pipeline, path is ready for reset */
10895 bnx2x_reset_common(bp);
10896
10897 if (mac_vals.xmac_addr)
10898 REG_WR(bp, mac_vals.xmac_addr, mac_vals.xmac_val);
10899 if (mac_vals.umac_addr[0])
10900 REG_WR(bp, mac_vals.umac_addr[0], mac_vals.umac_val[0]);
10901 if (mac_vals.umac_addr[1])
10902 REG_WR(bp, mac_vals.umac_addr[1], mac_vals.umac_val[1]);
10903 if (mac_vals.emac_addr)
10904 REG_WR(bp, mac_vals.emac_addr, mac_vals.emac_val);
10905 if (mac_vals.bmac_addr) {
10906 REG_WR(bp, mac_vals.bmac_addr, mac_vals.bmac_val[0]);
10907 REG_WR(bp, mac_vals.bmac_addr + 4, mac_vals.bmac_val[1]);
10908 }
10909
10910 rc = bnx2x_prev_mark_path(bp, prev_undi);
10911 if (rc) {
10912 bnx2x_prev_mcp_done(bp);
10913 return rc;
10914 }
10915
10916 return bnx2x_prev_mcp_done(bp);
10917 }
10918
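/* Top-level previous-unload flow: release stale HW/NVRAM/ALR locks,
 * then negotiate with the MCP up to ten times. A 'common' reply (or
 * an AER-marked path) takes the heavy BRB/MAC cleanup above; anything
 * else goes through the FLR-based 'uncommon' flow. Failure ends in
 * -EPROBE_DEFER so the probe can be retried later.
 */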
10919 static int bnx2x_prev_unload(struct bnx2x *bp)
10920 {
10921 int time_counter = 10;
10922 u32 rc, fw, hw_lock_reg, hw_lock_val;
10923 BNX2X_DEV_INFO("Entering Previous Unload Flow\n");
10924
10925 /* clear hw from errors which may have resulted from an interrupted
10926  * DMAE transaction.
10927  */
10928 bnx2x_clean_pglue_errors(bp);
10929
10930 /* Release previously held locks */
10931 hw_lock_reg = (BP_FUNC(bp) <= 5) ?
10932 (MISC_REG_DRIVER_CONTROL_1 + BP_FUNC(bp) * 8) :
10933 (MISC_REG_DRIVER_CONTROL_7 + (BP_FUNC(bp) - 6) * 8);
10934
10935 hw_lock_val = REG_RD(bp, hw_lock_reg);
10936 if (hw_lock_val) {
10937 if (hw_lock_val & HW_LOCK_RESOURCE_NVRAM) {
10938 BNX2X_DEV_INFO("Release previously held NVRAM lock\n");
10939 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
10940 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << BP_PORT(bp)));
10941 }
10942
10943 BNX2X_DEV_INFO("Release previously held hw lock\n");
10944 REG_WR(bp, hw_lock_reg, 0xffffffff);
10945 } else
10946 BNX2X_DEV_INFO("No need to release hw/nvram locks\n");
10947
10948 if (MCPR_ACCESS_LOCK_LOCK & REG_RD(bp, MCP_REG_MCPR_ACCESS_LOCK)) {
10949 BNX2X_DEV_INFO("Release previously held alr\n");
10950 bnx2x_release_alr(bp);
10951 }
10952
10953 do {
10954 int aer = 0;
10955 /* Lock MCP using an unload request */
10956 fw = bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS, 0);
10957 if (!fw) {
10958 BNX2X_ERR("MCP response failure, aborting\n");
10959 rc = -EBUSY;
10960 break;
10961 }
10962
10963 rc = down_interruptible(&bnx2x_prev_sem);
10964 if (rc) {
10965 BNX2X_ERR("Cannot check for AER; Received %d when trying to take the lock\n",
10966 rc);
10967 } else {
10968 /* If Path is marked by EEH, ignore unload status */
10969 aer = !!(bnx2x_prev_path_get_entry(bp) &&
10970 bnx2x_prev_path_get_entry(bp)->aer);
10971 up(&bnx2x_prev_sem);
10972 }
10973
10974 if (fw == FW_MSG_CODE_DRV_UNLOAD_COMMON || aer) {
10975 rc = bnx2x_prev_unload_common(bp);
10976 break;
10977 }
10978
10979 /* non-common reply from MCP might require looping */
10980 rc = bnx2x_prev_unload_uncommon(bp);
10981 if (rc != BNX2X_PREV_WAIT_NEEDED)
10982 break;
10983
10984 msleep(20);
10985 } while (--time_counter);
10986
10987 if (!time_counter || rc) {
10988 BNX2X_DEV_INFO("Unloading previous driver did not occur, possibly due to MF UNDI\n");
10989 rc = -EPROBE_DEFER;
10990 }
10991
10992 /* Mark function if its port was used to boot from SAN */
10993 if (bnx2x_port_after_undi(bp))
10994 bp->link_params.feature_config_flags |=
10995 FEATURE_CONFIG_BOOT_FROM_SAN;
10996
10997 BNX2X_DEV_INFO("Finished Previous Unload Flow [%d]\n", rc);
10998
10999 return rc;
11000 }
11001
11002 static void bnx2x_get_common_hwinfo(struct bnx2x *bp)
11003 {
11004 u32 val, val2, val3, val4, id, boot_mode;
11005 u16 pmc;
11006
11007
11008 /* Get the chip revision info and set flags */
11009 val = REG_RD(bp, MISC_REG_CHIP_NUM);
11010 id = ((val & 0xffff) << 16);
11011 val = REG_RD(bp, MISC_REG_CHIP_REV);
11012 id |= ((val & 0xf) << 12);
11013
11014 /* Metal is read from PCI regs, but we can't access >=0x400 from
11015  * the configuration space (so we need to reg_rd)
11016  */
11017 val = REG_RD(bp, PCICFG_OFFSET + PCI_ID_VAL3);
11018 id |= (((val >> 24) & 0xf) << 4);
11019 val = REG_RD(bp, MISC_REG_BOND_ID);
11020 id |= (val & 0xf);
11021 bp->common.chip_id = id;
11022
11023 /* force 57811 according to MISC register */
11024 if (REG_RD(bp, MISC_REG_CHIP_TYPE) & MISC_REG_CHIP_TYPE_57811_MASK) {
11025 if (CHIP_IS_57810(bp))
11026 bp->common.chip_id = (CHIP_NUM_57811 << 16) |
11027 (bp->common.chip_id & 0x0000FFFF);
11028 else if (CHIP_IS_57810_MF(bp))
11029 bp->common.chip_id = (CHIP_NUM_57811_MF << 16) |
11030 (bp->common.chip_id & 0x0000FFFF);
11031 bp->common.chip_id |= 0x1;
11032 }
11033
11034 /* Set doorbell size */
11035 bp->db_size = (1 << BNX2X_DB_SHIFT);
11036
11037 if (!CHIP_IS_E1x(bp)) {
11038 val = REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR);
11039 if ((val & 1) == 0)
11040 val = REG_RD(bp, MISC_REG_PORT4MODE_EN);
11041 else
11042 val = (val >> 1) & 1;
11043 BNX2X_DEV_INFO("chip is in %s\n", val ? "4_PORT_MODE" :
11044 "2_PORT_MODE");
11045 bp->common.chip_port_mode = val ? CHIP_4_PORT_MODE :
11046 CHIP_2_PORT_MODE;
11047
11048 if (CHIP_MODE_IS_4_PORT(bp))
11049 bp->pfid = (bp->pf_num >> 1);
11050 else
11051 bp->pfid = (bp->pf_num & 0x6);
11052 } else {
11053 bp->common.chip_port_mode = CHIP_PORT_MODE_NONE;
11054 bp->pfid = bp->pf_num;
11055 }
11056
11057 BNX2X_DEV_INFO("pf_id: %x\n", bp->pfid);
11058
11059 bp->link_params.chip_id = bp->common.chip_id;
11060 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
11061
11062 val = (REG_RD(bp, 0x2874) & 0x55);
11063 if ((bp->common.chip_id & 0x1) ||
11064 (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
11065 bp->flags |= ONE_PORT_FLAG;
11066 BNX2X_DEV_INFO("single port device\n");
11067 }
11068
11069 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
11070 bp->common.flash_size = (BNX2X_NVRAM_1MB_SIZE <<
11071 (val & MCPR_NVM_CFG4_FLASH_SIZE));
11072 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
11073 bp->common.flash_size, bp->common.flash_size);
11074
11075 bnx2x_init_shmem(bp);
11076
11077 bp->common.shmem2_base = REG_RD(bp, (BP_PATH(bp) ?
11078 MISC_REG_GENERIC_CR_1 :
11079 MISC_REG_GENERIC_CR_0));
11080
11081 bp->link_params.shmem_base = bp->common.shmem_base;
11082 bp->link_params.shmem2_base = bp->common.shmem2_base;
11083 if (SHMEM2_RD(bp, size) >
11084 (u32)offsetof(struct shmem2_region, lfa_host_addr[BP_PORT(bp)]))
11085 bp->link_params.lfa_base =
11086 REG_RD(bp, bp->common.shmem2_base +
11087 (u32)offsetof(struct shmem2_region,
11088 lfa_host_addr[BP_PORT(bp)]));
11089 else
11090 bp->link_params.lfa_base = 0;
11091 BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n",
11092 bp->common.shmem_base, bp->common.shmem2_base);
11093
11094 if (!bp->common.shmem_base) {
11095 BNX2X_DEV_INFO("MCP not active\n");
11096 bp->flags |= NO_MCP_FLAG;
11097 return;
11098 }
11099
11100 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
11101 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
11102
11103 bp->link_params.hw_led_mode = ((bp->common.hw_config &
11104 SHARED_HW_CFG_LED_MODE_MASK) >>
11105 SHARED_HW_CFG_LED_MODE_SHIFT);
11106
11107 bp->link_params.feature_config_flags = 0;
11108 val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
11109 if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
11110 bp->link_params.feature_config_flags |=
11111 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
11112 else
11113 bp->link_params.feature_config_flags &=
11114 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
11115
11116 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
11117 bp->common.bc_ver = val;
11118 BNX2X_DEV_INFO("bc_ver %X\n", val);
11119 if (val < BNX2X_BC_VER) {
11120 /* for now only warn
11121  * later we might set to error */
11122 BNX2X_ERR("This driver needs bc_ver %X but found %X, please upgrade BC\n",
11123 BNX2X_BC_VER, val);
11124 }
11125 bp->link_params.feature_config_flags |=
11126 (val >= REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL) ?
11127 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
11128
11129 bp->link_params.feature_config_flags |=
11130 (val >= REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL) ?
11131 FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY : 0;
11132 bp->link_params.feature_config_flags |=
11133 (val >= REQ_BC_VER_4_VRFY_AFEX_SUPPORTED) ?
11134 FEATURE_CONFIG_BC_SUPPORTS_AFEX : 0;
11135 bp->link_params.feature_config_flags |=
11136 (val >= REQ_BC_VER_4_SFP_TX_DISABLE_SUPPORTED) ?
11137 FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED : 0;
11138
11139 bp->link_params.feature_config_flags |=
11140 (val >= REQ_BC_VER_4_MT_SUPPORTED) ?
11141 FEATURE_CONFIG_MT_SUPPORT : 0;
11142
11143 bp->flags |= (val >= REQ_BC_VER_4_PFC_STATS_SUPPORTED) ?
11144 BC_SUPPORTS_PFC_STATS : 0;
11145
11146 bp->flags |= (val >= REQ_BC_VER_4_FCOE_FEATURES) ?
11147 BC_SUPPORTS_FCOE_FEATURES : 0;
11148
11149 bp->flags |= (val >= REQ_BC_VER_4_DCBX_ADMIN_MSG_NON_PMF) ?
11150 BC_SUPPORTS_DCBX_MSG_NON_PMF : 0;
11151
11152 bp->flags |= (val >= REQ_BC_VER_4_RMMOD_CMD) ?
11153 BC_SUPPORTS_RMMOD_CMD : 0;
11154
11155 boot_mode = SHMEM_RD(bp,
11156 dev_info.port_feature_config[BP_PORT(bp)].mba_config) &
11157 PORT_FEATURE_MBA_BOOT_AGENT_TYPE_MASK;
11158 switch (boot_mode) {
11159 case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_PXE:
11160 bp->common.boot_mode = FEATURE_ETH_BOOTMODE_PXE;
11161 break;
11162 case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_ISCSIB:
11163 bp->common.boot_mode = FEATURE_ETH_BOOTMODE_ISCSI;
11164 break;
11165 case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_FCOE_BOOT:
11166 bp->common.boot_mode = FEATURE_ETH_BOOTMODE_FCOE;
11167 break;
11168 case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_NONE:
11169 bp->common.boot_mode = FEATURE_ETH_BOOTMODE_NONE;
11170 break;
11171 }
11172
11173 pci_read_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_PMC, &pmc);
11174 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
11175
11176 BNX2X_DEV_INFO("%sWoL capable\n",
11177 (bp->flags & NO_WOL_FLAG) ? "not " : "");
11178
11179 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
11180 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
11181 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
11182 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
11183
11184 dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n",
11185 val, val2, val3, val4);
11186 }
11187
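/* Each valid IGU CAM mapping entry encodes the owning function and the
 * vector number; the helpers below extract those fields when scanning
 * the CAM for this PF's default and fast-path status blocks.
 */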
11188 #define IGU_FID(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID)
11189 #define IGU_VEC(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR)
11190
11191 static int bnx2x_get_igu_cam_info(struct bnx2x *bp)
11192 {
11193 int pfid = BP_FUNC(bp);
11194 int igu_sb_id;
11195 u32 val;
11196 u8 fid, igu_sb_cnt = 0;
11197
11198 bp->igu_base_sb = 0xff;
11199 if (CHIP_INT_MODE_IS_BC(bp)) {
11200 int vn = BP_VN(bp);
11201 igu_sb_cnt = bp->igu_sb_cnt;
11202 bp->igu_base_sb = (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn) *
11203 FP_SB_MAX_E1x;
11204
11205 bp->igu_dsb_id = E1HVN_MAX * FP_SB_MAX_E1x +
11206 (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn);
11207
11208 return 0;
11209 }
11210
11211 /* IGU in normal mode - read CAM */
11212 for (igu_sb_id = 0; igu_sb_id < IGU_REG_MAPPING_MEMORY_SIZE;
11213 igu_sb_id++) {
11214 val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + igu_sb_id * 4);
11215 if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
11216 continue;
11217 fid = IGU_FID(val);
11218 if ((fid & IGU_FID_ENCODE_IS_PF)) {
11219 if ((fid & IGU_FID_PF_NUM_MASK) != pfid)
11220 continue;
11221 if (IGU_VEC(val) == 0)
11222 /* default status block */
11223 bp->igu_dsb_id = igu_sb_id;
11224 else {
11225 if (bp->igu_base_sb == 0xff)
11226 bp->igu_base_sb = igu_sb_id;
11227 igu_sb_cnt++;
11228 }
11229 }
11230 }
11231
11232 #ifdef CONFIG_PCI_MSI
11233 /* Due to new PF resource allocation by MFW T7.4 and above, it's
11234  * optional that number of CAM entries will not be equal to the
11235  * value advertised in PCI.
11236  * Driver should use the minimal value of both as the actual
11237  * status block count
11238  */
11239 bp->igu_sb_cnt = min_t(int, bp->igu_sb_cnt, igu_sb_cnt);
11240 #endif
11241
11242 if (igu_sb_cnt == 0) {
11243 BNX2X_ERR("CAM configuration error\n");
11244 return -EINVAL;
11245 }
11246
11247 return 0;
11248 }
11249
11250 static void bnx2x_link_settings_supported(struct bnx2x *bp, u32 switch_cfg)
11251 {
11252 int cfg_size = 0, idx, port = BP_PORT(bp);
11253
11254 /* Aggregation of supported attributes of all external phys */
11255 bp->port.supported[0] = 0;
11256 bp->port.supported[1] = 0;
11257 switch (bp->link_params.num_phys) {
11258 case 1:
11259 bp->port.supported[0] = bp->link_params.phy[INT_PHY].supported;
11260 cfg_size = 1;
11261 break;
11262 case 2:
11263 bp->port.supported[0] = bp->link_params.phy[EXT_PHY1].supported;
11264 cfg_size = 1;
11265 break;
11266 case 3:
11267 if (bp->link_params.multi_phy_config &
11268 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
11269 bp->port.supported[1] =
11270 bp->link_params.phy[EXT_PHY1].supported;
11271 bp->port.supported[0] =
11272 bp->link_params.phy[EXT_PHY2].supported;
11273 } else {
11274 bp->port.supported[0] =
11275 bp->link_params.phy[EXT_PHY1].supported;
11276 bp->port.supported[1] =
11277 bp->link_params.phy[EXT_PHY2].supported;
11278 }
11279 cfg_size = 2;
11280 break;
11281 }
11282
11283 if (!(bp->port.supported[0] || bp->port.supported[1])) {
11284 BNX2X_ERR("NVRAM config error. BAD phy config. PHY1 config 0x%x, PHY2 config 0x%x\n",
11285 SHMEM_RD(bp,
11286 dev_info.port_hw_config[port].external_phy_config),
11287 SHMEM_RD(bp,
11288 dev_info.port_hw_config[port].external_phy_config2));
11289 return;
11290 }
11291
11292 if (CHIP_IS_E3(bp))
11293 bp->port.phy_addr = REG_RD(bp, MISC_REG_WC0_CTRL_PHY_ADDR);
11294 else {
11295 switch (switch_cfg) {
11296 case SWITCH_CFG_1G:
11297 bp->port.phy_addr = REG_RD(
11298 bp, NIG_REG_SERDES0_CTRL_PHY_ADDR + port*0x10);
11299 break;
11300 case SWITCH_CFG_10G:
11301 bp->port.phy_addr = REG_RD(
11302 bp, NIG_REG_XGXS0_CTRL_PHY_ADDR + port*0x18);
11303 break;
11304 default:
11305 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
11306 bp->port.link_config[0]);
11307 return;
11308 }
11309 }
11310 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
11311
11312 for (idx = 0; idx < cfg_size; idx++) {
11313 if (!(bp->link_params.speed_cap_mask[idx] &
11314 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
11315 bp->port.supported[idx] &= ~SUPPORTED_10baseT_Half;
11316
11317 if (!(bp->link_params.speed_cap_mask[idx] &
11318 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
11319 bp->port.supported[idx] &= ~SUPPORTED_10baseT_Full;
11320
11321 if (!(bp->link_params.speed_cap_mask[idx] &
11322 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
11323 bp->port.supported[idx] &= ~SUPPORTED_100baseT_Half;
11324
11325 if (!(bp->link_params.speed_cap_mask[idx] &
11326 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
11327 bp->port.supported[idx] &= ~SUPPORTED_100baseT_Full;
11328
11329 if (!(bp->link_params.speed_cap_mask[idx] &
11330 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
11331 bp->port.supported[idx] &= ~(SUPPORTED_1000baseT_Half |
11332 SUPPORTED_1000baseT_Full);
11333
11334 if (!(bp->link_params.speed_cap_mask[idx] &
11335 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
11336 bp->port.supported[idx] &= ~SUPPORTED_2500baseX_Full;
11337
11338 if (!(bp->link_params.speed_cap_mask[idx] &
11339 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
11340 bp->port.supported[idx] &= ~SUPPORTED_10000baseT_Full;
11341
11342 if (!(bp->link_params.speed_cap_mask[idx] &
11343 PORT_HW_CFG_SPEED_CAPABILITY_D0_20G))
11344 bp->port.supported[idx] &= ~SUPPORTED_20000baseKR2_Full;
11345 }
11346
11347 BNX2X_DEV_INFO("supported 0x%x 0x%x\n", bp->port.supported[0],
11348 bp->port.supported[1]);
11349 }
11350
11351 static void bnx2x_link_settings_requested(struct bnx2x *bp)
11352 {
11353 u32 link_config, idx, cfg_size = 0;
11354 bp->port.advertising[0] = 0;
11355 bp->port.advertising[1] = 0;
11356 switch (bp->link_params.num_phys) {
11357 case 1:
11358 case 2:
11359 cfg_size = 1;
11360 break;
11361 case 3:
11362 cfg_size = 2;
11363 break;
11364 }
11365 for (idx = 0; idx < cfg_size; idx++) {
11366 bp->link_params.req_duplex[idx] = DUPLEX_FULL;
11367 link_config = bp->port.link_config[idx];
11368 switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) {
11369 case PORT_FEATURE_LINK_SPEED_AUTO:
11370 if (bp->port.supported[idx] & SUPPORTED_Autoneg) {
11371 bp->link_params.req_line_speed[idx] =
11372 SPEED_AUTO_NEG;
11373 bp->port.advertising[idx] |=
11374 bp->port.supported[idx];
11375 if (bp->link_params.phy[EXT_PHY1].type ==
11376 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
11377 bp->port.advertising[idx] |=
11378 (SUPPORTED_100baseT_Half |
11379 SUPPORTED_100baseT_Full);
11380 } else {
11381 /* force 10G, no AN */
11382 bp->link_params.req_line_speed[idx] =
11383 SPEED_10000;
11384 bp->port.advertising[idx] |=
11385 (ADVERTISED_10000baseT_Full |
11386 ADVERTISED_FIBRE);
11387 continue;
11388 }
11389 break;
11390
11391 case PORT_FEATURE_LINK_SPEED_10M_FULL:
11392 if (bp->port.supported[idx] & SUPPORTED_10baseT_Full) {
11393 bp->link_params.req_line_speed[idx] =
11394 SPEED_10;
11395 bp->port.advertising[idx] |=
11396 (ADVERTISED_10baseT_Full |
11397 ADVERTISED_TP);
11398 } else {
11399 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
11400 link_config,
11401 bp->link_params.speed_cap_mask[idx]);
11402 return;
11403 }
11404 break;
11405
11406 case PORT_FEATURE_LINK_SPEED_10M_HALF:
11407 if (bp->port.supported[idx] & SUPPORTED_10baseT_Half) {
11408 bp->link_params.req_line_speed[idx] =
11409 SPEED_10;
11410 bp->link_params.req_duplex[idx] =
11411 DUPLEX_HALF;
11412 bp->port.advertising[idx] |=
11413 (ADVERTISED_10baseT_Half |
11414 ADVERTISED_TP);
11415 } else {
11416 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
11417 link_config,
11418 bp->link_params.speed_cap_mask[idx]);
11419 return;
11420 }
11421 break;
11422
11423 case PORT_FEATURE_LINK_SPEED_100M_FULL:
11424 if (bp->port.supported[idx] &
11425 SUPPORTED_100baseT_Full) {
11426 bp->link_params.req_line_speed[idx] =
11427 SPEED_100;
11428 bp->port.advertising[idx] |=
11429 (ADVERTISED_100baseT_Full |
11430 ADVERTISED_TP);
11431 } else {
11432 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
11433 link_config,
11434 bp->link_params.speed_cap_mask[idx]);
11435 return;
11436 }
11437 break;
11438
11439 case PORT_FEATURE_LINK_SPEED_100M_HALF:
11440 if (bp->port.supported[idx] &
11441 SUPPORTED_100baseT_Half) {
11442 bp->link_params.req_line_speed[idx] =
11443 SPEED_100;
11444 bp->link_params.req_duplex[idx] =
11445 DUPLEX_HALF;
11446 bp->port.advertising[idx] |=
11447 (ADVERTISED_100baseT_Half |
11448 ADVERTISED_TP);
11449 } else {
11450 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
11451 link_config,
11452 bp->link_params.speed_cap_mask[idx]);
11453 return;
11454 }
11455 break;
11456
11457 case PORT_FEATURE_LINK_SPEED_1G:
11458 if (bp->port.supported[idx] &
11459 SUPPORTED_1000baseT_Full) {
11460 bp->link_params.req_line_speed[idx] =
11461 SPEED_1000;
11462 bp->port.advertising[idx] |=
11463 (ADVERTISED_1000baseT_Full |
11464 ADVERTISED_TP);
11465 } else if (bp->port.supported[idx] &
11466 SUPPORTED_1000baseKX_Full) {
11467 bp->link_params.req_line_speed[idx] =
11468 SPEED_1000;
11469 bp->port.advertising[idx] |=
11470 ADVERTISED_1000baseKX_Full;
11471 } else {
11472 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
11473 link_config,
11474 bp->link_params.speed_cap_mask[idx]);
11475 return;
11476 }
11477 break;
11478
11479 case PORT_FEATURE_LINK_SPEED_2_5G:
11480 if (bp->port.supported[idx] &
11481 SUPPORTED_2500baseX_Full) {
11482 bp->link_params.req_line_speed[idx] =
11483 SPEED_2500;
11484 bp->port.advertising[idx] |=
11485 (ADVERTISED_2500baseX_Full |
11486 ADVERTISED_TP);
11487 } else {
11488 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
11489 link_config,
11490 bp->link_params.speed_cap_mask[idx]);
11491 return;
11492 }
11493 break;
11494
11495 case PORT_FEATURE_LINK_SPEED_10G_CX4:
11496 if (bp->port.supported[idx] &
11497 SUPPORTED_10000baseT_Full) {
11498 bp->link_params.req_line_speed[idx] =
11499 SPEED_10000;
11500 bp->port.advertising[idx] |=
11501 (ADVERTISED_10000baseT_Full |
11502 ADVERTISED_FIBRE);
11503 } else if (bp->port.supported[idx] &
11504 SUPPORTED_10000baseKR_Full) {
11505 bp->link_params.req_line_speed[idx] =
11506 SPEED_10000;
11507 bp->port.advertising[idx] |=
11508 (ADVERTISED_10000baseKR_Full |
11509 ADVERTISED_FIBRE);
11510 } else {
11511 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
11512 link_config,
11513 bp->link_params.speed_cap_mask[idx]);
11514 return;
11515 }
11516 break;
11517 case PORT_FEATURE_LINK_SPEED_20G:
11518 bp->link_params.req_line_speed[idx] = SPEED_20000;
11519
11520 break;
11521 default:
11522 BNX2X_ERR("NVRAM config error. BAD link speed link_config 0x%x\n",
11523 link_config);
11524 bp->link_params.req_line_speed[idx] =
11525 SPEED_AUTO_NEG;
11526 bp->port.advertising[idx] =
11527 bp->port.supported[idx];
11528 break;
11529 }
11530
11531 bp->link_params.req_flow_ctrl[idx] = (link_config &
11532 PORT_FEATURE_FLOW_CONTROL_MASK);
11533 if (bp->link_params.req_flow_ctrl[idx] ==
11534 BNX2X_FLOW_CTRL_AUTO) {
11535 if (!(bp->port.supported[idx] & SUPPORTED_Autoneg))
11536 bp->link_params.req_flow_ctrl[idx] =
11537 BNX2X_FLOW_CTRL_NONE;
11538 else
11539 bnx2x_set_requested_fc(bp);
11540 }
11541
11542 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x advertising 0x%x\n",
11543 bp->link_params.req_line_speed[idx],
11544 bp->link_params.req_duplex[idx],
11545 bp->link_params.req_flow_ctrl[idx],
11546 bp->port.advertising[idx]);
11547 }
11548 }
11549
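/* shmem stores a MAC address as a 16-bit 'upper' and a 32-bit 'lower'
 * word; swap both halves to big-endian so mac_buf ends up in network
 * byte order.
 */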
11550 static void bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
11551 {
11552 __be16 mac_hi_be = cpu_to_be16(mac_hi);
11553 __be32 mac_lo_be = cpu_to_be32(mac_lo);
11554 memcpy(mac_buf, &mac_hi_be, sizeof(mac_hi_be));
11555 memcpy(mac_buf + sizeof(mac_hi_be), &mac_lo_be, sizeof(mac_lo_be));
11556 }
11557
11558 static void bnx2x_get_port_hwinfo(struct bnx2x *bp)
11559 {
11560 int port = BP_PORT(bp);
11561 u32 config;
11562 u32 ext_phy_type, ext_phy_config, eee_mode;
11563
11564 bp->link_params.bp = bp;
11565 bp->link_params.port = port;
11566
11567 bp->link_params.lane_config =
11568 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
11569
11570 bp->link_params.speed_cap_mask[0] =
11571 SHMEM_RD(bp,
11572 dev_info.port_hw_config[port].speed_capability_mask) &
11573 PORT_HW_CFG_SPEED_CAPABILITY_D0_MASK;
11574 bp->link_params.speed_cap_mask[1] =
11575 SHMEM_RD(bp,
11576 dev_info.port_hw_config[port].speed_capability_mask2) &
11577 PORT_HW_CFG_SPEED_CAPABILITY_D0_MASK;
11578 bp->port.link_config[0] =
11579 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
11580
11581 bp->port.link_config[1] =
11582 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config2);
11583
11584 bp->link_params.multi_phy_config =
11585 SHMEM_RD(bp, dev_info.port_hw_config[port].multi_phy_config);
11586
11587 /* If the device is capable of WoL, set the default state
11588  * according to the HW */
11589 config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
11590 bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
11591 (config & PORT_FEATURE_WOL_ENABLED));
11592
11593 if ((config & PORT_FEAT_CFG_STORAGE_PERSONALITY_MASK) ==
11594 PORT_FEAT_CFG_STORAGE_PERSONALITY_FCOE && !IS_MF(bp))
11595 bp->flags |= NO_ISCSI_FLAG;
11596 if ((config & PORT_FEAT_CFG_STORAGE_PERSONALITY_MASK) ==
11597 PORT_FEAT_CFG_STORAGE_PERSONALITY_ISCSI && !(IS_MF(bp)))
11598 bp->flags |= NO_FCOE_FLAG;
11599
11600 BNX2X_DEV_INFO("lane_config 0x%08x speed_cap_mask0 0x%08x link_config0 0x%08x\n",
11601 bp->link_params.lane_config,
11602 bp->link_params.speed_cap_mask[0],
11603 bp->port.link_config[0]);
11604
11605 bp->link_params.switch_cfg = (bp->port.link_config[0] &
11606 PORT_FEATURE_CONNECTED_SWITCH_MASK);
11607 bnx2x_phy_probe(&bp->link_params);
11608 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
11609
11610 bnx2x_link_settings_requested(bp);
11611
11612 /*
11613  * If connected directly, work with the internal PHY, otherwise, work
11614  * with the external PHY
11615  */
11616 ext_phy_config =
11617 SHMEM_RD(bp,
11618 dev_info.port_hw_config[port].external_phy_config);
11619 ext_phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config);
11620 if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
11621 bp->mdio.prtad = bp->port.phy_addr;
11622
11623 else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
11624 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
11625 bp->mdio.prtad =
11626 XGXS_EXT_PHY_ADDR(ext_phy_config);
11627
11628 /* Configure link feature according to nvram value */
11629 eee_mode = (((SHMEM_RD(bp, dev_info.
11630 port_feature_config[port].eee_power_mode)) &
11631 PORT_FEAT_CFG_EEE_POWER_MODE_MASK) >>
11632 PORT_FEAT_CFG_EEE_POWER_MODE_SHIFT);
11633 if (eee_mode != PORT_FEAT_CFG_EEE_POWER_MODE_DISABLED) {
11634 bp->link_params.eee_mode = EEE_MODE_ADV_LPI |
11635 EEE_MODE_ENABLE_LPI |
11636 EEE_MODE_OUTPUT_TIME;
11637 } else {
11638 bp->link_params.eee_mode = 0;
11639 }
11640 }
11641
11642 void bnx2x_get_iscsi_info(struct bnx2x *bp)
11643 {
11644 u32 no_flags = NO_ISCSI_FLAG;
11645 int port = BP_PORT(bp);
11646 u32 max_iscsi_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
11647 drv_lic_key[port].max_iscsi_conn);
11648
11649 if (!CNIC_SUPPORT(bp)) {
11650 bp->flags |= no_flags;
11651 return;
11652 }
11653
11654 /* Get the number of maximum allowed iSCSI connections */
11655 bp->cnic_eth_dev.max_iscsi_conn =
11656 (max_iscsi_conn & BNX2X_MAX_ISCSI_INIT_CONN_MASK) >>
11657 BNX2X_MAX_ISCSI_INIT_CONN_SHIFT;
11658
11659 BNX2X_DEV_INFO("max_iscsi_conn 0x%x\n",
11660 bp->cnic_eth_dev.max_iscsi_conn);
11661
11662
11663 /* If maximum allowed number of connections is zero -
11664  * disable the feature.
11665  */
11666 if (!bp->cnic_eth_dev.max_iscsi_conn)
11667 bp->flags |= no_flags;
11668 }
11669
11670 static void bnx2x_get_ext_wwn_info(struct bnx2x *bp, int func)
11671 {
11672 /* Port info */
11673 bp->cnic_eth_dev.fcoe_wwn_port_name_hi =
11674 MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_port_name_upper);
11675 bp->cnic_eth_dev.fcoe_wwn_port_name_lo =
11676 MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_port_name_lower);
11677
11678 /* Node info */
11679 bp->cnic_eth_dev.fcoe_wwn_node_name_hi =
11680 MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_node_name_upper);
11681 bp->cnic_eth_dev.fcoe_wwn_node_name_lo =
11682 MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_node_name_lower);
11683 }
11684
11685 static int bnx2x_shared_fcoe_funcs(struct bnx2x *bp)
11686 {
11687 u8 count = 0;
11688
11689 if (IS_MF(bp)) {
11690 u8 fid;
11691
11692 /* iterate over absolute function ids for this path: */
11693 for (fid = BP_PATH(bp); fid < E2_FUNC_MAX * 2; fid += 2) {
11694 if (IS_MF_SD(bp)) {
11695 u32 cfg = MF_CFG_RD(bp,
11696 func_mf_config[fid].config);
11697
11698 if (!(cfg & FUNC_MF_CFG_FUNC_HIDE) &&
11699 ((cfg & FUNC_MF_CFG_PROTOCOL_MASK) ==
11700 FUNC_MF_CFG_PROTOCOL_FCOE))
11701 count++;
11702 } else {
11703 u32 cfg = MF_CFG_RD(bp,
11704 func_ext_config[fid].
11705 func_cfg);
11706
11707 if ((cfg & MACP_FUNC_CFG_FLAGS_ENABLED) &&
11708 (cfg & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD))
11709 count++;
11710 }
11711 }
11712 } else {
11713 int port, port_cnt = CHIP_MODE_IS_4_PORT(bp) ? 2 : 1;
11714
11715 for (port = 0; port < port_cnt; port++) {
11716 u32 lic = SHMEM_RD(bp,
11717 drv_lic_key[port].max_fcoe_conn) ^
11718 FW_ENCODE_32BIT_PATTERN;
11719 if (lic)
11720 count++;
11721 }
11722 }
11723
11724 return count;
11725 }
11726
11727 static void bnx2x_get_fcoe_info(struct bnx2x *bp)
11728 {
11729 int port = BP_PORT(bp);
11730 int func = BP_ABS_FUNC(bp);
11731 u32 max_fcoe_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
11732 drv_lic_key[port].max_fcoe_conn);
11733 u8 num_fcoe_func = bnx2x_shared_fcoe_funcs(bp);
11734
11735 if (!CNIC_SUPPORT(bp)) {
11736 bp->flags |= NO_FCOE_FLAG;
11737 return;
11738 }
11739
11740 /* Get the number of maximum allowed FCoE connections */
11741 bp->cnic_eth_dev.max_fcoe_conn =
11742 (max_fcoe_conn & BNX2X_MAX_FCOE_INIT_CONN_MASK) >>
11743 BNX2X_MAX_FCOE_INIT_CONN_SHIFT;
11744
11745 /* Calculate the number of maximum allowed FCoE tasks */
11746 bp->cnic_eth_dev.max_fcoe_exchanges = MAX_NUM_FCOE_TASKS_PER_ENGINE;
11747
11748 /* check if FCoE resources must be shared between different functions */
11749 if (num_fcoe_func)
11750 bp->cnic_eth_dev.max_fcoe_exchanges /= num_fcoe_func;
11751
11752 /* Read the WWN info: */
11753 if (!IS_MF(bp)) {
11754 /* Port info */
11755 bp->cnic_eth_dev.fcoe_wwn_port_name_hi =
11756 SHMEM_RD(bp,
11757 dev_info.port_hw_config[port].
11758 fcoe_wwn_port_name_upper);
11759 bp->cnic_eth_dev.fcoe_wwn_port_name_lo =
11760 SHMEM_RD(bp,
11761 dev_info.port_hw_config[port].
11762 fcoe_wwn_port_name_lower);
11763
11764 /* Node info */
11765 bp->cnic_eth_dev.fcoe_wwn_node_name_hi =
11766 SHMEM_RD(bp,
11767 dev_info.port_hw_config[port].
11768 fcoe_wwn_node_name_upper);
11769 bp->cnic_eth_dev.fcoe_wwn_node_name_lo =
11770 SHMEM_RD(bp,
11771 dev_info.port_hw_config[port].
11772 fcoe_wwn_node_name_lower);
11773 } else if (!IS_MF_SD(bp)) {
11774 /* Read the WWN info only if the FCoE feature is enabled for
11775  * this function.
11776  */
11777 if (BNX2X_HAS_MF_EXT_PROTOCOL_FCOE(bp))
11778 bnx2x_get_ext_wwn_info(bp, func);
11779 } else {
11780 if (BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp) && !CHIP_IS_E1x(bp))
11781 bnx2x_get_ext_wwn_info(bp, func);
11782 }
11783
11784 BNX2X_DEV_INFO("max_fcoe_conn 0x%x\n", bp->cnic_eth_dev.max_fcoe_conn);
11785
11786
11787 /* If maximum allowed number of connections is zero -
11788  * disable the feature.
11789  */
11790 if (!bp->cnic_eth_dev.max_fcoe_conn) {
11791 bp->flags |= NO_FCOE_FLAG;
11792 eth_zero_addr(bp->fip_mac);
11793 }
11794 }
11795
11796 static void bnx2x_get_cnic_info(struct bnx2x *bp)
11797 {
11798 /* iSCSI may be dynamically disabled but reading
11799  * info here we will decrease memory usage by driver
11800  * if the feature is disabled for good
11801  */
11803 bnx2x_get_iscsi_info(bp);
11804 bnx2x_get_fcoe_info(bp);
11805 }
11806
11807 static void bnx2x_get_cnic_mac_hwinfo(struct bnx2x *bp)
11808 {
11809 u32 val, val2;
11810 int func = BP_ABS_FUNC(bp);
11811 int port = BP_PORT(bp);
11812 u8 *iscsi_mac = bp->cnic_eth_dev.iscsi_mac;
11813 u8 *fip_mac = bp->fip_mac;
11814
11815 if (IS_MF(bp)) {
11816 /* iSCSI and FCoE NPAR MACs: if there is no either iSCSI or
11817  * FCoE MAC then the appropriate feature should be disabled.
11818  * In non SD mode features configuration comes from struct
11819  * func_ext_config.
11820  */
11821 if (!IS_MF_SD(bp)) {
11822 u32 cfg = MF_CFG_RD(bp, func_ext_config[func].func_cfg);
11823 if (cfg & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) {
11824 val2 = MF_CFG_RD(bp, func_ext_config[func].
11825 iscsi_mac_addr_upper);
11826 val = MF_CFG_RD(bp, func_ext_config[func].
11827 iscsi_mac_addr_lower);
11828 bnx2x_set_mac_buf(iscsi_mac, val, val2);
11829 BNX2X_DEV_INFO
11830 ("Read iSCSI MAC: %pM\n", iscsi_mac);
11831 } else {
11832 bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
11833 }
11834
11835 if (cfg & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) {
11836 val2 = MF_CFG_RD(bp, func_ext_config[func].
11837 fcoe_mac_addr_upper);
11838 val = MF_CFG_RD(bp, func_ext_config[func].
11839 fcoe_mac_addr_lower);
11840 bnx2x_set_mac_buf(fip_mac, val, val2);
11841 BNX2X_DEV_INFO
11842 ("Read FCoE L2 MAC: %pM\n", fip_mac);
11843 } else {
11844 bp->flags |= NO_FCOE_FLAG;
11845 }
11846
11847 bp->mf_ext_config = cfg;
11848
11849 } else {
11850 if (BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp)) {
11851 /* use primary mac as iscsi mac */
11852 memcpy(iscsi_mac, bp->dev->dev_addr, ETH_ALEN);
11853
11854 BNX2X_DEV_INFO("SD ISCSI MODE\n");
11855 BNX2X_DEV_INFO
11856 ("Read iSCSI MAC: %pM\n", iscsi_mac);
11857 } else if (BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp)) {
11858 /* use primary mac as fip mac */
11859 memcpy(fip_mac, bp->dev->dev_addr, ETH_ALEN);
11860 BNX2X_DEV_INFO("SD FCoE MODE\n");
11861 BNX2X_DEV_INFO
11862 ("Read FIP MAC: %pM\n", fip_mac);
11863 }
11864 }
11865
11866 /* If this is a storage-only interface, use SAN mac as
11867  * primary MAC. Notice that for SD this is already the case,
11868  * as the SAN mac was copied from the primary MAC.
11869  */
11870 if (IS_MF_FCOE_AFEX(bp))
11871 memcpy(bp->dev->dev_addr, fip_mac, ETH_ALEN);
11872 } else {
11873 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].
11874 iscsi_mac_upper);
11875 val = SHMEM_RD(bp, dev_info.port_hw_config[port].
11876 iscsi_mac_lower);
11877 bnx2x_set_mac_buf(iscsi_mac, val, val2);
11878
11879 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].
11880 fcoe_fip_mac_upper);
11881 val = SHMEM_RD(bp, dev_info.port_hw_config[port].
11882 fcoe_fip_mac_lower);
11883 bnx2x_set_mac_buf(fip_mac, val, val2);
11884 }
11885
11886 /* Disable iSCSI OOO if MAC configuration is invalid. */
11887 if (!is_valid_ether_addr(iscsi_mac)) {
11888 bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
11889 eth_zero_addr(iscsi_mac);
11890 }
11891
11892 /* Disable FCoE if MAC configuration is invalid. */
11893 if (!is_valid_ether_addr(fip_mac)) {
11894 bp->flags |= NO_FCOE_FLAG;
11895 eth_zero_addr(bp->fip_mac);
11896 }
11897 }
11898
11899 static void bnx2x_get_mac_hwinfo(struct bnx2x *bp)
11900 {
11901 u32 val, val2;
11902 int func = BP_ABS_FUNC(bp);
11903 int port = BP_PORT(bp);
11904
11905 /* Zero primary MAC configuration */
11906 eth_zero_addr(bp->dev->dev_addr);
11907
11908 if (BP_NOMCP(bp)) {
11909 BNX2X_ERROR("warning: random MAC workaround active\n");
11910 eth_hw_addr_random(bp->dev);
11911 } else if (IS_MF(bp)) {
11912 val2 = MF_CFG_RD(bp, func_mf_config[func].mac_upper);
11913 val = MF_CFG_RD(bp, func_mf_config[func].mac_lower);
11914 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
11915 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT))
11916 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
11917
11918 if (CNIC_SUPPORT(bp))
11919 bnx2x_get_cnic_mac_hwinfo(bp);
11920 } else {
11921 /* in SF read MACs from port configuration */
11922 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
11923 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
11924 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
11925
11926 if (CNIC_SUPPORT(bp))
11927 bnx2x_get_cnic_mac_hwinfo(bp);
11928 }
11929
11930 if (!BP_NOMCP(bp)) {
11931 /* Read physical port identifier from shmem */
11932 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
11933 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
11934 bnx2x_set_mac_buf(bp->phys_port_id, val, val2);
11935 bp->flags |= HAS_PHYS_PORT_ID;
11936 }
11937
11938 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
11939
11940 if (!is_valid_ether_addr(bp->dev->dev_addr))
11941 dev_err(&bp->pdev->dev,
11942 "bad Ethernet MAC address configuration: %pM\n"
11943 "change it manually before bringing up the appropriate network interface\n",
11944 bp->dev->dev_addr);
11945 }
11946
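/* Report whether pause-on-host-ring ("dropless" flow control) is
 * enabled in NVRAM - from the function extension config on newer MF
 * chips, otherwise from the per-port HW config. VFs never use it.
 */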
11947 static bool bnx2x_get_dropless_info(struct bnx2x *bp)
11948 {
11949 int tmp;
11950 u32 cfg;
11951
11952 if (IS_VF(bp))
11953 return false;
11954
11955 if (IS_MF(bp) && !CHIP_IS_E1x(bp)) {
11956 /* Take function: tmp = func */
11957 tmp = BP_ABS_FUNC(bp);
11958 cfg = MF_CFG_RD(bp, func_ext_config[tmp].func_cfg);
11959 cfg = !!(cfg & MACP_FUNC_CFG_PAUSE_ON_HOST_RING);
11960 } else {
11961 /* Take port: tmp = port */
11962 tmp = BP_PORT(bp);
11963 cfg = SHMEM_RD(bp,
11964 dev_info.port_hw_config[tmp].generic_features);
11965 cfg = !!(cfg & PORT_HW_CFG_PAUSE_ON_HOST_RING_ENABLED);
11966 }
11967 return cfg;
11968 }
11969
11970 static void validate_set_si_mode(struct bnx2x *bp)
11971 {
11972 u8 func = BP_ABS_FUNC(bp);
11973 u32 val;
11974
11975 val = MF_CFG_RD(bp, func_mf_config[func].mac_upper);
11976
11977 /* check for legal mac (upper bytes) */
11978 if (val != 0xffff) {
11979 bp->mf_mode = MULTI_FUNCTION_SI;
11980 bp->mf_config[BP_VN(bp)] =
11981 MF_CFG_RD(bp, func_mf_config[func].config);
11982 } else
11983 BNX2X_DEV_INFO("illegal MAC address for SI\n");
11984 }
11985
11986 static int bnx2x_get_hwinfo(struct bnx2x *bp)
11987 {
11988 int func = BP_ABS_FUNC(bp);
11989 int vn;
11990 u32 val = 0, val2 = 0;
11991 int rc = 0;
11992
11993 /* Validate that chip access is feasible */
11994 if (REG_RD(bp, MISC_REG_CHIP_NUM) == 0xffffffff) {
11995 dev_err(&bp->pdev->dev,
11996 "Chip read returns all Fs. Preventing probe from continuing\n");
11997 return -EINVAL;
11998 }
11999
12000 bnx2x_get_common_hwinfo(bp);
12001
12002 /*
12003  * initialize IGU parameters
12004  */
12005 if (CHIP_IS_E1x(bp)) {
12006 bp->common.int_block = INT_BLOCK_HC;
12007
12008 bp->igu_dsb_id = DEF_SB_IGU_ID;
12009 bp->igu_base_sb = 0;
12010 } else {
12011 bp->common.int_block = INT_BLOCK_IGU;
12012
12013 /* do not allow device reset during IGU info processing */
12014 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
12015
12016 val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);
12017
12018 if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
12019 int tout = 5000;
12020
12021 BNX2X_DEV_INFO("FORCING Normal Mode\n");
12022
12023 val &= ~(IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN);
12024 REG_WR(bp, IGU_REG_BLOCK_CONFIGURATION, val);
12025 REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x7f);
12026
12027 while (tout && REG_RD(bp, IGU_REG_RESET_MEMORIES)) {
12028 tout--;
12029 usleep_range(1000, 2000);
12030 }
12031
12032 if (REG_RD(bp, IGU_REG_RESET_MEMORIES)) {
12033 dev_err(&bp->pdev->dev,
12034 "FORCING Normal Mode failed!!!\n");
12035 bnx2x_release_hw_lock(bp,
12036 HW_LOCK_RESOURCE_RESET);
12037 return -EPERM;
12038 }
12039 }
12040
12041 if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
12042 BNX2X_DEV_INFO("IGU Backward Compatible Mode\n");
12043 bp->common.int_block |= INT_BLOCK_MODE_BW_COMP;
12044 } else
12045 BNX2X_DEV_INFO("IGU Normal Mode\n");
12046
12047 rc = bnx2x_get_igu_cam_info(bp);
12048 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
12049 if (rc)
12050 return rc;
12051 }
12052
12053 /*
12054  * set base FW non-default (fast path) status block id, this value is
12055  * used to initialize the fw_sb_id saved on the fp/queue structure to
12056  * indicate bar mapped location
12057  */
12058 if (CHIP_IS_E1x(bp))
12059 bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E1x + BP_L_ID(bp);
12060 else
12061 /* 57712 - we currently use one FW SB per IGU SB (Rx and Tx of
12062  * the same queue are indicated on the same IGU SB). So we
12063  * prefer FW and IGU SBs to be the same value.
12064  */
12065 bp->base_fw_ndsb = bp->igu_base_sb;
12066
12067 BNX2X_DEV_INFO("igu_dsb_id %d igu_base_sb %d igu_sb_cnt %d\n"
12068 "base_fw_ndsb %d\n", bp->igu_dsb_id, bp->igu_base_sb,
12069 bp->igu_sb_cnt, bp->base_fw_ndsb);
12070
12071
12072 /* Initialize MF configuration */
12074 bp->mf_ov = 0;
12075 bp->mf_mode = 0;
12076 bp->mf_sub_mode = 0;
12077 vn = BP_VN(bp);
12078
12079 if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) {
12080 BNX2X_DEV_INFO("shmem2base 0x%x, size %d, mfcfg offset %d\n",
12081 bp->common.shmem2_base, SHMEM2_RD(bp, size),
12082 (u32)offsetof(struct shmem2_region, mf_cfg_addr));
12083
12084 if (SHMEM2_HAS(bp, mf_cfg_addr))
12085 bp->common.mf_cfg_base = SHMEM2_RD(bp, mf_cfg_addr);
12086 else
12087 bp->common.mf_cfg_base = bp->common.shmem_base +
12088 offsetof(struct shmem_region, func_mb) +
12089 E1H_FUNC_MAX * sizeof(struct drv_func_mb);
12090
12091 /* get mf configuration:
12092  * 1. Existence of MF configuration
12093  * 2. MAC address must be legal (check only upper bytes)
12094  *    for Switch-Independent mode;
12095  *    OVLAN must be legal for Switch-Dependent mode
12096  * 3. SF_MODE configures specific MF mode
12097  */
12098 if (bp->common.mf_cfg_base != SHMEM_MF_CFG_ADDR_NONE) {
12099 /* get mf configuration */
12100 val = SHMEM_RD(bp,
12101 dev_info.shared_feature_config.config);
12102 val &= SHARED_FEAT_CFG_FORCE_SF_MODE_MASK;
12103
12104 switch (val) {
12105 case SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT:
12106 validate_set_si_mode(bp);
12107 break;
12108 case SHARED_FEAT_CFG_FORCE_SF_MODE_AFEX_MODE:
12109 if ((!CHIP_IS_E1x(bp)) &&
12110 (MF_CFG_RD(bp, func_mf_config[func].
12111 mac_upper) != 0xffff) &&
12112 (SHMEM2_HAS(bp,
12113 afex_driver_support))) {
12114 bp->mf_mode = MULTI_FUNCTION_AFEX;
12115 bp->mf_config[vn] = MF_CFG_RD(bp,
12116 func_mf_config[func].config);
12117 } else {
12118 BNX2X_DEV_INFO("can not configure afex mode\n");
12119 }
12120 break;
12121 case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED:
12122 /* get OV configuration */
12123 val = MF_CFG_RD(bp,
12124 func_mf_config[FUNC_0].e1hov_tag);
12125 val &= FUNC_MF_CFG_E1HOV_TAG_MASK;
12126
12127 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
12128 bp->mf_mode = MULTI_FUNCTION_SD;
12129 bp->mf_config[vn] = MF_CFG_RD(bp,
12130 func_mf_config[func].config);
12131 } else
12132 BNX2X_DEV_INFO("illegal OV for SD\n");
12133 break;
12134 case SHARED_FEAT_CFG_FORCE_SF_MODE_BD_MODE:
12135 bp->mf_mode = MULTI_FUNCTION_SD;
12136 bp->mf_sub_mode = SUB_MF_MODE_BD;
12137 bp->mf_config[vn] =
12138 MF_CFG_RD(bp,
12139 func_mf_config[func].config);
12140
12141 if (SHMEM2_HAS(bp, mtu_size)) {
12142 int mtu_idx = BP_FW_MB_IDX(bp);
12143 u16 mtu_size;
12144 u32 mtu;
12145
12146 mtu = SHMEM2_RD(bp, mtu_size[mtu_idx]);
12147 mtu_size = (u16)mtu;
12148 DP(NETIF_MSG_IFUP, "Read MTU size %04x [%08x]\n",
12149 mtu_size, mtu);
12150
12151 /* if valid: update device mtu */
12152 if ((mtu_size >= ETH_MIN_PACKET_SIZE) &&
12153 (mtu_size <=
12154 ETH_MAX_JUMBO_PACKET_SIZE))
12155 bp->dev->mtu = mtu_size;
12156 }
12157 break;
12158 case SHARED_FEAT_CFG_FORCE_SF_MODE_UFP_MODE:
12159 bp->mf_mode = MULTI_FUNCTION_SD;
12160 bp->mf_sub_mode = SUB_MF_MODE_UFP;
12161 bp->mf_config[vn] =
12162 MF_CFG_RD(bp,
12163 func_mf_config[func].config);
12164 break;
12165 case SHARED_FEAT_CFG_FORCE_SF_MODE_FORCED_SF:
12166 bp->mf_config[vn] = 0;
12167 break;
12168 case SHARED_FEAT_CFG_FORCE_SF_MODE_EXTENDED_MODE:
12169 val2 = SHMEM_RD(bp,
12170 dev_info.shared_hw_config.config_3);
12171 val2 &= SHARED_HW_CFG_EXTENDED_MF_MODE_MASK;
12172 switch (val2) {
12173 case SHARED_HW_CFG_EXTENDED_MF_MODE_NPAR1_DOT_5:
12174 validate_set_si_mode(bp);
12175 bp->mf_sub_mode =
12176 SUB_MF_MODE_NPAR1_DOT_5;
12177 break;
12178 default:
12179 /* Unknown configuration: reset mf_config */
12180 bp->mf_config[vn] = 0;
12181 BNX2X_DEV_INFO("unknown extended MF mode 0x%x\n",
12182 val2);
12183 }
12184 break;
12185 default:
12186 /* Unknown configuration: reset mf_config */
12187 bp->mf_config[vn] = 0;
12188 BNX2X_DEV_INFO("unknown MF mode 0x%x\n", val);
12189 }
12190 }
12191
12192 BNX2X_DEV_INFO("%s function mode\n",
12193 IS_MF(bp) ? "multi" : "single");
12194
12195 switch (bp->mf_mode) {
12196 case MULTI_FUNCTION_SD:
12197 val = MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
12198 FUNC_MF_CFG_E1HOV_TAG_MASK;
12199 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
12200 bp->mf_ov = val;
12201 bp->path_has_ovlan = true;
12202
12203 BNX2X_DEV_INFO("MF OV for func %d is %d (0x%04x)\n",
12204 func, bp->mf_ov, bp->mf_ov);
12205 } else if ((bp->mf_sub_mode == SUB_MF_MODE_UFP) ||
12206 (bp->mf_sub_mode == SUB_MF_MODE_BD)) {
12207 dev_err(&bp->pdev->dev,
12208 "Unexpected - no valid MF OV for func %d in UFP/BD mode\n",
12209 func);
12210 bp->path_has_ovlan = true;
12211 } else {
12212 dev_err(&bp->pdev->dev,
12213 "No valid MF OV for func %d, aborting\n",
12214 func);
12215 return -EPERM;
12216 }
12217 break;
12218 case MULTI_FUNCTION_AFEX:
12219 BNX2X_DEV_INFO("func %d is in MF afex mode\n", func);
12220 break;
12221 case MULTI_FUNCTION_SI:
12222 BNX2X_DEV_INFO("func %d is in MF switch-independent mode\n",
12223 func);
12224 break;
12225 default:
12226 if (vn) {
12227 dev_err(&bp->pdev->dev,
12228 "VN %d is in a single function mode, aborting\n",
12229 vn);
12230 return -EPERM;
12231 }
12232 break;
12233 }
12234
12235 /* check if other port on the path needs ovlan:
12236  * Since MF configuration is shared between ports
12237  * Possible mixed modes are only
12238  * {SF, SI} {SF, SD} {SD, SF} {SI, SF}
12239  */
12240 if (CHIP_MODE_IS_4_PORT(bp) &&
12241 !bp->path_has_ovlan &&
12242 !IS_MF(bp) &&
12243 bp->common.mf_cfg_base != SHMEM_MF_CFG_ADDR_NONE) {
12244 u8 other_port = !BP_PORT(bp);
12245 u8 other_func = BP_PATH(bp) + 2*other_port;
12246 val = MF_CFG_RD(bp,
12247 func_mf_config[other_func].e1hov_tag);
12248 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
12249 bp->path_has_ovlan = true;
12250 }
12251 }
12252
12253 /* adjust igu_sb_cnt to MF for E1H */
12254 if (CHIP_IS_E1H(bp) && IS_MF(bp))
12255 bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt, E1H_MAX_MF_SB_COUNT);
12256
12257 /* port info */
12258 bnx2x_get_port_hwinfo(bp);
12259
12260 /* Get MAC addresses */
12261 bnx2x_get_mac_hwinfo(bp);
12262
12263 bnx2x_get_cnic_info(bp);
12264
12265 return rc;
12266 }
12267
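/* Parse the PCI VPD read-only section; on Dell-branded boards (vendor
 * ID match below) the VENDOR0 keyword carries a firmware version
 * string which is copied into bp->fw_ver.
 */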
12268 static void bnx2x_read_fwinfo(struct bnx2x *bp)
12269 {
12270 int cnt, i, block_end, rodi;
12271 char vpd_start[BNX2X_VPD_LEN+1];
12272 char str_id_reg[VENDOR_ID_LEN+1];
12273 char str_id_cap[VENDOR_ID_LEN+1];
12274 char *vpd_data;
12275 char *vpd_extended_data = NULL;
12276 u8 len;
12277
12278 cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_start);
12279 memset(bp->fw_ver, 0, sizeof(bp->fw_ver));
12280
12281 if (cnt < BNX2X_VPD_LEN)
12282 goto out_not_found;
12283
12284 /* VPD RO tag should be first tag after identifier string, hence
12285  * we should be able to find it in first BNX2X_VPD_LEN chars
12286  */
12287 i = pci_vpd_find_tag(vpd_start, 0, BNX2X_VPD_LEN,
12288 PCI_VPD_LRDT_RO_DATA);
12289 if (i < 0)
12290 goto out_not_found;
12291
12292 block_end = i + PCI_VPD_LRDT_TAG_SIZE +
12293 pci_vpd_lrdt_size(&vpd_start[i]);
12294
12295 i += PCI_VPD_LRDT_TAG_SIZE;
12296
12297 if (block_end > BNX2X_VPD_LEN) {
12298 vpd_extended_data = kmalloc(block_end, GFP_KERNEL);
12299 if (vpd_extended_data == NULL)
12300 goto out_not_found;
12301
12302 /* read rest of vpd image into vpd_extended_data */
12303 memcpy(vpd_extended_data, vpd_start, BNX2X_VPD_LEN);
12304 cnt = pci_read_vpd(bp->pdev, BNX2X_VPD_LEN,
12305 block_end - BNX2X_VPD_LEN,
12306 vpd_extended_data + BNX2X_VPD_LEN);
12307 if (cnt < (block_end - BNX2X_VPD_LEN))
12308 goto out_not_found;
12309 vpd_data = vpd_extended_data;
12310 } else
12311 vpd_data = vpd_start;
12312
12313
12314 /* locate the Manufacturer ID keyword in the RO section */
12315 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
12316 PCI_VPD_RO_KEYWORD_MFR_ID);
12317 if (rodi < 0)
12318 goto out_not_found;
12319
12320 len = pci_vpd_info_field_size(&vpd_data[rodi]);
12321
12322 if (len != VENDOR_ID_LEN)
12323 goto out_not_found;
12324
12325 rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
12326
12327 /* vendor specific info */
12328 snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
12329 snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL);
12330 if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) ||
12331 !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) {
12332 /* read the firmware version from the VENDOR0 keyword */
12333 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
12334 PCI_VPD_RO_KEYWORD_VENDOR0);
12335 if (rodi >= 0) {
12336 len = pci_vpd_info_field_size(&vpd_data[rodi]);
12337
12338 rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
12339
12340 if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) {
12341 memcpy(bp->fw_ver, &vpd_data[rodi], len);
12342 bp->fw_ver[len] = ' ';
12343 }
12344 }
12345 kfree(vpd_extended_data);
12346 return;
12347 }
12348 out_not_found:
12349 kfree(vpd_extended_data);
12350 return;
12351 }
12352
12353 static void bnx2x_set_modes_bitmap(struct bnx2x *bp)
12354 {
12355 u32 flags = 0;
12356
12357 if (CHIP_REV_IS_FPGA(bp))
12358 SET_FLAGS(flags, MODE_FPGA);
12359 else if (CHIP_REV_IS_EMUL(bp))
12360 SET_FLAGS(flags, MODE_EMUL);
12361 else
12362 SET_FLAGS(flags, MODE_ASIC);
12363
12364 if (CHIP_MODE_IS_4_PORT(bp))
12365 SET_FLAGS(flags, MODE_PORT4);
12366 else
12367 SET_FLAGS(flags, MODE_PORT2);
12368
12369 if (CHIP_IS_E2(bp))
12370 SET_FLAGS(flags, MODE_E2);
12371 else if (CHIP_IS_E3(bp)) {
12372 SET_FLAGS(flags, MODE_E3);
12373 if (CHIP_REV(bp) == CHIP_REV_Ax)
12374 SET_FLAGS(flags, MODE_E3_A0);
12375 else
12376 SET_FLAGS(flags, MODE_E3_B0 | MODE_COS3);
12377 }
12378
12379 if (IS_MF(bp)) {
12380 SET_FLAGS(flags, MODE_MF);
12381 switch (bp->mf_mode) {
12382 case MULTI_FUNCTION_SD:
12383 SET_FLAGS(flags, MODE_MF_SD);
12384 break;
12385 case MULTI_FUNCTION_SI:
12386 SET_FLAGS(flags, MODE_MF_SI);
12387 break;
12388 case MULTI_FUNCTION_AFEX:
12389 SET_FLAGS(flags, MODE_MF_AFEX);
12390 break;
12391 }
12392 } else
12393 SET_FLAGS(flags, MODE_SF);
12394
12395 #if defined(__LITTLE_ENDIAN)
12396 SET_FLAGS(flags, MODE_LITTLE_ENDIAN);
12397 #else
12398 SET_FLAGS(flags, MODE_BIG_ENDIAN);
12399 #endif
12400 INIT_MODE_FLAGS(bp) = flags;
12401 }
12402
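/* One-time bp initialization at probe: locks and deferred work, HW
 * info discovery, previous-unload handling and default tunables.
 */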
12403 static int bnx2x_init_bp(struct bnx2x *bp)
12404 {
12405 int func;
12406 int rc;
12407
12408 mutex_init(&bp->port.phy_mutex);
12409 mutex_init(&bp->fw_mb_mutex);
12410 mutex_init(&bp->drv_info_mutex);
12411 sema_init(&bp->stats_lock, 1);
12412 bp->drv_info_mng_owner = false;
12413 INIT_LIST_HEAD(&bp->vlan_reg);
12414
12415 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
12416 INIT_DELAYED_WORK(&bp->sp_rtnl_task, bnx2x_sp_rtnl_task);
12417 INIT_DELAYED_WORK(&bp->period_task, bnx2x_period_task);
12418 INIT_DELAYED_WORK(&bp->iov_task, bnx2x_iov_task);
12419 if (IS_PF(bp)) {
12420 rc = bnx2x_get_hwinfo(bp);
12421 if (rc)
12422 return rc;
12423 } else {
12424 eth_zero_addr(bp->dev->dev_addr);
12425 }
12426
12427 bnx2x_set_modes_bitmap(bp);
12428
12429 rc = bnx2x_alloc_mem_bp(bp);
12430 if (rc)
12431 return rc;
12432
12433 bnx2x_read_fwinfo(bp);
12434
12435 func = BP_FUNC(bp);
12436
12437 /* need to reset chip if undi was active */
12438 if (IS_PF(bp) && !BP_NOMCP(bp)) {
12439 /* init fw_seq */
12440 bp->fw_seq =
12441 SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
12442 DRV_MSG_SEQ_NUMBER_MASK;
12443 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
12444
12445 rc = bnx2x_prev_unload(bp);
12446 if (rc) {
12447 bnx2x_free_mem_bp(bp);
12448 return rc;
12449 }
12450 }
12451
12452 if (CHIP_REV_IS_FPGA(bp))
12453 dev_err(&bp->pdev->dev, "FPGA detected\n");
12454
12455 if (BP_NOMCP(bp) && (func == 0))
12456 dev_err(&bp->pdev->dev, "MCP disabled, must load devices in order!\n");
12457
12458 bp->disable_tpa = disable_tpa;
12459 bp->disable_tpa |= !!IS_MF_STORAGE_ONLY(bp);
12460 /* Reduce memory usage in kdump environment by disabling TPA */
12461 bp->disable_tpa |= is_kdump_kernel();
12462
12463 /* Set TPA flags */
12464 if (bp->disable_tpa) {
12465 bp->dev->hw_features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
12466 bp->dev->features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
12467 }
12468
12469 if (CHIP_IS_E1(bp))
12470 bp->dropless_fc = 0;
12471 else
12472 bp->dropless_fc = dropless_fc | bnx2x_get_dropless_info(bp);
12473
12474 bp->mrrs = mrrs;
12475
12476 bp->tx_ring_size = IS_MF_STORAGE_ONLY(bp) ? 0 : MAX_TX_AVAIL;
12477 if (IS_VF(bp))
12478 bp->rx_ring_size = MAX_RX_AVAIL;
12479
12480 /* make sure that the numbers are in the right granularity */
12481 bp->tx_ticks = (50 / BNX2X_BTR) * BNX2X_BTR;
12482 bp->rx_ticks = (25 / BNX2X_BTR) * BNX2X_BTR;
12483
12484 bp->current_interval = CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ;
12485
12486 timer_setup(&bp->timer, bnx2x_timer, 0);
12487 bp->timer.expires = jiffies + bp->current_interval;
12488
12489 if (SHMEM2_HAS(bp, dcbx_lldp_params_offset) &&
12490 SHMEM2_HAS(bp, dcbx_lldp_dcbx_stat_offset) &&
12491 SHMEM2_HAS(bp, dcbx_en) &&
12492 SHMEM2_RD(bp, dcbx_lldp_params_offset) &&
12493 SHMEM2_RD(bp, dcbx_lldp_dcbx_stat_offset) &&
12494 SHMEM2_RD(bp, dcbx_en[BP_PORT(bp)])) {
12495 bnx2x_dcbx_set_state(bp, true, BNX2X_DCBX_ENABLED_ON_NEG_ON);
12496 bnx2x_dcbx_init_params(bp);
12497 } else {
12498 bnx2x_dcbx_set_state(bp, false, BNX2X_DCBX_ENABLED_OFF);
12499 }
12500
12501 if (CHIP_IS_E1x(bp))
12502 bp->cnic_base_cl_id = FP_SB_MAX_E1x;
12503 else
12504 bp->cnic_base_cl_id = FP_SB_MAX_E2;
12505
12506
12507 if (IS_VF(bp))
12508 bp->max_cos = 1;
12509 else if (CHIP_IS_E1x(bp))
12510 bp->max_cos = BNX2X_MULTI_TX_COS_E1X;
12511 else if (CHIP_IS_E2(bp) || CHIP_IS_E3A0(bp))
12512 bp->max_cos = BNX2X_MULTI_TX_COS_E2_E3A0;
12513 else if (CHIP_IS_E3B0(bp))
12514 bp->max_cos = BNX2X_MULTI_TX_COS_E3B0;
12515 else
12516 BNX2X_ERR("unknown chip %x revision %x\n",
12517 CHIP_NUM(bp), CHIP_REV(bp));
12518 BNX2X_DEV_INFO("set bp->max_cos to %d\n", bp->max_cos);
12519
12520
12521
12522
12523
12524 if (IS_VF(bp))
12525 bp->min_msix_vec_cnt = 1;
12526 else if (CNIC_SUPPORT(bp))
12527 bp->min_msix_vec_cnt = 3;
12528 else
12529 bp->min_msix_vec_cnt = 2;
12530 BNX2X_DEV_INFO("bp->min_msix_vec_cnt %d", bp->min_msix_vec_cnt);
12531
12532 bp->dump_preset_idx = 1;
12533
12534 return rc;
12535 }
12536
12537
12538
12539
12540
12541
12542
12543
12544
12545
12546 static int bnx2x_open(struct net_device *dev)
12547 {
12548 struct bnx2x *bp = netdev_priv(dev);
12549 int rc;
12550
12551 bp->stats_init = true;
12552
12553 netif_carrier_off(dev);
12554
12555 bnx2x_set_power_state(bp, PCI_D0);
12556
12557
12558
12559
12560
12561
12562
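/* Recovered rationale (inferred from the code below): if a parity error or
 * an unfinished reset was left over from a previous unload, the first PF
 * loaded on this engine is expected to complete the recovery, which is why
 * this block is PF-only.
 */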
12563 if (IS_PF(bp)) {
12564 int other_engine = BP_PATH(bp) ? 0 : 1;
12565 bool other_load_status, load_status;
12566 bool global = false;
12567
12568 other_load_status = bnx2x_get_load_status(bp, other_engine);
12569 load_status = bnx2x_get_load_status(bp, BP_PATH(bp));
12570 if (!bnx2x_reset_is_done(bp, BP_PATH(bp)) ||
12571 bnx2x_chk_parity_attn(bp, &global, true)) {
12572 do {
12573
12574
12575
12576
12577
12578 if (global)
12579 bnx2x_set_reset_global(bp);
12580
12581
12582
12583
12584
12585
12586 if ((!load_status &&
12587 (!global || !other_load_status)) &&
12588 bnx2x_trylock_leader_lock(bp) &&
12589 !bnx2x_leader_reset(bp)) {
12590 netdev_info(bp->dev,
12591 "Recovered in open\n");
12592 break;
12593 }
12594
12595
12596 bnx2x_set_power_state(bp, PCI_D3hot);
12597 bp->recovery_state = BNX2X_RECOVERY_FAILED;
12598
12599 BNX2X_ERR("Recovery flow hasn't been properly completed yet. Try again later.\n"
12600 "If you still see this message after a few retries then power cycle is required.\n");
12601
12602 return -EAGAIN;
12603 } while (0);
12604 }
12605 }
12606
12607 bp->recovery_state = BNX2X_RECOVERY_DONE;
12608 rc = bnx2x_nic_load(bp, LOAD_OPEN);
12609 if (rc)
12610 return rc;
12611
12612 if (IS_PF(bp))
12613 udp_tunnel_get_rx_info(dev);
12614
12615 return 0;
12616 }
12617
12618
12619 static int bnx2x_close(struct net_device *dev)
12620 {
12621 struct bnx2x *bp = netdev_priv(dev);
12622
12623
12624 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
12625
12626 return 0;
12627 }
12628
12629 struct bnx2x_mcast_list_elem_group
12630 {
12631 struct list_head mcast_group_link;
12632 struct bnx2x_mcast_list_elem mcast_elems[];
12633 };
12634
12635 #define MCAST_ELEMS_PER_PG \
12636 ((PAGE_SIZE - sizeof(struct bnx2x_mcast_list_elem_group)) / \
12637 sizeof(struct bnx2x_mcast_list_elem))
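/* Sizing example (illustrative, assuming 4 KiB pages and a 64-bit build):
 * the group header is a single list_head (16 bytes) and each element is a
 * list_head plus a MAC pointer (24 bytes), so (4096 - 16) / 24 = 170
 * multicast entries fit per page. Exact counts depend on struct padding.
 */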
12638
12639 static void bnx2x_free_mcast_macs_list(struct list_head *mcast_group_list)
12640 {
12641 struct bnx2x_mcast_list_elem_group *current_mcast_group;
12642
12643 while (!list_empty(mcast_group_list)) {
12644 current_mcast_group = list_first_entry(mcast_group_list,
12645 struct bnx2x_mcast_list_elem_group,
12646 mcast_group_link);
12647 list_del(&current_mcast_group->mcast_group_link);
12648 free_page((unsigned long)current_mcast_group);
12649 }
12650 }
12651
12652 static int bnx2x_init_mcast_macs_list(struct bnx2x *bp,
12653 struct bnx2x_mcast_ramrod_params *p,
12654 struct list_head *mcast_group_list)
12655 {
12656 struct bnx2x_mcast_list_elem *mc_mac;
12657 struct netdev_hw_addr *ha;
12658 struct bnx2x_mcast_list_elem_group *current_mcast_group = NULL;
12659 int mc_count = netdev_mc_count(bp->dev);
12660 int offset = 0;
12661
12662 INIT_LIST_HEAD(&p->mcast_list);
12663 netdev_for_each_mc_addr(ha, bp->dev) {
12664 if (!offset) {
12665 current_mcast_group =
12666 (struct bnx2x_mcast_list_elem_group *)
12667 __get_free_page(GFP_ATOMIC);
12668 if (!current_mcast_group) {
12669 bnx2x_free_mcast_macs_list(mcast_group_list);
12670 BNX2X_ERR("Failed to allocate mc MAC list\n");
12671 return -ENOMEM;
12672 }
12673 list_add(&current_mcast_group->mcast_group_link,
12674 mcast_group_list);
12675 }
12676 mc_mac = &current_mcast_group->mcast_elems[offset];
12677 mc_mac->mac = bnx2x_mc_addr(ha);
12678 list_add_tail(&mc_mac->link, &p->mcast_list);
12679 offset++;
12680 if (offset == MCAST_ELEMS_PER_PG)
12681 offset = 0;
12682 }
12683 p->mcast_list_len = mc_count;
12684 return 0;
12685 }
12686
12687
12688
12689
12690
12691
12692
12693
12694 static int bnx2x_set_uc_list(struct bnx2x *bp)
12695 {
12696 int rc;
12697 struct net_device *dev = bp->dev;
12698 struct netdev_hw_addr *ha;
12699 struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;
12700 unsigned long ramrod_flags = 0;
12701
12702
12703 rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_UC_LIST_MAC, false);
12704 if (rc < 0) {
12705 BNX2X_ERR("Failed to schedule DELETE operations: %d\n", rc);
12706 return rc;
12707 }
12708
12709 netdev_for_each_uc_addr(ha, dev) {
12710 rc = bnx2x_set_mac_one(bp, bnx2x_uc_addr(ha), mac_obj, true,
12711 BNX2X_UC_LIST_MAC, &ramrod_flags);
12712 if (rc == -EEXIST) {
12713 DP(BNX2X_MSG_SP,
12714 "Failed to schedule ADD operations: %d\n", rc);
12715
12716 rc = 0;
12717
12718 } else if (rc < 0) {
12719
12720 BNX2X_ERR("Failed to schedule ADD operations: %d\n",
12721 rc);
12722 return rc;
12723 }
12724 }
12725
12726
12727 __set_bit(RAMROD_CONT, &ramrod_flags);
12728 return bnx2x_set_mac_one(bp, NULL, mac_obj, false /* don't care */,
12729 BNX2X_UC_LIST_MAC, &ramrod_flags);
12730 }
12731
12732 static int bnx2x_set_mc_list_e1x(struct bnx2x *bp)
12733 {
12734 LIST_HEAD(mcast_group_list);
12735 struct net_device *dev = bp->dev;
12736 struct bnx2x_mcast_ramrod_params rparam = {NULL};
12737 int rc = 0;
12738
12739 rparam.mcast_obj = &bp->mcast_obj;
12740
12741
12742 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
12743 if (rc < 0) {
12744 BNX2X_ERR("Failed to clear multicast configuration: %d\n", rc);
12745 return rc;
12746 }
12747
12748
12749 if (netdev_mc_count(dev)) {
12750 rc = bnx2x_init_mcast_macs_list(bp, &rparam, &mcast_group_list);
12751 if (rc)
12752 return rc;
12753
12754
12755 rc = bnx2x_config_mcast(bp, &rparam,
12756 BNX2X_MCAST_CMD_ADD);
12757 if (rc < 0)
12758 BNX2X_ERR("Failed to set a new multicast configuration: %d\n",
12759 rc);
12760
12761 bnx2x_free_mcast_macs_list(&mcast_group_list);
12762 }
12763
12764 return rc;
12765 }
12766
12767 static int bnx2x_set_mc_list(struct bnx2x *bp)
12768 {
12769 LIST_HEAD(mcast_group_list);
12770 struct bnx2x_mcast_ramrod_params rparam = {NULL};
12771 struct net_device *dev = bp->dev;
12772 int rc = 0;
12773
12774
12775 if (CHIP_IS_E1x(bp))
12776 return bnx2x_set_mc_list_e1x(bp);
12777
12778 rparam.mcast_obj = &bp->mcast_obj;
12779
12780 if (netdev_mc_count(dev)) {
12781 rc = bnx2x_init_mcast_macs_list(bp, &rparam, &mcast_group_list);
12782 if (rc)
12783 return rc;
12784
12785
12786 rc = bnx2x_config_mcast(bp, &rparam,
12787 BNX2X_MCAST_CMD_SET);
12788 if (rc < 0)
12789 BNX2X_ERR("Failed to set a new multicast configuration: %d\n",
12790 rc);
12791
12792 bnx2x_free_mcast_macs_list(&mcast_group_list);
12793 } else {
12794
12795 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
12796 if (rc < 0)
12797 BNX2X_ERR("Failed to clear multicast configuration %d\n",
12798 rc);
12799 }
12800
12801 return rc;
12802 }
12803
12804
12805 static void bnx2x_set_rx_mode(struct net_device *dev)
12806 {
12807 struct bnx2x *bp = netdev_priv(dev);
12808
12809 if (bp->state != BNX2X_STATE_OPEN) {
12810 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
12811 return;
12812 } else {
12813
12814 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_RX_MODE,
12815 NETIF_MSG_IFUP);
12816 }
12817 }
12818
12819 void bnx2x_set_rx_mode_inner(struct bnx2x *bp)
12820 {
12821 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
12822
12823 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", bp->dev->flags);
12824
12825 netif_addr_lock_bh(bp->dev);
12826
12827 if (bp->dev->flags & IFF_PROMISC) {
12828 rx_mode = BNX2X_RX_MODE_PROMISC;
12829 } else if ((bp->dev->flags & IFF_ALLMULTI) ||
12830 ((netdev_mc_count(bp->dev) > BNX2X_MAX_MULTICAST) &&
12831 CHIP_IS_E1(bp))) {
12832 rx_mode = BNX2X_RX_MODE_ALLMULTI;
12833 } else {
12834 if (IS_PF(bp)) {
12835
12836 if (bnx2x_set_mc_list(bp) < 0)
12837 rx_mode = BNX2X_RX_MODE_ALLMULTI;
12838
12839
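/* Release the bh addr lock here: bnx2x_set_uc_list() might sleep, which is
 * illegal under a bh spinlock.
 */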
12840 netif_addr_unlock_bh(bp->dev);
12841 if (bnx2x_set_uc_list(bp) < 0)
12842 rx_mode = BNX2X_RX_MODE_PROMISC;
12843 netif_addr_lock_bh(bp->dev);
12844 } else {
12845
12846
12847
12848 bnx2x_schedule_sp_rtnl(bp,
12849 BNX2X_SP_RTNL_VFPF_MCAST, 0);
12850 }
12851 }
12852
12853 bp->rx_mode = rx_mode;
12854
12855 if (IS_MF_ISCSI_ONLY(bp))
12856 bp->rx_mode = BNX2X_RX_MODE_NONE;
12857
12858
12859 if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state)) {
12860 set_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state);
12861 netif_addr_unlock_bh(bp->dev);
12862 return;
12863 }
12864
12865 if (IS_PF(bp)) {
12866 bnx2x_set_storm_rx_mode(bp);
12867 netif_addr_unlock_bh(bp->dev);
12868 } else {
12869
12870
12871
12872
12873 netif_addr_unlock_bh(bp->dev);
12874 bnx2x_vfpf_storm_rx_mode(bp);
12875 }
12876 }
12877
12878
12879 static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
12880 int devad, u16 addr)
12881 {
12882 struct bnx2x *bp = netdev_priv(netdev);
12883 u16 value;
12884 int rc;
12885
12886 DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
12887 prtad, devad, addr);
12888
12889
12890 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
12891
12892 bnx2x_acquire_phy_lock(bp);
12893 rc = bnx2x_phy_read(&bp->link_params, prtad, devad, addr, &value);
12894 bnx2x_release_phy_lock(bp);
12895 DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
12896
12897 if (!rc)
12898 rc = value;
12899 return rc;
12900 }
12901
12902
12903 static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
12904 u16 addr, u16 value)
12905 {
12906 struct bnx2x *bp = netdev_priv(netdev);
12907 int rc;
12908
12909 DP(NETIF_MSG_LINK,
12910 "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x, value 0x%x\n",
12911 prtad, devad, addr, value);
12912
12913
12914 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
12915
12916 bnx2x_acquire_phy_lock(bp);
12917 rc = bnx2x_phy_write(&bp->link_params, prtad, devad, addr, value);
12918 bnx2x_release_phy_lock(bp);
12919 return rc;
12920 }
12921
12922
12923 static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
12924 {
12925 struct bnx2x *bp = netdev_priv(dev);
12926 struct mii_ioctl_data *mdio = if_mii(ifr);
12927
12928 if (!netif_running(dev))
12929 return -EAGAIN;
12930
12931 switch (cmd) {
12932 case SIOCSHWTSTAMP:
12933 return bnx2x_hwtstamp_ioctl(bp, ifr);
12934 default:
12935 DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
12936 mdio->phy_id, mdio->reg_num, mdio->val_in);
12937 return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
12938 }
12939 }
12940
12941 static int bnx2x_validate_addr(struct net_device *dev)
12942 {
12943 struct bnx2x *bp = netdev_priv(dev);
12944
12945
12946 if (IS_VF(bp))
12947 bnx2x_sample_bulletin(bp);
12948
12949 if (!is_valid_ether_addr(dev->dev_addr)) {
12950 BNX2X_ERR("Non-valid Ethernet address\n");
12951 return -EADDRNOTAVAIL;
12952 }
12953 return 0;
12954 }
12955
12956 static int bnx2x_get_phys_port_id(struct net_device *netdev,
12957 struct netdev_phys_item_id *ppid)
12958 {
12959 struct bnx2x *bp = netdev_priv(netdev);
12960
12961 if (!(bp->flags & HAS_PHYS_PORT_ID))
12962 return -EOPNOTSUPP;
12963
12964 ppid->id_len = sizeof(bp->phys_port_id);
12965 memcpy(ppid->id, bp->phys_port_id, ppid->id_len);
12966
12967 return 0;
12968 }
12969
12970 static netdev_features_t bnx2x_features_check(struct sk_buff *skb,
12971 struct net_device *dev,
12972 netdev_features_t features)
12973 {
12974
12975
12976
12977
12978
12979
12980
12981
12982
12983
12984
12985
12986
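/* Rationale for the check below: an skb whose gso_size plus headers would
 * exceed ~9700 bytes can crash the chip firmware, so GSO is disabled for
 * such packets; a gso_size of 9000 or less is treated as safe without
 * validating the full MAC-level length.
 */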
12987 if (unlikely(skb_is_gso(skb) &&
12988 (skb_shinfo(skb)->gso_size > 9000) &&
12989 !skb_gso_validate_mac_len(skb, 9700)))
12990 features &= ~NETIF_F_GSO_MASK;
12991
12992 features = vlan_features_check(skb, features);
12993 return vxlan_features_check(skb, features);
12994 }
12995
12996 static int __bnx2x_vlan_configure_vid(struct bnx2x *bp, u16 vid, bool add)
12997 {
12998 int rc;
12999
13000 if (IS_PF(bp)) {
13001 unsigned long ramrod_flags = 0;
13002
13003 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
13004 rc = bnx2x_set_vlan_one(bp, vid, &bp->sp_objs->vlan_obj,
13005 add, &ramrod_flags);
13006 } else {
13007 rc = bnx2x_vfpf_update_vlan(bp, vid, bp->fp->index, add);
13008 }
13009
13010 return rc;
13011 }
13012
13013 static int bnx2x_vlan_configure_vid_list(struct bnx2x *bp)
13014 {
13015 struct bnx2x_vlan_entry *vlan;
13016 int rc = 0;
13017
13018
13019 list_for_each_entry(vlan, &bp->vlan_reg, link) {
13020 if (vlan->hw)
13021 continue;
13022
13023 if (bp->vlan_cnt >= bp->vlan_credit)
13024 return -ENOBUFS;
13025
13026 rc = __bnx2x_vlan_configure_vid(bp, vlan->vid, true);
13027 if (rc) {
13028 BNX2X_ERR("Unable to config VLAN %d\n", vlan->vid);
13029 return rc;
13030 }
13031
13032 DP(NETIF_MSG_IFUP, "HW configured for VLAN %d\n", vlan->vid);
13033 vlan->hw = true;
13034 bp->vlan_cnt++;
13035 }
13036
13037 return 0;
13038 }
13039
13040 static void bnx2x_vlan_configure(struct bnx2x *bp, bool set_rx_mode)
13041 {
13042 bool need_accept_any_vlan;
13043
13044 need_accept_any_vlan = !!bnx2x_vlan_configure_vid_list(bp);
13045
13046 if (bp->accept_any_vlan != need_accept_any_vlan) {
13047 bp->accept_any_vlan = need_accept_any_vlan;
13048 DP(NETIF_MSG_IFUP, "Accept all VLAN %s\n",
13049 bp->accept_any_vlan ? "raised" : "cleared");
13050 if (set_rx_mode) {
13051 if (IS_PF(bp))
13052 bnx2x_set_rx_mode_inner(bp);
13053 else
13054 bnx2x_vfpf_storm_rx_mode(bp);
13055 }
13056 }
13057 }
13058
13059 int bnx2x_vlan_reconfigure_vid(struct bnx2x *bp)
13060 {
13061
13062 bnx2x_vlan_configure(bp, false);
13063
13064 return 0;
13065 }
13066
13067 static int bnx2x_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
13068 {
13069 struct bnx2x *bp = netdev_priv(dev);
13070 struct bnx2x_vlan_entry *vlan;
13071
13072 DP(NETIF_MSG_IFUP, "Adding VLAN %d\n", vid);
13073
13074 vlan = kmalloc(sizeof(*vlan), GFP_KERNEL);
13075 if (!vlan)
13076 return -ENOMEM;
13077
13078 vlan->vid = vid;
13079 vlan->hw = false;
13080 list_add_tail(&vlan->link, &bp->vlan_reg);
13081
13082 if (netif_running(dev))
13083 bnx2x_vlan_configure(bp, true);
13084
13085 return 0;
13086 }
13087
13088 static int bnx2x_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
13089 {
13090 struct bnx2x *bp = netdev_priv(dev);
13091 struct bnx2x_vlan_entry *vlan;
13092 bool found = false;
13093 int rc = 0;
13094
13095 DP(NETIF_MSG_IFUP, "Removing VLAN %d\n", vid);
13096
13097 list_for_each_entry(vlan, &bp->vlan_reg, link)
13098 if (vlan->vid == vid) {
13099 found = true;
13100 break;
13101 }
13102
13103 if (!found) {
13104 BNX2X_ERR("Unable to kill VLAN %d - not found\n", vid);
13105 return -EINVAL;
13106 }
13107
13108 if (netif_running(dev) && vlan->hw) {
13109 rc = __bnx2x_vlan_configure_vid(bp, vid, false);
13110 DP(NETIF_MSG_IFUP, "HW deconfigured for VLAN %d\n", vid);
13111 bp->vlan_cnt--;
13112 }
13113
13114 list_del(&vlan->link);
13115 kfree(vlan);
13116
13117 if (netif_running(dev))
13118 bnx2x_vlan_configure(bp, true);
13119
13120 DP(NETIF_MSG_IFUP, "Removing VLAN result %d\n", rc);
13121
13122 return rc;
13123 }
13124
13125 static const struct net_device_ops bnx2x_netdev_ops = {
13126 .ndo_open = bnx2x_open,
13127 .ndo_stop = bnx2x_close,
13128 .ndo_start_xmit = bnx2x_start_xmit,
13129 .ndo_select_queue = bnx2x_select_queue,
13130 .ndo_set_rx_mode = bnx2x_set_rx_mode,
13131 .ndo_set_mac_address = bnx2x_change_mac_addr,
13132 .ndo_validate_addr = bnx2x_validate_addr,
13133 .ndo_do_ioctl = bnx2x_ioctl,
13134 .ndo_change_mtu = bnx2x_change_mtu,
13135 .ndo_fix_features = bnx2x_fix_features,
13136 .ndo_set_features = bnx2x_set_features,
13137 .ndo_tx_timeout = bnx2x_tx_timeout,
13138 .ndo_vlan_rx_add_vid = bnx2x_vlan_rx_add_vid,
13139 .ndo_vlan_rx_kill_vid = bnx2x_vlan_rx_kill_vid,
13140 .ndo_setup_tc = __bnx2x_setup_tc,
13141 #ifdef CONFIG_BNX2X_SRIOV
13142 .ndo_set_vf_mac = bnx2x_set_vf_mac,
13143 .ndo_set_vf_vlan = bnx2x_set_vf_vlan,
13144 .ndo_get_vf_config = bnx2x_get_vf_config,
13145 .ndo_set_vf_spoofchk = bnx2x_set_vf_spoofchk,
13146 #endif
13147 #ifdef NETDEV_FCOE_WWNN
13148 .ndo_fcoe_get_wwn = bnx2x_fcoe_get_wwn,
13149 #endif
13150
13151 .ndo_get_phys_port_id = bnx2x_get_phys_port_id,
13152 .ndo_set_vf_link_state = bnx2x_set_vf_link_state,
13153 .ndo_features_check = bnx2x_features_check,
13154 .ndo_udp_tunnel_add = bnx2x_udp_tunnel_add,
13155 .ndo_udp_tunnel_del = bnx2x_udp_tunnel_del,
13156 };
13157
13158 static int bnx2x_set_coherency_mask(struct bnx2x *bp)
13159 {
13160 struct device *dev = &bp->pdev->dev;
13161
13162 if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) != 0 &&
13163 dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)) != 0) {
13164 dev_err(dev, "System does not support DMA, aborting\n");
13165 return -EIO;
13166 }
13167
13168 return 0;
13169 }
13170
13171 static void bnx2x_disable_pcie_error_reporting(struct bnx2x *bp)
13172 {
13173 if (bp->flags & AER_ENABLED) {
13174 pci_disable_pcie_error_reporting(bp->pdev);
13175 bp->flags &= ~AER_ENABLED;
13176 }
13177 }
13178
13179 static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
13180 struct net_device *dev, unsigned long board_type)
13181 {
13182 int rc;
13183 u32 pci_cfg_dword;
13184 bool chip_is_e1x = (board_type == BCM57710 ||
13185 board_type == BCM57711 ||
13186 board_type == BCM57711E);
13187
13188 SET_NETDEV_DEV(dev, &pdev->dev);
13189
13190 bp->dev = dev;
13191 bp->pdev = pdev;
13192
13193 rc = pci_enable_device(pdev);
13194 if (rc) {
13195 dev_err(&bp->pdev->dev,
13196 "Cannot enable PCI device, aborting\n");
13197 goto err_out;
13198 }
13199
13200 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
13201 dev_err(&bp->pdev->dev,
13202 "Cannot find PCI device base address, aborting\n");
13203 rc = -ENODEV;
13204 goto err_out_disable;
13205 }
13206
13207 if (IS_PF(bp) && !(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
13208 dev_err(&bp->pdev->dev, "Cannot find second PCI device base address, aborting\n");
13209 rc = -ENODEV;
13210 goto err_out_disable;
13211 }
13212
13213 pci_read_config_dword(pdev, PCICFG_REVISION_ID_OFFSET, &pci_cfg_dword);
13214 if ((pci_cfg_dword & PCICFG_REVESION_ID_MASK) ==
13215 PCICFG_REVESION_ID_ERROR_VAL) {
13216 pr_err("PCI device error, probably due to fan failure, aborting\n");
13217 rc = -ENODEV;
13218 goto err_out_disable;
13219 }
13220
13221 if (atomic_read(&pdev->enable_cnt) == 1) {
13222 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
13223 if (rc) {
13224 dev_err(&bp->pdev->dev,
13225 "Cannot obtain PCI resources, aborting\n");
13226 goto err_out_disable;
13227 }
13228
13229 pci_set_master(pdev);
13230 pci_save_state(pdev);
13231 }
13232
13233 if (IS_PF(bp)) {
13234 if (!pdev->pm_cap) {
13235 dev_err(&bp->pdev->dev,
13236 "Cannot find power management capability, aborting\n");
13237 rc = -EIO;
13238 goto err_out_release;
13239 }
13240 }
13241
13242 if (!pci_is_pcie(pdev)) {
13243 dev_err(&bp->pdev->dev, "Not PCI Express, aborting\n");
13244 rc = -EIO;
13245 goto err_out_release;
13246 }
13247
13248 rc = bnx2x_set_coherency_mask(bp);
13249 if (rc)
13250 goto err_out_release;
13251
13252 dev->mem_start = pci_resource_start(pdev, 0);
13253 dev->base_addr = dev->mem_start;
13254 dev->mem_end = pci_resource_end(pdev, 0);
13255
13256 dev->irq = pdev->irq;
13257
13258 bp->regview = pci_ioremap_bar(pdev, 0);
13259 if (!bp->regview) {
13260 dev_err(&bp->pdev->dev,
13261 "Cannot map register space, aborting\n");
13262 rc = -ENOMEM;
13263 goto err_out_release;
13264 }
13265
13266
13267
13268
13269
13270
13271 if (chip_is_e1x) {
13272 bp->pf_num = PCI_FUNC(pdev->devfn);
13273 } else {
13274
13275 pci_read_config_dword(bp->pdev,
13276 PCICFG_ME_REGISTER, &pci_cfg_dword);
13277 bp->pf_num = (u8)((pci_cfg_dword & ME_REG_ABS_PF_NUM) >>
13278 ME_REG_ABS_PF_NUM_SHIFT);
13279 }
13280 BNX2X_DEV_INFO("me reg PF num: %d\n", bp->pf_num);
13281
13282
13283 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
13284 PCICFG_VENDOR_ID_OFFSET);
13285
13286
13287 pdev->needs_freset = 1;
13288
13289
13290 rc = pci_enable_pcie_error_reporting(pdev);
13291 if (!rc)
13292 bp->flags |= AER_ENABLED;
13293 else
13294 BNX2X_DEV_INFO("Failed to configure PCIe AER [%d]\n", rc);
13295
13296
13297
13298
13299
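/* The following indirect addresses are not used by the driver, so clear
 * them for every function (including the second function on E1x chips).
 */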
13300 if (IS_PF(bp)) {
13301 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0, 0);
13302 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0, 0);
13303 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0, 0);
13304 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0, 0);
13305
13306 if (chip_is_e1x) {
13307 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F1, 0);
13308 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F1, 0);
13309 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F1, 0);
13310 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F1, 0);
13311 }
13312
13313
13314
13315
13316
13317 if (!chip_is_e1x)
13318 REG_WR(bp,
13319 PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
13320 }
13321
13322 dev->watchdog_timeo = TX_TIMEOUT;
13323
13324 dev->netdev_ops = &bnx2x_netdev_ops;
13325 bnx2x_set_ethtool_ops(bp, dev);
13326
13327 dev->priv_flags |= IFF_UNICAST_FLT;
13328
13329 dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
13330 NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 |
13331 NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO | NETIF_F_GRO_HW |
13332 NETIF_F_RXHASH | NETIF_F_HW_VLAN_CTAG_TX;
13333 if (!chip_is_e1x) {
13334 dev->hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_GRE_CSUM |
13335 NETIF_F_GSO_IPXIP4 |
13336 NETIF_F_GSO_UDP_TUNNEL |
13337 NETIF_F_GSO_UDP_TUNNEL_CSUM |
13338 NETIF_F_GSO_PARTIAL;
13339
13340 dev->hw_enc_features =
13341 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
13342 NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 |
13343 NETIF_F_GSO_IPXIP4 |
13344 NETIF_F_GSO_GRE | NETIF_F_GSO_GRE_CSUM |
13345 NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_UDP_TUNNEL_CSUM |
13346 NETIF_F_GSO_PARTIAL;
13347
13348 dev->gso_partial_features = NETIF_F_GSO_GRE_CSUM |
13349 NETIF_F_GSO_UDP_TUNNEL_CSUM;
13350 }
13351
13352 dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
13353 NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_HIGHDMA;
13354
13355 if (IS_PF(bp)) {
13356 if (chip_is_e1x)
13357 bp->accept_any_vlan = true;
13358 else
13359 dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
13360 }
13361
13362
13363
13364
13365 dev->features |= dev->hw_features | NETIF_F_HW_VLAN_CTAG_RX;
13366 dev->features |= NETIF_F_HIGHDMA;
13367 if (dev->features & NETIF_F_LRO)
13368 dev->features &= ~NETIF_F_GRO_HW;
13369
13370
13371 dev->hw_features |= NETIF_F_LOOPBACK;
13372
13373 #ifdef BCM_DCBNL
13374 dev->dcbnl_ops = &bnx2x_dcbnl_ops;
13375 #endif
13376
13377
13378 dev->min_mtu = ETH_MIN_PACKET_SIZE;
13379 dev->max_mtu = ETH_MAX_JUMBO_PACKET_SIZE;
13380
13381
13382 bp->mdio.prtad = MDIO_PRTAD_NONE;
13383 bp->mdio.mmds = 0;
13384 bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
13385 bp->mdio.dev = dev;
13386 bp->mdio.mdio_read = bnx2x_mdio_read;
13387 bp->mdio.mdio_write = bnx2x_mdio_write;
13388
13389 return 0;
13390
13391 err_out_release:
13392 if (atomic_read(&pdev->enable_cnt) == 1)
13393 pci_release_regions(pdev);
13394
13395 err_out_disable:
13396 pci_disable_device(pdev);
13397
13398 err_out:
13399 return rc;
13400 }
13401
13402 static int bnx2x_check_firmware(struct bnx2x *bp)
13403 {
13404 const struct firmware *firmware = bp->firmware;
13405 struct bnx2x_fw_file_hdr *fw_hdr;
13406 struct bnx2x_fw_file_section *sections;
13407 u32 offset, len, num_ops;
13408 __be16 *ops_offsets;
13409 int i;
13410 const u8 *fw_ver;
13411
13412 if (firmware->size < sizeof(struct bnx2x_fw_file_hdr)) {
13413 BNX2X_ERR("Wrong FW size\n");
13414 return -EINVAL;
13415 }
13416
13417 fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
13418 sections = (struct bnx2x_fw_file_section *)fw_hdr;
13419
13420
13421
13422 for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
13423 offset = be32_to_cpu(sections[i].offset);
13424 len = be32_to_cpu(sections[i].len);
13425 if (offset + len > firmware->size) {
13426 BNX2X_ERR("Section %d length is out of bounds\n", i);
13427 return -EINVAL;
13428 }
13429 }
13430
13431
13432 offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
13433 ops_offsets = (__force __be16 *)(firmware->data + offset);
13434 num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
13435
13436 for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
13437 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
13438 BNX2X_ERR("Section offset %d is out of bounds\n", i);
13439 return -EINVAL;
13440 }
13441 }
13442
13443
13444 offset = be32_to_cpu(fw_hdr->fw_version.offset);
13445 fw_ver = firmware->data + offset;
13446 if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
13447 (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
13448 (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
13449 (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
13450 BNX2X_ERR("Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
13451 fw_ver[0], fw_ver[1], fw_ver[2], fw_ver[3],
13452 BCM_5710_FW_MAJOR_VERSION,
13453 BCM_5710_FW_MINOR_VERSION,
13454 BCM_5710_FW_REVISION_VERSION,
13455 BCM_5710_FW_ENGINEERING_VERSION);
13456 return -EINVAL;
13457 }
13458
13459 return 0;
13460 }
13461
13462 static void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
13463 {
13464 const __be32 *source = (const __be32 *)_source;
13465 u32 *target = (u32 *)_target;
13466 u32 i;
13467
13468 for (i = 0; i < n/4; i++)
13469 target[i] = be32_to_cpu(source[i]);
13470 }
13471
13472
13473
13474
13475
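/* Each init op in the firmware file is a pair of big-endian 32-bit words:
 * { op(8 bits) | offset(24 bits) } followed by a raw 32-bit data word,
 * unpacked below into struct raw_op.
 */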
13476 static void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
13477 {
13478 const __be32 *source = (const __be32 *)_source;
13479 struct raw_op *target = (struct raw_op *)_target;
13480 u32 i, j, tmp;
13481
13482 for (i = 0, j = 0; i < n/8; i++, j += 2) {
13483 tmp = be32_to_cpu(source[j]);
13484 target[i].op = (tmp >> 24) & 0xff;
13485 target[i].offset = tmp & 0xffffff;
13486 target[i].raw_data = be32_to_cpu(source[j + 1]);
13487 }
13488 }
13489
13490
13491
13492
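/* Each IRO entry is three big-endian 32-bit words, packed as
 * base, { m1(16) | m2(16) }, { m3(16) | size(16) }.
 */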
13493 static void bnx2x_prep_iro(const u8 *_source, u8 *_target, u32 n)
13494 {
13495 const __be32 *source = (const __be32 *)_source;
13496 struct iro *target = (struct iro *)_target;
13497 u32 i, j, tmp;
13498
13499 for (i = 0, j = 0; i < n/sizeof(struct iro); i++) {
13500 target[i].base = be32_to_cpu(source[j]);
13501 j++;
13502 tmp = be32_to_cpu(source[j]);
13503 target[i].m1 = (tmp >> 16) & 0xffff;
13504 target[i].m2 = tmp & 0xffff;
13505 j++;
13506 tmp = be32_to_cpu(source[j]);
13507 target[i].m3 = (tmp >> 16) & 0xffff;
13508 target[i].size = tmp & 0xffff;
13509 j++;
13510 }
13511 }
13512
13513 static void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
13514 {
13515 const __be16 *source = (const __be16 *)_source;
13516 u16 *target = (u16 *)_target;
13517 u32 i;
13518
13519 for (i = 0; i < n/2; i++)
13520 target[i] = be16_to_cpu(source[i]);
13521 }
13522
13523 #define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
13524 do { \
13525 u32 len = be32_to_cpu(fw_hdr->arr.len); \
13526 bp->arr = kmalloc(len, GFP_KERNEL); \
13527 if (!bp->arr) \
13528 goto lbl; \
13529 func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
13530 (u8 *)bp->arr, len); \
13531 } while (0)
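/* The macro above token-pastes 'arr' into both bp->arr and fw_hdr->arr: it
 * allocates the destination array and lets 'func' convert that section of
 * the firmware image from big-endian file order into host order.
 */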
13532
13533 static int bnx2x_init_firmware(struct bnx2x *bp)
13534 {
13535 const char *fw_file_name;
13536 struct bnx2x_fw_file_hdr *fw_hdr;
13537 int rc;
13538
13539 if (bp->firmware)
13540 return 0;
13541
13542 if (CHIP_IS_E1(bp))
13543 fw_file_name = FW_FILE_NAME_E1;
13544 else if (CHIP_IS_E1H(bp))
13545 fw_file_name = FW_FILE_NAME_E1H;
13546 else if (!CHIP_IS_E1x(bp))
13547 fw_file_name = FW_FILE_NAME_E2;
13548 else {
13549 BNX2X_ERR("Unsupported chip revision\n");
13550 return -EINVAL;
13551 }
13552 BNX2X_DEV_INFO("Loading %s\n", fw_file_name);
13553
13554 rc = request_firmware(&bp->firmware, fw_file_name, &bp->pdev->dev);
13555 if (rc) {
13556 BNX2X_ERR("Can't load firmware file %s\n",
13557 fw_file_name);
13558 goto request_firmware_exit;
13559 }
13560
13561 rc = bnx2x_check_firmware(bp);
13562 if (rc) {
13563 BNX2X_ERR("Corrupt firmware file %s\n", fw_file_name);
13564 goto request_firmware_exit;
13565 }
13566
13567 fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;
13568
13569
13570
13571 rc = -ENOMEM;
13572 BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
13573
13574
13575 BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);
13576
13577
13578 BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
13579 be16_to_cpu_n);
13580
13581
13582 INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
13583 be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
13584 INIT_TSEM_PRAM_DATA(bp) = bp->firmware->data +
13585 be32_to_cpu(fw_hdr->tsem_pram_data.offset);
13586 INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
13587 be32_to_cpu(fw_hdr->usem_int_table_data.offset);
13588 INIT_USEM_PRAM_DATA(bp) = bp->firmware->data +
13589 be32_to_cpu(fw_hdr->usem_pram_data.offset);
13590 INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
13591 be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
13592 INIT_XSEM_PRAM_DATA(bp) = bp->firmware->data +
13593 be32_to_cpu(fw_hdr->xsem_pram_data.offset);
13594 INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
13595 be32_to_cpu(fw_hdr->csem_int_table_data.offset);
13596 INIT_CSEM_PRAM_DATA(bp) = bp->firmware->data +
13597 be32_to_cpu(fw_hdr->csem_pram_data.offset);
13598
13599 BNX2X_ALLOC_AND_SET(iro_arr, iro_alloc_err, bnx2x_prep_iro);
13600
13601 return 0;
13602
13603 iro_alloc_err:
13604 kfree(bp->init_ops_offsets);
13605 init_offsets_alloc_err:
13606 kfree(bp->init_ops);
13607 init_ops_alloc_err:
13608 kfree(bp->init_data);
13609 request_firmware_exit:
13610 release_firmware(bp->firmware);
13611 bp->firmware = NULL;
13612
13613 return rc;
13614 }
13615
13616 static void bnx2x_release_firmware(struct bnx2x *bp)
13617 {
13618 kfree(bp->init_ops_offsets);
13619 kfree(bp->init_ops);
13620 kfree(bp->init_data);
13621 release_firmware(bp->firmware);
13622 bp->firmware = NULL;
13623 }
13624
13625 static struct bnx2x_func_sp_drv_ops bnx2x_func_sp_drv = {
13626 .init_hw_cmn_chip = bnx2x_init_hw_common_chip,
13627 .init_hw_cmn = bnx2x_init_hw_common,
13628 .init_hw_port = bnx2x_init_hw_port,
13629 .init_hw_func = bnx2x_init_hw_func,
13630
13631 .reset_hw_cmn = bnx2x_reset_common,
13632 .reset_hw_port = bnx2x_reset_port,
13633 .reset_hw_func = bnx2x_reset_func,
13634
13635 .gunzip_init = bnx2x_gunzip_init,
13636 .gunzip_end = bnx2x_gunzip_end,
13637
13638 .init_fw = bnx2x_init_firmware,
13639 .release_fw = bnx2x_release_firmware,
13640 };
13641
13642 void bnx2x__init_func_obj(struct bnx2x *bp)
13643 {
13644
13645 bnx2x_setup_dmae(bp);
13646
13647 bnx2x_init_func_obj(bp, &bp->func_obj,
13648 bnx2x_sp(bp, func_rdata),
13649 bnx2x_sp_mapping(bp, func_rdata),
13650 bnx2x_sp(bp, func_afex_rdata),
13651 bnx2x_sp_mapping(bp, func_afex_rdata),
13652 &bnx2x_func_sp_drv);
13653 }
13654
13655
13656 static int bnx2x_set_qm_cid_count(struct bnx2x *bp)
13657 {
13658 int cid_count = BNX2X_L2_MAX_CID(bp);
13659
13660 if (IS_SRIOV(bp))
13661 cid_count += BNX2X_VF_CIDS;
13662
13663 if (CNIC_SUPPORT(bp))
13664 cid_count += CNIC_CID_MAX;
13665
13666 return roundup(cid_count, QM_CID_ROUND);
13667 }
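/* Worked example (QM_CID_ROUND is assumed to be 1024 here): an L2 CID
 * count of 1154 after adding the VF and CNIC CIDs rounds up to 2048.
 */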
13668
13669
13670
13671
13672
13673
13674
13675 static int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev, int cnic_cnt)
13676 {
13677 int index;
13678 u16 control = 0;
13679
13680
13681
13682
13683
13684 if (!pdev->msix_cap) {
13685 dev_info(&pdev->dev, "no msix capability found\n");
13686 return 1 + cnic_cnt;
13687 }
13688 dev_info(&pdev->dev, "msix capability found\n");
13689
13690
13691
13692
13693
13694
13695
13696
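/* The MSI-X table-size field read below is encoded as N-1 (the index of
 * the last table entry), which is exactly the count wanted here: all
 * status blocks excluding the default SB. VFs have no default SB, which is
 * why the caller adds one back for them.
 */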
13697 pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &control);
13698
13699 index = control & PCI_MSIX_FLAGS_QSIZE;
13700
13701 return index;
13702 }
13703
13704 static int set_max_cos_est(int chip_id)
13705 {
13706 switch (chip_id) {
13707 case BCM57710:
13708 case BCM57711:
13709 case BCM57711E:
13710 return BNX2X_MULTI_TX_COS_E1X;
13711 case BCM57712:
13712 case BCM57712_MF:
13713 return BNX2X_MULTI_TX_COS_E2_E3A0;
13714 case BCM57800:
13715 case BCM57800_MF:
13716 case BCM57810:
13717 case BCM57810_MF:
13718 case BCM57840_4_10:
13719 case BCM57840_2_20:
13720 case BCM57840_O:
13721 case BCM57840_MFO:
13722 case BCM57840_MF:
13723 case BCM57811:
13724 case BCM57811_MF:
13725 return BNX2X_MULTI_TX_COS_E3B0;
13726 case BCM57712_VF:
13727 case BCM57800_VF:
13728 case BCM57810_VF:
13729 case BCM57840_VF:
13730 case BCM57811_VF:
13731 return 1;
13732 default:
13733 pr_err("Unknown board_type (%d), aborting\n", chip_id);
13734 return -ENODEV;
13735 }
13736 }
13737
13738 static int set_is_vf(int chip_id)
13739 {
13740 switch (chip_id) {
13741 case BCM57712_VF:
13742 case BCM57800_VF:
13743 case BCM57810_VF:
13744 case BCM57840_VF:
13745 case BCM57811_VF:
13746 return true;
13747 default:
13748 return false;
13749 }
13750 }
13751
13752
13753 #define tsgen_ctrl 0x0
13754 #define tsgen_freecount 0x10
13755 #define tsgen_synctime_t0 0x20
13756 #define tsgen_offset_t0 0x28
13757 #define tsgen_drift_t0 0x30
13758 #define tsgen_synctime_t1 0x58
13759 #define tsgen_offset_t1 0x60
13760 #define tsgen_drift_t1 0x68
13761
13762
13763 static int bnx2x_send_update_drift_ramrod(struct bnx2x *bp, int drift_dir,
13764 int best_val, int best_period)
13765 {
13766 struct bnx2x_func_state_params func_params = {NULL};
13767 struct bnx2x_func_set_timesync_params *set_timesync_params =
13768 &func_params.params.set_timesync;
13769
13770
13771 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
13772 __set_bit(RAMROD_RETRY, &func_params.ramrod_flags);
13773
13774 func_params.f_obj = &bp->func_obj;
13775 func_params.cmd = BNX2X_F_CMD_SET_TIMESYNC;
13776
13777
13778 set_timesync_params->drift_adjust_cmd = TS_DRIFT_ADJUST_SET;
13779 set_timesync_params->offset_cmd = TS_OFFSET_KEEP;
13780 set_timesync_params->add_sub_drift_adjust_value =
13781 drift_dir ? TS_ADD_VALUE : TS_SUB_VALUE;
13782 set_timesync_params->drift_adjust_value = best_val;
13783 set_timesync_params->drift_adjust_period = best_period;
13784
13785 return bnx2x_func_state_change(bp, &func_params);
13786 }
13787
13788 static int bnx2x_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
13789 {
13790 struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
13791 int rc;
13792 int drift_dir = 1;
13793 int val, period, period1, period2, dif, dif1, dif2;
13794 int best_dif = BNX2X_MAX_PHC_DRIFT, best_period = 0, best_val = 0;
13795
13796 DP(BNX2X_MSG_PTP, "PTP adjfreq called, ppb = %d\n", ppb);
13797
13798 if (!netif_running(bp->dev)) {
13799 DP(BNX2X_MSG_PTP,
13800 "PTP adjfreq called while the interface is down\n");
13801 return -ENETDOWN;
13802 }
13803
13804 if (ppb < 0) {
13805 ppb = -ppb;
13806 drift_dir = 0;
13807 }
13808
13809 if (ppb == 0) {
13810 best_val = 1;
13811 best_period = 0x1FFFFFF;
13812 } else if (ppb >= BNX2X_MAX_PHC_DRIFT) {
13813 best_val = 31;
13814 best_period = 1;
13815 } else {
13816
13817
13818
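/* Skip val = 8, 16 and 24 (and 0, which would give no drift at all) below;
 * per the original driver note, these values are not supported for drift
 * adjustment.
 */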
13819 for (val = 0; val <= 31; val++) {
13820 if ((val & 0x7) == 0)
13821 continue;
13822 period1 = val * 1000000 / ppb;
13823 period2 = period1 + 1;
13824 if (period1 != 0)
13825 dif1 = ppb - (val * 1000000 / period1);
13826 else
13827 dif1 = BNX2X_MAX_PHC_DRIFT;
13828 if (dif1 < 0)
13829 dif1 = -dif1;
13830 dif2 = ppb - (val * 1000000 / period2);
13831 if (dif2 < 0)
13832 dif2 = -dif2;
13833 dif = (dif1 < dif2) ? dif1 : dif2;
13834 period = (dif1 < dif2) ? period1 : period2;
13835 if (dif < best_dif) {
13836 best_dif = dif;
13837 best_val = val;
13838 best_period = period;
13839 }
13840 }
13841 }
13842
13843 rc = bnx2x_send_update_drift_ramrod(bp, drift_dir, best_val,
13844 best_period);
13845 if (rc) {
13846 BNX2X_ERR("Failed to set drift\n");
13847 return -EFAULT;
13848 }
13849
13850 DP(BNX2X_MSG_PTP, "Configured val = %d, period = %d\n", best_val,
13851 best_period);
13852
13853 return 0;
13854 }
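/* A minimal user-space sketch (illustration only, not driver code) of the
 * (val, period) search above: the PHC approximates a requested drift of
 * ppb parts-per-billion as val * 1000000 / period, with val in 1..31
 * (multiples of 8 excluded) and period chosen to minimize the error. The
 * 31000000 cap matches the clamp above (val = 31, period = 1 gives 31M
 * ppb); all names here are hypothetical.
 */
#if 0
#include <stdio.h>
#include <stdlib.h>

#define MAX_PHC_DRIFT 31000000

/* Find the (val, period) pair minimizing |ppb - val * 1000000 / period|,
 * assuming 0 < ppb < MAX_PHC_DRIFT (the driver special-cases the rest). */
static void find_drift_params(int ppb, int *best_val, int *best_period)
{
	int val, period1, period2, dif1, dif2, best_dif = MAX_PHC_DRIFT;

	for (val = 1; val <= 31; val++) {
		if ((val & 0x7) == 0)	/* skip unsupported 8, 16, 24 */
			continue;
		period1 = val * 1000000 / ppb;
		period2 = period1 + 1;
		dif1 = period1 ? abs(ppb - val * 1000000 / period1)
			       : MAX_PHC_DRIFT;
		dif2 = abs(ppb - val * 1000000 / period2);
		if (dif1 < best_dif || dif2 < best_dif) {
			best_dif = dif1 < dif2 ? dif1 : dif2;
			*best_val = val;
			*best_period = dif1 < dif2 ? period1 : period2;
		}
	}
}

int main(void)
{
	int val = 0, period = 0;

	find_drift_params(100000, &val, &period);	/* 100 ppm */
	printf("val=%d period=%d -> %d ppb\n",
	       val, period, val * 1000000 / period);	/* val=1 period=10 */
	return 0;
}
#endif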
13855
13856 static int bnx2x_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
13857 {
13858 struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
13859
13860 if (!netif_running(bp->dev)) {
13861 DP(BNX2X_MSG_PTP,
13862 "PTP adjtime called while the interface is down\n");
13863 return -ENETDOWN;
13864 }
13865
13866 DP(BNX2X_MSG_PTP, "PTP adjtime called, delta = %llx\n", delta);
13867
13868 timecounter_adjtime(&bp->timecounter, delta);
13869
13870 return 0;
13871 }
13872
13873 static int bnx2x_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
13874 {
13875 struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
13876 u64 ns;
13877
13878 if (!netif_running(bp->dev)) {
13879 DP(BNX2X_MSG_PTP,
13880 "PTP gettime called while the interface is down\n");
13881 return -ENETDOWN;
13882 }
13883
13884 ns = timecounter_read(&bp->timecounter);
13885
13886 DP(BNX2X_MSG_PTP, "PTP gettime called, ns = %llu\n", ns);
13887
13888 *ts = ns_to_timespec64(ns);
13889
13890 return 0;
13891 }
13892
13893 static int bnx2x_ptp_settime(struct ptp_clock_info *ptp,
13894 const struct timespec64 *ts)
13895 {
13896 struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
13897 u64 ns;
13898
13899 if (!netif_running(bp->dev)) {
13900 DP(BNX2X_MSG_PTP,
13901 "PTP settime called while the interface is down\n");
13902 return -ENETDOWN;
13903 }
13904
13905 ns = timespec64_to_ns(ts);
13906
13907 DP(BNX2X_MSG_PTP, "PTP settime called, ns = %llu\n", ns);
13908
13909
13910 timecounter_init(&bp->timecounter, &bp->cyclecounter, ns);
13911
13912 return 0;
13913 }
13914
13915
13916 static int bnx2x_ptp_enable(struct ptp_clock_info *ptp,
13917 struct ptp_clock_request *rq, int on)
13918 {
13919 struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
13920
13921 BNX2X_ERR("PHC ancillary features are not supported\n");
13922 return -ENOTSUPP;
13923 }
13924
13925 void bnx2x_register_phc(struct bnx2x *bp)
13926 {
13927
13928 bp->ptp_clock_info.owner = THIS_MODULE;
13929 snprintf(bp->ptp_clock_info.name, 16, "%s", bp->dev->name);
13930 bp->ptp_clock_info.max_adj = BNX2X_MAX_PHC_DRIFT;
13931 bp->ptp_clock_info.n_alarm = 0;
13932 bp->ptp_clock_info.n_ext_ts = 0;
13933 bp->ptp_clock_info.n_per_out = 0;
13934 bp->ptp_clock_info.pps = 0;
13935 bp->ptp_clock_info.adjfreq = bnx2x_ptp_adjfreq;
13936 bp->ptp_clock_info.adjtime = bnx2x_ptp_adjtime;
13937 bp->ptp_clock_info.gettime64 = bnx2x_ptp_gettime;
13938 bp->ptp_clock_info.settime64 = bnx2x_ptp_settime;
13939 bp->ptp_clock_info.enable = bnx2x_ptp_enable;
13940
13941 bp->ptp_clock = ptp_clock_register(&bp->ptp_clock_info, &bp->pdev->dev);
13942 if (IS_ERR(bp->ptp_clock)) {
13943 bp->ptp_clock = NULL;
13944 BNX2X_ERR("PTP clock registration failed\n");
13945 }
13946 }
13947
13948 static int bnx2x_init_one(struct pci_dev *pdev,
13949 const struct pci_device_id *ent)
13950 {
13951 struct net_device *dev = NULL;
13952 struct bnx2x *bp;
13953 int rc, max_non_def_sbs;
13954 int rx_count, tx_count, rss_count, doorbell_size;
13955 int max_cos_est;
13956 bool is_vf;
13957 int cnic_cnt;
13958
13959
13960
13961
13962 if (is_kdump_kernel()) {
13963 ktime_t now = ktime_get_boottime();
13964 ktime_t fw_ready_time = ktime_set(5, 0);
13965
13966 if (ktime_before(now, fw_ready_time))
13967 msleep(ktime_ms_delta(fw_ready_time, now));
13968 }
13969
13970
13971
13972
13973
13974
13975
13976
13977
13978 max_cos_est = set_max_cos_est(ent->driver_data);
13979 if (max_cos_est < 0)
13980 return max_cos_est;
13981 is_vf = set_is_vf(ent->driver_data);
13982 cnic_cnt = is_vf ? 0 : 1;
13983
13984 max_non_def_sbs = bnx2x_get_num_non_def_sbs(pdev, cnic_cnt);
13985
13986
13987 max_non_def_sbs += is_vf ? 1 : 0;
13988
13989
13990 rss_count = max_non_def_sbs - cnic_cnt;
13991
13992 if (rss_count < 1)
13993 return -EINVAL;
13994
13995
13996 rx_count = rss_count + cnic_cnt;
13997
13998
13999
14000
14001 tx_count = rss_count * max_cos_est + cnic_cnt;
14002
14003
14004 dev = alloc_etherdev_mqs(sizeof(*bp), tx_count, rx_count);
14005 if (!dev)
14006 return -ENOMEM;
14007
14008 bp = netdev_priv(dev);
14009
14010 bp->flags = 0;
14011 if (is_vf)
14012 bp->flags |= IS_VF_FLAG;
14013
14014 bp->igu_sb_cnt = max_non_def_sbs;
14015 bp->igu_base_addr = IS_VF(bp) ? PXP_VF_ADDR_IGU_START : BAR_IGU_INTMEM;
14016 bp->msg_enable = debug;
14017 bp->cnic_support = cnic_cnt;
14018 bp->cnic_probe = bnx2x_cnic_probe;
14019
14020 pci_set_drvdata(pdev, dev);
14021
14022 rc = bnx2x_init_dev(bp, pdev, dev, ent->driver_data);
14023 if (rc < 0) {
14024 free_netdev(dev);
14025 return rc;
14026 }
14027
14028 BNX2X_DEV_INFO("This is a %s function\n",
14029 IS_PF(bp) ? "physical" : "virtual");
14030 BNX2X_DEV_INFO("Cnic support is %s\n", CNIC_SUPPORT(bp) ? "on" : "off");
14031 BNX2X_DEV_INFO("Max num of status blocks %d\n", max_non_def_sbs);
14032 BNX2X_DEV_INFO("Allocated netdev with %d tx and %d rx queues\n",
14033 tx_count, rx_count);
14034
14035 rc = bnx2x_init_bp(bp);
14036 if (rc)
14037 goto init_one_exit;
14038
14039
14040
14041
14042
14043 if (IS_VF(bp)) {
14044 bp->doorbells = bnx2x_vf_doorbells(bp);
14045 rc = bnx2x_vf_pci_alloc(bp);
14046 if (rc)
14047 goto init_one_freemem;
14048 } else {
14049 doorbell_size = BNX2X_L2_MAX_CID(bp) * (1 << BNX2X_DB_SHIFT);
14050 if (doorbell_size > pci_resource_len(pdev, 2)) {
14051 dev_err(&bp->pdev->dev,
14052 "Cannot map doorbells, bar size too small, aborting\n");
14053 rc = -ENOMEM;
14054 goto init_one_freemem;
14055 }
14056 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
14057 doorbell_size);
14058 }
14059 if (!bp->doorbells) {
14060 dev_err(&bp->pdev->dev,
14061 "Cannot map doorbell space, aborting\n");
14062 rc = -ENOMEM;
14063 goto init_one_freemem;
14064 }
14065
14066 if (IS_VF(bp)) {
14067 rc = bnx2x_vfpf_acquire(bp, tx_count, rx_count);
14068 if (rc)
14069 goto init_one_freemem;
14070
14071 #ifdef CONFIG_BNX2X_SRIOV
14072
14073 if (bp->acquire_resp.pfdev_info.pf_cap & PFVF_CAP_VLAN_FILTER) {
14074 dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
14075 dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
14076 }
14077 #endif
14078 }
14079
14080
14081 rc = bnx2x_iov_init_one(bp, int_mode, BNX2X_MAX_NUM_OF_VFS);
14082 if (rc)
14083 goto init_one_freemem;
14084
14085
14086 bp->qm_cid_count = bnx2x_set_qm_cid_count(bp);
14087 BNX2X_DEV_INFO("qm_cid_count %d\n", bp->qm_cid_count);
14088
14089
14090 if (CHIP_IS_E1x(bp))
14091 bp->flags |= NO_FCOE_FLAG;
14092
14093
14094 bnx2x_set_num_queues(bp);
14095
14096
14097
14098
14099 rc = bnx2x_set_int_mode(bp);
14100 if (rc) {
14101 dev_err(&pdev->dev, "Cannot set interrupts\n");
14102 goto init_one_freemem;
14103 }
14104 BNX2X_DEV_INFO("set interrupts successfully\n");
14105
14106
14107 rc = register_netdev(dev);
14108 if (rc) {
14109 dev_err(&pdev->dev, "Cannot register net device\n");
14110 goto init_one_freemem;
14111 }
14112 BNX2X_DEV_INFO("device name after netdev register %s\n", dev->name);
14113
14114 if (!NO_FCOE(bp)) {
14115
14116 rtnl_lock();
14117 dev_addr_add(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
14118 rtnl_unlock();
14119 }
14120 BNX2X_DEV_INFO(
14121 "%s (%c%d) PCI-E found at mem %lx, IRQ %d, node addr %pM\n",
14122 board_info[ent->driver_data].name,
14123 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
14124 dev->base_addr, bp->pdev->irq, dev->dev_addr);
14125 pcie_print_link_status(bp->pdev);
14126
14127 if (!IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp))
14128 bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_DISABLED);
14129
14130 return 0;
14131
14132 init_one_freemem:
14133 bnx2x_free_mem_bp(bp);
14134
14135 init_one_exit:
14136 bnx2x_disable_pcie_error_reporting(bp);
14137
14138 if (bp->regview)
14139 iounmap(bp->regview);
14140
14141 if (IS_PF(bp) && bp->doorbells)
14142 iounmap(bp->doorbells);
14143
14144 free_netdev(dev);
14145
14146 if (atomic_read(&pdev->enable_cnt) == 1)
14147 pci_release_regions(pdev);
14148
14149 pci_disable_device(pdev);
14150
14151 return rc;
14152 }
14153
14154 static void __bnx2x_remove(struct pci_dev *pdev,
14155 struct net_device *dev,
14156 struct bnx2x *bp,
14157 bool remove_netdev)
14158 {
14159
14160 if (!NO_FCOE(bp)) {
14161 rtnl_lock();
14162 dev_addr_del(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
14163 rtnl_unlock();
14164 }
14165
14166 #ifdef BCM_DCBNL
14167
14168 bnx2x_dcbnl_update_applist(bp, true);
14169 #endif
14170
14171 if (IS_PF(bp) &&
14172 !BP_NOMCP(bp) &&
14173 (bp->flags & BC_SUPPORTS_RMMOD_CMD))
14174 bnx2x_fw_command(bp, DRV_MSG_CODE_RMMOD, 0);
14175
14176
14177 if (remove_netdev) {
14178 unregister_netdev(dev);
14179 } else {
14180 rtnl_lock();
14181 dev_close(dev);
14182 rtnl_unlock();
14183 }
14184
14185 bnx2x_iov_remove_one(bp);
14186
14187
14188 if (IS_PF(bp)) {
14189 bnx2x_set_power_state(bp, PCI_D0);
14190 bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_NOT_LOADED);
14191
14192
14193
14194
14195 bnx2x_reset_endianity(bp);
14196 }
14197
14198
14199 bnx2x_disable_msi(bp);
14200
14201
14202 if (IS_PF(bp))
14203 bnx2x_set_power_state(bp, PCI_D3hot);
14204
14205
14206 cancel_delayed_work_sync(&bp->sp_rtnl_task);
14207
14208
14209 if (IS_VF(bp))
14210 bnx2x_vfpf_release(bp);
14211
14212
14213 if (system_state == SYSTEM_POWER_OFF) {
14214 pci_wake_from_d3(pdev, bp->wol);
14215 pci_set_power_state(pdev, PCI_D3hot);
14216 }
14217
14218 bnx2x_disable_pcie_error_reporting(bp);
14219 if (remove_netdev) {
14220 if (bp->regview)
14221 iounmap(bp->regview);
14222
14223
14224
14225
14226 if (IS_PF(bp)) {
14227 if (bp->doorbells)
14228 iounmap(bp->doorbells);
14229
14230 bnx2x_release_firmware(bp);
14231 } else {
14232 bnx2x_vf_pci_dealloc(bp);
14233 }
14234 bnx2x_free_mem_bp(bp);
14235
14236 free_netdev(dev);
14237
14238 if (atomic_read(&pdev->enable_cnt) == 1)
14239 pci_release_regions(pdev);
14240
14241 pci_disable_device(pdev);
14242 }
14243 }
14244
14245 static void bnx2x_remove_one(struct pci_dev *pdev)
14246 {
14247 struct net_device *dev = pci_get_drvdata(pdev);
14248 struct bnx2x *bp;
14249
14250 if (!dev) {
14251 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
14252 return;
14253 }
14254 bp = netdev_priv(dev);
14255
14256 __bnx2x_remove(pdev, dev, bp, true);
14257 }
14258
14259 static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
14260 {
14261 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
14262
14263 bp->rx_mode = BNX2X_RX_MODE_NONE;
14264
14265 if (CNIC_LOADED(bp))
14266 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
14267
14268
14269 bnx2x_tx_disable(bp);
14270
14271 bnx2x_del_all_napi(bp);
14272 if (CNIC_LOADED(bp))
14273 bnx2x_del_all_napi_cnic(bp);
14274 netdev_reset_tc(bp->dev);
14275
14276 del_timer_sync(&bp->timer);
14277 cancel_delayed_work_sync(&bp->sp_task);
14278 cancel_delayed_work_sync(&bp->period_task);
14279
14280 if (!down_timeout(&bp->stats_lock, HZ / 10)) {
14281 bp->stats_state = STATS_STATE_DISABLED;
14282 up(&bp->stats_lock);
14283 }
14284
14285 bnx2x_save_statistics(bp);
14286
14287 netif_carrier_off(bp->dev);
14288
14289 return 0;
14290 }
14291
14292
14293
14294
14295
14296
14297
14298
14299
14300 static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
14301 pci_channel_state_t state)
14302 {
14303 struct net_device *dev = pci_get_drvdata(pdev);
14304 struct bnx2x *bp = netdev_priv(dev);
14305
14306 rtnl_lock();
14307
14308 BNX2X_ERR("IO error detected\n");
14309
14310 netif_device_detach(dev);
14311
14312 if (state == pci_channel_io_perm_failure) {
14313 rtnl_unlock();
14314 return PCI_ERS_RESULT_DISCONNECT;
14315 }
14316
14317 if (netif_running(dev))
14318 bnx2x_eeh_nic_unload(bp);
14319
14320 bnx2x_prev_path_mark_eeh(bp);
14321
14322 pci_disable_device(pdev);
14323
14324 rtnl_unlock();
14325
14326
14327 return PCI_ERS_RESULT_NEED_RESET;
14328 }
14329
14330
14331
14332
14333
14334
14335
14336 static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
14337 {
14338 struct net_device *dev = pci_get_drvdata(pdev);
14339 struct bnx2x *bp = netdev_priv(dev);
14340 int i;
14341
14342 rtnl_lock();
14343 BNX2X_ERR("IO slot reset initializing...\n");
14344 if (pci_enable_device(pdev)) {
14345 dev_err(&pdev->dev,
14346 "Cannot re-enable PCI device after reset\n");
14347 rtnl_unlock();
14348 return PCI_ERS_RESULT_DISCONNECT;
14349 }
14350
14351 pci_set_master(pdev);
14352 pci_restore_state(pdev);
14353 pci_save_state(pdev);
14354
14355 if (netif_running(dev))
14356 bnx2x_set_power_state(bp, PCI_D0);
14357
14358 if (netif_running(dev)) {
14359 BNX2X_ERR("IO slot reset --> driver unload\n");
14360
14361
14362 if (bnx2x_init_shmem(bp)) {
14363 rtnl_unlock();
14364 return PCI_ERS_RESULT_DISCONNECT;
14365 }
14366
14367 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
14368 u32 v;
14369
14370 v = SHMEM2_RD(bp,
14371 drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
14372 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
14373 v & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
14374 }
14375 bnx2x_drain_tx_queues(bp);
14376 bnx2x_send_unload_req(bp, UNLOAD_RECOVERY);
14377 bnx2x_netif_stop(bp, 1);
14378 bnx2x_free_irq(bp);
14379
14380
14381 bnx2x_send_unload_done(bp, true);
14382
14383 bp->sp_state = 0;
14384 bp->port.pmf = 0;
14385
14386 bnx2x_prev_unload(bp);
14387
14388
14389
14390
14391 bnx2x_squeeze_objects(bp);
14392 bnx2x_free_skbs(bp);
14393 for_each_rx_queue(bp, i)
14394 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
14395 bnx2x_free_fp_mem(bp);
14396 bnx2x_free_mem(bp);
14397
14398 bp->state = BNX2X_STATE_CLOSED;
14399 }
14400
14401 rtnl_unlock();
14402
14403 return PCI_ERS_RESULT_RECOVERED;
14404 }
14405
14406
14407
14408
14409
14410
14411
14412
14413 static void bnx2x_io_resume(struct pci_dev *pdev)
14414 {
14415 struct net_device *dev = pci_get_drvdata(pdev);
14416 struct bnx2x *bp = netdev_priv(dev);
14417
14418 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
14419 netdev_err(bp->dev, "Handling parity error recovery. Try again later\n");
14420 return;
14421 }
14422
14423 rtnl_lock();
14424
14425 bp->fw_seq = SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
14426 DRV_MSG_SEQ_NUMBER_MASK;
14427
14428 if (netif_running(dev))
14429 bnx2x_nic_load(bp, LOAD_NORMAL);
14430
14431 netif_device_attach(dev);
14432
14433 rtnl_unlock();
14434 }
14435
14436 static const struct pci_error_handlers bnx2x_err_handler = {
14437 .error_detected = bnx2x_io_error_detected,
14438 .slot_reset = bnx2x_io_slot_reset,
14439 .resume = bnx2x_io_resume,
14440 };
14441
14442 static void bnx2x_shutdown(struct pci_dev *pdev)
14443 {
14444 struct net_device *dev = pci_get_drvdata(pdev);
14445 struct bnx2x *bp;
14446
14447 if (!dev)
14448 return;
14449
14450 bp = netdev_priv(dev);
14451 if (!bp)
14452 return;
14453
14454 rtnl_lock();
14455 netif_device_detach(dev);
14456 rtnl_unlock();
14457
14458
14459
14460
14461
14462 __bnx2x_remove(pdev, dev, bp, false);
14463 }
14464
14465 static struct pci_driver bnx2x_pci_driver = {
14466 .name = DRV_MODULE_NAME,
14467 .id_table = bnx2x_pci_tbl,
14468 .probe = bnx2x_init_one,
14469 .remove = bnx2x_remove_one,
14470 .suspend = bnx2x_suspend,
14471 .resume = bnx2x_resume,
14472 .err_handler = &bnx2x_err_handler,
14473 #ifdef CONFIG_BNX2X_SRIOV
14474 .sriov_configure = bnx2x_sriov_configure,
14475 #endif
14476 .shutdown = bnx2x_shutdown,
14477 };
14478
14479 static int __init bnx2x_init(void)
14480 {
14481 int ret;
14482
14483 pr_info("%s", version);
14484
14485 bnx2x_wq = create_singlethread_workqueue("bnx2x");
14486 if (bnx2x_wq == NULL) {
14487 pr_err("Cannot create workqueue\n");
14488 return -ENOMEM;
14489 }
14490 bnx2x_iov_wq = create_singlethread_workqueue("bnx2x_iov");
14491 if (!bnx2x_iov_wq) {
14492 pr_err("Cannot create iov workqueue\n");
14493 destroy_workqueue(bnx2x_wq);
14494 return -ENOMEM;
14495 }
14496
14497 ret = pci_register_driver(&bnx2x_pci_driver);
14498 if (ret) {
14499 pr_err("Cannot register driver\n");
14500 destroy_workqueue(bnx2x_wq);
14501 destroy_workqueue(bnx2x_iov_wq);
14502 }
14503 return ret;
14504 }
14505
14506 static void __exit bnx2x_cleanup(void)
14507 {
14508 struct list_head *pos, *q;
14509
14510 pci_unregister_driver(&bnx2x_pci_driver);
14511
14512 destroy_workqueue(bnx2x_wq);
14513 destroy_workqueue(bnx2x_iov_wq);
14514
14515
14516 list_for_each_safe(pos, q, &bnx2x_prev_list) {
14517 struct bnx2x_prev_path_list *tmp =
14518 list_entry(pos, struct bnx2x_prev_path_list, list);
14519 list_del(pos);
14520 kfree(tmp);
14521 }
14522 }
14523
14524 void bnx2x_notify_link_changed(struct bnx2x *bp)
14525 {
14526 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + BP_FUNC(bp)*sizeof(u32), 1);
14527 }
14528
14529 module_init(bnx2x_init);
14530 module_exit(bnx2x_cleanup);
14531
14532
14533
14534
14535
14536
14537
14538
14539
14540
14541 static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp)
14542 {
14543 unsigned long ramrod_flags = 0;
14544
14545 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
14546 return bnx2x_set_mac_one(bp, bp->cnic_eth_dev.iscsi_mac,
14547 &bp->iscsi_l2_mac_obj, true,
14548 BNX2X_ISCSI_ETH_MAC, &ramrod_flags);
14549 }
14550
14551
14552 static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
14553 {
14554 struct eth_spe *spe;
14555 int cxt_index, cxt_offset;
14556
14557 #ifdef BNX2X_STOP_ON_ERROR
14558 if (unlikely(bp->panic))
14559 return;
14560 #endif
14561
14562 spin_lock_bh(&bp->spq_lock);
14563 BUG_ON(bp->cnic_spq_pending < count);
14564 bp->cnic_spq_pending -= count;
14565
14566 for (; bp->cnic_kwq_pending; bp->cnic_kwq_pending--) {
14567 u16 type = (le16_to_cpu(bp->cnic_kwq_cons->hdr.type)
14568 & SPE_HDR_CONN_TYPE) >>
14569 SPE_HDR_CONN_TYPE_SHIFT;
14570 u8 cmd = (le32_to_cpu(bp->cnic_kwq_cons->hdr.conn_and_cmd_data)
14571 >> SPE_HDR_CMD_ID_SHIFT) & 0xff;
14572
14573
14574
14575
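/* Before a CLIENT_SETUP ramrod for the iSCSI L2 client goes out, mark its
 * context as valid so the chip will accept it.
 */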
14576 if (type == ETH_CONNECTION_TYPE) {
14577 if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP) {
14578 cxt_index = BNX2X_ISCSI_ETH_CID(bp) /
14579 ILT_PAGE_CIDS;
14580 cxt_offset = BNX2X_ISCSI_ETH_CID(bp) -
14581 (cxt_index * ILT_PAGE_CIDS);
14582 bnx2x_set_ctx_validation(bp,
14583 &bp->context[cxt_index].
14584 vcxt[cxt_offset].eth,
14585 BNX2X_ISCSI_ETH_CID(bp));
14586 }
14587 }
14588
14589
14590
14591
14592
14593
14594
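/* Credit accounting: ETH SPEs are bounded by the CQ SPQ credit, COMMON
 * (NONE type) ramrods by the EQ credit, and iSCSI/FCoE kwqes by
 * max_kwqe_pending; stop draining the kwq once the relevant pool is
 * exhausted.
 */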
14595 if (type == ETH_CONNECTION_TYPE) {
14596 if (!atomic_read(&bp->cq_spq_left))
14597 break;
14598 else
14599 atomic_dec(&bp->cq_spq_left);
14600 } else if (type == NONE_CONNECTION_TYPE) {
14601 if (!atomic_read(&bp->eq_spq_left))
14602 break;
14603 else
14604 atomic_dec(&bp->eq_spq_left);
14605 } else if ((type == ISCSI_CONNECTION_TYPE) ||
14606 (type == FCOE_CONNECTION_TYPE)) {
14607 if (bp->cnic_spq_pending >=
14608 bp->cnic_eth_dev.max_kwqe_pending)
14609 break;
14610 else
14611 bp->cnic_spq_pending++;
14612 } else {
14613 BNX2X_ERR("Unknown SPE type: %d\n", type);
14614 bnx2x_panic();
14615 break;
14616 }
14617
14618 spe = bnx2x_sp_get_next(bp);
14619 *spe = *bp->cnic_kwq_cons;
14620
14621 DP(BNX2X_MSG_SP, "pending on SPQ %d, on KWQ %d count %d\n",
14622 bp->cnic_spq_pending, bp->cnic_kwq_pending, count);
14623
14624 if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
14625 bp->cnic_kwq_cons = bp->cnic_kwq;
14626 else
14627 bp->cnic_kwq_cons++;
14628 }
14629 bnx2x_sp_prod_update(bp);
14630 spin_unlock_bh(&bp->spq_lock);
14631 }
14632
14633 static int bnx2x_cnic_sp_queue(struct net_device *dev,
14634 struct kwqe_16 *kwqes[], u32 count)
14635 {
14636 struct bnx2x *bp = netdev_priv(dev);
14637 int i;
14638
14639 #ifdef BNX2X_STOP_ON_ERROR
14640 if (unlikely(bp->panic)) {
14641 BNX2X_ERR("Can't post to SP queue while panic\n");
14642 return -EIO;
14643 }
14644 #endif
14645
14646 if ((bp->recovery_state != BNX2X_RECOVERY_DONE) &&
14647 (bp->recovery_state != BNX2X_RECOVERY_NIC_LOADING)) {
14648 BNX2X_ERR("Handling parity error recovery. Try again later\n");
14649 return -EAGAIN;
14650 }
14651
14652 spin_lock_bh(&bp->spq_lock);
14653
14654 for (i = 0; i < count; i++) {
14655 struct eth_spe *spe = (struct eth_spe *)kwqes[i];
14656
14657 if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
14658 break;
14659
14660 *bp->cnic_kwq_prod = *spe;
14661
14662 bp->cnic_kwq_pending++;
14663
14664 DP(BNX2X_MSG_SP, "L5 SPQE %x %x %x:%x pos %d\n",
14665 spe->hdr.conn_and_cmd_data, spe->hdr.type,
14666 spe->data.update_data_addr.hi,
14667 spe->data.update_data_addr.lo,
14668 bp->cnic_kwq_pending);
14669
14670 if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
14671 bp->cnic_kwq_prod = bp->cnic_kwq;
14672 else
14673 bp->cnic_kwq_prod++;
14674 }
14675
14676 spin_unlock_bh(&bp->spq_lock);
14677
14678 if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
14679 bnx2x_cnic_sp_post(bp, 0);
14680
14681 return i;
14682 }
14683
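/* bnx2x_cnic_ctl_send() runs in process context and serializes against
 * (un)registration with cnic_mutex; the _bh variant below is callable from
 * BH context and relies on RCU to safely dereference cnic_ops instead.
 */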
static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
{
	struct cnic_ops *c_ops;
	int rc = 0;

	mutex_lock(&bp->cnic_mutex);
	c_ops = rcu_dereference_protected(bp->cnic_ops,
					  lockdep_is_held(&bp->cnic_mutex));
	if (c_ops)
		rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
	mutex_unlock(&bp->cnic_mutex);

	return rc;
}

static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
{
	struct cnic_ops *c_ops;
	int rc = 0;

	rcu_read_lock();
	c_ops = rcu_dereference(bp->cnic_ops);
	if (c_ops)
		rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
	rcu_read_unlock();

	return rc;
}

/*
 * for commands that have no data
 */
int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
{
	struct cnic_ctl_info ctl = {0};

	ctl.cmd = cmd;

	return bnx2x_cnic_ctl_send(bp, &ctl);
}

static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid, u8 err)
{
	struct cnic_ctl_info ctl = {0};

	/* first we tell CNIC and only then we count this as a completion */
	ctl.cmd = CNIC_CTL_COMPLETION_CMD;
	ctl.data.comp.cid = cid;
	ctl.data.comp.error = err;

	bnx2x_cnic_ctl_send_bh(bp, &ctl);
	bnx2x_cnic_sp_post(bp, 0);
}

/* Called with netif_addr_lock_bh() taken.
 * Sets an rx_mode config for an iSCSI ETH client.
 * Doesn't block.
 * Completion should be checked outside.
 */
static void bnx2x_set_iscsi_eth_rx_mode(struct bnx2x *bp, bool start)
{
	unsigned long accept_flags = 0, ramrod_flags = 0;
	u8 cl_id = bnx2x_cnic_eth_cl_id(bp, BNX2X_ISCSI_ETH_CL_ID_IDX);
	int sched_state = BNX2X_FILTER_ISCSI_ETH_STOP_SCHED;

	if (start) {
		/* Start accepting on iSCSI L2 ring. Accept all multicasts
		 * because it's the only way for UIO Queue to accept
		 * multicasts (in non-promiscuous mode only one Queue per
		 * function will receive multicast packets (leading in our
		 * case).
		 */
		__set_bit(BNX2X_ACCEPT_UNICAST, &accept_flags);
		__set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &accept_flags);
		__set_bit(BNX2X_ACCEPT_BROADCAST, &accept_flags);
		__set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);

		/* Clear STOP_PENDING bit if START is requested */
		clear_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED, &bp->sp_state);

		sched_state = BNX2X_FILTER_ISCSI_ETH_START_SCHED;
	} else
		/* Clear START_PENDING bit if STOP is requested */
		clear_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED, &bp->sp_state);

	if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state))
		set_bit(sched_state, &bp->sp_state);
	else {
		__set_bit(RAMROD_RX, &ramrod_flags);
		bnx2x_set_q_rx_mode(bp, cl_id, 0, accept_flags, 0,
				    ramrod_flags);
	}
}

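/* Main control hook invoked by the CNIC module: dispatches context-table
 * writes, SPQ/EQ credit returns, iSCSI L2 start/stop and ULP (iSCSI/FCoE)
 * register/unregister requests, and finally mirrors the OS driver state to
 * the management FW for storage-only (MF SD) personalities.
 */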
static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	switch (ctl->cmd) {
	case DRV_CTL_CTXTBL_WR_CMD: {
		u32 index = ctl->data.io.offset;
		dma_addr_t addr = ctl->data.io.dma_addr;

		bnx2x_ilt_wr(bp, index, addr);
		break;
	}

	case DRV_CTL_RET_L5_SPQ_CREDIT_CMD: {
		int count = ctl->data.credit.credit_count;

		bnx2x_cnic_sp_post(bp, count);
		break;
	}

	/* rtnl_lock is held.  */
	case DRV_CTL_START_L2_CMD: {
		struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
		unsigned long sp_bits = 0;

		/* Configure the iSCSI classification object */
		bnx2x_init_mac_obj(bp, &bp->iscsi_l2_mac_obj,
				   cp->iscsi_l2_client_id,
				   cp->iscsi_l2_cid, BP_FUNC(bp),
				   bnx2x_sp(bp, mac_rdata),
				   bnx2x_sp_mapping(bp, mac_rdata),
				   BNX2X_FILTER_MAC_PENDING,
				   &bp->sp_state, BNX2X_OBJ_TYPE_RX,
				   &bp->macs_pool);

		/* Set iSCSI MAC address */
		rc = bnx2x_set_iscsi_eth_mac_addr(bp);
		if (rc)
			break;

		barrier();

		/* Start accepting on iSCSI L2 ring */
		netif_addr_lock_bh(dev);
		bnx2x_set_iscsi_eth_rx_mode(bp, true);
		netif_addr_unlock_bh(dev);

		/* bits to wait on */
		__set_bit(BNX2X_FILTER_RX_MODE_PENDING, &sp_bits);
		__set_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED, &sp_bits);

		if (!bnx2x_wait_sp_comp(bp, sp_bits))
			BNX2X_ERR("rx_mode completion timed out!\n");

		break;
	}

	/* rtnl_lock is held.  */
	case DRV_CTL_STOP_L2_CMD: {
		unsigned long sp_bits = 0;

		/* Stop accepting on iSCSI L2 ring */
		netif_addr_lock_bh(dev);
		bnx2x_set_iscsi_eth_rx_mode(bp, false);
		netif_addr_unlock_bh(dev);

		/* bits to wait on */
		__set_bit(BNX2X_FILTER_RX_MODE_PENDING, &sp_bits);
		__set_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED, &sp_bits);

		if (!bnx2x_wait_sp_comp(bp, sp_bits))
			BNX2X_ERR("rx_mode completion timed out!\n");

		barrier();

		/* Unset iSCSI L2 MAC */
		rc = bnx2x_del_all_macs(bp, &bp->iscsi_l2_mac_obj,
					BNX2X_ISCSI_ETH_MAC, true);
		break;
	}
	case DRV_CTL_RET_L2_SPQ_CREDIT_CMD: {
		int count = ctl->data.credit.credit_count;

		smp_mb__before_atomic();
		atomic_add(count, &bp->cq_spq_left);
		smp_mb__after_atomic();
		break;
	}
	case DRV_CTL_ULP_REGISTER_CMD: {
		int ulp_type = ctl->data.register_data.ulp_type;

		if (CHIP_IS_E3(bp)) {
			int idx = BP_FW_MB_IDX(bp);
			u32 cap = SHMEM2_RD(bp, drv_capabilities_flag[idx]);
			int path = BP_PATH(bp);
			int port = BP_PORT(bp);
			int i;
			u32 scratch_offset;
			u32 *host_addr;

			/* first write capability to shmem2 */
			if (ulp_type == CNIC_ULP_ISCSI)
				cap |= DRV_FLAGS_CAPABILITIES_LOADED_ISCSI;
			else if (ulp_type == CNIC_ULP_FCOE)
				cap |= DRV_FLAGS_CAPABILITIES_LOADED_FCOE;
			SHMEM2_WR(bp, drv_capabilities_flag[idx], cap);

			if ((ulp_type != CNIC_ULP_FCOE) ||
			    (!SHMEM2_HAS(bp, ncsi_oem_data_addr)) ||
			    (!(bp->flags & BC_SUPPORTS_FCOE_FEATURES)))
				break;

			/* if reached here - should write fcoe capabilities */
			scratch_offset = SHMEM2_RD(bp, ncsi_oem_data_addr);
			if (!scratch_offset)
				break;
			scratch_offset += offsetof(struct glob_ncsi_oem_data,
						   fcoe_features[path][port]);
			host_addr = (u32 *) &(ctl->data.register_data.
					      fcoe_features);
			for (i = 0; i < sizeof(struct fcoe_capabilities);
			     i += 4)
				REG_WR(bp, scratch_offset + i,
				       *(host_addr + i/4));
		}
		bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0);
		break;
	}

	case DRV_CTL_ULP_UNREGISTER_CMD: {
		int ulp_type = ctl->data.ulp_type;

		if (CHIP_IS_E3(bp)) {
			int idx = BP_FW_MB_IDX(bp);
			u32 cap;

			cap = SHMEM2_RD(bp, drv_capabilities_flag[idx]);
			if (ulp_type == CNIC_ULP_ISCSI)
				cap &= ~DRV_FLAGS_CAPABILITIES_LOADED_ISCSI;
			else if (ulp_type == CNIC_ULP_FCOE)
				cap &= ~DRV_FLAGS_CAPABILITIES_LOADED_FCOE;
			SHMEM2_WR(bp, drv_capabilities_flag[idx], cap);
		}
		bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0);
		break;
	}

	default:
		BNX2X_ERR("unknown command %x\n", ctl->cmd);
		rc = -EINVAL;
	}

	/* For storage-only interfaces, change driver state */
	if (IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp)) {
		switch (ctl->drv_state) {
		case DRV_NOP:
			break;
		case DRV_ACTIVE:
			bnx2x_set_os_driver_state(bp,
						  OS_DRIVER_STATE_ACTIVE);
			break;
		case DRV_INACTIVE:
			bnx2x_set_os_driver_state(bp,
						  OS_DRIVER_STATE_DISABLED);
			break;
		case DRV_UNLOADED:
			bnx2x_set_os_driver_state(bp,
						  OS_DRIVER_STATE_NOT_LOADED);
			break;
		default:
			BNX2X_ERR("Unknown cnic driver state: %d\n",
				  ctl->drv_state);
		}
	}

	return rc;
}

static int bnx2x_get_fc_npiv(struct net_device *dev,
			     struct cnic_fc_npiv_tbl *cnic_tbl)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct bdn_fc_npiv_tbl *tbl = NULL;
	u32 offset, entries;
	int rc = -EINVAL;
	int i;

	if (!SHMEM2_HAS(bp, fc_npiv_nvram_tbl_addr[0]))
		goto out;

	DP(BNX2X_MSG_MCP, "About to read the FC-NPIV table\n");

	tbl = kmalloc(sizeof(*tbl), GFP_KERNEL);
	if (!tbl) {
		BNX2X_ERR("Failed to allocate fc_npiv table\n");
		goto out;
	}

	offset = SHMEM2_RD(bp, fc_npiv_nvram_tbl_addr[BP_PORT(bp)]);
	if (!offset) {
		DP(BNX2X_MSG_MCP, "No FC-NPIV in NVRAM\n");
		goto out;
	}
	DP(BNX2X_MSG_MCP, "Offset of FC-NPIV in NVRAM: %08x\n", offset);

	/* Read the table contents from nvram */
	if (bnx2x_nvram_read(bp, offset, (u8 *)tbl, sizeof(*tbl))) {
		BNX2X_ERR("Failed to read FC-NPIV table\n");
		goto out;
	}

	/* The entry count is stored big-endian in NVRAM; convert it back to
	 * CPU endianness before validating and using it.
	 */
	entries = tbl->fc_npiv_cfg.num_of_npiv;
	entries = (__force u32)be32_to_cpu((__force __be32)entries);
	tbl->fc_npiv_cfg.num_of_npiv = entries;

	if (!tbl->fc_npiv_cfg.num_of_npiv) {
		DP(BNX2X_MSG_MCP,
		   "No FC-NPIV table [valid, simply not present]\n");
		goto out;
	} else if (tbl->fc_npiv_cfg.num_of_npiv > MAX_NUMBER_NPIV) {
		BNX2X_ERR("FC-NPIV table with bad length 0x%08x\n",
			  tbl->fc_npiv_cfg.num_of_npiv);
		goto out;
	} else {
		DP(BNX2X_MSG_MCP, "Read 0x%08x entries from NVRAM\n",
		   tbl->fc_npiv_cfg.num_of_npiv);
	}

	/* Copy the data into cnic-provided struct */
	cnic_tbl->count = tbl->fc_npiv_cfg.num_of_npiv;
	for (i = 0; i < cnic_tbl->count; i++) {
		memcpy(cnic_tbl->wwpn[i], tbl->settings[i].npiv_wwpn, 8);
		memcpy(cnic_tbl->wwnn[i], tbl->settings[i].npiv_wwnn, 8);
	}

	rc = 0;
out:
	kfree(tbl);
	return rc;
}

void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
{
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (bp->flags & USING_MSIX_FLAG) {
		cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
		cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
		cp->irq_arr[0].vector = bp->msix_table[1].vector;
	} else {
		cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
		cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
	}
	if (!CHIP_IS_E1x(bp))
		cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e2_sb;
	else
		cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e1x_sb;

	cp->irq_arr[0].status_blk_num = bnx2x_cnic_fw_sb_id(bp);
	cp->irq_arr[0].status_blk_num2 = bnx2x_cnic_igu_sb_id(bp);
	cp->irq_arr[1].status_blk = bp->def_status_blk;
	cp->irq_arr[1].status_blk_num = DEF_SB_ID;
	cp->irq_arr[1].status_blk_num2 = DEF_SB_IGU_ID;

	cp->num_irq = 2;
}

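/* Refresh the CID layout published to CNIC. Presumably called when the L2
 * CID footprint may have changed (e.g. a different number of ETH queues),
 * so CNIC sees up-to-date starting/iSCSI/FCoE CIDs.
 */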
void bnx2x_setup_cnic_info(struct bnx2x *bp)
{
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) +
			     bnx2x_cid_ilt_lines(bp);
	cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS;
	cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID(bp);
	cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID(bp);

	DP(NETIF_MSG_IFUP, "BNX2X_1st_NON_L2_ETH_CID(bp) %x, cp->starting_cid %x, cp->fcoe_init_cid %x, cp->iscsi_l2_cid %x\n",
	   BNX2X_1st_NON_L2_ETH_CID(bp), cp->starting_cid, cp->fcoe_init_cid,
	   cp->iscsi_l2_cid);

	if (NO_ISCSI_OOO(bp))
		cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO;
}

static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
			       void *data)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
	int rc;

	DP(NETIF_MSG_IFUP, "Register_cnic called\n");

	if (ops == NULL) {
		BNX2X_ERR("NULL ops received\n");
		return -EINVAL;
	}

	if (!CNIC_SUPPORT(bp)) {
		BNX2X_ERR("Can't register CNIC when not supported\n");
		return -EOPNOTSUPP;
	}

	if (!CNIC_LOADED(bp)) {
		rc = bnx2x_load_cnic(bp);
		if (rc) {
			BNX2X_ERR("CNIC-related load failed\n");
			return rc;
		}
	}

	bp->cnic_enabled = true;

	bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!bp->cnic_kwq)
		return -ENOMEM;

	bp->cnic_kwq_cons = bp->cnic_kwq;
	bp->cnic_kwq_prod = bp->cnic_kwq;
	bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;

	bp->cnic_spq_pending = 0;
	bp->cnic_kwq_pending = 0;

	bp->cnic_data = data;

	cp->num_irq = 0;
	cp->drv_state |= CNIC_DRV_STATE_REGD;
	cp->iro_arr = bp->iro_arr;

	bnx2x_setup_cnic_irq_info(bp);

	rcu_assign_pointer(bp->cnic_ops, ops);

	/* Schedule driver to read CNIC driver versions */
	bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0);

	return 0;
}

static int bnx2x_unregister_cnic(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	mutex_lock(&bp->cnic_mutex);
	cp->drv_state = 0;
	RCU_INIT_POINTER(bp->cnic_ops, NULL);
	mutex_unlock(&bp->cnic_mutex);
	synchronize_rcu();
	bp->cnic_enabled = false;
	kfree(bp->cnic_kwq);
	bp->cnic_kwq = NULL;

	return 0;
}

static struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	/* If both iSCSI and FCoE are disabled - return NULL in
	 * order to indicate CNIC that it should not try to work
	 * with this device.
	 */
	if (NO_ISCSI(bp) && NO_FCOE(bp))
		return NULL;

	cp->drv_owner = THIS_MODULE;
	cp->chip_id = CHIP_ID(bp);
	cp->pdev = bp->pdev;
	cp->io_base = bp->regview;
	cp->io_base2 = bp->doorbells;
	cp->max_kwqe_pending = 8;
	cp->ctx_blk_size = CDU_ILT_PAGE_SZ;
	cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) +
			     bnx2x_cid_ilt_lines(bp);
	cp->ctx_tbl_len = CNIC_ILT_LINES;
	cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS;
	cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
	cp->drv_ctl = bnx2x_drv_ctl;
	cp->drv_get_fc_npiv_tbl = bnx2x_get_fc_npiv;
	cp->drv_register_cnic = bnx2x_register_cnic;
	cp->drv_unregister_cnic = bnx2x_unregister_cnic;
	cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID(bp);
	cp->iscsi_l2_client_id =
		bnx2x_cnic_eth_cl_id(bp, BNX2X_ISCSI_ETH_CL_ID_IDX);
	cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID(bp);

	if (NO_ISCSI_OOO(bp))
		cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO;

	if (NO_ISCSI(bp))
		cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI;

	if (NO_FCOE(bp))
		cp->drv_state |= CNIC_DRV_STATE_NO_FCOE;

	BNX2X_DEV_INFO(
		"page_size %d, tbl_offset %d, tbl_lines %d, starting cid %d\n",
		cp->ctx_blk_size,
		cp->ctx_tbl_offset,
		cp->ctx_tbl_len,
		cp->starting_cid);
	return cp;
}

static u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp)
{
	struct bnx2x *bp = fp->bp;
	u32 offset = BAR_USTRORM_INTMEM;

	if (IS_VF(bp))
		return bnx2x_vf_ustorm_prods_offset(bp, fp);
	else if (!CHIP_IS_E1x(bp))
		offset += USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id);
	else
		offset += USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), fp->cl_id);

	return offset;
}

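/**
 * bnx2x_pretend_func - pretend to be another function
 *
 * @bp:		driver handle
 * @pretend_func_val: function to pretend as
 *
 * If pretend_func_val is not allowed (per the CHIP_IS_E1H() check below),
 * -1 is returned.
 */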
int bnx2x_pretend_func(struct bnx2x *bp, u16 pretend_func_val)
{
	u32 pretend_reg;

	if (CHIP_IS_E1H(bp) && pretend_func_val >= E1H_FUNC_MAX)
		return -1;

	/* get my own pretend register */
	pretend_reg = bnx2x_get_pretend_reg(bp);
	REG_WR(bp, pretend_reg, pretend_func_val);
	REG_RD(bp, pretend_reg);
	return 0;
}

static void bnx2x_ptp_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, ptp_task);
	int port = BP_PORT(bp);
	u32 val_seq;
	u64 timestamp, ns;
	struct skb_shared_hwtstamps shhwtstamps;
	bool bail = true;
	int i;

	/* FW may take a while to record the Tx timestamp; poll with an
	 * exponentially growing sleep between attempts.
	 */
	for (i = 0; i < 10; i++) {
		/* Read Tx timestamp registers */
		val_seq = REG_RD(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_SEQID :
				 NIG_REG_P0_TLLH_PTP_BUF_SEQID);
		if (val_seq & 0x10000) {
			bail = false;
			break;
		}
		msleep(1 << i);
	}

	if (!bail) {
		/* There is a valid timestamp value - read it */
		timestamp = REG_RD(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_TS_MSB :
				   NIG_REG_P0_TLLH_PTP_BUF_TS_MSB);
		timestamp <<= 32;
		timestamp |= REG_RD(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_TS_LSB :
				    NIG_REG_P0_TLLH_PTP_BUF_TS_LSB);
		/* Reset timestamp register to allow new timestamp */
		REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_SEQID :
		       NIG_REG_P0_TLLH_PTP_BUF_SEQID, 0x10000);
		ns = timecounter_cyc2time(&bp->timecounter, timestamp);

		memset(&shhwtstamps, 0, sizeof(shhwtstamps));
		shhwtstamps.hwtstamp = ns_to_ktime(ns);
		skb_tstamp_tx(bp->ptp_tx_skb, &shhwtstamps);

		DP(BNX2X_MSG_PTP, "Tx timestamp, timestamp cycles = %llu, ns = %llu\n",
		   timestamp, ns);
	} else {
		DP(BNX2X_MSG_PTP,
		   "Tx timestamp is not recorded (register read=%u)\n",
		   val_seq);
		bp->eth_stats.ptp_skip_tx_ts++;
	}

	dev_kfree_skb_any(bp->ptp_tx_skb);
	bp->ptp_tx_skb = NULL;
}

void bnx2x_set_rx_ts(struct bnx2x *bp, struct sk_buff *skb)
{
	int port = BP_PORT(bp);
	u64 timestamp, ns;

	timestamp = REG_RD(bp, port ? NIG_REG_P1_LLH_PTP_HOST_BUF_TS_MSB :
			   NIG_REG_P0_LLH_PTP_HOST_BUF_TS_MSB);
	timestamp <<= 32;
	timestamp |= REG_RD(bp, port ? NIG_REG_P1_LLH_PTP_HOST_BUF_TS_LSB :
			    NIG_REG_P0_LLH_PTP_HOST_BUF_TS_LSB);

	/* Reset timestamp register to allow new timestamp */
	REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_HOST_BUF_SEQID :
	       NIG_REG_P0_LLH_PTP_HOST_BUF_SEQID, 0x10000);

	ns = timecounter_cyc2time(&bp->timecounter, timestamp);

	skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns);

	DP(BNX2X_MSG_PTP, "Rx timestamp, timestamp cycles = %llu, ns = %llu\n",
	   timestamp, ns);
}

/* Read the PHC */
static u64 bnx2x_cyclecounter_read(const struct cyclecounter *cc)
{
	struct bnx2x *bp = container_of(cc, struct bnx2x, cyclecounter);
	int port = BP_PORT(bp);
	u32 wb_data[2];
	u64 phc_cycles;

	REG_RD_DMAE(bp, port ? NIG_REG_TIMESYNC_GEN_REG + tsgen_synctime_t1 :
		    NIG_REG_TIMESYNC_GEN_REG + tsgen_synctime_t0, wb_data, 2);
	phc_cycles = wb_data[1];
	phc_cycles = (phc_cycles << 32) + wb_data[0];

	DP(BNX2X_MSG_PTP, "PHC read cycles = %llu\n", phc_cycles);

	return phc_cycles;
}

static void bnx2x_init_cyclecounter(struct bnx2x *bp)
{
	memset(&bp->cyclecounter, 0, sizeof(bp->cyclecounter));
	bp->cyclecounter.read = bnx2x_cyclecounter_read;
	bp->cyclecounter.mask = CYCLECOUNTER_MASK(64);
	bp->cyclecounter.shift = 0;
	bp->cyclecounter.mult = 1;
}
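/* With mult = 1 and shift = 0 above, timecounter_cyc2time() maps counter
 * ticks 1:1 to nanoseconds; the free-running PHC is assumed to tick once
 * per nanosecond over the full 64-bit mask.
 */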

static int bnx2x_send_reset_timesync_ramrod(struct bnx2x *bp)
{
	struct bnx2x_func_state_params func_params = {NULL};
	struct bnx2x_func_set_timesync_params *set_timesync_params =
		&func_params.params.set_timesync;

	/* Prepare parameters for function state transitions */
	__set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
	__set_bit(RAMROD_RETRY, &func_params.ramrod_flags);

	func_params.f_obj = &bp->func_obj;
	func_params.cmd = BNX2X_F_CMD_SET_TIMESYNC;

	/* Function parameters */
	set_timesync_params->drift_adjust_cmd = TS_DRIFT_ADJUST_RESET;
	set_timesync_params->offset_cmd = TS_OFFSET_KEEP;

	return bnx2x_func_state_change(bp, &func_params);
}

static int bnx2x_enable_ptp_packets(struct bnx2x *bp)
{
	struct bnx2x_queue_state_params q_params;
	int rc, i;

	/* send queue update ramrod to enable PTP packets */
	memset(&q_params, 0, sizeof(q_params));
	__set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
	q_params.cmd = BNX2X_Q_CMD_UPDATE;
	__set_bit(BNX2X_Q_UPDATE_PTP_PKTS_CHNG,
		  &q_params.params.update.update_flags);
	__set_bit(BNX2X_Q_UPDATE_PTP_PKTS,
		  &q_params.params.update.update_flags);

	/* send the ramrod on all the queues of the PF */
	for_each_eth_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		/* Set the appropriate Queue object */
		q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;

		/* Update the Queue state */
		rc = bnx2x_queue_state_change(bp, &q_params);
		if (rc) {
			BNX2X_ERR("Failed to enable PTP packets\n");
			return rc;
		}
	}

	return 0;
}

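/* The NIG classifies PTP frames with a pair of registers per direction: a
 * "param" mask selecting which header fields are compared and a "rule" mask
 * selecting which PTP message types match. The bits appear to be active-low:
 * the masks below AND bits out of the base P2P detection masks to enable the
 * corresponding checks, while bnx2x_configure_ptp() writes all-ones (0x7FF /
 * 0x3FFF) to disable detection entirely.
 */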
#define BNX2X_P2P_DETECT_PARAM_MASK 0x5F5
#define BNX2X_P2P_DETECT_RULE_MASK 0x3DBB
#define BNX2X_PTP_TX_ON_PARAM_MASK (BNX2X_P2P_DETECT_PARAM_MASK & 0x6AA)
#define BNX2X_PTP_TX_ON_RULE_MASK (BNX2X_P2P_DETECT_RULE_MASK & 0x3EEE)
#define BNX2X_PTP_V1_L4_PARAM_MASK (BNX2X_P2P_DETECT_PARAM_MASK & 0x7EE)
#define BNX2X_PTP_V1_L4_RULE_MASK (BNX2X_P2P_DETECT_RULE_MASK & 0x3FFE)
#define BNX2X_PTP_V2_L4_PARAM_MASK (BNX2X_P2P_DETECT_PARAM_MASK & 0x7EA)
#define BNX2X_PTP_V2_L4_RULE_MASK (BNX2X_P2P_DETECT_RULE_MASK & 0x3FEE)
#define BNX2X_PTP_V2_L2_PARAM_MASK (BNX2X_P2P_DETECT_PARAM_MASK & 0x6BF)
#define BNX2X_PTP_V2_L2_RULE_MASK (BNX2X_P2P_DETECT_RULE_MASK & 0x3EFF)
#define BNX2X_PTP_V2_PARAM_MASK (BNX2X_P2P_DETECT_PARAM_MASK & 0x6AA)
#define BNX2X_PTP_V2_RULE_MASK (BNX2X_P2P_DETECT_RULE_MASK & 0x3EEE)

int bnx2x_configure_ptp_filters(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 param, rule;
	int rc;

	if (!bp->hwtstamp_ioctl_called)
		return 0;

	param = port ? NIG_REG_P1_TLLH_PTP_PARAM_MASK :
		NIG_REG_P0_TLLH_PTP_PARAM_MASK;
	rule = port ? NIG_REG_P1_TLLH_PTP_RULE_MASK :
		NIG_REG_P0_TLLH_PTP_RULE_MASK;
	switch (bp->tx_type) {
	case HWTSTAMP_TX_ON:
		bp->flags |= TX_TIMESTAMPING_EN;
		REG_WR(bp, param, BNX2X_PTP_TX_ON_PARAM_MASK);
		REG_WR(bp, rule, BNX2X_PTP_TX_ON_RULE_MASK);
		break;
	case HWTSTAMP_TX_ONESTEP_SYNC:
		BNX2X_ERR("One-step timestamping is not supported\n");
		return -ERANGE;
	}

	param = port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
		NIG_REG_P0_LLH_PTP_PARAM_MASK;
	rule = port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
		NIG_REG_P0_LLH_PTP_RULE_MASK;
	switch (bp->rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		break;
	case HWTSTAMP_FILTER_ALL:
	case HWTSTAMP_FILTER_SOME:
	case HWTSTAMP_FILTER_NTP_ALL:
		bp->rx_filter = HWTSTAMP_FILTER_NONE;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
		bp->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
		/* Initialize PTP detection for UDP/IPv4 events */
		REG_WR(bp, param, BNX2X_PTP_V1_L4_PARAM_MASK);
		REG_WR(bp, rule, BNX2X_PTP_V1_L4_RULE_MASK);
		break;
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
		bp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
		/* Initialize PTP detection for UDP/IPv4 or UDP/IPv6 events */
		REG_WR(bp, param, BNX2X_PTP_V2_L4_PARAM_MASK);
		REG_WR(bp, rule, BNX2X_PTP_V2_L4_RULE_MASK);
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
		bp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
		/* Initialize PTP detection for L2 events */
		REG_WR(bp, param, BNX2X_PTP_V2_L2_PARAM_MASK);
		REG_WR(bp, rule, BNX2X_PTP_V2_L2_RULE_MASK);
		break;
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		bp->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
		/* Initialize PTP detection for L2, UDP/IPv4 or UDP/IPv6 */
		REG_WR(bp, param, BNX2X_PTP_V2_PARAM_MASK);
		REG_WR(bp, rule, BNX2X_PTP_V2_RULE_MASK);
		break;
	}

	/* Indicate to FW that this PF expects recorded PTP packets */
	rc = bnx2x_enable_ptp_packets(bp);
	if (rc)
		return rc;

	/* Direct detected Rx PTP packets to the host buffer */
	REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_TO_HOST :
	       NIG_REG_P0_LLH_PTP_TO_HOST, 0x1);

	return 0;
}

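/* Handler for the SIOCSHWTSTAMP ioctl. As a rough illustration (not part of
 * this driver), a userspace consumer would typically reach this path with:
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
 *	};
 *	struct ifreq ifr = {0};
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);  (interface name is a
 *						       placeholder)
 *	ifr.ifr_data = (void *)&cfg;
 *	ioctl(sock_fd, SIOCSHWTSTAMP, &ifr);
 *
 * Note that the driver may widen the requested rx_filter (e.g. a SYNC-only
 * request becomes the corresponding EVENT filter above) and reports the
 * effective value back in the returned config.
 */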
static int bnx2x_hwtstamp_ioctl(struct bnx2x *bp, struct ifreq *ifr)
{
	struct hwtstamp_config config;
	int rc;

	DP(BNX2X_MSG_PTP, "HWTSTAMP IOCTL called\n");

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	DP(BNX2X_MSG_PTP, "Requested tx_type: %d, requested rx_filters = %d\n",
	   config.tx_type, config.rx_filter);

	if (config.flags) {
		BNX2X_ERR("config.flags is reserved for future use\n");
		return -EINVAL;
	}

	bp->hwtstamp_ioctl_called = 1;
	bp->tx_type = config.tx_type;
	bp->rx_filter = config.rx_filter;

	rc = bnx2x_configure_ptp_filters(bp);
	if (rc)
		return rc;

	config.rx_filter = bp->rx_filter;

	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
	       -EFAULT : 0;
}

/* Configures HW for PTP */
static int bnx2x_configure_ptp(struct bnx2x *bp)
{
	int rc, port = BP_PORT(bp);
	u32 wb_data[2];

	/* Reset PTP event detection rules - will be configured in
	 * bnx2x_configure_ptp_filters()
	 */
	REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
	       NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x7FF);
	REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
	       NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3FFF);
	REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_PARAM_MASK :
	       NIG_REG_P0_TLLH_PTP_PARAM_MASK, 0x7FF);
	REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_RULE_MASK :
	       NIG_REG_P0_TLLH_PTP_RULE_MASK, 0x3FFF);

	/* Disable PTP packets to host - will be enabled once needed */
	REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_TO_HOST :
	       NIG_REG_P0_LLH_PTP_TO_HOST, 0x0);

	/* Enable PTP in the NIG */
	REG_WR(bp, port ? NIG_REG_P1_PTP_EN :
	       NIG_REG_P0_PTP_EN, 0x3F);

	/* Zero the timesync generator control register */
	wb_data[0] = 0;
	wb_data[1] = 0;
	REG_WR_DMAE(bp, NIG_REG_TIMESYNC_GEN_REG + tsgen_ctrl, wb_data, 2);

	/* Reset drift register (offset register is not affected) */
	rc = bnx2x_send_reset_timesync_ramrod(bp);
	if (rc) {
		BNX2X_ERR("Failed to reset PHC drift register\n");
		return -EFAULT;
	}

	/* Reset possibly old timestamps */
	REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_HOST_BUF_SEQID :
	       NIG_REG_P0_LLH_PTP_HOST_BUF_SEQID, 0x10000);
	REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_SEQID :
	       NIG_REG_P0_TLLH_PTP_BUF_SEQID, 0x10000);

	return 0;
}

/* Called during load, to initialize PTP-related stuff */
void bnx2x_init_ptp(struct bnx2x *bp)
{
	int rc;

	/* Configure PTP in HW */
	rc = bnx2x_configure_ptp(bp);
	if (rc) {
		BNX2X_ERR("Stopping PTP initialization\n");
		return;
	}

	/* Init work queue for Tx timestamping */
	INIT_WORK(&bp->ptp_task, bnx2x_ptp_task);

	/* Init cyclecounter and timecounter. This is done only in the first
	 * load. If done in every load, PTP application will fail when doing
	 * unload / load (e.g. MTU change) while it is running.
	 */
	if (!bp->timecounter_init_done) {
		bnx2x_init_cyclecounter(bp);
		timecounter_init(&bp->timecounter, &bp->cyclecounter,
				 ktime_to_ns(ktime_get_real()));
		bp->timecounter_init_done = 1;
	}

	DP(BNX2X_MSG_PTP, "PTP initialization ended successfully\n");
}