root/drivers/infiniband/hw/hfi1/chip.c

/* [<][>][^][v][top][bottom][index][help] */

DEFINITIONS

This source file includes following definitions.
  1. hfi1_addr_from_offset
  2. read_csr
  3. write_csr
  4. get_csr_addr
  5. read_write_csr
  6. dev_access_u32_csr
  7. access_sde_err_cnt
  8. access_sde_int_cnt
  9. access_sde_idle_int_cnt
  10. access_sde_progress_int_cnt
  11. dev_access_u64_csr
  12. dc_access_lcb_cntr
  13. port_access_u32_csr
  14. port_access_u64_csr
  15. read_write_sw
  16. access_sw_link_dn_cnt
  17. access_sw_link_up_cnt
  18. access_sw_unknown_frame_cnt
  19. access_sw_xmit_discards
  20. access_xmit_constraint_errs
  21. access_rcv_constraint_errs
  22. get_all_cpu_total
  23. read_write_cpu
  24. access_sw_cpu_intr
  25. access_sw_cpu_rcv_limit
  26. access_sw_pio_wait
  27. access_sw_pio_drain
  28. access_sw_ctx0_seq_drop
  29. access_sw_vtx_wait
  30. access_sw_kmem_wait
  31. access_sw_send_schedule
  32. access_misc_pll_lock_fail_err_cnt
  33. access_misc_mbist_fail_err_cnt
  34. access_misc_invalid_eep_cmd_err_cnt
  35. access_misc_efuse_done_parity_err_cnt
  36. access_misc_efuse_write_err_cnt
  37. access_misc_efuse_read_bad_addr_err_cnt
  38. access_misc_efuse_csr_parity_err_cnt
  39. access_misc_fw_auth_failed_err_cnt
  40. access_misc_key_mismatch_err_cnt
  41. access_misc_sbus_write_failed_err_cnt
  42. access_misc_csr_write_bad_addr_err_cnt
  43. access_misc_csr_read_bad_addr_err_cnt
  44. access_misc_csr_parity_err_cnt
  45. access_sw_cce_err_status_aggregated_cnt
  46. access_cce_msix_csr_parity_err_cnt
  47. access_cce_int_map_unc_err_cnt
  48. access_cce_int_map_cor_err_cnt
  49. access_cce_msix_table_unc_err_cnt
  50. access_cce_msix_table_cor_err_cnt
  51. access_cce_rxdma_conv_fifo_parity_err_cnt
  52. access_cce_rcpl_async_fifo_parity_err_cnt
  53. access_cce_seg_write_bad_addr_err_cnt
  54. access_cce_seg_read_bad_addr_err_cnt
  55. access_la_triggered_cnt
  56. access_cce_trgt_cpl_timeout_err_cnt
  57. access_pcic_receive_parity_err_cnt
  58. access_pcic_transmit_back_parity_err_cnt
  59. access_pcic_transmit_front_parity_err_cnt
  60. access_pcic_cpl_dat_q_unc_err_cnt
  61. access_pcic_cpl_hd_q_unc_err_cnt
  62. access_pcic_post_dat_q_unc_err_cnt
  63. access_pcic_post_hd_q_unc_err_cnt
  64. access_pcic_retry_sot_mem_unc_err_cnt
  65. access_pcic_retry_mem_unc_err
  66. access_pcic_n_post_dat_q_parity_err_cnt
  67. access_pcic_n_post_h_q_parity_err_cnt
  68. access_pcic_cpl_dat_q_cor_err_cnt
  69. access_pcic_cpl_hd_q_cor_err_cnt
  70. access_pcic_post_dat_q_cor_err_cnt
  71. access_pcic_post_hd_q_cor_err_cnt
  72. access_pcic_retry_sot_mem_cor_err_cnt
  73. access_pcic_retry_mem_cor_err_cnt
  74. access_cce_cli1_async_fifo_dbg_parity_err_cnt
  75. access_cce_cli1_async_fifo_rxdma_parity_err_cnt
  76. access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt
  77. access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt
  78. access_cce_cli2_async_fifo_parity_err_cnt
  79. access_cce_csr_cfg_bus_parity_err_cnt
  80. access_cce_cli0_async_fifo_parity_err_cnt
  81. access_cce_rspd_data_parity_err_cnt
  82. access_cce_trgt_access_err_cnt
  83. access_cce_trgt_async_fifo_parity_err_cnt
  84. access_cce_csr_write_bad_addr_err_cnt
  85. access_cce_csr_read_bad_addr_err_cnt
  86. access_ccs_csr_parity_err_cnt
  87. access_rx_csr_parity_err_cnt
  88. access_rx_csr_write_bad_addr_err_cnt
  89. access_rx_csr_read_bad_addr_err_cnt
  90. access_rx_dma_csr_unc_err_cnt
  91. access_rx_dma_dq_fsm_encoding_err_cnt
  92. access_rx_dma_eq_fsm_encoding_err_cnt
  93. access_rx_dma_csr_parity_err_cnt
  94. access_rx_rbuf_data_cor_err_cnt
  95. access_rx_rbuf_data_unc_err_cnt
  96. access_rx_dma_data_fifo_rd_cor_err_cnt
  97. access_rx_dma_data_fifo_rd_unc_err_cnt
  98. access_rx_dma_hdr_fifo_rd_cor_err_cnt
  99. access_rx_dma_hdr_fifo_rd_unc_err_cnt
  100. access_rx_rbuf_desc_part2_cor_err_cnt
  101. access_rx_rbuf_desc_part2_unc_err_cnt
  102. access_rx_rbuf_desc_part1_cor_err_cnt
  103. access_rx_rbuf_desc_part1_unc_err_cnt
  104. access_rx_hq_intr_fsm_err_cnt
  105. access_rx_hq_intr_csr_parity_err_cnt
  106. access_rx_lookup_csr_parity_err_cnt
  107. access_rx_lookup_rcv_array_cor_err_cnt
  108. access_rx_lookup_rcv_array_unc_err_cnt
  109. access_rx_lookup_des_part2_parity_err_cnt
  110. access_rx_lookup_des_part1_unc_cor_err_cnt
  111. access_rx_lookup_des_part1_unc_err_cnt
  112. access_rx_rbuf_next_free_buf_cor_err_cnt
  113. access_rx_rbuf_next_free_buf_unc_err_cnt
  114. access_rbuf_fl_init_wr_addr_parity_err_cnt
  115. access_rx_rbuf_fl_initdone_parity_err_cnt
  116. access_rx_rbuf_fl_write_addr_parity_err_cnt
  117. access_rx_rbuf_fl_rd_addr_parity_err_cnt
  118. access_rx_rbuf_empty_err_cnt
  119. access_rx_rbuf_full_err_cnt
  120. access_rbuf_bad_lookup_err_cnt
  121. access_rbuf_ctx_id_parity_err_cnt
  122. access_rbuf_csr_qeopdw_parity_err_cnt
  123. access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt
  124. access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt
  125. access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt
  126. access_rx_rbuf_csr_q_vld_bit_parity_err_cnt
  127. access_rx_rbuf_csr_q_next_buf_parity_err_cnt
  128. access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt
  129. access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt
  130. access_rx_rbuf_block_list_read_cor_err_cnt
  131. access_rx_rbuf_block_list_read_unc_err_cnt
  132. access_rx_rbuf_lookup_des_cor_err_cnt
  133. access_rx_rbuf_lookup_des_unc_err_cnt
  134. access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt
  135. access_rx_rbuf_lookup_des_reg_unc_err_cnt
  136. access_rx_rbuf_free_list_cor_err_cnt
  137. access_rx_rbuf_free_list_unc_err_cnt
  138. access_rx_rcv_fsm_encoding_err_cnt
  139. access_rx_dma_flag_cor_err_cnt
  140. access_rx_dma_flag_unc_err_cnt
  141. access_rx_dc_sop_eop_parity_err_cnt
  142. access_rx_rcv_csr_parity_err_cnt
  143. access_rx_rcv_qp_map_table_cor_err_cnt
  144. access_rx_rcv_qp_map_table_unc_err_cnt
  145. access_rx_rcv_data_cor_err_cnt
  146. access_rx_rcv_data_unc_err_cnt
  147. access_rx_rcv_hdr_cor_err_cnt
  148. access_rx_rcv_hdr_unc_err_cnt
  149. access_rx_dc_intf_parity_err_cnt
  150. access_rx_dma_csr_cor_err_cnt
  151. access_pio_pec_sop_head_parity_err_cnt
  152. access_pio_pcc_sop_head_parity_err_cnt
  153. access_pio_last_returned_cnt_parity_err_cnt
  154. access_pio_current_free_cnt_parity_err_cnt
  155. access_pio_reserved_31_err_cnt
  156. access_pio_reserved_30_err_cnt
  157. access_pio_ppmc_sop_len_err_cnt
  158. access_pio_ppmc_bqc_mem_parity_err_cnt
  159. access_pio_vl_fifo_parity_err_cnt
  160. access_pio_vlf_sop_parity_err_cnt
  161. access_pio_vlf_v1_len_parity_err_cnt
  162. access_pio_block_qw_count_parity_err_cnt
  163. access_pio_write_qw_valid_parity_err_cnt
  164. access_pio_state_machine_err_cnt
  165. access_pio_write_data_parity_err_cnt
  166. access_pio_host_addr_mem_cor_err_cnt
  167. access_pio_host_addr_mem_unc_err_cnt
  168. access_pio_pkt_evict_sm_or_arb_sm_err_cnt
  169. access_pio_init_sm_in_err_cnt
  170. access_pio_ppmc_pbl_fifo_err_cnt
  171. access_pio_credit_ret_fifo_parity_err_cnt
  172. access_pio_v1_len_mem_bank1_cor_err_cnt
  173. access_pio_v1_len_mem_bank0_cor_err_cnt
  174. access_pio_v1_len_mem_bank1_unc_err_cnt
  175. access_pio_v1_len_mem_bank0_unc_err_cnt
  176. access_pio_sm_pkt_reset_parity_err_cnt
  177. access_pio_pkt_evict_fifo_parity_err_cnt
  178. access_pio_sbrdctrl_crrel_fifo_parity_err_cnt
  179. access_pio_sbrdctl_crrel_parity_err_cnt
  180. access_pio_pec_fifo_parity_err_cnt
  181. access_pio_pcc_fifo_parity_err_cnt
  182. access_pio_sb_mem_fifo1_err_cnt
  183. access_pio_sb_mem_fifo0_err_cnt
  184. access_pio_csr_parity_err_cnt
  185. access_pio_write_addr_parity_err_cnt
  186. access_pio_write_bad_ctxt_err_cnt
  187. access_sdma_pcie_req_tracking_cor_err_cnt
  188. access_sdma_pcie_req_tracking_unc_err_cnt
  189. access_sdma_csr_parity_err_cnt
  190. access_sdma_rpy_tag_err_cnt
  191. access_tx_read_pio_memory_csr_unc_err_cnt
  192. access_tx_read_sdma_memory_csr_err_cnt
  193. access_tx_egress_fifo_cor_err_cnt
  194. access_tx_read_pio_memory_cor_err_cnt
  195. access_tx_read_sdma_memory_cor_err_cnt
  196. access_tx_sb_hdr_cor_err_cnt
  197. access_tx_credit_overrun_err_cnt
  198. access_tx_launch_fifo8_cor_err_cnt
  199. access_tx_launch_fifo7_cor_err_cnt
  200. access_tx_launch_fifo6_cor_err_cnt
  201. access_tx_launch_fifo5_cor_err_cnt
  202. access_tx_launch_fifo4_cor_err_cnt
  203. access_tx_launch_fifo3_cor_err_cnt
  204. access_tx_launch_fifo2_cor_err_cnt
  205. access_tx_launch_fifo1_cor_err_cnt
  206. access_tx_launch_fifo0_cor_err_cnt
  207. access_tx_credit_return_vl_err_cnt
  208. access_tx_hcrc_insertion_err_cnt
  209. access_tx_egress_fifo_unc_err_cnt
  210. access_tx_read_pio_memory_unc_err_cnt
  211. access_tx_read_sdma_memory_unc_err_cnt
  212. access_tx_sb_hdr_unc_err_cnt
  213. access_tx_credit_return_partiy_err_cnt
  214. access_tx_launch_fifo8_unc_or_parity_err_cnt
  215. access_tx_launch_fifo7_unc_or_parity_err_cnt
  216. access_tx_launch_fifo6_unc_or_parity_err_cnt
  217. access_tx_launch_fifo5_unc_or_parity_err_cnt
  218. access_tx_launch_fifo4_unc_or_parity_err_cnt
  219. access_tx_launch_fifo3_unc_or_parity_err_cnt
  220. access_tx_launch_fifo2_unc_or_parity_err_cnt
  221. access_tx_launch_fifo1_unc_or_parity_err_cnt
  222. access_tx_launch_fifo0_unc_or_parity_err_cnt
  223. access_tx_sdma15_disallowed_packet_err_cnt
  224. access_tx_sdma14_disallowed_packet_err_cnt
  225. access_tx_sdma13_disallowed_packet_err_cnt
  226. access_tx_sdma12_disallowed_packet_err_cnt
  227. access_tx_sdma11_disallowed_packet_err_cnt
  228. access_tx_sdma10_disallowed_packet_err_cnt
  229. access_tx_sdma9_disallowed_packet_err_cnt
  230. access_tx_sdma8_disallowed_packet_err_cnt
  231. access_tx_sdma7_disallowed_packet_err_cnt
  232. access_tx_sdma6_disallowed_packet_err_cnt
  233. access_tx_sdma5_disallowed_packet_err_cnt
  234. access_tx_sdma4_disallowed_packet_err_cnt
  235. access_tx_sdma3_disallowed_packet_err_cnt
  236. access_tx_sdma2_disallowed_packet_err_cnt
  237. access_tx_sdma1_disallowed_packet_err_cnt
  238. access_tx_sdma0_disallowed_packet_err_cnt
  239. access_tx_config_parity_err_cnt
  240. access_tx_sbrd_ctl_csr_parity_err_cnt
  241. access_tx_launch_csr_parity_err_cnt
  242. access_tx_illegal_vl_err_cnt
  243. access_tx_sbrd_ctl_state_machine_parity_err_cnt
  244. access_egress_reserved_10_err_cnt
  245. access_egress_reserved_9_err_cnt
  246. access_tx_sdma_launch_intf_parity_err_cnt
  247. access_tx_pio_launch_intf_parity_err_cnt
  248. access_egress_reserved_6_err_cnt
  249. access_tx_incorrect_link_state_err_cnt
  250. access_tx_linkdown_err_cnt
  251. access_tx_egress_fifi_underrun_or_parity_err_cnt
  252. access_egress_reserved_2_err_cnt
  253. access_tx_pkt_integrity_mem_unc_err_cnt
  254. access_tx_pkt_integrity_mem_cor_err_cnt
  255. access_send_csr_write_bad_addr_err_cnt
  256. access_send_csr_read_bad_addr_err_cnt
  257. access_send_csr_parity_cnt
  258. access_pio_write_out_of_bounds_err_cnt
  259. access_pio_write_overflow_err_cnt
  260. access_pio_write_crosses_boundary_err_cnt
  261. access_pio_disallowed_packet_err_cnt
  262. access_pio_inconsistent_sop_err_cnt
  263. access_sdma_header_request_fifo_cor_err_cnt
  264. access_sdma_header_storage_cor_err_cnt
  265. access_sdma_packet_tracking_cor_err_cnt
  266. access_sdma_assembly_cor_err_cnt
  267. access_sdma_desc_table_cor_err_cnt
  268. access_sdma_header_request_fifo_unc_err_cnt
  269. access_sdma_header_storage_unc_err_cnt
  270. access_sdma_packet_tracking_unc_err_cnt
  271. access_sdma_assembly_unc_err_cnt
  272. access_sdma_desc_table_unc_err_cnt
  273. access_sdma_timeout_err_cnt
  274. access_sdma_header_length_err_cnt
  275. access_sdma_header_address_err_cnt
  276. access_sdma_header_select_err_cnt
  277. access_sdma_reserved_9_err_cnt
  278. access_sdma_packet_desc_overflow_err_cnt
  279. access_sdma_length_mismatch_err_cnt
  280. access_sdma_halt_err_cnt
  281. access_sdma_mem_read_err_cnt
  282. access_sdma_first_desc_err_cnt
  283. access_sdma_tail_out_of_bounds_err_cnt
  284. access_sdma_too_long_err_cnt
  285. access_sdma_gen_mismatch_err_cnt
  286. access_sdma_wrong_dw_err_cnt
  287. access_dc_rcv_err_cnt
  288. is_ax
  289. is_bx
  290. is_urg_masked
  291. append_str
  292. flag_string
  293. is_misc_err_name
  294. is_sdma_eng_err_name
  295. is_sendctxt_err_name
  296. is_various_name
  297. is_dc_name
  298. is_sdma_eng_name
  299. is_rcv_avail_name
  300. is_rcv_urgent_name
  301. is_send_credit_name
  302. is_reserved_name
  303. cce_err_status_string
  304. rxe_err_status_string
  305. misc_err_status_string
  306. pio_err_status_string
  307. sdma_err_status_string
  308. egress_err_status_string
  309. egress_err_info_string
  310. send_err_status_string
  311. handle_cce_err
  312. update_rcverr_timer
  313. init_rcverr
  314. free_rcverr
  315. handle_rxe_err
  316. handle_misc_err
  317. handle_pio_err
  318. handle_sdma_err
  319. __count_port_discards
  320. count_port_inactive
  321. handle_send_egress_err_info
  322. port_inactive_err
  323. disallowed_pkt_err
  324. disallowed_pkt_engine
  325. engine_to_vl
  326. sc_to_vl
  327. handle_egress_err
  328. handle_txe_err
  329. interrupt_clear_down
  330. is_misc_err_int
  331. send_context_err_status_string
  332. is_sendctxt_err_int
  333. handle_sdma_eng_err
  334. is_sdma_eng_err_int
  335. is_various_int
  336. handle_qsfp_int
  337. request_host_lcb_access
  338. request_8051_lcb_access
  339. set_host_lcb_access
  340. set_8051_lcb_access
  341. acquire_lcb_access
  342. release_lcb_access
  343. init_lcb_access
  344. hreq_response
  345. handle_8051_request
  346. set_up_vau
  347. set_up_vl15
  348. reset_link_credits
  349. vcu_to_cu
  350. cu_to_vcu
  351. vau_to_au
  352. set_linkup_defaults
  353. lcb_shutdown
  354. _dc_shutdown
  355. dc_shutdown
  356. _dc_start
  357. dc_start
  358. adjust_lcb_for_fpga_serdes
  359. handle_sma_message
  360. adjust_rcvctrl
  361. add_rcvctrl
  362. clear_rcvctrl
  363. start_freeze_handling
  364. wait_for_freeze_status
  365. rxe_freeze
  366. rxe_kernel_unfreeze
  367. handle_freeze
  368. update_xmit_counters
  369. handle_link_up
  370. reset_neighbor_info
  371. link_down_reason_str
  372. handle_link_down
  373. handle_link_bounce
  374. cap_to_port_ltp
  375. port_ltp_to_cap
  376. lcb_to_port_ltp
  377. clear_full_mgmt_pkey
  378. link_width_to_bits
  379. nibble_to_count
  380. get_link_widths
  381. get_linkup_widths
  382. get_linkup_link_widths
  383. handle_verify_cap
  384. apply_link_downgrade_policy
  385. handle_link_downgrade
  386. dcc_err_string
  387. lcb_err_string
  388. dc8051_err_string
  389. dc8051_info_err_string
  390. dc8051_info_host_msg_string
  391. handle_8051_interrupt
  392. handle_dcc_err
  393. handle_lcb_err
  394. is_dc_int
  395. is_send_credit_int
  396. is_sdma_eng_int
  397. is_rcv_avail_int
  398. is_rcv_urgent_int
  399. is_reserved_int
  400. is_interrupt
  401. general_interrupt
  402. sdma_interrupt
  403. clear_recv_intr
  404. force_recv_intr
  405. check_packet_present
  406. receive_context_interrupt
  407. receive_context_thread
  408. read_physical_state
  409. read_logical_state
  410. set_logical_state
  411. read_lcb_via_8051
  412. update_lcb_cache
  413. read_lcb_cache
  414. read_lcb_csr
  415. write_lcb_via_8051
  416. write_lcb_csr
  417. do_8051_command
  418. set_physical_link_state
  419. load_8051_config
  420. read_8051_config
  421. write_vc_local_phy
  422. write_vc_local_fabric
  423. read_vc_local_link_mode
  424. write_vc_local_link_mode
  425. write_local_device_id
  426. read_remote_device_id
  427. write_host_interface_version
  428. read_misc_status
  429. read_vc_remote_phy
  430. read_vc_remote_fabric
  431. read_vc_remote_link_width
  432. read_local_lni
  433. read_last_local_state
  434. read_last_remote_state
  435. hfi1_read_link_quality
  436. read_planned_down_reason_code
  437. read_link_down_reason
  438. read_tx_settings
  439. write_tx_settings
  440. read_idle_message
  441. read_idle_sma
  442. send_idle_message
  443. send_idle_sma
  444. do_quick_linkup
  445. init_loopback
  446. opa_to_vc_link_widths
  447. set_local_link_attributes
  448. start_link
  449. wait_for_qsfp_init
  450. set_qsfp_int_n
  451. reset_qsfp
  452. handle_qsfp_error_conditions
  453. qsfp_event
  454. init_qsfp_int
  455. init_lcb
  456. test_qsfp_read
  457. try_start_link
  458. handle_start_link
  459. bringup_serdes
  460. hfi1_quiet_serdes
  461. init_cpu_counters
  462. hfi1_put_tid
  463. hfi1_clear_tids
  464. ib_cfg_name
  465. hfi1_get_ib_cfg
  466. lrh_max_header_bytes
  467. set_send_length
  468. set_lidlmc
  469. state_completed_string
  470. state_complete_reason_code_string
  471. decode_state_complete
  472. check_lni_states
  473. wait_link_transfer_active
  474. force_logical_link_state_down
  475. goto_offline
  476. link_state_name
  477. link_state_reason_name
  478. driver_pstate
  479. driver_lstate
  480. set_link_down_reason
  481. data_vls_operational
  482. set_link_state
  483. hfi1_set_ib_cfg
  484. init_vl_arb_caches
  485. vl_arb_lock_cache
  486. vl_arb_unlock_cache
  487. vl_arb_get_cache
  488. vl_arb_set_cache
  489. vl_arb_match_cache
  490. set_vl_weights
  491. read_one_cm_vl
  492. get_buffer_control
  493. get_sc2vlnt
  494. get_vlarb_preempt
  495. set_sc2vlnt
  496. nonzero_msg
  497. set_global_shared
  498. set_global_limit
  499. set_vl_shared
  500. set_vl_dedicated
  501. wait_for_vl_status_clear
  502. set_buffer_control
  503. fm_get_table
  504. fm_set_table
  505. disable_data_vls
  506. open_fill_data_vls
  507. drain_data_vls
  508. stop_drain_data_vls
  509. ns_to_cclock
  510. cclock_to_ns
  511. adjust_rcv_timeout
  512. update_usrhead
  513. hdrqempty
  514. encoded_size
  515. hfi1_rcvctrl
  516. hfi1_read_cntrs
  517. hfi1_read_portcntrs
  518. free_cntrs
  519. read_dev_port_cntr
  520. write_dev_port_cntr
  521. read_dev_cntr
  522. write_dev_cntr
  523. read_port_cntr
  524. write_port_cntr
  525. do_update_synth_timer
  526. update_synth_timer
  527. init_cntrs
  528. chip_to_opa_lstate
  529. chip_to_opa_pstate
  530. opa_lstate_name
  531. opa_pstate_name
  532. update_statusp
  533. wait_logical_linkstate
  534. log_state_transition
  535. log_physical_state
  536. wait_physical_linkstate
  537. wait_phys_link_offline_substates
  538. wait_phys_link_out_of_offline
  539. hfi1_init_ctxt
  540. hfi1_tempsense_rd
  541. read_mod_write
  542. set_intr_bits
  543. clear_all_interrupts
  544. remap_intr
  545. remap_sdma_interrupts
  546. reset_interrupts
  547. set_up_interrupts
  548. set_up_context_variables
  549. set_partition_keys
  550. write_uninitialized_csrs_and_memories
  551. clear_cce_status
  552. reset_cce_csrs
  553. reset_misc_csrs
  554. reset_txe_csrs
  555. init_rbufs
  556. reset_rxe_csrs
  557. init_sc2vl_tables
  558. init_chip
  559. init_early_variables
  560. init_kdeth_qp
  561. hfi1_get_qp_map
  562. init_qpmap_table
  563. alloc_rsm_map_table
  564. complete_rsm_map_table
  565. add_rsm_rule
  566. clear_rsm_rule
  567. qos_rmt_entries
  568. init_qos
  569. init_fecn_handling
  570. hfi1_init_vnic_rsm
  571. hfi1_deinit_vnic_rsm
  572. init_rxe
  573. init_other
  574. assign_cm_au_table
  575. assign_local_cm_au_table
  576. assign_remote_cm_au_table
  577. init_txe
  578. hfi1_set_ctxt_jkey
  579. hfi1_clear_ctxt_jkey
  580. hfi1_set_ctxt_pkey
  581. hfi1_clear_ctxt_pkey
  582. hfi1_start_cleanup
  583. init_asic_data
  584. obtain_boardname
  585. check_int_registers
  586. hfi1_init_dd
  587. delay_cycles
  588. create_pbc
  589. thermal_init
  590. handle_temp_err

   1 /*
   2  * Copyright(c) 2015 - 2018 Intel Corporation.
   3  *
   4  * This file is provided under a dual BSD/GPLv2 license.  When using or
   5  * redistributing this file, you may do so under either license.
   6  *
   7  * GPL LICENSE SUMMARY
   8  *
   9  * This program is free software; you can redistribute it and/or modify
  10  * it under the terms of version 2 of the GNU General Public License as
  11  * published by the Free Software Foundation.
  12  *
  13  * This program is distributed in the hope that it will be useful, but
  14  * WITHOUT ANY WARRANTY; without even the implied warranty of
  15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  16  * General Public License for more details.
  17  *
  18  * BSD LICENSE
  19  *
  20  * Redistribution and use in source and binary forms, with or without
  21  * modification, are permitted provided that the following conditions
  22  * are met:
  23  *
  24  *  - Redistributions of source code must retain the above copyright
  25  *    notice, this list of conditions and the following disclaimer.
  26  *  - Redistributions in binary form must reproduce the above copyright
  27  *    notice, this list of conditions and the following disclaimer in
  28  *    the documentation and/or other materials provided with the
  29  *    distribution.
  30  *  - Neither the name of Intel Corporation nor the names of its
  31  *    contributors may be used to endorse or promote products derived
  32  *    from this software without specific prior written permission.
  33  *
  34  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  35  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  36  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  37  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  38  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  39  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  40  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  41  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  42  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  43  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  44  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  45  *
  46  */
  47 
  48 /*
  49  * This file contains all of the code that is specific to the HFI chip
  50  */
  51 
  52 #include <linux/pci.h>
  53 #include <linux/delay.h>
  54 #include <linux/interrupt.h>
  55 #include <linux/module.h>
  56 
  57 #include "hfi.h"
  58 #include "trace.h"
  59 #include "mad.h"
  60 #include "pio.h"
  61 #include "sdma.h"
  62 #include "eprom.h"
  63 #include "efivar.h"
  64 #include "platform.h"
  65 #include "aspm.h"
  66 #include "affinity.h"
  67 #include "debugfs.h"
  68 #include "fault.h"
  69 
  70 uint kdeth_qp;
  71 module_param_named(kdeth_qp, kdeth_qp, uint, S_IRUGO);
  72 MODULE_PARM_DESC(kdeth_qp, "Set the KDETH queue pair prefix");
  73 
  74 uint num_vls = HFI1_MAX_VLS_SUPPORTED;
  75 module_param(num_vls, uint, S_IRUGO);
  76 MODULE_PARM_DESC(num_vls, "Set number of Virtual Lanes to use (1-8)");
  77 
  78 /*
  79  * Default time to aggregate two 10K packets from the idle state
  80  * (timer not running). The timer starts at the end of the first packet,
  81  * so only the time for one 10K packet and header plus a bit extra is needed.
  82  * 10 * 1024 + 64 header byte = 10304 byte
  83  * 10304 byte / 12.5 GB/s = 824.32ns
  84  */
  85 uint rcv_intr_timeout = (824 + 16); /* 16 is for coalescing interrupt */
  86 module_param(rcv_intr_timeout, uint, S_IRUGO);
  87 MODULE_PARM_DESC(rcv_intr_timeout, "Receive interrupt mitigation timeout in ns");
  88 
  89 uint rcv_intr_count = 16; /* same as qib */
  90 module_param(rcv_intr_count, uint, S_IRUGO);
  91 MODULE_PARM_DESC(rcv_intr_count, "Receive interrupt mitigation count");
  92 
  93 ushort link_crc_mask = SUPPORTED_CRCS;
  94 module_param(link_crc_mask, ushort, S_IRUGO);
  95 MODULE_PARM_DESC(link_crc_mask, "CRCs to use on the link");
  96 
  97 uint loopback;
  98 module_param_named(loopback, loopback, uint, S_IRUGO);
  99 MODULE_PARM_DESC(loopback, "Put into loopback mode (1 = serdes, 3 = external cable");
 100 
 101 /* Other driver tunables */
 102 uint rcv_intr_dynamic = 1; /* enable dynamic mode for rcv int mitigation*/
 103 static ushort crc_14b_sideband = 1;
 104 static uint use_flr = 1;
 105 uint quick_linkup; /* skip LNI */
 106 
 107 struct flag_table {
 108         u64 flag;       /* the flag */
 109         char *str;      /* description string */
 110         u16 extra;      /* extra information */
 111         u16 unused0;
 112         u32 unused1;
 113 };
 114 
 115 /* str must be a string constant */
 116 #define FLAG_ENTRY(str, extra, flag) {flag, str, extra}
 117 #define FLAG_ENTRY0(str, flag) {flag, str, 0}
 118 
 119 /* Send Error Consequences */
 120 #define SEC_WRITE_DROPPED       0x1
 121 #define SEC_PACKET_DROPPED      0x2
 122 #define SEC_SC_HALTED           0x4     /* per-context only */
 123 #define SEC_SPC_FREEZE          0x8     /* per-HFI only */
 124 
 125 #define DEFAULT_KRCVQS            2
 126 #define MIN_KERNEL_KCTXTS         2
 127 #define FIRST_KERNEL_KCTXT        1
 128 
 129 /*
 130  * RSM instance allocation
 131  *   0 - Verbs
 132  *   1 - User Fecn Handling
 133  *   2 - Vnic
 134  */
 135 #define RSM_INS_VERBS             0
 136 #define RSM_INS_FECN              1
 137 #define RSM_INS_VNIC              2
 138 
 139 /* Bit offset into the GUID which carries HFI id information */
 140 #define GUID_HFI_INDEX_SHIFT     39
 141 
 142 /* extract the emulation revision */
 143 #define emulator_rev(dd) ((dd)->irev >> 8)
 144 /* parallel and serial emulation versions are 3 and 4 respectively */
 145 #define is_emulator_p(dd) ((((dd)->irev) & 0xf) == 3)
 146 #define is_emulator_s(dd) ((((dd)->irev) & 0xf) == 4)
 147 
 148 /* RSM fields for Verbs */
 149 /* packet type */
 150 #define IB_PACKET_TYPE         2ull
 151 #define QW_SHIFT               6ull
 152 /* QPN[7..1] */
 153 #define QPN_WIDTH              7ull
 154 
 155 /* LRH.BTH: QW 0, OFFSET 48 - for match */
 156 #define LRH_BTH_QW             0ull
 157 #define LRH_BTH_BIT_OFFSET     48ull
 158 #define LRH_BTH_OFFSET(off)    ((LRH_BTH_QW << QW_SHIFT) | (off))
 159 #define LRH_BTH_MATCH_OFFSET   LRH_BTH_OFFSET(LRH_BTH_BIT_OFFSET)
 160 #define LRH_BTH_SELECT
 161 #define LRH_BTH_MASK           3ull
 162 #define LRH_BTH_VALUE          2ull
 163 
 164 /* LRH.SC[3..0] QW 0, OFFSET 56 - for match */
 165 #define LRH_SC_QW              0ull
 166 #define LRH_SC_BIT_OFFSET      56ull
 167 #define LRH_SC_OFFSET(off)     ((LRH_SC_QW << QW_SHIFT) | (off))
 168 #define LRH_SC_MATCH_OFFSET    LRH_SC_OFFSET(LRH_SC_BIT_OFFSET)
 169 #define LRH_SC_MASK            128ull
 170 #define LRH_SC_VALUE           0ull
 171 
 172 /* SC[n..0] QW 0, OFFSET 60 - for select */
 173 #define LRH_SC_SELECT_OFFSET  ((LRH_SC_QW << QW_SHIFT) | (60ull))
 174 
 175 /* QPN[m+n:1] QW 1, OFFSET 1 */
 176 #define QPN_SELECT_OFFSET      ((1ull << QW_SHIFT) | (1ull))
 177 
 178 /* RSM fields for Vnic */
 179 /* L2_TYPE: QW 0, OFFSET 61 - for match */
 180 #define L2_TYPE_QW             0ull
 181 #define L2_TYPE_BIT_OFFSET     61ull
 182 #define L2_TYPE_OFFSET(off)    ((L2_TYPE_QW << QW_SHIFT) | (off))
 183 #define L2_TYPE_MATCH_OFFSET   L2_TYPE_OFFSET(L2_TYPE_BIT_OFFSET)
 184 #define L2_TYPE_MASK           3ull
 185 #define L2_16B_VALUE           2ull
 186 
 187 /* L4_TYPE QW 1, OFFSET 0 - for match */
 188 #define L4_TYPE_QW              1ull
 189 #define L4_TYPE_BIT_OFFSET      0ull
 190 #define L4_TYPE_OFFSET(off)     ((L4_TYPE_QW << QW_SHIFT) | (off))
 191 #define L4_TYPE_MATCH_OFFSET    L4_TYPE_OFFSET(L4_TYPE_BIT_OFFSET)
 192 #define L4_16B_TYPE_MASK        0xFFull
 193 #define L4_16B_ETH_VALUE        0x78ull
 194 
 195 /* 16B VESWID - for select */
 196 #define L4_16B_HDR_VESWID_OFFSET  ((2 << QW_SHIFT) | (16ull))
 197 /* 16B ENTROPY - for select */
 198 #define L2_16B_ENTROPY_OFFSET     ((1 << QW_SHIFT) | (32ull))
 199 
 200 /* defines to build power on SC2VL table */
 201 #define SC2VL_VAL( \
 202         num, \
 203         sc0, sc0val, \
 204         sc1, sc1val, \
 205         sc2, sc2val, \
 206         sc3, sc3val, \
 207         sc4, sc4val, \
 208         sc5, sc5val, \
 209         sc6, sc6val, \
 210         sc7, sc7val) \
 211 ( \
 212         ((u64)(sc0val) << SEND_SC2VLT##num##_SC##sc0##_SHIFT) | \
 213         ((u64)(sc1val) << SEND_SC2VLT##num##_SC##sc1##_SHIFT) | \
 214         ((u64)(sc2val) << SEND_SC2VLT##num##_SC##sc2##_SHIFT) | \
 215         ((u64)(sc3val) << SEND_SC2VLT##num##_SC##sc3##_SHIFT) | \
 216         ((u64)(sc4val) << SEND_SC2VLT##num##_SC##sc4##_SHIFT) | \
 217         ((u64)(sc5val) << SEND_SC2VLT##num##_SC##sc5##_SHIFT) | \
 218         ((u64)(sc6val) << SEND_SC2VLT##num##_SC##sc6##_SHIFT) | \
 219         ((u64)(sc7val) << SEND_SC2VLT##num##_SC##sc7##_SHIFT)   \
 220 )
 221 
 222 #define DC_SC_VL_VAL( \
 223         range, \
 224         e0, e0val, \
 225         e1, e1val, \
 226         e2, e2val, \
 227         e3, e3val, \
 228         e4, e4val, \
 229         e5, e5val, \
 230         e6, e6val, \
 231         e7, e7val, \
 232         e8, e8val, \
 233         e9, e9val, \
 234         e10, e10val, \
 235         e11, e11val, \
 236         e12, e12val, \
 237         e13, e13val, \
 238         e14, e14val, \
 239         e15, e15val) \
 240 ( \
 241         ((u64)(e0val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e0##_SHIFT) | \
 242         ((u64)(e1val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e1##_SHIFT) | \
 243         ((u64)(e2val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e2##_SHIFT) | \
 244         ((u64)(e3val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e3##_SHIFT) | \
 245         ((u64)(e4val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e4##_SHIFT) | \
 246         ((u64)(e5val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e5##_SHIFT) | \
 247         ((u64)(e6val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e6##_SHIFT) | \
 248         ((u64)(e7val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e7##_SHIFT) | \
 249         ((u64)(e8val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e8##_SHIFT) | \
 250         ((u64)(e9val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e9##_SHIFT) | \
 251         ((u64)(e10val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e10##_SHIFT) | \
 252         ((u64)(e11val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e11##_SHIFT) | \
 253         ((u64)(e12val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e12##_SHIFT) | \
 254         ((u64)(e13val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e13##_SHIFT) | \
 255         ((u64)(e14val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e14##_SHIFT) | \
 256         ((u64)(e15val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e15##_SHIFT) \
 257 )
 258 
 259 /* all CceStatus sub-block freeze bits */
 260 #define ALL_FROZE (CCE_STATUS_SDMA_FROZE_SMASK \
 261                         | CCE_STATUS_RXE_FROZE_SMASK \
 262                         | CCE_STATUS_TXE_FROZE_SMASK \
 263                         | CCE_STATUS_TXE_PIO_FROZE_SMASK)
 264 /* all CceStatus sub-block TXE pause bits */
 265 #define ALL_TXE_PAUSE (CCE_STATUS_TXE_PIO_PAUSED_SMASK \
 266                         | CCE_STATUS_TXE_PAUSED_SMASK \
 267                         | CCE_STATUS_SDMA_PAUSED_SMASK)
 268 /* all CceStatus sub-block RXE pause bits */
 269 #define ALL_RXE_PAUSE CCE_STATUS_RXE_PAUSED_SMASK
 270 
 271 #define CNTR_MAX 0xFFFFFFFFFFFFFFFFULL
 272 #define CNTR_32BIT_MAX 0x00000000FFFFFFFF
 273 
 274 /*
 275  * CCE Error flags.
 276  */
 277 static struct flag_table cce_err_status_flags[] = {
 278 /* 0*/  FLAG_ENTRY0("CceCsrParityErr",
 279                 CCE_ERR_STATUS_CCE_CSR_PARITY_ERR_SMASK),
 280 /* 1*/  FLAG_ENTRY0("CceCsrReadBadAddrErr",
 281                 CCE_ERR_STATUS_CCE_CSR_READ_BAD_ADDR_ERR_SMASK),
 282 /* 2*/  FLAG_ENTRY0("CceCsrWriteBadAddrErr",
 283                 CCE_ERR_STATUS_CCE_CSR_WRITE_BAD_ADDR_ERR_SMASK),
 284 /* 3*/  FLAG_ENTRY0("CceTrgtAsyncFifoParityErr",
 285                 CCE_ERR_STATUS_CCE_TRGT_ASYNC_FIFO_PARITY_ERR_SMASK),
 286 /* 4*/  FLAG_ENTRY0("CceTrgtAccessErr",
 287                 CCE_ERR_STATUS_CCE_TRGT_ACCESS_ERR_SMASK),
 288 /* 5*/  FLAG_ENTRY0("CceRspdDataParityErr",
 289                 CCE_ERR_STATUS_CCE_RSPD_DATA_PARITY_ERR_SMASK),
 290 /* 6*/  FLAG_ENTRY0("CceCli0AsyncFifoParityErr",
 291                 CCE_ERR_STATUS_CCE_CLI0_ASYNC_FIFO_PARITY_ERR_SMASK),
 292 /* 7*/  FLAG_ENTRY0("CceCsrCfgBusParityErr",
 293                 CCE_ERR_STATUS_CCE_CSR_CFG_BUS_PARITY_ERR_SMASK),
 294 /* 8*/  FLAG_ENTRY0("CceCli2AsyncFifoParityErr",
 295                 CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK),
 296 /* 9*/  FLAG_ENTRY0("CceCli1AsyncFifoPioCrdtParityErr",
 297             CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR_SMASK),
 298 /*10*/  FLAG_ENTRY0("CceCli1AsyncFifoPioCrdtParityErr",
 299             CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR_SMASK),
 300 /*11*/  FLAG_ENTRY0("CceCli1AsyncFifoRxdmaParityError",
 301             CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERROR_SMASK),
 302 /*12*/  FLAG_ENTRY0("CceCli1AsyncFifoDbgParityError",
 303                 CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERROR_SMASK),
 304 /*13*/  FLAG_ENTRY0("PcicRetryMemCorErr",
 305                 CCE_ERR_STATUS_PCIC_RETRY_MEM_COR_ERR_SMASK),
 306 /*14*/  FLAG_ENTRY0("PcicRetryMemCorErr",
 307                 CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_COR_ERR_SMASK),
 308 /*15*/  FLAG_ENTRY0("PcicPostHdQCorErr",
 309                 CCE_ERR_STATUS_PCIC_POST_HD_QCOR_ERR_SMASK),
 310 /*16*/  FLAG_ENTRY0("PcicPostHdQCorErr",
 311                 CCE_ERR_STATUS_PCIC_POST_DAT_QCOR_ERR_SMASK),
 312 /*17*/  FLAG_ENTRY0("PcicPostHdQCorErr",
 313                 CCE_ERR_STATUS_PCIC_CPL_HD_QCOR_ERR_SMASK),
 314 /*18*/  FLAG_ENTRY0("PcicCplDatQCorErr",
 315                 CCE_ERR_STATUS_PCIC_CPL_DAT_QCOR_ERR_SMASK),
 316 /*19*/  FLAG_ENTRY0("PcicNPostHQParityErr",
 317                 CCE_ERR_STATUS_PCIC_NPOST_HQ_PARITY_ERR_SMASK),
 318 /*20*/  FLAG_ENTRY0("PcicNPostDatQParityErr",
 319                 CCE_ERR_STATUS_PCIC_NPOST_DAT_QPARITY_ERR_SMASK),
 320 /*21*/  FLAG_ENTRY0("PcicRetryMemUncErr",
 321                 CCE_ERR_STATUS_PCIC_RETRY_MEM_UNC_ERR_SMASK),
 322 /*22*/  FLAG_ENTRY0("PcicRetrySotMemUncErr",
 323                 CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_UNC_ERR_SMASK),
 324 /*23*/  FLAG_ENTRY0("PcicPostHdQUncErr",
 325                 CCE_ERR_STATUS_PCIC_POST_HD_QUNC_ERR_SMASK),
 326 /*24*/  FLAG_ENTRY0("PcicPostDatQUncErr",
 327                 CCE_ERR_STATUS_PCIC_POST_DAT_QUNC_ERR_SMASK),
 328 /*25*/  FLAG_ENTRY0("PcicCplHdQUncErr",
 329                 CCE_ERR_STATUS_PCIC_CPL_HD_QUNC_ERR_SMASK),
 330 /*26*/  FLAG_ENTRY0("PcicCplDatQUncErr",
 331                 CCE_ERR_STATUS_PCIC_CPL_DAT_QUNC_ERR_SMASK),
 332 /*27*/  FLAG_ENTRY0("PcicTransmitFrontParityErr",
 333                 CCE_ERR_STATUS_PCIC_TRANSMIT_FRONT_PARITY_ERR_SMASK),
 334 /*28*/  FLAG_ENTRY0("PcicTransmitBackParityErr",
 335                 CCE_ERR_STATUS_PCIC_TRANSMIT_BACK_PARITY_ERR_SMASK),
 336 /*29*/  FLAG_ENTRY0("PcicReceiveParityErr",
 337                 CCE_ERR_STATUS_PCIC_RECEIVE_PARITY_ERR_SMASK),
 338 /*30*/  FLAG_ENTRY0("CceTrgtCplTimeoutErr",
 339                 CCE_ERR_STATUS_CCE_TRGT_CPL_TIMEOUT_ERR_SMASK),
 340 /*31*/  FLAG_ENTRY0("LATriggered",
 341                 CCE_ERR_STATUS_LA_TRIGGERED_SMASK),
 342 /*32*/  FLAG_ENTRY0("CceSegReadBadAddrErr",
 343                 CCE_ERR_STATUS_CCE_SEG_READ_BAD_ADDR_ERR_SMASK),
 344 /*33*/  FLAG_ENTRY0("CceSegWriteBadAddrErr",
 345                 CCE_ERR_STATUS_CCE_SEG_WRITE_BAD_ADDR_ERR_SMASK),
 346 /*34*/  FLAG_ENTRY0("CceRcplAsyncFifoParityErr",
 347                 CCE_ERR_STATUS_CCE_RCPL_ASYNC_FIFO_PARITY_ERR_SMASK),
 348 /*35*/  FLAG_ENTRY0("CceRxdmaConvFifoParityErr",
 349                 CCE_ERR_STATUS_CCE_RXDMA_CONV_FIFO_PARITY_ERR_SMASK),
 350 /*36*/  FLAG_ENTRY0("CceMsixTableCorErr",
 351                 CCE_ERR_STATUS_CCE_MSIX_TABLE_COR_ERR_SMASK),
 352 /*37*/  FLAG_ENTRY0("CceMsixTableUncErr",
 353                 CCE_ERR_STATUS_CCE_MSIX_TABLE_UNC_ERR_SMASK),
 354 /*38*/  FLAG_ENTRY0("CceIntMapCorErr",
 355                 CCE_ERR_STATUS_CCE_INT_MAP_COR_ERR_SMASK),
 356 /*39*/  FLAG_ENTRY0("CceIntMapUncErr",
 357                 CCE_ERR_STATUS_CCE_INT_MAP_UNC_ERR_SMASK),
 358 /*40*/  FLAG_ENTRY0("CceMsixCsrParityErr",
 359                 CCE_ERR_STATUS_CCE_MSIX_CSR_PARITY_ERR_SMASK),
 360 /*41-63 reserved*/
 361 };
 362 
 363 /*
 364  * Misc Error flags
 365  */
 366 #define MES(text) MISC_ERR_STATUS_MISC_##text##_ERR_SMASK
 367 static struct flag_table misc_err_status_flags[] = {
 368 /* 0*/  FLAG_ENTRY0("CSR_PARITY", MES(CSR_PARITY)),
 369 /* 1*/  FLAG_ENTRY0("CSR_READ_BAD_ADDR", MES(CSR_READ_BAD_ADDR)),
 370 /* 2*/  FLAG_ENTRY0("CSR_WRITE_BAD_ADDR", MES(CSR_WRITE_BAD_ADDR)),
 371 /* 3*/  FLAG_ENTRY0("SBUS_WRITE_FAILED", MES(SBUS_WRITE_FAILED)),
 372 /* 4*/  FLAG_ENTRY0("KEY_MISMATCH", MES(KEY_MISMATCH)),
 373 /* 5*/  FLAG_ENTRY0("FW_AUTH_FAILED", MES(FW_AUTH_FAILED)),
 374 /* 6*/  FLAG_ENTRY0("EFUSE_CSR_PARITY", MES(EFUSE_CSR_PARITY)),
 375 /* 7*/  FLAG_ENTRY0("EFUSE_READ_BAD_ADDR", MES(EFUSE_READ_BAD_ADDR)),
 376 /* 8*/  FLAG_ENTRY0("EFUSE_WRITE", MES(EFUSE_WRITE)),
 377 /* 9*/  FLAG_ENTRY0("EFUSE_DONE_PARITY", MES(EFUSE_DONE_PARITY)),
 378 /*10*/  FLAG_ENTRY0("INVALID_EEP_CMD", MES(INVALID_EEP_CMD)),
 379 /*11*/  FLAG_ENTRY0("MBIST_FAIL", MES(MBIST_FAIL)),
 380 /*12*/  FLAG_ENTRY0("PLL_LOCK_FAIL", MES(PLL_LOCK_FAIL))
 381 };
 382 
 383 /*
 384  * TXE PIO Error flags and consequences
 385  */
 386 static struct flag_table pio_err_status_flags[] = {
 387 /* 0*/  FLAG_ENTRY("PioWriteBadCtxt",
 388         SEC_WRITE_DROPPED,
 389         SEND_PIO_ERR_STATUS_PIO_WRITE_BAD_CTXT_ERR_SMASK),
 390 /* 1*/  FLAG_ENTRY("PioWriteAddrParity",
 391         SEC_SPC_FREEZE,
 392         SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK),
 393 /* 2*/  FLAG_ENTRY("PioCsrParity",
 394         SEC_SPC_FREEZE,
 395         SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK),
 396 /* 3*/  FLAG_ENTRY("PioSbMemFifo0",
 397         SEC_SPC_FREEZE,
 398         SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK),
 399 /* 4*/  FLAG_ENTRY("PioSbMemFifo1",
 400         SEC_SPC_FREEZE,
 401         SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK),
 402 /* 5*/  FLAG_ENTRY("PioPccFifoParity",
 403         SEC_SPC_FREEZE,
 404         SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK),
 405 /* 6*/  FLAG_ENTRY("PioPecFifoParity",
 406         SEC_SPC_FREEZE,
 407         SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK),
 408 /* 7*/  FLAG_ENTRY("PioSbrdctlCrrelParity",
 409         SEC_SPC_FREEZE,
 410         SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK),
 411 /* 8*/  FLAG_ENTRY("PioSbrdctrlCrrelFifoParity",
 412         SEC_SPC_FREEZE,
 413         SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK),
 414 /* 9*/  FLAG_ENTRY("PioPktEvictFifoParityErr",
 415         SEC_SPC_FREEZE,
 416         SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK),
 417 /*10*/  FLAG_ENTRY("PioSmPktResetParity",
 418         SEC_SPC_FREEZE,
 419         SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK),
 420 /*11*/  FLAG_ENTRY("PioVlLenMemBank0Unc",
 421         SEC_SPC_FREEZE,
 422         SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK),
 423 /*12*/  FLAG_ENTRY("PioVlLenMemBank1Unc",
 424         SEC_SPC_FREEZE,
 425         SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK),
 426 /*13*/  FLAG_ENTRY("PioVlLenMemBank0Cor",
 427         0,
 428         SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_COR_ERR_SMASK),
 429 /*14*/  FLAG_ENTRY("PioVlLenMemBank1Cor",
 430         0,
 431         SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_COR_ERR_SMASK),
 432 /*15*/  FLAG_ENTRY("PioCreditRetFifoParity",
 433         SEC_SPC_FREEZE,
 434         SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK),
 435 /*16*/  FLAG_ENTRY("PioPpmcPblFifo",
 436         SEC_SPC_FREEZE,
 437         SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK),
 438 /*17*/  FLAG_ENTRY("PioInitSmIn",
 439         0,
 440         SEND_PIO_ERR_STATUS_PIO_INIT_SM_IN_ERR_SMASK),
 441 /*18*/  FLAG_ENTRY("PioPktEvictSmOrArbSm",
 442         SEC_SPC_FREEZE,
 443         SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK),
 444 /*19*/  FLAG_ENTRY("PioHostAddrMemUnc",
 445         SEC_SPC_FREEZE,
 446         SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK),
 447 /*20*/  FLAG_ENTRY("PioHostAddrMemCor",
 448         0,
 449         SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_COR_ERR_SMASK),
 450 /*21*/  FLAG_ENTRY("PioWriteDataParity",
 451         SEC_SPC_FREEZE,
 452         SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK),
 453 /*22*/  FLAG_ENTRY("PioStateMachine",
 454         SEC_SPC_FREEZE,
 455         SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK),
 456 /*23*/  FLAG_ENTRY("PioWriteQwValidParity",
 457         SEC_WRITE_DROPPED | SEC_SPC_FREEZE,
 458         SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK),
 459 /*24*/  FLAG_ENTRY("PioBlockQwCountParity",
 460         SEC_WRITE_DROPPED | SEC_SPC_FREEZE,
 461         SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK),
 462 /*25*/  FLAG_ENTRY("PioVlfVlLenParity",
 463         SEC_SPC_FREEZE,
 464         SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK),
 465 /*26*/  FLAG_ENTRY("PioVlfSopParity",
 466         SEC_SPC_FREEZE,
 467         SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK),
 468 /*27*/  FLAG_ENTRY("PioVlFifoParity",
 469         SEC_SPC_FREEZE,
 470         SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK),
 471 /*28*/  FLAG_ENTRY("PioPpmcBqcMemParity",
 472         SEC_SPC_FREEZE,
 473         SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK),
 474 /*29*/  FLAG_ENTRY("PioPpmcSopLen",
 475         SEC_SPC_FREEZE,
 476         SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK),
 477 /*30-31 reserved*/
 478 /*32*/  FLAG_ENTRY("PioCurrentFreeCntParity",
 479         SEC_SPC_FREEZE,
 480         SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK),
 481 /*33*/  FLAG_ENTRY("PioLastReturnedCntParity",
 482         SEC_SPC_FREEZE,
 483         SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK),
 484 /*34*/  FLAG_ENTRY("PioPccSopHeadParity",
 485         SEC_SPC_FREEZE,
 486         SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK),
 487 /*35*/  FLAG_ENTRY("PioPecSopHeadParityErr",
 488         SEC_SPC_FREEZE,
 489         SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK),
 490 /*36-63 reserved*/
 491 };
 492 
 493 /* TXE PIO errors that cause an SPC freeze */
 494 #define ALL_PIO_FREEZE_ERR \
 495         (SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK \
 496         | SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK \
 497         | SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK \
 498         | SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK \
 499         | SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK \
 500         | SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK \
 501         | SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK \
 502         | SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK \
 503         | SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK \
 504         | SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK \
 505         | SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK \
 506         | SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK \
 507         | SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK \
 508         | SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK \
 509         | SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK \
 510         | SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK \
 511         | SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK \
 512         | SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK \
 513         | SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK \
 514         | SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK \
 515         | SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK \
 516         | SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK \
 517         | SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK \
 518         | SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK \
 519         | SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK \
 520         | SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK \
 521         | SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK \
 522         | SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK \
 523         | SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK)
 524 
 525 /*
 526  * TXE SDMA Error flags
 527  */
 528 static struct flag_table sdma_err_status_flags[] = {
 529 /* 0*/  FLAG_ENTRY0("SDmaRpyTagErr",
 530                 SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK),
 531 /* 1*/  FLAG_ENTRY0("SDmaCsrParityErr",
 532                 SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK),
 533 /* 2*/  FLAG_ENTRY0("SDmaPcieReqTrackingUncErr",
 534                 SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK),
 535 /* 3*/  FLAG_ENTRY0("SDmaPcieReqTrackingCorErr",
 536                 SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_COR_ERR_SMASK),
 537 /*04-63 reserved*/
 538 };
 539 
 540 /* TXE SDMA errors that cause an SPC freeze */
 541 #define ALL_SDMA_FREEZE_ERR  \
 542                 (SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK \
 543                 | SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK \
 544                 | SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK)
 545 
 546 /* SendEgressErrInfo bits that correspond to a PortXmitDiscard counter */
 547 #define PORT_DISCARD_EGRESS_ERRS \
 548         (SEND_EGRESS_ERR_INFO_TOO_LONG_IB_PACKET_ERR_SMASK \
 549         | SEND_EGRESS_ERR_INFO_VL_MAPPING_ERR_SMASK \
 550         | SEND_EGRESS_ERR_INFO_VL_ERR_SMASK)
 551 
 552 /*
 553  * TXE Egress Error flags
 554  */
 555 #define SEES(text) SEND_EGRESS_ERR_STATUS_##text##_ERR_SMASK
 556 static struct flag_table egress_err_status_flags[] = {
 557 /* 0*/  FLAG_ENTRY0("TxPktIntegrityMemCorErr", SEES(TX_PKT_INTEGRITY_MEM_COR)),
 558 /* 1*/  FLAG_ENTRY0("TxPktIntegrityMemUncErr", SEES(TX_PKT_INTEGRITY_MEM_UNC)),
 559 /* 2 reserved */
 560 /* 3*/  FLAG_ENTRY0("TxEgressFifoUnderrunOrParityErr",
 561                 SEES(TX_EGRESS_FIFO_UNDERRUN_OR_PARITY)),
 562 /* 4*/  FLAG_ENTRY0("TxLinkdownErr", SEES(TX_LINKDOWN)),
 563 /* 5*/  FLAG_ENTRY0("TxIncorrectLinkStateErr", SEES(TX_INCORRECT_LINK_STATE)),
 564 /* 6 reserved */
 565 /* 7*/  FLAG_ENTRY0("TxPioLaunchIntfParityErr",
 566                 SEES(TX_PIO_LAUNCH_INTF_PARITY)),
 567 /* 8*/  FLAG_ENTRY0("TxSdmaLaunchIntfParityErr",
 568                 SEES(TX_SDMA_LAUNCH_INTF_PARITY)),
 569 /* 9-10 reserved */
 570 /*11*/  FLAG_ENTRY0("TxSbrdCtlStateMachineParityErr",
 571                 SEES(TX_SBRD_CTL_STATE_MACHINE_PARITY)),
 572 /*12*/  FLAG_ENTRY0("TxIllegalVLErr", SEES(TX_ILLEGAL_VL)),
 573 /*13*/  FLAG_ENTRY0("TxLaunchCsrParityErr", SEES(TX_LAUNCH_CSR_PARITY)),
 574 /*14*/  FLAG_ENTRY0("TxSbrdCtlCsrParityErr", SEES(TX_SBRD_CTL_CSR_PARITY)),
 575 /*15*/  FLAG_ENTRY0("TxConfigParityErr", SEES(TX_CONFIG_PARITY)),
 576 /*16*/  FLAG_ENTRY0("TxSdma0DisallowedPacketErr",
 577                 SEES(TX_SDMA0_DISALLOWED_PACKET)),
 578 /*17*/  FLAG_ENTRY0("TxSdma1DisallowedPacketErr",
 579                 SEES(TX_SDMA1_DISALLOWED_PACKET)),
 580 /*18*/  FLAG_ENTRY0("TxSdma2DisallowedPacketErr",
 581                 SEES(TX_SDMA2_DISALLOWED_PACKET)),
 582 /*19*/  FLAG_ENTRY0("TxSdma3DisallowedPacketErr",
 583                 SEES(TX_SDMA3_DISALLOWED_PACKET)),
 584 /*20*/  FLAG_ENTRY0("TxSdma4DisallowedPacketErr",
 585                 SEES(TX_SDMA4_DISALLOWED_PACKET)),
 586 /*21*/  FLAG_ENTRY0("TxSdma5DisallowedPacketErr",
 587                 SEES(TX_SDMA5_DISALLOWED_PACKET)),
 588 /*22*/  FLAG_ENTRY0("TxSdma6DisallowedPacketErr",
 589                 SEES(TX_SDMA6_DISALLOWED_PACKET)),
 590 /*23*/  FLAG_ENTRY0("TxSdma7DisallowedPacketErr",
 591                 SEES(TX_SDMA7_DISALLOWED_PACKET)),
 592 /*24*/  FLAG_ENTRY0("TxSdma8DisallowedPacketErr",
 593                 SEES(TX_SDMA8_DISALLOWED_PACKET)),
 594 /*25*/  FLAG_ENTRY0("TxSdma9DisallowedPacketErr",
 595                 SEES(TX_SDMA9_DISALLOWED_PACKET)),
 596 /*26*/  FLAG_ENTRY0("TxSdma10DisallowedPacketErr",
 597                 SEES(TX_SDMA10_DISALLOWED_PACKET)),
 598 /*27*/  FLAG_ENTRY0("TxSdma11DisallowedPacketErr",
 599                 SEES(TX_SDMA11_DISALLOWED_PACKET)),
 600 /*28*/  FLAG_ENTRY0("TxSdma12DisallowedPacketErr",
 601                 SEES(TX_SDMA12_DISALLOWED_PACKET)),
 602 /*29*/  FLAG_ENTRY0("TxSdma13DisallowedPacketErr",
 603                 SEES(TX_SDMA13_DISALLOWED_PACKET)),
 604 /*30*/  FLAG_ENTRY0("TxSdma14DisallowedPacketErr",
 605                 SEES(TX_SDMA14_DISALLOWED_PACKET)),
 606 /*31*/  FLAG_ENTRY0("TxSdma15DisallowedPacketErr",
 607                 SEES(TX_SDMA15_DISALLOWED_PACKET)),
 608 /*32*/  FLAG_ENTRY0("TxLaunchFifo0UncOrParityErr",
 609                 SEES(TX_LAUNCH_FIFO0_UNC_OR_PARITY)),
 610 /*33*/  FLAG_ENTRY0("TxLaunchFifo1UncOrParityErr",
 611                 SEES(TX_LAUNCH_FIFO1_UNC_OR_PARITY)),
 612 /*34*/  FLAG_ENTRY0("TxLaunchFifo2UncOrParityErr",
 613                 SEES(TX_LAUNCH_FIFO2_UNC_OR_PARITY)),
 614 /*35*/  FLAG_ENTRY0("TxLaunchFifo3UncOrParityErr",
 615                 SEES(TX_LAUNCH_FIFO3_UNC_OR_PARITY)),
 616 /*36*/  FLAG_ENTRY0("TxLaunchFifo4UncOrParityErr",
 617                 SEES(TX_LAUNCH_FIFO4_UNC_OR_PARITY)),
 618 /*37*/  FLAG_ENTRY0("TxLaunchFifo5UncOrParityErr",
 619                 SEES(TX_LAUNCH_FIFO5_UNC_OR_PARITY)),
 620 /*38*/  FLAG_ENTRY0("TxLaunchFifo6UncOrParityErr",
 621                 SEES(TX_LAUNCH_FIFO6_UNC_OR_PARITY)),
 622 /*39*/  FLAG_ENTRY0("TxLaunchFifo7UncOrParityErr",
 623                 SEES(TX_LAUNCH_FIFO7_UNC_OR_PARITY)),
 624 /*40*/  FLAG_ENTRY0("TxLaunchFifo8UncOrParityErr",
 625                 SEES(TX_LAUNCH_FIFO8_UNC_OR_PARITY)),
 626 /*41*/  FLAG_ENTRY0("TxCreditReturnParityErr", SEES(TX_CREDIT_RETURN_PARITY)),
 627 /*42*/  FLAG_ENTRY0("TxSbHdrUncErr", SEES(TX_SB_HDR_UNC)),
 628 /*43*/  FLAG_ENTRY0("TxReadSdmaMemoryUncErr", SEES(TX_READ_SDMA_MEMORY_UNC)),
 629 /*44*/  FLAG_ENTRY0("TxReadPioMemoryUncErr", SEES(TX_READ_PIO_MEMORY_UNC)),
 630 /*45*/  FLAG_ENTRY0("TxEgressFifoUncErr", SEES(TX_EGRESS_FIFO_UNC)),
 631 /*46*/  FLAG_ENTRY0("TxHcrcInsertionErr", SEES(TX_HCRC_INSERTION)),
 632 /*47*/  FLAG_ENTRY0("TxCreditReturnVLErr", SEES(TX_CREDIT_RETURN_VL)),
 633 /*48*/  FLAG_ENTRY0("TxLaunchFifo0CorErr", SEES(TX_LAUNCH_FIFO0_COR)),
 634 /*49*/  FLAG_ENTRY0("TxLaunchFifo1CorErr", SEES(TX_LAUNCH_FIFO1_COR)),
 635 /*50*/  FLAG_ENTRY0("TxLaunchFifo2CorErr", SEES(TX_LAUNCH_FIFO2_COR)),
 636 /*51*/  FLAG_ENTRY0("TxLaunchFifo3CorErr", SEES(TX_LAUNCH_FIFO3_COR)),
 637 /*52*/  FLAG_ENTRY0("TxLaunchFifo4CorErr", SEES(TX_LAUNCH_FIFO4_COR)),
 638 /*53*/  FLAG_ENTRY0("TxLaunchFifo5CorErr", SEES(TX_LAUNCH_FIFO5_COR)),
 639 /*54*/  FLAG_ENTRY0("TxLaunchFifo6CorErr", SEES(TX_LAUNCH_FIFO6_COR)),
 640 /*55*/  FLAG_ENTRY0("TxLaunchFifo7CorErr", SEES(TX_LAUNCH_FIFO7_COR)),
 641 /*56*/  FLAG_ENTRY0("TxLaunchFifo8CorErr", SEES(TX_LAUNCH_FIFO8_COR)),
 642 /*57*/  FLAG_ENTRY0("TxCreditOverrunErr", SEES(TX_CREDIT_OVERRUN)),
 643 /*58*/  FLAG_ENTRY0("TxSbHdrCorErr", SEES(TX_SB_HDR_COR)),
 644 /*59*/  FLAG_ENTRY0("TxReadSdmaMemoryCorErr", SEES(TX_READ_SDMA_MEMORY_COR)),
 645 /*60*/  FLAG_ENTRY0("TxReadPioMemoryCorErr", SEES(TX_READ_PIO_MEMORY_COR)),
 646 /*61*/  FLAG_ENTRY0("TxEgressFifoCorErr", SEES(TX_EGRESS_FIFO_COR)),
 647 /*62*/  FLAG_ENTRY0("TxReadSdmaMemoryCsrUncErr",
 648                 SEES(TX_READ_SDMA_MEMORY_CSR_UNC)),
 649 /*63*/  FLAG_ENTRY0("TxReadPioMemoryCsrUncErr",
 650                 SEES(TX_READ_PIO_MEMORY_CSR_UNC)),
 651 };
 652 
 653 /*
 654  * TXE Egress Error Info flags
 655  */
 656 #define SEEI(text) SEND_EGRESS_ERR_INFO_##text##_ERR_SMASK
 657 static struct flag_table egress_err_info_flags[] = {
 658 /* 0*/  FLAG_ENTRY0("Reserved", 0ull),
 659 /* 1*/  FLAG_ENTRY0("VLErr", SEEI(VL)),
 660 /* 2*/  FLAG_ENTRY0("JobKeyErr", SEEI(JOB_KEY)),
 661 /* 3*/  FLAG_ENTRY0("JobKeyErr", SEEI(JOB_KEY)),
 662 /* 4*/  FLAG_ENTRY0("PartitionKeyErr", SEEI(PARTITION_KEY)),
 663 /* 5*/  FLAG_ENTRY0("SLIDErr", SEEI(SLID)),
 664 /* 6*/  FLAG_ENTRY0("OpcodeErr", SEEI(OPCODE)),
 665 /* 7*/  FLAG_ENTRY0("VLMappingErr", SEEI(VL_MAPPING)),
 666 /* 8*/  FLAG_ENTRY0("RawErr", SEEI(RAW)),
 667 /* 9*/  FLAG_ENTRY0("RawIPv6Err", SEEI(RAW_IPV6)),
 668 /*10*/  FLAG_ENTRY0("GRHErr", SEEI(GRH)),
 669 /*11*/  FLAG_ENTRY0("BypassErr", SEEI(BYPASS)),
 670 /*12*/  FLAG_ENTRY0("KDETHPacketsErr", SEEI(KDETH_PACKETS)),
 671 /*13*/  FLAG_ENTRY0("NonKDETHPacketsErr", SEEI(NON_KDETH_PACKETS)),
 672 /*14*/  FLAG_ENTRY0("TooSmallIBPacketsErr", SEEI(TOO_SMALL_IB_PACKETS)),
 673 /*15*/  FLAG_ENTRY0("TooSmallBypassPacketsErr", SEEI(TOO_SMALL_BYPASS_PACKETS)),
 674 /*16*/  FLAG_ENTRY0("PbcTestErr", SEEI(PBC_TEST)),
 675 /*17*/  FLAG_ENTRY0("BadPktLenErr", SEEI(BAD_PKT_LEN)),
 676 /*18*/  FLAG_ENTRY0("TooLongIBPacketErr", SEEI(TOO_LONG_IB_PACKET)),
 677 /*19*/  FLAG_ENTRY0("TooLongBypassPacketsErr", SEEI(TOO_LONG_BYPASS_PACKETS)),
 678 /*20*/  FLAG_ENTRY0("PbcStaticRateControlErr", SEEI(PBC_STATIC_RATE_CONTROL)),
 679 /*21*/  FLAG_ENTRY0("BypassBadPktLenErr", SEEI(BAD_PKT_LEN)),
 680 };
 681 
 682 /* TXE Egress errors that cause an SPC freeze */
 683 #define ALL_TXE_EGRESS_FREEZE_ERR \
 684         (SEES(TX_EGRESS_FIFO_UNDERRUN_OR_PARITY) \
 685         | SEES(TX_PIO_LAUNCH_INTF_PARITY) \
 686         | SEES(TX_SDMA_LAUNCH_INTF_PARITY) \
 687         | SEES(TX_SBRD_CTL_STATE_MACHINE_PARITY) \
 688         | SEES(TX_LAUNCH_CSR_PARITY) \
 689         | SEES(TX_SBRD_CTL_CSR_PARITY) \
 690         | SEES(TX_CONFIG_PARITY) \
 691         | SEES(TX_LAUNCH_FIFO0_UNC_OR_PARITY) \
 692         | SEES(TX_LAUNCH_FIFO1_UNC_OR_PARITY) \
 693         | SEES(TX_LAUNCH_FIFO2_UNC_OR_PARITY) \
 694         | SEES(TX_LAUNCH_FIFO3_UNC_OR_PARITY) \
 695         | SEES(TX_LAUNCH_FIFO4_UNC_OR_PARITY) \
 696         | SEES(TX_LAUNCH_FIFO5_UNC_OR_PARITY) \
 697         | SEES(TX_LAUNCH_FIFO6_UNC_OR_PARITY) \
 698         | SEES(TX_LAUNCH_FIFO7_UNC_OR_PARITY) \
 699         | SEES(TX_LAUNCH_FIFO8_UNC_OR_PARITY) \
 700         | SEES(TX_CREDIT_RETURN_PARITY))
 701 
 702 /*
 703  * TXE Send error flags
 704  */
 705 #define SES(name) SEND_ERR_STATUS_SEND_##name##_ERR_SMASK
 706 static struct flag_table send_err_status_flags[] = {
 707 /* 0*/  FLAG_ENTRY0("SendCsrParityErr", SES(CSR_PARITY)),
 708 /* 1*/  FLAG_ENTRY0("SendCsrReadBadAddrErr", SES(CSR_READ_BAD_ADDR)),
 709 /* 2*/  FLAG_ENTRY0("SendCsrWriteBadAddrErr", SES(CSR_WRITE_BAD_ADDR))
 710 };
 711 
 712 /*
 713  * TXE Send Context Error flags and consequences
 714  */
 715 static struct flag_table sc_err_status_flags[] = {
 716 /* 0*/  FLAG_ENTRY("InconsistentSop",
 717                 SEC_PACKET_DROPPED | SEC_SC_HALTED,
 718                 SEND_CTXT_ERR_STATUS_PIO_INCONSISTENT_SOP_ERR_SMASK),
 719 /* 1*/  FLAG_ENTRY("DisallowedPacket",
 720                 SEC_PACKET_DROPPED | SEC_SC_HALTED,
 721                 SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK),
 722 /* 2*/  FLAG_ENTRY("WriteCrossesBoundary",
 723                 SEC_WRITE_DROPPED | SEC_SC_HALTED,
 724                 SEND_CTXT_ERR_STATUS_PIO_WRITE_CROSSES_BOUNDARY_ERR_SMASK),
 725 /* 3*/  FLAG_ENTRY("WriteOverflow",
 726                 SEC_WRITE_DROPPED | SEC_SC_HALTED,
 727                 SEND_CTXT_ERR_STATUS_PIO_WRITE_OVERFLOW_ERR_SMASK),
 728 /* 4*/  FLAG_ENTRY("WriteOutOfBounds",
 729                 SEC_WRITE_DROPPED | SEC_SC_HALTED,
 730                 SEND_CTXT_ERR_STATUS_PIO_WRITE_OUT_OF_BOUNDS_ERR_SMASK),
 731 /* 5-63 reserved*/
 732 };
 733 
 734 /*
 735  * RXE Receive Error flags
 736  */
 737 #define RXES(name) RCV_ERR_STATUS_RX_##name##_ERR_SMASK
 738 static struct flag_table rxe_err_status_flags[] = {
 739 /* 0*/  FLAG_ENTRY0("RxDmaCsrCorErr", RXES(DMA_CSR_COR)),
 740 /* 1*/  FLAG_ENTRY0("RxDcIntfParityErr", RXES(DC_INTF_PARITY)),
 741 /* 2*/  FLAG_ENTRY0("RxRcvHdrUncErr", RXES(RCV_HDR_UNC)),
 742 /* 3*/  FLAG_ENTRY0("RxRcvHdrCorErr", RXES(RCV_HDR_COR)),
 743 /* 4*/  FLAG_ENTRY0("RxRcvDataUncErr", RXES(RCV_DATA_UNC)),
 744 /* 5*/  FLAG_ENTRY0("RxRcvDataCorErr", RXES(RCV_DATA_COR)),
 745 /* 6*/  FLAG_ENTRY0("RxRcvQpMapTableUncErr", RXES(RCV_QP_MAP_TABLE_UNC)),
 746 /* 7*/  FLAG_ENTRY0("RxRcvQpMapTableCorErr", RXES(RCV_QP_MAP_TABLE_COR)),
 747 /* 8*/  FLAG_ENTRY0("RxRcvCsrParityErr", RXES(RCV_CSR_PARITY)),
 748 /* 9*/  FLAG_ENTRY0("RxDcSopEopParityErr", RXES(DC_SOP_EOP_PARITY)),
 749 /*10*/  FLAG_ENTRY0("RxDmaFlagUncErr", RXES(DMA_FLAG_UNC)),
 750 /*11*/  FLAG_ENTRY0("RxDmaFlagCorErr", RXES(DMA_FLAG_COR)),
 751 /*12*/  FLAG_ENTRY0("RxRcvFsmEncodingErr", RXES(RCV_FSM_ENCODING)),
 752 /*13*/  FLAG_ENTRY0("RxRbufFreeListUncErr", RXES(RBUF_FREE_LIST_UNC)),
 753 /*14*/  FLAG_ENTRY0("RxRbufFreeListCorErr", RXES(RBUF_FREE_LIST_COR)),
 754 /*15*/  FLAG_ENTRY0("RxRbufLookupDesRegUncErr", RXES(RBUF_LOOKUP_DES_REG_UNC)),
 755 /*16*/  FLAG_ENTRY0("RxRbufLookupDesRegUncCorErr",
 756                 RXES(RBUF_LOOKUP_DES_REG_UNC_COR)),
 757 /*17*/  FLAG_ENTRY0("RxRbufLookupDesUncErr", RXES(RBUF_LOOKUP_DES_UNC)),
 758 /*18*/  FLAG_ENTRY0("RxRbufLookupDesCorErr", RXES(RBUF_LOOKUP_DES_COR)),
 759 /*19*/  FLAG_ENTRY0("RxRbufBlockListReadUncErr",
 760                 RXES(RBUF_BLOCK_LIST_READ_UNC)),
 761 /*20*/  FLAG_ENTRY0("RxRbufBlockListReadCorErr",
 762                 RXES(RBUF_BLOCK_LIST_READ_COR)),
 763 /*21*/  FLAG_ENTRY0("RxRbufCsrQHeadBufNumParityErr",
 764                 RXES(RBUF_CSR_QHEAD_BUF_NUM_PARITY)),
 765 /*22*/  FLAG_ENTRY0("RxRbufCsrQEntCntParityErr",
 766                 RXES(RBUF_CSR_QENT_CNT_PARITY)),
 767 /*23*/  FLAG_ENTRY0("RxRbufCsrQNextBufParityErr",
 768                 RXES(RBUF_CSR_QNEXT_BUF_PARITY)),
 769 /*24*/  FLAG_ENTRY0("RxRbufCsrQVldBitParityErr",
 770                 RXES(RBUF_CSR_QVLD_BIT_PARITY)),
 771 /*25*/  FLAG_ENTRY0("RxRbufCsrQHdPtrParityErr", RXES(RBUF_CSR_QHD_PTR_PARITY)),
 772 /*26*/  FLAG_ENTRY0("RxRbufCsrQTlPtrParityErr", RXES(RBUF_CSR_QTL_PTR_PARITY)),
 773 /*27*/  FLAG_ENTRY0("RxRbufCsrQNumOfPktParityErr",
 774                 RXES(RBUF_CSR_QNUM_OF_PKT_PARITY)),
 775 /*28*/  FLAG_ENTRY0("RxRbufCsrQEOPDWParityErr", RXES(RBUF_CSR_QEOPDW_PARITY)),
 776 /*29*/  FLAG_ENTRY0("RxRbufCtxIdParityErr", RXES(RBUF_CTX_ID_PARITY)),
 777 /*30*/  FLAG_ENTRY0("RxRBufBadLookupErr", RXES(RBUF_BAD_LOOKUP)),
 778 /*31*/  FLAG_ENTRY0("RxRbufFullErr", RXES(RBUF_FULL)),
 779 /*32*/  FLAG_ENTRY0("RxRbufEmptyErr", RXES(RBUF_EMPTY)),
 780 /*33*/  FLAG_ENTRY0("RxRbufFlRdAddrParityErr", RXES(RBUF_FL_RD_ADDR_PARITY)),
 781 /*34*/  FLAG_ENTRY0("RxRbufFlWrAddrParityErr", RXES(RBUF_FL_WR_ADDR_PARITY)),
 782 /*35*/  FLAG_ENTRY0("RxRbufFlInitdoneParityErr",
 783                 RXES(RBUF_FL_INITDONE_PARITY)),
 784 /*36*/  FLAG_ENTRY0("RxRbufFlInitWrAddrParityErr",
 785                 RXES(RBUF_FL_INIT_WR_ADDR_PARITY)),
 786 /*37*/  FLAG_ENTRY0("RxRbufNextFreeBufUncErr", RXES(RBUF_NEXT_FREE_BUF_UNC)),
 787 /*38*/  FLAG_ENTRY0("RxRbufNextFreeBufCorErr", RXES(RBUF_NEXT_FREE_BUF_COR)),
 788 /*39*/  FLAG_ENTRY0("RxLookupDesPart1UncErr", RXES(LOOKUP_DES_PART1_UNC)),
 789 /*40*/  FLAG_ENTRY0("RxLookupDesPart1UncCorErr",
 790                 RXES(LOOKUP_DES_PART1_UNC_COR)),
 791 /*41*/  FLAG_ENTRY0("RxLookupDesPart2ParityErr",
 792                 RXES(LOOKUP_DES_PART2_PARITY)),
 793 /*42*/  FLAG_ENTRY0("RxLookupRcvArrayUncErr", RXES(LOOKUP_RCV_ARRAY_UNC)),
 794 /*43*/  FLAG_ENTRY0("RxLookupRcvArrayCorErr", RXES(LOOKUP_RCV_ARRAY_COR)),
 795 /*44*/  FLAG_ENTRY0("RxLookupCsrParityErr", RXES(LOOKUP_CSR_PARITY)),
 796 /*45*/  FLAG_ENTRY0("RxHqIntrCsrParityErr", RXES(HQ_INTR_CSR_PARITY)),
 797 /*46*/  FLAG_ENTRY0("RxHqIntrFsmErr", RXES(HQ_INTR_FSM)),
 798 /*47*/  FLAG_ENTRY0("RxRbufDescPart1UncErr", RXES(RBUF_DESC_PART1_UNC)),
 799 /*48*/  FLAG_ENTRY0("RxRbufDescPart1CorErr", RXES(RBUF_DESC_PART1_COR)),
 800 /*49*/  FLAG_ENTRY0("RxRbufDescPart2UncErr", RXES(RBUF_DESC_PART2_UNC)),
 801 /*50*/  FLAG_ENTRY0("RxRbufDescPart2CorErr", RXES(RBUF_DESC_PART2_COR)),
 802 /*51*/  FLAG_ENTRY0("RxDmaHdrFifoRdUncErr", RXES(DMA_HDR_FIFO_RD_UNC)),
 803 /*52*/  FLAG_ENTRY0("RxDmaHdrFifoRdCorErr", RXES(DMA_HDR_FIFO_RD_COR)),
 804 /*53*/  FLAG_ENTRY0("RxDmaDataFifoRdUncErr", RXES(DMA_DATA_FIFO_RD_UNC)),
 805 /*54*/  FLAG_ENTRY0("RxDmaDataFifoRdCorErr", RXES(DMA_DATA_FIFO_RD_COR)),
 806 /*55*/  FLAG_ENTRY0("RxRbufDataUncErr", RXES(RBUF_DATA_UNC)),
 807 /*56*/  FLAG_ENTRY0("RxRbufDataCorErr", RXES(RBUF_DATA_COR)),
 808 /*57*/  FLAG_ENTRY0("RxDmaCsrParityErr", RXES(DMA_CSR_PARITY)),
 809 /*58*/  FLAG_ENTRY0("RxDmaEqFsmEncodingErr", RXES(DMA_EQ_FSM_ENCODING)),
 810 /*59*/  FLAG_ENTRY0("RxDmaDqFsmEncodingErr", RXES(DMA_DQ_FSM_ENCODING)),
 811 /*60*/  FLAG_ENTRY0("RxDmaCsrUncErr", RXES(DMA_CSR_UNC)),
 812 /*61*/  FLAG_ENTRY0("RxCsrReadBadAddrErr", RXES(CSR_READ_BAD_ADDR)),
 813 /*62*/  FLAG_ENTRY0("RxCsrWriteBadAddrErr", RXES(CSR_WRITE_BAD_ADDR)),
 814 /*63*/  FLAG_ENTRY0("RxCsrParityErr", RXES(CSR_PARITY))
 815 };
 816 
 817 /* RXE errors that will trigger an SPC freeze */
 818 #define ALL_RXE_FREEZE_ERR  \
 819         (RCV_ERR_STATUS_RX_RCV_QP_MAP_TABLE_UNC_ERR_SMASK \
 820         | RCV_ERR_STATUS_RX_RCV_CSR_PARITY_ERR_SMASK \
 821         | RCV_ERR_STATUS_RX_DMA_FLAG_UNC_ERR_SMASK \
 822         | RCV_ERR_STATUS_RX_RCV_FSM_ENCODING_ERR_SMASK \
 823         | RCV_ERR_STATUS_RX_RBUF_FREE_LIST_UNC_ERR_SMASK \
 824         | RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_ERR_SMASK \
 825         | RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR_SMASK \
 826         | RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_UNC_ERR_SMASK \
 827         | RCV_ERR_STATUS_RX_RBUF_BLOCK_LIST_READ_UNC_ERR_SMASK \
 828         | RCV_ERR_STATUS_RX_RBUF_CSR_QHEAD_BUF_NUM_PARITY_ERR_SMASK \
 829         | RCV_ERR_STATUS_RX_RBUF_CSR_QENT_CNT_PARITY_ERR_SMASK \
 830         | RCV_ERR_STATUS_RX_RBUF_CSR_QNEXT_BUF_PARITY_ERR_SMASK \
 831         | RCV_ERR_STATUS_RX_RBUF_CSR_QVLD_BIT_PARITY_ERR_SMASK \
 832         | RCV_ERR_STATUS_RX_RBUF_CSR_QHD_PTR_PARITY_ERR_SMASK \
 833         | RCV_ERR_STATUS_RX_RBUF_CSR_QTL_PTR_PARITY_ERR_SMASK \
 834         | RCV_ERR_STATUS_RX_RBUF_CSR_QNUM_OF_PKT_PARITY_ERR_SMASK \
 835         | RCV_ERR_STATUS_RX_RBUF_CSR_QEOPDW_PARITY_ERR_SMASK \
 836         | RCV_ERR_STATUS_RX_RBUF_CTX_ID_PARITY_ERR_SMASK \
 837         | RCV_ERR_STATUS_RX_RBUF_BAD_LOOKUP_ERR_SMASK \
 838         | RCV_ERR_STATUS_RX_RBUF_FULL_ERR_SMASK \
 839         | RCV_ERR_STATUS_RX_RBUF_EMPTY_ERR_SMASK \
 840         | RCV_ERR_STATUS_RX_RBUF_FL_RD_ADDR_PARITY_ERR_SMASK \
 841         | RCV_ERR_STATUS_RX_RBUF_FL_WR_ADDR_PARITY_ERR_SMASK \
 842         | RCV_ERR_STATUS_RX_RBUF_FL_INITDONE_PARITY_ERR_SMASK \
 843         | RCV_ERR_STATUS_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR_SMASK \
 844         | RCV_ERR_STATUS_RX_RBUF_NEXT_FREE_BUF_UNC_ERR_SMASK \
 845         | RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_ERR_SMASK \
 846         | RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_COR_ERR_SMASK \
 847         | RCV_ERR_STATUS_RX_LOOKUP_DES_PART2_PARITY_ERR_SMASK \
 848         | RCV_ERR_STATUS_RX_LOOKUP_RCV_ARRAY_UNC_ERR_SMASK \
 849         | RCV_ERR_STATUS_RX_LOOKUP_CSR_PARITY_ERR_SMASK \
 850         | RCV_ERR_STATUS_RX_HQ_INTR_CSR_PARITY_ERR_SMASK \
 851         | RCV_ERR_STATUS_RX_HQ_INTR_FSM_ERR_SMASK \
 852         | RCV_ERR_STATUS_RX_RBUF_DESC_PART1_UNC_ERR_SMASK \
 853         | RCV_ERR_STATUS_RX_RBUF_DESC_PART1_COR_ERR_SMASK \
 854         | RCV_ERR_STATUS_RX_RBUF_DESC_PART2_UNC_ERR_SMASK \
 855         | RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK \
 856         | RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK \
 857         | RCV_ERR_STATUS_RX_RBUF_DATA_UNC_ERR_SMASK \
 858         | RCV_ERR_STATUS_RX_DMA_CSR_PARITY_ERR_SMASK \
 859         | RCV_ERR_STATUS_RX_DMA_EQ_FSM_ENCODING_ERR_SMASK \
 860         | RCV_ERR_STATUS_RX_DMA_DQ_FSM_ENCODING_ERR_SMASK \
 861         | RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK \
 862         | RCV_ERR_STATUS_RX_CSR_PARITY_ERR_SMASK)
 863 
 864 #define RXE_FREEZE_ABORT_MASK \
 865         (RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK | \
 866         RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK | \
 867         RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK)
 868 
 869 /*
 870  * DCC Error Flags
 871  */
 872 #define DCCE(name) DCC_ERR_FLG_##name##_SMASK
 873 static struct flag_table dcc_err_flags[] = {
 874         FLAG_ENTRY0("bad_l2_err", DCCE(BAD_L2_ERR)),
 875         FLAG_ENTRY0("bad_sc_err", DCCE(BAD_SC_ERR)),
 876         FLAG_ENTRY0("bad_mid_tail_err", DCCE(BAD_MID_TAIL_ERR)),
 877         FLAG_ENTRY0("bad_preemption_err", DCCE(BAD_PREEMPTION_ERR)),
 878         FLAG_ENTRY0("preemption_err", DCCE(PREEMPTION_ERR)),
 879         FLAG_ENTRY0("preemptionvl15_err", DCCE(PREEMPTIONVL15_ERR)),
 880         FLAG_ENTRY0("bad_vl_marker_err", DCCE(BAD_VL_MARKER_ERR)),
 881         FLAG_ENTRY0("bad_dlid_target_err", DCCE(BAD_DLID_TARGET_ERR)),
 882         FLAG_ENTRY0("bad_lver_err", DCCE(BAD_LVER_ERR)),
 883         FLAG_ENTRY0("uncorrectable_err", DCCE(UNCORRECTABLE_ERR)),
 884         FLAG_ENTRY0("bad_crdt_ack_err", DCCE(BAD_CRDT_ACK_ERR)),
 885         FLAG_ENTRY0("unsup_pkt_type", DCCE(UNSUP_PKT_TYPE)),
 886         FLAG_ENTRY0("bad_ctrl_flit_err", DCCE(BAD_CTRL_FLIT_ERR)),
 887         FLAG_ENTRY0("event_cntr_parity_err", DCCE(EVENT_CNTR_PARITY_ERR)),
 888         FLAG_ENTRY0("event_cntr_rollover_err", DCCE(EVENT_CNTR_ROLLOVER_ERR)),
 889         FLAG_ENTRY0("link_err", DCCE(LINK_ERR)),
 890         FLAG_ENTRY0("misc_cntr_rollover_err", DCCE(MISC_CNTR_ROLLOVER_ERR)),
 891         FLAG_ENTRY0("bad_ctrl_dist_err", DCCE(BAD_CTRL_DIST_ERR)),
 892         FLAG_ENTRY0("bad_tail_dist_err", DCCE(BAD_TAIL_DIST_ERR)),
 893         FLAG_ENTRY0("bad_head_dist_err", DCCE(BAD_HEAD_DIST_ERR)),
 894         FLAG_ENTRY0("nonvl15_state_err", DCCE(NONVL15_STATE_ERR)),
 895         FLAG_ENTRY0("vl15_multi_err", DCCE(VL15_MULTI_ERR)),
 896         FLAG_ENTRY0("bad_pkt_length_err", DCCE(BAD_PKT_LENGTH_ERR)),
 897         FLAG_ENTRY0("unsup_vl_err", DCCE(UNSUP_VL_ERR)),
 898         FLAG_ENTRY0("perm_nvl15_err", DCCE(PERM_NVL15_ERR)),
 899         FLAG_ENTRY0("slid_zero_err", DCCE(SLID_ZERO_ERR)),
 900         FLAG_ENTRY0("dlid_zero_err", DCCE(DLID_ZERO_ERR)),
 901         FLAG_ENTRY0("length_mtu_err", DCCE(LENGTH_MTU_ERR)),
 902         FLAG_ENTRY0("rx_early_drop_err", DCCE(RX_EARLY_DROP_ERR)),
 903         FLAG_ENTRY0("late_short_err", DCCE(LATE_SHORT_ERR)),
 904         FLAG_ENTRY0("late_long_err", DCCE(LATE_LONG_ERR)),
 905         FLAG_ENTRY0("late_ebp_err", DCCE(LATE_EBP_ERR)),
 906         FLAG_ENTRY0("fpe_tx_fifo_ovflw_err", DCCE(FPE_TX_FIFO_OVFLW_ERR)),
 907         FLAG_ENTRY0("fpe_tx_fifo_unflw_err", DCCE(FPE_TX_FIFO_UNFLW_ERR)),
 908         FLAG_ENTRY0("csr_access_blocked_host", DCCE(CSR_ACCESS_BLOCKED_HOST)),
 909         FLAG_ENTRY0("csr_access_blocked_uc", DCCE(CSR_ACCESS_BLOCKED_UC)),
 910         FLAG_ENTRY0("tx_ctrl_parity_err", DCCE(TX_CTRL_PARITY_ERR)),
 911         FLAG_ENTRY0("tx_ctrl_parity_mbe_err", DCCE(TX_CTRL_PARITY_MBE_ERR)),
 912         FLAG_ENTRY0("tx_sc_parity_err", DCCE(TX_SC_PARITY_ERR)),
 913         FLAG_ENTRY0("rx_ctrl_parity_mbe_err", DCCE(RX_CTRL_PARITY_MBE_ERR)),
 914         FLAG_ENTRY0("csr_parity_err", DCCE(CSR_PARITY_ERR)),
 915         FLAG_ENTRY0("csr_inval_addr", DCCE(CSR_INVAL_ADDR)),
 916         FLAG_ENTRY0("tx_byte_shft_parity_err", DCCE(TX_BYTE_SHFT_PARITY_ERR)),
 917         FLAG_ENTRY0("rx_byte_shft_parity_err", DCCE(RX_BYTE_SHFT_PARITY_ERR)),
 918         FLAG_ENTRY0("fmconfig_err", DCCE(FMCONFIG_ERR)),
 919         FLAG_ENTRY0("rcvport_err", DCCE(RCVPORT_ERR)),
 920 };
 921 
 922 /*
 923  * LCB error flags
 924  */
 925 #define LCBE(name) DC_LCB_ERR_FLG_##name##_SMASK
 926 static struct flag_table lcb_err_flags[] = {
 927 /* 0*/  FLAG_ENTRY0("CSR_PARITY_ERR", LCBE(CSR_PARITY_ERR)),
 928 /* 1*/  FLAG_ENTRY0("INVALID_CSR_ADDR", LCBE(INVALID_CSR_ADDR)),
 929 /* 2*/  FLAG_ENTRY0("RST_FOR_FAILED_DESKEW", LCBE(RST_FOR_FAILED_DESKEW)),
 930 /* 3*/  FLAG_ENTRY0("ALL_LNS_FAILED_REINIT_TEST",
 931                 LCBE(ALL_LNS_FAILED_REINIT_TEST)),
 932 /* 4*/  FLAG_ENTRY0("LOST_REINIT_STALL_OR_TOS", LCBE(LOST_REINIT_STALL_OR_TOS)),
 933 /* 5*/  FLAG_ENTRY0("TX_LESS_THAN_FOUR_LNS", LCBE(TX_LESS_THAN_FOUR_LNS)),
 934 /* 6*/  FLAG_ENTRY0("RX_LESS_THAN_FOUR_LNS", LCBE(RX_LESS_THAN_FOUR_LNS)),
 935 /* 7*/  FLAG_ENTRY0("SEQ_CRC_ERR", LCBE(SEQ_CRC_ERR)),
 936 /* 8*/  FLAG_ENTRY0("REINIT_FROM_PEER", LCBE(REINIT_FROM_PEER)),
 937 /* 9*/  FLAG_ENTRY0("REINIT_FOR_LN_DEGRADE", LCBE(REINIT_FOR_LN_DEGRADE)),
 938 /*10*/  FLAG_ENTRY0("CRC_ERR_CNT_HIT_LIMIT", LCBE(CRC_ERR_CNT_HIT_LIMIT)),
 939 /*11*/  FLAG_ENTRY0("RCLK_STOPPED", LCBE(RCLK_STOPPED)),
 940 /*12*/  FLAG_ENTRY0("UNEXPECTED_REPLAY_MARKER", LCBE(UNEXPECTED_REPLAY_MARKER)),
 941 /*13*/  FLAG_ENTRY0("UNEXPECTED_ROUND_TRIP_MARKER",
 942                 LCBE(UNEXPECTED_ROUND_TRIP_MARKER)),
 943 /*14*/  FLAG_ENTRY0("ILLEGAL_NULL_LTP", LCBE(ILLEGAL_NULL_LTP)),
 944 /*15*/  FLAG_ENTRY0("ILLEGAL_FLIT_ENCODING", LCBE(ILLEGAL_FLIT_ENCODING)),
 945 /*16*/  FLAG_ENTRY0("FLIT_INPUT_BUF_OFLW", LCBE(FLIT_INPUT_BUF_OFLW)),
 946 /*17*/  FLAG_ENTRY0("VL_ACK_INPUT_BUF_OFLW", LCBE(VL_ACK_INPUT_BUF_OFLW)),
 947 /*18*/  FLAG_ENTRY0("VL_ACK_INPUT_PARITY_ERR", LCBE(VL_ACK_INPUT_PARITY_ERR)),
 948 /*19*/  FLAG_ENTRY0("VL_ACK_INPUT_WRONG_CRC_MODE",
 949                 LCBE(VL_ACK_INPUT_WRONG_CRC_MODE)),
 950 /*20*/  FLAG_ENTRY0("FLIT_INPUT_BUF_MBE", LCBE(FLIT_INPUT_BUF_MBE)),
 951 /*21*/  FLAG_ENTRY0("FLIT_INPUT_BUF_SBE", LCBE(FLIT_INPUT_BUF_SBE)),
 952 /*22*/  FLAG_ENTRY0("REPLAY_BUF_MBE", LCBE(REPLAY_BUF_MBE)),
 953 /*23*/  FLAG_ENTRY0("REPLAY_BUF_SBE", LCBE(REPLAY_BUF_SBE)),
 954 /*24*/  FLAG_ENTRY0("CREDIT_RETURN_FLIT_MBE", LCBE(CREDIT_RETURN_FLIT_MBE)),
 955 /*25*/  FLAG_ENTRY0("RST_FOR_LINK_TIMEOUT", LCBE(RST_FOR_LINK_TIMEOUT)),
 956 /*26*/  FLAG_ENTRY0("RST_FOR_INCOMPLT_RND_TRIP",
 957                 LCBE(RST_FOR_INCOMPLT_RND_TRIP)),
 958 /*27*/  FLAG_ENTRY0("HOLD_REINIT", LCBE(HOLD_REINIT)),
 959 /*28*/  FLAG_ENTRY0("NEG_EDGE_LINK_TRANSFER_ACTIVE",
 960                 LCBE(NEG_EDGE_LINK_TRANSFER_ACTIVE)),
 961 /*29*/  FLAG_ENTRY0("REDUNDANT_FLIT_PARITY_ERR",
 962                 LCBE(REDUNDANT_FLIT_PARITY_ERR))
 963 };
 964 
 965 /*
 966  * DC8051 Error Flags
 967  */
 968 #define D8E(name) DC_DC8051_ERR_FLG_##name##_SMASK
 969 static struct flag_table dc8051_err_flags[] = {
 970         FLAG_ENTRY0("SET_BY_8051", D8E(SET_BY_8051)),
 971         FLAG_ENTRY0("LOST_8051_HEART_BEAT", D8E(LOST_8051_HEART_BEAT)),
 972         FLAG_ENTRY0("CRAM_MBE", D8E(CRAM_MBE)),
 973         FLAG_ENTRY0("CRAM_SBE", D8E(CRAM_SBE)),
 974         FLAG_ENTRY0("DRAM_MBE", D8E(DRAM_MBE)),
 975         FLAG_ENTRY0("DRAM_SBE", D8E(DRAM_SBE)),
 976         FLAG_ENTRY0("IRAM_MBE", D8E(IRAM_MBE)),
 977         FLAG_ENTRY0("IRAM_SBE", D8E(IRAM_SBE)),
 978         FLAG_ENTRY0("UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES",
 979                     D8E(UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES)),
 980         FLAG_ENTRY0("INVALID_CSR_ADDR", D8E(INVALID_CSR_ADDR)),
 981 };
 982 
 983 /*
 984  * DC8051 Information Error flags
 985  *
 986  * Flags in DC8051_DBG_ERR_INFO_SET_BY_8051.ERROR field.
 987  */
 988 static struct flag_table dc8051_info_err_flags[] = {
 989         FLAG_ENTRY0("Spico ROM check failed",  SPICO_ROM_FAILED),
 990         FLAG_ENTRY0("Unknown frame received",  UNKNOWN_FRAME),
 991         FLAG_ENTRY0("Target BER not met",      TARGET_BER_NOT_MET),
 992         FLAG_ENTRY0("Serdes internal loopback failure",
 993                     FAILED_SERDES_INTERNAL_LOOPBACK),
 994         FLAG_ENTRY0("Failed SerDes init",      FAILED_SERDES_INIT),
 995         FLAG_ENTRY0("Failed LNI(Polling)",     FAILED_LNI_POLLING),
 996         FLAG_ENTRY0("Failed LNI(Debounce)",    FAILED_LNI_DEBOUNCE),
 997         FLAG_ENTRY0("Failed LNI(EstbComm)",    FAILED_LNI_ESTBCOMM),
 998         FLAG_ENTRY0("Failed LNI(OptEq)",       FAILED_LNI_OPTEQ),
 999         FLAG_ENTRY0("Failed LNI(VerifyCap_1)", FAILED_LNI_VERIFY_CAP1),
1000         FLAG_ENTRY0("Failed LNI(VerifyCap_2)", FAILED_LNI_VERIFY_CAP2),
1001         FLAG_ENTRY0("Failed LNI(ConfigLT)",    FAILED_LNI_CONFIGLT),
1002         FLAG_ENTRY0("Host Handshake Timeout",  HOST_HANDSHAKE_TIMEOUT),
1003         FLAG_ENTRY0("External Device Request Timeout",
1004                     EXTERNAL_DEVICE_REQ_TIMEOUT),
1005 };
1006 
1007 /*
1008  * DC8051 Information Host Information flags
1009  *
1010  * Flags in DC8051_DBG_ERR_INFO_SET_BY_8051.HOST_MSG field.
1011  */
1012 static struct flag_table dc8051_info_host_msg_flags[] = {
1013         FLAG_ENTRY0("Host request done", 0x0001),
1014         FLAG_ENTRY0("BC PWR_MGM message", 0x0002),
1015         FLAG_ENTRY0("BC SMA message", 0x0004),
1016         FLAG_ENTRY0("BC Unknown message (BCC)", 0x0008),
1017         FLAG_ENTRY0("BC Unknown message (LCB)", 0x0010),
1018         FLAG_ENTRY0("External device config request", 0x0020),
1019         FLAG_ENTRY0("VerifyCap all frames received", 0x0040),
1020         FLAG_ENTRY0("LinkUp achieved", 0x0080),
1021         FLAG_ENTRY0("Link going down", 0x0100),
1022         FLAG_ENTRY0("Link width downgraded", 0x0200),
1023 };
1024 
1025 static u32 encoded_size(u32 size);
1026 static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate);
1027 static int set_physical_link_state(struct hfi1_devdata *dd, u64 state);
1028 static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management,
1029                                u8 *continuous);
1030 static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z,
1031                                   u8 *vcu, u16 *vl15buf, u8 *crc_sizes);
1032 static void read_vc_remote_link_width(struct hfi1_devdata *dd,
1033                                       u8 *remote_tx_rate, u16 *link_widths);
1034 static void read_vc_local_link_mode(struct hfi1_devdata *dd, u8 *misc_bits,
1035                                     u8 *flag_bits, u16 *link_widths);
1036 static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
1037                                   u8 *device_rev);
1038 static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx);
1039 static int read_tx_settings(struct hfi1_devdata *dd, u8 *enable_lane_tx,
1040                             u8 *tx_polarity_inversion,
1041                             u8 *rx_polarity_inversion, u8 *max_rate);
1042 static void handle_sdma_eng_err(struct hfi1_devdata *dd,
1043                                 unsigned int context, u64 err_status);
1044 static void handle_qsfp_int(struct hfi1_devdata *dd, u32 source, u64 reg);
1045 static void handle_dcc_err(struct hfi1_devdata *dd,
1046                            unsigned int context, u64 err_status);
1047 static void handle_lcb_err(struct hfi1_devdata *dd,
1048                            unsigned int context, u64 err_status);
1049 static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg);
1050 static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1051 static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1052 static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1053 static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1054 static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1055 static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1056 static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1057 static void set_partition_keys(struct hfi1_pportdata *ppd);
1058 static const char *link_state_name(u32 state);
1059 static const char *link_state_reason_name(struct hfi1_pportdata *ppd,
1060                                           u32 state);
1061 static int do_8051_command(struct hfi1_devdata *dd, u32 type, u64 in_data,
1062                            u64 *out_data);
1063 static int read_idle_sma(struct hfi1_devdata *dd, u64 *data);
1064 static int thermal_init(struct hfi1_devdata *dd);
1065 
1066 static void update_statusp(struct hfi1_pportdata *ppd, u32 state);
1067 static int wait_phys_link_offline_substates(struct hfi1_pportdata *ppd,
1068                                             int msecs);
1069 static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
1070                                   int msecs);
1071 static void log_state_transition(struct hfi1_pportdata *ppd, u32 state);
1072 static void log_physical_state(struct hfi1_pportdata *ppd, u32 state);
1073 static int wait_physical_linkstate(struct hfi1_pportdata *ppd, u32 state,
1074                                    int msecs);
1075 static int wait_phys_link_out_of_offline(struct hfi1_pportdata *ppd,
1076                                          int msecs);
1077 static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc);
1078 static void read_link_down_reason(struct hfi1_devdata *dd, u8 *ldr);
1079 static void handle_temp_err(struct hfi1_devdata *dd);
1080 static void dc_shutdown(struct hfi1_devdata *dd);
1081 static void dc_start(struct hfi1_devdata *dd);
1082 static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp,
1083                            unsigned int *np);
1084 static void clear_full_mgmt_pkey(struct hfi1_pportdata *ppd);
1085 static int wait_link_transfer_active(struct hfi1_devdata *dd, int wait_ms);
1086 static void clear_rsm_rule(struct hfi1_devdata *dd, u8 rule_index);
1087 static void update_xmit_counters(struct hfi1_pportdata *ppd, u16 link_width);
1088 
1089 /*
1090  * Error interrupt table entry.  This is used as input to the interrupt
1091  * "clear down" routine used for all second tier error interrupt register.
1092  * Second tier interrupt registers have a single bit representing them
1093  * in the top-level CceIntStatus.
1094  */
1095 struct err_reg_info {
1096         u32 status;             /* status CSR offset */
1097         u32 clear;              /* clear CSR offset */
1098         u32 mask;               /* mask CSR offset */
1099         void (*handler)(struct hfi1_devdata *dd, u32 source, u64 reg);
1100         const char *desc;
1101 };
1102 
1103 #define NUM_MISC_ERRS (IS_GENERAL_ERR_END + 1 - IS_GENERAL_ERR_START)
1104 #define NUM_DC_ERRS (IS_DC_END + 1 - IS_DC_START)
1105 #define NUM_VARIOUS (IS_VARIOUS_END + 1 - IS_VARIOUS_START)
1106 
1107 /*
1108  * Helpers for building HFI and DC error interrupt table entries.  Different
1109  * helpers are needed because of inconsistent register names.
1110  */
1111 #define EE(reg, handler, desc) \
1112         { reg##_STATUS, reg##_CLEAR, reg##_MASK, \
1113                 handler, desc }
1114 #define DC_EE1(reg, handler, desc) \
1115         { reg##_FLG, reg##_FLG_CLR, reg##_FLG_EN, handler, desc }
1116 #define DC_EE2(reg, handler, desc) \
1117         { reg##_FLG, reg##_CLR, reg##_EN, handler, desc }
1118 
1119 /*
1120  * Table of the "misc" grouping of error interrupts.  Each entry refers to
1121  * another register containing more information.
1122  */
1123 static const struct err_reg_info misc_errs[NUM_MISC_ERRS] = {
1124 /* 0*/  EE(CCE_ERR,             handle_cce_err,    "CceErr"),
1125 /* 1*/  EE(RCV_ERR,             handle_rxe_err,    "RxeErr"),
1126 /* 2*/  EE(MISC_ERR,    handle_misc_err,   "MiscErr"),
1127 /* 3*/  { 0, 0, 0, NULL }, /* reserved */
1128 /* 4*/  EE(SEND_PIO_ERR,    handle_pio_err,    "PioErr"),
1129 /* 5*/  EE(SEND_DMA_ERR,    handle_sdma_err,   "SDmaErr"),
1130 /* 6*/  EE(SEND_EGRESS_ERR, handle_egress_err, "EgressErr"),
1131 /* 7*/  EE(SEND_ERR,    handle_txe_err,    "TxeErr")
1132         /* the rest are reserved */
1133 };
1134 
1135 /*
1136  * Index into the Various section of the interrupt sources
1137  * corresponding to the Critical Temperature interrupt.
1138  */
1139 #define TCRIT_INT_SOURCE 4
1140 
1141 /*
1142  * SDMA error interrupt entry - refers to another register containing more
1143  * information.
1144  */
1145 static const struct err_reg_info sdma_eng_err =
1146         EE(SEND_DMA_ENG_ERR, handle_sdma_eng_err, "SDmaEngErr");
1147 
1148 static const struct err_reg_info various_err[NUM_VARIOUS] = {
1149 /* 0*/  { 0, 0, 0, NULL }, /* PbcInt */
1150 /* 1*/  { 0, 0, 0, NULL }, /* GpioAssertInt */
1151 /* 2*/  EE(ASIC_QSFP1,  handle_qsfp_int,        "QSFP1"),
1152 /* 3*/  EE(ASIC_QSFP2,  handle_qsfp_int,        "QSFP2"),
1153 /* 4*/  { 0, 0, 0, NULL }, /* TCritInt */
1154         /* rest are reserved */
1155 };
1156 
1157 /*
1158  * The DC encoding of mtu_cap for 10K MTU in the DCC_CFG_PORT_CONFIG
1159  * register can not be derived from the MTU value because 10K is not
1160  * a power of 2. Therefore, we need a constant. Everything else can
1161  * be calculated.
1162  */
1163 #define DCC_CFG_PORT_MTU_CAP_10240 7
1164 
1165 /*
1166  * Table of the DC grouping of error interrupts.  Each entry refers to
1167  * another register containing more information.
1168  */
1169 static const struct err_reg_info dc_errs[NUM_DC_ERRS] = {
1170 /* 0*/  DC_EE1(DCC_ERR,         handle_dcc_err,        "DCC Err"),
1171 /* 1*/  DC_EE2(DC_LCB_ERR,      handle_lcb_err,        "LCB Err"),
1172 /* 2*/  DC_EE2(DC_DC8051_ERR,   handle_8051_interrupt, "DC8051 Interrupt"),
1173 /* 3*/  /* dc_lbm_int - special, see is_dc_int() */
1174         /* the rest are reserved */
1175 };
1176 
1177 struct cntr_entry {
1178         /*
1179          * counter name
1180          */
1181         char *name;
1182 
1183         /*
1184          * csr to read for name (if applicable)
1185          */
1186         u64 csr;
1187 
1188         /*
1189          * offset into dd or ppd to store the counter's value
1190          */
1191         int offset;
1192 
1193         /*
1194          * flags
1195          */
1196         u8 flags;
1197 
1198         /*
1199          * accessor for stat element, context either dd or ppd
1200          */
1201         u64 (*rw_cntr)(const struct cntr_entry *, void *context, int vl,
1202                        int mode, u64 data);
1203 };
1204 
1205 #define C_RCV_HDR_OVF_FIRST C_RCV_HDR_OVF_0
1206 #define C_RCV_HDR_OVF_LAST C_RCV_HDR_OVF_159
1207 
1208 #define CNTR_ELEM(name, csr, offset, flags, accessor) \
1209 { \
1210         name, \
1211         csr, \
1212         offset, \
1213         flags, \
1214         accessor \
1215 }
1216 
1217 /* 32bit RXE */
1218 #define RXE32_PORT_CNTR_ELEM(name, counter, flags) \
1219 CNTR_ELEM(#name, \
1220           (counter * 8 + RCV_COUNTER_ARRAY32), \
1221           0, flags | CNTR_32BIT, \
1222           port_access_u32_csr)
1223 
1224 #define RXE32_DEV_CNTR_ELEM(name, counter, flags) \
1225 CNTR_ELEM(#name, \
1226           (counter * 8 + RCV_COUNTER_ARRAY32), \
1227           0, flags | CNTR_32BIT, \
1228           dev_access_u32_csr)
1229 
1230 /* 64bit RXE */
1231 #define RXE64_PORT_CNTR_ELEM(name, counter, flags) \
1232 CNTR_ELEM(#name, \
1233           (counter * 8 + RCV_COUNTER_ARRAY64), \
1234           0, flags, \
1235           port_access_u64_csr)
1236 
1237 #define RXE64_DEV_CNTR_ELEM(name, counter, flags) \
1238 CNTR_ELEM(#name, \
1239           (counter * 8 + RCV_COUNTER_ARRAY64), \
1240           0, flags, \
1241           dev_access_u64_csr)
1242 
1243 #define OVR_LBL(ctx) C_RCV_HDR_OVF_ ## ctx
1244 #define OVR_ELM(ctx) \
1245 CNTR_ELEM("RcvHdrOvr" #ctx, \
1246           (RCV_HDR_OVFL_CNT + ctx * 0x100), \
1247           0, CNTR_NORMAL, port_access_u64_csr)
1248 
1249 /* 32bit TXE */
1250 #define TXE32_PORT_CNTR_ELEM(name, counter, flags) \
1251 CNTR_ELEM(#name, \
1252           (counter * 8 + SEND_COUNTER_ARRAY32), \
1253           0, flags | CNTR_32BIT, \
1254           port_access_u32_csr)
1255 
1256 /* 64bit TXE */
1257 #define TXE64_PORT_CNTR_ELEM(name, counter, flags) \
1258 CNTR_ELEM(#name, \
1259           (counter * 8 + SEND_COUNTER_ARRAY64), \
1260           0, flags, \
1261           port_access_u64_csr)
1262 
1263 # define TX64_DEV_CNTR_ELEM(name, counter, flags) \
1264 CNTR_ELEM(#name,\
1265           counter * 8 + SEND_COUNTER_ARRAY64, \
1266           0, \
1267           flags, \
1268           dev_access_u64_csr)
1269 
1270 /* CCE */
1271 #define CCE_PERF_DEV_CNTR_ELEM(name, counter, flags) \
1272 CNTR_ELEM(#name, \
1273           (counter * 8 + CCE_COUNTER_ARRAY32), \
1274           0, flags | CNTR_32BIT, \
1275           dev_access_u32_csr)
1276 
1277 #define CCE_INT_DEV_CNTR_ELEM(name, counter, flags) \
1278 CNTR_ELEM(#name, \
1279           (counter * 8 + CCE_INT_COUNTER_ARRAY32), \
1280           0, flags | CNTR_32BIT, \
1281           dev_access_u32_csr)
1282 
1283 /* DC */
1284 #define DC_PERF_CNTR(name, counter, flags) \
1285 CNTR_ELEM(#name, \
1286           counter, \
1287           0, \
1288           flags, \
1289           dev_access_u64_csr)
1290 
1291 #define DC_PERF_CNTR_LCB(name, counter, flags) \
1292 CNTR_ELEM(#name, \
1293           counter, \
1294           0, \
1295           flags, \
1296           dc_access_lcb_cntr)
1297 
1298 /* ibp counters */
1299 #define SW_IBP_CNTR(name, cntr) \
1300 CNTR_ELEM(#name, \
1301           0, \
1302           0, \
1303           CNTR_SYNTH, \
1304           access_ibp_##cntr)
1305 
1306 /**
1307  * hfi_addr_from_offset - return addr for readq/writeq
1308  * @dd - the dd device
1309  * @offset - the offset of the CSR within bar0
1310  *
1311  * This routine selects the appropriate base address
1312  * based on the indicated offset.
1313  */
1314 static inline void __iomem *hfi1_addr_from_offset(
1315         const struct hfi1_devdata *dd,
1316         u32 offset)
1317 {
1318         if (offset >= dd->base2_start)
1319                 return dd->kregbase2 + (offset - dd->base2_start);
1320         return dd->kregbase1 + offset;
1321 }
1322 
1323 /**
1324  * read_csr - read CSR at the indicated offset
1325  * @dd - the dd device
1326  * @offset - the offset of the CSR within bar0
1327  *
1328  * Return: the value read or all FF's if there
1329  * is no mapping
1330  */
1331 u64 read_csr(const struct hfi1_devdata *dd, u32 offset)
1332 {
1333         if (dd->flags & HFI1_PRESENT)
1334                 return readq(hfi1_addr_from_offset(dd, offset));
1335         return -1;
1336 }
1337 
1338 /**
1339  * write_csr - write CSR at the indicated offset
1340  * @dd - the dd device
1341  * @offset - the offset of the CSR within bar0
1342  * @value - value to write
1343  */
1344 void write_csr(const struct hfi1_devdata *dd, u32 offset, u64 value)
1345 {
1346         if (dd->flags & HFI1_PRESENT) {
1347                 void __iomem *base = hfi1_addr_from_offset(dd, offset);
1348 
1349                 /* avoid write to RcvArray */
1350                 if (WARN_ON(offset >= RCV_ARRAY && offset < dd->base2_start))
1351                         return;
1352                 writeq(value, base);
1353         }
1354 }
1355 
1356 /**
1357  * get_csr_addr - return te iomem address for offset
1358  * @dd - the dd device
1359  * @offset - the offset of the CSR within bar0
1360  *
1361  * Return: The iomem address to use in subsequent
1362  * writeq/readq operations.
1363  */
1364 void __iomem *get_csr_addr(
1365         const struct hfi1_devdata *dd,
1366         u32 offset)
1367 {
1368         if (dd->flags & HFI1_PRESENT)
1369                 return hfi1_addr_from_offset(dd, offset);
1370         return NULL;
1371 }
1372 
1373 static inline u64 read_write_csr(const struct hfi1_devdata *dd, u32 csr,
1374                                  int mode, u64 value)
1375 {
1376         u64 ret;
1377 
1378         if (mode == CNTR_MODE_R) {
1379                 ret = read_csr(dd, csr);
1380         } else if (mode == CNTR_MODE_W) {
1381                 write_csr(dd, csr, value);
1382                 ret = value;
1383         } else {
1384                 dd_dev_err(dd, "Invalid cntr register access mode");
1385                 return 0;
1386         }
1387 
1388         hfi1_cdbg(CNTR, "csr 0x%x val 0x%llx mode %d", csr, ret, mode);
1389         return ret;
1390 }
1391 
1392 /* Dev Access */
1393 static u64 dev_access_u32_csr(const struct cntr_entry *entry,
1394                               void *context, int vl, int mode, u64 data)
1395 {
1396         struct hfi1_devdata *dd = context;
1397         u64 csr = entry->csr;
1398 
1399         if (entry->flags & CNTR_SDMA) {
1400                 if (vl == CNTR_INVALID_VL)
1401                         return 0;
1402                 csr += 0x100 * vl;
1403         } else {
1404                 if (vl != CNTR_INVALID_VL)
1405                         return 0;
1406         }
1407         return read_write_csr(dd, csr, mode, data);
1408 }
1409 
1410 static u64 access_sde_err_cnt(const struct cntr_entry *entry,
1411                               void *context, int idx, int mode, u64 data)
1412 {
1413         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1414 
1415         if (dd->per_sdma && idx < dd->num_sdma)
1416                 return dd->per_sdma[idx].err_cnt;
1417         return 0;
1418 }
1419 
1420 static u64 access_sde_int_cnt(const struct cntr_entry *entry,
1421                               void *context, int idx, int mode, u64 data)
1422 {
1423         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1424 
1425         if (dd->per_sdma && idx < dd->num_sdma)
1426                 return dd->per_sdma[idx].sdma_int_cnt;
1427         return 0;
1428 }
1429 
1430 static u64 access_sde_idle_int_cnt(const struct cntr_entry *entry,
1431                                    void *context, int idx, int mode, u64 data)
1432 {
1433         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1434 
1435         if (dd->per_sdma && idx < dd->num_sdma)
1436                 return dd->per_sdma[idx].idle_int_cnt;
1437         return 0;
1438 }
1439 
1440 static u64 access_sde_progress_int_cnt(const struct cntr_entry *entry,
1441                                        void *context, int idx, int mode,
1442                                        u64 data)
1443 {
1444         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1445 
1446         if (dd->per_sdma && idx < dd->num_sdma)
1447                 return dd->per_sdma[idx].progress_int_cnt;
1448         return 0;
1449 }
1450 
1451 static u64 dev_access_u64_csr(const struct cntr_entry *entry, void *context,
1452                               int vl, int mode, u64 data)
1453 {
1454         struct hfi1_devdata *dd = context;
1455 
1456         u64 val = 0;
1457         u64 csr = entry->csr;
1458 
1459         if (entry->flags & CNTR_VL) {
1460                 if (vl == CNTR_INVALID_VL)
1461                         return 0;
1462                 csr += 8 * vl;
1463         } else {
1464                 if (vl != CNTR_INVALID_VL)
1465                         return 0;
1466         }
1467 
1468         val = read_write_csr(dd, csr, mode, data);
1469         return val;
1470 }
1471 
1472 static u64 dc_access_lcb_cntr(const struct cntr_entry *entry, void *context,
1473                               int vl, int mode, u64 data)
1474 {
1475         struct hfi1_devdata *dd = context;
1476         u32 csr = entry->csr;
1477         int ret = 0;
1478 
1479         if (vl != CNTR_INVALID_VL)
1480                 return 0;
1481         if (mode == CNTR_MODE_R)
1482                 ret = read_lcb_csr(dd, csr, &data);
1483         else if (mode == CNTR_MODE_W)
1484                 ret = write_lcb_csr(dd, csr, data);
1485 
1486         if (ret) {
1487                 dd_dev_err(dd, "Could not acquire LCB for counter 0x%x", csr);
1488                 return 0;
1489         }
1490 
1491         hfi1_cdbg(CNTR, "csr 0x%x val 0x%llx mode %d", csr, data, mode);
1492         return data;
1493 }
1494 
1495 /* Port Access */
1496 static u64 port_access_u32_csr(const struct cntr_entry *entry, void *context,
1497                                int vl, int mode, u64 data)
1498 {
1499         struct hfi1_pportdata *ppd = context;
1500 
1501         if (vl != CNTR_INVALID_VL)
1502                 return 0;
1503         return read_write_csr(ppd->dd, entry->csr, mode, data);
1504 }
1505 
1506 static u64 port_access_u64_csr(const struct cntr_entry *entry,
1507                                void *context, int vl, int mode, u64 data)
1508 {
1509         struct hfi1_pportdata *ppd = context;
1510         u64 val;
1511         u64 csr = entry->csr;
1512 
1513         if (entry->flags & CNTR_VL) {
1514                 if (vl == CNTR_INVALID_VL)
1515                         return 0;
1516                 csr += 8 * vl;
1517         } else {
1518                 if (vl != CNTR_INVALID_VL)
1519                         return 0;
1520         }
1521         val = read_write_csr(ppd->dd, csr, mode, data);
1522         return val;
1523 }
1524 
1525 /* Software defined */
1526 static inline u64 read_write_sw(struct hfi1_devdata *dd, u64 *cntr, int mode,
1527                                 u64 data)
1528 {
1529         u64 ret;
1530 
1531         if (mode == CNTR_MODE_R) {
1532                 ret = *cntr;
1533         } else if (mode == CNTR_MODE_W) {
1534                 *cntr = data;
1535                 ret = data;
1536         } else {
1537                 dd_dev_err(dd, "Invalid cntr sw access mode");
1538                 return 0;
1539         }
1540 
1541         hfi1_cdbg(CNTR, "val 0x%llx mode %d", ret, mode);
1542 
1543         return ret;
1544 }
1545 
1546 static u64 access_sw_link_dn_cnt(const struct cntr_entry *entry, void *context,
1547                                  int vl, int mode, u64 data)
1548 {
1549         struct hfi1_pportdata *ppd = context;
1550 
1551         if (vl != CNTR_INVALID_VL)
1552                 return 0;
1553         return read_write_sw(ppd->dd, &ppd->link_downed, mode, data);
1554 }
1555 
1556 static u64 access_sw_link_up_cnt(const struct cntr_entry *entry, void *context,
1557                                  int vl, int mode, u64 data)
1558 {
1559         struct hfi1_pportdata *ppd = context;
1560 
1561         if (vl != CNTR_INVALID_VL)
1562                 return 0;
1563         return read_write_sw(ppd->dd, &ppd->link_up, mode, data);
1564 }
1565 
1566 static u64 access_sw_unknown_frame_cnt(const struct cntr_entry *entry,
1567                                        void *context, int vl, int mode,
1568                                        u64 data)
1569 {
1570         struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
1571 
1572         if (vl != CNTR_INVALID_VL)
1573                 return 0;
1574         return read_write_sw(ppd->dd, &ppd->unknown_frame_count, mode, data);
1575 }
1576 
1577 static u64 access_sw_xmit_discards(const struct cntr_entry *entry,
1578                                    void *context, int vl, int mode, u64 data)
1579 {
1580         struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
1581         u64 zero = 0;
1582         u64 *counter;
1583 
1584         if (vl == CNTR_INVALID_VL)
1585                 counter = &ppd->port_xmit_discards;
1586         else if (vl >= 0 && vl < C_VL_COUNT)
1587                 counter = &ppd->port_xmit_discards_vl[vl];
1588         else
1589                 counter = &zero;
1590 
1591         return read_write_sw(ppd->dd, counter, mode, data);
1592 }
1593 
1594 static u64 access_xmit_constraint_errs(const struct cntr_entry *entry,
1595                                        void *context, int vl, int mode,
1596                                        u64 data)
1597 {
1598         struct hfi1_pportdata *ppd = context;
1599 
1600         if (vl != CNTR_INVALID_VL)
1601                 return 0;
1602 
1603         return read_write_sw(ppd->dd, &ppd->port_xmit_constraint_errors,
1604                              mode, data);
1605 }
1606 
1607 static u64 access_rcv_constraint_errs(const struct cntr_entry *entry,
1608                                       void *context, int vl, int mode, u64 data)
1609 {
1610         struct hfi1_pportdata *ppd = context;
1611 
1612         if (vl != CNTR_INVALID_VL)
1613                 return 0;
1614 
1615         return read_write_sw(ppd->dd, &ppd->port_rcv_constraint_errors,
1616                              mode, data);
1617 }
1618 
1619 u64 get_all_cpu_total(u64 __percpu *cntr)
1620 {
1621         int cpu;
1622         u64 counter = 0;
1623 
1624         for_each_possible_cpu(cpu)
1625                 counter += *per_cpu_ptr(cntr, cpu);
1626         return counter;
1627 }
1628 
1629 static u64 read_write_cpu(struct hfi1_devdata *dd, u64 *z_val,
1630                           u64 __percpu *cntr,
1631                           int vl, int mode, u64 data)
1632 {
1633         u64 ret = 0;
1634 
1635         if (vl != CNTR_INVALID_VL)
1636                 return 0;
1637 
1638         if (mode == CNTR_MODE_R) {
1639                 ret = get_all_cpu_total(cntr) - *z_val;
1640         } else if (mode == CNTR_MODE_W) {
1641                 /* A write can only zero the counter */
1642                 if (data == 0)
1643                         *z_val = get_all_cpu_total(cntr);
1644                 else
1645                         dd_dev_err(dd, "Per CPU cntrs can only be zeroed");
1646         } else {
1647                 dd_dev_err(dd, "Invalid cntr sw cpu access mode");
1648                 return 0;
1649         }
1650 
1651         return ret;
1652 }
1653 
1654 static u64 access_sw_cpu_intr(const struct cntr_entry *entry,
1655                               void *context, int vl, int mode, u64 data)
1656 {
1657         struct hfi1_devdata *dd = context;
1658 
1659         return read_write_cpu(dd, &dd->z_int_counter, dd->int_counter, vl,
1660                               mode, data);
1661 }
1662 
1663 static u64 access_sw_cpu_rcv_limit(const struct cntr_entry *entry,
1664                                    void *context, int vl, int mode, u64 data)
1665 {
1666         struct hfi1_devdata *dd = context;
1667 
1668         return read_write_cpu(dd, &dd->z_rcv_limit, dd->rcv_limit, vl,
1669                               mode, data);
1670 }
1671 
1672 static u64 access_sw_pio_wait(const struct cntr_entry *entry,
1673                               void *context, int vl, int mode, u64 data)
1674 {
1675         struct hfi1_devdata *dd = context;
1676 
1677         return dd->verbs_dev.n_piowait;
1678 }
1679 
1680 static u64 access_sw_pio_drain(const struct cntr_entry *entry,
1681                                void *context, int vl, int mode, u64 data)
1682 {
1683         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1684 
1685         return dd->verbs_dev.n_piodrain;
1686 }
1687 
1688 static u64 access_sw_ctx0_seq_drop(const struct cntr_entry *entry,
1689                                    void *context, int vl, int mode, u64 data)
1690 {
1691         struct hfi1_devdata *dd = context;
1692 
1693         return dd->ctx0_seq_drop;
1694 }
1695 
1696 static u64 access_sw_vtx_wait(const struct cntr_entry *entry,
1697                               void *context, int vl, int mode, u64 data)
1698 {
1699         struct hfi1_devdata *dd = context;
1700 
1701         return dd->verbs_dev.n_txwait;
1702 }
1703 
1704 static u64 access_sw_kmem_wait(const struct cntr_entry *entry,
1705                                void *context, int vl, int mode, u64 data)
1706 {
1707         struct hfi1_devdata *dd = context;
1708 
1709         return dd->verbs_dev.n_kmem_wait;
1710 }
1711 
1712 static u64 access_sw_send_schedule(const struct cntr_entry *entry,
1713                                    void *context, int vl, int mode, u64 data)
1714 {
1715         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1716 
1717         return read_write_cpu(dd, &dd->z_send_schedule, dd->send_schedule, vl,
1718                               mode, data);
1719 }
1720 
1721 /* Software counters for the error status bits within MISC_ERR_STATUS */
1722 static u64 access_misc_pll_lock_fail_err_cnt(const struct cntr_entry *entry,
1723                                              void *context, int vl, int mode,
1724                                              u64 data)
1725 {
1726         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1727 
1728         return dd->misc_err_status_cnt[12];
1729 }
1730 
1731 static u64 access_misc_mbist_fail_err_cnt(const struct cntr_entry *entry,
1732                                           void *context, int vl, int mode,
1733                                           u64 data)
1734 {
1735         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1736 
1737         return dd->misc_err_status_cnt[11];
1738 }
1739 
1740 static u64 access_misc_invalid_eep_cmd_err_cnt(const struct cntr_entry *entry,
1741                                                void *context, int vl, int mode,
1742                                                u64 data)
1743 {
1744         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1745 
1746         return dd->misc_err_status_cnt[10];
1747 }
1748 
1749 static u64 access_misc_efuse_done_parity_err_cnt(const struct cntr_entry *entry,
1750                                                  void *context, int vl,
1751                                                  int mode, u64 data)
1752 {
1753         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1754 
1755         return dd->misc_err_status_cnt[9];
1756 }
1757 
1758 static u64 access_misc_efuse_write_err_cnt(const struct cntr_entry *entry,
1759                                            void *context, int vl, int mode,
1760                                            u64 data)
1761 {
1762         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1763 
1764         return dd->misc_err_status_cnt[8];
1765 }
1766 
1767 static u64 access_misc_efuse_read_bad_addr_err_cnt(
1768                                 const struct cntr_entry *entry,
1769                                 void *context, int vl, int mode, u64 data)
1770 {
1771         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1772 
1773         return dd->misc_err_status_cnt[7];
1774 }
1775 
1776 static u64 access_misc_efuse_csr_parity_err_cnt(const struct cntr_entry *entry,
1777                                                 void *context, int vl,
1778                                                 int mode, u64 data)
1779 {
1780         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1781 
1782         return dd->misc_err_status_cnt[6];
1783 }
1784 
1785 static u64 access_misc_fw_auth_failed_err_cnt(const struct cntr_entry *entry,
1786                                               void *context, int vl, int mode,
1787                                               u64 data)
1788 {
1789         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1790 
1791         return dd->misc_err_status_cnt[5];
1792 }
1793 
1794 static u64 access_misc_key_mismatch_err_cnt(const struct cntr_entry *entry,
1795                                             void *context, int vl, int mode,
1796                                             u64 data)
1797 {
1798         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1799 
1800         return dd->misc_err_status_cnt[4];
1801 }
1802 
1803 static u64 access_misc_sbus_write_failed_err_cnt(const struct cntr_entry *entry,
1804                                                  void *context, int vl,
1805                                                  int mode, u64 data)
1806 {
1807         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1808 
1809         return dd->misc_err_status_cnt[3];
1810 }
1811 
1812 static u64 access_misc_csr_write_bad_addr_err_cnt(
1813                                 const struct cntr_entry *entry,
1814                                 void *context, int vl, int mode, u64 data)
1815 {
1816         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1817 
1818         return dd->misc_err_status_cnt[2];
1819 }
1820 
1821 static u64 access_misc_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
1822                                                  void *context, int vl,
1823                                                  int mode, u64 data)
1824 {
1825         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1826 
1827         return dd->misc_err_status_cnt[1];
1828 }
1829 
1830 static u64 access_misc_csr_parity_err_cnt(const struct cntr_entry *entry,
1831                                           void *context, int vl, int mode,
1832                                           u64 data)
1833 {
1834         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1835 
1836         return dd->misc_err_status_cnt[0];
1837 }
1838 
1839 /*
1840  * Software counter for the aggregate of
1841  * individual CceErrStatus counters
1842  */
1843 static u64 access_sw_cce_err_status_aggregated_cnt(
1844                                 const struct cntr_entry *entry,
1845                                 void *context, int vl, int mode, u64 data)
1846 {
1847         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1848 
1849         return dd->sw_cce_err_status_aggregate;
1850 }
1851 
1852 /*
1853  * Software counters corresponding to each of the
1854  * error status bits within CceErrStatus
1855  */
1856 static u64 access_cce_msix_csr_parity_err_cnt(const struct cntr_entry *entry,
1857                                               void *context, int vl, int mode,
1858                                               u64 data)
1859 {
1860         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1861 
1862         return dd->cce_err_status_cnt[40];
1863 }
1864 
1865 static u64 access_cce_int_map_unc_err_cnt(const struct cntr_entry *entry,
1866                                           void *context, int vl, int mode,
1867                                           u64 data)
1868 {
1869         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1870 
1871         return dd->cce_err_status_cnt[39];
1872 }
1873 
1874 static u64 access_cce_int_map_cor_err_cnt(const struct cntr_entry *entry,
1875                                           void *context, int vl, int mode,
1876                                           u64 data)
1877 {
1878         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1879 
1880         return dd->cce_err_status_cnt[38];
1881 }
1882 
1883 static u64 access_cce_msix_table_unc_err_cnt(const struct cntr_entry *entry,
1884                                              void *context, int vl, int mode,
1885                                              u64 data)
1886 {
1887         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1888 
1889         return dd->cce_err_status_cnt[37];
1890 }
1891 
1892 static u64 access_cce_msix_table_cor_err_cnt(const struct cntr_entry *entry,
1893                                              void *context, int vl, int mode,
1894                                              u64 data)
1895 {
1896         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1897 
1898         return dd->cce_err_status_cnt[36];
1899 }
1900 
1901 static u64 access_cce_rxdma_conv_fifo_parity_err_cnt(
1902                                 const struct cntr_entry *entry,
1903                                 void *context, int vl, int mode, u64 data)
1904 {
1905         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1906 
1907         return dd->cce_err_status_cnt[35];
1908 }
1909 
1910 static u64 access_cce_rcpl_async_fifo_parity_err_cnt(
1911                                 const struct cntr_entry *entry,
1912                                 void *context, int vl, int mode, u64 data)
1913 {
1914         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1915 
1916         return dd->cce_err_status_cnt[34];
1917 }
1918 
1919 static u64 access_cce_seg_write_bad_addr_err_cnt(const struct cntr_entry *entry,
1920                                                  void *context, int vl,
1921                                                  int mode, u64 data)
1922 {
1923         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1924 
1925         return dd->cce_err_status_cnt[33];
1926 }
1927 
1928 static u64 access_cce_seg_read_bad_addr_err_cnt(const struct cntr_entry *entry,
1929                                                 void *context, int vl, int mode,
1930                                                 u64 data)
1931 {
1932         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1933 
1934         return dd->cce_err_status_cnt[32];
1935 }
1936 
1937 static u64 access_la_triggered_cnt(const struct cntr_entry *entry,
1938                                    void *context, int vl, int mode, u64 data)
1939 {
1940         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1941 
1942         return dd->cce_err_status_cnt[31];
1943 }
1944 
1945 static u64 access_cce_trgt_cpl_timeout_err_cnt(const struct cntr_entry *entry,
1946                                                void *context, int vl, int mode,
1947                                                u64 data)
1948 {
1949         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1950 
1951         return dd->cce_err_status_cnt[30];
1952 }
1953 
1954 static u64 access_pcic_receive_parity_err_cnt(const struct cntr_entry *entry,
1955                                               void *context, int vl, int mode,
1956                                               u64 data)
1957 {
1958         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1959 
1960         return dd->cce_err_status_cnt[29];
1961 }
1962 
1963 static u64 access_pcic_transmit_back_parity_err_cnt(
1964                                 const struct cntr_entry *entry,
1965                                 void *context, int vl, int mode, u64 data)
1966 {
1967         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1968 
1969         return dd->cce_err_status_cnt[28];
1970 }
1971 
1972 static u64 access_pcic_transmit_front_parity_err_cnt(
1973                                 const struct cntr_entry *entry,
1974                                 void *context, int vl, int mode, u64 data)
1975 {
1976         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1977 
1978         return dd->cce_err_status_cnt[27];
1979 }
1980 
1981 static u64 access_pcic_cpl_dat_q_unc_err_cnt(const struct cntr_entry *entry,
1982                                              void *context, int vl, int mode,
1983                                              u64 data)
1984 {
1985         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1986 
1987         return dd->cce_err_status_cnt[26];
1988 }
1989 
1990 static u64 access_pcic_cpl_hd_q_unc_err_cnt(const struct cntr_entry *entry,
1991                                             void *context, int vl, int mode,
1992                                             u64 data)
1993 {
1994         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1995 
1996         return dd->cce_err_status_cnt[25];
1997 }
1998 
1999 static u64 access_pcic_post_dat_q_unc_err_cnt(const struct cntr_entry *entry,
2000                                               void *context, int vl, int mode,
2001                                               u64 data)
2002 {
2003         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2004 
2005         return dd->cce_err_status_cnt[24];
2006 }
2007 
2008 static u64 access_pcic_post_hd_q_unc_err_cnt(const struct cntr_entry *entry,
2009                                              void *context, int vl, int mode,
2010                                              u64 data)
2011 {
2012         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2013 
2014         return dd->cce_err_status_cnt[23];
2015 }
2016 
2017 static u64 access_pcic_retry_sot_mem_unc_err_cnt(const struct cntr_entry *entry,
2018                                                  void *context, int vl,
2019                                                  int mode, u64 data)
2020 {
2021         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2022 
2023         return dd->cce_err_status_cnt[22];
2024 }
2025 
2026 static u64 access_pcic_retry_mem_unc_err(const struct cntr_entry *entry,
2027                                          void *context, int vl, int mode,
2028                                          u64 data)
2029 {
2030         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2031 
2032         return dd->cce_err_status_cnt[21];
2033 }
2034 
2035 static u64 access_pcic_n_post_dat_q_parity_err_cnt(
2036                                 const struct cntr_entry *entry,
2037                                 void *context, int vl, int mode, u64 data)
2038 {
2039         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2040 
2041         return dd->cce_err_status_cnt[20];
2042 }
2043 
2044 static u64 access_pcic_n_post_h_q_parity_err_cnt(const struct cntr_entry *entry,
2045                                                  void *context, int vl,
2046                                                  int mode, u64 data)
2047 {
2048         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2049 
2050         return dd->cce_err_status_cnt[19];
2051 }
2052 
2053 static u64 access_pcic_cpl_dat_q_cor_err_cnt(const struct cntr_entry *entry,
2054                                              void *context, int vl, int mode,
2055                                              u64 data)
2056 {
2057         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2058 
2059         return dd->cce_err_status_cnt[18];
2060 }
2061 
2062 static u64 access_pcic_cpl_hd_q_cor_err_cnt(const struct cntr_entry *entry,
2063                                             void *context, int vl, int mode,
2064                                             u64 data)
2065 {
2066         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2067 
2068         return dd->cce_err_status_cnt[17];
2069 }
2070 
2071 static u64 access_pcic_post_dat_q_cor_err_cnt(const struct cntr_entry *entry,
2072                                               void *context, int vl, int mode,
2073                                               u64 data)
2074 {
2075         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2076 
2077         return dd->cce_err_status_cnt[16];
2078 }
2079 
2080 static u64 access_pcic_post_hd_q_cor_err_cnt(const struct cntr_entry *entry,
2081                                              void *context, int vl, int mode,
2082                                              u64 data)
2083 {
2084         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2085 
2086         return dd->cce_err_status_cnt[15];
2087 }
2088 
2089 static u64 access_pcic_retry_sot_mem_cor_err_cnt(const struct cntr_entry *entry,
2090                                                  void *context, int vl,
2091                                                  int mode, u64 data)
2092 {
2093         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2094 
2095         return dd->cce_err_status_cnt[14];
2096 }
2097 
2098 static u64 access_pcic_retry_mem_cor_err_cnt(const struct cntr_entry *entry,
2099                                              void *context, int vl, int mode,
2100                                              u64 data)
2101 {
2102         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2103 
2104         return dd->cce_err_status_cnt[13];
2105 }
2106 
2107 static u64 access_cce_cli1_async_fifo_dbg_parity_err_cnt(
2108                                 const struct cntr_entry *entry,
2109                                 void *context, int vl, int mode, u64 data)
2110 {
2111         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2112 
2113         return dd->cce_err_status_cnt[12];
2114 }
2115 
2116 static u64 access_cce_cli1_async_fifo_rxdma_parity_err_cnt(
2117                                 const struct cntr_entry *entry,
2118                                 void *context, int vl, int mode, u64 data)
2119 {
2120         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2121 
2122         return dd->cce_err_status_cnt[11];
2123 }
2124 
2125 static u64 access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt(
2126                                 const struct cntr_entry *entry,
2127                                 void *context, int vl, int mode, u64 data)
2128 {
2129         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2130 
2131         return dd->cce_err_status_cnt[10];
2132 }
2133 
2134 static u64 access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt(
2135                                 const struct cntr_entry *entry,
2136                                 void *context, int vl, int mode, u64 data)
2137 {
2138         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2139 
2140         return dd->cce_err_status_cnt[9];
2141 }
2142 
2143 static u64 access_cce_cli2_async_fifo_parity_err_cnt(
2144                                 const struct cntr_entry *entry,
2145                                 void *context, int vl, int mode, u64 data)
2146 {
2147         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2148 
2149         return dd->cce_err_status_cnt[8];
2150 }
2151 
2152 static u64 access_cce_csr_cfg_bus_parity_err_cnt(const struct cntr_entry *entry,
2153                                                  void *context, int vl,
2154                                                  int mode, u64 data)
2155 {
2156         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2157 
2158         return dd->cce_err_status_cnt[7];
2159 }
2160 
2161 static u64 access_cce_cli0_async_fifo_parity_err_cnt(
2162                                 const struct cntr_entry *entry,
2163                                 void *context, int vl, int mode, u64 data)
2164 {
2165         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2166 
2167         return dd->cce_err_status_cnt[6];
2168 }
2169 
2170 static u64 access_cce_rspd_data_parity_err_cnt(const struct cntr_entry *entry,
2171                                                void *context, int vl, int mode,
2172                                                u64 data)
2173 {
2174         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2175 
2176         return dd->cce_err_status_cnt[5];
2177 }
2178 
2179 static u64 access_cce_trgt_access_err_cnt(const struct cntr_entry *entry,
2180                                           void *context, int vl, int mode,
2181                                           u64 data)
2182 {
2183         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2184 
2185         return dd->cce_err_status_cnt[4];
2186 }
2187 
2188 static u64 access_cce_trgt_async_fifo_parity_err_cnt(
2189                                 const struct cntr_entry *entry,
2190                                 void *context, int vl, int mode, u64 data)
2191 {
2192         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2193 
2194         return dd->cce_err_status_cnt[3];
2195 }
2196 
2197 static u64 access_cce_csr_write_bad_addr_err_cnt(const struct cntr_entry *entry,
2198                                                  void *context, int vl,
2199                                                  int mode, u64 data)
2200 {
2201         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2202 
2203         return dd->cce_err_status_cnt[2];
2204 }
2205 
2206 static u64 access_cce_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
2207                                                 void *context, int vl,
2208                                                 int mode, u64 data)
2209 {
2210         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2211 
2212         return dd->cce_err_status_cnt[1];
2213 }
2214 
2215 static u64 access_ccs_csr_parity_err_cnt(const struct cntr_entry *entry,
2216                                          void *context, int vl, int mode,
2217                                          u64 data)
2218 {
2219         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2220 
2221         return dd->cce_err_status_cnt[0];
2222 }
2223 
2224 /*
2225  * Software counters corresponding to each of the
2226  * error status bits within RcvErrStatus
2227  */
2228 static u64 access_rx_csr_parity_err_cnt(const struct cntr_entry *entry,
2229                                         void *context, int vl, int mode,
2230                                         u64 data)
2231 {
2232         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2233 
2234         return dd->rcv_err_status_cnt[63];
2235 }
2236 
2237 static u64 access_rx_csr_write_bad_addr_err_cnt(const struct cntr_entry *entry,
2238                                                 void *context, int vl,
2239                                                 int mode, u64 data)
2240 {
2241         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2242 
2243         return dd->rcv_err_status_cnt[62];
2244 }
2245 
2246 static u64 access_rx_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
2247                                                void *context, int vl, int mode,
2248                                                u64 data)
2249 {
2250         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2251 
2252         return dd->rcv_err_status_cnt[61];
2253 }
2254 
2255 static u64 access_rx_dma_csr_unc_err_cnt(const struct cntr_entry *entry,
2256                                          void *context, int vl, int mode,
2257                                          u64 data)
2258 {
2259         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2260 
2261         return dd->rcv_err_status_cnt[60];
2262 }
2263 
2264 static u64 access_rx_dma_dq_fsm_encoding_err_cnt(const struct cntr_entry *entry,
2265                                                  void *context, int vl,
2266                                                  int mode, u64 data)
2267 {
2268         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2269 
2270         return dd->rcv_err_status_cnt[59];
2271 }
2272 
2273 static u64 access_rx_dma_eq_fsm_encoding_err_cnt(const struct cntr_entry *entry,
2274                                                  void *context, int vl,
2275                                                  int mode, u64 data)
2276 {
2277         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2278 
2279         return dd->rcv_err_status_cnt[58];
2280 }
2281 
2282 static u64 access_rx_dma_csr_parity_err_cnt(const struct cntr_entry *entry,
2283                                             void *context, int vl, int mode,
2284                                             u64 data)
2285 {
2286         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2287 
2288         return dd->rcv_err_status_cnt[57];
2289 }
2290 
2291 static u64 access_rx_rbuf_data_cor_err_cnt(const struct cntr_entry *entry,
2292                                            void *context, int vl, int mode,
2293                                            u64 data)
2294 {
2295         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2296 
2297         return dd->rcv_err_status_cnt[56];
2298 }
2299 
2300 static u64 access_rx_rbuf_data_unc_err_cnt(const struct cntr_entry *entry,
2301                                            void *context, int vl, int mode,
2302                                            u64 data)
2303 {
2304         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2305 
2306         return dd->rcv_err_status_cnt[55];
2307 }
2308 
2309 static u64 access_rx_dma_data_fifo_rd_cor_err_cnt(
2310                                 const struct cntr_entry *entry,
2311                                 void *context, int vl, int mode, u64 data)
2312 {
2313         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2314 
2315         return dd->rcv_err_status_cnt[54];
2316 }
2317 
2318 static u64 access_rx_dma_data_fifo_rd_unc_err_cnt(
2319                                 const struct cntr_entry *entry,
2320                                 void *context, int vl, int mode, u64 data)
2321 {
2322         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2323 
2324         return dd->rcv_err_status_cnt[53];
2325 }
2326 
2327 static u64 access_rx_dma_hdr_fifo_rd_cor_err_cnt(const struct cntr_entry *entry,
2328                                                  void *context, int vl,
2329                                                  int mode, u64 data)
2330 {
2331         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2332 
2333         return dd->rcv_err_status_cnt[52];
2334 }
2335 
2336 static u64 access_rx_dma_hdr_fifo_rd_unc_err_cnt(const struct cntr_entry *entry,
2337                                                  void *context, int vl,
2338                                                  int mode, u64 data)
2339 {
2340         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2341 
2342         return dd->rcv_err_status_cnt[51];
2343 }
2344 
2345 static u64 access_rx_rbuf_desc_part2_cor_err_cnt(const struct cntr_entry *entry,
2346                                                  void *context, int vl,
2347                                                  int mode, u64 data)
2348 {
2349         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2350 
2351         return dd->rcv_err_status_cnt[50];
2352 }
2353 
2354 static u64 access_rx_rbuf_desc_part2_unc_err_cnt(const struct cntr_entry *entry,
2355                                                  void *context, int vl,
2356                                                  int mode, u64 data)
2357 {
2358         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2359 
2360         return dd->rcv_err_status_cnt[49];
2361 }
2362 
2363 static u64 access_rx_rbuf_desc_part1_cor_err_cnt(const struct cntr_entry *entry,
2364                                                  void *context, int vl,
2365                                                  int mode, u64 data)
2366 {
2367         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2368 
2369         return dd->rcv_err_status_cnt[48];
2370 }
2371 
2372 static u64 access_rx_rbuf_desc_part1_unc_err_cnt(const struct cntr_entry *entry,
2373                                                  void *context, int vl,
2374                                                  int mode, u64 data)
2375 {
2376         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2377 
2378         return dd->rcv_err_status_cnt[47];
2379 }
2380 
2381 static u64 access_rx_hq_intr_fsm_err_cnt(const struct cntr_entry *entry,
2382                                          void *context, int vl, int mode,
2383                                          u64 data)
2384 {
2385         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2386 
2387         return dd->rcv_err_status_cnt[46];
2388 }
2389 
2390 static u64 access_rx_hq_intr_csr_parity_err_cnt(
2391                                 const struct cntr_entry *entry,
2392                                 void *context, int vl, int mode, u64 data)
2393 {
2394         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2395 
2396         return dd->rcv_err_status_cnt[45];
2397 }
2398 
2399 static u64 access_rx_lookup_csr_parity_err_cnt(
2400                                 const struct cntr_entry *entry,
2401                                 void *context, int vl, int mode, u64 data)
2402 {
2403         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2404 
2405         return dd->rcv_err_status_cnt[44];
2406 }
2407 
2408 static u64 access_rx_lookup_rcv_array_cor_err_cnt(
2409                                 const struct cntr_entry *entry,
2410                                 void *context, int vl, int mode, u64 data)
2411 {
2412         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2413 
2414         return dd->rcv_err_status_cnt[43];
2415 }
2416 
2417 static u64 access_rx_lookup_rcv_array_unc_err_cnt(
2418                                 const struct cntr_entry *entry,
2419                                 void *context, int vl, int mode, u64 data)
2420 {
2421         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2422 
2423         return dd->rcv_err_status_cnt[42];
2424 }
2425 
2426 static u64 access_rx_lookup_des_part2_parity_err_cnt(
2427                                 const struct cntr_entry *entry,
2428                                 void *context, int vl, int mode, u64 data)
2429 {
2430         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2431 
2432         return dd->rcv_err_status_cnt[41];
2433 }
2434 
2435 static u64 access_rx_lookup_des_part1_unc_cor_err_cnt(
2436                                 const struct cntr_entry *entry,
2437                                 void *context, int vl, int mode, u64 data)
2438 {
2439         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2440 
2441         return dd->rcv_err_status_cnt[40];
2442 }
2443 
2444 static u64 access_rx_lookup_des_part1_unc_err_cnt(
2445                                 const struct cntr_entry *entry,
2446                                 void *context, int vl, int mode, u64 data)
2447 {
2448         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2449 
2450         return dd->rcv_err_status_cnt[39];
2451 }
2452 
2453 static u64 access_rx_rbuf_next_free_buf_cor_err_cnt(
2454                                 const struct cntr_entry *entry,
2455                                 void *context, int vl, int mode, u64 data)
2456 {
2457         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2458 
2459         return dd->rcv_err_status_cnt[38];
2460 }
2461 
2462 static u64 access_rx_rbuf_next_free_buf_unc_err_cnt(
2463                                 const struct cntr_entry *entry,
2464                                 void *context, int vl, int mode, u64 data)
2465 {
2466         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2467 
2468         return dd->rcv_err_status_cnt[37];
2469 }
2470 
2471 static u64 access_rbuf_fl_init_wr_addr_parity_err_cnt(
2472                                 const struct cntr_entry *entry,
2473                                 void *context, int vl, int mode, u64 data)
2474 {
2475         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2476 
2477         return dd->rcv_err_status_cnt[36];
2478 }
2479 
2480 static u64 access_rx_rbuf_fl_initdone_parity_err_cnt(
2481                                 const struct cntr_entry *entry,
2482                                 void *context, int vl, int mode, u64 data)
2483 {
2484         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2485 
2486         return dd->rcv_err_status_cnt[35];
2487 }
2488 
2489 static u64 access_rx_rbuf_fl_write_addr_parity_err_cnt(
2490                                 const struct cntr_entry *entry,
2491                                 void *context, int vl, int mode, u64 data)
2492 {
2493         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2494 
2495         return dd->rcv_err_status_cnt[34];
2496 }
2497 
2498 static u64 access_rx_rbuf_fl_rd_addr_parity_err_cnt(
2499                                 const struct cntr_entry *entry,
2500                                 void *context, int vl, int mode, u64 data)
2501 {
2502         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2503 
2504         return dd->rcv_err_status_cnt[33];
2505 }
2506 
2507 static u64 access_rx_rbuf_empty_err_cnt(const struct cntr_entry *entry,
2508                                         void *context, int vl, int mode,
2509                                         u64 data)
2510 {
2511         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2512 
2513         return dd->rcv_err_status_cnt[32];
2514 }
2515 
2516 static u64 access_rx_rbuf_full_err_cnt(const struct cntr_entry *entry,
2517                                        void *context, int vl, int mode,
2518                                        u64 data)
2519 {
2520         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2521 
2522         return dd->rcv_err_status_cnt[31];
2523 }
2524 
2525 static u64 access_rbuf_bad_lookup_err_cnt(const struct cntr_entry *entry,
2526                                           void *context, int vl, int mode,
2527                                           u64 data)
2528 {
2529         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2530 
2531         return dd->rcv_err_status_cnt[30];
2532 }
2533 
2534 static u64 access_rbuf_ctx_id_parity_err_cnt(const struct cntr_entry *entry,
2535                                              void *context, int vl, int mode,
2536                                              u64 data)
2537 {
2538         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2539 
2540         return dd->rcv_err_status_cnt[29];
2541 }
2542 
2543 static u64 access_rbuf_csr_qeopdw_parity_err_cnt(const struct cntr_entry *entry,
2544                                                  void *context, int vl,
2545                                                  int mode, u64 data)
2546 {
2547         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2548 
2549         return dd->rcv_err_status_cnt[28];
2550 }
2551 
2552 static u64 access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt(
2553                                 const struct cntr_entry *entry,
2554                                 void *context, int vl, int mode, u64 data)
2555 {
2556         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2557 
2558         return dd->rcv_err_status_cnt[27];
2559 }
2560 
2561 static u64 access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt(
2562                                 const struct cntr_entry *entry,
2563                                 void *context, int vl, int mode, u64 data)
2564 {
2565         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2566 
2567         return dd->rcv_err_status_cnt[26];
2568 }
2569 
2570 static u64 access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt(
2571                                 const struct cntr_entry *entry,
2572                                 void *context, int vl, int mode, u64 data)
2573 {
2574         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2575 
2576         return dd->rcv_err_status_cnt[25];
2577 }
2578 
2579 static u64 access_rx_rbuf_csr_q_vld_bit_parity_err_cnt(
2580                                 const struct cntr_entry *entry,
2581                                 void *context, int vl, int mode, u64 data)
2582 {
2583         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2584 
2585         return dd->rcv_err_status_cnt[24];
2586 }
2587 
2588 static u64 access_rx_rbuf_csr_q_next_buf_parity_err_cnt(
2589                                 const struct cntr_entry *entry,
2590                                 void *context, int vl, int mode, u64 data)
2591 {
2592         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2593 
2594         return dd->rcv_err_status_cnt[23];
2595 }
2596 
2597 static u64 access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt(
2598                                 const struct cntr_entry *entry,
2599                                 void *context, int vl, int mode, u64 data)
2600 {
2601         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2602 
2603         return dd->rcv_err_status_cnt[22];
2604 }
2605 
2606 static u64 access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt(
2607                                 const struct cntr_entry *entry,
2608                                 void *context, int vl, int mode, u64 data)
2609 {
2610         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2611 
2612         return dd->rcv_err_status_cnt[21];
2613 }
2614 
2615 static u64 access_rx_rbuf_block_list_read_cor_err_cnt(
2616                                 const struct cntr_entry *entry,
2617                                 void *context, int vl, int mode, u64 data)
2618 {
2619         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2620 
2621         return dd->rcv_err_status_cnt[20];
2622 }
2623 
2624 static u64 access_rx_rbuf_block_list_read_unc_err_cnt(
2625                                 const struct cntr_entry *entry,
2626                                 void *context, int vl, int mode, u64 data)
2627 {
2628         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2629 
2630         return dd->rcv_err_status_cnt[19];
2631 }
2632 
2633 static u64 access_rx_rbuf_lookup_des_cor_err_cnt(const struct cntr_entry *entry,
2634                                                  void *context, int vl,
2635                                                  int mode, u64 data)
2636 {
2637         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2638 
2639         return dd->rcv_err_status_cnt[18];
2640 }
2641 
2642 static u64 access_rx_rbuf_lookup_des_unc_err_cnt(const struct cntr_entry *entry,
2643                                                  void *context, int vl,
2644                                                  int mode, u64 data)
2645 {
2646         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2647 
2648         return dd->rcv_err_status_cnt[17];
2649 }
2650 
2651 static u64 access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt(
2652                                 const struct cntr_entry *entry,
2653                                 void *context, int vl, int mode, u64 data)
2654 {
2655         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2656 
2657         return dd->rcv_err_status_cnt[16];
2658 }
2659 
2660 static u64 access_rx_rbuf_lookup_des_reg_unc_err_cnt(
2661                                 const struct cntr_entry *entry,
2662                                 void *context, int vl, int mode, u64 data)
2663 {
2664         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2665 
2666         return dd->rcv_err_status_cnt[15];
2667 }
2668 
2669 static u64 access_rx_rbuf_free_list_cor_err_cnt(const struct cntr_entry *entry,
2670                                                 void *context, int vl,
2671                                                 int mode, u64 data)
2672 {
2673         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2674 
2675         return dd->rcv_err_status_cnt[14];
2676 }
2677 
2678 static u64 access_rx_rbuf_free_list_unc_err_cnt(const struct cntr_entry *entry,
2679                                                 void *context, int vl,
2680                                                 int mode, u64 data)
2681 {
2682         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2683 
2684         return dd->rcv_err_status_cnt[13];
2685 }
2686 
2687 static u64 access_rx_rcv_fsm_encoding_err_cnt(const struct cntr_entry *entry,
2688                                               void *context, int vl, int mode,
2689                                               u64 data)
2690 {
2691         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2692 
2693         return dd->rcv_err_status_cnt[12];
2694 }
2695 
2696 static u64 access_rx_dma_flag_cor_err_cnt(const struct cntr_entry *entry,
2697                                           void *context, int vl, int mode,
2698                                           u64 data)
2699 {
2700         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2701 
2702         return dd->rcv_err_status_cnt[11];
2703 }
2704 
2705 static u64 access_rx_dma_flag_unc_err_cnt(const struct cntr_entry *entry,
2706                                           void *context, int vl, int mode,
2707                                           u64 data)
2708 {
2709         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2710 
2711         return dd->rcv_err_status_cnt[10];
2712 }
2713 
2714 static u64 access_rx_dc_sop_eop_parity_err_cnt(const struct cntr_entry *entry,
2715                                                void *context, int vl, int mode,
2716                                                u64 data)
2717 {
2718         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2719 
2720         return dd->rcv_err_status_cnt[9];
2721 }
2722 
2723 static u64 access_rx_rcv_csr_parity_err_cnt(const struct cntr_entry *entry,
2724                                             void *context, int vl, int mode,
2725                                             u64 data)
2726 {
2727         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2728 
2729         return dd->rcv_err_status_cnt[8];
2730 }
2731 
2732 static u64 access_rx_rcv_qp_map_table_cor_err_cnt(
2733                                 const struct cntr_entry *entry,
2734                                 void *context, int vl, int mode, u64 data)
2735 {
2736         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2737 
2738         return dd->rcv_err_status_cnt[7];
2739 }
2740 
2741 static u64 access_rx_rcv_qp_map_table_unc_err_cnt(
2742                                 const struct cntr_entry *entry,
2743                                 void *context, int vl, int mode, u64 data)
2744 {
2745         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2746 
2747         return dd->rcv_err_status_cnt[6];
2748 }
2749 
2750 static u64 access_rx_rcv_data_cor_err_cnt(const struct cntr_entry *entry,
2751                                           void *context, int vl, int mode,
2752                                           u64 data)
2753 {
2754         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2755 
2756         return dd->rcv_err_status_cnt[5];
2757 }
2758 
2759 static u64 access_rx_rcv_data_unc_err_cnt(const struct cntr_entry *entry,
2760                                           void *context, int vl, int mode,
2761                                           u64 data)
2762 {
2763         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2764 
2765         return dd->rcv_err_status_cnt[4];
2766 }
2767 
2768 static u64 access_rx_rcv_hdr_cor_err_cnt(const struct cntr_entry *entry,
2769                                          void *context, int vl, int mode,
2770                                          u64 data)
2771 {
2772         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2773 
2774         return dd->rcv_err_status_cnt[3];
2775 }
2776 
2777 static u64 access_rx_rcv_hdr_unc_err_cnt(const struct cntr_entry *entry,
2778                                          void *context, int vl, int mode,
2779                                          u64 data)
2780 {
2781         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2782 
2783         return dd->rcv_err_status_cnt[2];
2784 }
2785 
2786 static u64 access_rx_dc_intf_parity_err_cnt(const struct cntr_entry *entry,
2787                                             void *context, int vl, int mode,
2788                                             u64 data)
2789 {
2790         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2791 
2792         return dd->rcv_err_status_cnt[1];
2793 }
2794 
2795 static u64 access_rx_dma_csr_cor_err_cnt(const struct cntr_entry *entry,
2796                                          void *context, int vl, int mode,
2797                                          u64 data)
2798 {
2799         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2800 
2801         return dd->rcv_err_status_cnt[0];
2802 }
2803 
2804 /*
2805  * Software counters corresponding to each of the
2806  * error status bits within SendPioErrStatus
2807  */
2808 static u64 access_pio_pec_sop_head_parity_err_cnt(
2809                                 const struct cntr_entry *entry,
2810                                 void *context, int vl, int mode, u64 data)
2811 {
2812         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2813 
2814         return dd->send_pio_err_status_cnt[35];
2815 }
2816 
2817 static u64 access_pio_pcc_sop_head_parity_err_cnt(
2818                                 const struct cntr_entry *entry,
2819                                 void *context, int vl, int mode, u64 data)
2820 {
2821         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2822 
2823         return dd->send_pio_err_status_cnt[34];
2824 }
2825 
2826 static u64 access_pio_last_returned_cnt_parity_err_cnt(
2827                                 const struct cntr_entry *entry,
2828                                 void *context, int vl, int mode, u64 data)
2829 {
2830         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2831 
2832         return dd->send_pio_err_status_cnt[33];
2833 }
2834 
2835 static u64 access_pio_current_free_cnt_parity_err_cnt(
2836                                 const struct cntr_entry *entry,
2837                                 void *context, int vl, int mode, u64 data)
2838 {
2839         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2840 
2841         return dd->send_pio_err_status_cnt[32];
2842 }
2843 
2844 static u64 access_pio_reserved_31_err_cnt(const struct cntr_entry *entry,
2845                                           void *context, int vl, int mode,
2846                                           u64 data)
2847 {
2848         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2849 
2850         return dd->send_pio_err_status_cnt[31];
2851 }
2852 
2853 static u64 access_pio_reserved_30_err_cnt(const struct cntr_entry *entry,
2854                                           void *context, int vl, int mode,
2855                                           u64 data)
2856 {
2857         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2858 
2859         return dd->send_pio_err_status_cnt[30];
2860 }
2861 
2862 static u64 access_pio_ppmc_sop_len_err_cnt(const struct cntr_entry *entry,
2863                                            void *context, int vl, int mode,
2864                                            u64 data)
2865 {
2866         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2867 
2868         return dd->send_pio_err_status_cnt[29];
2869 }
2870 
2871 static u64 access_pio_ppmc_bqc_mem_parity_err_cnt(
2872                                 const struct cntr_entry *entry,
2873                                 void *context, int vl, int mode, u64 data)
2874 {
2875         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2876 
2877         return dd->send_pio_err_status_cnt[28];
2878 }
2879 
2880 static u64 access_pio_vl_fifo_parity_err_cnt(const struct cntr_entry *entry,
2881                                              void *context, int vl, int mode,
2882                                              u64 data)
2883 {
2884         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2885 
2886         return dd->send_pio_err_status_cnt[27];
2887 }
2888 
2889 static u64 access_pio_vlf_sop_parity_err_cnt(const struct cntr_entry *entry,
2890                                              void *context, int vl, int mode,
2891                                              u64 data)
2892 {
2893         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2894 
2895         return dd->send_pio_err_status_cnt[26];
2896 }
2897 
2898 static u64 access_pio_vlf_v1_len_parity_err_cnt(const struct cntr_entry *entry,
2899                                                 void *context, int vl,
2900                                                 int mode, u64 data)
2901 {
2902         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2903 
2904         return dd->send_pio_err_status_cnt[25];
2905 }
2906 
2907 static u64 access_pio_block_qw_count_parity_err_cnt(
2908                                 const struct cntr_entry *entry,
2909                                 void *context, int vl, int mode, u64 data)
2910 {
2911         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2912 
2913         return dd->send_pio_err_status_cnt[24];
2914 }
2915 
2916 static u64 access_pio_write_qw_valid_parity_err_cnt(
2917                                 const struct cntr_entry *entry,
2918                                 void *context, int vl, int mode, u64 data)
2919 {
2920         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2921 
2922         return dd->send_pio_err_status_cnt[23];
2923 }
2924 
2925 static u64 access_pio_state_machine_err_cnt(const struct cntr_entry *entry,
2926                                             void *context, int vl, int mode,
2927                                             u64 data)
2928 {
2929         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2930 
2931         return dd->send_pio_err_status_cnt[22];
2932 }
2933 
2934 static u64 access_pio_write_data_parity_err_cnt(const struct cntr_entry *entry,
2935                                                 void *context, int vl,
2936                                                 int mode, u64 data)
2937 {
2938         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2939 
2940         return dd->send_pio_err_status_cnt[21];
2941 }
2942 
2943 static u64 access_pio_host_addr_mem_cor_err_cnt(const struct cntr_entry *entry,
2944                                                 void *context, int vl,
2945                                                 int mode, u64 data)
2946 {
2947         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2948 
2949         return dd->send_pio_err_status_cnt[20];
2950 }
2951 
2952 static u64 access_pio_host_addr_mem_unc_err_cnt(const struct cntr_entry *entry,
2953                                                 void *context, int vl,
2954                                                 int mode, u64 data)
2955 {
2956         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2957 
2958         return dd->send_pio_err_status_cnt[19];
2959 }
2960 
2961 static u64 access_pio_pkt_evict_sm_or_arb_sm_err_cnt(
2962                                 const struct cntr_entry *entry,
2963                                 void *context, int vl, int mode, u64 data)
2964 {
2965         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2966 
2967         return dd->send_pio_err_status_cnt[18];
2968 }
2969 
2970 static u64 access_pio_init_sm_in_err_cnt(const struct cntr_entry *entry,
2971                                          void *context, int vl, int mode,
2972                                          u64 data)
2973 {
2974         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2975 
2976         return dd->send_pio_err_status_cnt[17];
2977 }
2978 
2979 static u64 access_pio_ppmc_pbl_fifo_err_cnt(const struct cntr_entry *entry,
2980                                             void *context, int vl, int mode,
2981                                             u64 data)
2982 {
2983         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2984 
2985         return dd->send_pio_err_status_cnt[16];
2986 }
2987 
2988 static u64 access_pio_credit_ret_fifo_parity_err_cnt(
2989                                 const struct cntr_entry *entry,
2990                                 void *context, int vl, int mode, u64 data)
2991 {
2992         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2993 
2994         return dd->send_pio_err_status_cnt[15];
2995 }
2996 
2997 static u64 access_pio_v1_len_mem_bank1_cor_err_cnt(
2998                                 const struct cntr_entry *entry,
2999                                 void *context, int vl, int mode, u64 data)
3000 {
3001         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3002 
3003         return dd->send_pio_err_status_cnt[14];
3004 }
3005 
3006 static u64 access_pio_v1_len_mem_bank0_cor_err_cnt(
3007                                 const struct cntr_entry *entry,
3008                                 void *context, int vl, int mode, u64 data)
3009 {
3010         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3011 
3012         return dd->send_pio_err_status_cnt[13];
3013 }
3014 
3015 static u64 access_pio_v1_len_mem_bank1_unc_err_cnt(
3016                                 const struct cntr_entry *entry,
3017                                 void *context, int vl, int mode, u64 data)
3018 {
3019         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3020 
3021         return dd->send_pio_err_status_cnt[12];
3022 }
3023 
3024 static u64 access_pio_v1_len_mem_bank0_unc_err_cnt(
3025                                 const struct cntr_entry *entry,
3026                                 void *context, int vl, int mode, u64 data)
3027 {
3028         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3029 
3030         return dd->send_pio_err_status_cnt[11];
3031 }
3032 
3033 static u64 access_pio_sm_pkt_reset_parity_err_cnt(
3034                                 const struct cntr_entry *entry,
3035                                 void *context, int vl, int mode, u64 data)
3036 {
3037         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3038 
3039         return dd->send_pio_err_status_cnt[10];
3040 }
3041 
3042 static u64 access_pio_pkt_evict_fifo_parity_err_cnt(
3043                                 const struct cntr_entry *entry,
3044                                 void *context, int vl, int mode, u64 data)
3045 {
3046         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3047 
3048         return dd->send_pio_err_status_cnt[9];
3049 }
3050 
3051 static u64 access_pio_sbrdctrl_crrel_fifo_parity_err_cnt(
3052                                 const struct cntr_entry *entry,
3053                                 void *context, int vl, int mode, u64 data)
3054 {
3055         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3056 
3057         return dd->send_pio_err_status_cnt[8];
3058 }
3059 
3060 static u64 access_pio_sbrdctl_crrel_parity_err_cnt(
3061                                 const struct cntr_entry *entry,
3062                                 void *context, int vl, int mode, u64 data)
3063 {
3064         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3065 
3066         return dd->send_pio_err_status_cnt[7];
3067 }
3068 
3069 static u64 access_pio_pec_fifo_parity_err_cnt(const struct cntr_entry *entry,
3070                                               void *context, int vl, int mode,
3071                                               u64 data)
3072 {
3073         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3074 
3075         return dd->send_pio_err_status_cnt[6];
3076 }
3077 
3078 static u64 access_pio_pcc_fifo_parity_err_cnt(const struct cntr_entry *entry,
3079                                               void *context, int vl, int mode,
3080                                               u64 data)
3081 {
3082         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3083 
3084         return dd->send_pio_err_status_cnt[5];
3085 }
3086 
3087 static u64 access_pio_sb_mem_fifo1_err_cnt(const struct cntr_entry *entry,
3088                                            void *context, int vl, int mode,
3089                                            u64 data)
3090 {
3091         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3092 
3093         return dd->send_pio_err_status_cnt[4];
3094 }
3095 
3096 static u64 access_pio_sb_mem_fifo0_err_cnt(const struct cntr_entry *entry,
3097                                            void *context, int vl, int mode,
3098                                            u64 data)
3099 {
3100         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3101 
3102         return dd->send_pio_err_status_cnt[3];
3103 }
3104 
3105 static u64 access_pio_csr_parity_err_cnt(const struct cntr_entry *entry,
3106                                          void *context, int vl, int mode,
3107                                          u64 data)
3108 {
3109         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3110 
3111         return dd->send_pio_err_status_cnt[2];
3112 }
3113 
3114 static u64 access_pio_write_addr_parity_err_cnt(const struct cntr_entry *entry,
3115                                                 void *context, int vl,
3116                                                 int mode, u64 data)
3117 {
3118         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3119 
3120         return dd->send_pio_err_status_cnt[1];
3121 }
3122 
3123 static u64 access_pio_write_bad_ctxt_err_cnt(const struct cntr_entry *entry,
3124                                              void *context, int vl, int mode,
3125                                              u64 data)
3126 {
3127         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3128 
3129         return dd->send_pio_err_status_cnt[0];
3130 }
3131 
3132 /*
3133  * Software counters corresponding to each of the
3134  * error status bits within SendDmaErrStatus
3135  */
3136 static u64 access_sdma_pcie_req_tracking_cor_err_cnt(
3137                                 const struct cntr_entry *entry,
3138                                 void *context, int vl, int mode, u64 data)
3139 {
3140         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3141 
3142         return dd->send_dma_err_status_cnt[3];
3143 }
3144 
3145 static u64 access_sdma_pcie_req_tracking_unc_err_cnt(
3146                                 const struct cntr_entry *entry,
3147                                 void *context, int vl, int mode, u64 data)
3148 {
3149         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3150 
3151         return dd->send_dma_err_status_cnt[2];
3152 }
3153 
3154 static u64 access_sdma_csr_parity_err_cnt(const struct cntr_entry *entry,
3155                                           void *context, int vl, int mode,
3156                                           u64 data)
3157 {
3158         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3159 
3160         return dd->send_dma_err_status_cnt[1];
3161 }
3162 
3163 static u64 access_sdma_rpy_tag_err_cnt(const struct cntr_entry *entry,
3164                                        void *context, int vl, int mode,
3165                                        u64 data)
3166 {
3167         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3168 
3169         return dd->send_dma_err_status_cnt[0];
3170 }
3171 
3172 /*
3173  * Software counters corresponding to each of the
3174  * error status bits within SendEgressErrStatus
3175  */
3176 static u64 access_tx_read_pio_memory_csr_unc_err_cnt(
3177                                 const struct cntr_entry *entry,
3178                                 void *context, int vl, int mode, u64 data)
3179 {
3180         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3181 
3182         return dd->send_egress_err_status_cnt[63];
3183 }
3184 
3185 static u64 access_tx_read_sdma_memory_csr_err_cnt(
3186                                 const struct cntr_entry *entry,
3187                                 void *context, int vl, int mode, u64 data)
3188 {
3189         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3190 
3191         return dd->send_egress_err_status_cnt[62];
3192 }
3193 
3194 static u64 access_tx_egress_fifo_cor_err_cnt(const struct cntr_entry *entry,
3195                                              void *context, int vl, int mode,
3196                                              u64 data)
3197 {
3198         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3199 
3200         return dd->send_egress_err_status_cnt[61];
3201 }
3202 
3203 static u64 access_tx_read_pio_memory_cor_err_cnt(const struct cntr_entry *entry,
3204                                                  void *context, int vl,
3205                                                  int mode, u64 data)
3206 {
3207         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3208 
3209         return dd->send_egress_err_status_cnt[60];
3210 }
3211 
3212 static u64 access_tx_read_sdma_memory_cor_err_cnt(
3213                                 const struct cntr_entry *entry,
3214                                 void *context, int vl, int mode, u64 data)
3215 {
3216         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3217 
3218         return dd->send_egress_err_status_cnt[59];
3219 }
3220 
3221 static u64 access_tx_sb_hdr_cor_err_cnt(const struct cntr_entry *entry,
3222                                         void *context, int vl, int mode,
3223                                         u64 data)
3224 {
3225         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3226 
3227         return dd->send_egress_err_status_cnt[58];
3228 }
3229 
3230 static u64 access_tx_credit_overrun_err_cnt(const struct cntr_entry *entry,
3231                                             void *context, int vl, int mode,
3232                                             u64 data)
3233 {
3234         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3235 
3236         return dd->send_egress_err_status_cnt[57];
3237 }
3238 
3239 static u64 access_tx_launch_fifo8_cor_err_cnt(const struct cntr_entry *entry,
3240                                               void *context, int vl, int mode,
3241                                               u64 data)
3242 {
3243         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3244 
3245         return dd->send_egress_err_status_cnt[56];
3246 }
3247 
3248 static u64 access_tx_launch_fifo7_cor_err_cnt(const struct cntr_entry *entry,
3249                                               void *context, int vl, int mode,
3250                                               u64 data)
3251 {
3252         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3253 
3254         return dd->send_egress_err_status_cnt[55];
3255 }
3256 
3257 static u64 access_tx_launch_fifo6_cor_err_cnt(const struct cntr_entry *entry,
3258                                               void *context, int vl, int mode,
3259                                               u64 data)
3260 {
3261         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3262 
3263         return dd->send_egress_err_status_cnt[54];
3264 }
3265 
3266 static u64 access_tx_launch_fifo5_cor_err_cnt(const struct cntr_entry *entry,
3267                                               void *context, int vl, int mode,
3268                                               u64 data)
3269 {
3270         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3271 
3272         return dd->send_egress_err_status_cnt[53];
3273 }
3274 
3275 static u64 access_tx_launch_fifo4_cor_err_cnt(const struct cntr_entry *entry,
3276                                               void *context, int vl, int mode,
3277                                               u64 data)
3278 {
3279         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3280 
3281         return dd->send_egress_err_status_cnt[52];
3282 }
3283 
3284 static u64 access_tx_launch_fifo3_cor_err_cnt(const struct cntr_entry *entry,
3285                                               void *context, int vl, int mode,
3286                                               u64 data)
3287 {
3288         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3289 
3290         return dd->send_egress_err_status_cnt[51];
3291 }
3292 
3293 static u64 access_tx_launch_fifo2_cor_err_cnt(const struct cntr_entry *entry,
3294                                               void *context, int vl, int mode,
3295                                               u64 data)
3296 {
3297         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3298 
3299         return dd->send_egress_err_status_cnt[50];
3300 }
3301 
3302 static u64 access_tx_launch_fifo1_cor_err_cnt(const struct cntr_entry *entry,
3303                                               void *context, int vl, int mode,
3304                                               u64 data)
3305 {
3306         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3307 
3308         return dd->send_egress_err_status_cnt[49];
3309 }
3310 
3311 static u64 access_tx_launch_fifo0_cor_err_cnt(const struct cntr_entry *entry,
3312                                               void *context, int vl, int mode,
3313                                               u64 data)
3314 {
3315         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3316 
3317         return dd->send_egress_err_status_cnt[48];
3318 }
3319 
3320 static u64 access_tx_credit_return_vl_err_cnt(const struct cntr_entry *entry,
3321                                               void *context, int vl, int mode,
3322                                               u64 data)
3323 {
3324         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3325 
3326         return dd->send_egress_err_status_cnt[47];
3327 }
3328 
3329 static u64 access_tx_hcrc_insertion_err_cnt(const struct cntr_entry *entry,
3330                                             void *context, int vl, int mode,
3331                                             u64 data)
3332 {
3333         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3334 
3335         return dd->send_egress_err_status_cnt[46];
3336 }
3337 
3338 static u64 access_tx_egress_fifo_unc_err_cnt(const struct cntr_entry *entry,
3339                                              void *context, int vl, int mode,
3340                                              u64 data)
3341 {
3342         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3343 
3344         return dd->send_egress_err_status_cnt[45];
3345 }
3346 
3347 static u64 access_tx_read_pio_memory_unc_err_cnt(const struct cntr_entry *entry,
3348                                                  void *context, int vl,
3349                                                  int mode, u64 data)
3350 {
3351         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3352 
3353         return dd->send_egress_err_status_cnt[44];
3354 }
3355 
3356 static u64 access_tx_read_sdma_memory_unc_err_cnt(
3357                                 const struct cntr_entry *entry,
3358                                 void *context, int vl, int mode, u64 data)
3359 {
3360         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3361 
3362         return dd->send_egress_err_status_cnt[43];
3363 }
3364 
3365 static u64 access_tx_sb_hdr_unc_err_cnt(const struct cntr_entry *entry,
3366                                         void *context, int vl, int mode,
3367                                         u64 data)
3368 {
3369         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3370 
3371         return dd->send_egress_err_status_cnt[42];
3372 }
3373 
3374 static u64 access_tx_credit_return_partiy_err_cnt(
3375                                 const struct cntr_entry *entry,
3376                                 void *context, int vl, int mode, u64 data)
3377 {
3378         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3379 
3380         return dd->send_egress_err_status_cnt[41];
3381 }
3382 
3383 static u64 access_tx_launch_fifo8_unc_or_parity_err_cnt(
3384                                 const struct cntr_entry *entry,
3385                                 void *context, int vl, int mode, u64 data)
3386 {
3387         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3388 
3389         return dd->send_egress_err_status_cnt[40];
3390 }
3391 
3392 static u64 access_tx_launch_fifo7_unc_or_parity_err_cnt(
3393                                 const struct cntr_entry *entry,
3394                                 void *context, int vl, int mode, u64 data)
3395 {
3396         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3397 
3398         return dd->send_egress_err_status_cnt[39];
3399 }
3400 
3401 static u64 access_tx_launch_fifo6_unc_or_parity_err_cnt(
3402                                 const struct cntr_entry *entry,
3403                                 void *context, int vl, int mode, u64 data)
3404 {
3405         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3406 
3407         return dd->send_egress_err_status_cnt[38];
3408 }
3409 
3410 static u64 access_tx_launch_fifo5_unc_or_parity_err_cnt(
3411                                 const struct cntr_entry *entry,
3412                                 void *context, int vl, int mode, u64 data)
3413 {
3414         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3415 
3416         return dd->send_egress_err_status_cnt[37];
3417 }
3418 
3419 static u64 access_tx_launch_fifo4_unc_or_parity_err_cnt(
3420                                 const struct cntr_entry *entry,
3421                                 void *context, int vl, int mode, u64 data)
3422 {
3423         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3424 
3425         return dd->send_egress_err_status_cnt[36];
3426 }
3427 
3428 static u64 access_tx_launch_fifo3_unc_or_parity_err_cnt(
3429                                 const struct cntr_entry *entry,
3430                                 void *context, int vl, int mode, u64 data)
3431 {
3432         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3433 
3434         return dd->send_egress_err_status_cnt[35];
3435 }
3436 
3437 static u64 access_tx_launch_fifo2_unc_or_parity_err_cnt(
3438                                 const struct cntr_entry *entry,
3439                                 void *context, int vl, int mode, u64 data)
3440 {
3441         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3442 
3443         return dd->send_egress_err_status_cnt[34];
3444 }
3445 
3446 static u64 access_tx_launch_fifo1_unc_or_parity_err_cnt(
3447                                 const struct cntr_entry *entry,
3448                                 void *context, int vl, int mode, u64 data)
3449 {
3450         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3451 
3452         return dd->send_egress_err_status_cnt[33];
3453 }
3454 
3455 static u64 access_tx_launch_fifo0_unc_or_parity_err_cnt(
3456                                 const struct cntr_entry *entry,
3457                                 void *context, int vl, int mode, u64 data)
3458 {
3459         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3460 
3461         return dd->send_egress_err_status_cnt[32];
3462 }
3463 
3464 static u64 access_tx_sdma15_disallowed_packet_err_cnt(
3465                                 const struct cntr_entry *entry,
3466                                 void *context, int vl, int mode, u64 data)
3467 {
3468         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3469 
3470         return dd->send_egress_err_status_cnt[31];
3471 }
3472 
3473 static u64 access_tx_sdma14_disallowed_packet_err_cnt(
3474                                 const struct cntr_entry *entry,
3475                                 void *context, int vl, int mode, u64 data)
3476 {
3477         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3478 
3479         return dd->send_egress_err_status_cnt[30];
3480 }
3481 
3482 static u64 access_tx_sdma13_disallowed_packet_err_cnt(
3483                                 const struct cntr_entry *entry,
3484                                 void *context, int vl, int mode, u64 data)
3485 {
3486         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3487 
3488         return dd->send_egress_err_status_cnt[29];
3489 }
3490 
3491 static u64 access_tx_sdma12_disallowed_packet_err_cnt(
3492                                 const struct cntr_entry *entry,
3493                                 void *context, int vl, int mode, u64 data)
3494 {
3495         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3496 
3497         return dd->send_egress_err_status_cnt[28];
3498 }
3499 
3500 static u64 access_tx_sdma11_disallowed_packet_err_cnt(
3501                                 const struct cntr_entry *entry,
3502                                 void *context, int vl, int mode, u64 data)
3503 {
3504         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3505 
3506         return dd->send_egress_err_status_cnt[27];
3507 }
3508 
3509 static u64 access_tx_sdma10_disallowed_packet_err_cnt(
3510                                 const struct cntr_entry *entry,
3511                                 void *context, int vl, int mode, u64 data)
3512 {
3513         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3514 
3515         return dd->send_egress_err_status_cnt[26];
3516 }
3517 
3518 static u64 access_tx_sdma9_disallowed_packet_err_cnt(
3519                                 const struct cntr_entry *entry,
3520                                 void *context, int vl, int mode, u64 data)
3521 {
3522         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3523 
3524         return dd->send_egress_err_status_cnt[25];
3525 }
3526 
3527 static u64 access_tx_sdma8_disallowed_packet_err_cnt(
3528                                 const struct cntr_entry *entry,
3529                                 void *context, int vl, int mode, u64 data)
3530 {
3531         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3532 
3533         return dd->send_egress_err_status_cnt[24];
3534 }
3535 
3536 static u64 access_tx_sdma7_disallowed_packet_err_cnt(
3537                                 const struct cntr_entry *entry,
3538                                 void *context, int vl, int mode, u64 data)
3539 {
3540         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3541 
3542         return dd->send_egress_err_status_cnt[23];
3543 }
3544 
3545 static u64 access_tx_sdma6_disallowed_packet_err_cnt(
3546                                 const struct cntr_entry *entry,
3547                                 void *context, int vl, int mode, u64 data)
3548 {
3549         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3550 
3551         return dd->send_egress_err_status_cnt[22];
3552 }
3553 
3554 static u64 access_tx_sdma5_disallowed_packet_err_cnt(
3555                                 const struct cntr_entry *entry,
3556                                 void *context, int vl, int mode, u64 data)
3557 {
3558         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3559 
3560         return dd->send_egress_err_status_cnt[21];
3561 }
3562 
3563 static u64 access_tx_sdma4_disallowed_packet_err_cnt(
3564                                 const struct cntr_entry *entry,
3565                                 void *context, int vl, int mode, u64 data)
3566 {
3567         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3568 
3569         return dd->send_egress_err_status_cnt[20];
3570 }
3571 
3572 static u64 access_tx_sdma3_disallowed_packet_err_cnt(
3573                                 const struct cntr_entry *entry,
3574                                 void *context, int vl, int mode, u64 data)
3575 {
3576         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3577 
3578         return dd->send_egress_err_status_cnt[19];
3579 }
3580 
3581 static u64 access_tx_sdma2_disallowed_packet_err_cnt(
3582                                 const struct cntr_entry *entry,
3583                                 void *context, int vl, int mode, u64 data)
3584 {
3585         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3586 
3587         return dd->send_egress_err_status_cnt[18];
3588 }
3589 
3590 static u64 access_tx_sdma1_disallowed_packet_err_cnt(
3591                                 const struct cntr_entry *entry,
3592                                 void *context, int vl, int mode, u64 data)
3593 {
3594         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3595 
3596         return dd->send_egress_err_status_cnt[17];
3597 }
3598 
3599 static u64 access_tx_sdma0_disallowed_packet_err_cnt(
3600                                 const struct cntr_entry *entry,
3601                                 void *context, int vl, int mode, u64 data)
3602 {
3603         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3604 
3605         return dd->send_egress_err_status_cnt[16];
3606 }
3607 
3608 static u64 access_tx_config_parity_err_cnt(const struct cntr_entry *entry,
3609                                            void *context, int vl, int mode,
3610                                            u64 data)
3611 {
3612         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3613 
3614         return dd->send_egress_err_status_cnt[15];
3615 }
3616 
3617 static u64 access_tx_sbrd_ctl_csr_parity_err_cnt(const struct cntr_entry *entry,
3618                                                  void *context, int vl,
3619                                                  int mode, u64 data)
3620 {
3621         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3622 
3623         return dd->send_egress_err_status_cnt[14];
3624 }
3625 
3626 static u64 access_tx_launch_csr_parity_err_cnt(const struct cntr_entry *entry,
3627                                                void *context, int vl, int mode,
3628                                                u64 data)
3629 {
3630         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3631 
3632         return dd->send_egress_err_status_cnt[13];
3633 }
3634 
3635 static u64 access_tx_illegal_vl_err_cnt(const struct cntr_entry *entry,
3636                                         void *context, int vl, int mode,
3637                                         u64 data)
3638 {
3639         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3640 
3641         return dd->send_egress_err_status_cnt[12];
3642 }
3643 
3644 static u64 access_tx_sbrd_ctl_state_machine_parity_err_cnt(
3645                                 const struct cntr_entry *entry,
3646                                 void *context, int vl, int mode, u64 data)
3647 {
3648         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3649 
3650         return dd->send_egress_err_status_cnt[11];
3651 }
3652 
3653 static u64 access_egress_reserved_10_err_cnt(const struct cntr_entry *entry,
3654                                              void *context, int vl, int mode,
3655                                              u64 data)
3656 {
3657         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3658 
3659         return dd->send_egress_err_status_cnt[10];
3660 }
3661 
3662 static u64 access_egress_reserved_9_err_cnt(const struct cntr_entry *entry,
3663                                             void *context, int vl, int mode,
3664                                             u64 data)
3665 {
3666         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3667 
3668         return dd->send_egress_err_status_cnt[9];
3669 }
3670 
3671 static u64 access_tx_sdma_launch_intf_parity_err_cnt(
3672                                 const struct cntr_entry *entry,
3673                                 void *context, int vl, int mode, u64 data)
3674 {
3675         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3676 
3677         return dd->send_egress_err_status_cnt[8];
3678 }
3679 
3680 static u64 access_tx_pio_launch_intf_parity_err_cnt(
3681                                 const struct cntr_entry *entry,
3682                                 void *context, int vl, int mode, u64 data)
3683 {
3684         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3685 
3686         return dd->send_egress_err_status_cnt[7];
3687 }
3688 
3689 static u64 access_egress_reserved_6_err_cnt(const struct cntr_entry *entry,
3690                                             void *context, int vl, int mode,
3691                                             u64 data)
3692 {
3693         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3694 
3695         return dd->send_egress_err_status_cnt[6];
3696 }
3697 
3698 static u64 access_tx_incorrect_link_state_err_cnt(
3699                                 const struct cntr_entry *entry,
3700                                 void *context, int vl, int mode, u64 data)
3701 {
3702         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3703 
3704         return dd->send_egress_err_status_cnt[5];
3705 }
3706 
3707 static u64 access_tx_linkdown_err_cnt(const struct cntr_entry *entry,
3708                                       void *context, int vl, int mode,
3709                                       u64 data)
3710 {
3711         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3712 
3713         return dd->send_egress_err_status_cnt[4];
3714 }
3715 
3716 static u64 access_tx_egress_fifi_underrun_or_parity_err_cnt(
3717                                 const struct cntr_entry *entry,
3718                                 void *context, int vl, int mode, u64 data)
3719 {
3720         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3721 
3722         return dd->send_egress_err_status_cnt[3];
3723 }
3724 
3725 static u64 access_egress_reserved_2_err_cnt(const struct cntr_entry *entry,
3726                                             void *context, int vl, int mode,
3727                                             u64 data)
3728 {
3729         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3730 
3731         return dd->send_egress_err_status_cnt[2];
3732 }
3733 
3734 static u64 access_tx_pkt_integrity_mem_unc_err_cnt(
3735                                 const struct cntr_entry *entry,
3736                                 void *context, int vl, int mode, u64 data)
3737 {
3738         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3739 
3740         return dd->send_egress_err_status_cnt[1];
3741 }
3742 
3743 static u64 access_tx_pkt_integrity_mem_cor_err_cnt(
3744                                 const struct cntr_entry *entry,
3745                                 void *context, int vl, int mode, u64 data)
3746 {
3747         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3748 
3749         return dd->send_egress_err_status_cnt[0];
3750 }
3751 
3752 /*
3753  * Software counters corresponding to each of the
3754  * error status bits within SendErrStatus
3755  */
3756 static u64 access_send_csr_write_bad_addr_err_cnt(
3757                                 const struct cntr_entry *entry,
3758                                 void *context, int vl, int mode, u64 data)
3759 {
3760         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3761 
3762         return dd->send_err_status_cnt[2];
3763 }
3764 
3765 static u64 access_send_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
3766                                                  void *context, int vl,
3767                                                  int mode, u64 data)
3768 {
3769         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3770 
3771         return dd->send_err_status_cnt[1];
3772 }
3773 
3774 static u64 access_send_csr_parity_cnt(const struct cntr_entry *entry,
3775                                       void *context, int vl, int mode,
3776                                       u64 data)
3777 {
3778         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3779 
3780         return dd->send_err_status_cnt[0];
3781 }
3782 
3783 /*
3784  * Software counters corresponding to each of the
3785  * error status bits within SendCtxtErrStatus
3786  */
3787 static u64 access_pio_write_out_of_bounds_err_cnt(
3788                                 const struct cntr_entry *entry,
3789                                 void *context, int vl, int mode, u64 data)
3790 {
3791         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3792 
3793         return dd->sw_ctxt_err_status_cnt[4];
3794 }
3795 
3796 static u64 access_pio_write_overflow_err_cnt(const struct cntr_entry *entry,
3797                                              void *context, int vl, int mode,
3798                                              u64 data)
3799 {
3800         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3801 
3802         return dd->sw_ctxt_err_status_cnt[3];
3803 }
3804 
3805 static u64 access_pio_write_crosses_boundary_err_cnt(
3806                                 const struct cntr_entry *entry,
3807                                 void *context, int vl, int mode, u64 data)
3808 {
3809         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3810 
3811         return dd->sw_ctxt_err_status_cnt[2];
3812 }
3813 
3814 static u64 access_pio_disallowed_packet_err_cnt(const struct cntr_entry *entry,
3815                                                 void *context, int vl,
3816                                                 int mode, u64 data)
3817 {
3818         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3819 
3820         return dd->sw_ctxt_err_status_cnt[1];
3821 }
3822 
3823 static u64 access_pio_inconsistent_sop_err_cnt(const struct cntr_entry *entry,
3824                                                void *context, int vl, int mode,
3825                                                u64 data)
3826 {
3827         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3828 
3829         return dd->sw_ctxt_err_status_cnt[0];
3830 }
3831 
3832 /*
3833  * Software counters corresponding to each of the
3834  * error status bits within SendDmaEngErrStatus
3835  */
3836 static u64 access_sdma_header_request_fifo_cor_err_cnt(
3837                                 const struct cntr_entry *entry,
3838                                 void *context, int vl, int mode, u64 data)
3839 {
3840         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3841 
3842         return dd->sw_send_dma_eng_err_status_cnt[23];
3843 }
3844 
3845 static u64 access_sdma_header_storage_cor_err_cnt(
3846                                 const struct cntr_entry *entry,
3847                                 void *context, int vl, int mode, u64 data)
3848 {
3849         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3850 
3851         return dd->sw_send_dma_eng_err_status_cnt[22];
3852 }
3853 
3854 static u64 access_sdma_packet_tracking_cor_err_cnt(
3855                                 const struct cntr_entry *entry,
3856                                 void *context, int vl, int mode, u64 data)
3857 {
3858         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3859 
3860         return dd->sw_send_dma_eng_err_status_cnt[21];
3861 }
3862 
3863 static u64 access_sdma_assembly_cor_err_cnt(const struct cntr_entry *entry,
3864                                             void *context, int vl, int mode,
3865                                             u64 data)
3866 {
3867         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3868 
3869         return dd->sw_send_dma_eng_err_status_cnt[20];
3870 }
3871 
3872 static u64 access_sdma_desc_table_cor_err_cnt(const struct cntr_entry *entry,
3873                                               void *context, int vl, int mode,
3874                                               u64 data)
3875 {
3876         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3877 
3878         return dd->sw_send_dma_eng_err_status_cnt[19];
3879 }
3880 
3881 static u64 access_sdma_header_request_fifo_unc_err_cnt(
3882                                 const struct cntr_entry *entry,
3883                                 void *context, int vl, int mode, u64 data)
3884 {
3885         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3886 
3887         return dd->sw_send_dma_eng_err_status_cnt[18];
3888 }
3889 
3890 static u64 access_sdma_header_storage_unc_err_cnt(
3891                                 const struct cntr_entry *entry,
3892                                 void *context, int vl, int mode, u64 data)
3893 {
3894         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3895 
3896         return dd->sw_send_dma_eng_err_status_cnt[17];
3897 }
3898 
3899 static u64 access_sdma_packet_tracking_unc_err_cnt(
3900                                 const struct cntr_entry *entry,
3901                                 void *context, int vl, int mode, u64 data)
3902 {
3903         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3904 
3905         return dd->sw_send_dma_eng_err_status_cnt[16];
3906 }
3907 
3908 static u64 access_sdma_assembly_unc_err_cnt(const struct cntr_entry *entry,
3909                                             void *context, int vl, int mode,
3910                                             u64 data)
3911 {
3912         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3913 
3914         return dd->sw_send_dma_eng_err_status_cnt[15];
3915 }
3916 
3917 static u64 access_sdma_desc_table_unc_err_cnt(const struct cntr_entry *entry,
3918                                               void *context, int vl, int mode,
3919                                               u64 data)
3920 {
3921         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3922 
3923         return dd->sw_send_dma_eng_err_status_cnt[14];
3924 }
3925 
3926 static u64 access_sdma_timeout_err_cnt(const struct cntr_entry *entry,
3927                                        void *context, int vl, int mode,
3928                                        u64 data)
3929 {
3930         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3931 
3932         return dd->sw_send_dma_eng_err_status_cnt[13];
3933 }
3934 
3935 static u64 access_sdma_header_length_err_cnt(const struct cntr_entry *entry,
3936                                              void *context, int vl, int mode,
3937                                              u64 data)
3938 {
3939         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3940 
3941         return dd->sw_send_dma_eng_err_status_cnt[12];
3942 }
3943 
3944 static u64 access_sdma_header_address_err_cnt(const struct cntr_entry *entry,
3945                                               void *context, int vl, int mode,
3946                                               u64 data)
3947 {
3948         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3949 
3950         return dd->sw_send_dma_eng_err_status_cnt[11];
3951 }
3952 
3953 static u64 access_sdma_header_select_err_cnt(const struct cntr_entry *entry,
3954                                              void *context, int vl, int mode,
3955                                              u64 data)
3956 {
3957         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3958 
3959         return dd->sw_send_dma_eng_err_status_cnt[10];
3960 }
3961 
3962 static u64 access_sdma_reserved_9_err_cnt(const struct cntr_entry *entry,
3963                                           void *context, int vl, int mode,
3964                                           u64 data)
3965 {
3966         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3967 
3968         return dd->sw_send_dma_eng_err_status_cnt[9];
3969 }
3970 
3971 static u64 access_sdma_packet_desc_overflow_err_cnt(
3972                                 const struct cntr_entry *entry,
3973                                 void *context, int vl, int mode, u64 data)
3974 {
3975         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3976 
3977         return dd->sw_send_dma_eng_err_status_cnt[8];
3978 }
3979 
3980 static u64 access_sdma_length_mismatch_err_cnt(const struct cntr_entry *entry,
3981                                                void *context, int vl,
3982                                                int mode, u64 data)
3983 {
3984         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3985 
3986         return dd->sw_send_dma_eng_err_status_cnt[7];
3987 }
3988 
3989 static u64 access_sdma_halt_err_cnt(const struct cntr_entry *entry,
3990                                     void *context, int vl, int mode, u64 data)
3991 {
3992         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3993 
3994         return dd->sw_send_dma_eng_err_status_cnt[6];
3995 }
3996 
3997 static u64 access_sdma_mem_read_err_cnt(const struct cntr_entry *entry,
3998                                         void *context, int vl, int mode,
3999                                         u64 data)
4000 {
4001         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
4002 
4003         return dd->sw_send_dma_eng_err_status_cnt[5];
4004 }
4005 
4006 static u64 access_sdma_first_desc_err_cnt(const struct cntr_entry *entry,
4007                                           void *context, int vl, int mode,
4008                                           u64 data)
4009 {
4010         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
4011 
4012         return dd->sw_send_dma_eng_err_status_cnt[4];
4013 }
4014 
4015 static u64 access_sdma_tail_out_of_bounds_err_cnt(
4016                                 const struct cntr_entry *entry,
4017                                 void *context, int vl, int mode, u64 data)
4018 {
4019         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
4020 
4021         return dd->sw_send_dma_eng_err_status_cnt[3];
4022 }
4023 
4024 static u64 access_sdma_too_long_err_cnt(const struct cntr_entry *entry,
4025                                         void *context, int vl, int mode,
4026                                         u64 data)
4027 {
4028         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
4029 
4030         return dd->sw_send_dma_eng_err_status_cnt[2];
4031 }
4032 
4033 static u64 access_sdma_gen_mismatch_err_cnt(const struct cntr_entry *entry,
4034                                             void *context, int vl, int mode,
4035                                             u64 data)
4036 {
4037         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
4038 
4039         return dd->sw_send_dma_eng_err_status_cnt[1];
4040 }
4041 
4042 static u64 access_sdma_wrong_dw_err_cnt(const struct cntr_entry *entry,
4043                                         void *context, int vl, int mode,
4044                                         u64 data)
4045 {
4046         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
4047 
4048         return dd->sw_send_dma_eng_err_status_cnt[0];
4049 }
4050 
4051 static u64 access_dc_rcv_err_cnt(const struct cntr_entry *entry,
4052                                  void *context, int vl, int mode,
4053                                  u64 data)
4054 {
4055         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
4056 
4057         u64 val = 0;
4058         u64 csr = entry->csr;
4059 
4060         val = read_write_csr(dd, csr, mode, data);
4061         if (mode == CNTR_MODE_R) {
4062                 val = val > CNTR_MAX - dd->sw_rcv_bypass_packet_errors ?
4063                         CNTR_MAX : val + dd->sw_rcv_bypass_packet_errors;
4064         } else if (mode == CNTR_MODE_W) {
4065                 dd->sw_rcv_bypass_packet_errors = 0;
4066         } else {
4067                 dd_dev_err(dd, "Invalid cntr register access mode");
4068                 return 0;
4069         }
4070         return val;
4071 }
4072 
4073 #define def_access_sw_cpu(cntr) \
4074 static u64 access_sw_cpu_##cntr(const struct cntr_entry *entry,               \
4075                               void *context, int vl, int mode, u64 data)      \
4076 {                                                                             \
4077         struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;        \
4078         return read_write_cpu(ppd->dd, &ppd->ibport_data.rvp.z_ ##cntr,       \
4079                               ppd->ibport_data.rvp.cntr, vl,                  \
4080                               mode, data);                                    \
4081 }
4082 
4083 def_access_sw_cpu(rc_acks);
4084 def_access_sw_cpu(rc_qacks);
4085 def_access_sw_cpu(rc_delayed_comp);
4086 
4087 #define def_access_ibp_counter(cntr) \
4088 static u64 access_ibp_##cntr(const struct cntr_entry *entry,                  \
4089                                 void *context, int vl, int mode, u64 data)    \
4090 {                                                                             \
4091         struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;        \
4092                                                                               \
4093         if (vl != CNTR_INVALID_VL)                                            \
4094                 return 0;                                                     \
4095                                                                               \
4096         return read_write_sw(ppd->dd, &ppd->ibport_data.rvp.n_ ##cntr,        \
4097                              mode, data);                                     \
4098 }
4099 
4100 def_access_ibp_counter(loop_pkts);
4101 def_access_ibp_counter(rc_resends);
4102 def_access_ibp_counter(rnr_naks);
4103 def_access_ibp_counter(other_naks);
4104 def_access_ibp_counter(rc_timeouts);
4105 def_access_ibp_counter(pkt_drops);
4106 def_access_ibp_counter(dmawait);
4107 def_access_ibp_counter(rc_seqnak);
4108 def_access_ibp_counter(rc_dupreq);
4109 def_access_ibp_counter(rdma_seq);
4110 def_access_ibp_counter(unaligned);
4111 def_access_ibp_counter(seq_naks);
4112 def_access_ibp_counter(rc_crwaits);
4113 
4114 static struct cntr_entry dev_cntrs[DEV_CNTR_LAST] = {
4115 [C_RCV_OVF] = RXE32_DEV_CNTR_ELEM(RcvOverflow, RCV_BUF_OVFL_CNT, CNTR_SYNTH),
4116 [C_RX_LEN_ERR] = RXE32_DEV_CNTR_ELEM(RxLenErr, RCV_LENGTH_ERR_CNT, CNTR_SYNTH),
4117 [C_RX_SHORT_ERR] = RXE32_DEV_CNTR_ELEM(RxShrErr, RCV_SHORT_ERR_CNT, CNTR_SYNTH),
4118 [C_RX_ICRC_ERR] = RXE32_DEV_CNTR_ELEM(RxICrcErr, RCV_ICRC_ERR_CNT, CNTR_SYNTH),
4119 [C_RX_EBP] = RXE32_DEV_CNTR_ELEM(RxEbpCnt, RCV_EBP_CNT, CNTR_SYNTH),
4120 [C_RX_TID_FULL] = RXE32_DEV_CNTR_ELEM(RxTIDFullEr, RCV_TID_FULL_ERR_CNT,
4121                         CNTR_NORMAL),
4122 [C_RX_TID_INVALID] = RXE32_DEV_CNTR_ELEM(RxTIDInvalid, RCV_TID_VALID_ERR_CNT,
4123                         CNTR_NORMAL),
4124 [C_RX_TID_FLGMS] = RXE32_DEV_CNTR_ELEM(RxTidFLGMs,
4125                         RCV_TID_FLOW_GEN_MISMATCH_CNT,
4126                         CNTR_NORMAL),
4127 [C_RX_CTX_EGRS] = RXE32_DEV_CNTR_ELEM(RxCtxEgrS, RCV_CONTEXT_EGR_STALL,
4128                         CNTR_NORMAL),
4129 [C_RCV_TID_FLSMS] = RXE32_DEV_CNTR_ELEM(RxTidFLSMs,
4130                         RCV_TID_FLOW_SEQ_MISMATCH_CNT, CNTR_NORMAL),
4131 [C_CCE_PCI_CR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePciCrSt,
4132                         CCE_PCIE_POSTED_CRDT_STALL_CNT, CNTR_NORMAL),
4133 [C_CCE_PCI_TR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePciTrSt, CCE_PCIE_TRGT_STALL_CNT,
4134                         CNTR_NORMAL),
4135 [C_CCE_PIO_WR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePioWrSt, CCE_PIO_WR_STALL_CNT,
4136                         CNTR_NORMAL),
4137 [C_CCE_ERR_INT] = CCE_INT_DEV_CNTR_ELEM(CceErrInt, CCE_ERR_INT_CNT,
4138                         CNTR_NORMAL),
4139 [C_CCE_SDMA_INT] = CCE_INT_DEV_CNTR_ELEM(CceSdmaInt, CCE_SDMA_INT_CNT,
4140                         CNTR_NORMAL),
4141 [C_CCE_MISC_INT] = CCE_INT_DEV_CNTR_ELEM(CceMiscInt, CCE_MISC_INT_CNT,
4142                         CNTR_NORMAL),
4143 [C_CCE_RCV_AV_INT] = CCE_INT_DEV_CNTR_ELEM(CceRcvAvInt, CCE_RCV_AVAIL_INT_CNT,
4144                         CNTR_NORMAL),
4145 [C_CCE_RCV_URG_INT] = CCE_INT_DEV_CNTR_ELEM(CceRcvUrgInt,
4146                         CCE_RCV_URGENT_INT_CNT, CNTR_NORMAL),
4147 [C_CCE_SEND_CR_INT] = CCE_INT_DEV_CNTR_ELEM(CceSndCrInt,
4148                         CCE_SEND_CREDIT_INT_CNT, CNTR_NORMAL),
4149 [C_DC_UNC_ERR] = DC_PERF_CNTR(DcUnctblErr, DCC_ERR_UNCORRECTABLE_CNT,
4150                               CNTR_SYNTH),
4151 [C_DC_RCV_ERR] = CNTR_ELEM("DcRecvErr", DCC_ERR_PORTRCV_ERR_CNT, 0, CNTR_SYNTH,
4152                             access_dc_rcv_err_cnt),
4153 [C_DC_FM_CFG_ERR] = DC_PERF_CNTR(DcFmCfgErr, DCC_ERR_FMCONFIG_ERR_CNT,
4154                                  CNTR_SYNTH),
4155 [C_DC_RMT_PHY_ERR] = DC_PERF_CNTR(DcRmtPhyErr, DCC_ERR_RCVREMOTE_PHY_ERR_CNT,
4156                                   CNTR_SYNTH),
4157 [C_DC_DROPPED_PKT] = DC_PERF_CNTR(DcDroppedPkt, DCC_ERR_DROPPED_PKT_CNT,
4158                                   CNTR_SYNTH),
4159 [C_DC_MC_XMIT_PKTS] = DC_PERF_CNTR(DcMcXmitPkts,
4160                                    DCC_PRF_PORT_XMIT_MULTICAST_CNT, CNTR_SYNTH),
4161 [C_DC_MC_RCV_PKTS] = DC_PERF_CNTR(DcMcRcvPkts,
4162                                   DCC_PRF_PORT_RCV_MULTICAST_PKT_CNT,
4163                                   CNTR_SYNTH),
4164 [C_DC_XMIT_CERR] = DC_PERF_CNTR(DcXmitCorr,
4165                                 DCC_PRF_PORT_XMIT_CORRECTABLE_CNT, CNTR_SYNTH),
4166 [C_DC_RCV_CERR] = DC_PERF_CNTR(DcRcvCorrCnt, DCC_PRF_PORT_RCV_CORRECTABLE_CNT,
4167                                CNTR_SYNTH),
4168 [C_DC_RCV_FCC] = DC_PERF_CNTR(DcRxFCntl, DCC_PRF_RX_FLOW_CRTL_CNT,
4169                               CNTR_SYNTH),
4170 [C_DC_XMIT_FCC] = DC_PERF_CNTR(DcXmitFCntl, DCC_PRF_TX_FLOW_CRTL_CNT,
4171                                CNTR_SYNTH),
4172 [C_DC_XMIT_FLITS] = DC_PERF_CNTR(DcXmitFlits, DCC_PRF_PORT_XMIT_DATA_CNT,
4173                                  CNTR_SYNTH),
4174 [C_DC_RCV_FLITS] = DC_PERF_CNTR(DcRcvFlits, DCC_PRF_PORT_RCV_DATA_CNT,
4175                                 CNTR_SYNTH),
4176 [C_DC_XMIT_PKTS] = DC_PERF_CNTR(DcXmitPkts, DCC_PRF_PORT_XMIT_PKTS_CNT,
4177                                 CNTR_SYNTH),
4178 [C_DC_RCV_PKTS] = DC_PERF_CNTR(DcRcvPkts, DCC_PRF_PORT_RCV_PKTS_CNT,
4179                                CNTR_SYNTH),
4180 [C_DC_RX_FLIT_VL] = DC_PERF_CNTR(DcRxFlitVl, DCC_PRF_PORT_VL_RCV_DATA_CNT,
4181                                  CNTR_SYNTH | CNTR_VL),
4182 [C_DC_RX_PKT_VL] = DC_PERF_CNTR(DcRxPktVl, DCC_PRF_PORT_VL_RCV_PKTS_CNT,
4183                                 CNTR_SYNTH | CNTR_VL),
4184 [C_DC_RCV_FCN] = DC_PERF_CNTR(DcRcvFcn, DCC_PRF_PORT_RCV_FECN_CNT, CNTR_SYNTH),
4185 [C_DC_RCV_FCN_VL] = DC_PERF_CNTR(DcRcvFcnVl, DCC_PRF_PORT_VL_RCV_FECN_CNT,
4186                                  CNTR_SYNTH | CNTR_VL),
4187 [C_DC_RCV_BCN] = DC_PERF_CNTR(DcRcvBcn, DCC_PRF_PORT_RCV_BECN_CNT, CNTR_SYNTH),
4188 [C_DC_RCV_BCN_VL] = DC_PERF_CNTR(DcRcvBcnVl, DCC_PRF_PORT_VL_RCV_BECN_CNT,
4189                                  CNTR_SYNTH | CNTR_VL),
4190 [C_DC_RCV_BBL] = DC_PERF_CNTR(DcRcvBbl, DCC_PRF_PORT_RCV_BUBBLE_CNT,
4191                               CNTR_SYNTH),
4192 [C_DC_RCV_BBL_VL] = DC_PERF_CNTR(DcRcvBblVl, DCC_PRF_PORT_VL_RCV_BUBBLE_CNT,
4193                                  CNTR_SYNTH | CNTR_VL),
4194 [C_DC_MARK_FECN] = DC_PERF_CNTR(DcMarkFcn, DCC_PRF_PORT_MARK_FECN_CNT,
4195                                 CNTR_SYNTH),
4196 [C_DC_MARK_FECN_VL] = DC_PERF_CNTR(DcMarkFcnVl, DCC_PRF_PORT_VL_MARK_FECN_CNT,
4197                                    CNTR_SYNTH | CNTR_VL),
4198 [C_DC_TOTAL_CRC] =
4199         DC_PERF_CNTR_LCB(DcTotCrc, DC_LCB_ERR_INFO_TOTAL_CRC_ERR,
4200                          CNTR_SYNTH),
4201 [C_DC_CRC_LN0] = DC_PERF_CNTR_LCB(DcCrcLn0, DC_LCB_ERR_INFO_CRC_ERR_LN0,
4202                                   CNTR_SYNTH),
4203 [C_DC_CRC_LN1] = DC_PERF_CNTR_LCB(DcCrcLn1, DC_LCB_ERR_INFO_CRC_ERR_LN1,
4204                                   CNTR_SYNTH),
4205 [C_DC_CRC_LN2] = DC_PERF_CNTR_LCB(DcCrcLn2, DC_LCB_ERR_INFO_CRC_ERR_LN2,
4206                                   CNTR_SYNTH),
4207 [C_DC_CRC_LN3] = DC_PERF_CNTR_LCB(DcCrcLn3, DC_LCB_ERR_INFO_CRC_ERR_LN3,
4208                                   CNTR_SYNTH),
4209 [C_DC_CRC_MULT_LN] =
4210         DC_PERF_CNTR_LCB(DcMultLn, DC_LCB_ERR_INFO_CRC_ERR_MULTI_LN,
4211                          CNTR_SYNTH),
4212 [C_DC_TX_REPLAY] = DC_PERF_CNTR_LCB(DcTxReplay, DC_LCB_ERR_INFO_TX_REPLAY_CNT,
4213                                     CNTR_SYNTH),
4214 [C_DC_RX_REPLAY] = DC_PERF_CNTR_LCB(DcRxReplay, DC_LCB_ERR_INFO_RX_REPLAY_CNT,
4215                                     CNTR_SYNTH),
4216 [C_DC_SEQ_CRC_CNT] =
4217         DC_PERF_CNTR_LCB(DcLinkSeqCrc, DC_LCB_ERR_INFO_SEQ_CRC_CNT,
4218                          CNTR_SYNTH),
4219 [C_DC_ESC0_ONLY_CNT] =
4220         DC_PERF_CNTR_LCB(DcEsc0, DC_LCB_ERR_INFO_ESCAPE_0_ONLY_CNT,
4221                          CNTR_SYNTH),
4222 [C_DC_ESC0_PLUS1_CNT] =
4223         DC_PERF_CNTR_LCB(DcEsc1, DC_LCB_ERR_INFO_ESCAPE_0_PLUS1_CNT,
4224                          CNTR_SYNTH),
4225 [C_DC_ESC0_PLUS2_CNT] =
4226         DC_PERF_CNTR_LCB(DcEsc0Plus2, DC_LCB_ERR_INFO_ESCAPE_0_PLUS2_CNT,
4227                          CNTR_SYNTH),
4228 [C_DC_REINIT_FROM_PEER_CNT] =
4229         DC_PERF_CNTR_LCB(DcReinitPeer, DC_LCB_ERR_INFO_REINIT_FROM_PEER_CNT,
4230                          CNTR_SYNTH),
4231 [C_DC_SBE_CNT] = DC_PERF_CNTR_LCB(DcSbe, DC_LCB_ERR_INFO_SBE_CNT,
4232                                   CNTR_SYNTH),
4233 [C_DC_MISC_FLG_CNT] =
4234         DC_PERF_CNTR_LCB(DcMiscFlg, DC_LCB_ERR_INFO_MISC_FLG_CNT,
4235                          CNTR_SYNTH),
4236 [C_DC_PRF_GOOD_LTP_CNT] =
4237         DC_PERF_CNTR_LCB(DcGoodLTP, DC_LCB_PRF_GOOD_LTP_CNT, CNTR_SYNTH),
4238 [C_DC_PRF_ACCEPTED_LTP_CNT] =
4239         DC_PERF_CNTR_LCB(DcAccLTP, DC_LCB_PRF_ACCEPTED_LTP_CNT,
4240                          CNTR_SYNTH),
4241 [C_DC_PRF_RX_FLIT_CNT] =
4242         DC_PERF_CNTR_LCB(DcPrfRxFlit, DC_LCB_PRF_RX_FLIT_CNT, CNTR_SYNTH),
4243 [C_DC_PRF_TX_FLIT_CNT] =
4244         DC_PERF_CNTR_LCB(DcPrfTxFlit, DC_LCB_PRF_TX_FLIT_CNT, CNTR_SYNTH),
4245 [C_DC_PRF_CLK_CNTR] =
4246         DC_PERF_CNTR_LCB(DcPrfClk, DC_LCB_PRF_CLK_CNTR, CNTR_SYNTH),
4247 [C_DC_PG_DBG_FLIT_CRDTS_CNT] =
4248         DC_PERF_CNTR_LCB(DcFltCrdts, DC_LCB_PG_DBG_FLIT_CRDTS_CNT, CNTR_SYNTH),
4249 [C_DC_PG_STS_PAUSE_COMPLETE_CNT] =
4250         DC_PERF_CNTR_LCB(DcPauseComp, DC_LCB_PG_STS_PAUSE_COMPLETE_CNT,
4251                          CNTR_SYNTH),
4252 [C_DC_PG_STS_TX_SBE_CNT] =
4253         DC_PERF_CNTR_LCB(DcStsTxSbe, DC_LCB_PG_STS_TX_SBE_CNT, CNTR_SYNTH),
4254 [C_DC_PG_STS_TX_MBE_CNT] =
4255         DC_PERF_CNTR_LCB(DcStsTxMbe, DC_LCB_PG_STS_TX_MBE_CNT,
4256                          CNTR_SYNTH),
4257 [C_SW_CPU_INTR] = CNTR_ELEM("Intr", 0, 0, CNTR_NORMAL,
4258                             access_sw_cpu_intr),
4259 [C_SW_CPU_RCV_LIM] = CNTR_ELEM("RcvLimit", 0, 0, CNTR_NORMAL,
4260                             access_sw_cpu_rcv_limit),
4261 [C_SW_CTX0_SEQ_DROP] = CNTR_ELEM("SeqDrop0", 0, 0, CNTR_NORMAL,
4262                             access_sw_ctx0_seq_drop),
4263 [C_SW_VTX_WAIT] = CNTR_ELEM("vTxWait", 0, 0, CNTR_NORMAL,
4264                             access_sw_vtx_wait),
4265 [C_SW_PIO_WAIT] = CNTR_ELEM("PioWait", 0, 0, CNTR_NORMAL,
4266                             access_sw_pio_wait),
4267 [C_SW_PIO_DRAIN] = CNTR_ELEM("PioDrain", 0, 0, CNTR_NORMAL,
4268                             access_sw_pio_drain),
4269 [C_SW_KMEM_WAIT] = CNTR_ELEM("KmemWait", 0, 0, CNTR_NORMAL,
4270                             access_sw_kmem_wait),
4271 [C_SW_TID_WAIT] = CNTR_ELEM("TidWait", 0, 0, CNTR_NORMAL,
4272                             hfi1_access_sw_tid_wait),
4273 [C_SW_SEND_SCHED] = CNTR_ELEM("SendSched", 0, 0, CNTR_NORMAL,
4274                             access_sw_send_schedule),
4275 [C_SDMA_DESC_FETCHED_CNT] = CNTR_ELEM("SDEDscFdCn",
4276                                       SEND_DMA_DESC_FETCHED_CNT, 0,
4277                                       CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4278                                       dev_access_u32_csr),
4279 [C_SDMA_INT_CNT] = CNTR_ELEM("SDMAInt", 0, 0,
4280                              CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4281                              access_sde_int_cnt),
4282 [C_SDMA_ERR_CNT] = CNTR_ELEM("SDMAErrCt", 0, 0,
4283                              CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4284                              access_sde_err_cnt),
4285 [C_SDMA_IDLE_INT_CNT] = CNTR_ELEM("SDMAIdInt", 0, 0,
4286                                   CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4287                                   access_sde_idle_int_cnt),
4288 [C_SDMA_PROGRESS_INT_CNT] = CNTR_ELEM("SDMAPrIntCn", 0, 0,
4289                                       CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4290                                       access_sde_progress_int_cnt),
4291 /* MISC_ERR_STATUS */
4292 [C_MISC_PLL_LOCK_FAIL_ERR] = CNTR_ELEM("MISC_PLL_LOCK_FAIL_ERR", 0, 0,
4293                                 CNTR_NORMAL,
4294                                 access_misc_pll_lock_fail_err_cnt),
4295 [C_MISC_MBIST_FAIL_ERR] = CNTR_ELEM("MISC_MBIST_FAIL_ERR", 0, 0,
4296                                 CNTR_NORMAL,
4297                                 access_misc_mbist_fail_err_cnt),
4298 [C_MISC_INVALID_EEP_CMD_ERR] = CNTR_ELEM("MISC_INVALID_EEP_CMD_ERR", 0, 0,
4299                                 CNTR_NORMAL,
4300                                 access_misc_invalid_eep_cmd_err_cnt),
4301 [C_MISC_EFUSE_DONE_PARITY_ERR] = CNTR_ELEM("MISC_EFUSE_DONE_PARITY_ERR", 0, 0,
4302                                 CNTR_NORMAL,
4303                                 access_misc_efuse_done_parity_err_cnt),
4304 [C_MISC_EFUSE_WRITE_ERR] = CNTR_ELEM("MISC_EFUSE_WRITE_ERR", 0, 0,
4305                                 CNTR_NORMAL,
4306                                 access_misc_efuse_write_err_cnt),
4307 [C_MISC_EFUSE_READ_BAD_ADDR_ERR] = CNTR_ELEM("MISC_EFUSE_READ_BAD_ADDR_ERR", 0,
4308                                 0, CNTR_NORMAL,
4309                                 access_misc_efuse_read_bad_addr_err_cnt),
4310 [C_MISC_EFUSE_CSR_PARITY_ERR] = CNTR_ELEM("MISC_EFUSE_CSR_PARITY_ERR", 0, 0,
4311                                 CNTR_NORMAL,
4312                                 access_misc_efuse_csr_parity_err_cnt),
4313 [C_MISC_FW_AUTH_FAILED_ERR] = CNTR_ELEM("MISC_FW_AUTH_FAILED_ERR", 0, 0,
4314                                 CNTR_NORMAL,
4315                                 access_misc_fw_auth_failed_err_cnt),
4316 [C_MISC_KEY_MISMATCH_ERR] = CNTR_ELEM("MISC_KEY_MISMATCH_ERR", 0, 0,
4317                                 CNTR_NORMAL,
4318                                 access_misc_key_mismatch_err_cnt),
4319 [C_MISC_SBUS_WRITE_FAILED_ERR] = CNTR_ELEM("MISC_SBUS_WRITE_FAILED_ERR", 0, 0,
4320                                 CNTR_NORMAL,
4321                                 access_misc_sbus_write_failed_err_cnt),
4322 [C_MISC_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("MISC_CSR_WRITE_BAD_ADDR_ERR", 0, 0,
4323                                 CNTR_NORMAL,
4324                                 access_misc_csr_write_bad_addr_err_cnt),
4325 [C_MISC_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("MISC_CSR_READ_BAD_ADDR_ERR", 0, 0,
4326                                 CNTR_NORMAL,
4327                                 access_misc_csr_read_bad_addr_err_cnt),
4328 [C_MISC_CSR_PARITY_ERR] = CNTR_ELEM("MISC_CSR_PARITY_ERR", 0, 0,
4329                                 CNTR_NORMAL,
4330                                 access_misc_csr_parity_err_cnt),
4331 /* CceErrStatus */
4332 [C_CCE_ERR_STATUS_AGGREGATED_CNT] = CNTR_ELEM("CceErrStatusAggregatedCnt", 0, 0,
4333                                 CNTR_NORMAL,
4334                                 access_sw_cce_err_status_aggregated_cnt),
4335 [C_CCE_MSIX_CSR_PARITY_ERR] = CNTR_ELEM("CceMsixCsrParityErr", 0, 0,
4336                                 CNTR_NORMAL,
4337                                 access_cce_msix_csr_parity_err_cnt),
4338 [C_CCE_INT_MAP_UNC_ERR] = CNTR_ELEM("CceIntMapUncErr", 0, 0,
4339                                 CNTR_NORMAL,
4340                                 access_cce_int_map_unc_err_cnt),
4341 [C_CCE_INT_MAP_COR_ERR] = CNTR_ELEM("CceIntMapCorErr", 0, 0,
4342                                 CNTR_NORMAL,
4343                                 access_cce_int_map_cor_err_cnt),
4344 [C_CCE_MSIX_TABLE_UNC_ERR] = CNTR_ELEM("CceMsixTableUncErr", 0, 0,
4345                                 CNTR_NORMAL,
4346                                 access_cce_msix_table_unc_err_cnt),
4347 [C_CCE_MSIX_TABLE_COR_ERR] = CNTR_ELEM("CceMsixTableCorErr", 0, 0,
4348                                 CNTR_NORMAL,
4349                                 access_cce_msix_table_cor_err_cnt),
4350 [C_CCE_RXDMA_CONV_FIFO_PARITY_ERR] = CNTR_ELEM("CceRxdmaConvFifoParityErr", 0,
4351                                 0, CNTR_NORMAL,
4352                                 access_cce_rxdma_conv_fifo_parity_err_cnt),
4353 [C_CCE_RCPL_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceRcplAsyncFifoParityErr", 0,
4354                                 0, CNTR_NORMAL,
4355                                 access_cce_rcpl_async_fifo_parity_err_cnt),
4356 [C_CCE_SEG_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("CceSegWriteBadAddrErr", 0, 0,
4357                                 CNTR_NORMAL,
4358                                 access_cce_seg_write_bad_addr_err_cnt),
4359 [C_CCE_SEG_READ_BAD_ADDR_ERR] = CNTR_ELEM("CceSegReadBadAddrErr", 0, 0,
4360                                 CNTR_NORMAL,
4361                                 access_cce_seg_read_bad_addr_err_cnt),
4362 [C_LA_TRIGGERED] = CNTR_ELEM("Cce LATriggered", 0, 0,
4363                                 CNTR_NORMAL,
4364                                 access_la_triggered_cnt),
4365 [C_CCE_TRGT_CPL_TIMEOUT_ERR] = CNTR_ELEM("CceTrgtCplTimeoutErr", 0, 0,
4366                                 CNTR_NORMAL,
4367                                 access_cce_trgt_cpl_timeout_err_cnt),
4368 [C_PCIC_RECEIVE_PARITY_ERR] = CNTR_ELEM("PcicReceiveParityErr", 0, 0,
4369                                 CNTR_NORMAL,
4370                                 access_pcic_receive_parity_err_cnt),
4371 [C_PCIC_TRANSMIT_BACK_PARITY_ERR] = CNTR_ELEM("PcicTransmitBackParityErr", 0, 0,
4372                                 CNTR_NORMAL,
4373                                 access_pcic_transmit_back_parity_err_cnt),
4374 [C_PCIC_TRANSMIT_FRONT_PARITY_ERR] = CNTR_ELEM("PcicTransmitFrontParityErr", 0,
4375                                 0, CNTR_NORMAL,
4376                                 access_pcic_transmit_front_parity_err_cnt),
4377 [C_PCIC_CPL_DAT_Q_UNC_ERR] = CNTR_ELEM("PcicCplDatQUncErr", 0, 0,
4378                                 CNTR_NORMAL,
4379                                 access_pcic_cpl_dat_q_unc_err_cnt),
4380 [C_PCIC_CPL_HD_Q_UNC_ERR] = CNTR_ELEM("PcicCplHdQUncErr", 0, 0,
4381                                 CNTR_NORMAL,
4382                                 access_pcic_cpl_hd_q_unc_err_cnt),
4383 [C_PCIC_POST_DAT_Q_UNC_ERR] = CNTR_ELEM("PcicPostDatQUncErr", 0, 0,
4384                                 CNTR_NORMAL,
4385                                 access_pcic_post_dat_q_unc_err_cnt),
4386 [C_PCIC_POST_HD_Q_UNC_ERR] = CNTR_ELEM("PcicPostHdQUncErr", 0, 0,
4387                                 CNTR_NORMAL,
4388                                 access_pcic_post_hd_q_unc_err_cnt),
4389 [C_PCIC_RETRY_SOT_MEM_UNC_ERR] = CNTR_ELEM("PcicRetrySotMemUncErr", 0, 0,
4390                                 CNTR_NORMAL,
4391                                 access_pcic_retry_sot_mem_unc_err_cnt),
4392 [C_PCIC_RETRY_MEM_UNC_ERR] = CNTR_ELEM("PcicRetryMemUncErr", 0, 0,
4393                                 CNTR_NORMAL,
4394                                 access_pcic_retry_mem_unc_err),
4395 [C_PCIC_N_POST_DAT_Q_PARITY_ERR] = CNTR_ELEM("PcicNPostDatQParityErr", 0, 0,
4396                                 CNTR_NORMAL,
4397                                 access_pcic_n_post_dat_q_parity_err_cnt),
4398 [C_PCIC_N_POST_H_Q_PARITY_ERR] = CNTR_ELEM("PcicNPostHQParityErr", 0, 0,
4399                                 CNTR_NORMAL,
4400                                 access_pcic_n_post_h_q_parity_err_cnt),
4401 [C_PCIC_CPL_DAT_Q_COR_ERR] = CNTR_ELEM("PcicCplDatQCorErr", 0, 0,
4402                                 CNTR_NORMAL,
4403                                 access_pcic_cpl_dat_q_cor_err_cnt),
4404 [C_PCIC_CPL_HD_Q_COR_ERR] = CNTR_ELEM("PcicCplHdQCorErr", 0, 0,
4405                                 CNTR_NORMAL,
4406                                 access_pcic_cpl_hd_q_cor_err_cnt),
4407 [C_PCIC_POST_DAT_Q_COR_ERR] = CNTR_ELEM("PcicPostDatQCorErr", 0, 0,
4408                                 CNTR_NORMAL,
4409                                 access_pcic_post_dat_q_cor_err_cnt),
4410 [C_PCIC_POST_HD_Q_COR_ERR] = CNTR_ELEM("PcicPostHdQCorErr", 0, 0,
4411                                 CNTR_NORMAL,
4412                                 access_pcic_post_hd_q_cor_err_cnt),
4413 [C_PCIC_RETRY_SOT_MEM_COR_ERR] = CNTR_ELEM("PcicRetrySotMemCorErr", 0, 0,
4414                                 CNTR_NORMAL,
4415                                 access_pcic_retry_sot_mem_cor_err_cnt),
4416 [C_PCIC_RETRY_MEM_COR_ERR] = CNTR_ELEM("PcicRetryMemCorErr", 0, 0,
4417                                 CNTR_NORMAL,
4418                                 access_pcic_retry_mem_cor_err_cnt),
4419 [C_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERR] = CNTR_ELEM(
4420                                 "CceCli1AsyncFifoDbgParityError", 0, 0,
4421                                 CNTR_NORMAL,
4422                                 access_cce_cli1_async_fifo_dbg_parity_err_cnt),
4423 [C_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERR] = CNTR_ELEM(
4424                                 "CceCli1AsyncFifoRxdmaParityError", 0, 0,
4425                                 CNTR_NORMAL,
4426                                 access_cce_cli1_async_fifo_rxdma_parity_err_cnt
4427                                 ),
4428 [C_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR] = CNTR_ELEM(
4429                         "CceCli1AsyncFifoSdmaHdParityErr", 0, 0,
4430                         CNTR_NORMAL,
4431                         access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt),
4432 [C_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR] = CNTR_ELEM(
4433                         "CceCli1AsyncFifoPioCrdtParityErr", 0, 0,
4434                         CNTR_NORMAL,
4435                         access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt),
4436 [C_CCE_CLI2_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceCli2AsyncFifoParityErr", 0,
4437                         0, CNTR_NORMAL,
4438                         access_cce_cli2_async_fifo_parity_err_cnt),
4439 [C_CCE_CSR_CFG_BUS_PARITY_ERR] = CNTR_ELEM("CceCsrCfgBusParityErr", 0, 0,
4440                         CNTR_NORMAL,
4441                         access_cce_csr_cfg_bus_parity_err_cnt),
4442 [C_CCE_CLI0_ASYNC_FIFO_PARTIY_ERR] = CNTR_ELEM("CceCli0AsyncFifoParityErr", 0,
4443                         0, CNTR_NORMAL,
4444                         access_cce_cli0_async_fifo_parity_err_cnt),
4445 [C_CCE_RSPD_DATA_PARITY_ERR] = CNTR_ELEM("CceRspdDataParityErr", 0, 0,
4446                         CNTR_NORMAL,
4447                         access_cce_rspd_data_parity_err_cnt),
4448 [C_CCE_TRGT_ACCESS_ERR] = CNTR_ELEM("CceTrgtAccessErr", 0, 0,
4449                         CNTR_NORMAL,
4450                         access_cce_trgt_access_err_cnt),
4451 [C_CCE_TRGT_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceTrgtAsyncFifoParityErr", 0,
4452                         0, CNTR_NORMAL,
4453                         access_cce_trgt_async_fifo_parity_err_cnt),
4454 [C_CCE_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("CceCsrWriteBadAddrErr", 0, 0,
4455                         CNTR_NORMAL,
4456                         access_cce_csr_write_bad_addr_err_cnt),
4457 [C_CCE_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("CceCsrReadBadAddrErr", 0, 0,
4458                         CNTR_NORMAL,
4459                         access_cce_csr_read_bad_addr_err_cnt),
4460 [C_CCE_CSR_PARITY_ERR] = CNTR_ELEM("CceCsrParityErr", 0, 0,
4461                         CNTR_NORMAL,
4462                         access_ccs_csr_parity_err_cnt),
4463 
4464 /* RcvErrStatus */
4465 [C_RX_CSR_PARITY_ERR] = CNTR_ELEM("RxCsrParityErr", 0, 0,
4466                         CNTR_NORMAL,
4467                         access_rx_csr_parity_err_cnt),
4468 [C_RX_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("RxCsrWriteBadAddrErr", 0, 0,
4469                         CNTR_NORMAL,
4470                         access_rx_csr_write_bad_addr_err_cnt),
4471 [C_RX_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("RxCsrReadBadAddrErr", 0, 0,
4472                         CNTR_NORMAL,
4473                         access_rx_csr_read_bad_addr_err_cnt),
4474 [C_RX_DMA_CSR_UNC_ERR] = CNTR_ELEM("RxDmaCsrUncErr", 0, 0,
4475                         CNTR_NORMAL,
4476                         access_rx_dma_csr_unc_err_cnt),
4477 [C_RX_DMA_DQ_FSM_ENCODING_ERR] = CNTR_ELEM("RxDmaDqFsmEncodingErr", 0, 0,
4478                         CNTR_NORMAL,
4479                         access_rx_dma_dq_fsm_encoding_err_cnt),
4480 [C_RX_DMA_EQ_FSM_ENCODING_ERR] = CNTR_ELEM("RxDmaEqFsmEncodingErr", 0, 0,
4481                         CNTR_NORMAL,
4482                         access_rx_dma_eq_fsm_encoding_err_cnt),
4483 [C_RX_DMA_CSR_PARITY_ERR] = CNTR_ELEM("RxDmaCsrParityErr", 0, 0,
4484                         CNTR_NORMAL,
4485                         access_rx_dma_csr_parity_err_cnt),
4486 [C_RX_RBUF_DATA_COR_ERR] = CNTR_ELEM("RxRbufDataCorErr", 0, 0,
4487                         CNTR_NORMAL,
4488                         access_rx_rbuf_data_cor_err_cnt),
4489 [C_RX_RBUF_DATA_UNC_ERR] = CNTR_ELEM("RxRbufDataUncErr", 0, 0,
4490                         CNTR_NORMAL,
4491                         access_rx_rbuf_data_unc_err_cnt),
4492 [C_RX_DMA_DATA_FIFO_RD_COR_ERR] = CNTR_ELEM("RxDmaDataFifoRdCorErr", 0, 0,
4493                         CNTR_NORMAL,
4494                         access_rx_dma_data_fifo_rd_cor_err_cnt),
4495 [C_RX_DMA_DATA_FIFO_RD_UNC_ERR] = CNTR_ELEM("RxDmaDataFifoRdUncErr", 0, 0,
4496                         CNTR_NORMAL,
4497                         access_rx_dma_data_fifo_rd_unc_err_cnt),
4498 [C_RX_DMA_HDR_FIFO_RD_COR_ERR] = CNTR_ELEM("RxDmaHdrFifoRdCorErr", 0, 0,
4499                         CNTR_NORMAL,
4500                         access_rx_dma_hdr_fifo_rd_cor_err_cnt),
4501 [C_RX_DMA_HDR_FIFO_RD_UNC_ERR] = CNTR_ELEM("RxDmaHdrFifoRdUncErr", 0, 0,
4502                         CNTR_NORMAL,
4503                         access_rx_dma_hdr_fifo_rd_unc_err_cnt),
4504 [C_RX_RBUF_DESC_PART2_COR_ERR] = CNTR_ELEM("RxRbufDescPart2CorErr", 0, 0,
4505                         CNTR_NORMAL,
4506                         access_rx_rbuf_desc_part2_cor_err_cnt),
4507 [C_RX_RBUF_DESC_PART2_UNC_ERR] = CNTR_ELEM("RxRbufDescPart2UncErr", 0, 0,
4508                         CNTR_NORMAL,
4509                         access_rx_rbuf_desc_part2_unc_err_cnt),
4510 [C_RX_RBUF_DESC_PART1_COR_ERR] = CNTR_ELEM("RxRbufDescPart1CorErr", 0, 0,
4511                         CNTR_NORMAL,
4512                         access_rx_rbuf_desc_part1_cor_err_cnt),
4513 [C_RX_RBUF_DESC_PART1_UNC_ERR] = CNTR_ELEM("RxRbufDescPart1UncErr", 0, 0,
4514                         CNTR_NORMAL,
4515                         access_rx_rbuf_desc_part1_unc_err_cnt),
4516 [C_RX_HQ_INTR_FSM_ERR] = CNTR_ELEM("RxHqIntrFsmErr", 0, 0,
4517                         CNTR_NORMAL,
4518                         access_rx_hq_intr_fsm_err_cnt),
4519 [C_RX_HQ_INTR_CSR_PARITY_ERR] = CNTR_ELEM("RxHqIntrCsrParityErr", 0, 0,
4520                         CNTR_NORMAL,
4521                         access_rx_hq_intr_csr_parity_err_cnt),
4522 [C_RX_LOOKUP_CSR_PARITY_ERR] = CNTR_ELEM("RxLookupCsrParityErr", 0, 0,
4523                         CNTR_NORMAL,
4524                         access_rx_lookup_csr_parity_err_cnt),
4525 [C_RX_LOOKUP_RCV_ARRAY_COR_ERR] = CNTR_ELEM("RxLookupRcvArrayCorErr", 0, 0,
4526                         CNTR_NORMAL,
4527                         access_rx_lookup_rcv_array_cor_err_cnt),
4528 [C_RX_LOOKUP_RCV_ARRAY_UNC_ERR] = CNTR_ELEM("RxLookupRcvArrayUncErr", 0, 0,
4529                         CNTR_NORMAL,
4530                         access_rx_lookup_rcv_array_unc_err_cnt),
4531 [C_RX_LOOKUP_DES_PART2_PARITY_ERR] = CNTR_ELEM("RxLookupDesPart2ParityErr", 0,
4532                         0, CNTR_NORMAL,
4533                         access_rx_lookup_des_part2_parity_err_cnt),
4534 [C_RX_LOOKUP_DES_PART1_UNC_COR_ERR] = CNTR_ELEM("RxLookupDesPart1UncCorErr", 0,
4535                         0, CNTR_NORMAL,
4536                         access_rx_lookup_des_part1_unc_cor_err_cnt),
4537 [C_RX_LOOKUP_DES_PART1_UNC_ERR] = CNTR_ELEM("RxLookupDesPart1UncErr", 0, 0,
4538                         CNTR_NORMAL,
4539                         access_rx_lookup_des_part1_unc_err_cnt),
4540 [C_RX_RBUF_NEXT_FREE_BUF_COR_ERR] = CNTR_ELEM("RxRbufNextFreeBufCorErr", 0, 0,
4541                         CNTR_NORMAL,
4542                         access_rx_rbuf_next_free_buf_cor_err_cnt),
4543 [C_RX_RBUF_NEXT_FREE_BUF_UNC_ERR] = CNTR_ELEM("RxRbufNextFreeBufUncErr", 0, 0,
4544                         CNTR_NORMAL,
4545                         access_rx_rbuf_next_free_buf_unc_err_cnt),
4546 [C_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR] = CNTR_ELEM(
4547                         "RxRbufFlInitWrAddrParityErr", 0, 0,
4548                         CNTR_NORMAL,
4549                         access_rbuf_fl_init_wr_addr_parity_err_cnt),
4550 [C_RX_RBUF_FL_INITDONE_PARITY_ERR] = CNTR_ELEM("RxRbufFlInitdoneParityErr", 0,
4551                         0, CNTR_NORMAL,
4552                         access_rx_rbuf_fl_initdone_parity_err_cnt),
4553 [C_RX_RBUF_FL_WRITE_ADDR_PARITY_ERR] = CNTR_ELEM("RxRbufFlWrAddrParityErr", 0,
4554                         0, CNTR_NORMAL,
4555                         access_rx_rbuf_fl_write_addr_parity_err_cnt),
4556 [C_RX_RBUF_FL_RD_ADDR_PARITY_ERR] = CNTR_ELEM("RxRbufFlRdAddrParityErr", 0, 0,
4557                         CNTR_NORMAL,
4558                         access_rx_rbuf_fl_rd_addr_parity_err_cnt),
4559 [C_RX_RBUF_EMPTY_ERR] = CNTR_ELEM("RxRbufEmptyErr", 0, 0,
4560                         CNTR_NORMAL,
4561                         access_rx_rbuf_empty_err_cnt),
4562 [C_RX_RBUF_FULL_ERR] = CNTR_ELEM("RxRbufFullErr", 0, 0,
4563                         CNTR_NORMAL,
4564                         access_rx_rbuf_full_err_cnt),
4565 [C_RX_RBUF_BAD_LOOKUP_ERR] = CNTR_ELEM("RxRBufBadLookupErr", 0, 0,
4566                         CNTR_NORMAL,
4567                         access_rbuf_bad_lookup_err_cnt),
4568 [C_RX_RBUF_CTX_ID_PARITY_ERR] = CNTR_ELEM("RxRbufCtxIdParityErr", 0, 0,
4569                         CNTR_NORMAL,
4570                         access_rbuf_ctx_id_parity_err_cnt),
4571 [C_RX_RBUF_CSR_QEOPDW_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQEOPDWParityErr", 0, 0,
4572                         CNTR_NORMAL,
4573                         access_rbuf_csr_qeopdw_parity_err_cnt),
4574 [C_RX_RBUF_CSR_Q_NUM_OF_PKT_PARITY_ERR] = CNTR_ELEM(
4575                         "RxRbufCsrQNumOfPktParityErr", 0, 0,
4576                         CNTR_NORMAL,
4577                         access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt),
4578 [C_RX_RBUF_CSR_Q_T1_PTR_PARITY_ERR] = CNTR_ELEM(
4579                         "RxRbufCsrQTlPtrParityErr", 0, 0,
4580                         CNTR_NORMAL,
4581                         access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt),
4582 [C_RX_RBUF_CSR_Q_HD_PTR_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQHdPtrParityErr", 0,
4583                         0, CNTR_NORMAL,
4584                         access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt),
4585 [C_RX_RBUF_CSR_Q_VLD_BIT_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQVldBitParityErr", 0,
4586                         0, CNTR_NORMAL,
4587                         access_rx_rbuf_csr_q_vld_bit_parity_err_cnt),
4588 [C_RX_RBUF_CSR_Q_NEXT_BUF_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQNextBufParityErr",
4589                         0, 0, CNTR_NORMAL,
4590                         access_rx_rbuf_csr_q_next_buf_parity_err_cnt),
4591 [C_RX_RBUF_CSR_Q_ENT_CNT_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQEntCntParityErr", 0,
4592                         0, CNTR_NORMAL,
4593                         access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt),
4594 [C_RX_RBUF_CSR_Q_HEAD_BUF_NUM_PARITY_ERR] = CNTR_ELEM(
4595                         "RxRbufCsrQHeadBufNumParityErr", 0, 0,
4596                         CNTR_NORMAL,
4597                         access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt),
4598 [C_RX_RBUF_BLOCK_LIST_READ_COR_ERR] = CNTR_ELEM("RxRbufBlockListReadCorErr", 0,
4599                         0, CNTR_NORMAL,
4600                         access_rx_rbuf_block_list_read_cor_err_cnt),
4601 [C_RX_RBUF_BLOCK_LIST_READ_UNC_ERR] = CNTR_ELEM("RxRbufBlockListReadUncErr", 0,
4602                         0, CNTR_NORMAL,
4603                         access_rx_rbuf_block_list_read_unc_err_cnt),
4604 [C_RX_RBUF_LOOKUP_DES_COR_ERR] = CNTR_ELEM("RxRbufLookupDesCorErr", 0, 0,
4605                         CNTR_NORMAL,
4606                         access_rx_rbuf_lookup_des_cor_err_cnt),
4607 [C_RX_RBUF_LOOKUP_DES_UNC_ERR] = CNTR_ELEM("RxRbufLookupDesUncErr", 0, 0,
4608                         CNTR_NORMAL,
4609                         access_rx_rbuf_lookup_des_unc_err_cnt),
4610 [C_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR] = CNTR_ELEM(
4611                         "RxRbufLookupDesRegUncCorErr", 0, 0,
4612                         CNTR_NORMAL,
4613                         access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt),
4614 [C_RX_RBUF_LOOKUP_DES_REG_UNC_ERR] = CNTR_ELEM("RxRbufLookupDesRegUncErr", 0, 0,
4615                         CNTR_NORMAL,
4616                         access_rx_rbuf_lookup_des_reg_unc_err_cnt),
4617 [C_RX_RBUF_FREE_LIST_COR_ERR] = CNTR_ELEM("RxRbufFreeListCorErr", 0, 0,
4618                         CNTR_NORMAL,
4619                         access_rx_rbuf_free_list_cor_err_cnt),
4620 [C_RX_RBUF_FREE_LIST_UNC_ERR] = CNTR_ELEM("RxRbufFreeListUncErr", 0, 0,
4621                         CNTR_NORMAL,
4622                         access_rx_rbuf_free_list_unc_err_cnt),
4623 [C_RX_RCV_FSM_ENCODING_ERR] = CNTR_ELEM("RxRcvFsmEncodingErr", 0, 0,
4624                         CNTR_NORMAL,
4625                         access_rx_rcv_fsm_encoding_err_cnt),
4626 [C_RX_DMA_FLAG_COR_ERR] = CNTR_ELEM("RxDmaFlagCorErr", 0, 0,
4627                         CNTR_NORMAL,
4628                         access_rx_dma_flag_cor_err_cnt),
4629 [C_RX_DMA_FLAG_UNC_ERR] = CNTR_ELEM("RxDmaFlagUncErr", 0, 0,
4630                         CNTR_NORMAL,
4631                         access_rx_dma_flag_unc_err_cnt),
4632 [C_RX_DC_SOP_EOP_PARITY_ERR] = CNTR_ELEM("RxDcSopEopParityErr", 0, 0,
4633                         CNTR_NORMAL,
4634                         access_rx_dc_sop_eop_parity_err_cnt),
4635 [C_RX_RCV_CSR_PARITY_ERR] = CNTR_ELEM("RxRcvCsrParityErr", 0, 0,
4636                         CNTR_NORMAL,
4637                         access_rx_rcv_csr_parity_err_cnt),
4638 [C_RX_RCV_QP_MAP_TABLE_COR_ERR] = CNTR_ELEM("RxRcvQpMapTableCorErr", 0, 0,
4639                         CNTR_NORMAL,
4640                         access_rx_rcv_qp_map_table_cor_err_cnt),
4641 [C_RX_RCV_QP_MAP_TABLE_UNC_ERR] = CNTR_ELEM("RxRcvQpMapTableUncErr", 0, 0,
4642                         CNTR_NORMAL,
4643                         access_rx_rcv_qp_map_table_unc_err_cnt),
4644 [C_RX_RCV_DATA_COR_ERR] = CNTR_ELEM("RxRcvDataCorErr", 0, 0,
4645                         CNTR_NORMAL,
4646                         access_rx_rcv_data_cor_err_cnt),
4647 [C_RX_RCV_DATA_UNC_ERR] = CNTR_ELEM("RxRcvDataUncErr", 0, 0,
4648                         CNTR_NORMAL,
4649                         access_rx_rcv_data_unc_err_cnt),
4650 [C_RX_RCV_HDR_COR_ERR] = CNTR_ELEM("RxRcvHdrCorErr", 0, 0,
4651                         CNTR_NORMAL,
4652                         access_rx_rcv_hdr_cor_err_cnt),
4653 [C_RX_RCV_HDR_UNC_ERR] = CNTR_ELEM("RxRcvHdrUncErr", 0, 0,
4654                         CNTR_NORMAL,
4655                         access_rx_rcv_hdr_unc_err_cnt),
4656 [C_RX_DC_INTF_PARITY_ERR] = CNTR_ELEM("RxDcIntfParityErr", 0, 0,
4657                         CNTR_NORMAL,
4658                         access_rx_dc_intf_parity_err_cnt),
4659 [C_RX_DMA_CSR_COR_ERR] = CNTR_ELEM("RxDmaCsrCorErr", 0, 0,
4660                         CNTR_NORMAL,
4661                         access_rx_dma_csr_cor_err_cnt),
4662 /* SendPioErrStatus */
4663 [C_PIO_PEC_SOP_HEAD_PARITY_ERR] = CNTR_ELEM("PioPecSopHeadParityErr", 0, 0,
4664                         CNTR_NORMAL,
4665                         access_pio_pec_sop_head_parity_err_cnt),
4666 [C_PIO_PCC_SOP_HEAD_PARITY_ERR] = CNTR_ELEM("PioPccSopHeadParityErr", 0, 0,
4667                         CNTR_NORMAL,
4668                         access_pio_pcc_sop_head_parity_err_cnt),
4669 [C_PIO_LAST_RETURNED_CNT_PARITY_ERR] = CNTR_ELEM("PioLastReturnedCntParityErr",
4670                         0, 0, CNTR_NORMAL,
4671                         access_pio_last_returned_cnt_parity_err_cnt),
4672 [C_PIO_CURRENT_FREE_CNT_PARITY_ERR] = CNTR_ELEM("PioCurrentFreeCntParityErr", 0,
4673                         0, CNTR_NORMAL,
4674                         access_pio_current_free_cnt_parity_err_cnt),
4675 [C_PIO_RSVD_31_ERR] = CNTR_ELEM("Pio Reserved 31", 0, 0,
4676                         CNTR_NORMAL,
4677                         access_pio_reserved_31_err_cnt),
4678 [C_PIO_RSVD_30_ERR] = CNTR_ELEM("Pio Reserved 30", 0, 0,
4679                         CNTR_NORMAL,
4680                         access_pio_reserved_30_err_cnt),
4681 [C_PIO_PPMC_SOP_LEN_ERR] = CNTR_ELEM("PioPpmcSopLenErr", 0, 0,
4682                         CNTR_NORMAL,
4683                         access_pio_ppmc_sop_len_err_cnt),
4684 [C_PIO_PPMC_BQC_MEM_PARITY_ERR] = CNTR_ELEM("PioPpmcBqcMemParityErr", 0, 0,
4685                         CNTR_NORMAL,
4686                         access_pio_ppmc_bqc_mem_parity_err_cnt),
4687 [C_PIO_VL_FIFO_PARITY_ERR] = CNTR_ELEM("PioVlFifoParityErr", 0, 0,
4688                         CNTR_NORMAL,
4689                         access_pio_vl_fifo_parity_err_cnt),
4690 [C_PIO_VLF_SOP_PARITY_ERR] = CNTR_ELEM("PioVlfSopParityErr", 0, 0,
4691                         CNTR_NORMAL,
4692                         access_pio_vlf_sop_parity_err_cnt),
4693 [C_PIO_VLF_V1_LEN_PARITY_ERR] = CNTR_ELEM("PioVlfVlLenParityErr", 0, 0,
4694                         CNTR_NORMAL,
4695                         access_pio_vlf_v1_len_parity_err_cnt),
4696 [C_PIO_BLOCK_QW_COUNT_PARITY_ERR] = CNTR_ELEM("PioBlockQwCountParityErr", 0, 0,
4697                         CNTR_NORMAL,
4698                         access_pio_block_qw_count_parity_err_cnt),
4699 [C_PIO_WRITE_QW_VALID_PARITY_ERR] = CNTR_ELEM("PioWriteQwValidParityErr", 0, 0,
4700                         CNTR_NORMAL,
4701                         access_pio_write_qw_valid_parity_err_cnt),
4702 [C_PIO_STATE_MACHINE_ERR] = CNTR_ELEM("PioStateMachineErr", 0, 0,
4703                         CNTR_NORMAL,
4704                         access_pio_state_machine_err_cnt),
4705 [C_PIO_WRITE_DATA_PARITY_ERR] = CNTR_ELEM("PioWriteDataParityErr", 0, 0,
4706                         CNTR_NORMAL,
4707                         access_pio_write_data_parity_err_cnt),
4708 [C_PIO_HOST_ADDR_MEM_COR_ERR] = CNTR_ELEM("PioHostAddrMemCorErr", 0, 0,
4709                         CNTR_NORMAL,
4710                         access_pio_host_addr_mem_cor_err_cnt),
4711 [C_PIO_HOST_ADDR_MEM_UNC_ERR] = CNTR_ELEM("PioHostAddrMemUncErr", 0, 0,
4712                         CNTR_NORMAL,
4713                         access_pio_host_addr_mem_unc_err_cnt),
4714 [C_PIO_PKT_EVICT_SM_OR_ARM_SM_ERR] = CNTR_ELEM("PioPktEvictSmOrArbSmErr", 0, 0,
4715                         CNTR_NORMAL,
4716                         access_pio_pkt_evict_sm_or_arb_sm_err_cnt),
4717 [C_PIO_INIT_SM_IN_ERR] = CNTR_ELEM("PioInitSmInErr", 0, 0,
4718                         CNTR_NORMAL,
4719                         access_pio_init_sm_in_err_cnt),
4720 [C_PIO_PPMC_PBL_FIFO_ERR] = CNTR_ELEM("PioPpmcPblFifoErr", 0, 0,
4721                         CNTR_NORMAL,
4722                         access_pio_ppmc_pbl_fifo_err_cnt),
4723 [C_PIO_CREDIT_RET_FIFO_PARITY_ERR] = CNTR_ELEM("PioCreditRetFifoParityErr", 0,
4724                         0, CNTR_NORMAL,
4725                         access_pio_credit_ret_fifo_parity_err_cnt),
4726 [C_PIO_V1_LEN_MEM_BANK1_COR_ERR] = CNTR_ELEM("PioVlLenMemBank1CorErr", 0, 0,
4727                         CNTR_NORMAL,
4728                         access_pio_v1_len_mem_bank1_cor_err_cnt),
4729 [C_PIO_V1_LEN_MEM_BANK0_COR_ERR] = CNTR_ELEM("PioVlLenMemBank0CorErr", 0, 0,
4730                         CNTR_NORMAL,
4731                         access_pio_v1_len_mem_bank0_cor_err_cnt),
4732 [C_PIO_V1_LEN_MEM_BANK1_UNC_ERR] = CNTR_ELEM("PioVlLenMemBank1UncErr", 0, 0,
4733                         CNTR_NORMAL,
4734                         access_pio_v1_len_mem_bank1_unc_err_cnt),
4735 [C_PIO_V1_LEN_MEM_BANK0_UNC_ERR] = CNTR_ELEM("PioVlLenMemBank0UncErr", 0, 0,
4736                         CNTR_NORMAL,
4737                         access_pio_v1_len_mem_bank0_unc_err_cnt),
4738 [C_PIO_SM_PKT_RESET_PARITY_ERR] = CNTR_ELEM("PioSmPktResetParityErr", 0, 0,
4739                         CNTR_NORMAL,
4740                         access_pio_sm_pkt_reset_parity_err_cnt),
4741 [C_PIO_PKT_EVICT_FIFO_PARITY_ERR] = CNTR_ELEM("PioPktEvictFifoParityErr", 0, 0,
4742                         CNTR_NORMAL,
4743                         access_pio_pkt_evict_fifo_parity_err_cnt),
4744 [C_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR] = CNTR_ELEM(
4745                         "PioSbrdctrlCrrelFifoParityErr", 0, 0,
4746                         CNTR_NORMAL,
4747                         access_pio_sbrdctrl_crrel_fifo_parity_err_cnt),
4748 [C_PIO_SBRDCTL_CRREL_PARITY_ERR] = CNTR_ELEM("PioSbrdctlCrrelParityErr", 0, 0,
4749                         CNTR_NORMAL,
4750                         access_pio_sbrdctl_crrel_parity_err_cnt),
4751 [C_PIO_PEC_FIFO_PARITY_ERR] = CNTR_ELEM("PioPecFifoParityErr", 0, 0,
4752                         CNTR_NORMAL,
4753                         access_pio_pec_fifo_parity_err_cnt),
4754 [C_PIO_PCC_FIFO_PARITY_ERR] = CNTR_ELEM("PioPccFifoParityErr", 0, 0,
4755                         CNTR_NORMAL,
4756                         access_pio_pcc_fifo_parity_err_cnt),
4757 [C_PIO_SB_MEM_FIFO1_ERR] = CNTR_ELEM("PioSbMemFifo1Err", 0, 0,
4758                         CNTR_NORMAL,
4759                         access_pio_sb_mem_fifo1_err_cnt),
4760 [C_PIO_SB_MEM_FIFO0_ERR] = CNTR_ELEM("PioSbMemFifo0Err", 0, 0,
4761                         CNTR_NORMAL,
4762                         access_pio_sb_mem_fifo0_err_cnt),
4763 [C_PIO_CSR_PARITY_ERR] = CNTR_ELEM("PioCsrParityErr", 0, 0,
4764                         CNTR_NORMAL,
4765                         access_pio_csr_parity_err_cnt),
4766 [C_PIO_WRITE_ADDR_PARITY_ERR] = CNTR_ELEM("PioWriteAddrParityErr", 0, 0,
4767                         CNTR_NORMAL,
4768                         access_pio_write_addr_parity_err_cnt),
4769 [C_PIO_WRITE_BAD_CTXT_ERR] = CNTR_ELEM("PioWriteBadCtxtErr", 0, 0,
4770                         CNTR_NORMAL,
4771                         access_pio_write_bad_ctxt_err_cnt),
4772 /* SendDmaErrStatus */
4773 [C_SDMA_PCIE_REQ_TRACKING_COR_ERR] = CNTR_ELEM("SDmaPcieReqTrackingCorErr", 0,
4774                         0, CNTR_NORMAL,
4775                         access_sdma_pcie_req_tracking_cor_err_cnt),
4776 [C_SDMA_PCIE_REQ_TRACKING_UNC_ERR] = CNTR_ELEM("SDmaPcieReqTrackingUncErr", 0,
4777                         0, CNTR_NORMAL,
4778                         access_sdma_pcie_req_tracking_unc_err_cnt),
4779 [C_SDMA_CSR_PARITY_ERR] = CNTR_ELEM("SDmaCsrParityErr", 0, 0,
4780                         CNTR_NORMAL,
4781                         access_sdma_csr_parity_err_cnt),
4782 [C_SDMA_RPY_TAG_ERR] = CNTR_ELEM("SDmaRpyTagErr", 0, 0,
4783                         CNTR_NORMAL,
4784                         access_sdma_rpy_tag_err_cnt),
4785 /* SendEgressErrStatus */
4786 [C_TX_READ_PIO_MEMORY_CSR_UNC_ERR] = CNTR_ELEM("TxReadPioMemoryCsrUncErr", 0, 0,
4787                         CNTR_NORMAL,
4788                         access_tx_read_pio_memory_csr_unc_err_cnt),
4789 [C_TX_READ_SDMA_MEMORY_CSR_UNC_ERR] = CNTR_ELEM("TxReadSdmaMemoryCsrUncErr", 0,
4790                         0, CNTR_NORMAL,
4791                         access_tx_read_sdma_memory_csr_err_cnt),
4792 [C_TX_EGRESS_FIFO_COR_ERR] = CNTR_ELEM("TxEgressFifoCorErr", 0, 0,
4793                         CNTR_NORMAL,
4794                         access_tx_egress_fifo_cor_err_cnt),
4795 [C_TX_READ_PIO_MEMORY_COR_ERR] = CNTR_ELEM("TxReadPioMemoryCorErr", 0, 0,
4796                         CNTR_NORMAL,
4797                         access_tx_read_pio_memory_cor_err_cnt),
4798 [C_TX_READ_SDMA_MEMORY_COR_ERR] = CNTR_ELEM("TxReadSdmaMemoryCorErr", 0, 0,
4799                         CNTR_NORMAL,
4800                         access_tx_read_sdma_memory_cor_err_cnt),
4801 [C_TX_SB_HDR_COR_ERR] = CNTR_ELEM("TxSbHdrCorErr", 0, 0,
4802                         CNTR_NORMAL,
4803                         access_tx_sb_hdr_cor_err_cnt),
4804 [C_TX_CREDIT_OVERRUN_ERR] = CNTR_ELEM("TxCreditOverrunErr", 0, 0,
4805                         CNTR_NORMAL,
4806                         access_tx_credit_overrun_err_cnt),
4807 [C_TX_LAUNCH_FIFO8_COR_ERR] = CNTR_ELEM("TxLaunchFifo8CorErr", 0, 0,
4808                         CNTR_NORMAL,
4809                         access_tx_launch_fifo8_cor_err_cnt),
4810 [C_TX_LAUNCH_FIFO7_COR_ERR] = CNTR_ELEM("TxLaunchFifo7CorErr", 0, 0,
4811                         CNTR_NORMAL,
4812                         access_tx_launch_fifo7_cor_err_cnt),
4813 [C_TX_LAUNCH_FIFO6_COR_ERR] = CNTR_ELEM("TxLaunchFifo6CorErr", 0, 0,
4814                         CNTR_NORMAL,
4815                         access_tx_launch_fifo6_cor_err_cnt),
4816 [C_TX_LAUNCH_FIFO5_COR_ERR] = CNTR_ELEM("TxLaunchFifo5CorErr", 0, 0,
4817                         CNTR_NORMAL,
4818                         access_tx_launch_fifo5_cor_err_cnt),
4819 [C_TX_LAUNCH_FIFO4_COR_ERR] = CNTR_ELEM("TxLaunchFifo4CorErr", 0, 0,
4820                         CNTR_NORMAL,
4821                         access_tx_launch_fifo4_cor_err_cnt),
4822 [C_TX_LAUNCH_FIFO3_COR_ERR] = CNTR_ELEM("TxLaunchFifo3CorErr", 0, 0,
4823                         CNTR_NORMAL,
4824                         access_tx_launch_fifo3_cor_err_cnt),
4825 [C_TX_LAUNCH_FIFO2_COR_ERR] = CNTR_ELEM("TxLaunchFifo2CorErr", 0, 0,
4826                         CNTR_NORMAL,
4827                         access_tx_launch_fifo2_cor_err_cnt),
4828 [C_TX_LAUNCH_FIFO1_COR_ERR] = CNTR_ELEM("TxLaunchFifo1CorErr", 0, 0,
4829                         CNTR_NORMAL,
4830                         access_tx_launch_fifo1_cor_err_cnt),
4831 [C_TX_LAUNCH_FIFO0_COR_ERR] = CNTR_ELEM("TxLaunchFifo0CorErr", 0, 0,
4832                         CNTR_NORMAL,
4833                         access_tx_launch_fifo0_cor_err_cnt),
4834 [C_TX_CREDIT_RETURN_VL_ERR] = CNTR_ELEM("TxCreditReturnVLErr", 0, 0,
4835                         CNTR_NORMAL,
4836                         access_tx_credit_return_vl_err_cnt),
4837 [C_TX_HCRC_INSERTION_ERR] = CNTR_ELEM("TxHcrcInsertionErr", 0, 0,
4838                         CNTR_NORMAL,
4839                         access_tx_hcrc_insertion_err_cnt),
4840 [C_TX_EGRESS_FIFI_UNC_ERR] = CNTR_ELEM("TxEgressFifoUncErr", 0, 0,
4841                         CNTR_NORMAL,
4842                         access_tx_egress_fifo_unc_err_cnt),
4843 [C_TX_READ_PIO_MEMORY_UNC_ERR] = CNTR_ELEM("TxReadPioMemoryUncErr", 0, 0,
4844                         CNTR_NORMAL,
4845                         access_tx_read_pio_memory_unc_err_cnt),
4846 [C_TX_READ_SDMA_MEMORY_UNC_ERR] = CNTR_ELEM("TxReadSdmaMemoryUncErr", 0, 0,
4847                         CNTR_NORMAL,
4848                         access_tx_read_sdma_memory_unc_err_cnt),
4849 [C_TX_SB_HDR_UNC_ERR] = CNTR_ELEM("TxSbHdrUncErr", 0, 0,
4850                         CNTR_NORMAL,
4851                         access_tx_sb_hdr_unc_err_cnt),
4852 [C_TX_CREDIT_RETURN_PARITY_ERR] = CNTR_ELEM("TxCreditReturnParityErr", 0, 0,
4853                         CNTR_NORMAL,
4854                         access_tx_credit_return_partiy_err_cnt),
4855 [C_TX_LAUNCH_FIFO8_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo8UncOrParityErr",
4856                         0, 0, CNTR_NORMAL,
4857                         access_tx_launch_fifo8_unc_or_parity_err_cnt),
4858 [C_TX_LAUNCH_FIFO7_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo7UncOrParityErr",
4859                         0, 0, CNTR_NORMAL,
4860                         access_tx_launch_fifo7_unc_or_parity_err_cnt),
4861 [C_TX_LAUNCH_FIFO6_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo6UncOrParityErr",
4862                         0, 0, CNTR_NORMAL,
4863                         access_tx_launch_fifo6_unc_or_parity_err_cnt),
4864 [C_TX_LAUNCH_FIFO5_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo5UncOrParityErr",
4865                         0, 0, CNTR_NORMAL,
4866                         access_tx_launch_fifo5_unc_or_parity_err_cnt),
4867 [C_TX_LAUNCH_FIFO4_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo4UncOrParityErr",
4868                         0, 0, CNTR_NORMAL,
4869                         access_tx_launch_fifo4_unc_or_parity_err_cnt),
4870 [C_TX_LAUNCH_FIFO3_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo3UncOrParityErr",
4871                         0, 0, CNTR_NORMAL,
4872                         access_tx_launch_fifo3_unc_or_parity_err_cnt),
4873 [C_TX_LAUNCH_FIFO2_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo2UncOrParityErr",
4874                         0, 0, CNTR_NORMAL,
4875                         access_tx_launch_fifo2_unc_or_parity_err_cnt),
4876 [C_TX_LAUNCH_FIFO1_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo1UncOrParityErr",
4877                         0, 0, CNTR_NORMAL,
4878                         access_tx_launch_fifo1_unc_or_parity_err_cnt),
4879 [C_TX_LAUNCH_FIFO0_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo0UncOrParityErr",
4880                         0, 0, CNTR_NORMAL,
4881                         access_tx_launch_fifo0_unc_or_parity_err_cnt),
4882 [C_TX_SDMA15_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma15DisallowedPacketErr",
4883                         0, 0, CNTR_NORMAL,
4884                         access_tx_sdma15_disallowed_packet_err_cnt),
4885 [C_TX_SDMA14_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma14DisallowedPacketErr",
4886                         0, 0, CNTR_NORMAL,
4887                         access_tx_sdma14_disallowed_packet_err_cnt),
4888 [C_TX_SDMA13_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma13DisallowedPacketErr",
4889                         0, 0, CNTR_NORMAL,
4890                         access_tx_sdma13_disallowed_packet_err_cnt),
4891 [C_TX_SDMA12_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma12DisallowedPacketErr",
4892                         0, 0, CNTR_NORMAL,
4893                         access_tx_sdma12_disallowed_packet_err_cnt),
4894 [C_TX_SDMA11_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma11DisallowedPacketErr",
4895                         0, 0, CNTR_NORMAL,
4896                         access_tx_sdma11_disallowed_packet_err_cnt),
4897 [C_TX_SDMA10_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma10DisallowedPacketErr",
4898                         0, 0, CNTR_NORMAL,
4899                         access_tx_sdma10_disallowed_packet_err_cnt),
4900 [C_TX_SDMA9_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma9DisallowedPacketErr",
4901                         0, 0, CNTR_NORMAL,
4902                         access_tx_sdma9_disallowed_packet_err_cnt),
4903 [C_TX_SDMA8_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma8DisallowedPacketErr",
4904                         0, 0, CNTR_NORMAL,
4905                         access_tx_sdma8_disallowed_packet_err_cnt),
4906 [C_TX_SDMA7_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma7DisallowedPacketErr",
4907                         0, 0, CNTR_NORMAL,
4908                         access_tx_sdma7_disallowed_packet_err_cnt),
4909 [C_TX_SDMA6_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma6DisallowedPacketErr",
4910                         0, 0, CNTR_NORMAL,
4911                         access_tx_sdma6_disallowed_packet_err_cnt),
4912 [C_TX_SDMA5_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma5DisallowedPacketErr",
4913                         0, 0, CNTR_NORMAL,
4914                         access_tx_sdma5_disallowed_packet_err_cnt),
4915 [C_TX_SDMA4_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma4DisallowedPacketErr",
4916                         0, 0, CNTR_NORMAL,
4917                         access_tx_sdma4_disallowed_packet_err_cnt),
4918 [C_TX_SDMA3_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma3DisallowedPacketErr",
4919                         0, 0, CNTR_NORMAL,
4920                         access_tx_sdma3_disallowed_packet_err_cnt),
4921 [C_TX_SDMA2_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma2DisallowedPacketErr",
4922                         0, 0, CNTR_NORMAL,
4923                         access_tx_sdma2_disallowed_packet_err_cnt),
4924 [C_TX_SDMA1_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma1DisallowedPacketErr",
4925                         0, 0, CNTR_NORMAL,
4926                         access_tx_sdma1_disallowed_packet_err_cnt),
4927 [C_TX_SDMA0_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma0DisallowedPacketErr",
4928                         0, 0, CNTR_NORMAL,
4929                         access_tx_sdma0_disallowed_packet_err_cnt),
4930 [C_TX_CONFIG_PARITY_ERR] = CNTR_ELEM("TxConfigParityErr", 0, 0,
4931                         CNTR_NORMAL,
4932                         access_tx_config_parity_err_cnt),
4933 [C_TX_SBRD_CTL_CSR_PARITY_ERR] = CNTR_ELEM("TxSbrdCtlCsrParityErr", 0, 0,
4934                         CNTR_NORMAL,
4935                         access_tx_sbrd_ctl_csr_parity_err_cnt),
4936 [C_TX_LAUNCH_CSR_PARITY_ERR] = CNTR_ELEM("TxLaunchCsrParityErr", 0, 0,
4937                         CNTR_NORMAL,
4938                         access_tx_launch_csr_parity_err_cnt),
4939 [C_TX_ILLEGAL_CL_ERR] = CNTR_ELEM("TxIllegalVLErr", 0, 0,
4940                         CNTR_NORMAL,
4941                         access_tx_illegal_vl_err_cnt),
4942 [C_TX_SBRD_CTL_STATE_MACHINE_PARITY_ERR] = CNTR_ELEM(
4943                         "TxSbrdCtlStateMachineParityErr", 0, 0,
4944                         CNTR_NORMAL,
4945                         access_tx_sbrd_ctl_state_machine_parity_err_cnt),
4946 [C_TX_RESERVED_10] = CNTR_ELEM("Tx Egress Reserved 10", 0, 0,
4947                         CNTR_NORMAL,
4948                         access_egress_reserved_10_err_cnt),
4949 [C_TX_RESERVED_9] = CNTR_ELEM("Tx Egress Reserved 9", 0, 0,
4950                         CNTR_NORMAL,
4951                         access_egress_reserved_9_err_cnt),
4952 [C_TX_SDMA_LAUNCH_INTF_PARITY_ERR] = CNTR_ELEM("TxSdmaLaunchIntfParityErr",
4953                         0, 0, CNTR_NORMAL,
4954                         access_tx_sdma_launch_intf_parity_err_cnt),
4955 [C_TX_PIO_LAUNCH_INTF_PARITY_ERR] = CNTR_ELEM("TxPioLaunchIntfParityErr", 0, 0,
4956                         CNTR_NORMAL,
4957                         access_tx_pio_launch_intf_parity_err_cnt),
4958 [C_TX_RESERVED_6] = CNTR_ELEM("Tx Egress Reserved 6", 0, 0,
4959                         CNTR_NORMAL,
4960                         access_egress_reserved_6_err_cnt),
4961 [C_TX_INCORRECT_LINK_STATE_ERR] = CNTR_ELEM("TxIncorrectLinkStateErr", 0, 0,
4962                         CNTR_NORMAL,
4963                         access_tx_incorrect_link_state_err_cnt),
4964 [C_TX_LINK_DOWN_ERR] = CNTR_ELEM("TxLinkdownErr", 0, 0,
4965                         CNTR_NORMAL,
4966                         access_tx_linkdown_err_cnt),
4967 [C_TX_EGRESS_FIFO_UNDERRUN_OR_PARITY_ERR] = CNTR_ELEM(
4968                         "EgressFifoUnderrunOrParityErr", 0, 0,
4969                         CNTR_NORMAL,
4970                         access_tx_egress_fifi_underrun_or_parity_err_cnt),
4971 [C_TX_RESERVED_2] = CNTR_ELEM("Tx Egress Reserved 2", 0, 0,
4972                         CNTR_NORMAL,
4973                         access_egress_reserved_2_err_cnt),
4974 [C_TX_PKT_INTEGRITY_MEM_UNC_ERR] = CNTR_ELEM("TxPktIntegrityMemUncErr", 0, 0,
4975                         CNTR_NORMAL,
4976                         access_tx_pkt_integrity_mem_unc_err_cnt),
4977 [C_TX_PKT_INTEGRITY_MEM_COR_ERR] = CNTR_ELEM("TxPktIntegrityMemCorErr", 0, 0,
4978                         CNTR_NORMAL,
4979                         access_tx_pkt_integrity_mem_cor_err_cnt),
4980 /* SendErrStatus */
4981 [C_SEND_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("SendCsrWriteBadAddrErr", 0, 0,
4982                         CNTR_NORMAL,
4983                         access_send_csr_write_bad_addr_err_cnt),
4984 [C_SEND_CSR_READ_BAD_ADD_ERR] = CNTR_ELEM("SendCsrReadBadAddrErr", 0, 0,
4985                         CNTR_NORMAL,
4986                         access_send_csr_read_bad_addr_err_cnt),
4987 [C_SEND_CSR_PARITY_ERR] = CNTR_ELEM("SendCsrParityErr", 0, 0,
4988                         CNTR_NORMAL,
4989                         access_send_csr_parity_cnt),
4990 /* SendCtxtErrStatus */
4991 [C_PIO_WRITE_OUT_OF_BOUNDS_ERR] = CNTR_ELEM("PioWriteOutOfBoundsErr", 0, 0,
4992                         CNTR_NORMAL,
4993                         access_pio_write_out_of_bounds_err_cnt),
4994 [C_PIO_WRITE_OVERFLOW_ERR] = CNTR_ELEM("PioWriteOverflowErr", 0, 0,
4995                         CNTR_NORMAL,
4996                         access_pio_write_overflow_err_cnt),
4997 [C_PIO_WRITE_CROSSES_BOUNDARY_ERR] = CNTR_ELEM("PioWriteCrossesBoundaryErr",
4998                         0, 0, CNTR_NORMAL,
4999                         access_pio_write_crosses_boundary_err_cnt),
5000 [C_PIO_DISALLOWED_PACKET_ERR] = CNTR_ELEM("PioDisallowedPacketErr", 0, 0,
5001                         CNTR_NORMAL,
5002                         access_pio_disallowed_packet_err_cnt),
5003 [C_PIO_INCONSISTENT_SOP_ERR] = CNTR_ELEM("PioInconsistentSopErr", 0, 0,
5004                         CNTR_NORMAL,
5005                         access_pio_inconsistent_sop_err_cnt),
5006 /* SendDmaEngErrStatus */
5007 [C_SDMA_HEADER_REQUEST_FIFO_COR_ERR] = CNTR_ELEM("SDmaHeaderRequestFifoCorErr",
5008                         0, 0, CNTR_NORMAL,
5009                         access_sdma_header_request_fifo_cor_err_cnt),
5010 [C_SDMA_HEADER_STORAGE_COR_ERR] = CNTR_ELEM("SDmaHeaderStorageCorErr", 0, 0,
5011                         CNTR_NORMAL,
5012                         access_sdma_header_storage_cor_err_cnt),
5013 [C_SDMA_PACKET_TRACKING_COR_ERR] = CNTR_ELEM("SDmaPacketTrackingCorErr", 0, 0,
5014                         CNTR_NORMAL,
5015                         access_sdma_packet_tracking_cor_err_cnt),
5016 [C_SDMA_ASSEMBLY_COR_ERR] = CNTR_ELEM("SDmaAssemblyCorErr", 0, 0,
5017                         CNTR_NORMAL,
5018                         access_sdma_assembly_cor_err_cnt),
5019 [C_SDMA_DESC_TABLE_COR_ERR] = CNTR_ELEM("SDmaDescTableCorErr", 0, 0,
5020                         CNTR_NORMAL,
5021                         access_sdma_desc_table_cor_err_cnt),
5022 [C_SDMA_HEADER_REQUEST_FIFO_UNC_ERR] = CNTR_ELEM("SDmaHeaderRequestFifoUncErr",
5023                         0, 0, CNTR_NORMAL,
5024                         access_sdma_header_request_fifo_unc_err_cnt),
5025 [C_SDMA_HEADER_STORAGE_UNC_ERR] = CNTR_ELEM("SDmaHeaderStorageUncErr", 0, 0,
5026                         CNTR_NORMAL,
5027                         access_sdma_header_storage_unc_err_cnt),
5028 [C_SDMA_PACKET_TRACKING_UNC_ERR] = CNTR_ELEM("SDmaPacketTrackingUncErr", 0, 0,
5029                         CNTR_NORMAL,
5030                         access_sdma_packet_tracking_unc_err_cnt),
5031 [C_SDMA_ASSEMBLY_UNC_ERR] = CNTR_ELEM("SDmaAssemblyUncErr", 0, 0,
5032                         CNTR_NORMAL,
5033                         access_sdma_assembly_unc_err_cnt),
5034 [C_SDMA_DESC_TABLE_UNC_ERR] = CNTR_ELEM("SDmaDescTableUncErr", 0, 0,
5035                         CNTR_NORMAL,
5036                         access_sdma_desc_table_unc_err_cnt),
5037 [C_SDMA_TIMEOUT_ERR] = CNTR_ELEM("SDmaTimeoutErr", 0, 0,
5038                         CNTR_NORMAL,
5039                         access_sdma_timeout_err_cnt),
5040 [C_SDMA_HEADER_LENGTH_ERR] = CNTR_ELEM("SDmaHeaderLengthErr", 0, 0,
5041                         CNTR_NORMAL,
5042                         access_sdma_header_length_err_cnt),
5043 [C_SDMA_HEADER_ADDRESS_ERR] = CNTR_ELEM("SDmaHeaderAddressErr", 0, 0,
5044                         CNTR_NORMAL,
5045                         access_sdma_header_address_err_cnt),
5046 [C_SDMA_HEADER_SELECT_ERR] = CNTR_ELEM("SDmaHeaderSelectErr", 0, 0,
5047                         CNTR_NORMAL,
5048                         access_sdma_header_select_err_cnt),
5049 [C_SMDA_RESERVED_9] = CNTR_ELEM("SDma Reserved 9", 0, 0,
5050                         CNTR_NORMAL,
5051                         access_sdma_reserved_9_err_cnt),
5052 [C_SDMA_PACKET_DESC_OVERFLOW_ERR] = CNTR_ELEM("SDmaPacketDescOverflowErr", 0, 0,
5053                         CNTR_NORMAL,
5054                         access_sdma_packet_desc_overflow_err_cnt),
5055 [C_SDMA_LENGTH_MISMATCH_ERR] = CNTR_ELEM("SDmaLengthMismatchErr", 0, 0,
5056                         CNTR_NORMAL,
5057                         access_sdma_length_mismatch_err_cnt),
5058 [C_SDMA_HALT_ERR] = CNTR_ELEM("SDmaHaltErr", 0, 0,
5059                         CNTR_NORMAL,
5060                         access_sdma_halt_err_cnt),
5061 [C_SDMA_MEM_READ_ERR] = CNTR_ELEM("SDmaMemReadErr", 0, 0,
5062                         CNTR_NORMAL,
5063                         access_sdma_mem_read_err_cnt),
5064 [C_SDMA_FIRST_DESC_ERR] = CNTR_ELEM("SDmaFirstDescErr", 0, 0,
5065                         CNTR_NORMAL,
5066                         access_sdma_first_desc_err_cnt),
5067 [C_SDMA_TAIL_OUT_OF_BOUNDS_ERR] = CNTR_ELEM("SDmaTailOutOfBoundsErr", 0, 0,
5068                         CNTR_NORMAL,
5069                         access_sdma_tail_out_of_bounds_err_cnt),
5070 [C_SDMA_TOO_LONG_ERR] = CNTR_ELEM("SDmaTooLongErr", 0, 0,
5071                         CNTR_NORMAL,
5072                         access_sdma_too_long_err_cnt),
5073 [C_SDMA_GEN_MISMATCH_ERR] = CNTR_ELEM("SDmaGenMismatchErr", 0, 0,
5074                         CNTR_NORMAL,
5075                         access_sdma_gen_mismatch_err_cnt),
5076 [C_SDMA_WRONG_DW_ERR] = CNTR_ELEM("SDmaWrongDwErr", 0, 0,
5077                         CNTR_NORMAL,
5078                         access_sdma_wrong_dw_err_cnt),
5079 };
5080 
5081 static struct cntr_entry port_cntrs[PORT_CNTR_LAST] = {
5082 [C_TX_UNSUP_VL] = TXE32_PORT_CNTR_ELEM(TxUnVLErr, SEND_UNSUP_VL_ERR_CNT,
5083                         CNTR_NORMAL),
5084 [C_TX_INVAL_LEN] = TXE32_PORT_CNTR_ELEM(TxInvalLen, SEND_LEN_ERR_CNT,
5085                         CNTR_NORMAL),
5086 [C_TX_MM_LEN_ERR] = TXE32_PORT_CNTR_ELEM(TxMMLenErr, SEND_MAX_MIN_LEN_ERR_CNT,
5087                         CNTR_NORMAL),
5088 [C_TX_UNDERRUN] = TXE32_PORT_CNTR_ELEM(TxUnderrun, SEND_UNDERRUN_CNT,
5089                         CNTR_NORMAL),
5090 [C_TX_FLOW_STALL] = TXE32_PORT_CNTR_ELEM(TxFlowStall, SEND_FLOW_STALL_CNT,
5091                         CNTR_NORMAL),
5092 [C_TX_DROPPED] = TXE32_PORT_CNTR_ELEM(TxDropped, SEND_DROPPED_PKT_CNT,
5093                         CNTR_NORMAL),
5094 [C_TX_HDR_ERR] = TXE32_PORT_CNTR_ELEM(TxHdrErr, SEND_HEADERS_ERR_CNT,
5095                         CNTR_NORMAL),
5096 [C_TX_PKT] = TXE64_PORT_CNTR_ELEM(TxPkt, SEND_DATA_PKT_CNT, CNTR_NORMAL),
5097 [C_TX_WORDS] = TXE64_PORT_CNTR_ELEM(TxWords, SEND_DWORD_CNT, CNTR_NORMAL),
5098 [C_TX_WAIT] = TXE64_PORT_CNTR_ELEM(TxWait, SEND_WAIT_CNT, CNTR_SYNTH),
5099 [C_TX_FLIT_VL] = TXE64_PORT_CNTR_ELEM(TxFlitVL, SEND_DATA_VL0_CNT,
5100                                       CNTR_SYNTH | CNTR_VL),
5101 [C_TX_PKT_VL] = TXE64_PORT_CNTR_ELEM(TxPktVL, SEND_DATA_PKT_VL0_CNT,
5102                                      CNTR_SYNTH | CNTR_VL),
5103 [C_TX_WAIT_VL] = TXE64_PORT_CNTR_ELEM(TxWaitVL, SEND_WAIT_VL0_CNT,
5104                                       CNTR_SYNTH | CNTR_VL),
5105 [C_RX_PKT] = RXE64_PORT_CNTR_ELEM(RxPkt, RCV_DATA_PKT_CNT, CNTR_NORMAL),
5106 [C_RX_WORDS] = RXE64_PORT_CNTR_ELEM(RxWords, RCV_DWORD_CNT, CNTR_NORMAL),
5107 [C_SW_LINK_DOWN] = CNTR_ELEM("SwLinkDown", 0, 0, CNTR_SYNTH | CNTR_32BIT,
5108                              access_sw_link_dn_cnt),
5109 [C_SW_LINK_UP] = CNTR_ELEM("SwLinkUp", 0, 0, CNTR_SYNTH | CNTR_32BIT,
5110                            access_sw_link_up_cnt),
5111 [C_SW_UNKNOWN_FRAME] = CNTR_ELEM("UnknownFrame", 0, 0, CNTR_NORMAL,
5112                                  access_sw_unknown_frame_cnt),
5113 [C_SW_XMIT_DSCD] = CNTR_ELEM("XmitDscd", 0, 0, CNTR_SYNTH | CNTR_32BIT,
5114                              access_sw_xmit_discards),
5115 [C_SW_XMIT_DSCD_VL] = CNTR_ELEM("XmitDscdVl", 0, 0,
5116                                 CNTR_SYNTH | CNTR_32BIT | CNTR_VL,
5117                                 access_sw_xmit_discards),
5118 [C_SW_XMIT_CSTR_ERR] = CNTR_ELEM("XmitCstrErr", 0, 0, CNTR_SYNTH,
5119                                  access_xmit_constraint_errs),
5120 [C_SW_RCV_CSTR_ERR] = CNTR_ELEM("RcvCstrErr", 0, 0, CNTR_SYNTH,
5121                                 access_rcv_constraint_errs),
5122 [C_SW_IBP_LOOP_PKTS] = SW_IBP_CNTR(LoopPkts, loop_pkts),
5123 [C_SW_IBP_RC_RESENDS] = SW_IBP_CNTR(RcResend, rc_resends),
5124 [C_SW_IBP_RNR_NAKS] = SW_IBP_CNTR(RnrNak, rnr_naks),
5125 [C_SW_IBP_OTHER_NAKS] = SW_IBP_CNTR(OtherNak, other_naks),
5126 [C_SW_IBP_RC_TIMEOUTS] = SW_IBP_CNTR(RcTimeOut, rc_timeouts),
5127 [C_SW_IBP_PKT_DROPS] = SW_IBP_CNTR(PktDrop, pkt_drops),
5128 [C_SW_IBP_DMA_WAIT] = SW_IBP_CNTR(DmaWait, dmawait),
5129 [C_SW_IBP_RC_SEQNAK] = SW_IBP_CNTR(RcSeqNak, rc_seqnak),
5130 [C_SW_IBP_RC_DUPREQ] = SW_IBP_CNTR(RcDupRew, rc_dupreq),
5131 [C_SW_IBP_RDMA_SEQ] = SW_IBP_CNTR(RdmaSeq, rdma_seq),
5132 [C_SW_IBP_UNALIGNED] = SW_IBP_CNTR(Unaligned, unaligned),
5133 [C_SW_IBP_SEQ_NAK] = SW_IBP_CNTR(SeqNak, seq_naks),
5134 [C_SW_IBP_RC_CRWAITS] = SW_IBP_CNTR(RcCrWait, rc_crwaits),
5135 [C_SW_CPU_RC_ACKS] = CNTR_ELEM("RcAcks", 0, 0, CNTR_NORMAL,
5136                                access_sw_cpu_rc_acks),
5137 [C_SW_CPU_RC_QACKS] = CNTR_ELEM("RcQacks", 0, 0, CNTR_NORMAL,
5138                                 access_sw_cpu_rc_qacks),
5139 [C_SW_CPU_RC_DELAYED_COMP] = CNTR_ELEM("RcDelayComp", 0, 0, CNTR_NORMAL,
5140                                        access_sw_cpu_rc_delayed_comp),
5141 [OVR_LBL(0)] = OVR_ELM(0), [OVR_LBL(1)] = OVR_ELM(1),
5142 [OVR_LBL(2)] = OVR_ELM(2), [OVR_LBL(3)] = OVR_ELM(3),
5143 [OVR_LBL(4)] = OVR_ELM(4), [OVR_LBL(5)] = OVR_ELM(5),
5144 [OVR_LBL(6)] = OVR_ELM(6), [OVR_LBL(7)] = OVR_ELM(7),
5145 [OVR_LBL(8)] = OVR_ELM(8), [OVR_LBL(9)] = OVR_ELM(9),
5146 [OVR_LBL(10)] = OVR_ELM(10), [OVR_LBL(11)] = OVR_ELM(11),
5147 [OVR_LBL(12)] = OVR_ELM(12), [OVR_LBL(13)] = OVR_ELM(13),
5148 [OVR_LBL(14)] = OVR_ELM(14), [OVR_LBL(15)] = OVR_ELM(15),
5149 [OVR_LBL(16)] = OVR_ELM(16), [OVR_LBL(17)] = OVR_ELM(17),
5150 [OVR_LBL(18)] = OVR_ELM(18), [OVR_LBL(19)] = OVR_ELM(19),
5151 [OVR_LBL(20)] = OVR_ELM(20), [OVR_LBL(21)] = OVR_ELM(21),
5152 [OVR_LBL(22)] = OVR_ELM(22), [OVR_LBL(23)] = OVR_ELM(23),
5153 [OVR_LBL(24)] = OVR_ELM(24), [OVR_LBL(25)] = OVR_ELM(25),
5154 [OVR_LBL(26)] = OVR_ELM(26), [OVR_LBL(27)] = OVR_ELM(27),
5155 [OVR_LBL(28)] = OVR_ELM(28), [OVR_LBL(29)] = OVR_ELM(29),
5156 [OVR_LBL(30)] = OVR_ELM(30), [OVR_LBL(31)] = OVR_ELM(31),
5157 [OVR_LBL(32)] = OVR_ELM(32), [OVR_LBL(33)] = OVR_ELM(33),
5158 [OVR_LBL(34)] = OVR_ELM(34), [OVR_LBL(35)] = OVR_ELM(35),
5159 [OVR_LBL(36)] = OVR_ELM(36), [OVR_LBL(37)] = OVR_ELM(37),
5160 [OVR_LBL(38)] = OVR_ELM(38), [OVR_LBL(39)] = OVR_ELM(39),
5161 [OVR_LBL(40)] = OVR_ELM(40), [OVR_LBL(41)] = OVR_ELM(41),
5162 [OVR_LBL(42)] = OVR_ELM(42), [OVR_LBL(43)] = OVR_ELM(43),
5163 [OVR_LBL(44)] = OVR_ELM(44), [OVR_LBL(45)] = OVR_ELM(45),
5164 [OVR_LBL(46)] = OVR_ELM(46), [OVR_LBL(47)] = OVR_ELM(47),
5165 [OVR_LBL(48)] = OVR_ELM(48), [OVR_LBL(49)] = OVR_ELM(49),
5166 [OVR_LBL(50)] = OVR_ELM(50), [OVR_LBL(51)] = OVR_ELM(51),
5167 [OVR_LBL(52)] = OVR_ELM(52), [OVR_LBL(53)] = OVR_ELM(53),
5168 [OVR_LBL(54)] = OVR_ELM(54), [OVR_LBL(55)] = OVR_ELM(55),
5169 [OVR_LBL(56)] = OVR_ELM(56), [OVR_LBL(57)] = OVR_ELM(57),
5170 [OVR_LBL(58)] = OVR_ELM(58), [OVR_LBL(59)] = OVR_ELM(59),
5171 [OVR_LBL(60)] = OVR_ELM(60), [OVR_LBL(61)] = OVR_ELM(61),
5172 [OVR_LBL(62)] = OVR_ELM(62), [OVR_LBL(63)] = OVR_ELM(63),
5173 [OVR_LBL(64)] = OVR_ELM(64), [OVR_LBL(65)] = OVR_ELM(65),
5174 [OVR_LBL(66)] = OVR_ELM(66), [OVR_LBL(67)] = OVR_ELM(67),
5175 [OVR_LBL(68)] = OVR_ELM(68), [OVR_LBL(69)] = OVR_ELM(69),
5176 [OVR_LBL(70)] = OVR_ELM(70), [OVR_LBL(71)] = OVR_ELM(71),
5177 [OVR_LBL(72)] = OVR_ELM(72), [OVR_LBL(73)] = OVR_ELM(73),
5178 [OVR_LBL(74)] = OVR_ELM(74), [OVR_LBL(75)] = OVR_ELM(75),
5179 [OVR_LBL(76)] = OVR_ELM(76), [OVR_LBL(77)] = OVR_ELM(77),
5180 [OVR_LBL(78)] = OVR_ELM(78), [OVR_LBL(79)] = OVR_ELM(79),
5181 [OVR_LBL(80)] = OVR_ELM(80), [OVR_LBL(81)] = OVR_ELM(81),
5182 [OVR_LBL(82)] = OVR_ELM(82), [OVR_LBL(83)] = OVR_ELM(83),
5183 [OVR_LBL(84)] = OVR_ELM(84), [OVR_LBL(85)] = OVR_ELM(85),
5184 [OVR_LBL(86)] = OVR_ELM(86), [OVR_LBL(87)] = OVR_ELM(87),
5185 [OVR_LBL(88)] = OVR_ELM(88), [OVR_LBL(89)] = OVR_ELM(89),
5186 [OVR_LBL(90)] = OVR_ELM(90), [OVR_LBL(91)] = OVR_ELM(91),
5187 [OVR_LBL(92)] = OVR_ELM(92), [OVR_LBL(93)] = OVR_ELM(93),
5188 [OVR_LBL(94)] = OVR_ELM(94), [OVR_LBL(95)] = OVR_ELM(95),
5189 [OVR_LBL(96)] = OVR_ELM(96), [OVR_LBL(97)] = OVR_ELM(97),
5190 [OVR_LBL(98)] = OVR_ELM(98), [OVR_LBL(99)] = OVR_ELM(99),
5191 [OVR_LBL(100)] = OVR_ELM(100), [OVR_LBL(101)] = OVR_ELM(101),
5192 [OVR_LBL(102)] = OVR_ELM(102), [OVR_LBL(103)] = OVR_ELM(103),
5193 [OVR_LBL(104)] = OVR_ELM(104), [OVR_LBL(105)] = OVR_ELM(105),
5194 [OVR_LBL(106)] = OVR_ELM(106), [OVR_LBL(107)] = OVR_ELM(107),
5195 [OVR_LBL(108)] = OVR_ELM(108), [OVR_LBL(109)] = OVR_ELM(109),
5196 [OVR_LBL(110)] = OVR_ELM(110), [OVR_LBL(111)] = OVR_ELM(111),
5197 [OVR_LBL(112)] = OVR_ELM(112), [OVR_LBL(113)] = OVR_ELM(113),
5198 [OVR_LBL(114)] = OVR_ELM(114), [OVR_LBL(115)] = OVR_ELM(115),
5199 [OVR_LBL(116)] = OVR_ELM(116), [OVR_LBL(117)] = OVR_ELM(117),
5200 [OVR_LBL(118)] = OVR_ELM(118), [OVR_LBL(119)] = OVR_ELM(119),
5201 [OVR_LBL(120)] = OVR_ELM(120), [OVR_LBL(121)] = OVR_ELM(121),
5202 [OVR_LBL(122)] = OVR_ELM(122), [OVR_LBL(123)] = OVR_ELM(123),
5203 [OVR_LBL(124)] = OVR_ELM(124), [OVR_LBL(125)] = OVR_ELM(125),
5204 [OVR_LBL(126)] = OVR_ELM(126), [OVR_LBL(127)] = OVR_ELM(127),
5205 [OVR_LBL(128)] = OVR_ELM(128), [OVR_LBL(129)] = OVR_ELM(129),
5206 [OVR_LBL(130)] = OVR_ELM(130), [OVR_LBL(131)] = OVR_ELM(131),
5207 [OVR_LBL(132)] = OVR_ELM(132), [OVR_LBL(133)] = OVR_ELM(133),
5208 [OVR_LBL(134)] = OVR_ELM(134), [OVR_LBL(135)] = OVR_ELM(135),
5209 [OVR_LBL(136)] = OVR_ELM(136), [OVR_LBL(137)] = OVR_ELM(137),
5210 [OVR_LBL(138)] = OVR_ELM(138), [OVR_LBL(139)] = OVR_ELM(139),
5211 [OVR_LBL(140)] = OVR_ELM(140), [OVR_LBL(141)] = OVR_ELM(141),
5212 [OVR_LBL(142)] = OVR_ELM(142), [OVR_LBL(143)] = OVR_ELM(143),
5213 [OVR_LBL(144)] = OVR_ELM(144), [OVR_LBL(145)] = OVR_ELM(145),
5214 [OVR_LBL(146)] = OVR_ELM(146), [OVR_LBL(147)] = OVR_ELM(147),
5215 [OVR_LBL(148)] = OVR_ELM(148), [OVR_LBL(149)] = OVR_ELM(149),
5216 [OVR_LBL(150)] = OVR_ELM(150), [OVR_LBL(151)] = OVR_ELM(151),
5217 [OVR_LBL(152)] = OVR_ELM(152), [OVR_LBL(153)] = OVR_ELM(153),
5218 [OVR_LBL(154)] = OVR_ELM(154), [OVR_LBL(155)] = OVR_ELM(155),
5219 [OVR_LBL(156)] = OVR_ELM(156), [OVR_LBL(157)] = OVR_ELM(157),
5220 [OVR_LBL(158)] = OVR_ELM(158), [OVR_LBL(159)] = OVR_ELM(159),
5221 };
5222 
5223 /* ======================================================================== */
5224 
5225 /* return true if this is chip revision revision a */
5226 int is_ax(struct hfi1_devdata *dd)
5227 {
5228         u8 chip_rev_minor =
5229                 dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT
5230                         & CCE_REVISION_CHIP_REV_MINOR_MASK;
5231         return (chip_rev_minor & 0xf0) == 0;
5232 }
5233 
5234 /* return true if this is chip revision revision b */
5235 int is_bx(struct hfi1_devdata *dd)
5236 {
5237         u8 chip_rev_minor =
5238                 dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT
5239                         & CCE_REVISION_CHIP_REV_MINOR_MASK;
5240         return (chip_rev_minor & 0xF0) == 0x10;
5241 }
5242 
5243 /* return true is kernel urg disabled for rcd */
5244 bool is_urg_masked(struct hfi1_ctxtdata *rcd)
5245 {
5246         u64 mask;
5247         u32 is = IS_RCVURGENT_START + rcd->ctxt;
5248         u8 bit = is % 64;
5249 
5250         mask = read_csr(rcd->dd, CCE_INT_MASK + (8 * (is / 64)));
5251         return !(mask & BIT_ULL(bit));
5252 }
5253 
5254 /*
5255  * Append string s to buffer buf.  Arguments curp and len are the current
5256  * position and remaining length, respectively.
5257  *
5258  * return 0 on success, 1 on out of room
5259  */
5260 static int append_str(char *buf, char **curp, int *lenp, const char *s)
5261 {
5262         char *p = *curp;
5263         int len = *lenp;
5264         int result = 0; /* success */
5265         char c;
5266 
5267         /* add a comma, if first in the buffer */
5268         if (p != buf) {
5269                 if (len == 0) {
5270                         result = 1; /* out of room */
5271                         goto done;
5272                 }
5273                 *p++ = ',';
5274                 len--;
5275         }
5276 
5277         /* copy the string */
5278         while ((c = *s++) != 0) {
5279                 if (len == 0) {
5280                         result = 1; /* out of room */
5281                         goto done;
5282                 }
5283                 *p++ = c;
5284                 len--;
5285         }
5286 
5287 done:
5288         /* write return values */
5289         *curp = p;
5290         *lenp = len;
5291 
5292         return result;
5293 }
5294 
5295 /*
5296  * Using the given flag table, print a comma separated string into
5297  * the buffer.  End in '*' if the buffer is too short.
5298  */
5299 static char *flag_string(char *buf, int buf_len, u64 flags,
5300                          struct flag_table *table, int table_size)
5301 {
5302         char extra[32];
5303         char *p = buf;
5304         int len = buf_len;
5305         int no_room = 0;
5306         int i;
5307 
5308         /* make sure there is at least 2 so we can form "*" */
5309         if (len < 2)
5310                 return "";
5311 
5312         len--;  /* leave room for a nul */
5313         for (i = 0; i < table_size; i++) {
5314                 if (flags & table[i].flag) {
5315                         no_room = append_str(buf, &p, &len, table[i].str);
5316                         if (no_room)
5317                                 break;
5318                         flags &= ~table[i].flag;
5319                 }
5320         }
5321 
5322         /* any undocumented bits left? */
5323         if (!no_room && flags) {
5324                 snprintf(extra, sizeof(extra), "bits 0x%llx", flags);
5325                 no_room = append_str(buf, &p, &len, extra);
5326         }
5327 
5328         /* add * if ran out of room */
5329         if (no_room) {
5330                 /* may need to back up to add space for a '*' */
5331                 if (len == 0)
5332                         --p;
5333                 *p++ = '*';
5334         }
5335 
5336         /* add final nul - space already allocated above */
5337         *p = 0;
5338         return buf;
5339 }
5340 
5341 /* first 8 CCE error interrupt source names */
5342 static const char * const cce_misc_names[] = {
5343         "CceErrInt",            /* 0 */
5344         "RxeErrInt",            /* 1 */
5345         "MiscErrInt",           /* 2 */
5346         "Reserved3",            /* 3 */
5347         "PioErrInt",            /* 4 */
5348         "SDmaErrInt",           /* 5 */
5349         "EgressErrInt",         /* 6 */
5350         "TxeErrInt"             /* 7 */
5351 };
5352 
5353 /*
5354  * Return the miscellaneous error interrupt name.
5355  */
5356 static char *is_misc_err_name(char *buf, size_t bsize, unsigned int source)
5357 {
5358         if (source < ARRAY_SIZE(cce_misc_names))
5359                 strncpy(buf, cce_misc_names[source], bsize);
5360         else
5361                 snprintf(buf, bsize, "Reserved%u",
5362                          source + IS_GENERAL_ERR_START);
5363 
5364         return buf;
5365 }
5366 
5367 /*
5368  * Return the SDMA engine error interrupt name.
5369  */
5370 static char *is_sdma_eng_err_name(char *buf, size_t bsize, unsigned int source)
5371 {
5372         snprintf(buf, bsize, "SDmaEngErrInt%u", source);
5373         return buf;
5374 }
5375 
5376 /*
5377  * Return the send context error interrupt name.
5378  */
5379 static char *is_sendctxt_err_name(char *buf, size_t bsize, unsigned int source)
5380 {
5381         snprintf(buf, bsize, "SendCtxtErrInt%u", source);
5382         return buf;
5383 }
5384 
5385 static const char * const various_names[] = {
5386         "PbcInt",
5387         "GpioAssertInt",
5388         "Qsfp1Int",
5389         "Qsfp2Int",
5390         "TCritInt"
5391 };
5392 
5393 /*
5394  * Return the various interrupt name.
5395  */
5396 static char *is_various_name(char *buf, size_t bsize, unsigned int source)
5397 {
5398         if (source < ARRAY_SIZE(various_names))
5399                 strncpy(buf, various_names[source], bsize);
5400         else
5401                 snprintf(buf, bsize, "Reserved%u", source + IS_VARIOUS_START);
5402         return buf;
5403 }
5404 
5405 /*
5406  * Return the DC interrupt name.
5407  */
5408 static char *is_dc_name(char *buf, size_t bsize, unsigned int source)
5409 {
5410         static const char * const dc_int_names[] = {
5411                 "common",
5412                 "lcb",
5413                 "8051",
5414                 "lbm"   /* local block merge */
5415         };
5416 
5417         if (source < ARRAY_SIZE(dc_int_names))
5418                 snprintf(buf, bsize, "dc_%s_int", dc_int_names[source]);
5419         else
5420                 snprintf(buf, bsize, "DCInt%u", source);
5421         return buf;
5422 }
5423 
5424 static const char * const sdma_int_names[] = {
5425         "SDmaInt",
5426         "SdmaIdleInt",
5427         "SdmaProgressInt",
5428 };
5429 
5430 /*
5431  * Return the SDMA engine interrupt name.
5432  */
5433 static char *is_sdma_eng_name(char *buf, size_t bsize, unsigned int source)
5434 {
5435         /* what interrupt */
5436         unsigned int what  = source / TXE_NUM_SDMA_ENGINES;
5437         /* which engine */
5438         unsigned int which = source % TXE_NUM_SDMA_ENGINES;
5439 
5440         if (likely(what < 3))
5441                 snprintf(buf, bsize, "%s%u", sdma_int_names[what], which);
5442         else
5443                 snprintf(buf, bsize, "Invalid SDMA interrupt %u", source);
5444         return buf;
5445 }
5446 
5447 /*
5448  * Return the receive available interrupt name.
5449  */
5450 static char *is_rcv_avail_name(char *buf, size_t bsize, unsigned int source)
5451 {
5452         snprintf(buf, bsize, "RcvAvailInt%u", source);
5453         return buf;
5454 }
5455 
5456 /*
5457  * Return the receive urgent interrupt name.
5458  */
5459 static char *is_rcv_urgent_name(char *buf, size_t bsize, unsigned int source)
5460 {
5461         snprintf(buf, bsize, "RcvUrgentInt%u", source);
5462         return buf;
5463 }
5464 
5465 /*
5466  * Return the send credit interrupt name.
5467  */
5468 static char *is_send_credit_name(char *buf, size_t bsize, unsigned int source)
5469 {
5470         snprintf(buf, bsize, "SendCreditInt%u", source);
5471         return buf;
5472 }
5473 
5474 /*
5475  * Return the reserved interrupt name.
5476  */
5477 static char *is_reserved_name(char *buf, size_t bsize, unsigned int source)
5478 {
5479         snprintf(buf, bsize, "Reserved%u", source + IS_RESERVED_START);
5480         return buf;
5481 }
5482 
5483 static char *cce_err_status_string(char *buf, int buf_len, u64 flags)
5484 {
5485         return flag_string(buf, buf_len, flags,
5486                            cce_err_status_flags,
5487                            ARRAY_SIZE(cce_err_status_flags));
5488 }
5489 
5490 static char *rxe_err_status_string(char *buf, int buf_len, u64 flags)
5491 {
5492         return flag_string(buf, buf_len, flags,
5493                            rxe_err_status_flags,
5494                            ARRAY_SIZE(rxe_err_status_flags));
5495 }
5496 
5497 static char *misc_err_status_string(char *buf, int buf_len, u64 flags)
5498 {
5499         return flag_string(buf, buf_len, flags, misc_err_status_flags,
5500                            ARRAY_SIZE(misc_err_status_flags));
5501 }
5502 
5503 static char *pio_err_status_string(char *buf, int buf_len, u64 flags)
5504 {
5505         return flag_string(buf, buf_len, flags,
5506                            pio_err_status_flags,
5507                            ARRAY_SIZE(pio_err_status_flags));
5508 }
5509 
5510 static char *sdma_err_status_string(char *buf, int buf_len, u64 flags)
5511 {
5512         return flag_string(buf, buf_len, flags,
5513                            sdma_err_status_flags,
5514                            ARRAY_SIZE(sdma_err_status_flags));
5515 }
5516 
5517 static char *egress_err_status_string(char *buf, int buf_len, u64 flags)
5518 {
5519         return flag_string(buf, buf_len, flags,
5520                            egress_err_status_flags,
5521                            ARRAY_SIZE(egress_err_status_flags));
5522 }
5523 
5524 static char *egress_err_info_string(char *buf, int buf_len, u64 flags)
5525 {
5526         return flag_string(buf, buf_len, flags,
5527                            egress_err_info_flags,
5528                            ARRAY_SIZE(egress_err_info_flags));
5529 }
5530 
5531 static char *send_err_status_string(char *buf, int buf_len, u64 flags)
5532 {
5533         return flag_string(buf, buf_len, flags,
5534                            send_err_status_flags,
5535                            ARRAY_SIZE(send_err_status_flags));
5536 }
5537 
5538 static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5539 {
5540         char buf[96];
5541         int i = 0;
5542 
5543         /*
5544          * For most these errors, there is nothing that can be done except
5545          * report or record it.
5546          */
5547         dd_dev_info(dd, "CCE Error: %s\n",
5548                     cce_err_status_string(buf, sizeof(buf), reg));
5549 
5550         if ((reg & CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK) &&
5551             is_ax(dd) && (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)) {
5552                 /* this error requires a manual drop into SPC freeze mode */
5553                 /* then a fix up */
5554                 start_freeze_handling(dd->pport, FREEZE_SELF);
5555         }
5556 
5557         for (i = 0; i < NUM_CCE_ERR_STATUS_COUNTERS; i++) {
5558                 if (reg & (1ull << i)) {
5559                         incr_cntr64(&dd->cce_err_status_cnt[i]);
5560                         /* maintain a counter over all cce_err_status errors */
5561                         incr_cntr64(&dd->sw_cce_err_status_aggregate);
5562                 }
5563         }
5564 }
5565 
5566 /*
5567  * Check counters for receive errors that do not have an interrupt
5568  * associated with them.
5569  */
5570 #define RCVERR_CHECK_TIME 10
5571 static void update_rcverr_timer(struct timer_list *t)
5572 {
5573         struct hfi1_devdata *dd = from_timer(dd, t, rcverr_timer);
5574         struct hfi1_pportdata *ppd = dd->pport;
5575         u32 cur_ovfl_cnt = read_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL);
5576 
5577         if (dd->rcv_ovfl_cnt < cur_ovfl_cnt &&
5578             ppd->port_error_action & OPA_PI_MASK_EX_BUFFER_OVERRUN) {
5579                 dd_dev_info(dd, "%s: PortErrorAction bounce\n", __func__);
5580                 set_link_down_reason(
5581                 ppd, OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN, 0,
5582                 OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN);
5583                 queue_work(ppd->link_wq, &ppd->link_bounce_work);
5584         }
5585         dd->rcv_ovfl_cnt = (u32)cur_ovfl_cnt;
5586 
5587         mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);
5588 }
5589 
5590 static int init_rcverr(struct hfi1_devdata *dd)
5591 {
5592         timer_setup(&dd->rcverr_timer, update_rcverr_timer, 0);
5593         /* Assume the hardware counter has been reset */
5594         dd->rcv_ovfl_cnt = 0;
5595         return mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);
5596 }
5597 
5598 static void free_rcverr(struct hfi1_devdata *dd)
5599 {
5600         if (dd->rcverr_timer.function)
5601                 del_timer_sync(&dd->rcverr_timer);
5602 }
5603 
5604 static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5605 {
5606         char buf[96];
5607         int i = 0;
5608 
5609         dd_dev_info(dd, "Receive Error: %s\n",
5610                     rxe_err_status_string(buf, sizeof(buf), reg));
5611 
5612         if (reg & ALL_RXE_FREEZE_ERR) {
5613                 int flags = 0;
5614 
5615                 /*
5616                  * Freeze mode recovery is disabled for the errors
5617                  * in RXE_FREEZE_ABORT_MASK
5618                  */
5619                 if (is_ax(dd) && (reg & RXE_FREEZE_ABORT_MASK))
5620                         flags = FREEZE_ABORT;
5621 
5622                 start_freeze_handling(dd->pport, flags);
5623         }
5624 
5625         for (i = 0; i < NUM_RCV_ERR_STATUS_COUNTERS; i++) {
5626                 if (reg & (1ull << i))
5627                         incr_cntr64(&dd->rcv_err_status_cnt[i]);
5628         }
5629 }
5630 
5631 static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5632 {
5633         char buf[96];
5634         int i = 0;
5635 
5636         dd_dev_info(dd, "Misc Error: %s",
5637                     misc_err_status_string(buf, sizeof(buf), reg));
5638         for (i = 0; i < NUM_MISC_ERR_STATUS_COUNTERS; i++) {
5639                 if (reg & (1ull << i))
5640                         incr_cntr64(&dd->misc_err_status_cnt[i]);
5641         }
5642 }
5643 
5644 static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5645 {
5646         char buf[96];
5647         int i = 0;
5648 
5649         dd_dev_info(dd, "PIO Error: %s\n",
5650                     pio_err_status_string(buf, sizeof(buf), reg));
5651 
5652         if (reg & ALL_PIO_FREEZE_ERR)
5653                 start_freeze_handling(dd->pport, 0);
5654 
5655         for (i = 0; i < NUM_SEND_PIO_ERR_STATUS_COUNTERS; i++) {
5656                 if (reg & (1ull << i))
5657                         incr_cntr64(&dd->send_pio_err_status_cnt[i]);
5658         }
5659 }
5660 
5661 static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5662 {
5663         char buf[96];
5664         int i = 0;
5665 
5666         dd_dev_info(dd, "SDMA Error: %s\n",
5667                     sdma_err_status_string(buf, sizeof(buf), reg));
5668 
5669         if (reg & ALL_SDMA_FREEZE_ERR)
5670                 start_freeze_handling(dd->pport, 0);
5671 
5672         for (i = 0; i < NUM_SEND_DMA_ERR_STATUS_COUNTERS; i++) {
5673                 if (reg & (1ull << i))
5674                         incr_cntr64(&dd->send_dma_err_status_cnt[i]);
5675         }
5676 }
5677 
5678 static inline void __count_port_discards(struct hfi1_pportdata *ppd)
5679 {
5680         incr_cntr64(&ppd->port_xmit_discards);
5681 }
5682 
5683 static void count_port_inactive(struct hfi1_devdata *dd)
5684 {
5685         __count_port_discards(dd->pport);
5686 }
5687 
5688 /*
5689  * We have had a "disallowed packet" error during egress. Determine the
5690  * integrity check which failed, and update relevant error counter, etc.
5691  *
5692  * Note that the SEND_EGRESS_ERR_INFO register has only a single
5693  * bit of state per integrity check, and so we can miss the reason for an
5694  * egress error if more than one packet fails the same integrity check
5695  * since we cleared the corresponding bit in SEND_EGRESS_ERR_INFO.
5696  */
5697 static void handle_send_egress_err_info(struct hfi1_devdata *dd,
5698                                         int vl)
5699 {
5700         struct hfi1_pportdata *ppd = dd->pport;
5701         u64 src = read_csr(dd, SEND_EGRESS_ERR_SOURCE); /* read first */
5702         u64 info = read_csr(dd, SEND_EGRESS_ERR_INFO);
5703         char buf[96];
5704 
5705         /* clear down all observed info as quickly as possible after read */
5706         write_csr(dd, SEND_EGRESS_ERR_INFO, info);
5707 
5708         dd_dev_info(dd,
5709                     "Egress Error Info: 0x%llx, %s Egress Error Src 0x%llx\n",
5710                     info, egress_err_info_string(buf, sizeof(buf), info), src);
5711 
5712         /* Eventually add other counters for each bit */
5713         if (info & PORT_DISCARD_EGRESS_ERRS) {
5714                 int weight, i;
5715 
5716                 /*
5717                  * Count all applicable bits as individual errors and
5718                  * attribute them to the packet that triggered this handler.
5719                  * This may not be completely accurate due to limitations
5720                  * on the available hardware error information.  There is
5721                  * a single information register and any number of error
5722                  * packets may have occurred and contributed to it before
5723                  * this routine is called.  This means that:
5724                  * a) If multiple packets with the same error occur before
5725                  *    this routine is called, earlier packets are missed.
5726                  *    There is only a single bit for each error type.
5727                  * b) Errors may not be attributed to the correct VL.
5728                  *    The driver is attributing all bits in the info register
5729                  *    to the packet that triggered this call, but bits
5730                  *    could be an accumulation of different packets with
5731                  *    different VLs.
5732                  * c) A single error packet may have multiple counts attached
5733                  *    to it.  There is no way for the driver to know if
5734                  *    multiple bits set in the info register are due to a
5735                  *    single packet or multiple packets.  The driver assumes
5736                  *    multiple packets.
5737                  */
5738                 weight = hweight64(info & PORT_DISCARD_EGRESS_ERRS);
5739                 for (i = 0; i < weight; i++) {
5740                         __count_port_discards(ppd);
5741                         if (vl >= 0 && vl < TXE_NUM_DATA_VL)
5742                                 incr_cntr64(&ppd->port_xmit_discards_vl[vl]);
5743                         else if (vl == 15)
5744                                 incr_cntr64(&ppd->port_xmit_discards_vl
5745                                             [C_VL_15]);
5746                 }
5747         }
5748 }
5749 
5750 /*
5751  * Input value is a bit position within the SEND_EGRESS_ERR_STATUS
5752  * register. Does it represent a 'port inactive' error?
5753  */
5754 static inline int port_inactive_err(u64 posn)
5755 {
5756         return (posn >= SEES(TX_LINKDOWN) &&
5757                 posn <= SEES(TX_INCORRECT_LINK_STATE));
5758 }
5759 
5760 /*
5761  * Input value is a bit position within the SEND_EGRESS_ERR_STATUS
5762  * register. Does it represent a 'disallowed packet' error?
5763  */
5764 static inline int disallowed_pkt_err(int posn)
5765 {
5766         return (posn >= SEES(TX_SDMA0_DISALLOWED_PACKET) &&
5767                 posn <= SEES(TX_SDMA15_DISALLOWED_PACKET));
5768 }
5769 
5770 /*
5771  * Input value is a bit position of one of the SDMA engine disallowed
5772  * packet errors.  Return which engine.  Use of this must be guarded by
5773  * disallowed_pkt_err().
5774  */
5775 static inline int disallowed_pkt_engine(int posn)
5776 {
5777         return posn - SEES(TX_SDMA0_DISALLOWED_PACKET);
5778 }
5779 
5780 /*
5781  * Translate an SDMA engine to a VL.  Return -1 if the tranlation cannot
5782  * be done.
5783  */
5784 static int engine_to_vl(struct hfi1_devdata *dd, int engine)
5785 {
5786         struct sdma_vl_map *m;
5787         int vl;
5788 
5789         /* range check */
5790         if (engine < 0 || engine >= TXE_NUM_SDMA_ENGINES)
5791                 return -1;
5792 
5793         rcu_read_lock();
5794         m = rcu_dereference(dd->sdma_map);
5795         vl = m->engine_to_vl[engine];
5796         rcu_read_unlock();
5797 
5798         return vl;
5799 }
5800 
5801 /*
5802  * Translate the send context (sofware index) into a VL.  Return -1 if the
5803  * translation cannot be done.
5804  */
5805 static int sc_to_vl(struct hfi1_devdata *dd, int sw_index)
5806 {
5807         struct send_context_info *sci;
5808         struct send_context *sc;
5809         int i;
5810 
5811         sci = &dd->send_contexts[sw_index];
5812 
5813         /* there is no information for user (PSM) and ack contexts */
5814         if ((sci->type != SC_KERNEL) && (sci->type != SC_VL15))
5815                 return -1;
5816 
5817         sc = sci->sc;
5818         if (!sc)
5819                 return -1;
5820         if (dd->vld[15].sc == sc)
5821                 return 15;
5822         for (i = 0; i < num_vls; i++)
5823                 if (dd->vld[i].sc == sc)
5824                         return i;
5825 
5826         return -1;
5827 }
5828 
5829 static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5830 {
5831         u64 reg_copy = reg, handled = 0;
5832         char buf[96];
5833         int i = 0;
5834 
5835         if (reg & ALL_TXE_EGRESS_FREEZE_ERR)
5836                 start_freeze_handling(dd->pport, 0);
5837         else if (is_ax(dd) &&
5838                  (reg & SEND_EGRESS_ERR_STATUS_TX_CREDIT_RETURN_VL_ERR_SMASK) &&
5839                  (dd->icode != ICODE_FUNCTIONAL_SIMULATOR))
5840                 start_freeze_handling(dd->pport, 0);
5841 
5842         while (reg_copy) {
5843                 int posn = fls64(reg_copy);
5844                 /* fls64() returns a 1-based offset, we want it zero based */
5845                 int shift = posn - 1;
5846                 u64 mask = 1ULL << shift;
5847 
5848                 if (port_inactive_err(shift)) {
5849                         count_port_inactive(dd);
5850                         handled |= mask;
5851                 } else if (disallowed_pkt_err(shift)) {
5852                         int vl = engine_to_vl(dd, disallowed_pkt_engine(shift));
5853 
5854                         handle_send_egress_err_info(dd, vl);
5855                         handled |= mask;
5856                 }
5857                 reg_copy &= ~mask;
5858         }
5859 
5860         reg &= ~handled;
5861 
5862         if (reg)
5863                 dd_dev_info(dd, "Egress Error: %s\n",
5864                             egress_err_status_string(buf, sizeof(buf), reg));
5865 
5866         for (i = 0; i < NUM_SEND_EGRESS_ERR_STATUS_COUNTERS; i++) {
5867                 if (reg & (1ull << i))
5868                         incr_cntr64(&dd->send_egress_err_status_cnt[i]);
5869         }
5870 }
5871 
5872 static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5873 {
5874         char buf[96];
5875         int i = 0;
5876 
5877         dd_dev_info(dd, "Send Error: %s\n",
5878                     send_err_status_string(buf, sizeof(buf), reg));
5879 
5880         for (i = 0; i < NUM_SEND_ERR_STATUS_COUNTERS; i++) {
5881                 if (reg & (1ull << i))
5882                         incr_cntr64(&dd->send_err_status_cnt[i]);
5883         }
5884 }
5885 
5886 /*
5887  * The maximum number of times the error clear down will loop before
5888  * blocking a repeating error.  This value is arbitrary.
5889  */
5890 #define MAX_CLEAR_COUNT 20
5891 
5892 /*
5893  * Clear and handle an error register.  All error interrupts are funneled
5894  * through here to have a central location to correctly handle single-
5895  * or multi-shot errors.
5896  *
5897  * For non per-context registers, call this routine with a context value
5898  * of 0 so the per-context offset is zero.
5899  *
5900  * If the handler loops too many times, assume that something is wrong
5901  * and can't be fixed, so mask the error bits.
5902  */
5903 static void interrupt_clear_down(struct hfi1_devdata *dd,
5904                                  u32 context,
5905                                  const struct err_reg_info *eri)
5906 {
5907         u64 reg;
5908         u32 count;
5909 
5910         /* read in a loop until no more errors are seen */
5911         count = 0;
5912         while (1) {
5913                 reg = read_kctxt_csr(dd, context, eri->status);
5914                 if (reg == 0)
5915                         break;
5916                 write_kctxt_csr(dd, context, eri->clear, reg);
5917                 if (likely(eri->handler))
5918                         eri->handler(dd, context, reg);
5919                 count++;
5920                 if (count > MAX_CLEAR_COUNT) {
5921                         u64 mask;
5922 
5923                         dd_dev_err(dd, "Repeating %s bits 0x%llx - masking\n",
5924                                    eri->desc, reg);
5925                         /*
5926                          * Read-modify-write so any other masked bits
5927                          * remain masked.
5928                          */
5929                         mask = read_kctxt_csr(dd, context, eri->mask);
5930                         mask &= ~reg;
5931                         write_kctxt_csr(dd, context, eri->mask, mask);
5932                         break;
5933                 }
5934         }
5935 }
5936 
5937 /*
5938  * CCE block "misc" interrupt.  Source is < 16.
5939  */
5940 static void is_misc_err_int(struct hfi1_devdata *dd, unsigned int source)
5941 {
5942         const struct err_reg_info *eri = &misc_errs[source];
5943 
5944         if (eri->handler) {
5945                 interrupt_clear_down(dd, 0, eri);
5946         } else {
5947                 dd_dev_err(dd, "Unexpected misc interrupt (%u) - reserved\n",
5948                            source);
5949         }
5950 }
5951 
5952 static char *send_context_err_status_string(char *buf, int buf_len, u64 flags)
5953 {
5954         return flag_string(buf, buf_len, flags,
5955                            sc_err_status_flags,
5956                            ARRAY_SIZE(sc_err_status_flags));
5957 }
5958 
5959 /*
5960  * Send context error interrupt.  Source (hw_context) is < 160.
5961  *
5962  * All send context errors cause the send context to halt.  The normal
5963  * clear-down mechanism cannot be used because we cannot clear the
5964  * error bits until several other long-running items are done first.
5965  * This is OK because with the context halted, nothing else is going
5966  * to happen on it anyway.
5967  */
5968 static void is_sendctxt_err_int(struct hfi1_devdata *dd,
5969                                 unsigned int hw_context)
5970 {
5971         struct send_context_info *sci;
5972         struct send_context *sc;
5973         char flags[96];
5974         u64 status;
5975         u32 sw_index;
5976         int i = 0;
5977         unsigned long irq_flags;
5978 
5979         sw_index = dd->hw_to_sw[hw_context];
5980         if (sw_index >= dd->num_send_contexts) {
5981                 dd_dev_err(dd,
5982                            "out of range sw index %u for send context %u\n",
5983                            sw_index, hw_context);
5984                 return;
5985         }
5986         sci = &dd->send_contexts[sw_index];
5987         spin_lock_irqsave(&dd->sc_lock, irq_flags);
5988         sc = sci->sc;
5989         if (!sc) {
5990                 dd_dev_err(dd, "%s: context %u(%u): no sc?\n", __func__,
5991                            sw_index, hw_context);
5992                 spin_unlock_irqrestore(&dd->sc_lock, irq_flags);
5993                 return;
5994         }
5995 
5996         /* tell the software that a halt has begun */
5997         sc_stop(sc, SCF_HALTED);
5998 
5999         status = read_kctxt_csr(dd, hw_context, SEND_CTXT_ERR_STATUS);
6000 
6001         dd_dev_info(dd, "Send Context %u(%u) Error: %s\n", sw_index, hw_context,
6002                     send_context_err_status_string(flags, sizeof(flags),
6003                                                    status));
6004 
6005         if (status & SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK)
6006                 handle_send_egress_err_info(dd, sc_to_vl(dd, sw_index));
6007 
6008         /*
6009          * Automatically restart halted kernel contexts out of interrupt
6010          * context.  User contexts must ask the driver to restart the context.
6011          */
6012         if (sc->type != SC_USER)
6013                 queue_work(dd->pport->hfi1_wq, &sc->halt_work);
6014         spin_unlock_irqrestore(&dd->sc_lock, irq_flags);
6015 
6016         /*
6017          * Update the counters for the corresponding status bits.
6018          * Note that these particular counters are aggregated over all
6019          * 160 contexts.
6020          */
6021         for (i = 0; i < NUM_SEND_CTXT_ERR_STATUS_COUNTERS; i++) {
6022                 if (status & (1ull << i))
6023                         incr_cntr64(&dd->sw_ctxt_err_status_cnt[i]);
6024         }
6025 }
6026 
6027 static void handle_sdma_eng_err(struct hfi1_devdata *dd,
6028                                 unsigned int source, u64 status)
6029 {
6030         struct sdma_engine *sde;
6031         int i = 0;
6032 
6033         sde = &dd->per_sdma[source];
6034 #ifdef CONFIG_SDMA_VERBOSITY
6035         dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
6036                    slashstrip(__FILE__), __LINE__, __func__);
6037         dd_dev_err(sde->dd, "CONFIG SDMA(%u) source: %u status 0x%llx\n",
6038                    sde->this_idx, source, (unsigned long long)status);
6039 #endif
6040         sde->err_cnt++;
6041         sdma_engine_error(sde, status);
6042 
6043         /*
6044         * Update the counters for the corresponding status bits.
6045         * Note that these particular counters are aggregated over
6046         * all 16 DMA engines.
6047         */
6048         for (i = 0; i < NUM_SEND_DMA_ENG_ERR_STATUS_COUNTERS; i++) {
6049                 if (status & (1ull << i))
6050                         incr_cntr64(&dd->sw_send_dma_eng_err_status_cnt[i]);
6051         }
6052 }
6053 
6054 /*
6055  * CCE block SDMA error interrupt.  Source is < 16.
6056  */
6057 static void is_sdma_eng_err_int(struct hfi1_devdata *dd, unsigned int source)
6058 {
6059 #ifdef CONFIG_SDMA_VERBOSITY
6060         struct sdma_engine *sde = &dd->per_sdma[source];
6061 
6062         dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
6063                    slashstrip(__FILE__), __LINE__, __func__);
6064         dd_dev_err(dd, "CONFIG SDMA(%u) source: %u\n", sde->this_idx,
6065                    source);
6066         sdma_dumpstate(sde);
6067 #endif
6068         interrupt_clear_down(dd, source, &sdma_eng_err);
6069 }
6070 
6071 /*
6072  * CCE block "various" interrupt.  Source is < 8.
6073  */
6074 static void is_various_int(struct hfi1_devdata *dd, unsigned int source)
6075 {
6076         const struct err_reg_info *eri = &various_err[source];
6077 
6078         /*
6079          * TCritInt cannot go through interrupt_clear_down()
6080          * because it is not a second tier interrupt. The handler
6081          * should be called directly.
6082          */
6083         if (source == TCRIT_INT_SOURCE)
6084                 handle_temp_err(dd);
6085         else if (eri->handler)
6086                 interrupt_clear_down(dd, 0, eri);
6087         else
6088                 dd_dev_info(dd,
6089                             "%s: Unimplemented/reserved interrupt %d\n",
6090                             __func__, source);
6091 }
6092 
6093 static void handle_qsfp_int(struct hfi1_devdata *dd, u32 src_ctx, u64 reg)
6094 {
6095         /* src_ctx is always zero */
6096         struct hfi1_pportdata *ppd = dd->pport;
6097         unsigned long flags;
6098         u64 qsfp_int_mgmt = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N);
6099 
6100         if (reg & QSFP_HFI0_MODPRST_N) {
6101                 if (!qsfp_mod_present(ppd)) {
6102                         dd_dev_info(dd, "%s: QSFP module removed\n",
6103                                     __func__);
6104 
6105                         ppd->driver_link_ready = 0;
6106                         /*
6107                          * Cable removed, reset all our information about the
6108                          * cache and cable capabilities
6109                          */
6110 
6111                         spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
6112                         /*
6113                          * We don't set cache_refresh_required here as we expect
6114                          * an interrupt when a cable is inserted
6115                          */
6116                         ppd->qsfp_info.cache_valid = 0;
6117                         ppd->qsfp_info.reset_needed = 0;
6118                         ppd->qsfp_info.limiting_active = 0;
6119                         spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
6120                                                flags);
6121                         /* Invert the ModPresent pin now to detect plug-in */
6122                         write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT :
6123                                   ASIC_QSFP1_INVERT, qsfp_int_mgmt);
6124 
6125                         if ((ppd->offline_disabled_reason >
6126                           HFI1_ODR_MASK(
6127                           OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED)) ||
6128                           (ppd->offline_disabled_reason ==
6129                           HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE)))
6130                                 ppd->offline_disabled_reason =
6131                                 HFI1_ODR_MASK(
6132                                 OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED);
6133 
6134                         if (ppd->host_link_state == HLS_DN_POLL) {
6135                                 /*
6136                                  * The link is still in POLL. This means
6137                                  * that the normal link down processing
6138                                  * will not happen. We have to do it here
6139                                  * before turning the DC off.
6140                                  */
6141                                 queue_work(ppd->link_wq, &ppd->link_down_work);
6142                         }
6143                 } else {
6144                         dd_dev_info(dd, "%s: QSFP module inserted\n",
6145                                     __func__);
6146 
6147                         spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
6148                         ppd->qsfp_info.cache_valid = 0;
6149                         ppd->qsfp_info.cache_refresh_required = 1;
6150                         spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
6151                                                flags);
6152 
6153                         /*
6154                          * Stop inversion of ModPresent pin to detect
6155                          * removal of the cable
6156                          */
6157                         qsfp_int_mgmt &= ~(u64)QSFP_HFI0_MODPRST_N;
6158                         write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT :
6159                                   ASIC_QSFP1_INVERT, qsfp_int_mgmt);
6160 
6161                         ppd->offline_disabled_reason =
6162                                 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT);
6163                 }
6164         }
6165 
6166         if (reg & QSFP_HFI0_INT_N) {
6167                 dd_dev_info(dd, "%s: Interrupt received from QSFP module\n",
6168                             __func__);
6169                 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
6170                 ppd->qsfp_info.check_interrupt_flags = 1;
6171                 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock, flags);
6172         }
6173 
6174         /* Schedule the QSFP work only if there is a cable attached. */
6175         if (qsfp_mod_present(ppd))
6176                 queue_work(ppd->link_wq, &ppd->qsfp_info.qsfp_work);
6177 }
6178 
6179 static int request_host_lcb_access(struct hfi1_devdata *dd)
6180 {
6181         int ret;
6182 
6183         ret = do_8051_command(dd, HCMD_MISC,
6184                               (u64)HCMD_MISC_REQUEST_LCB_ACCESS <<
6185                               LOAD_DATA_FIELD_ID_SHIFT, NULL);
6186         if (ret != HCMD_SUCCESS) {
6187                 dd_dev_err(dd, "%s: command failed with error %d\n",
6188                            __func__, ret);
6189         }
6190         return ret == HCMD_SUCCESS ? 0 : -EBUSY;
6191 }
6192 
6193 static int request_8051_lcb_access(struct hfi1_devdata *dd)
6194 {
6195         int ret;
6196 
6197         ret = do_8051_command(dd, HCMD_MISC,
6198                               (u64)HCMD_MISC_GRANT_LCB_ACCESS <<
6199                               LOAD_DATA_FIELD_ID_SHIFT, NULL);
6200         if (ret != HCMD_SUCCESS) {
6201                 dd_dev_err(dd, "%s: command failed with error %d\n",
6202                            __func__, ret);
6203         }
6204         return ret == HCMD_SUCCESS ? 0 : -EBUSY;
6205 }
6206 
6207 /*
6208  * Set the LCB selector - allow host access.  The DCC selector always
6209  * points to the host.
6210  */
6211 static inline void set_host_lcb_access(struct hfi1_devdata *dd)
6212 {
6213         write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL,
6214                   DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK |
6215                   DC_DC8051_CFG_CSR_ACCESS_SEL_LCB_SMASK);
6216 }
6217 
6218 /*
6219  * Clear the LCB selector - allow 8051 access.  The DCC selector always
6220  * points to the host.
6221  */
6222 static inline void set_8051_lcb_access(struct hfi1_devdata *dd)
6223 {
6224         write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL,
6225                   DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK);
6226 }
6227 
6228 /*
6229  * Acquire LCB access from the 8051.  If the host already has access,
6230  * just increment a counter.  Otherwise, inform the 8051 that the
6231  * host is taking access.
6232  *
6233  * Returns:
6234  *      0 on success
6235  *      -EBUSY if the 8051 has control and cannot be disturbed
6236  *      -errno if unable to acquire access from the 8051
6237  */
6238 int acquire_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
6239 {
6240         struct hfi1_pportdata *ppd = dd->pport;
6241         int ret = 0;
6242 
6243         /*
6244          * Use the host link state lock so the operation of this routine
6245          * { link state check, selector change, count increment } can occur
6246          * as a unit against a link state change.  Otherwise there is a
6247          * race between the state change and the count increment.
6248          */
6249         if (sleep_ok) {
6250                 mutex_lock(&ppd->hls_lock);
6251         } else {
6252                 while (!mutex_trylock(&ppd->hls_lock))
6253                         udelay(1);
6254         }
6255 
6256         /* this access is valid only when the link is up */
6257         if (ppd->host_link_state & HLS_DOWN) {
6258                 dd_dev_info(dd, "%s: link state %s not up\n",
6259                             __func__, link_state_name(ppd->host_link_state));
6260                 ret = -EBUSY;
6261                 goto done;
6262         }
6263 
6264         if (dd->lcb_access_count == 0) {
6265                 ret = request_host_lcb_access(dd);
6266                 if (ret) {
6267                         dd_dev_err(dd,
6268                                    "%s: unable to acquire LCB access, err %d\n",
6269                                    __func__, ret);
6270                         goto done;
6271                 }
6272                 set_host_lcb_access(dd);
6273         }
6274         dd->lcb_access_count++;
6275 done:
6276         mutex_unlock(&ppd->hls_lock);
6277         return ret;
6278 }
6279 
6280 /*
6281  * Release LCB access by decrementing the use count.  If the count is moving
6282  * from 1 to 0, inform 8051 that it has control back.
6283  *
6284  * Returns:
6285  *      0 on success
6286  *      -errno if unable to release access to the 8051
6287  */
6288 int release_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
6289 {
6290         int ret = 0;
6291 
6292         /*
6293          * Use the host link state lock because the acquire needed it.
6294          * Here, we only need to keep { selector change, count decrement }
6295          * as a unit.
6296          */
6297         if (sleep_ok) {
6298                 mutex_lock(&dd->pport->hls_lock);
6299         } else {
6300                 while (!mutex_trylock(&dd->pport->hls_lock))
6301                         udelay(1);
6302         }
6303 
6304         if (dd->lcb_access_count == 0) {
6305                 dd_dev_err(dd, "%s: LCB access count is zero.  Skipping.\n",
6306                            __func__);
6307                 goto done;
6308         }
6309 
6310         if (dd->lcb_access_count == 1) {
6311                 set_8051_lcb_access(dd);
6312                 ret = request_8051_lcb_access(dd);
6313                 if (ret) {
6314                         dd_dev_err(dd,
6315                                    "%s: unable to release LCB access, err %d\n",
6316                                    __func__, ret);
6317                         /* restore host access if the grant didn't work */
6318                         set_host_lcb_access(dd);
6319                         goto done;
6320                 }
6321         }
6322         dd->lcb_access_count--;
6323 done:
6324         mutex_unlock(&dd->pport->hls_lock);
6325         return ret;
6326 }
6327 
6328 /*
6329  * Initialize LCB access variables and state.  Called during driver load,
6330  * after most of the initialization is finished.
6331  *
6332  * The DC default is LCB access on for the host.  The driver defaults to
6333  * leaving access to the 8051.  Assign access now - this constrains the call
6334  * to this routine to be after all LCB set-up is done.  In particular, after
6335  * hf1_init_dd() -> set_up_interrupts() -> clear_all_interrupts()
6336  */
6337 static void init_lcb_access(struct hfi1_devdata *dd)
6338 {
6339         dd->lcb_access_count = 0;
6340 }
6341 
6342 /*
6343  * Write a response back to a 8051 request.
6344  */
6345 static void hreq_response(struct hfi1_devdata *dd, u8 return_code, u16 rsp_data)
6346 {
6347         write_csr(dd, DC_DC8051_CFG_EXT_DEV_0,
6348                   DC_DC8051_CFG_EXT_DEV_0_COMPLETED_SMASK |
6349                   (u64)return_code <<
6350                   DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT |
6351                   (u64)rsp_data << DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
6352 }
6353 
6354 /*
6355  * Handle host requests from the 8051.
6356  */
6357 static void handle_8051_request(struct hfi1_pportdata *ppd)
6358 {
6359         struct hfi1_devdata *dd = ppd->dd;
6360         u64 reg;
6361         u16 data = 0;
6362         u8 type;
6363 
6364         reg = read_csr(dd, DC_DC8051_CFG_EXT_DEV_1);
6365         if ((reg & DC_DC8051_CFG_EXT_DEV_1_REQ_NEW_SMASK) == 0)
6366                 return; /* no request */
6367 
6368         /* zero out COMPLETED so the response is seen */
6369         write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, 0);
6370 
6371         /* extract request details */
6372         type = (reg >> DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_SHIFT)
6373                         & DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_MASK;
6374         data = (reg >> DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT)
6375                         & DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_MASK;
6376 
6377         switch (type) {
6378         case HREQ_LOAD_CONFIG:
6379         case HREQ_SAVE_CONFIG:
6380         case HREQ_READ_CONFIG:
6381         case HREQ_SET_TX_EQ_ABS:
6382         case HREQ_SET_TX_EQ_REL:
6383         case HREQ_ENABLE:
6384                 dd_dev_info(dd, "8051 request: request 0x%x not supported\n",
6385                             type);
6386                 hreq_response(dd, HREQ_NOT_SUPPORTED, 0);
6387                 break;
6388         case HREQ_LCB_RESET:
6389                 /* Put the LCB, RX FPE and TX FPE into reset */
6390                 write_csr(dd, DCC_CFG_RESET, LCB_RX_FPE_TX_FPE_INTO_RESET);
6391                 /* Make sure the write completed */
6392                 (void)read_csr(dd, DCC_CFG_RESET);
6393                 /* Hold the reset long enough to take effect */
6394                 udelay(1);
6395                 /* Take the LCB, RX FPE and TX FPE out of reset */
6396                 write_csr(dd, DCC_CFG_RESET, LCB_RX_FPE_TX_FPE_OUT_OF_RESET);
6397                 hreq_response(dd, HREQ_SUCCESS, 0);
6398 
6399                 break;
6400         case HREQ_CONFIG_DONE:
6401                 hreq_response(dd, HREQ_SUCCESS, 0);
6402                 break;
6403 
6404         case HREQ_INTERFACE_TEST:
6405                 hreq_response(dd, HREQ_SUCCESS, data);
6406                 break;
6407         default:
6408                 dd_dev_err(dd, "8051 request: unknown request 0x%x\n", type);
6409                 hreq_response(dd, HREQ_NOT_SUPPORTED, 0);
6410                 break;
6411         }
6412 }
6413 
6414 /*
6415  * Set up allocation unit vaulue.
6416  */
6417 void set_up_vau(struct hfi1_devdata *dd, u8 vau)
6418 {
6419         u64 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
6420 
6421         /* do not modify other values in the register */
6422         reg &= ~SEND_CM_GLOBAL_CREDIT_AU_SMASK;
6423         reg |= (u64)vau << SEND_CM_GLOBAL_CREDIT_AU_SHIFT;
6424         write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
6425 }
6426 
6427 /*
6428  * Set up initial VL15 credits of the remote.  Assumes the rest of
6429  * the CM credit registers are zero from a previous global or credit reset.
6430  * Shared limit for VL15 will always be 0.
6431  */
6432 void set_up_vl15(struct hfi1_devdata *dd, u16 vl15buf)
6433 {
6434         u64 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
6435 
6436         /* set initial values for total and shared credit limit */
6437         reg &= ~(SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SMASK |
6438                  SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SMASK);
6439 
6440         /*
6441          * Set total limit to be equal to VL15 credits.
6442          * Leave shared limit at 0.
6443          */
6444         reg |= (u64)vl15buf << SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT;
6445         write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
6446 
6447         write_csr(dd, SEND_CM_CREDIT_VL15, (u64)vl15buf
6448                   << SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT);
6449 }
6450 
6451 /*
6452  * Zero all credit details from the previous connection and
6453  * reset the CM manager's internal counters.
6454  */
6455 void reset_link_credits(struct hfi1_devdata *dd)
6456 {
6457         int i;
6458 
6459         /* remove all previous VL credit limits */
6460         for (i = 0; i < TXE_NUM_DATA_VL; i++)
6461                 write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0);
6462         write_csr(dd, SEND_CM_CREDIT_VL15, 0);
6463         write_csr(dd, SEND_CM_GLOBAL_CREDIT, 0);
6464         /* reset the CM block */
6465         pio_send_control(dd, PSC_CM_RESET);
6466         /* reset cached value */
6467         dd->vl15buf_cached = 0;
6468 }
6469 
6470 /* convert a vCU to a CU */
6471 static u32 vcu_to_cu(u8 vcu)
6472 {
6473         return 1 << vcu;
6474 }
6475 
6476 /* convert a CU to a vCU */
6477 static u8 cu_to_vcu(u32 cu)
6478 {
6479         return ilog2(cu);
6480 }
6481 
6482 /* convert a vAU to an AU */
6483 static u32 vau_to_au(u8 vau)
6484 {
6485         return 8 * (1 << vau);
6486 }
6487 
6488 static void set_linkup_defaults(struct hfi1_pportdata *ppd)
6489 {
6490         ppd->sm_trap_qp = 0x0;
6491         ppd->sa_qp = 0x1;
6492 }
6493 
6494 /*
6495  * Graceful LCB shutdown.  This leaves the LCB FIFOs in reset.
6496  */
6497 static void lcb_shutdown(struct hfi1_devdata *dd, int abort)
6498 {
6499         u64 reg;
6500 
6501         /* clear lcb run: LCB_CFG_RUN.EN = 0 */
6502         write_csr(dd, DC_LCB_CFG_RUN, 0);
6503         /* set tx fifo reset: LCB_CFG_TX_FIFOS_RESET.VAL = 1 */
6504         write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET,
6505                   1ull << DC_LCB_CFG_TX_FIFOS_RESET_VAL_SHIFT);
6506         /* set dcc reset csr: DCC_CFG_RESET.{reset_lcb,reset_rx_fpe} = 1 */
6507         dd->lcb_err_en = read_csr(dd, DC_LCB_ERR_EN);
6508         reg = read_csr(dd, DCC_CFG_RESET);
6509         write_csr(dd, DCC_CFG_RESET, reg |
6510                   DCC_CFG_RESET_RESET_LCB | DCC_CFG_RESET_RESET_RX_FPE);
6511         (void)read_csr(dd, DCC_CFG_RESET); /* make sure the write completed */
6512         if (!abort) {
6513                 udelay(1);    /* must hold for the longer of 16cclks or 20ns */
6514                 write_csr(dd, DCC_CFG_RESET, reg);
6515                 write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en);
6516         }
6517 }
6518 
6519 /*
6520  * This routine should be called after the link has been transitioned to
6521  * OFFLINE (OFFLINE state has the side effect of putting the SerDes into
6522  * reset).
6523  *
6524  * The expectation is that the caller of this routine would have taken
6525  * care of properly transitioning the link into the correct state.
6526  * NOTE: the caller needs to acquire the dd->dc8051_lock lock
6527  *       before calling this function.
6528  */
6529 static void _dc_shutdown(struct hfi1_devdata *dd)
6530 {
6531         lockdep_assert_held(&dd->dc8051_lock);
6532 
6533         if (dd->dc_shutdown)
6534                 return;
6535 
6536         dd->dc_shutdown = 1;
6537         /* Shutdown the LCB */
6538         lcb_shutdown(dd, 1);
6539         /*
6540          * Going to OFFLINE would have causes the 8051 to put the
6541          * SerDes into reset already. Just need to shut down the 8051,
6542          * itself.
6543          */
6544         write_csr(dd, DC_DC8051_CFG_RST, 0x1);
6545 }
6546 
6547 static void dc_shutdown(struct hfi1_devdata *dd)
6548 {
6549         mutex_lock(&dd->dc8051_lock);
6550         _dc_shutdown(dd);
6551         mutex_unlock(&dd->dc8051_lock);
6552 }
6553 
6554 /*
6555  * Calling this after the DC has been brought out of reset should not
6556  * do any damage.
6557  * NOTE: the caller needs to acquire the dd->dc8051_lock lock
6558  *       before calling this function.
6559  */
6560 static void _dc_start(struct hfi1_devdata *dd)
6561 {
6562         lockdep_assert_held(&dd->dc8051_lock);
6563 
6564         if (!dd->dc_shutdown)
6565                 return;
6566 
6567         /* Take the 8051 out of reset */
6568         write_csr(dd, DC_DC8051_CFG_RST, 0ull);
6569         /* Wait until 8051 is ready */
6570         if (wait_fm_ready(dd, TIMEOUT_8051_START))
6571                 dd_dev_err(dd, "%s: timeout starting 8051 firmware\n",
6572                            __func__);
6573 
6574         /* Take away reset for LCB and RX FPE (set in lcb_shutdown). */
6575         write_csr(dd, DCC_CFG_RESET, LCB_RX_FPE_TX_FPE_OUT_OF_RESET);
6576         /* lcb_shutdown() with abort=1 does not restore these */
6577         write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en);
6578         dd->dc_shutdown = 0;
6579 }
6580 
6581 static void dc_start(struct hfi1_devdata *dd)
6582 {
6583         mutex_lock(&dd->dc8051_lock);
6584         _dc_start(dd);
6585         mutex_unlock(&dd->dc8051_lock);
6586 }
6587 
6588 /*
6589  * These LCB adjustments are for the Aurora SerDes core in the FPGA.
6590  */
6591 static void adjust_lcb_for_fpga_serdes(struct hfi1_devdata *dd)
6592 {
6593         u64 rx_radr, tx_radr;
6594         u32 version;
6595 
6596         if (dd->icode != ICODE_FPGA_EMULATION)
6597                 return;
6598 
6599         /*
6600          * These LCB defaults on emulator _s are good, nothing to do here:
6601          *      LCB_CFG_TX_FIFOS_RADR
6602          *      LCB_CFG_RX_FIFOS_RADR
6603          *      LCB_CFG_LN_DCLK
6604          *      LCB_CFG_IGNORE_LOST_RCLK
6605          */
6606         if (is_emulator_s(dd))
6607                 return;
6608         /* else this is _p */
6609 
6610         version = emulator_rev(dd);
6611         if (!is_ax(dd))
6612                 version = 0x2d; /* all B0 use 0x2d or higher settings */
6613 
6614         if (version <= 0x12) {
6615                 /* release 0x12 and below */
6616 
6617                 /*
6618                  * LCB_CFG_RX_FIFOS_RADR.RST_VAL = 0x9
6619                  * LCB_CFG_RX_FIFOS_RADR.OK_TO_JUMP_VAL = 0x9
6620                  * LCB_CFG_RX_FIFOS_RADR.DO_NOT_JUMP_VAL = 0xa
6621                  */
6622                 rx_radr =
6623                       0xaull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6624                     | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6625                     | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6626                 /*
6627                  * LCB_CFG_TX_FIFOS_RADR.ON_REINIT = 0 (default)
6628                  * LCB_CFG_TX_FIFOS_RADR.RST_VAL = 6
6629                  */
6630                 tx_radr = 6ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6631         } else if (version <= 0x18) {
6632                 /* release 0x13 up to 0x18 */
6633                 /* LCB_CFG_RX_FIFOS_RADR = 0x988 */
6634                 rx_radr =
6635                       0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6636                     | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6637                     | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6638                 tx_radr = 7ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6639         } else if (version == 0x19) {
6640                 /* release 0x19 */
6641                 /* LCB_CFG_RX_FIFOS_RADR = 0xa99 */
6642                 rx_radr =
6643                       0xAull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6644                     | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6645                     | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6646                 tx_radr = 3ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6647         } else if (version == 0x1a) {
6648                 /* release 0x1a */
6649                 /* LCB_CFG_RX_FIFOS_RADR = 0x988 */
6650                 rx_radr =
6651                       0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6652                     | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6653                     | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6654                 tx_radr = 7ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6655                 write_csr(dd, DC_LCB_CFG_LN_DCLK, 1ull);
6656         } else {
6657                 /* release 0x1b and higher */
6658                 /* LCB_CFG_RX_FIFOS_RADR = 0x877 */
6659                 rx_radr =
6660                       0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6661                     | 0x7ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6662                     | 0x7ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6663                 tx_radr = 3ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6664         }
6665 
6666         write_csr(dd, DC_LCB_CFG_RX_FIFOS_RADR, rx_radr);
6667         /* LCB_CFG_IGNORE_LOST_RCLK.EN = 1 */
6668         write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK,
6669                   DC_LCB_CFG_IGNORE_LOST_RCLK_EN_SMASK);
6670         write_csr(dd, DC_LCB_CFG_TX_FIFOS_RADR, tx_radr);
6671 }
6672 
6673 /*
6674  * Handle a SMA idle message
6675  *
6676  * This is a work-queue function outside of the interrupt.
6677  */
6678 void handle_sma_message(struct work_struct *work)
6679 {
6680         struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6681                                                         sma_message_work);
6682         struct hfi1_devdata *dd = ppd->dd;
6683         u64 msg;
6684         int ret;
6685 
6686         /*
6687          * msg is bytes 1-4 of the 40-bit idle message - the command code
6688          * is stripped off
6689          */
6690         ret = read_idle_sma(dd, &msg);
6691         if (ret)
6692                 return;
6693         dd_dev_info(dd, "%s: SMA message 0x%llx\n", __func__, msg);
6694         /*
6695          * React to the SMA message.  Byte[1] (0 for us) is the command.
6696          */
6697         switch (msg & 0xff) {
6698         case SMA_IDLE_ARM:
6699                 /*
6700                  * See OPAv1 table 9-14 - HFI and External Switch Ports Key
6701                  * State Transitions
6702                  *
6703                  * Only expected in INIT or ARMED, discard otherwise.
6704                  */
6705                 if (ppd->host_link_state & (HLS_UP_INIT | HLS_UP_ARMED))
6706                         ppd->neighbor_normal = 1;
6707                 break;
6708         case SMA_IDLE_ACTIVE:
6709                 /*
6710                  * See OPAv1 table 9-14 - HFI and External Switch Ports Key
6711                  * State Transitions
6712                  *
6713                  * Can activate the node.  Discard otherwise.
6714                  */
6715                 if (ppd->host_link_state == HLS_UP_ARMED &&
6716                     ppd->is_active_optimize_enabled) {
6717                         ppd->neighbor_normal = 1;
6718                         ret = set_link_state(ppd, HLS_UP_ACTIVE);
6719                         if (ret)
6720                                 dd_dev_err(
6721                                         dd,
6722                                         "%s: received Active SMA idle message, couldn't set link to Active\n",
6723                                         __func__);
6724                 }
6725                 break;
6726         default:
6727                 dd_dev_err(dd,
6728                            "%s: received unexpected SMA idle message 0x%llx\n",
6729                            __func__, msg);
6730                 break;
6731         }
6732 }
6733 
6734 static void adjust_rcvctrl(struct hfi1_devdata *dd, u64 add, u64 clear)
6735 {
6736         u64 rcvctrl;
6737         unsigned long flags;
6738 
6739         spin_lock_irqsave(&dd->rcvctrl_lock, flags);
6740         rcvctrl = read_csr(dd, RCV_CTRL);
6741         rcvctrl |= add;
6742         rcvctrl &= ~clear;
6743         write_csr(dd, RCV_CTRL, rcvctrl);
6744         spin_unlock_irqrestore(&dd->rcvctrl_lock, flags);
6745 }
6746 
6747 static inline void add_rcvctrl(struct hfi1_devdata *dd, u64 add)
6748 {
6749         adjust_rcvctrl(dd, add, 0);
6750 }
6751 
6752 static inline void clear_rcvctrl(struct hfi1_devdata *dd, u64 clear)
6753 {
6754         adjust_rcvctrl(dd, 0, clear);
6755 }
6756 
6757 /*
6758  * Called from all interrupt handlers to start handling an SPC freeze.
6759  */
6760 void start_freeze_handling(struct hfi1_pportdata *ppd, int flags)
6761 {
6762         struct hfi1_devdata *dd = ppd->dd;
6763         struct send_context *sc;
6764         int i;
6765         int sc_flags;
6766 
6767         if (flags & FREEZE_SELF)
6768                 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
6769 
6770         /* enter frozen mode */
6771         dd->flags |= HFI1_FROZEN;
6772 
6773         /* notify all SDMA engines that they are going into a freeze */
6774         sdma_freeze_notify(dd, !!(flags & FREEZE_LINK_DOWN));
6775 
6776         sc_flags = SCF_FROZEN | SCF_HALTED | (flags & FREEZE_LINK_DOWN ?
6777                                               SCF_LINK_DOWN : 0);
6778         /* do halt pre-handling on all enabled send contexts */
6779         for (i = 0; i < dd->num_send_contexts; i++) {
6780                 sc = dd->send_contexts[i].sc;
6781                 if (sc && (sc->flags & SCF_ENABLED))
6782                         sc_stop(sc, sc_flags);
6783         }
6784 
6785         /* Send context are frozen. Notify user space */
6786         hfi1_set_uevent_bits(ppd, _HFI1_EVENT_FROZEN_BIT);
6787 
6788         if (flags & FREEZE_ABORT) {
6789                 dd_dev_err(dd,
6790                            "Aborted freeze recovery. Please REBOOT system\n");
6791                 return;
6792         }
6793         /* queue non-interrupt handler */
6794         queue_work(ppd->hfi1_wq, &ppd->freeze_work);
6795 }
6796 
6797 /*
6798  * Wait until all 4 sub-blocks indicate that they have frozen or unfrozen,
6799  * depending on the "freeze" parameter.
6800  *
6801  * No need to return an error if it times out, our only option
6802  * is to proceed anyway.
6803  */
6804 static void wait_for_freeze_status(struct hfi1_devdata *dd, int freeze)
6805 {
6806         unsigned long timeout;
6807         u64 reg;
6808 
6809         timeout = jiffies + msecs_to_jiffies(FREEZE_STATUS_TIMEOUT);
6810         while (1) {
6811                 reg = read_csr(dd, CCE_STATUS);
6812                 if (freeze) {
6813                         /* waiting until all indicators are set */
6814                         if ((reg & ALL_FROZE) == ALL_FROZE)
6815                                 return; /* all done */
6816                 } else {
6817                         /* waiting until all indicators are clear */
6818                         if ((reg & ALL_FROZE) == 0)
6819                                 return; /* all done */
6820                 }
6821 
6822                 if (time_after(jiffies, timeout)) {
6823                         dd_dev_err(dd,
6824                                    "Time out waiting for SPC %sfreeze, bits 0x%llx, expecting 0x%llx, continuing",
6825                                    freeze ? "" : "un", reg & ALL_FROZE,
6826                                    freeze ? ALL_FROZE : 0ull);
6827                         return;
6828                 }
6829                 usleep_range(80, 120);
6830         }
6831 }
6832 
6833 /*
6834  * Do all freeze handling for the RXE block.
6835  */
6836 static void rxe_freeze(struct hfi1_devdata *dd)
6837 {
6838         int i;
6839         struct hfi1_ctxtdata *rcd;
6840 
6841         /* disable port */
6842         clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
6843 
6844         /* disable all receive contexts */
6845         for (i = 0; i < dd->num_rcv_contexts; i++) {
6846                 rcd = hfi1_rcd_get_by_index(dd, i);
6847                 hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS, rcd);
6848                 hfi1_rcd_put(rcd);
6849         }
6850 }
6851 
6852 /*
6853  * Unfreeze handling for the RXE block - kernel contexts only.
6854  * This will also enable the port.  User contexts will do unfreeze
6855  * handling on a per-context basis as they call into the driver.
6856  *
6857  */
6858 static void rxe_kernel_unfreeze(struct hfi1_devdata *dd)
6859 {
6860         u32 rcvmask;
6861         u16 i;
6862         struct hfi1_ctxtdata *rcd;
6863 
6864         /* enable all kernel contexts */
6865         for (i = 0; i < dd->num_rcv_contexts; i++) {
6866                 rcd = hfi1_rcd_get_by_index(dd, i);
6867 
6868                 /* Ensure all non-user contexts(including vnic) are enabled */
6869                 if (!rcd ||
6870                     (i >= dd->first_dyn_alloc_ctxt && !rcd->is_vnic)) {
6871                         hfi1_rcd_put(rcd);
6872                         continue;
6873                 }
6874                 rcvmask = HFI1_RCVCTRL_CTXT_ENB;
6875                 /* HFI1_RCVCTRL_TAILUPD_[ENB|DIS] needs to be set explicitly */
6876                 rcvmask |= rcd->rcvhdrtail_kvaddr ?
6877                         HFI1_RCVCTRL_TAILUPD_ENB : HFI1_RCVCTRL_TAILUPD_DIS;
6878                 hfi1_rcvctrl(dd, rcvmask, rcd);
6879                 hfi1_rcd_put(rcd);
6880         }
6881 
6882         /* enable port */
6883         add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
6884 }
6885 
6886 /*
6887  * Non-interrupt SPC freeze handling.
6888  *
6889  * This is a work-queue function outside of the triggering interrupt.
6890  */
6891 void handle_freeze(struct work_struct *work)
6892 {
6893         struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6894                                                                 freeze_work);
6895         struct hfi1_devdata *dd = ppd->dd;
6896 
6897         /* wait for freeze indicators on all affected blocks */
6898         wait_for_freeze_status(dd, 1);
6899 
6900         /* SPC is now frozen */
6901 
6902         /* do send PIO freeze steps */
6903         pio_freeze(dd);
6904 
6905         /* do send DMA freeze steps */
6906         sdma_freeze(dd);
6907 
6908         /* do send egress freeze steps - nothing to do */
6909 
6910         /* do receive freeze steps */
6911         rxe_freeze(dd);
6912 
6913         /*
6914          * Unfreeze the hardware - clear the freeze, wait for each
6915          * block's frozen bit to clear, then clear the frozen flag.
6916          */
6917         write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK);
6918         wait_for_freeze_status(dd, 0);
6919 
6920         if (is_ax(dd)) {
6921                 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
6922                 wait_for_freeze_status(dd, 1);
6923                 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK);
6924                 wait_for_freeze_status(dd, 0);
6925         }
6926 
6927         /* do send PIO unfreeze steps for kernel contexts */
6928         pio_kernel_unfreeze(dd);
6929 
6930         /* do send DMA unfreeze steps */
6931         sdma_unfreeze(dd);
6932 
6933         /* do send egress unfreeze steps - nothing to do */
6934 
6935         /* do receive unfreeze steps for kernel contexts */
6936         rxe_kernel_unfreeze(dd);
6937 
6938         /*
6939          * The unfreeze procedure touches global device registers when
6940          * it disables and re-enables RXE. Mark the device unfrozen
6941          * after all that is done so other parts of the driver waiting
6942          * for the device to unfreeze don't do things out of order.
6943          *
6944          * The above implies that the meaning of HFI1_FROZEN flag is
6945          * "Device has gone into freeze mode and freeze mode handling
6946          * is still in progress."
6947          *
6948          * The flag will be removed when freeze mode processing has
6949          * completed.
6950          */
6951         dd->flags &= ~HFI1_FROZEN;
6952         wake_up(&dd->event_queue);
6953 
6954         /* no longer frozen */
6955 }
6956 
6957 /**
6958  * update_xmit_counters - update PortXmitWait/PortVlXmitWait
6959  * counters.
6960  * @ppd: info of physical Hfi port
6961  * @link_width: new link width after link up or downgrade
6962  *
6963  * Update the PortXmitWait and PortVlXmitWait counters after
6964  * a link up or downgrade event to reflect a link width change.
6965  */
6966 static void update_xmit_counters(struct hfi1_pportdata *ppd, u16 link_width)
6967 {
6968         int i;
6969         u16 tx_width;
6970         u16 link_speed;
6971 
6972         tx_width = tx_link_width(link_width);
6973         link_speed = get_link_speed(ppd->link_speed_active);
6974 
6975         /*
6976          * There are C_VL_COUNT number of PortVLXmitWait counters.
6977          * Adding 1 to C_VL_COUNT to include the PortXmitWait counter.
6978          */
6979         for (i = 0; i < C_VL_COUNT + 1; i++)
6980                 get_xmit_wait_counters(ppd, tx_width, link_speed, i);
6981 }
6982 
6983 /*
6984  * Handle a link up interrupt from the 8051.
6985  *
6986  * This is a work-queue function outside of the interrupt.
6987  */
6988 void handle_link_up(struct work_struct *work)
6989 {
6990         struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6991                                                   link_up_work);
6992         struct hfi1_devdata *dd = ppd->dd;
6993 
6994         set_link_state(ppd, HLS_UP_INIT);
6995 
6996         /* cache the read of DC_LCB_STS_ROUND_TRIP_LTP_CNT */
6997         read_ltp_rtt(dd);
6998         /*
6999          * OPA specifies that certain counters are cleared on a transition
7000          * to link up, so do that.
7001          */
7002         clear_linkup_counters(dd);
7003         /*
7004          * And (re)set link up default values.
7005          */
7006         set_linkup_defaults(ppd);
7007 
7008         /*
7009          * Set VL15 credits. Use cached value from verify cap interrupt.
7010          * In case of quick linkup or simulator, vl15 value will be set by
7011          * handle_linkup_change. VerifyCap interrupt handler will not be
7012          * called in those scenarios.
7013          */
7014         if (!(quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR))
7015                 set_up_vl15(dd, dd->vl15buf_cached);
7016 
7017         /* enforce link speed enabled */
7018         if ((ppd->link_speed_active & ppd->link_speed_enabled) == 0) {
7019                 /* oops - current speed is not enabled, bounce */
7020                 dd_dev_err(dd,
7021                            "Link speed active 0x%x is outside enabled 0x%x, downing link\n",
7022                            ppd->link_speed_active, ppd->link_speed_enabled);
7023                 set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SPEED_POLICY, 0,
7024                                      OPA_LINKDOWN_REASON_SPEED_POLICY);
7025                 set_link_state(ppd, HLS_DN_OFFLINE);
7026                 start_link(ppd);
7027         }
7028 }
7029 
7030 /*
7031  * Several pieces of LNI information were cached for SMA in ppd.
7032  * Reset these on link down
7033  */
7034 static void reset_neighbor_info(struct hfi1_pportdata *ppd)
7035 {
7036         ppd->neighbor_guid = 0;
7037         ppd->neighbor_port_number = 0;
7038         ppd->neighbor_type = 0;
7039         ppd->neighbor_fm_security = 0;
7040 }
7041 
7042 static const char * const link_down_reason_strs[] = {
7043         [OPA_LINKDOWN_REASON_NONE] = "None",
7044         [OPA_LINKDOWN_REASON_RCV_ERROR_0] = "Receive error 0",
7045         [OPA_LINKDOWN_REASON_BAD_PKT_LEN] = "Bad packet length",
7046         [OPA_LINKDOWN_REASON_PKT_TOO_LONG] = "Packet too long",
7047         [OPA_LINKDOWN_REASON_PKT_TOO_SHORT] = "Packet too short",
7048         [OPA_LINKDOWN_REASON_BAD_SLID] = "Bad SLID",
7049         [OPA_LINKDOWN_REASON_BAD_DLID] = "Bad DLID",
7050         [OPA_LINKDOWN_REASON_BAD_L2] = "Bad L2",
7051         [OPA_LINKDOWN_REASON_BAD_SC] = "Bad SC",
7052         [OPA_LINKDOWN_REASON_RCV_ERROR_8] = "Receive error 8",
7053         [OPA_LINKDOWN_REASON_BAD_MID_TAIL] = "Bad mid tail",
7054         [OPA_LINKDOWN_REASON_RCV_ERROR_10] = "Receive error 10",
7055         [OPA_LINKDOWN_REASON_PREEMPT_ERROR] = "Preempt error",
7056         [OPA_LINKDOWN_REASON_PREEMPT_VL15] = "Preempt vl15",
7057         [OPA_LINKDOWN_REASON_BAD_VL_MARKER] = "Bad VL marker",
7058         [OPA_LINKDOWN_REASON_RCV_ERROR_14] = "Receive error 14",
7059         [OPA_LINKDOWN_REASON_RCV_ERROR_15] = "Receive error 15",
7060         [OPA_LINKDOWN_REASON_BAD_HEAD_DIST] = "Bad head distance",
7061         [OPA_LINKDOWN_REASON_BAD_TAIL_DIST] = "Bad tail distance",
7062         [OPA_LINKDOWN_REASON_BAD_CTRL_DIST] = "Bad control distance",
7063         [OPA_LINKDOWN_REASON_BAD_CREDIT_ACK] = "Bad credit ack",
7064         [OPA_LINKDOWN_REASON_UNSUPPORTED_VL_MARKER] = "Unsupported VL marker",
7065         [OPA_LINKDOWN_REASON_BAD_PREEMPT] = "Bad preempt",
7066         [OPA_LINKDOWN_REASON_BAD_CONTROL_FLIT] = "Bad control flit",
7067         [OPA_LINKDOWN_REASON_EXCEED_MULTICAST_LIMIT] = "Exceed multicast limit",
7068         [OPA_LINKDOWN_REASON_RCV_ERROR_24] = "Receive error 24",
7069         [OPA_LINKDOWN_REASON_RCV_ERROR_25] = "Receive error 25",
7070         [OPA_LINKDOWN_REASON_RCV_ERROR_26] = "Receive error 26",
7071         [OPA_LINKDOWN_REASON_RCV_ERROR_27] = "Receive error 27",
7072         [OPA_LINKDOWN_REASON_RCV_ERROR_28] = "Receive error 28",
7073         [OPA_LINKDOWN_REASON_RCV_ERROR_29] = "Receive error 29",
7074         [OPA_LINKDOWN_REASON_RCV_ERROR_30] = "Receive error 30",
7075         [OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN] =
7076                                         "Excessive buffer overrun",
7077         [OPA_LINKDOWN_REASON_UNKNOWN] = "Unknown",
7078         [OPA_LINKDOWN_REASON_REBOOT] = "Reboot",
7079         [OPA_LINKDOWN_REASON_NEIGHBOR_UNKNOWN] = "Neighbor unknown",
7080         [OPA_LINKDOWN_REASON_FM_BOUNCE] = "FM bounce",
7081         [OPA_LINKDOWN_REASON_SPEED_POLICY] = "Speed policy",
7082         [OPA_LINKDOWN_REASON_WIDTH_POLICY] = "Width policy",
7083         [OPA_LINKDOWN_REASON_DISCONNECTED] = "Disconnected",
7084         [OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED] =
7085                                         "Local media not installed",
7086         [OPA_LINKDOWN_REASON_NOT_INSTALLED] = "Not installed",
7087         [OPA_LINKDOWN_REASON_CHASSIS_CONFIG] = "Chassis config",
7088         [OPA_LINKDOWN_REASON_END_TO_END_NOT_INSTALLED] =
7089                                         "End to end not installed",
7090         [OPA_LINKDOWN_REASON_POWER_POLICY] = "Power policy",
7091         [OPA_LINKDOWN_REASON_LINKSPEED_POLICY] = "Link speed policy",
7092         [OPA_LINKDOWN_REASON_LINKWIDTH_POLICY] = "Link width policy",
7093         [OPA_LINKDOWN_REASON_SWITCH_MGMT] = "Switch management",
7094         [OPA_LINKDOWN_REASON_SMA_DISABLED] = "SMA disabled",
7095         [OPA_LINKDOWN_REASON_TRANSIENT] = "Transient"
7096 };
7097 
7098 /* return the neighbor link down reason string */
7099 static const char *link_down_reason_str(u8 reason)
7100 {
7101         const char *str = NULL;
7102 
7103         if (reason < ARRAY_SIZE(link_down_reason_strs))
7104                 str = link_down_reason_strs[reason];
7105         if (!str)
7106                 str = "(invalid)";
7107 
7108         return str;
7109 }
7110 
7111 /*
7112  * Handle a link down interrupt from the 8051.
7113  *
7114  * This is a work-queue function outside of the interrupt.
7115  */
7116 void handle_link_down(struct work_struct *work)
7117 {
7118         u8 lcl_reason, neigh_reason = 0;
7119         u8 link_down_reason;
7120         struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7121                                                   link_down_work);
7122         int was_up;
7123         static const char ldr_str[] = "Link down reason: ";
7124 
7125         if ((ppd->host_link_state &
7126              (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) &&
7127              ppd->port_type == PORT_TYPE_FIXED)
7128                 ppd->offline_disabled_reason =
7129                         HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NOT_INSTALLED);
7130 
7131         /* Go offline first, then deal with reading/writing through 8051 */
7132         was_up = !!(ppd->host_link_state & HLS_UP);
7133         set_link_state(ppd, HLS_DN_OFFLINE);
7134         xchg(&ppd->is_link_down_queued, 0);
7135 
7136         if (was_up) {
7137                 lcl_reason = 0;
7138                 /* link down reason is only valid if the link was up */
7139                 read_link_down_reason(ppd->dd, &link_down_reason);
7140                 switch (link_down_reason) {
7141                 case LDR_LINK_TRANSFER_ACTIVE_LOW:
7142                         /* the link went down, no idle message reason */
7143                         dd_dev_info(ppd->dd, "%sUnexpected link down\n",
7144                                     ldr_str);
7145                         break;
7146                 case LDR_RECEIVED_LINKDOWN_IDLE_MSG:
7147                         /*
7148                          * The neighbor reason is only valid if an idle message
7149                          * was received for it.
7150                          */
7151                         read_planned_down_reason_code(ppd->dd, &neigh_reason);
7152                         dd_dev_info(ppd->dd,
7153                                     "%sNeighbor link down message %d, %s\n",
7154                                     ldr_str, neigh_reason,
7155                                     link_down_reason_str(neigh_reason));
7156                         break;
7157                 case LDR_RECEIVED_HOST_OFFLINE_REQ:
7158                         dd_dev_info(ppd->dd,
7159                                     "%sHost requested link to go offline\n",
7160                                     ldr_str);
7161                         break;
7162                 default:
7163                         dd_dev_info(ppd->dd, "%sUnknown reason 0x%x\n",
7164                                     ldr_str, link_down_reason);
7165                         break;
7166                 }
7167 
7168                 /*
7169                  * If no reason, assume peer-initiated but missed
7170                  * LinkGoingDown idle flits.
7171                  */
7172                 if (neigh_reason == 0)
7173                         lcl_reason = OPA_LINKDOWN_REASON_NEIGHBOR_UNKNOWN;
7174         } else {
7175                 /* went down while polling or going up */
7176                 lcl_reason = OPA_LINKDOWN_REASON_TRANSIENT;
7177         }
7178 
7179         set_link_down_reason(ppd, lcl_reason, neigh_reason, 0);
7180 
7181         /* inform the SMA when the link transitions from up to down */
7182         if (was_up && ppd->local_link_down_reason.sma == 0 &&
7183             ppd->neigh_link_down_reason.sma == 0) {
7184                 ppd->local_link_down_reason.sma =
7185                                         ppd->local_link_down_reason.latest;
7186                 ppd->neigh_link_down_reason.sma =
7187                                         ppd->neigh_link_down_reason.latest;
7188         }
7189 
7190         reset_neighbor_info(ppd);
7191 
7192         /* disable the port */
7193         clear_rcvctrl(ppd->dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
7194 
7195         /*
7196          * If there is no cable attached, turn the DC off. Otherwise,
7197          * start the link bring up.
7198          */
7199         if (ppd->port_type == PORT_TYPE_QSFP && !qsfp_mod_present(ppd))
7200                 dc_shutdown(ppd->dd);
7201         else
7202                 start_link(ppd);
7203 }
7204 
7205 void handle_link_bounce(struct work_struct *work)
7206 {
7207         struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7208                                                         link_bounce_work);
7209 
7210         /*
7211          * Only do something if the link is currently up.
7212          */
7213         if (ppd->host_link_state & HLS_UP) {
7214                 set_link_state(ppd, HLS_DN_OFFLINE);
7215                 start_link(ppd);
7216         } else {
7217                 dd_dev_info(ppd->dd, "%s: link not up (%s), nothing to do\n",
7218                             __func__, link_state_name(ppd->host_link_state));
7219         }
7220 }
7221 
7222 /*
7223  * Mask conversion: Capability exchange to Port LTP.  The capability
7224  * exchange has an implicit 16b CRC that is mandatory.
7225  */
7226 static int cap_to_port_ltp(int cap)
7227 {
7228         int port_ltp = PORT_LTP_CRC_MODE_16; /* this mode is mandatory */
7229 
7230         if (cap & CAP_CRC_14B)
7231                 port_ltp |= PORT_LTP_CRC_MODE_14;
7232         if (cap & CAP_CRC_48B)
7233                 port_ltp |= PORT_LTP_CRC_MODE_48;
7234         if (cap & CAP_CRC_12B_16B_PER_LANE)
7235                 port_ltp |= PORT_LTP_CRC_MODE_PER_LANE;
7236 
7237         return port_ltp;
7238 }
7239 
7240 /*
7241  * Convert an OPA Port LTP mask to capability mask
7242  */
7243 int port_ltp_to_cap(int port_ltp)
7244 {
7245         int cap_mask = 0;
7246 
7247         if (port_ltp & PORT_LTP_CRC_MODE_14)
7248                 cap_mask |= CAP_CRC_14B;
7249         if (port_ltp & PORT_LTP_CRC_MODE_48)
7250                 cap_mask |= CAP_CRC_48B;
7251         if (port_ltp & PORT_LTP_CRC_MODE_PER_LANE)
7252                 cap_mask |= CAP_CRC_12B_16B_PER_LANE;
7253 
7254         return cap_mask;
7255 }
7256 
7257 /*
7258  * Convert a single DC LCB CRC mode to an OPA Port LTP mask.
7259  */
7260 static int lcb_to_port_ltp(int lcb_crc)
7261 {
7262         int port_ltp = 0;
7263 
7264         if (lcb_crc == LCB_CRC_12B_16B_PER_LANE)
7265                 port_ltp = PORT_LTP_CRC_MODE_PER_LANE;
7266         else if (lcb_crc == LCB_CRC_48B)
7267                 port_ltp = PORT_LTP_CRC_MODE_48;
7268         else if (lcb_crc == LCB_CRC_14B)
7269                 port_ltp = PORT_LTP_CRC_MODE_14;
7270         else
7271                 port_ltp = PORT_LTP_CRC_MODE_16;
7272 
7273         return port_ltp;
7274 }
7275 
7276 static void clear_full_mgmt_pkey(struct hfi1_pportdata *ppd)
7277 {
7278         if (ppd->pkeys[2] != 0) {
7279                 ppd->pkeys[2] = 0;
7280                 (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
7281                 hfi1_event_pkey_change(ppd->dd, ppd->port);
7282         }
7283 }
7284 
7285 /*
7286  * Convert the given link width to the OPA link width bitmask.
7287  */
7288 static u16 link_width_to_bits(struct hfi1_devdata *dd, u16 width)
7289 {
7290         switch (width) {
7291         case 0:
7292                 /*
7293                  * Simulator and quick linkup do not set the width.
7294                  * Just set it to 4x without complaint.
7295                  */
7296                 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR || quick_linkup)
7297                         return OPA_LINK_WIDTH_4X;
7298                 return 0; /* no lanes up */
7299         case 1: return OPA_LINK_WIDTH_1X;
7300         case 2: return OPA_LINK_WIDTH_2X;
7301         case 3: return OPA_LINK_WIDTH_3X;
7302         default:
7303                 dd_dev_info(dd, "%s: invalid width %d, using 4\n",
7304                             __func__, width);
7305                 /* fall through */
7306         case 4: return OPA_LINK_WIDTH_4X;
7307         }
7308 }
7309 
7310 /*
7311  * Do a population count on the bottom nibble.
7312  */
7313 static const u8 bit_counts[16] = {
7314         0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4
7315 };
7316 
7317 static inline u8 nibble_to_count(u8 nibble)
7318 {
7319         return bit_counts[nibble & 0xf];
7320 }
7321 
7322 /*
7323  * Read the active lane information from the 8051 registers and return
7324  * their widths.
7325  *
7326  * Active lane information is found in these 8051 registers:
7327  *      enable_lane_tx
7328  *      enable_lane_rx
7329  */
7330 static void get_link_widths(struct hfi1_devdata *dd, u16 *tx_width,
7331                             u16 *rx_width)
7332 {
7333         u16 tx, rx;
7334         u8 enable_lane_rx;
7335         u8 enable_lane_tx;
7336         u8 tx_polarity_inversion;
7337         u8 rx_polarity_inversion;
7338         u8 max_rate;
7339 
7340         /* read the active lanes */
7341         read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion,
7342                          &rx_polarity_inversion, &max_rate);
7343         read_local_lni(dd, &enable_lane_rx);
7344 
7345         /* convert to counts */
7346         tx = nibble_to_count(enable_lane_tx);
7347         rx = nibble_to_count(enable_lane_rx);
7348 
7349         /*
7350          * Set link_speed_active here, overriding what was set in
7351          * handle_verify_cap().  The ASIC 8051 firmware does not correctly
7352          * set the max_rate field in handle_verify_cap until v0.19.
7353          */
7354         if ((dd->icode == ICODE_RTL_SILICON) &&
7355             (dd->dc8051_ver < dc8051_ver(0, 19, 0))) {
7356                 /* max_rate: 0 = 12.5G, 1 = 25G */
7357                 switch (max_rate) {
7358                 case 0:
7359                         dd->pport[0].link_speed_active = OPA_LINK_SPEED_12_5G;
7360                         break;
7361                 default:
7362                         dd_dev_err(dd,
7363                                    "%s: unexpected max rate %d, using 25Gb\n",
7364                                    __func__, (int)max_rate);
7365                         /* fall through */
7366                 case 1:
7367                         dd->pport[0].link_speed_active = OPA_LINK_SPEED_25G;
7368                         break;
7369                 }
7370         }
7371 
7372         dd_dev_info(dd,
7373                     "Fabric active lanes (width): tx 0x%x (%d), rx 0x%x (%d)\n",
7374                     enable_lane_tx, tx, enable_lane_rx, rx);
7375         *tx_width = link_width_to_bits(dd, tx);
7376         *rx_width = link_width_to_bits(dd, rx);
7377 }
7378 
7379 /*
7380  * Read verify_cap_local_fm_link_width[1] to obtain the link widths.
7381  * Valid after the end of VerifyCap and during LinkUp.  Does not change
7382  * after link up.  I.e. look elsewhere for downgrade information.
7383  *
7384  * Bits are:
7385  *      + bits [7:4] contain the number of active transmitters
7386  *      + bits [3:0] contain the number of active receivers
7387  * These are numbers 1 through 4 and can be different values if the
7388  * link is asymmetric.
7389  *
7390  * verify_cap_local_fm_link_width[0] retains its original value.
7391  */
7392 static void get_linkup_widths(struct hfi1_devdata *dd, u16 *tx_width,
7393                               u16 *rx_width)
7394 {
7395         u16 widths, tx, rx;
7396         u8 misc_bits, local_flags;
7397         u16 active_tx, active_rx;
7398 
7399         read_vc_local_link_mode(dd, &misc_bits, &local_flags, &widths);
7400         tx = widths >> 12;
7401         rx = (widths >> 8) & 0xf;
7402 
7403         *tx_width = link_width_to_bits(dd, tx);
7404         *rx_width = link_width_to_bits(dd, rx);
7405 
7406         /* print the active widths */
7407         get_link_widths(dd, &active_tx, &active_rx);
7408 }
7409 
7410 /*
7411  * Set ppd->link_width_active and ppd->link_width_downgrade_active using
7412  * hardware information when the link first comes up.
7413  *
7414  * The link width is not available until after VerifyCap.AllFramesReceived
7415  * (the trigger for handle_verify_cap), so this is outside that routine
7416  * and should be called when the 8051 signals linkup.
7417  */
7418 void get_linkup_link_widths(struct hfi1_pportdata *ppd)
7419 {
7420         u16 tx_width, rx_width;
7421 
7422         /* get end-of-LNI link widths */
7423         get_linkup_widths(ppd->dd, &tx_width, &rx_width);
7424 
7425         /* use tx_width as the link is supposed to be symmetric on link up */
7426         ppd->link_width_active = tx_width;
7427         /* link width downgrade active (LWD.A) starts out matching LW.A */
7428         ppd->link_width_downgrade_tx_active = ppd->link_width_active;
7429         ppd->link_width_downgrade_rx_active = ppd->link_width_active;
7430         /* per OPA spec, on link up LWD.E resets to LWD.S */
7431         ppd->link_width_downgrade_enabled = ppd->link_width_downgrade_supported;
7432         /* cache the active egress rate (units {10^6 bits/sec]) */
7433         ppd->current_egress_rate = active_egress_rate(ppd);
7434 }
7435 
7436 /*
7437  * Handle a verify capabilities interrupt from the 8051.
7438  *
7439  * This is a work-queue function outside of the interrupt.
7440  */
7441 void handle_verify_cap(struct work_struct *work)
7442 {
7443         struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7444                                                                 link_vc_work);
7445         struct hfi1_devdata *dd = ppd->dd;
7446         u64 reg;
7447         u8 power_management;
7448         u8 continuous;
7449         u8 vcu;
7450         u8 vau;
7451         u8 z;
7452         u16 vl15buf;
7453         u16 link_widths;
7454         u16 crc_mask;
7455         u16 crc_val;
7456         u16 device_id;
7457         u16 active_tx, active_rx;
7458         u8 partner_supported_crc;
7459         u8 remote_tx_rate;
7460         u8 device_rev;
7461 
7462         set_link_state(ppd, HLS_VERIFY_CAP);
7463 
7464         lcb_shutdown(dd, 0);
7465         adjust_lcb_for_fpga_serdes(dd);
7466 
7467         read_vc_remote_phy(dd, &power_management, &continuous);
7468         read_vc_remote_fabric(dd, &vau, &z, &vcu, &vl15buf,
7469                               &partner_supported_crc);
7470         read_vc_remote_link_width(dd, &remote_tx_rate, &link_widths);
7471         read_remote_device_id(dd, &device_id, &device_rev);
7472 
7473         /* print the active widths */
7474         get_link_widths(dd, &active_tx, &active_rx);
7475         dd_dev_info(dd,
7476                     "Peer PHY: power management 0x%x, continuous updates 0x%x\n",
7477                     (int)power_management, (int)continuous);
7478         dd_dev_info(dd,
7479                     "Peer Fabric: vAU %d, Z %d, vCU %d, vl15 credits 0x%x, CRC sizes 0x%x\n",
7480                     (int)vau, (int)z, (int)vcu, (int)vl15buf,
7481                     (int)partner_supported_crc);
7482         dd_dev_info(dd, "Peer Link Width: tx rate 0x%x, widths 0x%x\n",
7483                     (u32)remote_tx_rate, (u32)link_widths);
7484         dd_dev_info(dd, "Peer Device ID: 0x%04x, Revision 0x%02x\n",
7485                     (u32)device_id, (u32)device_rev);
7486         /*
7487          * The peer vAU value just read is the peer receiver value.  HFI does
7488          * not support a transmit vAU of 0 (AU == 8).  We advertised that
7489          * with Z=1 in the fabric capabilities sent to the peer.  The peer
7490          * will see our Z=1, and, if it advertised a vAU of 0, will move its
7491          * receive to vAU of 1 (AU == 16).  Do the same here.  We do not care
7492          * about the peer Z value - our sent vAU is 3 (hardwired) and is not
7493          * subject to the Z value exception.
7494          */
7495         if (vau == 0)
7496                 vau = 1;
7497         set_up_vau(dd, vau);
7498 
7499         /*
7500          * Set VL15 credits to 0 in global credit register. Cache remote VL15
7501          * credits value and wait for link-up interrupt ot set it.
7502          */
7503         set_up_vl15(dd, 0);
7504         dd->vl15buf_cached = vl15buf;
7505 
7506         /* set up the LCB CRC mode */
7507         crc_mask = ppd->port_crc_mode_enabled & partner_supported_crc;
7508 
7509         /* order is important: use the lowest bit in common */
7510         if (crc_mask & CAP_CRC_14B)
7511                 crc_val = LCB_CRC_14B;
7512         else if (crc_mask & CAP_CRC_48B)
7513                 crc_val = LCB_CRC_48B;
7514         else if (crc_mask & CAP_CRC_12B_16B_PER_LANE)
7515                 crc_val = LCB_CRC_12B_16B_PER_LANE;
7516         else
7517                 crc_val = LCB_CRC_16B;
7518 
7519         dd_dev_info(dd, "Final LCB CRC mode: %d\n", (int)crc_val);
7520         write_csr(dd, DC_LCB_CFG_CRC_MODE,
7521                   (u64)crc_val << DC_LCB_CFG_CRC_MODE_TX_VAL_SHIFT);
7522 
7523         /* set (14b only) or clear sideband credit */
7524         reg = read_csr(dd, SEND_CM_CTRL);
7525         if (crc_val == LCB_CRC_14B && crc_14b_sideband) {
7526                 write_csr(dd, SEND_CM_CTRL,
7527                           reg | SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK);
7528         } else {
7529                 write_csr(dd, SEND_CM_CTRL,
7530                           reg & ~SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK);
7531         }
7532 
7533         ppd->link_speed_active = 0;     /* invalid value */
7534         if (dd->dc8051_ver < dc8051_ver(0, 20, 0)) {
7535                 /* remote_tx_rate: 0 = 12.5G, 1 = 25G */
7536                 switch (remote_tx_rate) {
7537                 case 0:
7538                         ppd->link_speed_active = OPA_LINK_SPEED_12_5G;
7539                         break;
7540                 case 1:
7541                         ppd->link_speed_active = OPA_LINK_SPEED_25G;
7542                         break;
7543                 }
7544         } else {
7545                 /* actual rate is highest bit of the ANDed rates */
7546                 u8 rate = remote_tx_rate & ppd->local_tx_rate;
7547 
7548                 if (rate & 2)
7549                         ppd->link_speed_active = OPA_LINK_SPEED_25G;
7550                 else if (rate & 1)
7551                         ppd->link_speed_active = OPA_LINK_SPEED_12_5G;
7552         }
7553         if (ppd->link_speed_active == 0) {
7554                 dd_dev_err(dd, "%s: unexpected remote tx rate %d, using 25Gb\n",
7555                            __func__, (int)remote_tx_rate);
7556                 ppd->link_speed_active = OPA_LINK_SPEED_25G;
7557         }
7558 
7559         /*
7560          * Cache the values of the supported, enabled, and active
7561          * LTP CRC modes to return in 'portinfo' queries. But the bit
7562          * flags that are returned in the portinfo query differ from
7563          * what's in the link_crc_mask, crc_sizes, and crc_val
7564          * variables. Convert these here.
7565          */
7566         ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8;
7567                 /* supported crc modes */
7568         ppd->port_ltp_crc_mode |=
7569                 cap_to_port_ltp(ppd->port_crc_mode_enabled) << 4;
7570                 /* enabled crc modes */
7571         ppd->port_ltp_crc_mode |= lcb_to_port_ltp(crc_val);
7572                 /* active crc mode */
7573 
7574         /* set up the remote credit return table */
7575         assign_remote_cm_au_table(dd, vcu);
7576 
7577         /*
7578          * The LCB is reset on entry to handle_verify_cap(), so this must
7579          * be applied on every link up.
7580          *
7581          * Adjust LCB error kill enable to kill the link if
7582          * these RBUF errors are seen:
7583          *      REPLAY_BUF_MBE_SMASK
7584          *      FLIT_INPUT_BUF_MBE_SMASK
7585          */
7586         if (is_ax(dd)) {                        /* fixed in B0 */
7587                 reg = read_csr(dd, DC_LCB_CFG_LINK_KILL_EN);
7588                 reg |= DC_LCB_CFG_LINK_KILL_EN_REPLAY_BUF_MBE_SMASK
7589                         | DC_LCB_CFG_LINK_KILL_EN_FLIT_INPUT_BUF_MBE_SMASK;
7590                 write_csr(dd, DC_LCB_CFG_LINK_KILL_EN, reg);
7591         }
7592 
7593         /* pull LCB fifos out of reset - all fifo clocks must be stable */
7594         write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
7595 
7596         /* give 8051 access to the LCB CSRs */
7597         write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */
7598         set_8051_lcb_access(dd);
7599 
7600         /* tell the 8051 to go to LinkUp */
7601         set_link_state(ppd, HLS_GOING_UP);
7602 }
7603 
7604 /**
7605  * apply_link_downgrade_policy - Apply the link width downgrade enabled
7606  * policy against the current active link widths.
7607  * @ppd: info of physical Hfi port
7608  * @refresh_widths: True indicates link downgrade event
7609  * @return: True indicates a successful link downgrade. False indicates
7610  *          link downgrade event failed and the link will bounce back to
7611  *          default link width.
7612  *
7613  * Called when the enabled policy changes or the active link widths
7614  * change.
7615  * Refresh_widths indicates that a link downgrade occurred. The
7616  * link_downgraded variable is set by refresh_widths and
7617  * determines the success/failure of the policy application.
7618  */
7619 bool apply_link_downgrade_policy(struct hfi1_pportdata *ppd,
7620                                  bool refresh_widths)
7621 {
7622         int do_bounce = 0;
7623         int tries;
7624         u16 lwde;
7625         u16 tx, rx;
7626         bool link_downgraded = refresh_widths;
7627 
7628         /* use the hls lock to avoid a race with actual link up */
7629         tries = 0;
7630 retry:
7631         mutex_lock(&ppd->hls_lock);
7632         /* only apply if the link is up */
7633         if (ppd->host_link_state & HLS_DOWN) {
7634                 /* still going up..wait and retry */
7635                 if (ppd->host_link_state & HLS_GOING_UP) {
7636                         if (++tries < 1000) {
7637                                 mutex_unlock(&ppd->hls_lock);
7638                                 usleep_range(100, 120); /* arbitrary */
7639                                 goto retry;
7640                         }
7641                         dd_dev_err(ppd->dd,
7642                                    "%s: giving up waiting for link state change\n",
7643                                    __func__);
7644                 }
7645                 goto done;
7646         }
7647 
7648         lwde = ppd->link_width_downgrade_enabled;
7649 
7650         if (refresh_widths) {
7651                 get_link_widths(ppd->dd, &tx, &rx);
7652                 ppd->link_width_downgrade_tx_active = tx;
7653                 ppd->link_width_downgrade_rx_active = rx;
7654         }
7655 
7656         if (ppd->link_width_downgrade_tx_active == 0 ||
7657             ppd->link_width_downgrade_rx_active == 0) {
7658                 /* the 8051 reported a dead link as a downgrade */
7659                 dd_dev_err(ppd->dd, "Link downgrade is really a link down, ignoring\n");
7660                 link_downgraded = false;
7661         } else if (lwde == 0) {
7662                 /* downgrade is disabled */
7663 
7664                 /* bounce if not at starting active width */
7665                 if ((ppd->link_width_active !=
7666                      ppd->link_width_downgrade_tx_active) ||
7667                     (ppd->link_width_active !=
7668                      ppd->link_width_downgrade_rx_active)) {
7669                         dd_dev_err(ppd->dd,
7670                                    "Link downgrade is disabled and link has downgraded, downing link\n");
7671                         dd_dev_err(ppd->dd,
7672                                    "  original 0x%x, tx active 0x%x, rx active 0x%x\n",
7673                                    ppd->link_width_active,
7674                                    ppd->link_width_downgrade_tx_active,
7675                                    ppd->link_width_downgrade_rx_active);
7676                         do_bounce = 1;
7677                         link_downgraded = false;
7678                 }
7679         } else if ((lwde & ppd->link_width_downgrade_tx_active) == 0 ||
7680                    (lwde & ppd->link_width_downgrade_rx_active) == 0) {
7681                 /* Tx or Rx is outside the enabled policy */
7682                 dd_dev_err(ppd->dd,
7683                            "Link is outside of downgrade allowed, downing link\n");
7684                 dd_dev_err(ppd->dd,
7685                            "  enabled 0x%x, tx active 0x%x, rx active 0x%x\n",
7686                            lwde, ppd->link_width_downgrade_tx_active,
7687                            ppd->link_width_downgrade_rx_active);
7688                 do_bounce = 1;
7689                 link_downgraded = false;
7690         }
7691 
7692 done:
7693         mutex_unlock(&ppd->hls_lock);
7694 
7695         if (do_bounce) {
7696                 set_link_down_reason(ppd, OPA_LINKDOWN_REASON_WIDTH_POLICY, 0,
7697                                      OPA_LINKDOWN_REASON_WIDTH_POLICY);
7698                 set_link_state(ppd, HLS_DN_OFFLINE);
7699                 start_link(ppd);
7700         }
7701 
7702         return link_downgraded;
7703 }
7704 
7705 /*
7706  * Handle a link downgrade interrupt from the 8051.
7707  *
7708  * This is a work-queue function outside of the interrupt.
7709  */
7710 void handle_link_downgrade(struct work_struct *work)
7711 {
7712         struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7713                                                         link_downgrade_work);
7714 
7715         dd_dev_info(ppd->dd, "8051: Link width downgrade\n");
7716         if (apply_link_downgrade_policy(ppd, true))
7717                 update_xmit_counters(ppd, ppd->link_width_downgrade_tx_active);
7718 }
7719 
7720 static char *dcc_err_string(char *buf, int buf_len, u64 flags)
7721 {
7722         return flag_string(buf, buf_len, flags, dcc_err_flags,
7723                 ARRAY_SIZE(dcc_err_flags));
7724 }
7725 
7726 static char *lcb_err_string(char *buf, int buf_len, u64 flags)
7727 {
7728         return flag_string(buf, buf_len, flags, lcb_err_flags,
7729                 ARRAY_SIZE(lcb_err_flags));
7730 }
7731 
7732 static char *dc8051_err_string(char *buf, int buf_len, u64 flags)
7733 {
7734         return flag_string(buf, buf_len, flags, dc8051_err_flags,
7735                 ARRAY_SIZE(dc8051_err_flags));
7736 }
7737 
7738 static char *dc8051_info_err_string(char *buf, int buf_len, u64 flags)
7739 {
7740         return flag_string(buf, buf_len, flags, dc8051_info_err_flags,
7741                 ARRAY_SIZE(dc8051_info_err_flags));
7742 }
7743 
7744 static char *dc8051_info_host_msg_string(char *buf, int buf_len, u64 flags)
7745 {
7746         return flag_string(buf, buf_len, flags, dc8051_info_host_msg_flags,
7747                 ARRAY_SIZE(dc8051_info_host_msg_flags));
7748 }
7749 
7750 static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg)
7751 {
7752         struct hfi1_pportdata *ppd = dd->pport;
7753         u64 info, err, host_msg;
7754         int queue_link_down = 0;
7755         char buf[96];
7756 
7757         /* look at the flags */
7758         if (reg & DC_DC8051_ERR_FLG_SET_BY_8051_SMASK) {
7759                 /* 8051 information set by firmware */
7760                 /* read DC8051_DBG_ERR_INFO_SET_BY_8051 for details */
7761                 info = read_csr(dd, DC_DC8051_DBG_ERR_INFO_SET_BY_8051);
7762                 err = (info >> DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_SHIFT)
7763                         & DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_MASK;
7764                 host_msg = (info >>
7765                         DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_SHIFT)
7766                         & DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_MASK;
7767 
7768                 /*
7769                  * Handle error flags.
7770                  */
7771                 if (err & FAILED_LNI) {
7772                         /*
7773                          * LNI error indications are cleared by the 8051
7774                          * only when starting polling.  Only pay attention
7775                          * to them when in the states that occur during
7776                          * LNI.
7777                          */
7778                         if (ppd->host_link_state
7779                             & (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) {
7780                                 queue_link_down = 1;
7781                                 dd_dev_info(dd, "Link error: %s\n",
7782                                             dc8051_info_err_string(buf,
7783                                                                    sizeof(buf),
7784                                                                    err &
7785                                                                    FAILED_LNI));
7786                         }
7787                         err &= ~(u64)FAILED_LNI;
7788                 }
7789                 /* unknown frames can happen durning LNI, just count */
7790                 if (err & UNKNOWN_FRAME) {
7791                         ppd->unknown_frame_count++;
7792                         err &= ~(u64)UNKNOWN_FRAME;
7793                 }
7794                 if (err) {
7795                         /* report remaining errors, but do not do anything */
7796                         dd_dev_err(dd, "8051 info error: %s\n",
7797                                    dc8051_info_err_string(buf, sizeof(buf),
7798                                                           err));
7799                 }
7800 
7801                 /*
7802                  * Handle host message flags.
7803                  */
7804                 if (host_msg & HOST_REQ_DONE) {
7805                         /*
7806                          * Presently, the driver does a busy wait for
7807                          * host requests to complete.  This is only an
7808                          * informational message.
7809                          * NOTE: The 8051 clears the host message
7810                          * information *on the next 8051 command*.
7811                          * Therefore, when linkup is achieved,
7812                          * this flag will still be set.
7813                          */
7814                         host_msg &= ~(u64)HOST_REQ_DONE;
7815                 }
7816                 if (host_msg & BC_SMA_MSG) {
7817                         queue_work(ppd->link_wq, &ppd->sma_message_work);
7818                         host_msg &= ~(u64)BC_SMA_MSG;
7819                 }
7820                 if (host_msg & LINKUP_ACHIEVED) {
7821                         dd_dev_info(dd, "8051: Link up\n");
7822                         queue_work(ppd->link_wq, &ppd->link_up_work);
7823                         host_msg &= ~(u64)LINKUP_ACHIEVED;
7824                 }
7825                 if (host_msg & EXT_DEVICE_CFG_REQ) {
7826                         handle_8051_request(ppd);
7827                         host_msg &= ~(u64)EXT_DEVICE_CFG_REQ;
7828                 }
7829                 if (host_msg & VERIFY_CAP_FRAME) {
7830                         queue_work(ppd->link_wq, &ppd->link_vc_work);
7831                         host_msg &= ~(u64)VERIFY_CAP_FRAME;
7832                 }
7833                 if (host_msg & LINK_GOING_DOWN) {
7834                         const char *extra = "";
7835                         /* no downgrade action needed if going down */
7836                         if (host_msg & LINK_WIDTH_DOWNGRADED) {
7837                                 host_msg &= ~(u64)LINK_WIDTH_DOWNGRADED;
7838                                 extra = " (ignoring downgrade)";
7839                         }
7840                         dd_dev_info(dd, "8051: Link down%s\n", extra);
7841                         queue_link_down = 1;
7842                         host_msg &= ~(u64)LINK_GOING_DOWN;
7843                 }
7844                 if (host_msg & LINK_WIDTH_DOWNGRADED) {
7845                         queue_work(ppd->link_wq, &ppd->link_downgrade_work);
7846                         host_msg &= ~(u64)LINK_WIDTH_DOWNGRADED;
7847                 }
7848                 if (host_msg) {
7849                         /* report remaining messages, but do not do anything */
7850                         dd_dev_info(dd, "8051 info host message: %s\n",
7851                                     dc8051_info_host_msg_string(buf,
7852                                                                 sizeof(buf),
7853                                                                 host_msg));
7854                 }
7855 
7856                 reg &= ~DC_DC8051_ERR_FLG_SET_BY_8051_SMASK;
7857         }
7858         if (reg & DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK) {
7859                 /*
7860                  * Lost the 8051 heartbeat.  If this happens, we
7861                  * receive constant interrupts about it.  Disable
7862                  * the interrupt after the first.
7863                  */
7864                 dd_dev_err(dd, "Lost 8051 heartbeat\n");
7865                 write_csr(dd, DC_DC8051_ERR_EN,
7866                           read_csr(dd, DC_DC8051_ERR_EN) &
7867                           ~DC_DC8051_ERR_EN_LOST_8051_HEART_BEAT_SMASK);
7868 
7869                 reg &= ~DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK;
7870         }
7871         if (reg) {
7872                 /* report the error, but do not do anything */
7873                 dd_dev_err(dd, "8051 error: %s\n",
7874                            dc8051_err_string(buf, sizeof(buf), reg));
7875         }
7876 
7877         if (queue_link_down) {
7878                 /*
7879                  * if the link is already going down or disabled, do not
7880                  * queue another. If there's a link down entry already
7881                  * queued, don't queue another one.
7882                  */
7883                 if ((ppd->host_link_state &
7884                     (HLS_GOING_OFFLINE | HLS_LINK_COOLDOWN)) ||
7885                     ppd->link_enabled == 0) {
7886                         dd_dev_info(dd, "%s: not queuing link down. host_link_state %x, link_enabled %x\n",
7887                                     __func__, ppd->host_link_state,
7888                                     ppd->link_enabled);
7889                 } else {
7890                         if (xchg(&ppd->is_link_down_queued, 1) == 1)
7891                                 dd_dev_info(dd,
7892                                             "%s: link down request already queued\n",
7893                                             __func__);
7894                         else
7895                                 queue_work(ppd->link_wq, &ppd->link_down_work);
7896                 }
7897         }
7898 }
7899 
7900 static const char * const fm_config_txt[] = {
7901 [0] =
7902         "BadHeadDist: Distance violation between two head flits",
7903 [1] =
7904         "BadTailDist: Distance violation between two tail flits",
7905 [2] =
7906         "BadCtrlDist: Distance violation between two credit control flits",
7907 [3] =
7908         "BadCrdAck: Credits return for unsupported VL",
7909 [4] =
7910         "UnsupportedVLMarker: Received VL Marker",
7911 [5] =
7912         "BadPreempt: Exceeded the preemption nesting level",
7913 [6] =
7914         "BadControlFlit: Received unsupported control flit",
7915 /* no 7 */
7916 [8] =
7917         "UnsupportedVLMarker: Received VL Marker for unconfigured or disabled VL",
7918 };
7919 
7920 static const char * const port_rcv_txt[] = {
7921 [1] =
7922         "BadPktLen: Illegal PktLen",
7923 [2] =
7924         "PktLenTooLong: Packet longer than PktLen",
7925 [3] =
7926         "PktLenTooShort: Packet shorter than PktLen",
7927 [4] =
7928         "BadSLID: Illegal SLID (0, using multicast as SLID, does not include security validation of SLID)",
7929 [5] =
7930         "BadDLID: Illegal DLID (0, doesn't match HFI)",
7931 [6] =
7932         "BadL2: Illegal L2 opcode",
7933 [7] =
7934         "BadSC: Unsupported SC",
7935 [9] =
7936         "BadRC: Illegal RC",
7937 [11] =
7938         "PreemptError: Preempting with same VL",
7939 [12] =
7940         "PreemptVL15: Preempting a VL15 packet",
7941 };
7942 
7943 #define OPA_LDR_FMCONFIG_OFFSET 16
7944 #define OPA_LDR_PORTRCV_OFFSET 0
7945 static void handle_dcc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
7946 {
7947         u64 info, hdr0, hdr1;
7948         const char *extra;
7949         char buf[96];
7950         struct hfi1_pportdata *ppd = dd->pport;
7951         u8 lcl_reason = 0;
7952         int do_bounce = 0;
7953 
7954         if (reg & DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK) {
7955                 if (!(dd->err_info_uncorrectable & OPA_EI_STATUS_SMASK)) {
7956                         info = read_csr(dd, DCC_ERR_INFO_UNCORRECTABLE);
7957                         dd->err_info_uncorrectable = info & OPA_EI_CODE_SMASK;
7958                         /* set status bit */
7959                         dd->err_info_uncorrectable |= OPA_EI_STATUS_SMASK;
7960                 }
7961                 reg &= ~DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK;
7962         }
7963 
7964         if (reg & DCC_ERR_FLG_LINK_ERR_SMASK) {
7965                 struct hfi1_pportdata *ppd = dd->pport;
7966                 /* this counter saturates at (2^32) - 1 */
7967                 if (ppd->link_downed < (u32)UINT_MAX)
7968                         ppd->link_downed++;
7969                 reg &= ~DCC_ERR_FLG_LINK_ERR_SMASK;
7970         }
7971 
7972         if (reg & DCC_ERR_FLG_FMCONFIG_ERR_SMASK) {
7973                 u8 reason_valid = 1;
7974 
7975                 info = read_csr(dd, DCC_ERR_INFO_FMCONFIG);
7976                 if (!(dd->err_info_fmconfig & OPA_EI_STATUS_SMASK)) {
7977                         dd->err_info_fmconfig = info & OPA_EI_CODE_SMASK;
7978                         /* set status bit */
7979                         dd->err_info_fmconfig |= OPA_EI_STATUS_SMASK;
7980                 }
7981                 switch (info) {
7982                 case 0:
7983                 case 1:
7984                 case 2:
7985                 case 3:
7986                 case 4:
7987                 case 5:
7988                 case 6:
7989                         extra = fm_config_txt[info];
7990                         break;
7991                 case 8:
7992                         extra = fm_config_txt[info];
7993                         if (ppd->port_error_action &
7994                             OPA_PI_MASK_FM_CFG_UNSUPPORTED_VL_MARKER) {
7995                                 do_bounce = 1;
7996                                 /*
7997                                  * lcl_reason cannot be derived from info
7998                                  * for this error
7999                                  */
8000                                 lcl_reason =
8001                                   OPA_LINKDOWN_REASON_UNSUPPORTED_VL_MARKER;
8002                         }
8003                         break;
8004                 default:
8005                         reason_valid = 0;
8006                         snprintf(buf, sizeof(buf), "reserved%lld", info);
8007                         extra = buf;
8008                         break;
8009                 }
8010 
8011                 if (reason_valid && !do_bounce) {
8012                         do_bounce = ppd->port_error_action &
8013                                         (1 << (OPA_LDR_FMCONFIG_OFFSET + info));
8014                         lcl_reason = info + OPA_LINKDOWN_REASON_BAD_HEAD_DIST;
8015                 }
8016 
8017                 /* just report this */
8018                 dd_dev_info_ratelimited(dd, "DCC Error: fmconfig error: %s\n",
8019                                         extra);
8020                 reg &= ~DCC_ERR_FLG_FMCONFIG_ERR_SMASK;
8021         }
8022 
8023         if (reg & DCC_ERR_FLG_RCVPORT_ERR_SMASK) {
8024                 u8 reason_valid = 1;
8025 
8026                 info = read_csr(dd, DCC_ERR_INFO_PORTRCV);
8027                 hdr0 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR0);
8028                 hdr1 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR1);
8029                 if (!(dd->err_info_rcvport.status_and_code &
8030                       OPA_EI_STATUS_SMASK)) {
8031                         dd->err_info_rcvport.status_and_code =
8032                                 info & OPA_EI_CODE_SMASK;
8033                         /* set status bit */
8034                         dd->err_info_rcvport.status_and_code |=
8035                                 OPA_EI_STATUS_SMASK;
8036                         /*
8037                          * save first 2 flits in the packet that caused
8038                          * the error
8039                          */
8040                         dd->err_info_rcvport.packet_flit1 = hdr0;
8041                         dd->err_info_rcvport.packet_flit2 = hdr1;
8042                 }
8043                 switch (info) {
8044                 case 1:
8045                 case 2:
8046                 case 3:
8047                 case 4:
8048                 case 5:
8049                 case 6:
8050                 case 7:
8051                 case 9:
8052                 case 11:
8053                 case 12:
8054                         extra = port_rcv_txt[info];
8055                         break;
8056                 default:
8057                         reason_valid = 0;
8058                         snprintf(buf, sizeof(buf), "reserved%lld", info);
8059                         extra = buf;
8060                         break;
8061                 }
8062 
8063                 if (reason_valid && !do_bounce) {
8064                         do_bounce = ppd->port_error_action &
8065                                         (1 << (OPA_LDR_PORTRCV_OFFSET + info));
8066                         lcl_reason = info + OPA_LINKDOWN_REASON_RCV_ERROR_0;
8067                 }
8068 
8069                 /* just report this */
8070                 dd_dev_info_ratelimited(dd, "DCC Error: PortRcv error: %s\n"
8071                                         "               hdr0 0x%llx, hdr1 0x%llx\n",
8072                                         extra, hdr0, hdr1);
8073 
8074                 reg &= ~DCC_ERR_FLG_RCVPORT_ERR_SMASK;
8075         }
8076 
8077         if (reg & DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK) {
8078                 /* informative only */
8079                 dd_dev_info_ratelimited(dd, "8051 access to LCB blocked\n");
8080                 reg &= ~DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK;
8081         }
8082         if (reg & DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK) {
8083                 /* informative only */
8084                 dd_dev_info_ratelimited(dd, "host access to LCB blocked\n");
8085                 reg &= ~DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK;
8086         }
8087 
8088         if (unlikely(hfi1_dbg_fault_suppress_err(&dd->verbs_dev)))
8089                 reg &= ~DCC_ERR_FLG_LATE_EBP_ERR_SMASK;
8090 
8091         /* report any remaining errors */
8092         if (reg)
8093                 dd_dev_info_ratelimited(dd, "DCC Error: %s\n",
8094                                         dcc_err_string(buf, sizeof(buf), reg));
8095 
8096         if (lcl_reason == 0)
8097                 lcl_reason = OPA_LINKDOWN_REASON_UNKNOWN;
8098 
8099         if (do_bounce) {
8100                 dd_dev_info_ratelimited(dd, "%s: PortErrorAction bounce\n",
8101                                         __func__);
8102                 set_link_down_reason(ppd, lcl_reason, 0, lcl_reason);
8103                 queue_work(ppd->link_wq, &ppd->link_bounce_work);
8104         }
8105 }
8106 
8107 static void handle_lcb_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
8108 {
8109         char buf[96];
8110 
8111         dd_dev_info(dd, "LCB Error: %s\n",
8112                     lcb_err_string(buf, sizeof(buf), reg));
8113 }
8114 
8115 /*
8116  * CCE block DC interrupt.  Source is < 8.
8117  */
8118 static void is_dc_int(struct hfi1_devdata *dd, unsigned int source)
8119 {
8120         const struct err_reg_info *eri = &dc_errs[source];
8121 
8122         if (eri->handler) {
8123                 interrupt_clear_down(dd, 0, eri);
8124         } else if (source == 3 /* dc_lbm_int */) {
8125                 /*
8126                  * This indicates that a parity error has occurred on the
8127                  * address/control lines presented to the LBM.  The error
8128                  * is a single pulse, there is no associated error flag,
8129                  * and it is non-maskable.  This is because if a parity
8130                  * error occurs on the request the request is dropped.
8131                  * This should never occur, but it is nice to know if it
8132                  * ever does.
8133                  */
8134                 dd_dev_err(dd, "Parity error in DC LBM block\n");
8135         } else {
8136                 dd_dev_err(dd, "Invalid DC interrupt %u\n", source);
8137         }
8138 }
8139 
8140 /*
8141  * TX block send credit interrupt.  Source is < 160.
8142  */
8143 static void is_send_credit_int(struct hfi1_devdata *dd, unsigned int source)
8144 {
8145         sc_group_release_update(dd, source);
8146 }
8147 
8148 /*
8149  * TX block SDMA interrupt.  Source is < 48.
8150  *
8151  * SDMA interrupts are grouped by type:
8152  *
8153  *       0 -  N-1 = SDma
8154  *       N - 2N-1 = SDmaProgress
8155  *      2N - 3N-1 = SDmaIdle
8156  */
8157 static void is_sdma_eng_int(struct hfi1_devdata *dd, unsigned int source)
8158 {
8159         /* what interrupt */
8160         unsigned int what  = source / TXE_NUM_SDMA_ENGINES;
8161         /* which engine */
8162         unsigned int which = source % TXE_NUM_SDMA_ENGINES;
8163 
8164 #ifdef CONFIG_SDMA_VERBOSITY
8165         dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", which,
8166                    slashstrip(__FILE__), __LINE__, __func__);
8167         sdma_dumpstate(&dd->per_sdma[which]);
8168 #endif
8169 
8170         if (likely(what < 3 && which < dd->num_sdma)) {
8171                 sdma_engine_interrupt(&dd->per_sdma[which], 1ull << source);
8172         } else {
8173                 /* should not happen */
8174                 dd_dev_err(dd, "Invalid SDMA interrupt 0x%x\n", source);
8175         }
8176 }
8177 
8178 /**
8179  * is_rcv_avail_int() - User receive context available IRQ handler
8180  * @dd: valid dd
8181  * @source: logical IRQ source (offset from IS_RCVAVAIL_START)
8182  *
8183  * RX block receive available interrupt.  Source is < 160.
8184  *
8185  * This is the general interrupt handler for user (PSM) receive contexts,
8186  * and can only be used for non-threaded IRQs.
8187  */
8188 static void is_rcv_avail_int(struct hfi1_devdata *dd, unsigned int source)
8189 {
8190         struct hfi1_ctxtdata *rcd;
8191         char *err_detail;
8192 
8193         if (likely(source < dd->num_rcv_contexts)) {
8194                 rcd = hfi1_rcd_get_by_index(dd, source);
8195                 if (rcd) {
8196                         handle_user_interrupt(rcd);
8197                         hfi1_rcd_put(rcd);
8198                         return; /* OK */
8199                 }
8200                 /* received an interrupt, but no rcd */
8201                 err_detail = "dataless";
8202         } else {
8203                 /* received an interrupt, but are not using that context */
8204                 err_detail = "out of range";
8205         }
8206         dd_dev_err(dd, "unexpected %s receive available context interrupt %u\n",
8207                    err_detail, source);
8208 }
8209 
8210 /**
8211  * is_rcv_urgent_int() - User receive context urgent IRQ handler
8212  * @dd: valid dd
8213  * @source: logical IRQ source (offset from IS_RCVURGENT_START)
8214  *
8215  * RX block receive urgent interrupt.  Source is < 160.
8216  *
8217  * NOTE: kernel receive contexts specifically do NOT enable this IRQ.
8218  */
8219 static void is_rcv_urgent_int(struct hfi1_devdata *dd, unsigned int source)
8220 {
8221         struct hfi1_ctxtdata *rcd;
8222         char *err_detail;
8223 
8224         if (likely(source < dd->num_rcv_contexts)) {
8225                 rcd = hfi1_rcd_get_by_index(dd, source);
8226                 if (rcd) {
8227                         handle_user_interrupt(rcd);
8228                         hfi1_rcd_put(rcd);
8229                         return; /* OK */
8230                 }
8231                 /* received an interrupt, but no rcd */
8232                 err_detail = "dataless";
8233         } else {
8234                 /* received an interrupt, but are not using that context */
8235                 err_detail = "out of range";
8236         }
8237         dd_dev_err(dd, "unexpected %s receive urgent context interrupt %u\n",
8238                    err_detail, source);
8239 }
8240 
8241 /*
8242  * Reserved range interrupt.  Should not be called in normal operation.
8243  */
8244 static void is_reserved_int(struct hfi1_devdata *dd, unsigned int source)
8245 {
8246         char name[64];
8247 
8248         dd_dev_err(dd, "unexpected %s interrupt\n",
8249                    is_reserved_name(name, sizeof(name), source));
8250 }
8251 
8252 static const struct is_table is_table[] = {
8253 /*
8254  * start                 end
8255  *                              name func               interrupt func
8256  */
8257 { IS_GENERAL_ERR_START,  IS_GENERAL_ERR_END,
8258                                 is_misc_err_name,       is_misc_err_int },
8259 { IS_SDMAENG_ERR_START,  IS_SDMAENG_ERR_END,
8260                                 is_sdma_eng_err_name,   is_sdma_eng_err_int },
8261 { IS_SENDCTXT_ERR_START, IS_SENDCTXT_ERR_END,
8262                                 is_sendctxt_err_name,   is_sendctxt_err_int },
8263 { IS_SDMA_START,             IS_SDMA_IDLE_END,
8264                                 is_sdma_eng_name,       is_sdma_eng_int },
8265 { IS_VARIOUS_START,          IS_VARIOUS_END,
8266                                 is_various_name,        is_various_int },
8267 { IS_DC_START,       IS_DC_END,
8268                                 is_dc_name,             is_dc_int },
8269 { IS_RCVAVAIL_START,     IS_RCVAVAIL_END,
8270                                 is_rcv_avail_name,      is_rcv_avail_int },
8271 { IS_RCVURGENT_START,    IS_RCVURGENT_END,
8272                                 is_rcv_urgent_name,     is_rcv_urgent_int },
8273 { IS_SENDCREDIT_START,   IS_SENDCREDIT_END,
8274                                 is_send_credit_name,    is_send_credit_int},
8275 { IS_RESERVED_START,     IS_RESERVED_END,
8276                                 is_reserved_name,       is_reserved_int},
8277 };
8278 
8279 /*
8280  * Interrupt source interrupt - called when the given source has an interrupt.
8281  * Source is a bit index into an array of 64-bit integers.
8282  */
8283 static void is_interrupt(struct hfi1_devdata *dd, unsigned int source)
8284 {
8285         const struct is_table *entry;
8286 
8287         /* avoids a double compare by walking the table in-order */
8288         for (entry = &is_table[0]; entry->is_name; entry++) {
8289                 if (source <= entry->end) {
8290                         trace_hfi1_interrupt(dd, entry, source);
8291                         entry->is_int(dd, source - entry->start);
8292                         return;
8293                 }
8294         }
8295         /* fell off the end */
8296         dd_dev_err(dd, "invalid interrupt source %u\n", source);
8297 }
8298 
8299 /**
8300  * gerneral_interrupt() -  General interrupt handler
8301  * @irq: MSIx IRQ vector
8302  * @data: hfi1 devdata
8303  *
8304  * This is able to correctly handle all non-threaded interrupts.  Receive
8305  * context DATA IRQs are threaded and are not supported by this handler.
8306  *
8307  */
8308 irqreturn_t general_interrupt(int irq, void *data)
8309 {
8310         struct hfi1_devdata *dd = data;
8311         u64 regs[CCE_NUM_INT_CSRS];
8312         u32 bit;
8313         int i;
8314         irqreturn_t handled = IRQ_NONE;
8315 
8316         this_cpu_inc(*dd->int_counter);
8317 
8318         /* phase 1: scan and clear all handled interrupts */
8319         for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
8320                 if (dd->gi_mask[i] == 0) {
8321                         regs[i] = 0;    /* used later */
8322                         continue;
8323                 }
8324                 regs[i] = read_csr(dd, CCE_INT_STATUS + (8 * i)) &
8325                                 dd->gi_mask[i];
8326                 /* only clear if anything is set */
8327                 if (regs[i])
8328                         write_csr(dd, CCE_INT_CLEAR + (8 * i), regs[i]);
8329         }
8330 
8331         /* phase 2: call the appropriate handler */
8332         for_each_set_bit(bit, (unsigned long *)&regs[0],
8333                          CCE_NUM_INT_CSRS * 64) {
8334                 is_interrupt(dd, bit);
8335                 handled = IRQ_HANDLED;
8336         }
8337 
8338         return handled;
8339 }
8340 
8341 irqreturn_t sdma_interrupt(int irq, void *data)
8342 {
8343         struct sdma_engine *sde = data;
8344         struct hfi1_devdata *dd = sde->dd;
8345         u64 status;
8346 
8347 #ifdef CONFIG_SDMA_VERBOSITY
8348         dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
8349                    slashstrip(__FILE__), __LINE__, __func__);
8350         sdma_dumpstate(sde);
8351 #endif
8352 
8353         this_cpu_inc(*dd->int_counter);
8354 
8355         /* This read_csr is really bad in the hot path */
8356         status = read_csr(dd,
8357                           CCE_INT_STATUS + (8 * (IS_SDMA_START / 64)))
8358                           & sde->imask;
8359         if (likely(status)) {
8360                 /* clear the interrupt(s) */
8361                 write_csr(dd,
8362                           CCE_INT_CLEAR + (8 * (IS_SDMA_START / 64)),
8363                           status);
8364 
8365                 /* handle the interrupt(s) */
8366                 sdma_engine_interrupt(sde, status);
8367         } else {
8368                 dd_dev_info_ratelimited(dd, "SDMA engine %u interrupt, but no status bits set\n",
8369                                         sde->this_idx);
8370         }
8371         return IRQ_HANDLED;
8372 }
8373 
8374 /*
8375  * Clear the receive interrupt.  Use a read of the interrupt clear CSR
8376  * to insure that the write completed.  This does NOT guarantee that
8377  * queued DMA writes to memory from the chip are pushed.
8378  */
8379 static inline void clear_recv_intr(struct hfi1_ctxtdata *rcd)
8380 {
8381         struct hfi1_devdata *dd = rcd->dd;
8382         u32 addr = CCE_INT_CLEAR + (8 * rcd->ireg);
8383 
8384         write_csr(dd, addr, rcd->imask);
8385         /* force the above write on the chip and get a value back */
8386         (void)read_csr(dd, addr);
8387 }
8388 
8389 /* force the receive interrupt */
8390 void force_recv_intr(struct hfi1_ctxtdata *rcd)
8391 {
8392         write_csr(rcd->dd, CCE_INT_FORCE + (8 * rcd->ireg), rcd->imask);
8393 }
8394 
8395 /*
8396  * Return non-zero if a packet is present.
8397  *
8398  * This routine is called when rechecking for packets after the RcvAvail
8399  * interrupt has been cleared down.  First, do a quick check of memory for
8400  * a packet present.  If not found, use an expensive CSR read of the context
8401  * tail to determine the actual tail.  The CSR read is necessary because there
8402  * is no method to push pending DMAs to memory other than an interrupt and we
8403  * are trying to determine if we need to force an interrupt.
8404  */
8405 static inline int check_packet_present(struct hfi1_ctxtdata *rcd)
8406 {
8407         u32 tail;
8408         int present;
8409 
8410         if (!rcd->rcvhdrtail_kvaddr)
8411                 present = (rcd->seq_cnt ==
8412                                 rhf_rcv_seq(rhf_to_cpu(get_rhf_addr(rcd))));
8413         else /* is RDMA rtail */
8414                 present = (rcd->head != get_rcvhdrtail(rcd));
8415 
8416         if (present)
8417                 return 1;
8418 
8419         /* fall back to a CSR read, correct indpendent of DMA_RTAIL */
8420         tail = (u32)read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL);
8421         return rcd->head != tail;
8422 }
8423 
8424 /*
8425  * Receive packet IRQ handler.  This routine expects to be on its own IRQ.
8426  * This routine will try to handle packets immediately (latency), but if
8427  * it finds too many, it will invoke the thread handler (bandwitdh).  The
8428  * chip receive interrupt is *not* cleared down until this or the thread (if
8429  * invoked) is finished.  The intent is to avoid extra interrupts while we
8430  * are processing packets anyway.
8431  */
8432 irqreturn_t receive_context_interrupt(int irq, void *data)
8433 {
8434         struct hfi1_ctxtdata *rcd = data;
8435         struct hfi1_devdata *dd = rcd->dd;
8436         int disposition;
8437         int present;
8438 
8439         trace_hfi1_receive_interrupt(dd, rcd);
8440         this_cpu_inc(*dd->int_counter);
8441         aspm_ctx_disable(rcd);
8442 
8443         /* receive interrupt remains blocked while processing packets */
8444         disposition = rcd->do_interrupt(rcd, 0);
8445 
8446         /*
8447          * Too many packets were seen while processing packets in this
8448          * IRQ handler.  Invoke the handler thread.  The receive interrupt
8449          * remains blocked.
8450          */
8451         if (disposition == RCV_PKT_LIMIT)
8452                 return IRQ_WAKE_THREAD;
8453 
8454         /*
8455          * The packet processor detected no more packets.  Clear the receive
8456          * interrupt and recheck for a packet packet that may have arrived
8457          * after the previous check and interrupt clear.  If a packet arrived,
8458          * force another interrupt.
8459          */
8460         clear_recv_intr(rcd);
8461         present = check_packet_present(rcd);
8462         if (present)
8463                 force_recv_intr(rcd);
8464 
8465         return IRQ_HANDLED;
8466 }
8467 
8468 /*
8469  * Receive packet thread handler.  This expects to be invoked with the
8470  * receive interrupt still blocked.
8471  */
8472 irqreturn_t receive_context_thread(int irq, void *data)
8473 {
8474         struct hfi1_ctxtdata *rcd = data;
8475         int present;
8476 
8477         /* receive interrupt is still blocked from the IRQ handler */
8478         (void)rcd->do_interrupt(rcd, 1);
8479 
8480         /*
8481          * The packet processor will only return if it detected no more
8482          * packets.  Hold IRQs here so we can safely clear the interrupt and
8483          * recheck for a packet that may have arrived after the previous
8484          * check and the interrupt clear.  If a packet arrived, force another
8485          * interrupt.
8486          */
8487         local_irq_disable();
8488         clear_recv_intr(rcd);
8489         present = check_packet_present(rcd);
8490         if (present)
8491                 force_recv_intr(rcd);
8492         local_irq_enable();
8493 
8494         return IRQ_HANDLED;
8495 }
8496 
8497 /* ========================================================================= */
8498 
8499 u32 read_physical_state(struct hfi1_devdata *dd)
8500 {
8501         u64 reg;
8502 
8503         reg = read_csr(dd, DC_DC8051_STS_CUR_STATE);
8504         return (reg >> DC_DC8051_STS_CUR_STATE_PORT_SHIFT)
8505                                 & DC_DC8051_STS_CUR_STATE_PORT_MASK;
8506 }
8507 
8508 u32 read_logical_state(struct hfi1_devdata *dd)
8509 {
8510         u64 reg;
8511 
8512         reg = read_csr(dd, DCC_CFG_PORT_CONFIG);
8513         return (reg >> DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT)
8514                                 & DCC_CFG_PORT_CONFIG_LINK_STATE_MASK;
8515 }
8516 
8517 static void set_logical_state(struct hfi1_devdata *dd, u32 chip_lstate)
8518 {
8519         u64 reg;
8520 
8521         reg = read_csr(dd, DCC_CFG_PORT_CONFIG);
8522         /* clear current state, set new state */
8523         reg &= ~DCC_CFG_PORT_CONFIG_LINK_STATE_SMASK;
8524         reg |= (u64)chip_lstate << DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT;
8525         write_csr(dd, DCC_CFG_PORT_CONFIG, reg);
8526 }
8527 
8528 /*
8529  * Use the 8051 to read a LCB CSR.
8530  */
8531 static int read_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 *data)
8532 {
8533         u32 regno;
8534         int ret;
8535 
8536         if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
8537                 if (acquire_lcb_access(dd, 0) == 0) {
8538                         *data = read_csr(dd, addr);
8539                         release_lcb_access(dd, 0);
8540                         return 0;
8541                 }
8542                 return -EBUSY;
8543         }
8544 
8545         /* register is an index of LCB registers: (offset - base) / 8 */
8546         regno = (addr - DC_LCB_CFG_RUN) >> 3;
8547         ret = do_8051_command(dd, HCMD_READ_LCB_CSR, regno, data);
8548         if (ret != HCMD_SUCCESS)
8549                 return -EBUSY;
8550         return 0;
8551 }
8552 
8553 /*
8554  * Provide a cache for some of the LCB registers in case the LCB is
8555  * unavailable.
8556  * (The LCB is unavailable in certain link states, for example.)
8557  */
8558 struct lcb_datum {
8559         u32 off;
8560         u64 val;
8561 };
8562 
8563 static struct lcb_datum lcb_cache[] = {
8564         { DC_LCB_ERR_INFO_RX_REPLAY_CNT, 0},
8565         { DC_LCB_ERR_INFO_SEQ_CRC_CNT, 0 },
8566         { DC_LCB_ERR_INFO_REINIT_FROM_PEER_CNT, 0 },
8567 };
8568 
8569 static void update_lcb_cache(struct hfi1_devdata *dd)
8570 {
8571         int i;
8572         int ret;
8573         u64 val;
8574 
8575         for (i = 0; i < ARRAY_SIZE(lcb_cache); i++) {
8576                 ret = read_lcb_csr(dd, lcb_cache[i].off, &val);
8577 
8578                 /* Update if we get good data */
8579                 if (likely(ret != -EBUSY))
8580                         lcb_cache[i].val = val;
8581         }
8582 }
8583 
8584 static int read_lcb_cache(u32 off, u64 *val)
8585 {
8586         int i;
8587 
8588         for (i = 0; i < ARRAY_SIZE(lcb_cache); i++) {
8589                 if (lcb_cache[i].off == off) {
8590                         *val = lcb_cache[i].val;
8591                         return 0;
8592                 }
8593         }
8594 
8595         pr_warn("%s bad offset 0x%x\n", __func__, off);
8596         return -1;
8597 }
8598 
8599 /*
8600  * Read an LCB CSR.  Access may not be in host control, so check.
8601  * Return 0 on success, -EBUSY on failure.
8602  */
8603 int read_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 *data)
8604 {
8605         struct hfi1_pportdata *ppd = dd->pport;
8606 
8607         /* if up, go through the 8051 for the value */
8608         if (ppd->host_link_state & HLS_UP)
8609                 return read_lcb_via_8051(dd, addr, data);
8610         /* if going up or down, check the cache, otherwise, no access */
8611         if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE)) {
8612                 if (read_lcb_cache(addr, data))
8613                         return -EBUSY;
8614                 return 0;
8615         }
8616 
8617         /* otherwise, host has access */
8618         *data = read_csr(dd, addr);
8619         return 0;
8620 }
8621 
8622 /*
8623  * Use the 8051 to write a LCB CSR.
8624  */
8625 static int write_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 data)
8626 {
8627         u32 regno;
8628         int ret;
8629 
8630         if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR ||
8631             (dd->dc8051_ver < dc8051_ver(0, 20, 0))) {
8632                 if (acquire_lcb_access(dd, 0) == 0) {
8633                         write_csr(dd, addr, data);
8634                         release_lcb_access(dd, 0);
8635                         return 0;
8636                 }
8637                 return -EBUSY;
8638         }
8639 
8640         /* register is an index of LCB registers: (offset - base) / 8 */
8641         regno = (addr - DC_LCB_CFG_RUN) >> 3;
8642         ret = do_8051_command(dd, HCMD_WRITE_LCB_CSR, regno, &data);
8643         if (ret != HCMD_SUCCESS)
8644                 return -EBUSY;
8645         return 0;
8646 }
8647 
8648 /*
8649  * Write an LCB CSR.  Access may not be in host control, so check.
8650  * Return 0 on success, -EBUSY on failure.
8651  */
8652 int write_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 data)
8653 {
8654         struct hfi1_pportdata *ppd = dd->pport;
8655 
8656         /* if up, go through the 8051 for the value */
8657         if (ppd->host_link_state & HLS_UP)
8658                 return write_lcb_via_8051(dd, addr, data);
8659         /* if going up or down, no access */
8660         if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE))
8661                 return -EBUSY;
8662         /* otherwise, host has access */
8663         write_csr(dd, addr, data);
8664         return 0;
8665 }
8666 
8667 /*
8668  * Returns:
8669  *      < 0 = Linux error, not able to get access
8670  *      > 0 = 8051 command RETURN_CODE
8671  */
8672 static int do_8051_command(struct hfi1_devdata *dd, u32 type, u64 in_data,
8673                            u64 *out_data)
8674 {
8675         u64 reg, completed;
8676         int return_code;
8677         unsigned long timeout;
8678 
8679         hfi1_cdbg(DC8051, "type %d, data 0x%012llx", type, in_data);
8680 
8681         mutex_lock(&dd->dc8051_lock);
8682 
8683         /* We can't send any commands to the 8051 if it's in reset */
8684         if (dd->dc_shutdown) {
8685                 return_code = -ENODEV;
8686                 goto fail;
8687         }
8688 
8689         /*
8690          * If an 8051 host command timed out previously, then the 8051 is
8691          * stuck.
8692          *
8693          * On first timeout, attempt to reset and restart the entire DC
8694          * block (including 8051). (Is this too big of a hammer?)
8695          *
8696          * If the 8051 times out a second time, the reset did not bring it
8697          * back to healthy life. In that case, fail any subsequent commands.
8698          */
8699         if (dd->dc8051_timed_out) {
8700                 if (dd->dc8051_timed_out > 1) {
8701                         dd_dev_err(dd,
8702                                    "Previous 8051 host command timed out, skipping command %u\n",
8703                                    type);
8704                         return_code = -ENXIO;
8705                         goto fail;
8706                 }
8707                 _dc_shutdown(dd);
8708                 _dc_start(dd);
8709         }
8710 
8711         /*
8712          * If there is no timeout, then the 8051 command interface is
8713          * waiting for a command.
8714          */
8715 
8716         /*
8717          * When writing a LCB CSR, out_data contains the full value to
8718          * to be written, while in_data contains the relative LCB
8719          * address in 7:0.  Do the work here, rather than the caller,
8720          * of distrubting the write data to where it needs to go:
8721          *
8722          * Write data
8723          *   39:00 -> in_data[47:8]
8724          *   47:40 -> DC8051_CFG_EXT_DEV_0.RETURN_CODE
8725          *   63:48 -> DC8051_CFG_EXT_DEV_0.RSP_DATA
8726          */
8727         if (type == HCMD_WRITE_LCB_CSR) {
8728                 in_data |= ((*out_data) & 0xffffffffffull) << 8;
8729                 /* must preserve COMPLETED - it is tied to hardware */
8730                 reg = read_csr(dd, DC_DC8051_CFG_EXT_DEV_0);
8731                 reg &= DC_DC8051_CFG_EXT_DEV_0_COMPLETED_SMASK;
8732                 reg |= ((((*out_data) >> 40) & 0xff) <<
8733                                 DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT)
8734                       | ((((*out_data) >> 48) & 0xffff) <<
8735                                 DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
8736                 write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, reg);
8737         }
8738 
8739         /*
8740          * Do two writes: the first to stabilize the type and req_data, the
8741          * second to activate.
8742          */
8743         reg = ((u64)type & DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_MASK)
8744                         << DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_SHIFT
8745                 | (in_data & DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_MASK)
8746                         << DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_SHIFT;
8747         write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg);
8748         reg |= DC_DC8051_CFG_HOST_CMD_0_REQ_NEW_SMASK;
8749         write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg);
8750 
8751         /* wait for completion, alternate: interrupt */
8752         timeout = jiffies + msecs_to_jiffies(DC8051_COMMAND_TIMEOUT);
8753         while (1) {
8754                 reg = read_csr(dd, DC_DC8051_CFG_HOST_CMD_1);
8755                 completed = reg & DC_DC8051_CFG_HOST_CMD_1_COMPLETED_SMASK;
8756                 if (completed)
8757                         break;
8758                 if (time_after(jiffies, timeout)) {
8759                         dd->dc8051_timed_out++;
8760                         dd_dev_err(dd, "8051 host command %u timeout\n", type);
8761                         if (out_data)
8762                                 *out_data = 0;
8763                         return_code = -ETIMEDOUT;
8764                         goto fail;
8765                 }
8766                 udelay(2);
8767         }
8768 
8769         if (out_data) {
8770                 *out_data = (reg >> DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_SHIFT)
8771                                 & DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_MASK;
8772                 if (type == HCMD_READ_LCB_CSR) {
8773                         /* top 16 bits are in a different register */
8774                         *out_data |= (read_csr(dd, DC_DC8051_CFG_EXT_DEV_1)
8775                                 & DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SMASK)
8776                                 << (48
8777                                     - DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT);
8778                 }
8779         }
8780         return_code = (reg >> DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_SHIFT)
8781                                 & DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_MASK;
8782         dd->dc8051_timed_out = 0;
8783         /*
8784          * Clear command for next user.
8785          */
8786         write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, 0);
8787 
8788 fail:
8789         mutex_unlock(&dd->dc8051_lock);
8790         return return_code;
8791 }
8792 
8793 static int set_physical_link_state(struct hfi1_devdata *dd, u64 state)
8794 {
8795         return do_8051_command(dd, HCMD_CHANGE_PHY_STATE, state, NULL);
8796 }
8797 
8798 int load_8051_config(struct hfi1_devdata *dd, u8 field_id,
8799                      u8 lane_id, u32 config_data)
8800 {
8801         u64 data;
8802         int ret;
8803 
8804         data = (u64)field_id << LOAD_DATA_FIELD_ID_SHIFT
8805                 | (u64)lane_id << LOAD_DATA_LANE_ID_SHIFT
8806                 | (u64)config_data << LOAD_DATA_DATA_SHIFT;
8807         ret = do_8051_command(dd, HCMD_LOAD_CONFIG_DATA, data, NULL);
8808         if (ret != HCMD_SUCCESS) {
8809                 dd_dev_err(dd,
8810                            "load 8051 config: field id %d, lane %d, err %d\n",
8811                            (int)field_id, (int)lane_id, ret);
8812         }
8813         return ret;
8814 }
8815 
8816 /*
8817  * Read the 8051 firmware "registers".  Use the RAM directly.  Always
8818  * set the result, even on error.
8819  * Return 0 on success, -errno on failure
8820  */
8821 int read_8051_config(struct hfi1_devdata *dd, u8 field_id, u8 lane_id,
8822                      u32 *result)
8823 {
8824         u64 big_data;
8825         u32 addr;
8826         int ret;
8827 
8828         /* address start depends on the lane_id */
8829         if (lane_id < 4)
8830                 addr = (4 * NUM_GENERAL_FIELDS)
8831                         + (lane_id * 4 * NUM_LANE_FIELDS);
8832         else
8833                 addr = 0;
8834         addr += field_id * 4;
8835 
8836         /* read is in 8-byte chunks, hardware will truncate the address down */
8837         ret = read_8051_data(dd, addr, 8, &big_data);
8838 
8839         if (ret == 0) {
8840                 /* extract the 4 bytes we want */
8841                 if (addr & 0x4)
8842                         *result = (u32)(big_data >> 32);
8843                 else
8844                         *result = (u32)big_data;
8845         } else {
8846                 *result = 0;
8847                 dd_dev_err(dd, "%s: direct read failed, lane %d, field %d!\n",
8848                            __func__, lane_id, field_id);
8849         }
8850 
8851         return ret;
8852 }
8853 
8854 static int write_vc_local_phy(struct hfi1_devdata *dd, u8 power_management,
8855                               u8 continuous)
8856 {
8857         u32 frame;
8858 
8859         frame = continuous << CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT
8860                 | power_management << POWER_MANAGEMENT_SHIFT;
8861         return load_8051_config(dd, VERIFY_CAP_LOCAL_PHY,
8862                                 GENERAL_CONFIG, frame);
8863 }
8864 
8865 static int write_vc_local_fabric(struct hfi1_devdata *dd, u8 vau, u8 z, u8 vcu,
8866                                  u16 vl15buf, u8 crc_sizes)
8867 {
8868         u32 frame;
8869 
8870         frame = (u32)vau << VAU_SHIFT
8871                 | (u32)z << Z_SHIFT
8872                 | (u32)vcu << VCU_SHIFT
8873                 | (u32)vl15buf << VL15BUF_SHIFT
8874                 | (u32)crc_sizes << CRC_SIZES_SHIFT;
8875         return load_8051_config(dd, VERIFY_CAP_LOCAL_FABRIC,
8876                                 GENERAL_CONFIG, frame);
8877 }
8878 
8879 static void read_vc_local_link_mode(struct hfi1_devdata *dd, u8 *misc_bits,
8880                                     u8 *flag_bits, u16 *link_widths)
8881 {
8882         u32 frame;
8883 
8884         read_8051_config(dd, VERIFY_CAP_LOCAL_LINK_MODE, GENERAL_CONFIG,
8885                          &frame);
8886         *misc_bits = (frame >> MISC_CONFIG_BITS_SHIFT) & MISC_CONFIG_BITS_MASK;
8887         *flag_bits = (frame >> LOCAL_FLAG_BITS_SHIFT) & LOCAL_FLAG_BITS_MASK;
8888         *link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK;
8889 }
8890 
8891 static int write_vc_local_link_mode(struct hfi1_devdata *dd,
8892                                     u8 misc_bits,
8893                                     u8 flag_bits,
8894                                     u16 link_widths)
8895 {
8896         u32 frame;
8897 
8898         frame = (u32)misc_bits << MISC_CONFIG_BITS_SHIFT
8899                 | (u32)flag_bits << LOCAL_FLAG_BITS_SHIFT
8900                 | (u32)link_widths << LINK_WIDTH_SHIFT;
8901         return load_8051_config(dd, VERIFY_CAP_LOCAL_LINK_MODE, GENERAL_CONFIG,
8902                      frame);
8903 }
8904 
8905 static int write_local_device_id(struct hfi1_devdata *dd, u16 device_id,
8906                                  u8 device_rev)
8907 {
8908         u32 frame;
8909 
8910         frame = ((u32)device_id << LOCAL_DEVICE_ID_SHIFT)
8911                 | ((u32)device_rev << LOCAL_DEVICE_REV_SHIFT);
8912         return load_8051_config(dd, LOCAL_DEVICE_ID, GENERAL_CONFIG, frame);
8913 }
8914 
8915 static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
8916                                   u8 *device_rev)
8917 {
8918         u32 frame;
8919 
8920         read_8051_config(dd, REMOTE_DEVICE_ID, GENERAL_CONFIG, &frame);
8921         *device_id = (frame >> REMOTE_DEVICE_ID_SHIFT) & REMOTE_DEVICE_ID_MASK;
8922         *device_rev = (frame >> REMOTE_DEVICE_REV_SHIFT)
8923                         & REMOTE_DEVICE_REV_MASK;
8924 }
8925 
8926 int write_host_interface_version(struct hfi1_devdata *dd, u8 version)
8927 {
8928         u32 frame;
8929         u32 mask;
8930 
8931         mask = (HOST_INTERFACE_VERSION_MASK << HOST_INTERFACE_VERSION_SHIFT);
8932         read_8051_config(dd, RESERVED_REGISTERS, GENERAL_CONFIG, &frame);
8933         /* Clear, then set field */
8934         frame &= ~mask;
8935         frame |= ((u32)version << HOST_INTERFACE_VERSION_SHIFT);
8936         return load_8051_config(dd, RESERVED_REGISTERS, GENERAL_CONFIG,
8937                                 frame);
8938 }
8939 
8940 void read_misc_status(struct hfi1_devdata *dd, u8 *ver_major, u8 *ver_minor,
8941                       u8 *ver_patch)
8942 {
8943         u32 frame;
8944 
8945         read_8051_config(dd, MISC_STATUS, GENERAL_CONFIG, &frame);
8946         *ver_major = (frame >> STS_FM_VERSION_MAJOR_SHIFT) &
8947                 STS_FM_VERSION_MAJOR_MASK;
8948         *ver_minor = (frame >> STS_FM_VERSION_MINOR_SHIFT) &
8949                 STS_FM_VERSION_MINOR_MASK;
8950 
8951         read_8051_config(dd, VERSION_PATCH, GENERAL_CONFIG, &frame);
8952         *ver_patch = (frame >> STS_FM_VERSION_PATCH_SHIFT) &
8953                 STS_FM_VERSION_PATCH_MASK;
8954 }
8955 
8956 static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management,
8957                                u8 *continuous)
8958 {
8959         u32 frame;
8960 
8961         read_8051_config(dd, VERIFY_CAP_REMOTE_PHY, GENERAL_CONFIG, &frame);
8962         *power_management = (frame >> POWER_MANAGEMENT_SHIFT)
8963                                         & POWER_MANAGEMENT_MASK;
8964         *continuous = (frame >> CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT)
8965                                         & CONTINIOUS_REMOTE_UPDATE_SUPPORT_MASK;
8966 }
8967 
8968 static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z,
8969                                   u8 *vcu, u16 *vl15buf, u8 *crc_sizes)
8970 {
8971         u32 frame;
8972 
8973         read_8051_config(dd, VERIFY_CAP_REMOTE_FABRIC, GENERAL_CONFIG, &frame);
8974         *vau = (frame >> VAU_SHIFT) & VAU_MASK;
8975         *z = (frame >> Z_SHIFT) & Z_MASK;
8976         *vcu = (frame >> VCU_SHIFT) & VCU_MASK;
8977         *vl15buf = (frame >> VL15BUF_SHIFT) & VL15BUF_MASK;
8978         *crc_sizes = (frame >> CRC_SIZES_SHIFT) & CRC_SIZES_MASK;
8979 }
8980 
8981 static void read_vc_remote_link_width(struct hfi1_devdata *dd,
8982                                       u8 *remote_tx_rate,
8983                                       u16 *link_widths)
8984 {
8985         u32 frame;
8986 
8987         read_8051_config(dd, VERIFY_CAP_REMOTE_LINK_WIDTH, GENERAL_CONFIG,
8988                          &frame);
8989         *remote_tx_rate = (frame >> REMOTE_TX_RATE_SHIFT)
8990                                 & REMOTE_TX_RATE_MASK;
8991         *link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK;
8992 }
8993 
8994 static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx)
8995 {
8996         u32 frame;
8997 
8998         read_8051_config(dd, LOCAL_LNI_INFO, GENERAL_CONFIG, &frame);
8999         *enable_lane_rx = (frame >> ENABLE_LANE_RX_SHIFT) & ENABLE_LANE_RX_MASK;
9000 }
9001 
9002 static void read_last_local_state(struct hfi1_devdata *dd, u32 *lls)
9003 {
9004         read_8051_config(dd, LAST_LOCAL_STATE_COMPLETE, GENERAL_CONFIG, lls);
9005 }
9006 
9007 static void read_last_remote_state(struct hfi1_devdata *dd, u32 *lrs)
9008 {
9009         read_8051_config(dd, LAST_REMOTE_STATE_COMPLETE, GENERAL_CONFIG, lrs);
9010 }
9011 
9012 void hfi1_read_link_quality(struct hfi1_devdata *dd, u8 *link_quality)
9013 {
9014         u32 frame;
9015         int ret;
9016 
9017         *link_quality = 0;
9018         if (dd->pport->host_link_state & HLS_UP) {
9019                 ret = read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG,
9020                                        &frame);
9021                 if (ret == 0)
9022                         *link_quality = (frame >> LINK_QUALITY_SHIFT)
9023                                                 & LINK_QUALITY_MASK;
9024         }
9025 }
9026 
9027 static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc)
9028 {
9029         u32 frame;
9030 
9031         read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG, &frame);
9032         *pdrrc = (frame >> DOWN_REMOTE_REASON_SHIFT) & DOWN_REMOTE_REASON_MASK;
9033 }
9034 
9035 static void read_link_down_reason(struct hfi1_devdata *dd, u8 *ldr)
9036 {
9037         u32 frame;
9038 
9039         read_8051_config(dd, LINK_DOWN_REASON, GENERAL_CONFIG, &frame);
9040         *ldr = (frame & 0xff);
9041 }
9042 
9043 static int read_tx_settings(struct hfi1_devdata *dd,
9044                             u8 *enable_lane_tx,
9045                             u8 *tx_polarity_inversion,
9046                             u8 *rx_polarity_inversion,
9047                             u8 *max_rate)
9048 {
9049         u32 frame;
9050         int ret;
9051 
9052         ret = read_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, &frame);
9053         *enable_lane_tx = (frame >> ENABLE_LANE_TX_SHIFT)
9054                                 & ENABLE_LANE_TX_MASK;
9055         *tx_polarity_inversion = (frame >> TX_POLARITY_INVERSION_SHIFT)
9056                                 & TX_POLARITY_INVERSION_MASK;
9057         *rx_polarity_inversion = (frame >> RX_POLARITY_INVERSION_SHIFT)
9058                                 & RX_POLARITY_INVERSION_MASK;
9059         *max_rate = (frame >> MAX_RATE_SHIFT) & MAX_RATE_MASK;
9060         return ret;
9061 }
9062 
9063 static int write_tx_settings(struct hfi1_devdata *dd,
9064                              u8 enable_lane_tx,
9065                              u8 tx_polarity_inversion,
9066                              u8 rx_polarity_inversion,
9067                              u8 max_rate)
9068 {
9069         u32 frame;
9070 
9071         /* no need to mask, all variable sizes match field widths */
9072         frame = enable_lane_tx << ENABLE_LANE_TX_SHIFT
9073                 | tx_polarity_inversion << TX_POLARITY_INVERSION_SHIFT
9074                 | rx_polarity_inversion << RX_POLARITY_INVERSION_SHIFT
9075                 | max_rate << MAX_RATE_SHIFT;
9076         return load_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, frame);
9077 }
9078 
9079 /*
9080  * Read an idle LCB message.
9081  *
9082  * Returns 0 on success, -EINVAL on error
9083  */
9084 static int read_idle_message(struct hfi1_devdata *dd, u64 type, u64 *data_out)
9085 {
9086         int ret;
9087 
9088         ret = do_8051_command(dd, HCMD_READ_LCB_IDLE_MSG, type, data_out);
9089         if (ret != HCMD_SUCCESS) {
9090                 dd_dev_err(dd, "read idle message: type %d, err %d\n",
9091                            (u32)type, ret);
9092                 return -EINVAL;
9093         }
9094         dd_dev_info(dd, "%s: read idle message 0x%llx\n", __func__, *data_out);
9095         /* return only the payload as we already know the type */
9096         *data_out >>= IDLE_PAYLOAD_SHIFT;
9097         return 0;
9098 }
9099 
9100 /*
9101  * Read an idle SMA message.  To be done in response to a notification from
9102  * the 8051.
9103  *
9104  * Returns 0 on success, -EINVAL on error
9105  */
9106 static int read_idle_sma(struct hfi1_devdata *dd, u64 *data)
9107 {
9108         return read_idle_message(dd, (u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT,
9109                                  data);
9110 }
9111 
9112 /*
9113  * Send an idle LCB message.
9114  *
9115  * Returns 0 on success, -EINVAL on error
9116  */
9117 static int send_idle_message(struct hfi1_devdata *dd, u64 data)
9118 {
9119         int ret;
9120 
9121         dd_dev_info(dd, "%s: sending idle message 0x%llx\n", __func__, data);
9122         ret = do_8051_command(dd, HCMD_SEND_LCB_IDLE_MSG, data, NULL);
9123         if (ret != HCMD_SUCCESS) {
9124                 dd_dev_err(dd, "send idle message: data 0x%llx, err %d\n",
9125                            data, ret);
9126                 return -EINVAL;
9127         }
9128         return 0;
9129 }
9130 
9131 /*
9132  * Send an idle SMA message.
9133  *
9134  * Returns 0 on success, -EINVAL on error
9135  */
9136 int send_idle_sma(struct hfi1_devdata *dd, u64 message)
9137 {
9138         u64 data;
9139 
9140         data = ((message & IDLE_PAYLOAD_MASK) << IDLE_PAYLOAD_SHIFT) |
9141                 ((u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT);
9142         return send_idle_message(dd, data);
9143 }
9144 
9145 /*
9146  * Initialize the LCB then do a quick link up.  This may or may not be
9147  * in loopback.
9148  *
9149  * return 0 on success, -errno on error
9150  */
9151 static int do_quick_linkup(struct hfi1_devdata *dd)
9152 {
9153         int ret;
9154 
9155         lcb_shutdown(dd, 0);
9156 
9157         if (loopback) {
9158                 /* LCB_CFG_LOOPBACK.VAL = 2 */
9159                 /* LCB_CFG_LANE_WIDTH.VAL = 0 */
9160                 write_csr(dd, DC_LCB_CFG_LOOPBACK,
9161                           IB_PACKET_TYPE << DC_LCB_CFG_LOOPBACK_VAL_SHIFT);
9162                 write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0);
9163         }
9164 
9165         /* start the LCBs */
9166         /* LCB_CFG_TX_FIFOS_RESET.VAL = 0 */
9167         write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
9168 
9169         /* simulator only loopback steps */
9170         if (loopback && dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
9171                 /* LCB_CFG_RUN.EN = 1 */
9172                 write_csr(dd, DC_LCB_CFG_RUN,
9173                           1ull << DC_LCB_CFG_RUN_EN_SHIFT);
9174 
9175                 ret = wait_link_transfer_active(dd, 10);
9176                 if (ret)
9177                         return ret;
9178 
9179                 write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP,
9180                           1ull << DC_LCB_CFG_ALLOW_LINK_UP_VAL_SHIFT);
9181         }
9182 
9183         if (!loopback) {
9184                 /*
9185                  * When doing quick linkup and not in loopback, both
9186                  * sides must be done with LCB set-up before either
9187                  * starts the quick linkup.  Put a delay here so that
9188                  * both sides can be started and have a chance to be
9189                  * done with LCB set up before resuming.
9190                  */
9191                 dd_dev_err(dd,
9192                            "Pausing for peer to be finished with LCB set up\n");
9193                 msleep(5000);
9194                 dd_dev_err(dd, "Continuing with quick linkup\n");
9195         }
9196 
9197         write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */
9198         set_8051_lcb_access(dd);
9199 
9200         /*
9201          * State "quick" LinkUp request sets the physical link state to
9202          * LinkUp without a verify capability sequence.
9203          * This state is in simulator v37 and later.
9204          */
9205         ret = set_physical_link_state(dd, PLS_QUICK_LINKUP);
9206         if (ret != HCMD_SUCCESS) {
9207                 dd_dev_err(dd,
9208                            "%s: set physical link state to quick LinkUp failed with return %d\n",
9209                            __func__, ret);
9210 
9211                 set_host_lcb_access(dd);
9212                 write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */
9213 
9214                 if (ret >= 0)
9215                         ret = -EINVAL;
9216                 return ret;
9217         }
9218 
9219         return 0; /* success */
9220 }
9221 
9222 /*
9223  * Do all special steps to set up loopback.
9224  */
9225 static int init_loopback(struct hfi1_devdata *dd)
9226 {
9227         dd_dev_info(dd, "Entering loopback mode\n");
9228 
9229         /* all loopbacks should disable self GUID check */
9230         write_csr(dd, DC_DC8051_CFG_MODE,
9231                   (read_csr(dd, DC_DC8051_CFG_MODE) | DISABLE_SELF_GUID_CHECK));
9232 
9233         /*
9234          * The simulator has only one loopback option - LCB.  Switch
9235          * to that option, which includes quick link up.
9236          *
9237          * Accept all valid loopback values.
9238          */
9239         if ((dd->icode == ICODE_FUNCTIONAL_SIMULATOR) &&
9240             (loopback == LOOPBACK_SERDES || loopback == LOOPBACK_LCB ||
9241              loopback == LOOPBACK_CABLE)) {
9242                 loopback = LOOPBACK_LCB;
9243                 quick_linkup = 1;
9244                 return 0;
9245         }
9246 
9247         /*
9248          * SerDes loopback init sequence is handled in set_local_link_attributes
9249          */
9250         if (loopback == LOOPBACK_SERDES)
9251                 return 0;
9252 
9253         /* LCB loopback - handled at poll time */
9254         if (loopback == LOOPBACK_LCB) {
9255                 quick_linkup = 1; /* LCB is always quick linkup */
9256 
9257                 /* not supported in emulation due to emulation RTL changes */
9258                 if (dd->icode == ICODE_FPGA_EMULATION) {
9259                         dd_dev_err(dd,
9260                                    "LCB loopback not supported in emulation\n");
9261                         return -EINVAL;
9262                 }
9263                 return 0;
9264         }
9265 
9266         /* external cable loopback requires no extra steps */
9267         if (loopback == LOOPBACK_CABLE)
9268                 return 0;
9269 
9270         dd_dev_err(dd, "Invalid loopback mode %d\n", loopback);
9271         return -EINVAL;
9272 }
9273 
9274 /*
9275  * Translate from the OPA_LINK_WIDTH handed to us by the FM to bits
9276  * used in the Verify Capability link width attribute.
9277  */
9278 static u16 opa_to_vc_link_widths(u16 opa_widths)
9279 {
9280         int i;
9281         u16 result = 0;
9282 
9283         static const struct link_bits {
9284                 u16 from;
9285                 u16 to;
9286         } opa_link_xlate[] = {
9287                 { OPA_LINK_WIDTH_1X, 1 << (1 - 1)  },
9288                 { OPA_LINK_WIDTH_2X, 1 << (2 - 1)  },
9289                 { OPA_LINK_WIDTH_3X, 1 << (3 - 1)  },
9290                 { OPA_LINK_WIDTH_4X, 1 << (4 - 1)  },
9291         };
9292 
9293         for (i = 0; i < ARRAY_SIZE(opa_link_xlate); i++) {
9294                 if (opa_widths & opa_link_xlate[i].from)
9295                         result |= opa_link_xlate[i].to;
9296         }
9297         return result;
9298 }
9299 
9300 /*
9301  * Set link attributes before moving to polling.
9302  */
9303 static int set_local_link_attributes(struct hfi1_pportdata *ppd)
9304 {
9305         struct hfi1_devdata *dd = ppd->dd;
9306         u8 enable_lane_tx;
9307         u8 tx_polarity_inversion;
9308         u8 rx_polarity_inversion;
9309         int ret;
9310         u32 misc_bits = 0;
9311         /* reset our fabric serdes to clear any lingering problems */
9312         fabric_serdes_reset(dd);
9313 
9314         /* set the local tx rate - need to read-modify-write */
9315         ret = read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion,
9316                                &rx_polarity_inversion, &ppd->local_tx_rate);
9317         if (ret)
9318                 goto set_local_link_attributes_fail;
9319 
9320         if (dd->dc8051_ver < dc8051_ver(0, 20, 0)) {
9321                 /* set the tx rate to the fastest enabled */
9322                 if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G)
9323                         ppd->local_tx_rate = 1;
9324                 else
9325                         ppd->local_tx_rate = 0;
9326         } else {
9327                 /* set the tx rate to all enabled */
9328                 ppd->local_tx_rate = 0;
9329                 if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G)
9330                         ppd->local_tx_rate |= 2;
9331                 if (ppd->link_speed_enabled & OPA_LINK_SPEED_12_5G)
9332                         ppd->local_tx_rate |= 1;
9333         }
9334 
9335         enable_lane_tx = 0xF; /* enable all four lanes */
9336         ret = write_tx_settings(dd, enable_lane_tx, tx_polarity_inversion,
9337                                 rx_polarity_inversion, ppd->local_tx_rate);
9338         if (ret != HCMD_SUCCESS)
9339                 goto set_local_link_attributes_fail;
9340 
9341         ret = write_host_interface_version(dd, HOST_INTERFACE_VERSION);
9342         if (ret != HCMD_SUCCESS) {
9343                 dd_dev_err(dd,
9344                            "Failed to set host interface version, return 0x%x\n",
9345                            ret);
9346                 goto set_local_link_attributes_fail;
9347         }
9348 
9349         /*
9350          * DC supports continuous updates.
9351          */
9352         ret = write_vc_local_phy(dd,
9353                                  0 /* no power management */,
9354                                  1 /* continuous updates */);
9355         if (ret != HCMD_SUCCESS)
9356                 goto set_local_link_attributes_fail;
9357 
9358         /* z=1 in the next call: AU of 0 is not supported by the hardware */
9359         ret = write_vc_local_fabric(dd, dd->vau, 1, dd->vcu, dd->vl15_init,
9360                                     ppd->port_crc_mode_enabled);
9361         if (ret != HCMD_SUCCESS)
9362                 goto set_local_link_attributes_fail;
9363 
9364         /*
9365          * SerDes loopback init sequence requires
9366          * setting bit 0 of MISC_CONFIG_BITS
9367          */
9368         if (loopback == LOOPBACK_SERDES)
9369                 misc_bits |= 1 << LOOPBACK_SERDES_CONFIG_BIT_MASK_SHIFT;
9370 
9371         /*
9372          * An external device configuration request is used to reset the LCB
9373          * to retry to obtain operational lanes when the first attempt is
9374          * unsuccesful.
9375          */
9376         if (dd->dc8051_ver >= dc8051_ver(1, 25, 0))
9377                 misc_bits |= 1 << EXT_CFG_LCB_RESET_SUPPORTED_SHIFT;
9378 
9379         ret = write_vc_local_link_mode(dd, misc_bits, 0,
9380                                        opa_to_vc_link_widths(
9381                                                 ppd->link_width_enabled));
9382         if (ret != HCMD_SUCCESS)
9383                 goto set_local_link_attributes_fail;
9384 
9385         /* let peer know who we are */
9386         ret = write_local_device_id(dd, dd->pcidev->device, dd->minrev);
9387         if (ret == HCMD_SUCCESS)
9388                 return 0;
9389 
9390 set_local_link_attributes_fail:
9391         dd_dev_err(dd,
9392                    "Failed to set local link attributes, return 0x%x\n",
9393                    ret);
9394         return ret;
9395 }
9396 
9397 /*
9398  * Call this to start the link.
9399  * Do not do anything if the link is disabled.
9400  * Returns 0 if link is disabled, moved to polling, or the driver is not ready.
9401  */
9402 int start_link(struct hfi1_pportdata *ppd)
9403 {
9404         /*
9405          * Tune the SerDes to a ballpark setting for optimal signal and bit
9406          * error rate.  Needs to be done before starting the link.
9407          */
9408         tune_serdes(ppd);
9409 
9410         if (!ppd->driver_link_ready) {
9411                 dd_dev_info(ppd->dd,
9412                             "%s: stopping link start because driver is not ready\n",
9413                             __func__);
9414                 return 0;
9415         }
9416 
9417         /*
9418          * FULL_MGMT_P_KEY is cleared from the pkey table, so that the
9419          * pkey table can be configured properly if the HFI unit is connected
9420          * to switch port with MgmtAllowed=NO
9421          */
9422         clear_full_mgmt_pkey(ppd);
9423 
9424         return set_link_state(ppd, HLS_DN_POLL);
9425 }
9426 
9427 static void wait_for_qsfp_init(struct hfi1_pportdata *ppd)
9428 {
9429         struct hfi1_devdata *dd = ppd->dd;
9430         u64 mask;
9431         unsigned long timeout;
9432 
9433         /*
9434          * Some QSFP cables have a quirk that asserts the IntN line as a side
9435          * effect of power up on plug-in. We ignore this false positive
9436          * interrupt until the module has finished powering up by waiting for
9437          * a minimum timeout of the module inrush initialization time of
9438          * 500 ms (SFF 8679 Table 5-6) to ensure the voltage rails in the
9439          * module have stabilized.
9440          */
9441         msleep(500);
9442 
9443         /*
9444          * Check for QSFP interrupt for t_init (SFF 8679 Table 8-1)
9445          */
9446         timeout = jiffies + msecs_to_jiffies(2000);
9447         while (1) {
9448                 mask = read_csr(dd, dd->hfi1_id ?
9449                                 ASIC_QSFP2_IN : ASIC_QSFP1_IN);
9450                 if (!(mask & QSFP_HFI0_INT_N))
9451                         break;
9452                 if (time_after(jiffies, timeout)) {
9453                         dd_dev_info(dd, "%s: No IntN detected, reset complete\n",
9454                                     __func__);
9455                         break;
9456                 }
9457                 udelay(2);
9458         }
9459 }
9460 
9461 static void set_qsfp_int_n(struct hfi1_pportdata *ppd, u8 enable)
9462 {
9463         struct hfi1_devdata *dd = ppd->dd;
9464         u64 mask;
9465 
9466         mask = read_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK);
9467         if (enable) {
9468                 /*
9469                  * Clear the status register to avoid an immediate interrupt
9470                  * when we re-enable the IntN pin
9471                  */
9472                 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR : ASIC_QSFP1_CLEAR,
9473                           QSFP_HFI0_INT_N);
9474                 mask |= (u64)QSFP_HFI0_INT_N;
9475         } else {
9476                 mask &= ~(u64)QSFP_HFI0_INT_N;
9477         }
9478         write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK, mask);
9479 }
9480 
9481 int reset_qsfp(struct hfi1_pportdata *ppd)
9482 {
9483         struct hfi1_devdata *dd = ppd->dd;
9484         u64 mask, qsfp_mask;
9485 
9486         /* Disable INT_N from triggering QSFP interrupts */
9487         set_qsfp_int_n(ppd, 0);
9488 
9489         /* Reset the QSFP */
9490         mask = (u64)QSFP_HFI0_RESET_N;
9491 
9492         qsfp_mask = read_csr(dd,
9493                              dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT);
9494         qsfp_mask &= ~mask;
9495         write_csr(dd,
9496                   dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask);
9497 
9498         udelay(10);
9499 
9500         qsfp_mask |= mask;
9501         write_csr(dd,
9502                   dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask);
9503 
9504         wait_for_qsfp_init(ppd);
9505 
9506         /*
9507          * Allow INT_N to trigger the QSFP interrupt to watch
9508          * for alarms and warnings
9509          */
9510         set_qsfp_int_n(ppd, 1);
9511 
9512         /*
9513          * After the reset, AOC transmitters are enabled by default. They need
9514          * to be turned off to complete the QSFP setup before they can be
9515          * enabled again.
9516          */
9517         return set_qsfp_tx(ppd, 0);
9518 }
9519 
9520 static int handle_qsfp_error_conditions(struct hfi1_pportdata *ppd,
9521                                         u8 *qsfp_interrupt_status)
9522 {
9523         struct hfi1_devdata *dd = ppd->dd;
9524 
9525         if ((qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_ALARM) ||
9526             (qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_WARNING))
9527                 dd_dev_err(dd, "%s: QSFP cable temperature too high\n",
9528                            __func__);
9529 
9530         if ((qsfp_interrupt_status[0] & QSFP_LOW_TEMP_ALARM) ||
9531             (qsfp_interrupt_status[0] & QSFP_LOW_TEMP_WARNING))
9532                 dd_dev_err(dd, "%s: QSFP cable temperature too low\n",
9533                            __func__);
9534 
9535         /*
9536          * The remaining alarms/warnings don't matter if the link is down.
9537          */
9538         if (ppd->host_link_state & HLS_DOWN)
9539                 return 0;
9540 
9541         if ((qsfp_interrupt_status[1] & QSFP_HIGH_VCC_ALARM) ||
9542             (qsfp_interrupt_status[1] & QSFP_HIGH_VCC_WARNING))
9543                 dd_dev_err(dd, "%s: QSFP supply voltage too high\n",
9544                            __func__);
9545 
9546         if ((qsfp_interrupt_status[1] & QSFP_LOW_VCC_ALARM) ||
9547             (qsfp_interrupt_status[1] & QSFP_LOW_VCC_WARNING))
9548                 dd_dev_err(dd, "%s: QSFP supply voltage too low\n",
9549                            __func__);
9550 
9551         /* Byte 2 is vendor specific */
9552 
9553         if ((qsfp_interrupt_status[3] & QSFP_HIGH_POWER_ALARM) ||
9554             (qsfp_interrupt_status[3] & QSFP_HIGH_POWER_WARNING))
9555                 dd_dev_err(dd, "%s: Cable RX channel 1/2 power too high\n",
9556                            __func__);
9557 
9558         if ((qsfp_interrupt_status[3] & QSFP_LOW_POWER_ALARM) ||
9559             (qsfp_interrupt_status[3] & QSFP_LOW_POWER_WARNING))
9560                 dd_dev_err(dd, "%s: Cable RX channel 1/2 power too low\n",
9561                            __func__);
9562 
9563         if ((qsfp_interrupt_status[4] & QSFP_HIGH_POWER_ALARM) ||
9564             (qsfp_interrupt_status[4] & QSFP_HIGH_POWER_WARNING))
9565                 dd_dev_err(dd, "%s: Cable RX channel 3/4 power too high\n",
9566                            __func__);
9567 
9568         if ((qsfp_interrupt_status[4] & QSFP_LOW_POWER_ALARM) ||
9569             (qsfp_interrupt_status[4] & QSFP_LOW_POWER_WARNING))
9570                 dd_dev_err(dd, "%s: Cable RX channel 3/4 power too low\n",
9571                            __func__);
9572 
9573         if ((qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_ALARM) ||
9574             (qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_WARNING))
9575                 dd_dev_err(dd, "%s: Cable TX channel 1/2 bias too high\n",
9576                            __func__);
9577 
9578         if ((qsfp_interrupt_status[5] & QSFP_LOW_BIAS_ALARM) ||
9579             (qsfp_interrupt_status[5] & QSFP_LOW_BIAS_WARNING))
9580                 dd_dev_err(dd, "%s: Cable TX channel 1/2 bias too low\n",
9581                            __func__);
9582 
9583         if ((qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_ALARM) ||
9584             (qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_WARNING))
9585                 dd_dev_err(dd, "%s: Cable TX channel 3/4 bias too high\n",
9586                            __func__);
9587 
9588         if ((qsfp_interrupt_status[6] & QSFP_LOW_BIAS_ALARM) ||
9589             (qsfp_interrupt_status[6] & QSFP_LOW_BIAS_WARNING))
9590                 dd_dev_err(dd, "%s: Cable TX channel 3/4 bias too low\n",
9591                            __func__);
9592 
9593         if ((qsfp_interrupt_status[7] & QSFP_HIGH_POWER_ALARM) ||
9594             (qsfp_interrupt_status[7] & QSFP_HIGH_POWER_WARNING))
9595                 dd_dev_err(dd, "%s: Cable TX channel 1/2 power too high\n",
9596                            __func__);
9597 
9598         if ((qsfp_interrupt_status[7] & QSFP_LOW_POWER_ALARM) ||
9599             (qsfp_interrupt_status[7] & QSFP_LOW_POWER_WARNING))
9600                 dd_dev_err(dd, "%s: Cable TX channel 1/2 power too low\n",
9601                            __func__);
9602 
9603         if ((qsfp_interrupt_status[8] & QSFP_HIGH_POWER_ALARM) ||
9604             (qsfp_interrupt_status[8] & QSFP_HIGH_POWER_WARNING))
9605                 dd_dev_err(dd, "%s: Cable TX channel 3/4 power too high\n",
9606                            __func__);
9607 
9608         if ((qsfp_interrupt_status[8] & QSFP_LOW_POWER_ALARM) ||
9609             (qsfp_interrupt_status[8] & QSFP_LOW_POWER_WARNING))
9610                 dd_dev_err(dd, "%s: Cable TX channel 3/4 power too low\n",
9611                            __func__);
9612 
9613         /* Bytes 9-10 and 11-12 are reserved */
9614         /* Bytes 13-15 are vendor specific */
9615 
9616         return 0;
9617 }
9618 
9619 /* This routine will only be scheduled if the QSFP module present is asserted */
9620 void qsfp_event(struct work_struct *work)
9621 {
9622         struct qsfp_data *qd;
9623         struct hfi1_pportdata *ppd;
9624         struct hfi1_devdata *dd;
9625 
9626         qd = container_of(work, struct qsfp_data, qsfp_work);
9627         ppd = qd->ppd;
9628         dd = ppd->dd;
9629 
9630         /* Sanity check */
9631         if (!qsfp_mod_present(ppd))
9632                 return;
9633 
9634         if (ppd->host_link_state == HLS_DN_DISABLE) {
9635                 dd_dev_info(ppd->dd,
9636                             "%s: stopping link start because link is disabled\n",
9637                             __func__);
9638                 return;
9639         }
9640 
9641         /*
9642          * Turn DC back on after cable has been re-inserted. Up until
9643          * now, the DC has been in reset to save power.
9644          */
9645         dc_start(dd);
9646 
9647         if (qd->cache_refresh_required) {
9648                 set_qsfp_int_n(ppd, 0);
9649 
9650                 wait_for_qsfp_init(ppd);
9651 
9652                 /*
9653                  * Allow INT_N to trigger the QSFP interrupt to watch
9654                  * for alarms and warnings
9655                  */
9656                 set_qsfp_int_n(ppd, 1);
9657 
9658                 start_link(ppd);
9659         }
9660 
9661         if (qd->check_interrupt_flags) {
9662                 u8 qsfp_interrupt_status[16] = {0,};
9663 
9664                 if (one_qsfp_read(ppd, dd->hfi1_id, 6,
9665                                   &qsfp_interrupt_status[0], 16) != 16) {
9666                         dd_dev_info(dd,
9667                                     "%s: Failed to read status of QSFP module\n",
9668                                     __func__);
9669                 } else {
9670                         unsigned long flags;
9671 
9672                         handle_qsfp_error_conditions(
9673                                         ppd, qsfp_interrupt_status);
9674                         spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
9675                         ppd->qsfp_info.check_interrupt_flags = 0;
9676                         spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
9677                                                flags);
9678                 }
9679         }
9680 }
9681 
9682 void init_qsfp_int(struct hfi1_devdata *dd)
9683 {
9684         struct hfi1_pportdata *ppd = dd->pport;
9685         u64 qsfp_mask;
9686 
9687         qsfp_mask = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N);
9688         /* Clear current status to avoid spurious interrupts */
9689         write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR : ASIC_QSFP1_CLEAR,
9690                   qsfp_mask);
9691         write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK,
9692                   qsfp_mask);
9693 
9694         set_qsfp_int_n(ppd, 0);
9695 
9696         /* Handle active low nature of INT_N and MODPRST_N pins */
9697         if (qsfp_mod_present(ppd))
9698                 qsfp_mask &= ~(u64)QSFP_HFI0_MODPRST_N;
9699         write_csr(dd,
9700                   dd->hfi1_id ? ASIC_QSFP2_INVERT : ASIC_QSFP1_INVERT,
9701                   qsfp_mask);
9702 
9703         /* Enable the appropriate QSFP IRQ source */
9704         if (!dd->hfi1_id)
9705                 set_intr_bits(dd, QSFP1_INT, QSFP1_INT, true);
9706         else
9707                 set_intr_bits(dd, QSFP2_INT, QSFP2_INT, true);
9708 }
9709 
9710 /*
9711  * Do a one-time initialize of the LCB block.
9712  */
9713 static void init_lcb(struct hfi1_devdata *dd)
9714 {
9715         /* simulator does not correctly handle LCB cclk loopback, skip */
9716         if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
9717                 return;
9718 
9719         /* the DC has been reset earlier in the driver load */
9720 
9721         /* set LCB for cclk loopback on the port */
9722         write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x01);
9723         write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0x00);
9724         write_csr(dd, DC_LCB_CFG_REINIT_AS_SLAVE, 0x00);
9725         write_csr(dd, DC_LCB_CFG_CNT_FOR_SKIP_STALL, 0x110);
9726         write_csr(dd, DC_LCB_CFG_CLK_CNTR, 0x08);
9727         write_csr(dd, DC_LCB_CFG_LOOPBACK, 0x02);
9728         write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x00);
9729 }
9730 
9731 /*
9732  * Perform a test read on the QSFP.  Return 0 on success, -ERRNO
9733  * on error.
9734  */
9735 static int test_qsfp_read(struct hfi1_pportdata *ppd)
9736 {
9737         int ret;
9738         u8 status;
9739 
9740         /*
9741          * Report success if not a QSFP or, if it is a QSFP, but the cable is
9742          * not present
9743          */
9744         if (ppd->port_type != PORT_TYPE_QSFP || !qsfp_mod_present(ppd))
9745                 return 0;
9746 
9747         /* read byte 2, the status byte */
9748         ret = one_qsfp_read(ppd, ppd->dd->hfi1_id, 2, &status, 1);
9749         if (ret < 0)
9750                 return ret;
9751         if (ret != 1)
9752                 return -EIO;
9753 
9754         return 0; /* success */
9755 }
9756 
9757 /*
9758  * Values for QSFP retry.
9759  *
9760  * Give up after 10s (20 x 500ms).  The overall timeout was empirically
9761  * arrived at from experience on a large cluster.
9762  */
9763 #define MAX_QSFP_RETRIES 20
9764 #define QSFP_RETRY_WAIT 500 /* msec */
9765 
9766 /*
9767  * Try a QSFP read.  If it fails, schedule a retry for later.
9768  * Called on first link activation after driver load.
9769  */
9770 static void try_start_link(struct hfi1_pportdata *ppd)
9771 {
9772         if (test_qsfp_read(ppd)) {
9773                 /* read failed */
9774                 if (ppd->qsfp_retry_count >= MAX_QSFP_RETRIES) {
9775                         dd_dev_err(ppd->dd, "QSFP not responding, giving up\n");
9776                         return;
9777                 }
9778                 dd_dev_info(ppd->dd,
9779                             "QSFP not responding, waiting and retrying %d\n",
9780                             (int)ppd->qsfp_retry_count);
9781                 ppd->qsfp_retry_count++;
9782                 queue_delayed_work(ppd->link_wq, &ppd->start_link_work,
9783                                    msecs_to_jiffies(QSFP_RETRY_WAIT));
9784                 return;
9785         }
9786         ppd->qsfp_retry_count = 0;
9787 
9788         start_link(ppd);
9789 }
9790 
9791 /*
9792  * Workqueue function to start the link after a delay.
9793  */
9794 void handle_start_link(struct work_struct *work)
9795 {
9796         struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
9797                                                   start_link_work.work);
9798         try_start_link(ppd);
9799 }
9800 
9801 int bringup_serdes(struct hfi1_pportdata *ppd)
9802 {
9803         struct hfi1_devdata *dd = ppd->dd;
9804         u64 guid;
9805         int ret;
9806 
9807         if (HFI1_CAP_IS_KSET(EXTENDED_PSN))
9808                 add_rcvctrl(dd, RCV_CTRL_RCV_EXTENDED_PSN_ENABLE_SMASK);
9809 
9810         guid = ppd->guids[HFI1_PORT_GUID_INDEX];
9811         if (!guid) {
9812                 if (dd->base_guid)
9813                         guid = dd->base_guid + ppd->port - 1;
9814                 ppd->guids[HFI1_PORT_GUID_INDEX] = guid;
9815         }
9816 
9817         /* Set linkinit_reason on power up per OPA spec */
9818         ppd->linkinit_reason = OPA_LINKINIT_REASON_LINKUP;
9819 
9820         /* one-time init of the LCB */
9821         init_lcb(dd);
9822 
9823         if (loopback) {
9824                 ret = init_loopback(dd);
9825                 if (ret < 0)
9826                         return ret;
9827         }
9828 
9829         get_port_type(ppd);
9830         if (ppd->port_type == PORT_TYPE_QSFP) {
9831                 set_qsfp_int_n(ppd, 0);
9832                 wait_for_qsfp_init(ppd);
9833                 set_qsfp_int_n(ppd, 1);
9834         }
9835 
9836         try_start_link(ppd);
9837         return 0;
9838 }
9839 
9840 void hfi1_quiet_serdes(struct hfi1_pportdata *ppd)
9841 {
9842         struct hfi1_devdata *dd = ppd->dd;
9843 
9844         /*
9845          * Shut down the link and keep it down.   First turn off that the
9846          * driver wants to allow the link to be up (driver_link_ready).
9847          * Then make sure the link is not automatically restarted
9848          * (link_enabled).  Cancel any pending restart.  And finally
9849          * go offline.
9850          */
9851         ppd->driver_link_ready = 0;
9852         ppd->link_enabled = 0;
9853 
9854         ppd->qsfp_retry_count = MAX_QSFP_RETRIES; /* prevent more retries */
9855         flush_delayed_work(&ppd->start_link_work);
9856         cancel_delayed_work_sync(&ppd->start_link_work);
9857 
9858         ppd->offline_disabled_reason =
9859                         HFI1_ODR_MASK(OPA_LINKDOWN_REASON_REBOOT);
9860         set_link_down_reason(ppd, OPA_LINKDOWN_REASON_REBOOT, 0,
9861                              OPA_LINKDOWN_REASON_REBOOT);
9862         set_link_state(ppd, HLS_DN_OFFLINE);
9863 
9864         /* disable the port */
9865         clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
9866         cancel_work_sync(&ppd->freeze_work);
9867 }
9868 
9869 static inline int init_cpu_counters(struct hfi1_devdata *dd)
9870 {
9871         struct hfi1_pportdata *ppd;
9872         int i;
9873 
9874         ppd = (struct hfi1_pportdata *)(dd + 1);
9875         for (i = 0; i < dd->num_pports; i++, ppd++) {
9876                 ppd->ibport_data.rvp.rc_acks = NULL;
9877                 ppd->ibport_data.rvp.rc_qacks = NULL;
9878                 ppd->ibport_data.rvp.rc_acks = alloc_percpu(u64);
9879                 ppd->ibport_data.rvp.rc_qacks = alloc_percpu(u64);
9880                 ppd->ibport_data.rvp.rc_delayed_comp = alloc_percpu(u64);
9881                 if (!ppd->ibport_data.rvp.rc_acks ||
9882                     !ppd->ibport_data.rvp.rc_delayed_comp ||
9883                     !ppd->ibport_data.rvp.rc_qacks)
9884                         return -ENOMEM;
9885         }
9886 
9887         return 0;
9888 }
9889 
9890 /*
9891  * index is the index into the receive array
9892  */
9893 void hfi1_put_tid(struct hfi1_devdata *dd, u32 index,
9894                   u32 type, unsigned long pa, u16 order)
9895 {
9896         u64 reg;
9897 
9898         if (!(dd->flags & HFI1_PRESENT))
9899                 goto done;
9900 
9901         if (type == PT_INVALID || type == PT_INVALID_FLUSH) {
9902                 pa = 0;
9903                 order = 0;
9904         } else if (type > PT_INVALID) {
9905                 dd_dev_err(dd,
9906                            "unexpected receive array type %u for index %u, not handled\n",
9907                            type, index);
9908                 goto done;
9909         }
9910         trace_hfi1_put_tid(dd, index, type, pa, order);
9911 
9912 #define RT_ADDR_SHIFT 12        /* 4KB kernel address boundary */
9913         reg = RCV_ARRAY_RT_WRITE_ENABLE_SMASK
9914                 | (u64)order << RCV_ARRAY_RT_BUF_SIZE_SHIFT
9915                 | ((pa >> RT_ADDR_SHIFT) & RCV_ARRAY_RT_ADDR_MASK)
9916                                         << RCV_ARRAY_RT_ADDR_SHIFT;
9917         trace_hfi1_write_rcvarray(dd->rcvarray_wc + (index * 8), reg);
9918         writeq(reg, dd->rcvarray_wc + (index * 8));
9919 
9920         if (type == PT_EAGER || type == PT_INVALID_FLUSH || (index & 3) == 3)
9921                 /*
9922                  * Eager entries are written and flushed
9923                  *
9924                  * Expected entries are flushed every 4 writes
9925                  */
9926                 flush_wc();
9927 done:
9928         return;
9929 }
9930 
9931 void hfi1_clear_tids(struct hfi1_ctxtdata *rcd)
9932 {
9933         struct hfi1_devdata *dd = rcd->dd;
9934         u32 i;
9935 
9936         /* this could be optimized */
9937         for (i = rcd->eager_base; i < rcd->eager_base +
9938                      rcd->egrbufs.alloced; i++)
9939                 hfi1_put_tid(dd, i, PT_INVALID, 0, 0);
9940 
9941         for (i = rcd->expected_base;
9942                         i < rcd->expected_base + rcd->expected_count; i++)
9943                 hfi1_put_tid(dd, i, PT_INVALID, 0, 0);
9944 }
9945 
9946 static const char * const ib_cfg_name_strings[] = {
9947         "HFI1_IB_CFG_LIDLMC",
9948         "HFI1_IB_CFG_LWID_DG_ENB",
9949         "HFI1_IB_CFG_LWID_ENB",
9950         "HFI1_IB_CFG_LWID",
9951         "HFI1_IB_CFG_SPD_ENB",
9952         "HFI1_IB_CFG_SPD",
9953         "HFI1_IB_CFG_RXPOL_ENB",
9954         "HFI1_IB_CFG_LREV_ENB",
9955         "HFI1_IB_CFG_LINKLATENCY",
9956         "HFI1_IB_CFG_HRTBT",
9957         "HFI1_IB_CFG_OP_VLS",
9958         "HFI1_IB_CFG_VL_HIGH_CAP",
9959         "HFI1_IB_CFG_VL_LOW_CAP",
9960         "HFI1_IB_CFG_OVERRUN_THRESH",
9961         "HFI1_IB_CFG_PHYERR_THRESH",
9962         "HFI1_IB_CFG_LINKDEFAULT",
9963         "HFI1_IB_CFG_PKEYS",
9964         "HFI1_IB_CFG_MTU",
9965         "HFI1_IB_CFG_LSTATE",
9966         "HFI1_IB_CFG_VL_HIGH_LIMIT",
9967         "HFI1_IB_CFG_PMA_TICKS",
9968         "HFI1_IB_CFG_PORT"
9969 };
9970 
9971 static const char *ib_cfg_name(int which)
9972 {
9973         if (which < 0 || which >= ARRAY_SIZE(ib_cfg_name_strings))
9974                 return "invalid";
9975         return ib_cfg_name_strings[which];
9976 }
9977 
9978 int hfi1_get_ib_cfg(struct hfi1_pportdata *ppd, int which)
9979 {
9980         struct hfi1_devdata *dd = ppd->dd;
9981         int val = 0;
9982 
9983         switch (which) {
9984         case HFI1_IB_CFG_LWID_ENB: /* allowed Link-width */
9985                 val = ppd->link_width_enabled;
9986                 break;
9987         case HFI1_IB_CFG_LWID: /* currently active Link-width */
9988                 val = ppd->link_width_active;
9989                 break;
9990         case HFI1_IB_CFG_SPD_ENB: /* allowed Link speeds */
9991                 val = ppd->link_speed_enabled;
9992                 break;
9993         case HFI1_IB_CFG_SPD: /* current Link speed */
9994                 val = ppd->link_speed_active;
9995                 break;
9996 
9997         case HFI1_IB_CFG_RXPOL_ENB: /* Auto-RX-polarity enable */
9998         case HFI1_IB_CFG_LREV_ENB: /* Auto-Lane-reversal enable */
9999         case HFI1_IB_CFG_LINKLATENCY:
10000                 goto unimplemented;
10001 
10002         case HFI1_IB_CFG_OP_VLS:
10003                 val = ppd->actual_vls_operational;
10004                 break;
10005         case HFI1_IB_CFG_VL_HIGH_CAP: /* VL arb high priority table size */
10006                 val = VL_ARB_HIGH_PRIO_TABLE_SIZE;
10007                 break;
10008         case HFI1_IB_CFG_VL_LOW_CAP: /* VL arb low priority table size */
10009                 val = VL_ARB_LOW_PRIO_TABLE_SIZE;
10010                 break;
10011         case HFI1_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
10012                 val = ppd->overrun_threshold;
10013                 break;
10014         case HFI1_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
10015                 val = ppd->phy_error_threshold;
10016                 break;
10017         case HFI1_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
10018                 val = HLS_DEFAULT;
10019                 break;
10020 
10021         case HFI1_IB_CFG_HRTBT: /* Heartbeat off/enable/auto */
10022         case HFI1_IB_CFG_PMA_TICKS:
10023         default:
10024 unimplemented:
10025                 if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
10026                         dd_dev_info(
10027                                 dd,
10028                                 "%s: which %s: not implemented\n",
10029                                 __func__,
10030                                 ib_cfg_name(which));
10031                 break;
10032         }
10033 
10034         return val;
10035 }
10036 
10037 /*
10038  * The largest MAD packet size.
10039  */
10040 #define MAX_MAD_PACKET 2048
10041 
10042 /*
10043  * Return the maximum header bytes that can go on the _wire_
10044  * for this device. This count includes the ICRC which is
10045  * not part of the packet held in memory but it is appended
10046  * by the HW.
10047  * This is dependent on the device's receive header entry size.
10048  * HFI allows this to be set per-receive context, but the
10049  * driver presently enforces a global value.
10050  */
10051 u32 lrh_max_header_bytes(struct hfi1_devdata *dd)
10052 {
10053         /*
10054          * The maximum non-payload (MTU) bytes in LRH.PktLen are
10055          * the Receive Header Entry Size minus the PBC (or RHF) size
10056          * plus one DW for the ICRC appended by HW.
10057          *
10058          * dd->rcd[0].rcvhdrqentsize is in DW.
10059          * We use rcd[0] as all context will have the same value. Also,
10060          * the first kernel context would have been allocated by now so
10061          * we are guaranteed a valid value.
10062          */
10063         return (dd->rcd[0]->rcvhdrqentsize - 2/*PBC/RHF*/ + 1/*ICRC*/) << 2;
10064 }
10065 
10066 /*
10067  * Set Send Length
10068  * @ppd - per port data
10069  *
10070  * Set the MTU by limiting how many DWs may be sent.  The SendLenCheck*
10071  * registers compare against LRH.PktLen, so use the max bytes included
10072  * in the LRH.
10073  *
10074  * This routine changes all VL values except VL15, which it maintains at
10075  * the same value.
10076  */
10077 static void set_send_length(struct hfi1_pportdata *ppd)
10078 {
10079         struct hfi1_devdata *dd = ppd->dd;
10080         u32 max_hb = lrh_max_header_bytes(dd), dcmtu;
10081         u32 maxvlmtu = dd->vld[15].mtu;
10082         u64 len1 = 0, len2 = (((dd->vld[15].mtu + max_hb) >> 2)
10083                               & SEND_LEN_CHECK1_LEN_VL15_MASK) <<
10084                 SEND_LEN_CHECK1_LEN_VL15_SHIFT;
10085         int i, j;
10086         u32 thres;
10087 
10088         for (i = 0; i < ppd->vls_supported; i++) {
10089                 if (dd->vld[i].mtu > maxvlmtu)
10090                         maxvlmtu = dd->vld[i].mtu;
10091                 if (i <= 3)
10092                         len1 |= (((dd->vld[i].mtu + max_hb) >> 2)
10093                                  & SEND_LEN_CHECK0_LEN_VL0_MASK) <<
10094                                 ((i % 4) * SEND_LEN_CHECK0_LEN_VL1_SHIFT);
10095                 else
10096                         len2 |= (((dd->vld[i].mtu + max_hb) >> 2)
10097                                  & SEND_LEN_CHECK1_LEN_VL4_MASK) <<
10098                                 ((i % 4) * SEND_LEN_CHECK1_LEN_VL5_SHIFT);
10099         }
10100         write_csr(dd, SEND_LEN_CHECK0, len1);
10101         write_csr(dd, SEND_LEN_CHECK1, len2);
10102         /* adjust kernel credit return thresholds based on new MTUs */
10103         /* all kernel receive contexts have the same hdrqentsize */
10104         for (i = 0; i < ppd->vls_supported; i++) {
10105                 thres = min(sc_percent_to_threshold(dd->vld[i].sc, 50),
10106                             sc_mtu_to_threshold(dd->vld[i].sc,
10107                                                 dd->vld[i].mtu,
10108                                                 dd->rcd[0]->rcvhdrqentsize));
10109                 for (j = 0; j < INIT_SC_PER_VL; j++)
10110                         sc_set_cr_threshold(
10111                                         pio_select_send_context_vl(dd, j, i),
10112                                             thres);
10113         }
10114         thres = min(sc_percent_to_threshold(dd->vld[15].sc, 50),
10115                     sc_mtu_to_threshold(dd->vld[15].sc,
10116                                         dd->vld[15].mtu,
10117                                         dd->rcd[0]->rcvhdrqentsize));
10118         sc_set_cr_threshold(dd->vld[15].sc, thres);
10119 
10120         /* Adjust maximum MTU for the port in DC */
10121         dcmtu = maxvlmtu == 10240 ? DCC_CFG_PORT_MTU_CAP_10240 :
10122                 (ilog2(maxvlmtu >> 8) + 1);
10123         len1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG);
10124         len1 &= ~DCC_CFG_PORT_CONFIG_MTU_CAP_SMASK;
10125         len1 |= ((u64)dcmtu & DCC_CFG_PORT_CONFIG_MTU_CAP_MASK) <<
10126                 DCC_CFG_PORT_CONFIG_MTU_CAP_SHIFT;
10127         write_csr(ppd->dd, DCC_CFG_PORT_CONFIG, len1);
10128 }
10129 
10130 static void set_lidlmc(struct hfi1_pportdata *ppd)
10131 {
10132         int i;
10133         u64 sreg = 0;
10134         struct hfi1_devdata *dd = ppd->dd;
10135         u32 mask = ~((1U << ppd->lmc) - 1);
10136         u64 c1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG1);
10137         u32 lid;
10138 
10139         /*
10140          * Program 0 in CSR if port lid is extended. This prevents
10141          * 9B packets being sent out for large lids.
10142          */
10143         lid = (ppd->lid >= be16_to_cpu(IB_MULTICAST_LID_BASE)) ? 0 : ppd->lid;
10144         c1 &= ~(DCC_CFG_PORT_CONFIG1_TARGET_DLID_SMASK
10145                 | DCC_CFG_PORT_CONFIG1_DLID_MASK_SMASK);
10146         c1 |= ((lid & DCC_CFG_PORT_CONFIG1_TARGET_DLID_MASK)
10147                         << DCC_CFG_PORT_CONFIG1_TARGET_DLID_SHIFT) |
10148               ((mask & DCC_CFG_PORT_CONFIG1_DLID_MASK_MASK)
10149                         << DCC_CFG_PORT_CONFIG1_DLID_MASK_SHIFT);
10150         write_csr(ppd->dd, DCC_CFG_PORT_CONFIG1, c1);
10151 
10152         /*
10153          * Iterate over all the send contexts and set their SLID check
10154          */
10155         sreg = ((mask & SEND_CTXT_CHECK_SLID_MASK_MASK) <<
10156                         SEND_CTXT_CHECK_SLID_MASK_SHIFT) |
10157                (((lid & mask) & SEND_CTXT_CHECK_SLID_VALUE_MASK) <<
10158                         SEND_CTXT_CHECK_SLID_VALUE_SHIFT);
10159 
10160         for (i = 0; i < chip_send_contexts(dd); i++) {
10161                 hfi1_cdbg(LINKVERB, "SendContext[%d].SLID_CHECK = 0x%x",
10162                           i, (u32)sreg);
10163                 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, sreg);
10164         }
10165 
10166         /* Now we have to do the same thing for the sdma engines */
10167         sdma_update_lmc(dd, mask, lid);
10168 }
10169 
10170 static const char *state_completed_string(u32 completed)
10171 {
10172         static const char * const state_completed[] = {
10173                 "EstablishComm",
10174                 "OptimizeEQ",
10175                 "VerifyCap"
10176         };
10177 
10178         if (completed < ARRAY_SIZE(state_completed))
10179                 return state_completed[completed];
10180 
10181         return "unknown";
10182 }
10183 
10184 static const char all_lanes_dead_timeout_expired[] =
10185         "All lanes were inactive – was the interconnect media removed?";
10186 static const char tx_out_of_policy[] =
10187         "Passing lanes on local port do not meet the local link width policy";
10188 static const char no_state_complete[] =
10189         "State timeout occurred before link partner completed the state";
10190 static const char * const state_complete_reasons[] = {
10191         [0x00] = "Reason unknown",
10192         [0x01] = "Link was halted by driver, refer to LinkDownReason",
10193         [0x02] = "Link partner reported failure",
10194         [0x10] = "Unable to achieve frame sync on any lane",
10195         [0x11] =
10196           "Unable to find a common bit rate with the link partner",
10197         [0x12] =
10198           "Unable to achieve frame sync on sufficient lanes to meet the local link width policy",
10199         [0x13] =
10200           "Unable to identify preset equalization on sufficient lanes to meet the local link width policy",
10201         [0x14] = no_state_complete,
10202         [0x15] =
10203           "State timeout occurred before link partner identified equalization presets",
10204         [0x16] =
10205           "Link partner completed the EstablishComm state, but the passing lanes do not meet the local link width policy",
10206         [0x17] = tx_out_of_policy,
10207         [0x20] = all_lanes_dead_timeout_expired,
10208         [0x21] =
10209           "Unable to achieve acceptable BER on sufficient lanes to meet the local link width policy",
10210         [0x22] = no_state_complete,
10211         [0x23] =
10212           "Link partner completed the OptimizeEq state, but the passing lanes do not meet the local link width policy",
10213         [0x24] = tx_out_of_policy,
10214         [0x30] = all_lanes_dead_timeout_expired,
10215         [0x31] =
10216           "State timeout occurred waiting for host to process received frames",
10217         [0x32] = no_state_complete,
10218         [0x33] =
10219           "Link partner completed the VerifyCap state, but the passing lanes do not meet the local link width policy",
10220         [0x34] = tx_out_of_policy,
10221         [0x35] = "Negotiated link width is mutually exclusive",
10222         [0x36] =
10223           "Timed out before receiving verifycap frames in VerifyCap.Exchange",
10224         [0x37] = "Unable to resolve secure data exchange",
10225 };
10226 
10227 static const char *state_complete_reason_code_string(struct hfi1_pportdata *ppd,
10228                                                      u32 code)
10229 {
10230         const char *str = NULL;
10231 
10232         if (code < ARRAY_SIZE(state_complete_reasons))
10233                 str = state_complete_reasons[code];
10234 
10235         if (str)
10236                 return str;
10237         return "Reserved";
10238 }
10239 
10240 /* describe the given last state complete frame */
10241 static void decode_state_complete(struct hfi1_pportdata *ppd, u32 frame,
10242                                   const char *prefix)
10243 {
10244         struct hfi1_devdata *dd = ppd->dd;
10245         u32 success;
10246         u32 state;
10247         u32 reason;
10248         u32 lanes;
10249 
10250         /*
10251          * Decode frame:
10252          *  [ 0: 0] - success
10253          *  [ 3: 1] - state
10254          *  [ 7: 4] - next state timeout
10255          *  [15: 8] - reason code
10256          *  [31:16] - lanes
10257          */
10258         success = frame & 0x1;
10259         state = (frame >> 1) & 0x7;
10260         reason = (frame >> 8) & 0xff;
10261         lanes = (frame >> 16) & 0xffff;
10262 
10263         dd_dev_err(dd, "Last %s LNI state complete frame 0x%08x:\n",
10264                    prefix, frame);
10265         dd_dev_err(dd, "    last reported state state: %s (0x%x)\n",
10266                    state_completed_string(state), state);
10267         dd_dev_err(dd, "    state successfully completed: %s\n",
10268                    success ? "yes" : "no");
10269         dd_dev_err(dd, "    fail reason 0x%x: %s\n",
10270                    reason, state_complete_reason_code_string(ppd, reason));
10271         dd_dev_err(dd, "    passing lane mask: 0x%x", lanes);
10272 }
10273 
10274 /*
10275  * Read the last state complete frames and explain them.  This routine
10276  * expects to be called if the link went down during link negotiation
10277  * and initialization (LNI).  That is, anywhere between polling and link up.
10278  */
10279 static void check_lni_states(struct hfi1_pportdata *ppd)
10280 {
10281         u32 last_local_state;
10282         u32 last_remote_state;
10283 
10284         read_last_local_state(ppd->dd, &last_local_state);
10285         read_last_remote_state(ppd->dd, &last_remote_state);
10286 
10287         /*
10288          * Don't report anything if there is nothing to report.  A value of
10289          * 0 means the link was taken down while polling and there was no
10290          * training in-process.
10291          */
10292         if (last_local_state == 0 && last_remote_state == 0)
10293                 return;
10294 
10295         decode_state_complete(ppd, last_local_state, "transmitted");
10296         decode_state_complete(ppd, last_remote_state, "received");
10297 }
10298 
10299 /* wait for wait_ms for LINK_TRANSFER_ACTIVE to go to 1 */
10300 static int wait_link_transfer_active(struct hfi1_devdata *dd, int wait_ms)
10301 {
10302         u64 reg;
10303         unsigned long timeout;
10304 
10305         /* watch LCB_STS_LINK_TRANSFER_ACTIVE */
10306         timeout = jiffies + msecs_to_jiffies(wait_ms);
10307         while (1) {
10308                 reg = read_csr(dd, DC_LCB_STS_LINK_TRANSFER_ACTIVE);
10309                 if (reg)
10310                         break;
10311                 if (time_after(jiffies, timeout)) {
10312                         dd_dev_err(dd,
10313                                    "timeout waiting for LINK_TRANSFER_ACTIVE\n");
10314                         return -ETIMEDOUT;
10315                 }
10316                 udelay(2);
10317         }
10318         return 0;
10319 }
10320 
10321 /* called when the logical link state is not down as it should be */
10322 static void force_logical_link_state_down(struct hfi1_pportdata *ppd)
10323 {
10324         struct hfi1_devdata *dd = ppd->dd;
10325 
10326         /*
10327          * Bring link up in LCB loopback
10328          */
10329         write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 1);
10330         write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK,
10331                   DC_LCB_CFG_IGNORE_LOST_RCLK_EN_SMASK);
10332 
10333         write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0);
10334         write_csr(dd, DC_LCB_CFG_REINIT_AS_SLAVE, 0);
10335         write_csr(dd, DC_LCB_CFG_CNT_FOR_SKIP_STALL, 0x110);
10336         write_csr(dd, DC_LCB_CFG_LOOPBACK, 0x2);
10337 
10338         write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
10339         (void)read_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET);
10340         udelay(3);
10341         write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP, 1);
10342         write_csr(dd, DC_LCB_CFG_RUN, 1ull << DC_LCB_CFG_RUN_EN_SHIFT);
10343 
10344         wait_link_transfer_active(dd, 100);
10345 
10346         /*
10347          * Bring the link down again.
10348          */
10349         write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 1);
10350         write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP, 0);
10351         write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK, 0);
10352 
10353         dd_dev_info(ppd->dd, "logical state forced to LINK_DOWN\n");
10354 }
10355 
10356 /*
10357  * Helper for set_link_state().  Do not call except from that routine.
10358  * Expects ppd->hls_mutex to be held.
10359  *
10360  * @rem_reason value to be sent to the neighbor
10361  *
10362  * LinkDownReasons only set if transition succeeds.
10363  */
10364 static int goto_offline(struct hfi1_pportdata *ppd, u8 rem_reason)
10365 {
10366         struct hfi1_devdata *dd = ppd->dd;
10367         u32 previous_state;
10368         int offline_state_ret;
10369         int ret;
10370 
10371         update_lcb_cache(dd);
10372 
10373         previous_state = ppd->host_link_state;
10374         ppd->host_link_state = HLS_GOING_OFFLINE;
10375 
10376         /* start offline transition */
10377         ret = set_physical_link_state(dd, (rem_reason << 8) | PLS_OFFLINE);
10378 
10379         if (ret != HCMD_SUCCESS) {
10380                 dd_dev_err(dd,
10381                            "Failed to transition to Offline link state, return %d\n",
10382                            ret);
10383                 return -EINVAL;
10384         }
10385         if (ppd->offline_disabled_reason ==
10386                         HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE))
10387                 ppd->offline_disabled_reason =
10388                 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT);
10389 
10390         offline_state_ret = wait_phys_link_offline_substates(ppd, 10000);
10391         if (offline_state_ret < 0)
10392                 return offline_state_ret;
10393 
10394         /* Disabling AOC transmitters */
10395         if (ppd->port_type == PORT_TYPE_QSFP &&
10396             ppd->qsfp_info.limiting_active &&
10397             qsfp_mod_present(ppd)) {
10398                 int ret;
10399 
10400                 ret = acquire_chip_resource(dd, qsfp_resource(dd), QSFP_WAIT);
10401                 if (ret == 0) {
10402                         set_qsfp_tx(ppd, 0);
10403                         release_chip_resource(dd, qsfp_resource(dd));
10404                 } else {
10405                         /* not fatal, but should warn */
10406                         dd_dev_err(dd,
10407                                    "Unable to acquire lock to turn off QSFP TX\n");
10408                 }
10409         }
10410 
10411         /*
10412          * Wait for the offline.Quiet transition if it hasn't happened yet. It
10413          * can take a while for the link to go down.
10414          */
10415         if (offline_state_ret != PLS_OFFLINE_QUIET) {
10416                 ret = wait_physical_linkstate(ppd, PLS_OFFLINE, 30000);
10417                 if (ret < 0)
10418                         return ret;
10419         }
10420 
10421         /*
10422          * Now in charge of LCB - must be after the physical state is
10423          * offline.quiet and before host_link_state is changed.
10424          */
10425         set_host_lcb_access(dd);
10426         write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */
10427 
10428         /* make sure the logical state is also down */
10429         ret = wait_logical_linkstate(ppd, IB_PORT_DOWN, 1000);
10430         if (ret)
10431                 force_logical_link_state_down(ppd);
10432 
10433         ppd->host_link_state = HLS_LINK_COOLDOWN; /* LCB access allowed */
10434         update_statusp(ppd, IB_PORT_DOWN);
10435 
10436         /*
10437          * The LNI has a mandatory wait time after the physical state
10438          * moves to Offline.Quiet.  The wait time may be different
10439          * depending on how the link went down.  The 8051 firmware
10440          * will observe the needed wait time and only move to ready
10441          * when that is completed.  The largest of the quiet timeouts
10442          * is 6s, so wait that long and then at least 0.5s more for
10443          * other transitions, and another 0.5s for a buffer.
10444          */
10445         ret = wait_fm_ready(dd, 7000);
10446         if (ret) {
10447                 dd_dev_err(dd,
10448                            "After going offline, timed out waiting for the 8051 to become ready to accept host requests\n");
10449                 /* state is really offline, so make it so */
10450                 ppd->host_link_state = HLS_DN_OFFLINE;
10451                 return ret;
10452         }
10453 
10454         /*
10455          * The state is now offline and the 8051 is ready to accept host
10456          * requests.
10457          *      - change our state
10458          *      - notify others if we were previously in a linkup state
10459          */
10460         ppd->host_link_state = HLS_DN_OFFLINE;
10461         if (previous_state & HLS_UP) {
10462                 /* went down while link was up */
10463                 handle_linkup_change(dd, 0);
10464         } else if (previous_state
10465                         & (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) {
10466                 /* went down while attempting link up */
10467                 check_lni_states(ppd);
10468 
10469                 /* The QSFP doesn't need to be reset on LNI failure */
10470                 ppd->qsfp_info.reset_needed = 0;
10471         }
10472 
10473         /* the active link width (downgrade) is 0 on link down */
10474         ppd->link_width_active = 0;
10475         ppd->link_width_downgrade_tx_active = 0;
10476         ppd->link_width_downgrade_rx_active = 0;
10477         ppd->current_egress_rate = 0;
10478         return 0;
10479 }
10480 
10481 /* return the link state name */
10482 static const char *link_state_name(u32 state)
10483 {
10484         const char *name;
10485         int n = ilog2(state);
10486         static const char * const names[] = {
10487                 [__HLS_UP_INIT_BP]       = "INIT",
10488                 [__HLS_UP_ARMED_BP]      = "ARMED",
10489                 [__HLS_UP_ACTIVE_BP]     = "ACTIVE",
10490                 [__HLS_DN_DOWNDEF_BP]    = "DOWNDEF",
10491                 [__HLS_DN_POLL_BP]       = "POLL",
10492                 [__HLS_DN_DISABLE_BP]    = "DISABLE",
10493                 [__HLS_DN_OFFLINE_BP]    = "OFFLINE",
10494                 [__HLS_VERIFY_CAP_BP]    = "VERIFY_CAP",
10495                 [__HLS_GOING_UP_BP]      = "GOING_UP",
10496                 [__HLS_GOING_OFFLINE_BP] = "GOING_OFFLINE",
10497                 [__HLS_LINK_COOLDOWN_BP] = "LINK_COOLDOWN"
10498         };
10499 
10500         name = n < ARRAY_SIZE(names) ? names[n] : NULL;
10501         return name ? name : "unknown";
10502 }
10503 
10504 /* return the link state reason name */
10505 static const char *link_state_reason_name(struct hfi1_pportdata *ppd, u32 state)
10506 {
10507         if (state == HLS_UP_INIT) {
10508                 switch (ppd->linkinit_reason) {
10509                 case OPA_LINKINIT_REASON_LINKUP:
10510                         return "(LINKUP)";
10511                 case OPA_LINKINIT_REASON_FLAPPING:
10512                         return "(FLAPPING)";
10513                 case OPA_LINKINIT_OUTSIDE_POLICY:
10514                         return "(OUTSIDE_POLICY)";
10515                 case OPA_LINKINIT_QUARANTINED:
10516                         return "(QUARANTINED)";
10517                 case OPA_LINKINIT_INSUFIC_CAPABILITY:
10518                         return "(INSUFIC_CAPABILITY)";
10519                 default:
10520                         break;
10521                 }
10522         }
10523         return "";
10524 }
10525 
10526 /*
10527  * driver_pstate - convert the driver's notion of a port's
10528  * state (an HLS_*) into a physical state (a {IB,OPA}_PORTPHYSSTATE_*).
10529  * Return -1 (converted to a u32) to indicate error.
10530  */
10531 u32 driver_pstate(struct hfi1_pportdata *ppd)
10532 {
10533         switch (ppd->host_link_state) {
10534         case HLS_UP_INIT:
10535         case HLS_UP_ARMED:
10536         case HLS_UP_ACTIVE:
10537                 return IB_PORTPHYSSTATE_LINKUP;
10538         case HLS_DN_POLL:
10539                 return IB_PORTPHYSSTATE_POLLING;
10540         case HLS_DN_DISABLE:
10541                 return IB_PORTPHYSSTATE_DISABLED;
10542         case HLS_DN_OFFLINE:
10543                 return OPA_PORTPHYSSTATE_OFFLINE;
10544         case HLS_VERIFY_CAP:
10545                 return IB_PORTPHYSSTATE_TRAINING;
10546         case HLS_GOING_UP:
10547                 return IB_PORTPHYSSTATE_TRAINING;
10548         case HLS_GOING_OFFLINE:
10549                 return OPA_PORTPHYSSTATE_OFFLINE;
10550         case HLS_LINK_COOLDOWN:
10551                 return OPA_PORTPHYSSTATE_OFFLINE;
10552         case HLS_DN_DOWNDEF:
10553         default:
10554                 dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n",
10555                            ppd->host_link_state);
10556                 return  -1;
10557         }
10558 }
10559 
10560 /*
10561  * driver_lstate - convert the driver's notion of a port's
10562  * state (an HLS_*) into a logical state (a IB_PORT_*). Return -1
10563  * (converted to a u32) to indicate error.
10564  */
10565 u32 driver_lstate(struct hfi1_pportdata *ppd)
10566 {
10567         if (ppd->host_link_state && (ppd->host_link_state & HLS_DOWN))
10568                 return IB_PORT_DOWN;
10569 
10570         switch (ppd->host_link_state & HLS_UP) {
10571         case HLS_UP_INIT:
10572                 return IB_PORT_INIT;
10573         case HLS_UP_ARMED:
10574                 return IB_PORT_ARMED;
10575         case HLS_UP_ACTIVE:
10576                 return IB_PORT_ACTIVE;
10577         default:
10578                 dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n",
10579                            ppd->host_link_state);
10580         return -1;
10581         }
10582 }
10583 
10584 void set_link_down_reason(struct hfi1_pportdata *ppd, u8 lcl_reason,
10585                           u8 neigh_reason, u8 rem_reason)
10586 {
10587         if (ppd->local_link_down_reason.latest == 0 &&
10588             ppd->neigh_link_down_reason.latest == 0) {
10589                 ppd->local_link_down_reason.latest = lcl_reason;
10590                 ppd->neigh_link_down_reason.latest = neigh_reason;
10591                 ppd->remote_link_down_reason = rem_reason;
10592         }
10593 }
10594 
10595 /**
10596  * data_vls_operational() - Verify if data VL BCT credits and MTU
10597  *                          are both set.
10598  * @ppd: pointer to hfi1_pportdata structure
10599  *
10600  * Return: true - Ok, false -otherwise.
10601  */
10602 static inline bool data_vls_operational(struct hfi1_pportdata *ppd)
10603 {
10604         int i;
10605         u64 reg;
10606 
10607         if (!ppd->actual_vls_operational)
10608                 return false;
10609 
10610         for (i = 0; i < ppd->vls_supported; i++) {
10611                 reg = read_csr(ppd->dd, SEND_CM_CREDIT_VL + (8 * i));
10612                 if ((reg && !ppd->dd->vld[i].mtu) ||
10613                     (!reg && ppd->dd->vld[i].mtu))
10614                         return false;
10615         }
10616 
10617         return true;
10618 }
10619 
10620 /*
10621  * Change the physical and/or logical link state.
10622  *
10623  * Do not call this routine while inside an interrupt.  It contains
10624  * calls to routines that can take multiple seconds to finish.
10625  *
10626  * Returns 0 on success, -errno on failure.
10627  */
10628 int set_link_state(struct hfi1_pportdata *ppd, u32 state)
10629 {
10630         struct hfi1_devdata *dd = ppd->dd;
10631         struct ib_event event = {.device = NULL};
10632         int ret1, ret = 0;
10633         int orig_new_state, poll_bounce;
10634 
10635         mutex_lock(&ppd->hls_lock);
10636 
10637         orig_new_state = state;
10638         if (state == HLS_DN_DOWNDEF)
10639                 state = HLS_DEFAULT;
10640 
10641         /* interpret poll -> poll as a link bounce */
10642         poll_bounce = ppd->host_link_state == HLS_DN_POLL &&
10643                       state == HLS_DN_POLL;
10644 
10645         dd_dev_info(dd, "%s: current %s, new %s %s%s\n", __func__,
10646                     link_state_name(ppd->host_link_state),
10647                     link_state_name(orig_new_state),
10648                     poll_bounce ? "(bounce) " : "",
10649                     link_state_reason_name(ppd, state));
10650 
10651         /*
10652          * If we're going to a (HLS_*) link state that implies the logical
10653          * link state is neither of (IB_PORT_ARMED, IB_PORT_ACTIVE), then
10654          * reset is_sm_config_started to 0.
10655          */
10656         if (!(state & (HLS_UP_ARMED | HLS_UP_ACTIVE)))
10657                 ppd->is_sm_config_started = 0;
10658 
10659         /*
10660          * Do nothing if the states match.  Let a poll to poll link bounce
10661          * go through.
10662          */
10663         if (ppd->host_link_state == state && !poll_bounce)
10664                 goto done;
10665 
10666         switch (state) {
10667         case HLS_UP_INIT:
10668                 if (ppd->host_link_state == HLS_DN_POLL &&
10669                     (quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR)) {
10670                         /*
10671                          * Quick link up jumps from polling to here.
10672                          *
10673                          * Whether in normal or loopback mode, the
10674                          * simulator jumps from polling to link up.
10675                          * Accept that here.
10676                          */
10677                         /* OK */
10678                 } else if (ppd->host_link_state != HLS_GOING_UP) {
10679                         goto unexpected;
10680                 }
10681 
10682                 /*
10683                  * Wait for Link_Up physical state.
10684                  * Physical and Logical states should already be
10685                  * be transitioned to LinkUp and LinkInit respectively.
10686                  */
10687                 ret = wait_physical_linkstate(ppd, PLS_LINKUP, 1000);
10688                 if (ret) {
10689                         dd_dev_err(dd,
10690                                    "%s: physical state did not change to LINK-UP\n",
10691                                    __func__);
10692                         break;
10693                 }
10694 
10695                 ret = wait_logical_linkstate(ppd, IB_PORT_INIT, 1000);
10696                 if (ret) {
10697                         dd_dev_err(dd,
10698                                    "%s: logical state did not change to INIT\n",
10699                                    __func__);
10700                         break;
10701                 }
10702 
10703                 /* clear old transient LINKINIT_REASON code */
10704                 if (ppd->linkinit_reason >= OPA_LINKINIT_REASON_CLEAR)
10705                         ppd->linkinit_reason =
10706                                 OPA_LINKINIT_REASON_LINKUP;
10707 
10708                 /* enable the port */
10709                 add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
10710 
10711                 handle_linkup_change(dd, 1);
10712                 pio_kernel_linkup(dd);
10713 
10714                 /*
10715                  * After link up, a new link width will have been set.
10716                  * Update the xmit counters with regards to the new
10717                  * link width.
10718                  */
10719                 update_xmit_counters(ppd, ppd->link_width_active);
10720 
10721                 ppd->host_link_state = HLS_UP_INIT;
10722                 update_statusp(ppd, IB_PORT_INIT);
10723                 break;
10724         case HLS_UP_ARMED:
10725                 if (ppd->host_link_state != HLS_UP_INIT)
10726                         goto unexpected;
10727 
10728                 if (!data_vls_operational(ppd)) {
10729                         dd_dev_err(dd,
10730                                    "%s: Invalid data VL credits or mtu\n",
10731                                    __func__);
10732                         ret = -EINVAL;
10733                         break;
10734                 }
10735 
10736                 set_logical_state(dd, LSTATE_ARMED);
10737                 ret = wait_logical_linkstate(ppd, IB_PORT_ARMED, 1000);
10738                 if (ret) {
10739                         dd_dev_err(dd,
10740                                    "%s: logical state did not change to ARMED\n",
10741                                    __func__);
10742                         break;
10743                 }
10744                 ppd->host_link_state = HLS_UP_ARMED;
10745                 update_statusp(ppd, IB_PORT_ARMED);
10746                 /*
10747                  * The simulator does not currently implement SMA messages,
10748                  * so neighbor_normal is not set.  Set it here when we first
10749                  * move to Armed.
10750                  */
10751                 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
10752                         ppd->neighbor_normal = 1;
10753                 break;
10754         case HLS_UP_ACTIVE:
10755                 if (ppd->host_link_state != HLS_UP_ARMED)
10756                         goto unexpected;
10757 
10758                 set_logical_state(dd, LSTATE_ACTIVE);
10759                 ret = wait_logical_linkstate(ppd, IB_PORT_ACTIVE, 1000);
10760                 if (ret) {
10761                         dd_dev_err(dd,
10762                                    "%s: logical state did not change to ACTIVE\n",
10763                                    __func__);
10764                 } else {
10765                         /* tell all engines to go running */
10766                         sdma_all_running(dd);
10767                         ppd->host_link_state = HLS_UP_ACTIVE;
10768                         update_statusp(ppd, IB_PORT_ACTIVE);
10769 
10770                         /* Signal the IB layer that the port has went active */
10771                         event.device = &dd->verbs_dev.rdi.ibdev;
10772                         event.element.port_num = ppd->port;
10773                         event.event = IB_EVENT_PORT_ACTIVE;
10774                 }
10775                 break;
10776         case HLS_DN_POLL:
10777                 if ((ppd->host_link_state == HLS_DN_DISABLE ||
10778                      ppd->host_link_state == HLS_DN_OFFLINE) &&
10779                     dd->dc_shutdown)
10780                         dc_start(dd);
10781                 /* Hand LED control to the DC */
10782                 write_csr(dd, DCC_CFG_LED_CNTRL, 0);
10783 
10784                 if (ppd->host_link_state != HLS_DN_OFFLINE) {
10785                         u8 tmp = ppd->link_enabled;
10786 
10787                         ret = goto_offline(ppd, ppd->remote_link_down_reason);
10788                         if (ret) {
10789                                 ppd->link_enabled = tmp;
10790                                 break;
10791                         }
10792                         ppd->remote_link_down_reason = 0;
10793 
10794                         if (ppd->driver_link_ready)
10795                                 ppd->link_enabled = 1;
10796                 }
10797 
10798                 set_all_slowpath(ppd->dd);
10799                 ret = set_local_link_attributes(ppd);
10800                 if (ret)
10801                         break;
10802 
10803                 ppd->port_error_action = 0;
10804 
10805                 if (quick_linkup) {
10806                         /* quick linkup does not go into polling */
10807                         ret = do_quick_linkup(dd);
10808                 } else {
10809                         ret1 = set_physical_link_state(dd, PLS_POLLING);
10810                         if (!ret1)
10811                                 ret1 = wait_phys_link_out_of_offline(ppd,
10812                                                                      3000);
10813                         if (ret1 != HCMD_SUCCESS) {
10814                                 dd_dev_err(dd,
10815                                            "Failed to transition to Polling link state, return 0x%x\n",
10816                                            ret1);
10817                                 ret = -EINVAL;
10818                         }
10819                 }
10820 
10821                 /*
10822                  * Change the host link state after requesting DC8051 to
10823                  * change its physical state so that we can ignore any
10824                  * interrupt with stale LNI(XX) error, which will not be
10825                  * cleared until DC8051 transitions to Polling state.
10826                  */
10827                 ppd->host_link_state = HLS_DN_POLL;
10828                 ppd->offline_disabled_reason =
10829                         HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE);
10830                 /*
10831                  * If an error occurred above, go back to offline.  The
10832                  * caller may reschedule another attempt.
10833                  */
10834                 if (ret)
10835                         goto_offline(ppd, 0);
10836                 else
10837                         log_physical_state(ppd, PLS_POLLING);
10838                 break;
10839         case HLS_DN_DISABLE:
10840                 /* link is disabled */
10841                 ppd->link_enabled = 0;
10842 
10843                 /* allow any state to transition to disabled */
10844 
10845                 /* must transition to offline first */
10846                 if (ppd->host_link_state != HLS_DN_OFFLINE) {
10847                         ret = goto_offline(ppd, ppd->remote_link_down_reason);
10848                         if (ret)
10849                                 break;
10850                         ppd->remote_link_down_reason = 0;
10851                 }
10852 
10853                 if (!dd->dc_shutdown) {
10854                         ret1 = set_physical_link_state(dd, PLS_DISABLED);
10855                         if (ret1 != HCMD_SUCCESS) {
10856                                 dd_dev_err(dd,
10857                                            "Failed to transition to Disabled link state, return 0x%x\n",
10858                                            ret1);
10859                                 ret = -EINVAL;
10860                                 break;
10861                         }
10862                         ret = wait_physical_linkstate(ppd, PLS_DISABLED, 10000);
10863                         if (ret) {
10864                                 dd_dev_err(dd,
10865                                            "%s: physical state did not change to DISABLED\n",
10866                                            __func__);
10867                                 break;
10868                         }
10869                         dc_shutdown(dd);
10870                 }
10871                 ppd->host_link_state = HLS_DN_DISABLE;
10872                 break;
10873         case HLS_DN_OFFLINE:
10874                 if (ppd->host_link_state == HLS_DN_DISABLE)
10875                         dc_start(dd);
10876 
10877                 /* allow any state to transition to offline */
10878                 ret = goto_offline(ppd, ppd->remote_link_down_reason);
10879                 if (!ret)
10880                         ppd->remote_link_down_reason = 0;
10881                 break;
10882         case HLS_VERIFY_CAP:
10883                 if (ppd->host_link_state != HLS_DN_POLL)
10884                         goto unexpected;
10885                 ppd->host_link_state = HLS_VERIFY_CAP;
10886                 log_physical_state(ppd, PLS_CONFIGPHY_VERIFYCAP);
10887                 break;
10888         case HLS_GOING_UP:
10889                 if (ppd->host_link_state != HLS_VERIFY_CAP)
10890                         goto unexpected;
10891 
10892                 ret1 = set_physical_link_state(dd, PLS_LINKUP);
10893                 if (ret1 != HCMD_SUCCESS) {
10894                         dd_dev_err(dd,
10895                                    "Failed to transition to link up state, return 0x%x\n",
10896                                    ret1);
10897                         ret = -EINVAL;
10898                         break;
10899                 }
10900                 ppd->host_link_state = HLS_GOING_UP;
10901                 break;
10902 
10903         case HLS_GOING_OFFLINE:         /* transient within goto_offline() */
10904         case HLS_LINK_COOLDOWN:         /* transient within goto_offline() */
10905         default:
10906                 dd_dev_info(dd, "%s: state 0x%x: not supported\n",
10907                             __func__, state);
10908                 ret = -EINVAL;
10909                 break;
10910         }
10911 
10912         goto done;
10913 
10914 unexpected:
10915         dd_dev_err(dd, "%s: unexpected state transition from %s to %s\n",
10916                    __func__, link_state_name(ppd->host_link_state),
10917                    link_state_name(state));
10918         ret = -EINVAL;
10919 
10920 done:
10921         mutex_unlock(&ppd->hls_lock);
10922 
10923         if (event.device)
10924                 ib_dispatch_event(&event);
10925 
10926         return ret;
10927 }
10928 
10929 int hfi1_set_ib_cfg(struct hfi1_pportdata *ppd, int which, u32 val)
10930 {
10931         u64 reg;
10932         int ret = 0;
10933 
10934         switch (which) {
10935         case HFI1_IB_CFG_LIDLMC:
10936                 set_lidlmc(ppd);
10937                 break;
10938         case HFI1_IB_CFG_VL_HIGH_LIMIT:
10939                 /*
10940                  * The VL Arbitrator high limit is sent in units of 4k
10941                  * bytes, while HFI stores it in units of 64 bytes.
10942                  */
10943                 val *= 4096 / 64;
10944                 reg = ((u64)val & SEND_HIGH_PRIORITY_LIMIT_LIMIT_MASK)
10945                         << SEND_HIGH_PRIORITY_LIMIT_LIMIT_SHIFT;
10946                 write_csr(ppd->dd, SEND_HIGH_PRIORITY_LIMIT, reg);
10947                 break;
10948         case HFI1_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
10949                 /* HFI only supports POLL as the default link down state */
10950                 if (val != HLS_DN_POLL)
10951                         ret = -EINVAL;
10952                 break;
10953         case HFI1_IB_CFG_OP_VLS:
10954                 if (ppd->vls_operational != val) {
10955                         ppd->vls_operational = val;
10956                         if (!ppd->port)
10957                                 ret = -EINVAL;
10958                 }
10959                 break;
10960         /*
10961          * For link width, link width downgrade, and speed enable, always AND
10962          * the setting with what is actually supported.  This has two benefits.
10963          * First, enabled can't have unsupported values, no matter what the
10964          * SM or FM might want.  Second, the ALL_SUPPORTED wildcards that mean
10965          * "fill in with your supported value" have all the bits in the
10966          * field set, so simply ANDing with supported has the desired result.
10967          */
10968         case HFI1_IB_CFG_LWID_ENB: /* set allowed Link-width */
10969                 ppd->link_width_enabled = val & ppd->link_width_supported;
10970                 break;
10971         case HFI1_IB_CFG_LWID_DG_ENB: /* set allowed link width downgrade */
10972                 ppd->link_width_downgrade_enabled =
10973                                 val & ppd->link_width_downgrade_supported;
10974                 break;
10975         case HFI1_IB_CFG_SPD_ENB: /* allowed Link speeds */
10976                 ppd->link_speed_enabled = val & ppd->link_speed_supported;
10977                 break;
10978         case HFI1_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
10979                 /*
10980                  * HFI does not follow IB specs, save this value
10981                  * so we can report it, if asked.
10982                  */
10983                 ppd->overrun_threshold = val;
10984                 break;
10985         case HFI1_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
10986                 /*
10987                  * HFI does not follow IB specs, save this value
10988                  * so we can report it, if asked.
10989                  */
10990                 ppd->phy_error_threshold = val;
10991                 break;
10992 
10993         case HFI1_IB_CFG_MTU:
10994                 set_send_length(ppd);
10995                 break;
10996 
10997         case HFI1_IB_CFG_PKEYS:
10998                 if (HFI1_CAP_IS_KSET(PKEY_CHECK))
10999                         set_partition_keys(ppd);
11000                 break;
11001 
11002         default:
11003                 if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
11004                         dd_dev_info(ppd->dd,
11005                                     "%s: which %s, val 0x%x: not implemented\n",
11006                                     __func__, ib_cfg_name(which), val);
11007                 break;
11008         }
11009         return ret;
11010 }
11011 
11012 /* begin functions related to vl arbitration table caching */
11013 static void init_vl_arb_caches(struct hfi1_pportdata *ppd)
11014 {
11015         int i;
11016 
11017         BUILD_BUG_ON(VL_ARB_TABLE_SIZE !=
11018                         VL_ARB_LOW_PRIO_TABLE_SIZE);
11019         BUILD_BUG_ON(VL_ARB_TABLE_SIZE !=
11020                         VL_ARB_HIGH_PRIO_TABLE_SIZE);
11021 
11022         /*
11023          * Note that we always return values directly from the
11024          * 'vl_arb_cache' (and do no CSR reads) in response to a
11025          * 'Get(VLArbTable)'. This is obviously correct after a
11026          * 'Set(VLArbTable)', since the cache will then be up to
11027          * date. But it's also correct prior to any 'Set(VLArbTable)'
11028          * since then both the cache, and the relevant h/w registers
11029          * will be zeroed.
11030          */
11031 
11032         for (i = 0; i < MAX_PRIO_TABLE; i++)
11033                 spin_lock_init(&ppd->vl_arb_cache[i].lock);
11034 }
11035 
11036 /*
11037  * vl_arb_lock_cache
11038  *
11039  * All other vl_arb_* functions should be called only after locking
11040  * the cache.
11041  */
11042 static inline struct vl_arb_cache *
11043 vl_arb_lock_cache(struct hfi1_pportdata *ppd, int idx)
11044 {
11045         if (idx != LO_PRIO_TABLE && idx != HI_PRIO_TABLE)
11046                 return NULL;
11047         spin_lock(&ppd->vl_arb_cache[idx].lock);
11048         return &ppd->vl_arb_cache[idx];
11049 }
11050 
11051 static inline void vl_arb_unlock_cache(struct hfi1_pportdata *ppd, int idx)
11052 {
11053         spin_unlock(&ppd->vl_arb_cache[idx].lock);
11054 }
11055 
11056 static void vl_arb_get_cache(struct vl_arb_cache *cache,
11057                              struct ib_vl_weight_elem *vl)
11058 {
11059         memcpy(vl, cache->table, VL_ARB_TABLE_SIZE * sizeof(*vl));
11060 }
11061 
11062 static void vl_arb_set_cache(struct vl_arb_cache *cache,
11063                              struct ib_vl_weight_elem *vl)
11064 {
11065         memcpy(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl));
11066 }
11067 
11068 static int vl_arb_match_cache(struct vl_arb_cache *cache,
11069                               struct ib_vl_weight_elem *vl)
11070 {
11071         return !memcmp(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl));
11072 }
11073 
11074 /* end functions related to vl arbitration table caching */
11075 
11076 static int set_vl_weights(struct hfi1_pportdata *ppd, u32 target,
11077                           u32 size, struct ib_vl_weight_elem *vl)
11078 {
11079         struct hfi1_devdata *dd = ppd->dd;
11080         u64 reg;
11081         unsigned int i, is_up = 0;
11082         int drain, ret = 0;
11083 
11084         mutex_lock(&ppd->hls_lock);
11085 
11086         if (ppd->host_link_state & HLS_UP)
11087                 is_up = 1;
11088 
11089         drain = !is_ax(dd) && is_up;
11090 
11091         if (drain)
11092                 /*
11093                  * Before adjusting VL arbitration weights, empty per-VL
11094                  * FIFOs, otherwise a packet whose VL weight is being
11095                  * set to 0 could get stuck in a FIFO with no chance to
11096                  * egress.
11097                  */
11098                 ret = stop_drain_data_vls(dd);
11099 
11100         if (ret) {
11101                 dd_dev_err(
11102                         dd,
11103                         "%s: cannot stop/drain VLs - refusing to change VL arbitration weights\n",
11104                         __func__);
11105                 goto err;
11106         }
11107 
11108         for (i = 0; i < size; i++, vl++) {
11109                 /*
11110                  * NOTE: The low priority shift and mask are used here, but
11111                  * they are the same for both the low and high registers.
11112                  */
11113                 reg = (((u64)vl->vl & SEND_LOW_PRIORITY_LIST_VL_MASK)
11114                                 << SEND_LOW_PRIORITY_LIST_VL_SHIFT)
11115                       | (((u64)vl->weight
11116                                 & SEND_LOW_PRIORITY_LIST_WEIGHT_MASK)
11117                                 << SEND_LOW_PRIORITY_LIST_WEIGHT_SHIFT);
11118                 write_csr(dd, target + (i * 8), reg);
11119         }
11120         pio_send_control(dd, PSC_GLOBAL_VLARB_ENABLE);
11121 
11122         if (drain)
11123                 open_fill_data_vls(dd); /* reopen all VLs */
11124 
11125 err:
11126         mutex_unlock(&ppd->hls_lock);
11127 
11128         return ret;
11129 }
11130 
11131 /*
11132  * Read one credit merge VL register.
11133  */
11134 static void read_one_cm_vl(struct hfi1_devdata *dd, u32 csr,
11135                            struct vl_limit *vll)
11136 {
11137         u64 reg = read_csr(dd, csr);
11138 
11139         vll->dedicated = cpu_to_be16(
11140                 (reg >> SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT)
11141                 & SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_MASK);
11142         vll->shared = cpu_to_be16(
11143                 (reg >> SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT)
11144                 & SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_MASK);
11145 }
11146 
11147 /*
11148  * Read the current credit merge limits.
11149  */
11150 static int get_buffer_control(struct hfi1_devdata *dd,
11151                               struct buffer_control *bc, u16 *overall_limit)
11152 {
11153         u64 reg;
11154         int i;
11155 
11156         /* not all entries are filled in */
11157         memset(bc, 0, sizeof(*bc));
11158 
11159         /* OPA and HFI have a 1-1 mapping */
11160         for (i = 0; i < TXE_NUM_DATA_VL; i++)
11161                 read_one_cm_vl(dd, SEND_CM_CREDIT_VL + (8 * i), &bc->vl[i]);
11162 
11163         /* NOTE: assumes that VL* and VL15 CSRs are bit-wise identical */
11164         read_one_cm_vl(dd, SEND_CM_CREDIT_VL15, &bc->vl[15]);
11165 
11166         reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
11167         bc->overall_shared_limit = cpu_to_be16(
11168                 (reg >> SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT)
11169                 & SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_MASK);
11170         if (overall_limit)
11171                 *overall_limit = (reg
11172                         >> SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT)
11173                         & SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_MASK;
11174         return sizeof(struct buffer_control);
11175 }
11176 
11177 static int get_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp)
11178 {
11179         u64 reg;
11180         int i;
11181 
11182         /* each register contains 16 SC->VLnt mappings, 4 bits each */
11183         reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_15_0);
11184         for (i = 0; i < sizeof(u64); i++) {
11185                 u8 byte = *(((u8 *)&reg) + i);
11186 
11187                 dp->vlnt[2 * i] = byte & 0xf;
11188                 dp->vlnt[(2 * i) + 1] = (byte & 0xf0) >> 4;
11189         }
11190 
11191         reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_31_16);
11192         for (i = 0; i < sizeof(u64); i++) {
11193                 u8 byte = *(((u8 *)&reg) + i);
11194 
11195                 dp->vlnt[16 + (2 * i)] = byte & 0xf;
11196                 dp->vlnt[16 + (2 * i) + 1] = (byte & 0xf0) >> 4;
11197         }
11198         return sizeof(struct sc2vlnt);
11199 }
11200 
11201 static void get_vlarb_preempt(struct hfi1_devdata *dd, u32 nelems,
11202                               struct ib_vl_weight_elem *vl)
11203 {
11204         unsigned int i;
11205 
11206         for (i = 0; i < nelems; i++, vl++) {
11207                 vl->vl = 0xf;
11208                 vl->weight = 0;
11209         }
11210 }
11211 
11212 static void set_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp)
11213 {
11214         write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0,
11215                   DC_SC_VL_VAL(15_0,
11216                                0, dp->vlnt[0] & 0xf,
11217                                1, dp->vlnt[1] & 0xf,
11218                                2, dp->vlnt[2] & 0xf,
11219                                3, dp->vlnt[3] & 0xf,
11220                                4, dp->vlnt[4] & 0xf,
11221                                5, dp->vlnt[5] & 0xf,
11222                                6, dp->vlnt[6] & 0xf,
11223                                7, dp->vlnt[7] & 0xf,
11224                                8, dp->vlnt[8] & 0xf,
11225                                9, dp->vlnt[9] & 0xf,
11226                                10, dp->vlnt[10] & 0xf,
11227                                11, dp->vlnt[11] & 0xf,
11228                                12, dp->vlnt[12] & 0xf,
11229                                13, dp->vlnt[13] & 0xf,
11230                                14, dp->vlnt[14] & 0xf,
11231                                15, dp->vlnt[15] & 0xf));
11232         write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16,
11233                   DC_SC_VL_VAL(31_16,
11234                                16, dp->vlnt[16] & 0xf,
11235                                17, dp->vlnt[17] & 0xf,
11236                                18, dp->vlnt[18] & 0xf,
11237                                19, dp->vlnt[19] & 0xf,
11238                                20, dp->vlnt[20] & 0xf,
11239                                21, dp->vlnt[21] & 0xf,
11240                                22, dp->vlnt[22] & 0xf,
11241                                23, dp->vlnt[23] & 0xf,
11242                                24, dp->vlnt[24] & 0xf,
11243                                25, dp->vlnt[25] & 0xf,
11244                                26, dp->vlnt[26] & 0xf,
11245                                27, dp->vlnt[27] & 0xf,
11246                                28, dp->vlnt[28] & 0xf,
11247                                29, dp->vlnt[29] & 0xf,
11248                                30, dp->vlnt[30] & 0xf,
11249                                31, dp->vlnt[31] & 0xf));
11250 }
11251 
11252 static void nonzero_msg(struct hfi1_devdata *dd, int idx, const char *what,
11253                         u16 limit)
11254 {
11255         if (limit != 0)
11256                 dd_dev_info(dd, "Invalid %s limit %d on VL %d, ignoring\n",
11257                             what, (int)limit, idx);
11258 }
11259 
11260 /* change only the shared limit portion of SendCmGLobalCredit */
11261 static void set_global_shared(struct hfi1_devdata *dd, u16 limit)
11262 {
11263         u64 reg;
11264 
11265         reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
11266         reg &= ~SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SMASK;
11267         reg |= (u64)limit << SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT;
11268         write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
11269 }
11270 
11271 /* change only the total credit limit portion of SendCmGLobalCredit */
11272 static void set_global_limit(struct hfi1_devdata *dd, u16 limit)
11273 {
11274         u64 reg;
11275 
11276         reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
11277         reg &= ~SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SMASK;
11278         reg |= (u64)limit << SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT;
11279         write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
11280 }
11281 
11282 /* set the given per-VL shared limit */
11283 static void set_vl_shared(struct hfi1_devdata *dd, int vl, u16 limit)
11284 {
11285         u64 reg;
11286         u32 addr;
11287 
11288         if (vl < TXE_NUM_DATA_VL)
11289                 addr = SEND_CM_CREDIT_VL + (8 * vl);
11290         else
11291                 addr = SEND_CM_CREDIT_VL15;
11292 
11293         reg = read_csr(dd, addr);
11294         reg &= ~SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SMASK;
11295         reg |= (u64)limit << SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT;
11296         write_csr(dd, addr, reg);
11297 }
11298 
11299 /* set the given per-VL dedicated limit */
11300 static void set_vl_dedicated(struct hfi1_devdata *dd, int vl, u16 limit)
11301 {
11302         u64 reg;
11303         u32 addr;
11304 
11305         if (vl < TXE_NUM_DATA_VL)
11306                 addr = SEND_CM_CREDIT_VL + (8 * vl);
11307         else
11308                 addr = SEND_CM_CREDIT_VL15;
11309 
11310         reg = read_csr(dd, addr);
11311         reg &= ~SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SMASK;
11312         reg |= (u64)limit << SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT;
11313         write_csr(dd, addr, reg);
11314 }
11315 
11316 /* spin until the given per-VL status mask bits clear */
11317 static void wait_for_vl_status_clear(struct hfi1_devdata *dd, u64 mask,
11318                                      const char *which)
11319 {
11320         unsigned long timeout;
11321         u64 reg;
11322 
11323         timeout = jiffies + msecs_to_jiffies(VL_STATUS_CLEAR_TIMEOUT);
11324         while (1) {
11325                 reg = read_csr(dd, SEND_CM_CREDIT_USED_STATUS) & mask;
11326 
11327                 if (reg == 0)
11328                         return; /* success */
11329                 if (time_after(jiffies, timeout))
11330                         break;          /* timed out */
11331                 udelay(1);
11332         }
11333 
11334         dd_dev_err(dd,
11335                    "%s credit change status not clearing after %dms, mask 0x%llx, not clear 0x%llx\n",
11336                    which, VL_STATUS_CLEAR_TIMEOUT, mask, reg);
11337         /*
11338          * If this occurs, it is likely there was a credit loss on the link.
11339          * The only recovery from that is a link bounce.
11340          */
11341         dd_dev_err(dd,
11342                    "Continuing anyway.  A credit loss may occur.  Suggest a link bounce\n");
11343 }
11344 
11345 /*
11346  * The number of credits on the VLs may be changed while everything
11347  * is "live", but the following algorithm must be followed due to
11348  * how the hardware is actually implemented.  In particular,
11349  * Return_Credit_Status[] is the only correct status check.
11350  *
11351  * if (reducing Global_Shared_Credit_Limit or any shared limit changing)
11352  *     set Global_Shared_Credit_Limit = 0
11353  *     use_all_vl = 1
11354  * mask0 = all VLs that are changing either dedicated or shared limits
11355  * set Shared_Limit[mask0] = 0
11356  * spin until Return_Credit_Status[use_all_vl ? all VL : mask0] == 0
11357  * if (changing any dedicated limit)
11358  *     mask1 = all VLs that are lowering dedicated limits
11359  *     lower Dedicated_Limit[mask1]
11360  *     spin until Return_Credit_Status[mask1] == 0
11361  *     raise Dedicated_Limits
11362  * raise Shared_Limits
11363  * raise Global_Shared_Credit_Limit
11364  *
11365  * lower = if the new limit is lower, set the limit to the new value
11366  * raise = if the new limit is higher than the current value (may be changed
11367  *      earlier in the algorithm), set the new limit to the new value
11368  */
11369 int set_buffer_control(struct hfi1_pportdata *ppd,
11370                        struct buffer_control *new_bc)
11371 {
11372         struct hfi1_devdata *dd = ppd->dd;
11373         u64 changing_mask, ld_mask, stat_mask;
11374         int change_count;
11375         int i, use_all_mask;
11376         int this_shared_changing;
11377         int vl_count = 0, ret;
11378         /*
11379          * A0: add the variable any_shared_limit_changing below and in the
11380          * algorithm above.  If removing A0 support, it can be removed.
11381          */
11382         int any_shared_limit_changing;
11383         struct buffer_control cur_bc;
11384         u8 changing[OPA_MAX_VLS];
11385         u8 lowering_dedicated[OPA_MAX_VLS];
11386         u16 cur_total;
11387         u32 new_total = 0;
11388         const u64 all_mask =
11389         SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK
11390          | SEND_CM_CREDIT_USED_STATUS_VL1_RETURN_CREDIT_STATUS_SMASK
11391          | SEND_CM_CREDIT_USED_STATUS_VL2_RETURN_CREDIT_STATUS_SMASK
11392          | SEND_CM_CREDIT_USED_STATUS_VL3_RETURN_CREDIT_STATUS_SMASK
11393          | SEND_CM_CREDIT_USED_STATUS_VL4_RETURN_CREDIT_STATUS_SMASK
11394          | SEND_CM_CREDIT_USED_STATUS_VL5_RETURN_CREDIT_STATUS_SMASK
11395          | SEND_CM_CREDIT_USED_STATUS_VL6_RETURN_CREDIT_STATUS_SMASK
11396          | SEND_CM_CREDIT_USED_STATUS_VL7_RETURN_CREDIT_STATUS_SMASK
11397          | SEND_CM_CREDIT_USED_STATUS_VL15_RETURN_CREDIT_STATUS_SMASK;
11398 
11399 #define valid_vl(idx) ((idx) < TXE_NUM_DATA_VL || (idx) == 15)
11400 #define NUM_USABLE_VLS 16       /* look at VL15 and less */
11401 
11402         /* find the new total credits, do sanity check on unused VLs */
11403         for (i = 0; i < OPA_MAX_VLS; i++) {
11404                 if (valid_vl(i)) {
11405                         new_total += be16_to_cpu(new_bc->vl[i].dedicated);
11406                         continue;
11407                 }
11408                 nonzero_msg(dd, i, "dedicated",
11409                             be16_to_cpu(new_bc->vl[i].dedicated));
11410                 nonzero_msg(dd, i, "shared",
11411                             be16_to_cpu(new_bc->vl[i].shared));
11412                 new_bc->vl[i].dedicated = 0;
11413                 new_bc->vl[i].shared = 0;
11414         }
11415         new_total += be16_to_cpu(new_bc->overall_shared_limit);
11416 
11417         /* fetch the current values */
11418         get_buffer_control(dd, &cur_bc, &cur_total);
11419 
11420         /*
11421          * Create the masks we will use.
11422          */
11423         memset(changing, 0, sizeof(changing));
11424         memset(lowering_dedicated, 0, sizeof(lowering_dedicated));
11425         /*
11426          * NOTE: Assumes that the individual VL bits are adjacent and in
11427          * increasing order
11428          */
11429         stat_mask =
11430                 SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK;
11431         changing_mask = 0;
11432         ld_mask = 0;
11433         change_count = 0;
11434         any_shared_limit_changing = 0;
11435         for (i = 0; i < NUM_USABLE_VLS; i++, stat_mask <<= 1) {
11436                 if (!valid_vl(i))
11437                         continue;
11438                 this_shared_changing = new_bc->vl[i].shared
11439                                                 != cur_bc.vl[i].shared;
11440                 if (this_shared_changing)
11441                         any_shared_limit_changing = 1;
11442                 if (new_bc->vl[i].dedicated != cur_bc.vl[i].dedicated ||
11443                     this_shared_changing) {
11444                         changing[i] = 1;
11445                         changing_mask |= stat_mask;
11446                         change_count++;
11447                 }
11448                 if (be16_to_cpu(new_bc->vl[i].dedicated) <
11449                                         be16_to_cpu(cur_bc.vl[i].dedicated)) {
11450                         lowering_dedicated[i] = 1;
11451                         ld_mask |= stat_mask;
11452                 }
11453         }
11454 
11455         /* bracket the credit change with a total adjustment */
11456         if (new_total > cur_total)
11457                 set_global_limit(dd, new_total);
11458 
11459         /*
11460          * Start the credit change algorithm.
11461          */
11462         use_all_mask = 0;
11463         if ((be16_to_cpu(new_bc->overall_shared_limit) <
11464              be16_to_cpu(cur_bc.overall_shared_limit)) ||
11465             (is_ax(dd) && any_shared_limit_changing)) {
11466                 set_global_shared(dd, 0);
11467                 cur_bc.overall_shared_limit = 0;
11468                 use_all_mask = 1;
11469         }
11470 
11471         for (i = 0; i < NUM_USABLE_VLS; i++) {
11472                 if (!valid_vl(i))
11473                         continue;
11474 
11475                 if (changing[i]) {
11476                         set_vl_shared(dd, i, 0);
11477                         cur_bc.vl[i].shared = 0;
11478                 }
11479         }
11480 
11481         wait_for_vl_status_clear(dd, use_all_mask ? all_mask : changing_mask,
11482                                  "shared");
11483 
11484         if (change_count > 0) {
11485                 for (i = 0; i < NUM_USABLE_VLS; i++) {
11486                         if (!valid_vl(i))
11487                                 continue;
11488 
11489                         if (lowering_dedicated[i]) {
11490                                 set_vl_dedicated(dd, i,
11491                                                  be16_to_cpu(new_bc->
11492                                                              vl[i].dedicated));
11493                                 cur_bc.vl[i].dedicated =
11494                                                 new_bc->vl[i].dedicated;
11495                         }
11496                 }
11497 
11498                 wait_for_vl_status_clear(dd, ld_mask, "dedicated");
11499 
11500                 /* now raise all dedicated that are going up */
11501                 for (i = 0; i < NUM_USABLE_VLS; i++) {
11502                         if (!valid_vl(i))
11503                                 continue;
11504 
11505                         if (be16_to_cpu(new_bc->vl[i].dedicated) >
11506                                         be16_to_cpu(cur_bc.vl[i].dedicated))
11507                                 set_vl_dedicated(dd, i,
11508                                                  be16_to_cpu(new_bc->
11509                                                              vl[i].dedicated));
11510                 }
11511         }
11512 
11513         /* next raise all shared that are going up */
11514         for (i = 0; i < NUM_USABLE_VLS; i++) {
11515                 if (!valid_vl(i))
11516                         continue;
11517 
11518                 if (be16_to_cpu(new_bc->vl[i].shared) >
11519                                 be16_to_cpu(cur_bc.vl[i].shared))
11520                         set_vl_shared(dd, i, be16_to_cpu(new_bc->vl[i].shared));
11521         }
11522 
11523         /* finally raise the global shared */
11524         if (be16_to_cpu(new_bc->overall_shared_limit) >
11525             be16_to_cpu(cur_bc.overall_shared_limit))
11526                 set_global_shared(dd,
11527                                   be16_to_cpu(new_bc->overall_shared_limit));
11528 
11529         /* bracket the credit change with a total adjustment */
11530         if (new_total < cur_total)
11531                 set_global_limit(dd, new_total);
11532 
11533         /*
11534          * Determine the actual number of operational VLS using the number of
11535          * dedicated and shared credits for each VL.
11536          */
11537         if (change_count > 0) {
11538                 for (i = 0; i < TXE_NUM_DATA_VL; i++)
11539                         if (be16_to_cpu(new_bc->vl[i].dedicated) > 0 ||
11540                             be16_to_cpu(new_bc->vl[i].shared) > 0)
11541                                 vl_count++;
11542                 ppd->actual_vls_operational = vl_count;
11543                 ret = sdma_map_init(dd, ppd->port - 1, vl_count ?
11544                                     ppd->actual_vls_operational :
11545                                     ppd->vls_operational,
11546                                     NULL);
11547                 if (ret == 0)
11548                         ret = pio_map_init(dd, ppd->port - 1, vl_count ?
11549                                            ppd->actual_vls_operational :
11550                                            ppd->vls_operational, NULL);
11551                 if (ret)
11552                         return ret;
11553         }
11554         return 0;
11555 }
11556 
11557 /*
11558  * Read the given fabric manager table. Return the size of the
11559  * table (in bytes) on success, and a negative error code on
11560  * failure.
11561  */
11562 int fm_get_table(struct hfi1_pportdata *ppd, int which, void *t)
11563 
11564 {
11565         int size;
11566         struct vl_arb_cache *vlc;
11567 
11568         switch (which) {
11569         case FM_TBL_VL_HIGH_ARB:
11570                 size = 256;
11571                 /*
11572                  * OPA specifies 128 elements (of 2 bytes each), though
11573                  * HFI supports only 16 elements in h/w.
11574                  */
11575                 vlc = vl_arb_lock_cache(ppd, HI_PRIO_TABLE);
11576                 vl_arb_get_cache(vlc, t);
11577                 vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
11578                 break;
11579         case FM_TBL_VL_LOW_ARB:
11580                 size = 256;
11581                 /*
11582                  * OPA specifies 128 elements (of 2 bytes each), though
11583                  * HFI supports only 16 elements in h/w.
11584                  */
11585                 vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
11586                 vl_arb_get_cache(vlc, t);
11587                 vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
11588                 break;
11589         case FM_TBL_BUFFER_CONTROL:
11590                 size = get_buffer_control(ppd->dd, t, NULL);
11591                 break;
11592         case FM_TBL_SC2VLNT:
11593                 size = get_sc2vlnt(ppd->dd, t);
11594                 break;
11595         case FM_TBL_VL_PREEMPT_ELEMS:
11596                 size = 256;
11597                 /* OPA specifies 128 elements, of 2 bytes each */
11598                 get_vlarb_preempt(ppd->dd, OPA_MAX_VLS, t);
11599                 break;
11600         case FM_TBL_VL_PREEMPT_MATRIX:
11601                 size = 256;
11602                 /*
11603                  * OPA specifies that this is the same size as the VL
11604                  * arbitration tables (i.e., 256 bytes).
11605                  */
11606                 break;
11607         default:
11608                 return -EINVAL;
11609         }
11610         return size;
11611 }
11612 
11613 /*
11614  * Write the given fabric manager table.
11615  */
11616 int fm_set_table(struct hfi1_pportdata *ppd, int which, void *t)
11617 {
11618         int ret = 0;
11619         struct vl_arb_cache *vlc;
11620 
11621         switch (which) {
11622         case FM_TBL_VL_HIGH_ARB:
11623                 vlc = vl_arb_lock_cache(ppd, HI_PRIO_TABLE);
11624                 if (vl_arb_match_cache(vlc, t)) {
11625                         vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
11626                         break;
11627                 }
11628                 vl_arb_set_cache(vlc, t);
11629                 vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
11630                 ret = set_vl_weights(ppd, SEND_HIGH_PRIORITY_LIST,
11631                                      VL_ARB_HIGH_PRIO_TABLE_SIZE, t);
11632                 break;
11633         case FM_TBL_VL_LOW_ARB:
11634                 vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
11635                 if (vl_arb_match_cache(vlc, t)) {
11636                         vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
11637                         break;
11638                 }
11639                 vl_arb_set_cache(vlc, t);
11640                 vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
11641                 ret = set_vl_weights(ppd, SEND_LOW_PRIORITY_LIST,
11642                                      VL_ARB_LOW_PRIO_TABLE_SIZE, t);
11643                 break;
11644         case FM_TBL_BUFFER_CONTROL:
11645                 ret = set_buffer_control(ppd, t);
11646                 break;
11647         case FM_TBL_SC2VLNT:
11648                 set_sc2vlnt(ppd->dd, t);
11649                 break;
11650         default:
11651                 ret = -EINVAL;
11652         }
11653         return ret;
11654 }
11655 
11656 /*
11657  * Disable all data VLs.
11658  *
11659  * Return 0 if disabled, non-zero if the VLs cannot be disabled.
11660  */
11661 static int disable_data_vls(struct hfi1_devdata *dd)
11662 {
11663         if (is_ax(dd))
11664                 return 1;
11665 
11666         pio_send_control(dd, PSC_DATA_VL_DISABLE);
11667 
11668         return 0;
11669 }
11670 
11671 /*
11672  * open_fill_data_vls() - the counterpart to stop_drain_data_vls().
11673  * Just re-enables all data VLs (the "fill" part happens
11674  * automatically - the name was chosen for symmetry with
11675  * stop_drain_data_vls()).
11676  *
11677  * Return 0 if successful, non-zero if the VLs cannot be enabled.
11678  */
11679 int open_fill_data_vls(struct hfi1_devdata *dd)
11680 {
11681         if (is_ax(dd))
11682                 return 1;
11683 
11684         pio_send_control(dd, PSC_DATA_VL_ENABLE);
11685 
11686         return 0;
11687 }
11688 
11689 /*
11690  * drain_data_vls() - assumes that disable_data_vls() has been called,
11691  * wait for occupancy (of per-VL FIFOs) for all contexts, and SDMA
11692  * engines to drop to 0.
11693  */
11694 static void drain_data_vls(struct hfi1_devdata *dd)
11695 {
11696         sc_wait(dd);
11697         sdma_wait(dd);
11698         pause_for_credit_return(dd);
11699 }
11700 
11701 /*
11702  * stop_drain_data_vls() - disable, then drain all per-VL fifos.
11703  *
11704  * Use open_fill_data_vls() to resume using data VLs.  This pair is
11705  * meant to be used like this:
11706  *
11707  * stop_drain_data_vls(dd);
11708  * // do things with per-VL resources
11709  * open_fill_data_vls(dd);
11710  */
11711 int stop_drain_data_vls(struct hfi1_devdata *dd)
11712 {
11713         int ret;
11714 
11715         ret = disable_data_vls(dd);
11716         if (ret == 0)
11717                 drain_data_vls(dd);
11718 
11719         return ret;
11720 }
11721 
11722 /*
11723  * Convert a nanosecond time to a cclock count.  No matter how slow
11724  * the cclock, a non-zero ns will always have a non-zero result.
11725  */
11726 u32 ns_to_cclock(struct hfi1_devdata *dd, u32 ns)
11727 {
11728         u32 cclocks;
11729 
11730         if (dd->icode == ICODE_FPGA_EMULATION)
11731                 cclocks = (ns * 1000) / FPGA_CCLOCK_PS;
11732         else  /* simulation pretends to be ASIC */
11733                 cclocks = (ns * 1000) / ASIC_CCLOCK_PS;
11734         if (ns && !cclocks)     /* if ns nonzero, must be at least 1 */
11735                 cclocks = 1;
11736         return cclocks;
11737 }
11738 
11739 /*
11740  * Convert a cclock count to nanoseconds. Not matter how slow
11741  * the cclock, a non-zero cclocks will always have a non-zero result.
11742  */
11743 u32 cclock_to_ns(struct hfi1_devdata *dd, u32 cclocks)
11744 {
11745         u32 ns;
11746 
11747         if (dd->icode == ICODE_FPGA_EMULATION)
11748                 ns = (cclocks * FPGA_CCLOCK_PS) / 1000;
11749         else  /* simulation pretends to be ASIC */
11750                 ns = (cclocks * ASIC_CCLOCK_PS) / 1000;
11751         if (cclocks && !ns)
11752                 ns = 1;
11753         return ns;
11754 }
11755 
11756 /*
11757  * Dynamically adjust the receive interrupt timeout for a context based on
11758  * incoming packet rate.
11759  *
11760  * NOTE: Dynamic adjustment does not allow rcv_intr_count to be zero.
11761  */
11762 static void adjust_rcv_timeout(struct hfi1_ctxtdata *rcd, u32 npkts)
11763 {
11764         struct hfi1_devdata *dd = rcd->dd;
11765         u32 timeout = rcd->rcvavail_timeout;
11766 
11767         /*
11768          * This algorithm doubles or halves the timeout depending on whether
11769          * the number of packets received in this interrupt were less than or
11770          * greater equal the interrupt count.
11771          *
11772          * The calculations below do not allow a steady state to be achieved.
11773          * Only at the endpoints it is possible to have an unchanging
11774          * timeout.
11775          */
11776         if (npkts < rcv_intr_count) {
11777                 /*
11778                  * Not enough packets arrived before the timeout, adjust
11779                  * timeout downward.
11780                  */
11781                 if (timeout < 2) /* already at minimum? */
11782                         return;
11783                 timeout >>= 1;
11784         } else {
11785                 /*
11786                  * More than enough packets arrived before the timeout, adjust
11787                  * timeout upward.
11788                  */
11789                 if (timeout >= dd->rcv_intr_timeout_csr) /* already at max? */
11790                         return;
11791                 timeout = min(timeout << 1, dd->rcv_intr_timeout_csr);
11792         }
11793 
11794         rcd->rcvavail_timeout = timeout;
11795         /*
11796          * timeout cannot be larger than rcv_intr_timeout_csr which has already
11797          * been verified to be in range
11798          */
11799         write_kctxt_csr(dd, rcd->ctxt, RCV_AVAIL_TIME_OUT,
11800                         (u64)timeout <<
11801                         RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT);
11802 }
11803 
11804 void update_usrhead(struct hfi1_ctxtdata *rcd, u32 hd, u32 updegr, u32 egrhd,
11805                     u32 intr_adjust, u32 npkts)
11806 {
11807         struct hfi1_devdata *dd = rcd->dd;
11808         u64 reg;
11809         u32 ctxt = rcd->ctxt;
11810 
11811         /*
11812          * Need to write timeout register before updating RcvHdrHead to ensure
11813          * that a new value is used when the HW decides to restart counting.
11814          */
11815         if (intr_adjust)
11816                 adjust_rcv_timeout(rcd, npkts);
11817         if (updegr) {
11818                 reg = (egrhd & RCV_EGR_INDEX_HEAD_HEAD_MASK)
11819                         << RCV_EGR_INDEX_HEAD_HEAD_SHIFT;
11820                 write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, reg);
11821         }
11822         reg = ((u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT) |
11823                 (((u64)hd & RCV_HDR_HEAD_HEAD_MASK)
11824                         << RCV_HDR_HEAD_HEAD_SHIFT);
11825         write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg);
11826 }
11827 
11828 u32 hdrqempty(struct hfi1_ctxtdata *rcd)
11829 {
11830         u32 head, tail;
11831 
11832         head = (read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_HEAD)
11833                 & RCV_HDR_HEAD_HEAD_SMASK) >> RCV_HDR_HEAD_HEAD_SHIFT;
11834 
11835         if (rcd->rcvhdrtail_kvaddr)
11836                 tail = get_rcvhdrtail(rcd);
11837         else
11838                 tail = read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL);
11839 
11840         return head == tail;
11841 }
11842 
11843 /*
11844  * Context Control and Receive Array encoding for buffer size:
11845  *      0x0 invalid
11846  *      0x1   4 KB
11847  *      0x2   8 KB
11848  *      0x3  16 KB
11849  *      0x4  32 KB
11850  *      0x5  64 KB
11851  *      0x6 128 KB
11852  *      0x7 256 KB
11853  *      0x8 512 KB (Receive Array only)
11854  *      0x9   1 MB (Receive Array only)
11855  *      0xa   2 MB (Receive Array only)
11856  *
11857  *      0xB-0xF - reserved (Receive Array only)
11858  *
11859  *
11860  * This routine assumes that the value has already been sanity checked.
11861  */
11862 static u32 encoded_size(u32 size)
11863 {
11864         switch (size) {
11865         case   4 * 1024: return 0x1;
11866         case   8 * 1024: return 0x2;
11867         case  16 * 1024: return 0x3;
11868         case  32 * 1024: return 0x4;
11869         case  64 * 1024: return 0x5;
11870         case 128 * 1024: return 0x6;
11871         case 256 * 1024: return 0x7;
11872         case 512 * 1024: return 0x8;
11873         case   1 * 1024 * 1024: return 0x9;
11874         case   2 * 1024 * 1024: return 0xa;
11875         }
11876         return 0x1;     /* if invalid, go with the minimum size */
11877 }
11878 
11879 void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op,
11880                   struct hfi1_ctxtdata *rcd)
11881 {
11882         u64 rcvctrl, reg;
11883         int did_enable = 0;
11884         u16 ctxt;
11885 
11886         if (!rcd)
11887                 return;
11888 
11889         ctxt = rcd->ctxt;
11890 
11891         hfi1_cdbg(RCVCTRL, "ctxt %d op 0x%x", ctxt, op);
11892 
11893         rcvctrl = read_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL);
11894         /* if the context already enabled, don't do the extra steps */
11895         if ((op & HFI1_RCVCTRL_CTXT_ENB) &&
11896             !(rcvctrl & RCV_CTXT_CTRL_ENABLE_SMASK)) {
11897                 /* reset the tail and hdr addresses, and sequence count */
11898                 write_kctxt_csr(dd, ctxt, RCV_HDR_ADDR,
11899                                 rcd->rcvhdrq_dma);
11900                 if (rcd->rcvhdrtail_kvaddr)
11901                         write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
11902                                         rcd->rcvhdrqtailaddr_dma);
11903                 rcd->seq_cnt = 1;
11904 
11905                 /* reset the cached receive header queue head value */
11906                 rcd->head = 0;
11907 
11908                 /*
11909                  * Zero the receive header queue so we don't get false
11910                  * positives when checking the sequence number.  The
11911                  * sequence numbers could land exactly on the same spot.
11912                  * E.g. a rcd restart before the receive header wrapped.
11913                  */
11914                 memset(rcd->rcvhdrq, 0, rcvhdrq_size(rcd));
11915 
11916                 /* starting timeout */
11917                 rcd->rcvavail_timeout = dd->rcv_intr_timeout_csr;
11918 
11919                 /* enable the context */
11920                 rcvctrl |= RCV_CTXT_CTRL_ENABLE_SMASK;
11921 
11922                 /* clean the egr buffer size first */
11923                 rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK;
11924                 rcvctrl |= ((u64)encoded_size(rcd->egrbufs.rcvtid_size)
11925                                 & RCV_CTXT_CTRL_EGR_BUF_SIZE_MASK)
11926                                         << RCV_CTXT_CTRL_EGR_BUF_SIZE_SHIFT;
11927 
11928                 /* zero RcvHdrHead - set RcvHdrHead.Counter after enable */
11929                 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0);
11930                 did_enable = 1;
11931 
11932                 /* zero RcvEgrIndexHead */
11933                 write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, 0);
11934 
11935                 /* set eager count and base index */
11936                 reg = (((u64)(rcd->egrbufs.alloced >> RCV_SHIFT)
11937                         & RCV_EGR_CTRL_EGR_CNT_MASK)
11938                        << RCV_EGR_CTRL_EGR_CNT_SHIFT) |
11939                         (((rcd->eager_base >> RCV_SHIFT)
11940                           & RCV_EGR_CTRL_EGR_BASE_INDEX_MASK)
11941                          << RCV_EGR_CTRL_EGR_BASE_INDEX_SHIFT);
11942                 write_kctxt_csr(dd, ctxt, RCV_EGR_CTRL, reg);
11943 
11944                 /*
11945                  * Set TID (expected) count and base index.
11946                  * rcd->expected_count is set to individual RcvArray entries,
11947                  * not pairs, and the CSR takes a pair-count in groups of
11948                  * four, so divide by 8.
11949                  */
11950                 reg = (((rcd->expected_count >> RCV_SHIFT)
11951                                         & RCV_TID_CTRL_TID_PAIR_CNT_MASK)
11952                                 << RCV_TID_CTRL_TID_PAIR_CNT_SHIFT) |
11953                       (((rcd->expected_base >> RCV_SHIFT)
11954                                         & RCV_TID_CTRL_TID_BASE_INDEX_MASK)
11955                                 << RCV_TID_CTRL_TID_BASE_INDEX_SHIFT);
11956                 write_kctxt_csr(dd, ctxt, RCV_TID_CTRL, reg);
11957                 if (ctxt == HFI1_CTRL_CTXT)
11958                         write_csr(dd, RCV_VL15, HFI1_CTRL_CTXT);
11959         }
11960         if (op & HFI1_RCVCTRL_CTXT_DIS) {
11961                 write_csr(dd, RCV_VL15, 0);
11962                 /*
11963                  * When receive context is being disabled turn on tail
11964                  * update with a dummy tail address and then disable
11965                  * receive context.
11966                  */
11967                 if (dd->rcvhdrtail_dummy_dma) {
11968                         write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
11969                                         dd->rcvhdrtail_dummy_dma);
11970                         /* Enabling RcvCtxtCtrl.TailUpd is intentional. */
11971                         rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
11972                 }
11973 
11974                 rcvctrl &= ~RCV_CTXT_CTRL_ENABLE_SMASK;
11975         }
11976         if (op & HFI1_RCVCTRL_INTRAVAIL_ENB) {
11977                 set_intr_bits(dd, IS_RCVAVAIL_START + rcd->ctxt,
11978                               IS_RCVAVAIL_START + rcd->ctxt, true);
11979                 rcvctrl |= RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
11980         }
11981         if (op & HFI1_RCVCTRL_INTRAVAIL_DIS) {
11982                 set_intr_bits(dd, IS_RCVAVAIL_START + rcd->ctxt,
11983                               IS_RCVAVAIL_START + rcd->ctxt, false);
11984                 rcvctrl &= ~RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
11985         }
11986         if ((op & HFI1_RCVCTRL_TAILUPD_ENB) && rcd->rcvhdrtail_kvaddr)
11987                 rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
11988         if (op & HFI1_RCVCTRL_TAILUPD_DIS) {
11989                 /* See comment on RcvCtxtCtrl.TailUpd above */
11990                 if (!(op & HFI1_RCVCTRL_CTXT_DIS))
11991                         rcvctrl &= ~RCV_CTXT_CTRL_TAIL_UPD_SMASK;
11992         }
11993         if (op & HFI1_RCVCTRL_TIDFLOW_ENB)
11994                 rcvctrl |= RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK;
11995         if (op & HFI1_RCVCTRL_TIDFLOW_DIS)
11996                 rcvctrl &= ~RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK;
11997         if (op & HFI1_RCVCTRL_ONE_PKT_EGR_ENB) {
11998                 /*
11999                  * In one-packet-per-eager mode, the size comes from
12000                  * the RcvArray entry.
12001                  */
12002                 rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK;
12003                 rcvctrl |= RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK;
12004         }
12005         if (op & HFI1_RCVCTRL_ONE_PKT_EGR_DIS)
12006                 rcvctrl &= ~RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK;
12007         if (op & HFI1_RCVCTRL_NO_RHQ_DROP_ENB)
12008                 rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK;
12009         if (op & HFI1_RCVCTRL_NO_RHQ_DROP_DIS)
12010                 rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK;
12011         if (op & HFI1_RCVCTRL_NO_EGR_DROP_ENB)
12012                 rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK;
12013         if (op & HFI1_RCVCTRL_NO_EGR_DROP_DIS)
12014                 rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK;
12015         if (op & HFI1_RCVCTRL_URGENT_ENB)
12016                 set_intr_bits(dd, IS_RCVURGENT_START + rcd->ctxt,
12017                               IS_RCVURGENT_START + rcd->ctxt, true);
12018         if (op & HFI1_RCVCTRL_URGENT_DIS)
12019                 set_intr_bits(dd, IS_RCVURGENT_START + rcd->ctxt,
12020                               IS_RCVURGENT_START + rcd->ctxt, false);
12021 
12022         hfi1_cdbg(RCVCTRL, "ctxt %d rcvctrl 0x%llx\n", ctxt, rcvctrl);
12023         write_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL, rcvctrl);
12024 
12025         /* work around sticky RcvCtxtStatus.BlockedRHQFull */
12026         if (did_enable &&
12027             (rcvctrl & RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK)) {
12028                 reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS);
12029                 if (reg != 0) {
12030                         dd_dev_info(dd, "ctxt %d status %lld (blocked)\n",
12031                                     ctxt, reg);
12032                         read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD);
12033                         write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x10);
12034                         write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x00);
12035                         read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD);
12036                         reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS);
12037                         dd_dev_info(dd, "ctxt %d status %lld (%s blocked)\n",
12038                                     ctxt, reg, reg == 0 ? "not" : "still");
12039                 }
12040         }
12041 
12042         if (did_enable) {
12043                 /*
12044                  * The interrupt timeout and count must be set after
12045                  * the context is enabled to take effect.
12046                  */
12047                 /* set interrupt timeout */
12048                 write_kctxt_csr(dd, ctxt, RCV_AVAIL_TIME_OUT,
12049                                 (u64)rcd->rcvavail_timeout <<
12050                                 RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT);
12051 
12052                 /* set RcvHdrHead.Counter, zero RcvHdrHead.Head (again) */
12053                 reg = (u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT;
12054                 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg);
12055         }
12056 
12057         if (op & (HFI1_RCVCTRL_TAILUPD_DIS | HFI1_RCVCTRL_CTXT_DIS))
12058                 /*
12059                  * If the context has been disabled and the Tail Update has
12060                  * been cleared, set the RCV_HDR_TAIL_ADDR CSR to dummy address
12061                  * so it doesn't contain an address that is invalid.
12062                  */
12063                 write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
12064                                 dd->rcvhdrtail_dummy_dma);
12065 }
12066 
12067 u32 hfi1_read_cntrs(struct hfi1_devdata *dd, char **namep, u64 **cntrp)
12068 {
12069         int ret;
12070         u64 val = 0;
12071 
12072         if (namep) {
12073                 ret = dd->cntrnameslen;
12074                 *namep = dd->cntrnames;
12075         } else {
12076                 const struct cntr_entry *entry;
12077                 int i, j;
12078 
12079                 ret = (dd->ndevcntrs) * sizeof(u64);
12080 
12081                 /* Get the start of the block of counters */
12082                 *cntrp = dd->cntrs;
12083 
12084                 /*
12085                  * Now go and fill in each counter in the block.
12086                  */
12087                 for (i = 0; i < DEV_CNTR_LAST; i++) {
12088                         entry = &dev_cntrs[i];
12089                         hfi1_cdbg(CNTR, "reading %s", entry->name);
12090                         if (entry->flags & CNTR_DISABLED) {
12091                                 /* Nothing */
12092                                 hfi1_cdbg(CNTR, "\tDisabled\n");
12093                         } else {
12094                                 if (entry->flags & CNTR_VL) {
12095                                         hfi1_cdbg(CNTR, "\tPer VL\n");
12096                                         for (j = 0; j < C_VL_COUNT; j++) {
12097                                                 val = entry->rw_cntr(entry,
12098                                                                   dd, j,
12099                                                                   CNTR_MODE_R,
12100                                                                   0);
12101                                                 hfi1_cdbg(
12102                                                    CNTR,
12103                                                    "\t\tRead 0x%llx for %d\n",
12104                                                    val, j);
12105                                                 dd->cntrs[entry->offset + j] =
12106                                                                             val;
12107                                         }
12108                                 } else if (entry->flags & CNTR_SDMA) {
12109                                         hfi1_cdbg(CNTR,
12110                                                   "\t Per SDMA Engine\n");
12111                                         for (j = 0; j < chip_sdma_engines(dd);
12112                                              j++) {
12113                                                 val =
12114                                                 entry->rw_cntr(entry, dd, j,
12115                                                                CNTR_MODE_R, 0);
12116                                                 hfi1_cdbg(CNTR,
12117                                                           "\t\tRead 0x%llx for %d\n",
12118                                                           val, j);
12119                                                 dd->cntrs[entry->offset + j] =
12120                                                                         val;
12121                                         }
12122                                 } else {
12123                                         val = entry->rw_cntr(entry, dd,
12124                                                         CNTR_INVALID_VL,
12125                                                         CNTR_MODE_R, 0);
12126                                         dd->cntrs[entry->offset] = val;
12127                                         hfi1_cdbg(CNTR, "\tRead 0x%llx", val);
12128                                 }
12129                         }
12130                 }
12131         }
12132         return ret;
12133 }
12134 
12135 /*
12136  * Used by sysfs to create files for hfi stats to read
12137  */
12138 u32 hfi1_read_portcntrs(struct hfi1_pportdata *ppd, char **namep, u64 **cntrp)
12139 {
12140         int ret;
12141         u64 val = 0;
12142 
12143         if (namep) {
12144                 ret = ppd->dd->portcntrnameslen;
12145                 *namep = ppd->dd->portcntrnames;
12146         } else {
12147                 const struct cntr_entry *entry;
12148                 int i, j;
12149 
12150                 ret = ppd->dd->nportcntrs * sizeof(u64);
12151                 *cntrp = ppd->cntrs;
12152 
12153                 for (i = 0; i < PORT_CNTR_LAST; i++) {
12154                         entry = &port_cntrs[i];
12155                         hfi1_cdbg(CNTR, "reading %s", entry->name);
12156                         if (entry->flags & CNTR_DISABLED) {
12157                                 /* Nothing */
12158                                 hfi1_cdbg(CNTR, "\tDisabled\n");
12159                                 continue;
12160                         }
12161 
12162                         if (entry->flags & CNTR_VL) {
12163                                 hfi1_cdbg(CNTR, "\tPer VL");
12164                                 for (j = 0; j < C_VL_COUNT; j++) {
12165                                         val = entry->rw_cntr(entry, ppd, j,
12166                                                                CNTR_MODE_R,
12167                                                                0);
12168                                         hfi1_cdbg(
12169                                            CNTR,
12170                                            "\t\tRead 0x%llx for %d",
12171                                            val, j);
12172                                         ppd->cntrs[entry->offset + j] = val;
12173                                 }
12174                         } else {
12175                                 val = entry->rw_cntr(entry, ppd,
12176                                                        CNTR_INVALID_VL,
12177                                                        CNTR_MODE_R,
12178                                                        0);
12179                                 ppd->cntrs[entry->offset] = val;
12180                                 hfi1_cdbg(CNTR, "\tRead 0x%llx", val);
12181                         }
12182                 }
12183         }
12184         return ret;
12185 }
12186 
12187 static void free_cntrs(struct hfi1_devdata *dd)
12188 {
12189         struct hfi1_pportdata *ppd;
12190         int i;
12191 
12192         if (dd->synth_stats_timer.function)
12193                 del_timer_sync(&dd->synth_stats_timer);
12194         ppd = (struct hfi1_pportdata *)(dd + 1);
12195         for (i = 0; i < dd->num_pports; i++, ppd++) {
12196                 kfree(ppd->cntrs);
12197                 kfree(ppd->scntrs);
12198                 free_percpu(ppd->ibport_data.rvp.rc_acks);
12199                 free_percpu(ppd->ibport_data.rvp.rc_qacks);
12200                 free_percpu(ppd->ibport_data.rvp.rc_delayed_comp);
12201                 ppd->cntrs = NULL;
12202                 ppd->scntrs = NULL;
12203                 ppd->ibport_data.rvp.rc_acks = NULL;
12204                 ppd->ibport_data.rvp.rc_qacks = NULL;
12205                 ppd->ibport_data.rvp.rc_delayed_comp = NULL;
12206         }
12207         kfree(dd->portcntrnames);
12208         dd->portcntrnames = NULL;
12209         kfree(dd->cntrs);
12210         dd->cntrs = NULL;
12211         kfree(dd->scntrs);
12212         dd->scntrs = NULL;
12213         kfree(dd->cntrnames);
12214         dd->cntrnames = NULL;
12215         if (dd->update_cntr_wq) {
12216                 destroy_workqueue(dd->update_cntr_wq);
12217                 dd->update_cntr_wq = NULL;
12218         }
12219 }
12220 
12221 static u64 read_dev_port_cntr(struct hfi1_devdata *dd, struct cntr_entry *entry,
12222                               u64 *psval, void *context, int vl)
12223 {
12224         u64 val;
12225         u64 sval = *psval;
12226 
12227         if (entry->flags & CNTR_DISABLED) {
12228                 dd_dev_err(dd, "Counter %s not enabled", entry->name);
12229                 return 0;
12230         }
12231 
12232         hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval);
12233 
12234         val = entry->rw_cntr(entry, context, vl, CNTR_MODE_R, 0);
12235 
12236         /* If its a synthetic counter there is more work we need to do */
12237         if (entry->flags & CNTR_SYNTH) {
12238                 if (sval == CNTR_MAX) {
12239                         /* No need to read already saturated */
12240                         return CNTR_MAX;
12241                 }
12242 
12243                 if (entry->flags & CNTR_32BIT) {
12244                         /* 32bit counters can wrap multiple times */
12245                         u64 upper = sval >> 32;
12246                         u64 lower = (sval << 32) >> 32;
12247 
12248                         if (lower > val) { /* hw wrapped */
12249                                 if (upper == CNTR_32BIT_MAX)
12250                                         val = CNTR_MAX;
12251                                 else
12252                                         upper++;
12253                         }
12254 
12255                         if (val != CNTR_MAX)
12256                                 val = (upper << 32) | val;
12257 
12258                 } else {
12259                         /* If we rolled we are saturated */
12260                         if ((val < sval) || (val > CNTR_MAX))
12261                                 val = CNTR_MAX;
12262                 }
12263         }
12264 
12265         *psval = val;
12266 
12267         hfi1_cdbg(CNTR, "\tNew val=0x%llx", val);
12268 
12269         return val;
12270 }
12271 
12272 static u64 write_dev_port_cntr(struct hfi1_devdata *dd,
12273                                struct cntr_entry *entry,
12274                                u64 *psval, void *context, int vl, u64 data)
12275 {
12276         u64 val;
12277 
12278         if (entry->flags & CNTR_DISABLED) {
12279                 dd_dev_err(dd, "Counter %s not enabled", entry->name);
12280                 return 0;
12281         }
12282 
12283         hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval);
12284 
12285         if (entry->flags & CNTR_SYNTH) {
12286                 *psval = data;
12287                 if (entry->flags & CNTR_32BIT) {
12288                         val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W,
12289                                              (data << 32) >> 32);
12290                         val = data; /* return the full 64bit value */
12291                 } else {
12292                         val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W,
12293                                              data);
12294                 }
12295         } else {
12296                 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W, data);
12297         }
12298 
12299         *psval = val;
12300 
12301         hfi1_cdbg(CNTR, "\tNew val=0x%llx", val);
12302 
12303         return val;
12304 }
12305 
12306 u64 read_dev_cntr(struct hfi1_devdata *dd, int index, int vl)
12307 {
12308         struct cntr_entry *entry;
12309         u64 *sval;
12310 
12311         entry = &dev_cntrs[index];
12312         sval = dd->scntrs + entry->offset;
12313 
12314         if (vl != CNTR_INVALID_VL)
12315                 sval += vl;
12316 
12317         return read_dev_port_cntr(dd, entry, sval, dd, vl);
12318 }
12319 
12320 u64 write_dev_cntr(struct hfi1_devdata *dd, int index, int vl, u64 data)
12321 {
12322         struct cntr_entry *entry;
12323         u64 *sval;
12324 
12325         entry = &dev_cntrs[index];
12326         sval = dd->scntrs + entry->offset;
12327 
12328         if (vl != CNTR_INVALID_VL)
12329                 sval += vl;
12330 
12331         return write_dev_port_cntr(dd, entry, sval, dd, vl, data);
12332 }
12333 
12334 u64 read_port_cntr(struct hfi1_pportdata *ppd, int index, int vl)
12335 {
12336         struct cntr_entry *entry;
12337         u64 *sval;
12338 
12339         entry = &port_cntrs[index];
12340         sval = ppd->scntrs + entry->offset;
12341 
12342         if (vl != CNTR_INVALID_VL)
12343                 sval += vl;
12344 
12345         if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) &&
12346             (index <= C_RCV_HDR_OVF_LAST)) {
12347                 /* We do not want to bother for disabled contexts */
12348                 return 0;
12349         }
12350 
12351         return read_dev_port_cntr(ppd->dd, entry, sval, ppd, vl);
12352 }
12353 
12354 u64 write_port_cntr(struct hfi1_pportdata *ppd, int index, int vl, u64 data)
12355 {
12356         struct cntr_entry *entry;
12357         u64 *sval;
12358 
12359         entry = &port_cntrs[index];
12360         sval = ppd->scntrs + entry->offset;
12361 
12362         if (vl != CNTR_INVALID_VL)
12363                 sval += vl;
12364 
12365         if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) &&
12366             (index <= C_RCV_HDR_OVF_LAST)) {
12367                 /* We do not want to bother for disabled contexts */
12368                 return 0;
12369         }
12370 
12371         return write_dev_port_cntr(ppd->dd, entry, sval, ppd, vl, data);
12372 }
12373 
12374 static void do_update_synth_timer(struct work_struct *work)
12375 {
12376         u64 cur_tx;
12377         u64 cur_rx;
12378         u64 total_flits;
12379         u8 update = 0;
12380         int i, j, vl;
12381         struct hfi1_pportdata *ppd;
12382         struct cntr_entry *entry;
12383         struct hfi1_devdata *dd = container_of(work, struct hfi1_devdata,
12384                                                update_cntr_work);
12385 
12386         /*
12387          * Rather than keep beating on the CSRs pick a minimal set that we can
12388          * check to watch for potential roll over. We can do this by looking at
12389          * the number of flits sent/recv. If the total flits exceeds 32bits then
12390          * we have to iterate all the counters and update.
12391          */
12392         entry = &dev_cntrs[C_DC_RCV_FLITS];
12393         cur_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);
12394 
12395         entry = &dev_cntrs[C_DC_XMIT_FLITS];
12396         cur_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);
12397 
12398         hfi1_cdbg(
12399             CNTR,
12400             "[%d] curr tx=0x%llx rx=0x%llx :: last tx=0x%llx rx=0x%llx\n",
12401             dd->unit, cur_tx, cur_rx, dd->last_tx, dd->last_rx);
12402 
12403         if ((cur_tx < dd->last_tx) || (cur_rx < dd->last_rx)) {
12404                 /*
12405                  * May not be strictly necessary to update but it won't hurt and
12406                  * simplifies the logic here.
12407                  */
12408                 update = 1;
12409                 hfi1_cdbg(CNTR, "[%d] Tripwire counter rolled, updating",
12410                           dd->unit);
12411         } else {
12412                 total_flits = (cur_tx - dd->last_tx) + (cur_rx - dd->last_rx);
12413                 hfi1_cdbg(CNTR,
12414                           "[%d] total flits 0x%llx limit 0x%llx\n", dd->unit,
12415                           total_flits, (u64)CNTR_32BIT_MAX);
12416                 if (total_flits >= CNTR_32BIT_MAX) {
12417                         hfi1_cdbg(CNTR, "[%d] 32bit limit hit, updating",
12418                                   dd->unit);
12419                         update = 1;
12420                 }
12421         }
12422 
12423         if (update) {
12424                 hfi1_cdbg(CNTR, "[%d] Updating dd and ppd counters", dd->unit);
12425                 for (i = 0; i < DEV_CNTR_LAST; i++) {
12426                         entry = &dev_cntrs[i];
12427                         if (entry->flags & CNTR_VL) {
12428                                 for (vl = 0; vl < C_VL_COUNT; vl++)
12429                                         read_dev_cntr(dd, i, vl);
12430                         } else {
12431                                 read_dev_cntr(dd, i, CNTR_INVALID_VL);
12432                         }
12433                 }
12434                 ppd = (struct hfi1_pportdata *)(dd + 1);
12435                 for (i = 0; i < dd->num_pports; i++, ppd++) {
12436                         for (j = 0; j < PORT_CNTR_LAST; j++) {
12437                                 entry = &port_cntrs[j];
12438                                 if (entry->flags & CNTR_VL) {
12439                                         for (vl = 0; vl < C_VL_COUNT; vl++)
12440                                                 read_port_cntr(ppd, j, vl);
12441                                 } else {
12442                                         read_port_cntr(ppd, j, CNTR_INVALID_VL);
12443                                 }
12444                         }
12445                 }
12446 
12447                 /*
12448                  * We want the value in the register. The goal is to keep track
12449                  * of the number of "ticks" not the counter value. In other
12450                  * words if the register rolls we want to notice it and go ahead
12451                  * and force an update.
12452                  */
12453                 entry = &dev_cntrs[C_DC_XMIT_FLITS];
12454                 dd->last_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL,
12455                                                 CNTR_MODE_R, 0);
12456 
12457                 entry = &dev_cntrs[C_DC_RCV_FLITS];
12458                 dd->last_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL,
12459                                                 CNTR_MODE_R, 0);
12460 
12461                 hfi1_cdbg(CNTR, "[%d] setting last tx/rx to 0x%llx 0x%llx",
12462                           dd->unit, dd->last_tx, dd->last_rx);
12463 
12464         } else {
12465                 hfi1_cdbg(CNTR, "[%d] No update necessary", dd->unit);
12466         }
12467 }
12468 
12469 static void update_synth_timer(struct timer_list *t)
12470 {
12471         struct hfi1_devdata *dd = from_timer(dd, t, synth_stats_timer);
12472 
12473         queue_work(dd->update_cntr_wq, &dd->update_cntr_work);
12474         mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
12475 }
12476 
12477 #define C_MAX_NAME 16 /* 15 chars + one for /0 */
12478 static int init_cntrs(struct hfi1_devdata *dd)
12479 {
12480         int i, rcv_ctxts, j;
12481         size_t sz;
12482         char *p;
12483         char name[C_MAX_NAME];
12484         struct hfi1_pportdata *ppd;
12485         const char *bit_type_32 = ",32";
12486         const int bit_type_32_sz = strlen(bit_type_32);
12487         u32 sdma_engines = chip_sdma_engines(dd);
12488 
12489         /* set up the stats timer; the add_timer is done at the end */
12490         timer_setup(&dd->synth_stats_timer, update_synth_timer, 0);
12491 
12492         /***********************/
12493         /* per device counters */
12494         /***********************/
12495 
12496         /* size names and determine how many we have*/
12497         dd->ndevcntrs = 0;
12498         sz = 0;
12499 
12500         for (i = 0; i < DEV_CNTR_LAST; i++) {
12501                 if (dev_cntrs[i].flags & CNTR_DISABLED) {
12502                         hfi1_dbg_early("\tSkipping %s\n", dev_cntrs[i].name);
12503                         continue;
12504                 }
12505 
12506                 if (dev_cntrs[i].flags & CNTR_VL) {
12507                         dev_cntrs[i].offset = dd->ndevcntrs;
12508                         for (j = 0; j < C_VL_COUNT; j++) {
12509                                 snprintf(name, C_MAX_NAME, "%s%d",
12510                                          dev_cntrs[i].name, vl_from_idx(j));
12511                                 sz += strlen(name);
12512                                 /* Add ",32" for 32-bit counters */
12513                                 if (dev_cntrs[i].flags & CNTR_32BIT)
12514                                         sz += bit_type_32_sz;
12515                                 sz++;
12516                                 dd->ndevcntrs++;
12517                         }
12518                 } else if (dev_cntrs[i].flags & CNTR_SDMA) {
12519                         dev_cntrs[i].offset = dd->ndevcntrs;
12520                         for (j = 0; j < sdma_engines; j++) {
12521                                 snprintf(name, C_MAX_NAME, "%s%d",
12522                                          dev_cntrs[i].name, j);
12523                                 sz += strlen(name);
12524                                 /* Add ",32" for 32-bit counters */
12525                                 if (dev_cntrs[i].flags & CNTR_32BIT)
12526                                         sz += bit_type_32_sz;
12527                                 sz++;
12528                                 dd->ndevcntrs++;
12529                         }
12530                 } else {
12531                         /* +1 for newline. */
12532                         sz += strlen(dev_cntrs[i].name) + 1;
12533                         /* Add ",32" for 32-bit counters */
12534                         if (dev_cntrs[i].flags & CNTR_32BIT)
12535                                 sz += bit_type_32_sz;
12536                         dev_cntrs[i].offset = dd->ndevcntrs;
12537                         dd->ndevcntrs++;
12538                 }
12539         }
12540 
12541         /* allocate space for the counter values */
12542         dd->cntrs = kcalloc(dd->ndevcntrs + num_driver_cntrs, sizeof(u64),
12543                             GFP_KERNEL);
12544         if (!dd->cntrs)
12545                 goto bail;
12546 
12547         dd->scntrs = kcalloc(dd->ndevcntrs, sizeof(u64), GFP_KERNEL);
12548         if (!dd->scntrs)
12549                 goto bail;
12550 
12551         /* allocate space for the counter names */
12552         dd->cntrnameslen = sz;
12553         dd->cntrnames = kmalloc(sz, GFP_KERNEL);
12554         if (!dd->cntrnames)
12555                 goto bail;
12556 
12557         /* fill in the names */
12558         for (p = dd->cntrnames, i = 0; i < DEV_CNTR_LAST; i++) {
12559                 if (dev_cntrs[i].flags & CNTR_DISABLED) {
12560                         /* Nothing */
12561                 } else if (dev_cntrs[i].flags & CNTR_VL) {
12562                         for (j = 0; j < C_VL_COUNT; j++) {
12563                                 snprintf(name, C_MAX_NAME, "%s%d",
12564                                          dev_cntrs[i].name,
12565                                          vl_from_idx(j));
12566                                 memcpy(p, name, strlen(name));
12567                                 p += strlen(name);
12568 
12569                                 /* Counter is 32 bits */
12570                                 if (dev_cntrs[i].flags & CNTR_32BIT) {
12571                                         memcpy(p, bit_type_32, bit_type_32_sz);
12572                                         p += bit_type_32_sz;
12573                                 }
12574 
12575                                 *p++ = '\n';
12576                         }
12577                 } else if (dev_cntrs[i].flags & CNTR_SDMA) {
12578                         for (j = 0; j < sdma_engines; j++) {
12579                                 snprintf(name, C_MAX_NAME, "%s%d",
12580                                          dev_cntrs[i].name, j);
12581                                 memcpy(p, name, strlen(name));
12582                                 p += strlen(name);
12583 
12584                                 /* Counter is 32 bits */
12585                                 if (dev_cntrs[i].flags & CNTR_32BIT) {
12586                                         memcpy(p, bit_type_32, bit_type_32_sz);
12587                                         p += bit_type_32_sz;
12588                                 }
12589 
12590                                 *p++ = '\n';
12591                         }
12592                 } else {
12593                         memcpy(p, dev_cntrs[i].name, strlen(dev_cntrs[i].name));
12594                         p += strlen(dev_cntrs[i].name);
12595 
12596                         /* Counter is 32 bits */
12597                         if (dev_cntrs[i].flags & CNTR_32BIT) {
12598                                 memcpy(p, bit_type_32, bit_type_32_sz);
12599                                 p += bit_type_32_sz;
12600                         }
12601 
12602                         *p++ = '\n';
12603                 }
12604         }
12605 
12606         /*********************/
12607         /* per port counters */
12608         /*********************/
12609 
12610         /*
12611          * Go through the counters for the overflows and disable the ones we
12612          * don't need. This varies based on platform so we need to do it
12613          * dynamically here.
12614          */
12615         rcv_ctxts = dd->num_rcv_contexts;
12616         for (i = C_RCV_HDR_OVF_FIRST + rcv_ctxts;
12617              i <= C_RCV_HDR_OVF_LAST; i++) {
12618                 port_cntrs[i].flags |= CNTR_DISABLED;
12619         }
12620 
12621         /* size port counter names and determine how many we have*/
12622         sz = 0;
12623         dd->nportcntrs = 0;
12624         for (i = 0; i < PORT_CNTR_LAST; i++) {
12625                 if (port_cntrs[i].flags & CNTR_DISABLED) {
12626                         hfi1_dbg_early("\tSkipping %s\n", port_cntrs[i].name);
12627                         continue;
12628                 }
12629 
12630                 if (port_cntrs[i].flags & CNTR_VL) {
12631                         port_cntrs[i].offset = dd->nportcntrs;
12632                         for (j = 0; j < C_VL_COUNT; j++) {
12633                                 snprintf(name, C_MAX_NAME, "%s%d",
12634                                          port_cntrs[i].name, vl_from_idx(j));
12635                                 sz += strlen(name);
12636                                 /* Add ",32" for 32-bit counters */
12637                                 if (port_cntrs[i].flags & CNTR_32BIT)
12638                                         sz += bit_type_32_sz;
12639                                 sz++;
12640                                 dd->nportcntrs++;
12641                         }
12642                 } else {
12643                         /* +1 for newline */
12644                         sz += strlen(port_cntrs[i].name) + 1;
12645                         /* Add ",32" for 32-bit counters */
12646                         if (port_cntrs[i].flags & CNTR_32BIT)
12647                                 sz += bit_type_32_sz;
12648                         port_cntrs[i].offset = dd->nportcntrs;
12649                         dd->nportcntrs++;
12650                 }
12651         }
12652 
12653         /* allocate space for the counter names */
12654         dd->portcntrnameslen = sz;
12655         dd->portcntrnames = kmalloc(sz, GFP_KERNEL);
12656         if (!dd->portcntrnames)
12657                 goto bail;
12658 
12659         /* fill in port cntr names */
12660         for (p = dd->portcntrnames, i = 0; i < PORT_CNTR_LAST; i++) {
12661                 if (port_cntrs[i].flags & CNTR_DISABLED)
12662                         continue;
12663 
12664                 if (port_cntrs[i].flags & CNTR_VL) {
12665                         for (j = 0; j < C_VL_COUNT; j++) {
12666                                 snprintf(name, C_MAX_NAME, "%s%d",
12667                                          port_cntrs[i].name, vl_from_idx(j));
12668                                 memcpy(p, name, strlen(name));
12669                                 p += strlen(name);
12670 
12671                                 /* Counter is 32 bits */
12672                                 if (port_cntrs[i].flags & CNTR_32BIT) {
12673                                         memcpy(p, bit_type_32, bit_type_32_sz);
12674                                         p += bit_type_32_sz;
12675                                 }
12676 
12677                                 *p++ = '\n';
12678                         }
12679                 } else {
12680                         memcpy(p, port_cntrs[i].name,
12681                                strlen(port_cntrs[i].name));
12682                         p += strlen(port_cntrs[i].name);
12683 
12684                         /* Counter is 32 bits */
12685                         if (port_cntrs[i].flags & CNTR_32BIT) {
12686                                 memcpy(p, bit_type_32, bit_type_32_sz);
12687                                 p += bit_type_32_sz;
12688                         }
12689 
12690                         *p++ = '\n';
12691                 }
12692         }
12693 
12694         /* allocate per port storage for counter values */
12695         ppd = (struct hfi1_pportdata *)(dd + 1);
12696         for (i = 0; i < dd->num_pports; i++, ppd++) {
12697                 ppd->cntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL);
12698                 if (!ppd->cntrs)
12699                         goto bail;
12700 
12701                 ppd->scntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL);
12702                 if (!ppd->scntrs)
12703                         goto bail;
12704         }
12705 
12706         /* CPU counters need to be allocated and zeroed */
12707         if (init_cpu_counters(dd))
12708                 goto bail;
12709 
12710         dd->update_cntr_wq = alloc_ordered_workqueue("hfi1_update_cntr_%d",
12711                                                      WQ_MEM_RECLAIM, dd->unit);
12712         if (!dd->update_cntr_wq)
12713                 goto bail;
12714 
12715         INIT_WORK(&dd->update_cntr_work, do_update_synth_timer);
12716 
12717         mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
12718         return 0;
12719 bail:
12720         free_cntrs(dd);
12721         return -ENOMEM;
12722 }
12723 
12724 static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate)
12725 {
12726         switch (chip_lstate) {
12727         default:
12728                 dd_dev_err(dd,
12729                            "Unknown logical state 0x%x, reporting IB_PORT_DOWN\n",
12730                            chip_lstate);
12731                 /* fall through */
12732         case LSTATE_DOWN:
12733                 return IB_PORT_DOWN;
12734         case LSTATE_INIT:
12735                 return IB_PORT_INIT;
12736         case LSTATE_ARMED:
12737                 return IB_PORT_ARMED;
12738         case LSTATE_ACTIVE:
12739                 return IB_PORT_ACTIVE;
12740         }
12741 }
12742 
12743 u32 chip_to_opa_pstate(struct hfi1_devdata *dd, u32 chip_pstate)
12744 {
12745         /* look at the HFI meta-states only */
12746         switch (chip_pstate & 0xf0) {
12747         default:
12748                 dd_dev_err(dd, "Unexpected chip physical state of 0x%x\n",
12749                            chip_pstate);
12750                 /* fall through */
12751         case PLS_DISABLED:
12752                 return IB_PORTPHYSSTATE_DISABLED;
12753         case PLS_OFFLINE:
12754                 return OPA_PORTPHYSSTATE_OFFLINE;
12755         case PLS_POLLING:
12756                 return IB_PORTPHYSSTATE_POLLING;
12757         case PLS_CONFIGPHY:
12758                 return IB_PORTPHYSSTATE_TRAINING;
12759         case PLS_LINKUP:
12760                 return IB_PORTPHYSSTATE_LINKUP;
12761         case PLS_PHYTEST:
12762                 return IB_PORTPHYSSTATE_PHY_TEST;
12763         }
12764 }
12765 
12766 /* return the OPA port logical state name */
12767 const char *opa_lstate_name(u32 lstate)
12768 {
12769         static const char * const port_logical_names[] = {
12770                 "PORT_NOP",
12771                 "PORT_DOWN",
12772                 "PORT_INIT",
12773                 "PORT_ARMED",
12774                 "PORT_ACTIVE",
12775                 "PORT_ACTIVE_DEFER",
12776         };
12777         if (lstate < ARRAY_SIZE(port_logical_names))
12778                 return port_logical_names[lstate];
12779         return "unknown";
12780 }
12781 
12782 /* return the OPA port physical state name */
12783 const char *opa_pstate_name(u32 pstate)
12784 {
12785         static const char * const port_physical_names[] = {
12786                 "PHYS_NOP",
12787                 "reserved1",
12788                 "PHYS_POLL",
12789                 "PHYS_DISABLED",
12790                 "PHYS_TRAINING",
12791                 "PHYS_LINKUP",
12792                 "PHYS_LINK_ERR_RECOVER",
12793                 "PHYS_PHY_TEST",
12794                 "reserved8",
12795                 "PHYS_OFFLINE",
12796                 "PHYS_GANGED",
12797                 "PHYS_TEST",
12798         };
12799         if (pstate < ARRAY_SIZE(port_physical_names))
12800                 return port_physical_names[pstate];
12801         return "unknown";
12802 }
12803 
12804 /**
12805  * update_statusp - Update userspace status flag
12806  * @ppd: Port data structure
12807  * @state: port state information
12808  *
12809  * Actual port status is determined by the host_link_state value
12810  * in the ppd.
12811  *
12812  * host_link_state MUST be updated before updating the user space
12813  * statusp.
12814  */
12815 static void update_statusp(struct hfi1_pportdata *ppd, u32 state)
12816 {
12817         /*
12818          * Set port status flags in the page mapped into userspace
12819          * memory. Do it here to ensure a reliable state - this is
12820          * the only function called by all state handling code.
12821          * Always set the flags due to the fact that the cache value
12822          * might have been changed explicitly outside of this
12823          * function.
12824          */
12825         if (ppd->statusp) {
12826                 switch (state) {
12827                 case IB_PORT_DOWN:
12828                 case IB_PORT_INIT:
12829                         *ppd->statusp &= ~(HFI1_STATUS_IB_CONF |
12830                                            HFI1_STATUS_IB_READY);
12831                         break;
12832                 case IB_PORT_ARMED:
12833                         *ppd->statusp |= HFI1_STATUS_IB_CONF;
12834                         break;
12835                 case IB_PORT_ACTIVE:
12836                         *ppd->statusp |= HFI1_STATUS_IB_READY;
12837                         break;
12838                 }
12839         }
12840         dd_dev_info(ppd->dd, "logical state changed to %s (0x%x)\n",
12841                     opa_lstate_name(state), state);
12842 }
12843 
12844 /**
12845  * wait_logical_linkstate - wait for an IB link state change to occur
12846  * @ppd: port device
12847  * @state: the state to wait for
12848  * @msecs: the number of milliseconds to wait
12849  *
12850  * Wait up to msecs milliseconds for IB link state change to occur.
12851  * For now, take the easy polling route.
12852  * Returns 0 if state reached, otherwise -ETIMEDOUT.
12853  */
12854 static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
12855                                   int msecs)
12856 {
12857         unsigned long timeout;
12858         u32 new_state;
12859 
12860         timeout = jiffies + msecs_to_jiffies(msecs);
12861         while (1) {
12862                 new_state = chip_to_opa_lstate(ppd->dd,
12863                                                read_logical_state(ppd->dd));
12864                 if (new_state == state)
12865                         break;
12866                 if (time_after(jiffies, timeout)) {
12867                         dd_dev_err(ppd->dd,
12868                                    "timeout waiting for link state 0x%x\n",
12869                                    state);
12870                         return -ETIMEDOUT;
12871                 }
12872                 msleep(20);
12873         }
12874 
12875         return 0;
12876 }
12877 
12878 static void log_state_transition(struct hfi1_pportdata *ppd, u32 state)
12879 {
12880         u32 ib_pstate = chip_to_opa_pstate(ppd->dd, state);
12881 
12882         dd_dev_info(ppd->dd,
12883                     "physical state changed to %s (0x%x), phy 0x%x\n",
12884                     opa_pstate_name(ib_pstate), ib_pstate, state);
12885 }
12886 
12887 /*
12888  * Read the physical hardware link state and check if it matches host
12889  * drivers anticipated state.
12890  */
12891 static void log_physical_state(struct hfi1_pportdata *ppd, u32 state)
12892 {
12893         u32 read_state = read_physical_state(ppd->dd);
12894 
12895         if (read_state == state) {
12896                 log_state_transition(ppd, state);
12897         } else {
12898                 dd_dev_err(ppd->dd,
12899                            "anticipated phy link state 0x%x, read 0x%x\n",
12900                            state, read_state);
12901         }
12902 }
12903 
12904 /*
12905  * wait_physical_linkstate - wait for an physical link state change to occur
12906  * @ppd: port device
12907  * @state: the state to wait for
12908  * @msecs: the number of milliseconds to wait
12909  *
12910  * Wait up to msecs milliseconds for physical link state change to occur.
12911  * Returns 0 if state reached, otherwise -ETIMEDOUT.
12912  */
12913 static int wait_physical_linkstate(struct hfi1_pportdata *ppd, u32 state,
12914                                    int msecs)
12915 {
12916         u32 read_state;
12917         unsigned long timeout;
12918 
12919         timeout = jiffies + msecs_to_jiffies(msecs);
12920         while (1) {
12921                 read_state = read_physical_state(ppd->dd);
12922                 if (read_state == state)
12923                         break;
12924                 if (time_after(jiffies, timeout)) {
12925                         dd_dev_err(ppd->dd,
12926                                    "timeout waiting for phy link state 0x%x\n",
12927                                    state);
12928                         return -ETIMEDOUT;
12929                 }
12930                 usleep_range(1950, 2050); /* sleep 2ms-ish */
12931         }
12932 
12933         log_state_transition(ppd, state);
12934         return 0;
12935 }
12936 
12937 /*
12938  * wait_phys_link_offline_quiet_substates - wait for any offline substate
12939  * @ppd: port device
12940  * @msecs: the number of milliseconds to wait
12941  *
12942  * Wait up to msecs milliseconds for any offline physical link
12943  * state change to occur.
12944  * Returns 0 if at least one state is reached, otherwise -ETIMEDOUT.
12945  */
12946 static int wait_phys_link_offline_substates(struct hfi1_pportdata *ppd,
12947                                             int msecs)
12948 {
12949         u32 read_state;
12950         unsigned long timeout;
12951 
12952         timeout = jiffies + msecs_to_jiffies(msecs);
12953         while (1) {
12954                 read_state = read_physical_state(ppd->dd);
12955                 if ((read_state & 0xF0) == PLS_OFFLINE)
12956                         break;
12957                 if (time_after(jiffies, timeout)) {
12958                         dd_dev_err(ppd->dd,
12959                                    "timeout waiting for phy link offline.quiet substates. Read state 0x%x, %dms\n",
12960                                    read_state, msecs);
12961                         return -ETIMEDOUT;
12962                 }
12963                 usleep_range(1950, 2050); /* sleep 2ms-ish */
12964         }
12965 
12966         log_state_transition(ppd, read_state);
12967         return read_state;
12968 }
12969 
12970 /*
12971  * wait_phys_link_out_of_offline - wait for any out of offline state
12972  * @ppd: port device
12973  * @msecs: the number of milliseconds to wait
12974  *
12975  * Wait up to msecs milliseconds for any out of offline physical link
12976  * state change to occur.
12977  * Returns 0 if at least one state is reached, otherwise -ETIMEDOUT.
12978  */
12979 static int wait_phys_link_out_of_offline(struct hfi1_pportdata *ppd,
12980                                          int msecs)
12981 {
12982         u32 read_state;
12983         unsigned long timeout;
12984 
12985         timeout = jiffies + msecs_to_jiffies(msecs);
12986         while (1) {
12987                 read_state = read_physical_state(ppd->dd);
12988                 if ((read_state & 0xF0) != PLS_OFFLINE)
12989                         break;
12990                 if (time_after(jiffies, timeout)) {
12991                         dd_dev_err(ppd->dd,
12992                                    "timeout waiting for phy link out of offline. Read state 0x%x, %dms\n",
12993                                    read_state, msecs);
12994                         return -ETIMEDOUT;
12995                 }
12996                 usleep_range(1950, 2050); /* sleep 2ms-ish */
12997         }
12998 
12999         log_state_transition(ppd, read_state);
13000         return read_state;
13001 }
13002 
13003 #define CLEAR_STATIC_RATE_CONTROL_SMASK(r) \
13004 (r &= ~SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
13005 
13006 #define SET_STATIC_RATE_CONTROL_SMASK(r) \
13007 (r |= SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
13008 
13009 void hfi1_init_ctxt(struct send_context *sc)
13010 {
13011         if (sc) {
13012                 struct hfi1_devdata *dd = sc->dd;
13013                 u64 reg;
13014                 u8 set = (sc->type == SC_USER ?
13015                           HFI1_CAP_IS_USET(STATIC_RATE_CTRL) :
13016                           HFI1_CAP_IS_KSET(STATIC_RATE_CTRL));
13017                 reg = read_kctxt_csr(dd, sc->hw_context,
13018                                      SEND_CTXT_CHECK_ENABLE);
13019                 if (set)
13020                         CLEAR_STATIC_RATE_CONTROL_SMASK(reg);
13021                 else
13022                         SET_STATIC_RATE_CONTROL_SMASK(reg);
13023                 write_kctxt_csr(dd, sc->hw_context,
13024                                 SEND_CTXT_CHECK_ENABLE, reg);
13025         }
13026 }
13027 
13028 int hfi1_tempsense_rd(struct hfi1_devdata *dd, struct hfi1_temp *temp)
13029 {
13030         int ret = 0;
13031         u64 reg;
13032 
13033         if (dd->icode != ICODE_RTL_SILICON) {
13034                 if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
13035                         dd_dev_info(dd, "%s: tempsense not supported by HW\n",
13036                                     __func__);
13037                 return -EINVAL;
13038         }
13039         reg = read_csr(dd, ASIC_STS_THERM);
13040         temp->curr = ((reg >> ASIC_STS_THERM_CURR_TEMP_SHIFT) &
13041                       ASIC_STS_THERM_CURR_TEMP_MASK);
13042         temp->lo_lim = ((reg >> ASIC_STS_THERM_LO_TEMP_SHIFT) &
13043                         ASIC_STS_THERM_LO_TEMP_MASK);
13044         temp->hi_lim = ((reg >> ASIC_STS_THERM_HI_TEMP_SHIFT) &
13045                         ASIC_STS_THERM_HI_TEMP_MASK);
13046         temp->crit_lim = ((reg >> ASIC_STS_THERM_CRIT_TEMP_SHIFT) &
13047                           ASIC_STS_THERM_CRIT_TEMP_MASK);
13048         /* triggers is a 3-bit value - 1 bit per trigger. */
13049         temp->triggers = (u8)((reg >> ASIC_STS_THERM_LOW_SHIFT) & 0x7);
13050 
13051         return ret;
13052 }
13053 
13054 /* ========================================================================= */
13055 
13056 /**
13057  * read_mod_write() - Calculate the IRQ register index and set/clear the bits
13058  * @dd: valid devdata
13059  * @src: IRQ source to determine register index from
13060  * @bits: the bits to set or clear
13061  * @set: true == set the bits, false == clear the bits
13062  *
13063  */
13064 static void read_mod_write(struct hfi1_devdata *dd, u16 src, u64 bits,
13065                            bool set)
13066 {
13067         u64 reg;
13068         u16 idx = src / BITS_PER_REGISTER;
13069 
13070         spin_lock(&dd->irq_src_lock);
13071         reg = read_csr(dd, CCE_INT_MASK + (8 * idx));
13072         if (set)
13073                 reg |= bits;
13074         else
13075                 reg &= ~bits;
13076         write_csr(dd, CCE_INT_MASK + (8 * idx), reg);
13077         spin_unlock(&dd->irq_src_lock);
13078 }
13079 
13080 /**
13081  * set_intr_bits() - Enable/disable a range (one or more) IRQ sources
13082  * @dd: valid devdata
13083  * @first: first IRQ source to set/clear
13084  * @last: last IRQ source (inclusive) to set/clear
13085  * @set: true == set the bits, false == clear the bits
13086  *
13087  * If first == last, set the exact source.
13088  */
13089 int set_intr_bits(struct hfi1_devdata *dd, u16 first, u16 last, bool set)
13090 {
13091         u64 bits = 0;
13092         u64 bit;
13093         u16 src;
13094 
13095         if (first > NUM_INTERRUPT_SOURCES || last > NUM_INTERRUPT_SOURCES)
13096                 return -EINVAL;
13097 
13098         if (last < first)
13099                 return -ERANGE;
13100 
13101         for (src = first; src <= last; src++) {
13102                 bit = src % BITS_PER_REGISTER;
13103                 /* wrapped to next register? */
13104                 if (!bit && bits) {
13105                         read_mod_write(dd, src - 1, bits, set);
13106                         bits = 0;
13107                 }
13108                 bits |= BIT_ULL(bit);
13109         }
13110         read_mod_write(dd, last, bits, set);
13111 
13112         return 0;
13113 }
13114 
13115 /*
13116  * Clear all interrupt sources on the chip.
13117  */
13118 void clear_all_interrupts(struct hfi1_devdata *dd)
13119 {
13120         int i;
13121 
13122         for (i = 0; i < CCE_NUM_INT_CSRS; i++)
13123                 write_csr(dd, CCE_INT_CLEAR + (8 * i), ~(u64)0);
13124 
13125         write_csr(dd, CCE_ERR_CLEAR, ~(u64)0);
13126         write_csr(dd, MISC_ERR_CLEAR, ~(u64)0);
13127         write_csr(dd, RCV_ERR_CLEAR, ~(u64)0);
13128         write_csr(dd, SEND_ERR_CLEAR, ~(u64)0);
13129         write_csr(dd, SEND_PIO_ERR_CLEAR, ~(u64)0);
13130         write_csr(dd, SEND_DMA_ERR_CLEAR, ~(u64)0);
13131         write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~(u64)0);
13132         for (i = 0; i < chip_send_contexts(dd); i++)
13133                 write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~(u64)0);
13134         for (i = 0; i < chip_sdma_engines(dd); i++)
13135                 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~(u64)0);
13136 
13137         write_csr(dd, DCC_ERR_FLG_CLR, ~(u64)0);
13138         write_csr(dd, DC_LCB_ERR_CLR, ~(u64)0);
13139         write_csr(dd, DC_DC8051_ERR_CLR, ~(u64)0);
13140 }
13141 
13142 /*
13143  * Remap the interrupt source from the general handler to the given MSI-X
13144  * interrupt.
13145  */
13146 void remap_intr(struct hfi1_devdata *dd, int isrc, int msix_intr)
13147 {
13148         u64 reg;
13149         int m, n;
13150 
13151         /* clear from the handled mask of the general interrupt */
13152         m = isrc / 64;
13153         n = isrc % 64;
13154         if (likely(m < CCE_NUM_INT_CSRS)) {
13155                 dd->gi_mask[m] &= ~((u64)1 << n);
13156         } else {
13157                 dd_dev_err(dd, "remap interrupt err\n");
13158                 return;
13159         }
13160 
13161         /* direct the chip source to the given MSI-X interrupt */
13162         m = isrc / 8;
13163         n = isrc % 8;
13164         reg = read_csr(dd, CCE_INT_MAP + (8 * m));
13165         reg &= ~((u64)0xff << (8 * n));
13166         reg |= ((u64)msix_intr & 0xff) << (8 * n);
13167         write_csr(dd, CCE_INT_MAP + (8 * m), reg);
13168 }
13169 
13170 void remap_sdma_interrupts(struct hfi1_devdata *dd, int engine, int msix_intr)
13171 {
13172         /*
13173          * SDMA engine interrupt sources grouped by type, rather than
13174          * engine.  Per-engine interrupts are as follows:
13175          *      SDMA
13176          *      SDMAProgress
13177          *      SDMAIdle
13178          */
13179         remap_intr(dd, IS_SDMA_START + engine, msix_intr);
13180         remap_intr(dd, IS_SDMA_PROGRESS_START + engine, msix_intr);
13181         remap_intr(dd, IS_SDMA_IDLE_START + engine, msix_intr);
13182 }
13183 
13184 /*
13185  * Set the general handler to accept all interrupts, remap all
13186  * chip interrupts back to MSI-X 0.
13187  */
13188 void reset_interrupts(struct hfi1_devdata *dd)
13189 {
13190         int i;
13191 
13192         /* all interrupts handled by the general handler */
13193         for (i = 0; i < CCE_NUM_INT_CSRS; i++)
13194                 dd->gi_mask[i] = ~(u64)0;
13195 
13196         /* all chip interrupts map to MSI-X 0 */
13197         for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
13198                 write_csr(dd, CCE_INT_MAP + (8 * i), 0);
13199 }
13200 
13201 /**
13202  * set_up_interrupts() - Initialize the IRQ resources and state
13203  * @dd: valid devdata
13204  *
13205  */
13206 static int set_up_interrupts(struct hfi1_devdata *dd)
13207 {
13208         int ret;
13209 
13210         /* mask all interrupts */
13211         set_intr_bits(dd, IS_FIRST_SOURCE, IS_LAST_SOURCE, false);
13212 
13213         /* clear all pending interrupts */
13214         clear_all_interrupts(dd);
13215 
13216         /* reset general handler mask, chip MSI-X mappings */
13217         reset_interrupts(dd);
13218 
13219         /* ask for MSI-X interrupts */
13220         ret = msix_initialize(dd);
13221         if (ret)
13222                 return ret;
13223 
13224         ret = msix_request_irqs(dd);
13225         if (ret)
13226                 msix_clean_up_interrupts(dd);
13227 
13228         return ret;
13229 }
13230 
13231 /*
13232  * Set up context values in dd.  Sets:
13233  *
13234  *      num_rcv_contexts - number of contexts being used
13235  *      n_krcv_queues - number of kernel contexts
13236  *      first_dyn_alloc_ctxt - first dynamically allocated context
13237  *                             in array of contexts
13238  *      freectxts  - number of free user contexts
13239  *      num_send_contexts - number of PIO send contexts being used
13240  *      num_vnic_contexts - number of contexts reserved for VNIC
13241  */
13242 static int set_up_context_variables(struct hfi1_devdata *dd)
13243 {
13244         unsigned long num_kernel_contexts;
13245         u16 num_vnic_contexts = HFI1_NUM_VNIC_CTXT;
13246         int total_contexts;
13247         int ret;
13248         unsigned ngroups;
13249         int rmt_count;
13250         int user_rmt_reduced;
13251         u32 n_usr_ctxts;
13252         u32 send_contexts = chip_send_contexts(dd);
13253         u32 rcv_contexts = chip_rcv_contexts(dd);
13254 
13255         /*
13256          * Kernel receive contexts:
13257          * - Context 0 - control context (VL15/multicast/error)
13258          * - Context 1 - first kernel context
13259          * - Context 2 - second kernel context
13260          * ...
13261          */
13262         if (n_krcvqs)
13263                 /*
13264                  * n_krcvqs is the sum of module parameter kernel receive
13265                  * contexts, krcvqs[].  It does not include the control
13266                  * context, so add that.
13267                  */
13268                 num_kernel_contexts = n_krcvqs + 1;
13269         else
13270                 num_kernel_contexts = DEFAULT_KRCVQS + 1;
13271         /*
13272          * Every kernel receive context needs an ACK send context.
13273          * one send context is allocated for each VL{0-7} and VL15
13274          */
13275         if (num_kernel_contexts > (send_contexts - num_vls - 1)) {
13276                 dd_dev_err(dd,
13277                            "Reducing # kernel rcv contexts to: %d, from %lu\n",
13278                            send_contexts - num_vls - 1,
13279                            num_kernel_contexts);
13280                 num_kernel_contexts = send_contexts - num_vls - 1;
13281         }
13282 
13283         /* Accommodate VNIC contexts if possible */
13284         if ((num_kernel_contexts + num_vnic_contexts) > rcv_contexts) {
13285                 dd_dev_err(dd, "No receive contexts available for VNIC\n");
13286                 num_vnic_contexts = 0;
13287         }
13288         total_contexts = num_kernel_contexts + num_vnic_contexts;
13289 
13290         /*
13291          * User contexts:
13292          *      - default to 1 user context per real (non-HT) CPU core if
13293          *        num_user_contexts is negative
13294          */
13295         if (num_user_contexts < 0)
13296                 n_usr_ctxts = cpumask_weight(&node_affinity.real_cpu_mask);
13297         else
13298                 n_usr_ctxts = num_user_contexts;
13299         /*
13300          * Adjust the counts given a global max.
13301          */
13302         if (total_contexts + n_usr_ctxts > rcv_contexts) {
13303                 dd_dev_err(dd,
13304                            "Reducing # user receive contexts to: %d, from %u\n",
13305                            rcv_contexts - total_contexts,
13306                            n_usr_ctxts);
13307                 /* recalculate */
13308                 n_usr_ctxts = rcv_contexts - total_contexts;
13309         }
13310 
13311         /*
13312          * The RMT entries are currently allocated as shown below:
13313          * 1. QOS (0 to 128 entries);
13314          * 2. FECN (num_kernel_context - 1 + num_user_contexts +
13315          *    num_vnic_contexts);
13316          * 3. VNIC (num_vnic_contexts).
13317          * It should be noted that FECN oversubscribe num_vnic_contexts
13318          * entries of RMT because both VNIC and PSM could allocate any receive
13319          * context between dd->first_dyn_alloc_text and dd->num_rcv_contexts,
13320          * and PSM FECN must reserve an RMT entry for each possible PSM receive
13321          * context.
13322          */
13323         rmt_count = qos_rmt_entries(dd, NULL, NULL) + (num_vnic_contexts * 2);
13324         if (HFI1_CAP_IS_KSET(TID_RDMA))
13325                 rmt_count += num_kernel_contexts - 1;
13326         if (rmt_count + n_usr_ctxts > NUM_MAP_ENTRIES) {
13327                 user_rmt_reduced = NUM_MAP_ENTRIES - rmt_count;
13328                 dd_dev_err(dd,
13329                            "RMT size is reducing the number of user receive contexts from %u to %d\n",
13330                            n_usr_ctxts,
13331                            user_rmt_reduced);
13332                 /* recalculate */
13333                 n_usr_ctxts = user_rmt_reduced;
13334         }
13335 
13336         total_contexts += n_usr_ctxts;
13337 
13338         /* the first N are kernel contexts, the rest are user/vnic contexts */
13339         dd->num_rcv_contexts = total_contexts;
13340         dd->n_krcv_queues = num_kernel_contexts;
13341         dd->first_dyn_alloc_ctxt = num_kernel_contexts;
13342         dd->num_vnic_contexts = num_vnic_contexts;
13343         dd->num_user_contexts = n_usr_ctxts;
13344         dd->freectxts = n_usr_ctxts;
13345         dd_dev_info(dd,
13346                     "rcv contexts: chip %d, used %d (kernel %d, vnic %u, user %u)\n",
13347                     rcv_contexts,
13348                     (int)dd->num_rcv_contexts,
13349                     (int)dd->n_krcv_queues,
13350                     dd->num_vnic_contexts,
13351                     dd->num_user_contexts);
13352 
13353         /*
13354          * Receive array allocation:
13355          *   All RcvArray entries are divided into groups of 8. This
13356          *   is required by the hardware and will speed up writes to
13357          *   consecutive entries by using write-combining of the entire
13358          *   cacheline.
13359          *
13360          *   The number of groups are evenly divided among all contexts.
13361          *   any left over groups will be given to the first N user
13362          *   contexts.
13363          */
13364         dd->rcv_entries.group_size = RCV_INCREMENT;
13365         ngroups = chip_rcv_array_count(dd) / dd->rcv_entries.group_size;
13366         dd->rcv_entries.ngroups = ngroups / dd->num_rcv_contexts;
13367         dd->rcv_entries.nctxt_extra = ngroups -
13368                 (dd->num_rcv_contexts * dd->rcv_entries.ngroups);
13369         dd_dev_info(dd, "RcvArray groups %u, ctxts extra %u\n",
13370                     dd->rcv_entries.ngroups,
13371                     dd->rcv_entries.nctxt_extra);
13372         if (dd->rcv_entries.ngroups * dd->rcv_entries.group_size >
13373             MAX_EAGER_ENTRIES * 2) {
13374                 dd->rcv_entries.ngroups = (MAX_EAGER_ENTRIES * 2) /
13375                         dd->rcv_entries.group_size;
13376                 dd_dev_info(dd,
13377                             "RcvArray group count too high, change to %u\n",
13378                             dd->rcv_entries.ngroups);
13379                 dd->rcv_entries.nctxt_extra = 0;
13380         }
13381         /*
13382          * PIO send contexts
13383          */
13384         ret = init_sc_pools_and_sizes(dd);
13385         if (ret >= 0) { /* success */
13386                 dd->num_send_contexts = ret;
13387                 dd_dev_info(
13388                         dd,
13389                         "send contexts: chip %d, used %d (kernel %d, ack %d, user %d, vl15 %d)\n",
13390                         send_contexts,
13391                         dd->num_send_contexts,
13392                         dd->sc_sizes[SC_KERNEL].count,
13393                         dd->sc_sizes[SC_ACK].count,
13394                         dd->sc_sizes[SC_USER].count,
13395                         dd->sc_sizes[SC_VL15].count);
13396                 ret = 0;        /* success */
13397         }
13398 
13399         return ret;
13400 }
13401 
13402 /*
13403  * Set the device/port partition key table. The MAD code
13404  * will ensure that, at least, the partial management
13405  * partition key is present in the table.
13406  */
13407 static void set_partition_keys(struct hfi1_pportdata *ppd)
13408 {
13409         struct hfi1_devdata *dd = ppd->dd;
13410         u64 reg = 0;
13411         int i;
13412 
13413         dd_dev_info(dd, "Setting partition keys\n");
13414         for (i = 0; i < hfi1_get_npkeys(dd); i++) {
13415                 reg |= (ppd->pkeys[i] &
13416                         RCV_PARTITION_KEY_PARTITION_KEY_A_MASK) <<
13417                         ((i % 4) *
13418                          RCV_PARTITION_KEY_PARTITION_KEY_B_SHIFT);
13419                 /* Each register holds 4 PKey values. */
13420                 if ((i % 4) == 3) {
13421                         write_csr(dd, RCV_PARTITION_KEY +
13422                                   ((i - 3) * 2), reg);
13423                         reg = 0;
13424                 }
13425         }
13426 
13427         /* Always enable HW pkeys check when pkeys table is set */
13428         add_rcvctrl(dd, RCV_CTRL_RCV_PARTITION_KEY_ENABLE_SMASK);
13429 }
13430 
13431 /*
13432  * These CSRs and memories are uninitialized on reset and must be
13433  * written before reading to set the ECC/parity bits.
13434  *
13435  * NOTE: All user context CSRs that are not mmaped write-only
13436  * (e.g. the TID flows) must be initialized even if the driver never
13437  * reads them.
13438  */
13439 static void write_uninitialized_csrs_and_memories(struct hfi1_devdata *dd)
13440 {
13441         int i, j;
13442 
13443         /* CceIntMap */
13444         for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
13445                 write_csr(dd, CCE_INT_MAP + (8 * i), 0);
13446 
13447         /* SendCtxtCreditReturnAddr */
13448         for (i = 0; i < chip_send_contexts(dd); i++)
13449                 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0);
13450 
13451         /* PIO Send buffers */
13452         /* SDMA Send buffers */
13453         /*
13454          * These are not normally read, and (presently) have no method
13455          * to be read, so are not pre-initialized
13456          */
13457 
13458         /* RcvHdrAddr */
13459         /* RcvHdrTailAddr */
13460         /* RcvTidFlowTable */
13461         for (i = 0; i < chip_rcv_contexts(dd); i++) {
13462                 write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0);
13463                 write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0);
13464                 for (j = 0; j < RXE_NUM_TID_FLOWS; j++)
13465                         write_uctxt_csr(dd, i, RCV_TID_FLOW_TABLE + (8 * j), 0);
13466         }
13467 
13468         /* RcvArray */
13469         for (i = 0; i < chip_rcv_array_count(dd); i++)
13470                 hfi1_put_tid(dd, i, PT_INVALID_FLUSH, 0, 0);
13471 
13472         /* RcvQPMapTable */
13473         for (i = 0; i < 32; i++)
13474                 write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0);
13475 }
13476 
13477 /*
13478  * Use the ctrl_bits in CceCtrl to clear the status_bits in CceStatus.
13479  */
13480 static void clear_cce_status(struct hfi1_devdata *dd, u64 status_bits,
13481                              u64 ctrl_bits)
13482 {
13483         unsigned long timeout;
13484         u64 reg;
13485 
13486         /* is the condition present? */
13487         reg = read_csr(dd, CCE_STATUS);
13488         if ((reg & status_bits) == 0)
13489                 return;
13490 
13491         /* clear the condition */
13492         write_csr(dd, CCE_CTRL, ctrl_bits);
13493 
13494         /* wait for the condition to clear */
13495         timeout = jiffies + msecs_to_jiffies(CCE_STATUS_TIMEOUT);
13496         while (1) {
13497                 reg = read_csr(dd, CCE_STATUS);
13498                 if ((reg & status_bits) == 0)
13499                         return;
13500                 if (time_after(jiffies, timeout)) {
13501                         dd_dev_err(dd,
13502                                    "Timeout waiting for CceStatus to clear bits 0x%llx, remaining 0x%llx\n",
13503                                    status_bits, reg & status_bits);
13504                         return;
13505                 }
13506                 udelay(1);
13507         }
13508 }
13509 
13510 /* set CCE CSRs to chip reset defaults */
13511 static void reset_cce_csrs(struct hfi1_devdata *dd)
13512 {
13513         int i;
13514 
13515         /* CCE_REVISION read-only */
13516         /* CCE_REVISION2 read-only */
13517         /* CCE_CTRL - bits clear automatically */
13518         /* CCE_STATUS read-only, use CceCtrl to clear */
13519         clear_cce_status(dd, ALL_FROZE, CCE_CTRL_SPC_UNFREEZE_SMASK);
13520         clear_cce_status(dd, ALL_TXE_PAUSE, CCE_CTRL_TXE_RESUME_SMASK);
13521         clear_cce_status(dd, ALL_RXE_PAUSE, CCE_CTRL_RXE_RESUME_SMASK);
13522         for (i = 0; i < CCE_NUM_SCRATCH; i++)
13523                 write_csr(dd, CCE_SCRATCH + (8 * i), 0);
13524         /* CCE_ERR_STATUS read-only */
13525         write_csr(dd, CCE_ERR_MASK, 0);
13526         write_csr(dd, CCE_ERR_CLEAR, ~0ull);
13527         /* CCE_ERR_FORCE leave alone */
13528         for (i = 0; i < CCE_NUM_32_BIT_COUNTERS; i++)
13529                 write_csr(dd, CCE_COUNTER_ARRAY32 + (8 * i), 0);
13530         write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_RESETCSR);
13531         /* CCE_PCIE_CTRL leave alone */
13532         for (i = 0; i < CCE_NUM_MSIX_VECTORS; i++) {
13533                 write_csr(dd, CCE_MSIX_TABLE_LOWER + (8 * i), 0);
13534                 write_csr(dd, CCE_MSIX_TABLE_UPPER + (8 * i),
13535                           CCE_MSIX_TABLE_UPPER_RESETCSR);
13536         }
13537         for (i = 0; i < CCE_NUM_MSIX_PBAS; i++) {
13538                 /* CCE_MSIX_PBA read-only */
13539                 write_csr(dd, CCE_MSIX_INT_GRANTED, ~0ull);
13540                 write_csr(dd, CCE_MSIX_VEC_CLR_WITHOUT_INT, ~0ull);
13541         }
13542         for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
13543                 write_csr(dd, CCE_INT_MAP, 0);
13544         for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
13545                 /* CCE_INT_STATUS read-only */
13546                 write_csr(dd, CCE_INT_MASK + (8 * i), 0);
13547                 write_csr(dd, CCE_INT_CLEAR + (8 * i), ~0ull);
13548                 /* CCE_INT_FORCE leave alone */
13549                 /* CCE_INT_BLOCKED read-only */
13550         }
13551         for (i = 0; i < CCE_NUM_32_BIT_INT_COUNTERS; i++)
13552                 write_csr(dd, CCE_INT_COUNTER_ARRAY32 + (8 * i), 0);
13553 }
13554 
13555 /* set MISC CSRs to chip reset defaults */
13556 static void reset_misc_csrs(struct hfi1_devdata *dd)
13557 {
13558         int i;
13559 
13560         for (i = 0; i < 32; i++) {
13561                 write_csr(dd, MISC_CFG_RSA_R2 + (8 * i), 0);
13562                 write_csr(dd, MISC_CFG_RSA_SIGNATURE + (8 * i), 0);
13563                 write_csr(dd, MISC_CFG_RSA_MODULUS + (8 * i), 0);
13564         }
13565         /*
13566          * MISC_CFG_SHA_PRELOAD leave alone - always reads 0 and can
13567          * only be written 128-byte chunks
13568          */
13569         /* init RSA engine to clear lingering errors */
13570         write_csr(dd, MISC_CFG_RSA_CMD, 1);
13571         write_csr(dd, MISC_CFG_RSA_MU, 0);
13572         write_csr(dd, MISC_CFG_FW_CTRL, 0);
13573         /* MISC_STS_8051_DIGEST read-only */
13574         /* MISC_STS_SBM_DIGEST read-only */
13575         /* MISC_STS_PCIE_DIGEST read-only */
13576         /* MISC_STS_FAB_DIGEST read-only */
13577         /* MISC_ERR_STATUS read-only */
13578         write_csr(dd, MISC_ERR_MASK, 0);
13579         write_csr(dd, MISC_ERR_CLEAR, ~0ull);
13580         /* MISC_ERR_FORCE leave alone */
13581 }
13582 
13583 /* set TXE CSRs to chip reset defaults */
13584 static void reset_txe_csrs(struct hfi1_devdata *dd)
13585 {
13586         int i;
13587 
13588         /*
13589          * TXE Kernel CSRs
13590          */
13591         write_csr(dd, SEND_CTRL, 0);
13592         __cm_reset(dd, 0);      /* reset CM internal state */
13593         /* SEND_CONTEXTS read-only */
13594         /* SEND_DMA_ENGINES read-only */
13595         /* SEND_PIO_MEM_SIZE read-only */
13596         /* SEND_DMA_MEM_SIZE read-only */
13597         write_csr(dd, SEND_HIGH_PRIORITY_LIMIT, 0);
13598         pio_reset_all(dd);      /* SEND_PIO_INIT_CTXT */
13599         /* SEND_PIO_ERR_STATUS read-only */
13600         write_csr(dd, SEND_PIO_ERR_MASK, 0);
13601         write_csr(dd, SEND_PIO_ERR_CLEAR, ~0ull);
13602         /* SEND_PIO_ERR_FORCE leave alone */
13603         /* SEND_DMA_ERR_STATUS read-only */
13604         write_csr(dd, SEND_DMA_ERR_MASK, 0);
13605         write_csr(dd, SEND_DMA_ERR_CLEAR, ~0ull);
13606         /* SEND_DMA_ERR_FORCE leave alone */
13607         /* SEND_EGRESS_ERR_STATUS read-only */
13608         write_csr(dd, SEND_EGRESS_ERR_MASK, 0);
13609         write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~0ull);
13610         /* SEND_EGRESS_ERR_FORCE leave alone */
13611         write_csr(dd, SEND_BTH_QP, 0);
13612         write_csr(dd, SEND_STATIC_RATE_CONTROL, 0);
13613         write_csr(dd, SEND_SC2VLT0, 0);
13614         write_csr(dd, SEND_SC2VLT1, 0);
13615         write_csr(dd, SEND_SC2VLT2, 0);
13616         write_csr(dd, SEND_SC2VLT3, 0);
13617         write_csr(dd, SEND_LEN_CHECK0, 0);
13618         write_csr(dd, SEND_LEN_CHECK1, 0);
13619         /* SEND_ERR_STATUS read-only */
13620         write_csr(dd, SEND_ERR_MASK, 0);
13621         write_csr(dd, SEND_ERR_CLEAR, ~0ull);
13622         /* SEND_ERR_FORCE read-only */
13623         for (i = 0; i < VL_ARB_LOW_PRIO_TABLE_SIZE; i++)
13624                 write_csr(dd, SEND_LOW_PRIORITY_LIST + (8 * i), 0);
13625         for (i = 0; i < VL_ARB_HIGH_PRIO_TABLE_SIZE; i++)
13626                 write_csr(dd, SEND_HIGH_PRIORITY_LIST + (8 * i), 0);
13627         for (i = 0; i < chip_send_contexts(dd) / NUM_CONTEXTS_PER_SET; i++)
13628                 write_csr(dd, SEND_CONTEXT_SET_CTRL + (8 * i), 0);
13629         for (i = 0; i < TXE_NUM_32_BIT_COUNTER; i++)
13630                 write_csr(dd, SEND_COUNTER_ARRAY32 + (8 * i), 0);
13631         for (i = 0; i < TXE_NUM_64_BIT_COUNTER; i++)
13632                 write_csr(dd, SEND_COUNTER_ARRAY64 + (8 * i), 0);
13633         write_csr(dd, SEND_CM_CTRL, SEND_CM_CTRL_RESETCSR);
13634         write_csr(dd, SEND_CM_GLOBAL_CREDIT, SEND_CM_GLOBAL_CREDIT_RESETCSR);
13635         /* SEND_CM_CREDIT_USED_STATUS read-only */
13636         write_csr(dd, SEND_CM_TIMER_CTRL, 0);
13637         write_csr(dd, SEND_CM_LOCAL_AU_TABLE0_TO3, 0);
13638         write_csr(dd, SEND_CM_LOCAL_AU_TABLE4_TO7, 0);
13639         write_csr(dd, SEND_CM_REMOTE_AU_TABLE0_TO3, 0);
13640         write_csr(dd, SEND_CM_REMOTE_AU_TABLE4_TO7, 0);
13641         for (i = 0; i < TXE_NUM_DATA_VL; i++)
13642                 write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0);
13643         write_csr(dd, SEND_CM_CREDIT_VL15, 0);
13644         /* SEND_CM_CREDIT_USED_VL read-only */
13645         /* SEND_CM_CREDIT_USED_VL15 read-only */
13646         /* SEND_EGRESS_CTXT_STATUS read-only */
13647         /* SEND_EGRESS_SEND_DMA_STATUS read-only */
13648         write_csr(dd, SEND_EGRESS_ERR_INFO, ~0ull);
13649         /* SEND_EGRESS_ERR_INFO read-only */
13650         /* SEND_EGRESS_ERR_SOURCE read-only */
13651 
13652         /*
13653          * TXE Per-Context CSRs
13654          */
13655         for (i = 0; i < chip_send_contexts(dd); i++) {
13656                 write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0);
13657                 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_CTRL, 0);
13658                 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0);
13659                 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_FORCE, 0);
13660                 write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, 0);
13661                 write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~0ull);
13662                 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_ENABLE, 0);
13663                 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_VL, 0);
13664                 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_JOB_KEY, 0);
13665                 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_PARTITION_KEY, 0);
13666                 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, 0);
13667                 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_OPCODE, 0);
13668         }
13669 
13670         /*
13671          * TXE Per-SDMA CSRs
13672          */
13673         for (i = 0; i < chip_sdma_engines(dd); i++) {
13674                 write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0);
13675                 /* SEND_DMA_STATUS read-only */
13676                 write_kctxt_csr(dd, i, SEND_DMA_BASE_ADDR, 0);
13677                 write_kctxt_csr(dd, i, SEND_DMA_LEN_GEN, 0);
13678                 write_kctxt_csr(dd, i, SEND_DMA_TAIL, 0);
13679                 /* SEND_DMA_HEAD read-only */
13680                 write_kctxt_csr(dd, i, SEND_DMA_HEAD_ADDR, 0);
13681                 write_kctxt_csr(dd, i, SEND_DMA_PRIORITY_THLD, 0);
13682                 /* SEND_DMA_IDLE_CNT read-only */
13683                 write_kctxt_csr(dd, i, SEND_DMA_RELOAD_CNT, 0);
13684                 write_kctxt_csr(dd, i, SEND_DMA_DESC_CNT, 0);
13685                 /* SEND_DMA_DESC_FETCHED_CNT read-only */
13686                 /* SEND_DMA_ENG_ERR_STATUS read-only */
13687                 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, 0);
13688                 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~0ull);
13689                 /* SEND_DMA_ENG_ERR_FORCE leave alone */
13690                 write_kctxt_csr(dd, i, SEND_DMA_CHECK_ENABLE, 0);
13691                 write_kctxt_csr(dd, i, SEND_DMA_CHECK_VL, 0);
13692                 write_kctxt_csr(dd, i, SEND_DMA_CHECK_JOB_KEY, 0);
13693                 write_kctxt_csr(dd, i, SEND_DMA_CHECK_PARTITION_KEY, 0);
13694                 write_kctxt_csr(dd, i, SEND_DMA_CHECK_SLID, 0);
13695                 write_kctxt_csr(dd, i, SEND_DMA_CHECK_OPCODE, 0);
13696                 write_kctxt_csr(dd, i, SEND_DMA_MEMORY, 0);
13697         }
13698 }
13699 
13700 /*
13701  * Expect on entry:
13702  * o Packet ingress is disabled, i.e. RcvCtrl.RcvPortEnable == 0
13703  */
13704 static void init_rbufs(struct hfi1_devdata *dd)
13705 {
13706         u64 reg;
13707         int count;
13708 
13709         /*
13710          * Wait for DMA to stop: RxRbufPktPending and RxPktInProgress are
13711          * clear.
13712          */
13713         count = 0;
13714         while (1) {
13715                 reg = read_csr(dd, RCV_STATUS);
13716                 if ((reg & (RCV_STATUS_RX_RBUF_PKT_PENDING_SMASK
13717                             | RCV_STATUS_RX_PKT_IN_PROGRESS_SMASK)) == 0)
13718                         break;
13719                 /*
13720                  * Give up after 1ms - maximum wait time.
13721                  *
13722                  * RBuf size is 136KiB.  Slowest possible is PCIe Gen1 x1 at
13723                  * 250MB/s bandwidth.  Lower rate to 66% for overhead to get:
13724                  *      136 KB / (66% * 250MB/s) = 844us
13725                  */
13726                 if (count++ > 500) {
13727                         dd_dev_err(dd,
13728                                    "%s: in-progress DMA not clearing: RcvStatus 0x%llx, continuing\n",
13729                                    __func__, reg);
13730                         break;
13731                 }
13732                 udelay(2); /* do not busy-wait the CSR */
13733         }
13734 
13735         /* start the init - expect RcvCtrl to be 0 */
13736         write_csr(dd, RCV_CTRL, RCV_CTRL_RX_RBUF_INIT_SMASK);
13737 
13738         /*
13739          * Read to force the write of Rcvtrl.RxRbufInit.  There is a brief
13740          * period after the write before RcvStatus.RxRbufInitDone is valid.
13741          * The delay in the first run through the loop below is sufficient and
13742          * required before the first read of RcvStatus.RxRbufInintDone.
13743          */
13744         read_csr(dd, RCV_CTRL);
13745 
13746         /* wait for the init to finish */
13747         count = 0;
13748         while (1) {
13749                 /* delay is required first time through - see above */
13750                 udelay(2); /* do not busy-wait the CSR */
13751                 reg = read_csr(dd, RCV_STATUS);
13752                 if (reg & (RCV_STATUS_RX_RBUF_INIT_DONE_SMASK))
13753                         break;
13754 
13755                 /* give up after 100us - slowest possible at 33MHz is 73us */
13756                 if (count++ > 50) {
13757                         dd_dev_err(dd,
13758                                    "%s: RcvStatus.RxRbufInit not set, continuing\n",
13759                                    __func__);
13760                         break;
13761                 }
13762         }
13763 }
13764 
13765 /* set RXE CSRs to chip reset defaults */
13766 static void reset_rxe_csrs(struct hfi1_devdata *dd)
13767 {
13768         int i, j;
13769 
13770         /*
13771          * RXE Kernel CSRs
13772          */
13773         write_csr(dd, RCV_CTRL, 0);
13774         init_rbufs(dd);
13775         /* RCV_STATUS read-only */
13776         /* RCV_CONTEXTS read-only */
13777         /* RCV_ARRAY_CNT read-only */
13778         /* RCV_BUF_SIZE read-only */
13779         write_csr(dd, RCV_BTH_QP, 0);
13780         write_csr(dd, RCV_MULTICAST, 0);
13781         write_csr(dd, RCV_BYPASS, 0);
13782         write_csr(dd, RCV_VL15, 0);
13783         /* this is a clear-down */
13784         write_csr(dd, RCV_ERR_INFO,
13785                   RCV_ERR_INFO_RCV_EXCESS_BUFFER_OVERRUN_SMASK);
13786         /* RCV_ERR_STATUS read-only */
13787         write_csr(dd, RCV_ERR_MASK, 0);
13788         write_csr(dd, RCV_ERR_CLEAR, ~0ull);
13789         /* RCV_ERR_FORCE leave alone */
13790         for (i = 0; i < 32; i++)
13791                 write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0);
13792         for (i = 0; i < 4; i++)
13793                 write_csr(dd, RCV_PARTITION_KEY + (8 * i), 0);
13794         for (i = 0; i < RXE_NUM_32_BIT_COUNTERS; i++)
13795                 write_csr(dd, RCV_COUNTER_ARRAY32 + (8 * i), 0);
13796         for (i = 0; i < RXE_NUM_64_BIT_COUNTERS; i++)
13797                 write_csr(dd, RCV_COUNTER_ARRAY64 + (8 * i), 0);
13798         for (i = 0; i < RXE_NUM_RSM_INSTANCES; i++)
13799                 clear_rsm_rule(dd, i);
13800         for (i = 0; i < 32; i++)
13801                 write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), 0);
13802 
13803         /*
13804          * RXE Kernel and User Per-Context CSRs
13805          */
13806         for (i = 0; i < chip_rcv_contexts(dd); i++) {
13807                 /* kernel */
13808                 write_kctxt_csr(dd, i, RCV_CTXT_CTRL, 0);
13809                 /* RCV_CTXT_STATUS read-only */
13810                 write_kctxt_csr(dd, i, RCV_EGR_CTRL, 0);
13811                 write_kctxt_csr(dd, i, RCV_TID_CTRL, 0);
13812                 write_kctxt_csr(dd, i, RCV_KEY_CTRL, 0);
13813                 write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0);
13814                 write_kctxt_csr(dd, i, RCV_HDR_CNT, 0);
13815                 write_kctxt_csr(dd, i, RCV_HDR_ENT_SIZE, 0);
13816                 write_kctxt_csr(dd, i, RCV_HDR_SIZE, 0);
13817                 write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0);
13818                 write_kctxt_csr(dd, i, RCV_AVAIL_TIME_OUT, 0);
13819                 write_kctxt_csr(dd, i, RCV_HDR_OVFL_CNT, 0);
13820 
13821                 /* user */
13822                 /* RCV_HDR_TAIL read-only */
13823                 write_uctxt_csr(dd, i, RCV_HDR_HEAD, 0);
13824                 /* RCV_EGR_INDEX_TAIL read-only */
13825                 write_uctxt_csr(dd, i, RCV_EGR_INDEX_HEAD, 0);
13826                 /* RCV_EGR_OFFSET_TAIL read-only */
13827                 for (j = 0; j < RXE_NUM_TID_FLOWS; j++) {
13828                         write_uctxt_csr(dd, i,
13829                                         RCV_TID_FLOW_TABLE + (8 * j), 0);
13830                 }
13831         }
13832 }
13833 
13834 /*
13835  * Set sc2vl tables.
13836  *
13837  * They power on to zeros, so to avoid send context errors
13838  * they need to be set:
13839  *
13840  * SC 0-7 -> VL 0-7 (respectively)
13841  * SC 15  -> VL 15
13842  * otherwise
13843  *        -> VL 0
13844  */
13845 static void init_sc2vl_tables(struct hfi1_devdata *dd)
13846 {
13847         int i;
13848         /* init per architecture spec, constrained by hardware capability */
13849 
13850         /* HFI maps sent packets */
13851         write_csr(dd, SEND_SC2VLT0, SC2VL_VAL(
13852                 0,
13853                 0, 0, 1, 1,
13854                 2, 2, 3, 3,
13855                 4, 4, 5, 5,
13856                 6, 6, 7, 7));
13857         write_csr(dd, SEND_SC2VLT1, SC2VL_VAL(
13858                 1,
13859                 8, 0, 9, 0,
13860                 10, 0, 11, 0,
13861                 12, 0, 13, 0,
13862                 14, 0, 15, 15));
13863         write_csr(dd, SEND_SC2VLT2, SC2VL_VAL(
13864                 2,
13865                 16, 0, 17, 0,
13866                 18, 0, 19, 0,
13867                 20, 0, 21, 0,
13868                 22, 0, 23, 0));
13869         write_csr(dd, SEND_SC2VLT3, SC2VL_VAL(
13870                 3,
13871                 24, 0, 25, 0,
13872                 26, 0, 27, 0,
13873                 28, 0, 29, 0,
13874                 30, 0, 31, 0));
13875 
13876         /* DC maps received packets */
13877         write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0, DC_SC_VL_VAL(
13878                 15_0,
13879                 0, 0, 1, 1,  2, 2,  3, 3,  4, 4,  5, 5,  6, 6,  7,  7,
13880                 8, 0, 9, 0, 10, 0, 11, 0, 12, 0, 13, 0, 14, 0, 15, 15));
13881         write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16, DC_SC_VL_VAL(
13882                 31_16,
13883                 16, 0, 17, 0, 18, 0, 19, 0, 20, 0, 21, 0, 22, 0, 23, 0,
13884                 24, 0, 25, 0, 26, 0, 27, 0, 28, 0, 29, 0, 30, 0, 31, 0));
13885 
13886         /* initialize the cached sc2vl values consistently with h/w */
13887         for (i = 0; i < 32; i++) {
13888                 if (i < 8 || i == 15)
13889                         *((u8 *)(dd->sc2vl) + i) = (u8)i;
13890                 else
13891                         *((u8 *)(dd->sc2vl) + i) = 0;
13892         }
13893 }
13894 
13895 /*
13896  * Read chip sizes and then reset parts to sane, disabled, values.  We cannot
13897  * depend on the chip going through a power-on reset - a driver may be loaded
13898  * and unloaded many times.
13899  *
13900  * Do not write any CSR values to the chip in this routine - there may be
13901  * a reset following the (possible) FLR in this routine.
13902  *
13903  */
13904 static int init_chip(struct hfi1_devdata *dd)
13905 {
13906         int i;
13907         int ret = 0;
13908 
13909         /*
13910          * Put the HFI CSRs in a known state.
13911          * Combine this with a DC reset.
13912          *
13913          * Stop the device from doing anything while we do a
13914          * reset.  We know there are no other active users of
13915          * the device since we are now in charge.  Turn off
13916          * off all outbound and inbound traffic and make sure
13917          * the device does not generate any interrupts.
13918          */
13919 
13920         /* disable send contexts and SDMA engines */
13921         write_csr(dd, SEND_CTRL, 0);
13922         for (i = 0; i < chip_send_contexts(dd); i++)
13923                 write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0);
13924         for (i = 0; i < chip_sdma_engines(dd); i++)
13925                 write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0);
13926         /* disable port (turn off RXE inbound traffic) and contexts */
13927         write_csr(dd, RCV_CTRL, 0);
13928         for (i = 0; i < chip_rcv_contexts(dd); i++)
13929                 write_csr(dd, RCV_CTXT_CTRL, 0);
13930         /* mask all interrupt sources */
13931         for (i = 0; i < CCE_NUM_INT_CSRS; i++)
13932                 write_csr(dd, CCE_INT_MASK + (8 * i), 0ull);
13933 
13934         /*
13935          * DC Reset: do a full DC reset before the register clear.
13936          * A recommended length of time to hold is one CSR read,
13937          * so reread the CceDcCtrl.  Then, hold the DC in reset
13938          * across the clear.
13939          */
13940         write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_DC_RESET_SMASK);
13941         (void)read_csr(dd, CCE_DC_CTRL);
13942 
13943         if (use_flr) {
13944                 /*
13945                  * A FLR will reset the SPC core and part of the PCIe.
13946                  * The parts that need to be restored have already been
13947                  * saved.
13948                  */
13949                 dd_dev_info(dd, "Resetting CSRs with FLR\n");
13950 
13951                 /* do the FLR, the DC reset will remain */
13952                 pcie_flr(dd->pcidev);
13953 
13954                 /* restore command and BARs */
13955                 ret = restore_pci_variables(dd);
13956                 if (ret) {
13957                         dd_dev_err(dd, "%s: Could not restore PCI variables\n",
13958                                    __func__);
13959                         return ret;
13960                 }
13961 
13962                 if (is_ax(dd)) {
13963                         dd_dev_info(dd, "Resetting CSRs with FLR\n");
13964                         pcie_flr(dd->pcidev);
13965                         ret = restore_pci_variables(dd);
13966                         if (ret) {
13967                                 dd_dev_err(dd, "%s: Could not restore PCI variables\n",
13968                                            __func__);
13969                                 return ret;
13970                         }
13971                 }
13972         } else {
13973                 dd_dev_info(dd, "Resetting CSRs with writes\n");
13974                 reset_cce_csrs(dd);
13975                 reset_txe_csrs(dd);
13976                 reset_rxe_csrs(dd);
13977                 reset_misc_csrs(dd);
13978         }
13979         /* clear the DC reset */
13980         write_csr(dd, CCE_DC_CTRL, 0);
13981 
13982         /* Set the LED off */
13983         setextled(dd, 0);
13984 
13985         /*
13986          * Clear the QSFP reset.
13987          * An FLR enforces a 0 on all out pins. The driver does not touch
13988          * ASIC_QSFPn_OUT otherwise.  This leaves RESET_N low and
13989          * anything plugged constantly in reset, if it pays attention
13990          * to RESET_N.
13991          * Prime examples of this are optical cables. Set all pins high.
13992          * I2CCLK and I2CDAT will change per direction, and INT_N and
13993          * MODPRS_N are input only and their value is ignored.
13994          */
13995         write_csr(dd, ASIC_QSFP1_OUT, 0x1f);
13996         write_csr(dd, ASIC_QSFP2_OUT, 0x1f);
13997         init_chip_resources(dd);
13998         return ret;
13999 }
14000 
14001 static void init_early_variables(struct hfi1_devdata *dd)
14002 {
14003         int i;
14004 
14005         /* assign link credit variables */
14006         dd->vau = CM_VAU;
14007         dd->link_credits = CM_GLOBAL_CREDITS;
14008         if (is_ax(dd))
14009                 dd->link_credits--;
14010         dd->vcu = cu_to_vcu(hfi1_cu);
14011         /* enough room for 8 MAD packets plus header - 17K */
14012         dd->vl15_init = (8 * (2048 + 128)) / vau_to_au(dd->vau);
14013         if (dd->vl15_init > dd->link_credits)
14014                 dd->vl15_init = dd->link_credits;
14015 
14016         write_uninitialized_csrs_and_memories(dd);
14017 
14018         if (HFI1_CAP_IS_KSET(PKEY_CHECK))
14019                 for (i = 0; i < dd->num_pports; i++) {
14020                         struct hfi1_pportdata *ppd = &dd->pport[i];
14021 
14022                         set_partition_keys(ppd);
14023                 }
14024         init_sc2vl_tables(dd);
14025 }
14026 
14027 static void init_kdeth_qp(struct hfi1_devdata *dd)
14028 {
14029         /* user changed the KDETH_QP */
14030         if (kdeth_qp != 0 && kdeth_qp >= 0xff) {
14031                 /* out of range or illegal value */
14032                 dd_dev_err(dd, "Invalid KDETH queue pair prefix, ignoring");
14033                 kdeth_qp = 0;
14034         }
14035         if (kdeth_qp == 0)      /* not set, or failed range check */
14036                 kdeth_qp = DEFAULT_KDETH_QP;
14037 
14038         write_csr(dd, SEND_BTH_QP,
14039                   (kdeth_qp & SEND_BTH_QP_KDETH_QP_MASK) <<
14040                   SEND_BTH_QP_KDETH_QP_SHIFT);
14041 
14042         write_csr(dd, RCV_BTH_QP,
14043                   (kdeth_qp & RCV_BTH_QP_KDETH_QP_MASK) <<
14044                   RCV_BTH_QP_KDETH_QP_SHIFT);
14045 }
14046 
14047 /**
14048  * hfi1_get_qp_map
14049  * @dd: device data
14050  * @idx: index to read
14051  */
14052 u8 hfi1_get_qp_map(struct hfi1_devdata *dd, u8 idx)
14053 {
14054         u64 reg = read_csr(dd, RCV_QP_MAP_TABLE + (idx / 8) * 8);
14055 
14056         reg >>= (idx % 8) * 8;
14057         return reg;
14058 }
14059 
14060 /**
14061  * init_qpmap_table
14062  * @dd - device data
14063  * @first_ctxt - first context
14064  * @last_ctxt - first context
14065  *
14066  * This return sets the qpn mapping table that
14067  * is indexed by qpn[8:1].
14068  *
14069  * The routine will round robin the 256 settings
14070  * from first_ctxt to last_ctxt.
14071  *
14072  * The first/last looks ahead to having specialized
14073  * receive contexts for mgmt and bypass.  Normal
14074  * verbs traffic will assumed to be on a range
14075  * of receive contexts.
14076  */
14077 static void init_qpmap_table(struct hfi1_devdata *dd,
14078                              u32 first_ctxt,
14079                              u32 last_ctxt)
14080 {
14081         u64 reg = 0;
14082         u64 regno = RCV_QP_MAP_TABLE;
14083         int i;
14084         u64 ctxt = first_ctxt;
14085 
14086         for (i = 0; i < 256; i++) {
14087                 reg |= ctxt << (8 * (i % 8));
14088                 ctxt++;
14089                 if (ctxt > last_ctxt)
14090                         ctxt = first_ctxt;
14091                 if (i % 8 == 7) {
14092                         write_csr(dd, regno, reg);
14093                         reg = 0;
14094                         regno += 8;
14095                 }
14096         }
14097 
14098         add_rcvctrl(dd, RCV_CTRL_RCV_QP_MAP_ENABLE_SMASK
14099                         | RCV_CTRL_RCV_BYPASS_ENABLE_SMASK);
14100 }
14101 
14102 struct rsm_map_table {
14103         u64 map[NUM_MAP_REGS];
14104         unsigned int used;
14105 };
14106 
14107 struct rsm_rule_data {
14108         u8 offset;
14109         u8 pkt_type;
14110         u32 field1_off;
14111         u32 field2_off;
14112         u32 index1_off;
14113         u32 index1_width;
14114         u32 index2_off;
14115         u32 index2_width;
14116         u32 mask1;
14117         u32 value1;
14118         u32 mask2;
14119         u32 value2;
14120 };
14121 
14122 /*
14123  * Return an initialized RMT map table for users to fill in.  OK if it
14124  * returns NULL, indicating no table.
14125  */
14126 static struct rsm_map_table *alloc_rsm_map_table(struct hfi1_devdata *dd)
14127 {
14128         struct rsm_map_table *rmt;
14129         u8 rxcontext = is_ax(dd) ? 0 : 0xff;  /* 0 is default if a0 ver. */
14130 
14131         rmt = kmalloc(sizeof(*rmt), GFP_KERNEL);
14132         if (rmt) {
14133                 memset(rmt->map, rxcontext, sizeof(rmt->map));
14134                 rmt->used = 0;
14135         }
14136 
14137         return rmt;
14138 }
14139 
14140 /*
14141  * Write the final RMT map table to the chip and free the table.  OK if
14142  * table is NULL.
14143  */
14144 static void complete_rsm_map_table(struct hfi1_devdata *dd,
14145                                    struct rsm_map_table *rmt)
14146 {
14147         int i;
14148 
14149         if (rmt) {
14150                 /* write table to chip */
14151                 for (i = 0; i < NUM_MAP_REGS; i++)
14152                         write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), rmt->map[i]);
14153 
14154                 /* enable RSM */
14155                 add_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
14156         }
14157 }
14158 
14159 /*
14160  * Add a receive side mapping rule.
14161  */
14162 static void add_rsm_rule(struct hfi1_devdata *dd, u8 rule_index,
14163                          struct rsm_rule_data *rrd)
14164 {
14165         write_csr(dd, RCV_RSM_CFG + (8 * rule_index),
14166                   (u64)rrd->offset << RCV_RSM_CFG_OFFSET_SHIFT |
14167                   1ull << rule_index | /* enable bit */
14168                   (u64)rrd->pkt_type << RCV_RSM_CFG_PACKET_TYPE_SHIFT);
14169         write_csr(dd, RCV_RSM_SELECT + (8 * rule_index),
14170                   (u64)rrd->field1_off << RCV_RSM_SELECT_FIELD1_OFFSET_SHIFT |
14171                   (u64)rrd->field2_off << RCV_RSM_SELECT_FIELD2_OFFSET_SHIFT |
14172                   (u64)rrd->index1_off << RCV_RSM_SELECT_INDEX1_OFFSET_SHIFT |
14173                   (u64)rrd->index1_width << RCV_RSM_SELECT_INDEX1_WIDTH_SHIFT |
14174                   (u64)rrd->index2_off << RCV_RSM_SELECT_INDEX2_OFFSET_SHIFT |
14175                   (u64)rrd->index2_width << RCV_RSM_SELECT_INDEX2_WIDTH_SHIFT);
14176         write_csr(dd, RCV_RSM_MATCH + (8 * rule_index),
14177                   (u64)rrd->mask1 << RCV_RSM_MATCH_MASK1_SHIFT |
14178                   (u64)rrd->value1 << RCV_RSM_MATCH_VALUE1_SHIFT |
14179                   (u64)rrd->mask2 << RCV_RSM_MATCH_MASK2_SHIFT |
14180                   (u64)rrd->value2 << RCV_RSM_MATCH_VALUE2_SHIFT);
14181 }
14182 
14183 /*
14184  * Clear a receive side mapping rule.
14185  */
14186 static void clear_rsm_rule(struct hfi1_devdata *dd, u8 rule_index)
14187 {
14188         write_csr(dd, RCV_RSM_CFG + (8 * rule_index), 0);
14189         write_csr(dd, RCV_RSM_SELECT + (8 * rule_index), 0);
14190         write_csr(dd, RCV_RSM_MATCH + (8 * rule_index), 0);
14191 }
14192 
14193 /* return the number of RSM map table entries that will be used for QOS */
14194 static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp,
14195                            unsigned int *np)
14196 {
14197         int i;
14198         unsigned int m, n;
14199         u8 max_by_vl = 0;
14200 
14201         /* is QOS active at all? */
14202         if (dd->n_krcv_queues <= MIN_KERNEL_KCTXTS ||
14203             num_vls == 1 ||
14204             krcvqsset <= 1)
14205                 goto no_qos;
14206 
14207         /* determine bits for qpn */
14208         for (i = 0; i < min_t(unsigned int, num_vls, krcvqsset); i++)
14209                 if (krcvqs[i] > max_by_vl)
14210                         max_by_vl = krcvqs[i];
14211         if (max_by_vl > 32)
14212                 goto no_qos;
14213         m = ilog2(__roundup_pow_of_two(max_by_vl));
14214 
14215         /* determine bits for vl */
14216         n = ilog2(__roundup_pow_of_two(num_vls));
14217 
14218         /* reject if too much is used */
14219         if ((m + n) > 7)
14220                 goto no_qos;
14221 
14222         if (mp)
14223                 *mp = m;
14224         if (np)
14225                 *np = n;
14226 
14227         return 1 << (m + n);
14228 
14229 no_qos:
14230         if (mp)
14231                 *mp = 0;
14232         if (np)
14233                 *np = 0;
14234         return 0;
14235 }
14236 
14237 /**
14238  * init_qos - init RX qos
14239  * @dd - device data
14240  * @rmt - RSM map table
14241  *
14242  * This routine initializes Rule 0 and the RSM map table to implement
14243  * quality of service (qos).
14244  *
14245  * If all of the limit tests succeed, qos is applied based on the array
14246  * interpretation of krcvqs where entry 0 is VL0.
14247  *
14248  * The number of vl bits (n) and the number of qpn bits (m) are computed to
14249  * feed both the RSM map table and the single rule.
14250  */
14251 static void init_qos(struct hfi1_devdata *dd, struct rsm_map_table *rmt)
14252 {
14253         struct rsm_rule_data rrd;
14254         unsigned qpns_per_vl, ctxt, i, qpn, n = 1, m;
14255         unsigned int rmt_entries;
14256         u64 reg;
14257 
14258         if (!rmt)
14259                 goto bail;
14260         rmt_entries = qos_rmt_entries(dd, &m, &n);
14261         if (rmt_entries == 0)
14262                 goto bail;
14263         qpns_per_vl = 1 << m;
14264 
14265         /* enough room in the map table? */
14266         rmt_entries = 1 << (m + n);
14267         if (rmt->used + rmt_entries >= NUM_MAP_ENTRIES)
14268                 goto bail;
14269 
14270         /* add qos entries to the the RSM map table */
14271         for (i = 0, ctxt = FIRST_KERNEL_KCTXT; i < num_vls; i++) {
14272                 unsigned tctxt;
14273 
14274                 for (qpn = 0, tctxt = ctxt;
14275                      krcvqs[i] && qpn < qpns_per_vl; qpn++) {
14276                         unsigned idx, regoff, regidx;
14277 
14278                         /* generate the index the hardware will produce */
14279                         idx = rmt->used + ((qpn << n) ^ i);
14280                         regoff = (idx % 8) * 8;
14281                         regidx = idx / 8;
14282                         /* replace default with context number */
14283                         reg = rmt->map[regidx];
14284                         reg &= ~(RCV_RSM_MAP_TABLE_RCV_CONTEXT_A_MASK
14285                                 << regoff);
14286                         reg |= (u64)(tctxt++) << regoff;
14287                         rmt->map[regidx] = reg;
14288                         if (tctxt == ctxt + krcvqs[i])
14289                                 tctxt = ctxt;
14290                 }
14291                 ctxt += krcvqs[i];
14292         }
14293 
14294         rrd.offset = rmt->used;
14295         rrd.pkt_type = 2;
14296         rrd.field1_off = LRH_BTH_MATCH_OFFSET;
14297         rrd.field2_off = LRH_SC_MATCH_OFFSET;
14298         rrd.index1_off = LRH_SC_SELECT_OFFSET;
14299         rrd.index1_width = n;
14300         rrd.index2_off = QPN_SELECT_OFFSET;
14301         rrd.index2_width = m + n;
14302         rrd.mask1 = LRH_BTH_MASK;
14303         rrd.value1 = LRH_BTH_VALUE;
14304         rrd.mask2 = LRH_SC_MASK;
14305         rrd.value2 = LRH_SC_VALUE;
14306 
14307         /* add rule 0 */
14308         add_rsm_rule(dd, RSM_INS_VERBS, &rrd);
14309 
14310         /* mark RSM map entries as used */
14311         rmt->used += rmt_entries;
14312         /* map everything else to the mcast/err/vl15 context */
14313         init_qpmap_table(dd, HFI1_CTRL_CTXT, HFI1_CTRL_CTXT);
14314         dd->qos_shift = n + 1;
14315         return;
14316 bail:
14317         dd->qos_shift = 1;
14318         init_qpmap_table(dd, FIRST_KERNEL_KCTXT, dd->n_krcv_queues - 1);
14319 }
14320 
14321 static void init_fecn_handling(struct hfi1_devdata *dd,
14322                                struct rsm_map_table *rmt)
14323 {
14324         struct rsm_rule_data rrd;
14325         u64 reg;
14326         int i, idx, regoff, regidx, start;
14327         u8 offset;
14328         u32 total_cnt;
14329 
14330         if (HFI1_CAP_IS_KSET(TID_RDMA))
14331                 /* Exclude context 0 */
14332                 start = 1;
14333         else
14334                 start = dd->first_dyn_alloc_ctxt;
14335 
14336         total_cnt = dd->num_rcv_contexts - start;
14337 
14338         /* there needs to be enough room in the map table */
14339         if (rmt->used + total_cnt >= NUM_MAP_ENTRIES) {
14340                 dd_dev_err(dd, "FECN handling disabled - too many contexts allocated\n");
14341                 return;
14342         }
14343 
14344         /*
14345          * RSM will extract the destination context as an index into the
14346          * map table.  The destination contexts are a sequential block
14347          * in the range start...num_rcv_contexts-1 (inclusive).
14348          * Map entries are accessed as offset + extracted value.  Adjust
14349          * the added offset so this sequence can be placed anywhere in
14350          * the table - as long as the entries themselves do not wrap.
14351          * There are only enough bits in offset for the table size, so
14352          * start with that to allow for a "negative" offset.
14353          */
14354         offset = (u8)(NUM_MAP_ENTRIES + rmt->used - start);
14355 
14356         for (i = start, idx = rmt->used; i < dd->num_rcv_contexts;
14357              i++, idx++) {
14358                 /* replace with identity mapping */
14359                 regoff = (idx % 8) * 8;
14360                 regidx = idx / 8;
14361                 reg = rmt->map[regidx];
14362                 reg &= ~(RCV_RSM_MAP_TABLE_RCV_CONTEXT_A_MASK << regoff);
14363                 reg |= (u64)i << regoff;
14364                 rmt->map[regidx] = reg;
14365         }
14366 
14367         /*
14368          * For RSM intercept of Expected FECN packets:
14369          * o packet type 0 - expected
14370          * o match on F (bit 95), using select/match 1, and
14371          * o match on SH (bit 133), using select/match 2.
14372          *
14373          * Use index 1 to extract the 8-bit receive context from DestQP
14374          * (start at bit 64).  Use that as the RSM map table index.
14375          */
14376         rrd.offset = offset;
14377         rrd.pkt_type = 0;
14378         rrd.field1_off = 95;
14379         rrd.field2_off = 133;
14380         rrd.index1_off = 64;
14381         rrd.index1_width = 8;
14382         rrd.index2_off = 0;
14383         rrd.index2_width = 0;
14384         rrd.mask1 = 1;
14385         rrd.value1 = 1;
14386         rrd.mask2 = 1;
14387         rrd.value2 = 1;
14388 
14389         /* add rule 1 */
14390         add_rsm_rule(dd, RSM_INS_FECN, &rrd);
14391 
14392         rmt->used += total_cnt;
14393 }
14394 
14395 /* Initialize RSM for VNIC */
14396 void hfi1_init_vnic_rsm(struct hfi1_devdata *dd)
14397 {
14398         u8 i, j;
14399         u8 ctx_id = 0;
14400         u64 reg;
14401         u32 regoff;
14402         struct rsm_rule_data rrd;
14403 
14404         if (hfi1_vnic_is_rsm_full(dd, NUM_VNIC_MAP_ENTRIES)) {
14405                 dd_dev_err(dd, "Vnic RSM disabled, rmt entries used = %d\n",
14406                            dd->vnic.rmt_start);
14407                 return;
14408         }
14409 
14410         dev_dbg(&(dd)->pcidev->dev, "Vnic rsm start = %d, end %d\n",
14411                 dd->vnic.rmt_start,
14412                 dd->vnic.rmt_start + NUM_VNIC_MAP_ENTRIES);
14413 
14414         /* Update RSM mapping table, 32 regs, 256 entries - 1 ctx per byte */
14415         regoff = RCV_RSM_MAP_TABLE + (dd->vnic.rmt_start / 8) * 8;
14416         reg = read_csr(dd, regoff);
14417         for (i = 0; i < NUM_VNIC_MAP_ENTRIES; i++) {
14418                 /* Update map register with vnic context */
14419                 j = (dd->vnic.rmt_start + i) % 8;
14420                 reg &= ~(0xffllu << (j * 8));
14421                 reg |= (u64)dd->vnic.ctxt[ctx_id++]->ctxt << (j * 8);
14422                 /* Wrap up vnic ctx index */
14423                 ctx_id %= dd->vnic.num_ctxt;
14424                 /* Write back map register */
14425                 if (j == 7 || ((i + 1) == NUM_VNIC_MAP_ENTRIES)) {
14426                         dev_dbg(&(dd)->pcidev->dev,
14427                                 "Vnic rsm map reg[%d] =0x%llx\n",
14428                                 regoff - RCV_RSM_MAP_TABLE, reg);
14429 
14430                         write_csr(dd, regoff, reg);
14431                         regoff += 8;
14432                         if (i < (NUM_VNIC_MAP_ENTRIES - 1))
14433                                 reg = read_csr(dd, regoff);
14434                 }
14435         }
14436 
14437         /* Add rule for vnic */
14438         rrd.offset = dd->vnic.rmt_start;
14439         rrd.pkt_type = 4;
14440         /* Match 16B packets */
14441         rrd.field1_off = L2_TYPE_MATCH_OFFSET;
14442         rrd.mask1 = L2_TYPE_MASK;
14443         rrd.value1 = L2_16B_VALUE;
14444         /* Match ETH L4 packets */
14445         rrd.field2_off = L4_TYPE_MATCH_OFFSET;
14446         rrd.mask2 = L4_16B_TYPE_MASK;
14447         rrd.value2 = L4_16B_ETH_VALUE;
14448         /* Calc context from veswid and entropy */
14449         rrd.index1_off = L4_16B_HDR_VESWID_OFFSET;
14450         rrd.index1_width = ilog2(NUM_VNIC_MAP_ENTRIES);
14451         rrd.index2_off = L2_16B_ENTROPY_OFFSET;
14452         rrd.index2_width = ilog2(NUM_VNIC_MAP_ENTRIES);
14453         add_rsm_rule(dd, RSM_INS_VNIC, &rrd);
14454 
14455         /* Enable RSM if not already enabled */
14456         add_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
14457 }
14458 
14459 void hfi1_deinit_vnic_rsm(struct hfi1_devdata *dd)
14460 {
14461         clear_rsm_rule(dd, RSM_INS_VNIC);
14462 
14463         /* Disable RSM if used only by vnic */
14464         if (dd->vnic.rmt_start == 0)
14465                 clear_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
14466 }
14467 
14468 static int init_rxe(struct hfi1_devdata *dd)
14469 {
14470         struct rsm_map_table *rmt;
14471         u64 val;
14472 
14473         /* enable all receive errors */
14474         write_csr(dd, RCV_ERR_MASK, ~0ull);
14475 
14476         rmt = alloc_rsm_map_table(dd);
14477         if (!rmt)
14478                 return -ENOMEM;
14479 
14480         /* set up QOS, including the QPN map table */
14481         init_qos(dd, rmt);
14482         init_fecn_handling(dd, rmt);
14483         complete_rsm_map_table(dd, rmt);
14484         /* record number of used rsm map entries for vnic */
14485         dd->vnic.rmt_start = rmt->used;
14486         kfree(rmt);
14487 
14488         /*
14489          * make sure RcvCtrl.RcvWcb <= PCIe Device Control
14490          * Register Max_Payload_Size (PCI_EXP_DEVCTL in Linux PCIe config
14491          * space, PciCfgCap2.MaxPayloadSize in HFI).  There is only one
14492          * invalid configuration: RcvCtrl.RcvWcb set to its max of 256 and
14493          * Max_PayLoad_Size set to its minimum of 128.
14494          *
14495          * Presently, RcvCtrl.RcvWcb is not modified from its default of 0
14496          * (64 bytes).  Max_Payload_Size is possibly modified upward in
14497          * tune_pcie_caps() which is called after this routine.
14498          */
14499 
14500         /* Have 16 bytes (4DW) of bypass header available in header queue */
14501         val = read_csr(dd, RCV_BYPASS);
14502         val &= ~RCV_BYPASS_HDR_SIZE_SMASK;
14503         val |= ((4ull & RCV_BYPASS_HDR_SIZE_MASK) <<
14504                 RCV_BYPASS_HDR_SIZE_SHIFT);
14505         write_csr(dd, RCV_BYPASS, val);
14506         return 0;
14507 }
14508 
14509 static void init_other(struct hfi1_devdata *dd)
14510 {
14511         /* enable all CCE errors */
14512         write_csr(dd, CCE_ERR_MASK, ~0ull);
14513         /* enable *some* Misc errors */
14514         write_csr(dd, MISC_ERR_MASK, DRIVER_MISC_MASK);
14515         /* enable all DC errors, except LCB */
14516         write_csr(dd, DCC_ERR_FLG_EN, ~0ull);
14517         write_csr(dd, DC_DC8051_ERR_EN, ~0ull);
14518 }
14519 
14520 /*
14521  * Fill out the given AU table using the given CU.  A CU is defined in terms
14522  * AUs.  The table is a an encoding: given the index, how many AUs does that
14523  * represent?
14524  *
14525  * NOTE: Assumes that the register layout is the same for the
14526  * local and remote tables.
14527  */
14528 static void assign_cm_au_table(struct hfi1_devdata *dd, u32 cu,
14529                                u32 csr0to3, u32 csr4to7)
14530 {
14531         write_csr(dd, csr0to3,
14532                   0ull << SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE0_SHIFT |
14533                   1ull << SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE1_SHIFT |
14534                   2ull * cu <<
14535                   SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE2_SHIFT |
14536                   4ull * cu <<
14537                   SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE3_SHIFT);
14538         write_csr(dd, csr4to7,
14539                   8ull * cu <<
14540                   SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE4_SHIFT |
14541                   16ull * cu <<
14542                   SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE5_SHIFT |
14543                   32ull * cu <<
14544                   SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE6_SHIFT |
14545                   64ull * cu <<
14546                   SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE7_SHIFT);
14547 }
14548 
14549 static void assign_local_cm_au_table(struct hfi1_devdata *dd, u8 vcu)
14550 {
14551         assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_LOCAL_AU_TABLE0_TO3,
14552                            SEND_CM_LOCAL_AU_TABLE4_TO7);
14553 }
14554 
14555 void assign_remote_cm_au_table(struct hfi1_devdata *dd, u8 vcu)
14556 {
14557         assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_REMOTE_AU_TABLE0_TO3,
14558                            SEND_CM_REMOTE_AU_TABLE4_TO7);
14559 }
14560 
14561 static void init_txe(struct hfi1_devdata *dd)
14562 {
14563         int i;
14564 
14565         /* enable all PIO, SDMA, general, and Egress errors */
14566         write_csr(dd, SEND_PIO_ERR_MASK, ~0ull);
14567         write_csr(dd, SEND_DMA_ERR_MASK, ~0ull);
14568         write_csr(dd, SEND_ERR_MASK, ~0ull);
14569         write_csr(dd, SEND_EGRESS_ERR_MASK, ~0ull);
14570 
14571         /* enable all per-context and per-SDMA engine errors */
14572         for (i = 0; i < chip_send_contexts(dd); i++)
14573                 write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, ~0ull);
14574         for (i = 0; i < chip_sdma_engines(dd); i++)
14575                 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, ~0ull);
14576 
14577         /* set the local CU to AU mapping */
14578         assign_local_cm_au_table(dd, dd->vcu);
14579 
14580         /*
14581          * Set reasonable default for Credit Return Timer
14582          * Don't set on Simulator - causes it to choke.
14583          */
14584         if (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)
14585                 write_csr(dd, SEND_CM_TIMER_CTRL, HFI1_CREDIT_RETURN_RATE);
14586 }
14587 
14588 int hfi1_set_ctxt_jkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd,
14589                        u16 jkey)
14590 {
14591         u8 hw_ctxt;
14592         u64 reg;
14593 
14594         if (!rcd || !rcd->sc)
14595                 return -EINVAL;
14596 
14597         hw_ctxt = rcd->sc->hw_context;
14598         reg = SEND_CTXT_CHECK_JOB_KEY_MASK_SMASK | /* mask is always 1's */
14599                 ((jkey & SEND_CTXT_CHECK_JOB_KEY_VALUE_MASK) <<
14600                  SEND_CTXT_CHECK_JOB_KEY_VALUE_SHIFT);
14601         /* JOB_KEY_ALLOW_PERMISSIVE is not allowed by default */
14602         if (HFI1_CAP_KGET_MASK(rcd->flags, ALLOW_PERM_JKEY))
14603                 reg |= SEND_CTXT_CHECK_JOB_KEY_ALLOW_PERMISSIVE_SMASK;
14604         write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_JOB_KEY, reg);
14605         /*
14606          * Enable send-side J_KEY integrity check, unless this is A0 h/w
14607          */
14608         if (!is_ax(dd)) {
14609                 reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE);
14610                 reg |= SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
14611                 write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE, reg);
14612         }
14613 
14614         /* Enable J_KEY check on receive context. */
14615         reg = RCV_KEY_CTRL_JOB_KEY_ENABLE_SMASK |
14616                 ((jkey & RCV_KEY_CTRL_JOB_KEY_VALUE_MASK) <<
14617                  RCV_KEY_CTRL_JOB_KEY_VALUE_SHIFT);
14618         write_kctxt_csr(dd, rcd->ctxt, RCV_KEY_CTRL, reg);
14619 
14620         return 0;
14621 }
14622 
14623 int hfi1_clear_ctxt_jkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
14624 {
14625         u8 hw_ctxt;
14626         u64 reg;
14627 
14628         if (!rcd || !rcd->sc)
14629                 return -EINVAL;
14630 
14631         hw_ctxt = rcd->sc->hw_context;
14632         write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_JOB_KEY, 0);
14633         /*
14634          * Disable send-side J_KEY integrity check, unless this is A0 h/w.
14635          * This check would not have been enabled for A0 h/w, see
14636          * set_ctxt_jkey().
14637          */
14638         if (!is_ax(dd)) {
14639                 reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE);
14640                 reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
14641                 write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE, reg);
14642         }
14643         /* Turn off the J_KEY on the receive side */
14644         write_kctxt_csr(dd, rcd->ctxt, RCV_KEY_CTRL, 0);
14645 
14646         return 0;
14647 }
14648 
14649 int hfi1_set_ctxt_pkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd,
14650                        u16 pkey)
14651 {
14652         u8 hw_ctxt;
14653         u64 reg;
14654 
14655         if (!rcd || !rcd->sc)
14656                 return -EINVAL;
14657 
14658         hw_ctxt = rcd->sc->hw_context;
14659         reg = ((u64)pkey & SEND_CTXT_CHECK_PARTITION_KEY_VALUE_MASK) <<
14660                 SEND_CTXT_CHECK_PARTITION_KEY_VALUE_SHIFT;
14661         write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_PARTITION_KEY, reg);
14662         reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE);
14663         reg |= SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK;
14664         reg &= ~SEND_CTXT_CHECK_ENABLE_DISALLOW_KDETH_PACKETS_SMASK;
14665         write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE, reg);
14666 
14667         return 0;
14668 }
14669 
14670 int hfi1_clear_ctxt_pkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *ctxt)
14671 {
14672         u8 hw_ctxt;
14673         u64 reg;
14674 
14675         if (!ctxt || !ctxt->sc)
14676                 return -EINVAL;
14677 
14678         hw_ctxt = ctxt->sc->hw_context;
14679         reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE);
14680         reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK;
14681         write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE, reg);
14682         write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_PARTITION_KEY, 0);
14683 
14684         return 0;
14685 }
14686 
14687 /*
14688  * Start doing the clean up the the chip. Our clean up happens in multiple
14689  * stages and this is just the first.
14690  */
14691 void hfi1_start_cleanup(struct hfi1_devdata *dd)
14692 {
14693         aspm_exit(dd);
14694         free_cntrs(dd);
14695         free_rcverr(dd);
14696         finish_chip_resources(dd);
14697 }
14698 
14699 #define HFI_BASE_GUID(dev) \
14700         ((dev)->base_guid & ~(1ULL << GUID_HFI_INDEX_SHIFT))
14701 
14702 /*
14703  * Information can be shared between the two HFIs on the same ASIC
14704  * in the same OS.  This function finds the peer device and sets
14705  * up a shared structure.
14706  */
14707 static int init_asic_data(struct hfi1_devdata *dd)
14708 {
14709         unsigned long index;
14710         struct hfi1_devdata *peer;
14711         struct hfi1_asic_data *asic_data;
14712         int ret = 0;
14713 
14714         /* pre-allocate the asic structure in case we are the first device */
14715         asic_data = kzalloc(sizeof(*dd->asic_data), GFP_KERNEL);
14716         if (!asic_data)
14717                 return -ENOMEM;
14718 
14719         xa_lock_irq(&hfi1_dev_table);
14720         /* Find our peer device */
14721         xa_for_each(&hfi1_dev_table, index, peer) {
14722                 if ((HFI_BASE_GUID(dd) == HFI_BASE_GUID(peer)) &&
14723                     dd->unit != peer->unit)
14724                         break;
14725         }
14726 
14727         if (peer) {
14728                 /* use already allocated structure */
14729                 dd->asic_data = peer->asic_data;
14730                 kfree(asic_data);
14731         } else {
14732                 dd->asic_data = asic_data;
14733                 mutex_init(&dd->asic_data->asic_resource_mutex);
14734         }
14735         dd->asic_data->dds[dd->hfi1_id] = dd; /* self back-pointer */
14736         xa_unlock_irq(&hfi1_dev_table);
14737 
14738         /* first one through - set up i2c devices */
14739         if (!peer)
14740                 ret = set_up_i2c(dd, dd->asic_data);
14741 
14742         return ret;
14743 }
14744 
14745 /*
14746  * Set dd->boardname.  Use a generic name if a name is not returned from
14747  * EFI variable space.
14748  *
14749  * Return 0 on success, -ENOMEM if space could not be allocated.
14750  */
14751 static int obtain_boardname(struct hfi1_devdata *dd)
14752 {
14753         /* generic board description */
14754         const char generic[] =
14755                 "Intel Omni-Path Host Fabric Interface Adapter 100 Series";
14756         unsigned long size;
14757         int ret;
14758 
14759         ret = read_hfi1_efi_var(dd, "description", &size,
14760                                 (void **)&dd->boardname);
14761         if (ret) {
14762                 dd_dev_info(dd, "Board description not found\n");
14763                 /* use generic description */
14764                 dd->boardname = kstrdup(generic, GFP_KERNEL);
14765                 if (!dd->boardname)
14766                         return -ENOMEM;
14767         }
14768         return 0;
14769 }
14770 
14771 /*
14772  * Check the interrupt registers to make sure that they are mapped correctly.
14773  * It is intended to help user identify any mismapping by VMM when the driver
14774  * is running in a VM. This function should only be called before interrupt
14775  * is set up properly.
14776  *
14777  * Return 0 on success, -EINVAL on failure.
14778  */
14779 static int check_int_registers(struct hfi1_devdata *dd)
14780 {
14781         u64 reg;
14782         u64 all_bits = ~(u64)0;
14783         u64 mask;
14784 
14785         /* Clear CceIntMask[0] to avoid raising any interrupts */
14786         mask = read_csr(dd, CCE_INT_MASK);
14787         write_csr(dd, CCE_INT_MASK, 0ull);
14788         reg = read_csr(dd, CCE_INT_MASK);
14789         if (reg)
14790                 goto err_exit;
14791 
14792         /* Clear all interrupt status bits */
14793         write_csr(dd, CCE_INT_CLEAR, all_bits);
14794         reg = read_csr(dd, CCE_INT_STATUS);
14795         if (reg)
14796                 goto err_exit;
14797 
14798         /* Set all interrupt status bits */
14799         write_csr(dd, CCE_INT_FORCE, all_bits);
14800         reg = read_csr(dd, CCE_INT_STATUS);
14801         if (reg != all_bits)
14802                 goto err_exit;
14803 
14804         /* Restore the interrupt mask */
14805         write_csr(dd, CCE_INT_CLEAR, all_bits);
14806         write_csr(dd, CCE_INT_MASK, mask);
14807 
14808         return 0;
14809 err_exit:
14810         write_csr(dd, CCE_INT_MASK, mask);
14811         dd_dev_err(dd, "Interrupt registers not properly mapped by VMM\n");
14812         return -EINVAL;
14813 }
14814 
14815 /**
14816  * hfi1_init_dd() - Initialize most of the dd structure.
14817  * @dev: the pci_dev for hfi1_ib device
14818  * @ent: pci_device_id struct for this dev
14819  *
14820  * This is global, and is called directly at init to set up the
14821  * chip-specific function pointers for later use.
14822  */
14823 int hfi1_init_dd(struct hfi1_devdata *dd)
14824 {
14825         struct pci_dev *pdev = dd->pcidev;
14826         struct hfi1_pportdata *ppd;
14827         u64 reg;
14828         int i, ret;
14829         static const char * const inames[] = { /* implementation names */
14830                 "RTL silicon",
14831                 "RTL VCS simulation",
14832                 "RTL FPGA emulation",
14833                 "Functional simulator"
14834         };
14835         struct pci_dev *parent = pdev->bus->self;
14836         u32 sdma_engines = chip_sdma_engines(dd);
14837 
14838         ppd = dd->pport;
14839         for (i = 0; i < dd->num_pports; i++, ppd++) {
14840                 int vl;
14841                 /* init common fields */
14842                 hfi1_init_pportdata(pdev, ppd, dd, 0, 1);
14843                 /* DC supports 4 link widths */
14844                 ppd->link_width_supported =
14845                         OPA_LINK_WIDTH_1X | OPA_LINK_WIDTH_2X |
14846                         OPA_LINK_WIDTH_3X | OPA_LINK_WIDTH_4X;
14847                 ppd->link_width_downgrade_supported =
14848                         ppd->link_width_supported;
14849                 /* start out enabling only 4X */
14850                 ppd->link_width_enabled = OPA_LINK_WIDTH_4X;
14851                 ppd->link_width_downgrade_enabled =
14852                                         ppd->link_width_downgrade_supported;
14853                 /* link width active is 0 when link is down */
14854                 /* link width downgrade active is 0 when link is down */
14855 
14856                 if (num_vls < HFI1_MIN_VLS_SUPPORTED ||
14857                     num_vls > HFI1_MAX_VLS_SUPPORTED) {
14858                         dd_dev_err(dd, "Invalid num_vls %u, using %u VLs\n",
14859                                    num_vls, HFI1_MAX_VLS_SUPPORTED);
14860                         num_vls = HFI1_MAX_VLS_SUPPORTED;
14861                 }
14862                 ppd->vls_supported = num_vls;
14863                 ppd->vls_operational = ppd->vls_supported;
14864                 /* Set the default MTU. */
14865                 for (vl = 0; vl < num_vls; vl++)
14866                         dd->vld[vl].mtu = hfi1_max_mtu;
14867                 dd->vld[15].mtu = MAX_MAD_PACKET;
14868                 /*
14869                  * Set the initial values to reasonable default, will be set
14870                  * for real when link is up.
14871                  */
14872                 ppd->overrun_threshold = 0x4;
14873                 ppd->phy_error_threshold = 0xf;
14874                 ppd->port_crc_mode_enabled = link_crc_mask;
14875                 /* initialize supported LTP CRC mode */
14876                 ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8;
14877                 /* initialize enabled LTP CRC mode */
14878                 ppd->port_ltp_crc_mode |= cap_to_port_ltp(link_crc_mask) << 4;
14879                 /* start in offline */
14880                 ppd->host_link_state = HLS_DN_OFFLINE;
14881                 init_vl_arb_caches(ppd);
14882         }
14883 
14884         /*
14885          * Do remaining PCIe setup and save PCIe values in dd.
14886          * Any error printing is already done by the init code.
14887          * On return, we have the chip mapped.
14888          */
14889         ret = hfi1_pcie_ddinit(dd, pdev);
14890         if (ret < 0)
14891                 goto bail_free;
14892 
14893         /* Save PCI space registers to rewrite after device reset */
14894         ret = save_pci_variables(dd);
14895         if (ret < 0)
14896                 goto bail_cleanup;
14897 
14898         dd->majrev = (dd->revision >> CCE_REVISION_CHIP_REV_MAJOR_SHIFT)
14899                         & CCE_REVISION_CHIP_REV_MAJOR_MASK;
14900         dd->minrev = (dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT)
14901                         & CCE_REVISION_CHIP_REV_MINOR_MASK;
14902 
14903         /*
14904          * Check interrupt registers mapping if the driver has no access to
14905          * the upstream component. In this case, it is likely that the driver
14906          * is running in a VM.
14907          */
14908         if (!parent) {
14909                 ret = check_int_registers(dd);
14910                 if (ret)
14911                         goto bail_cleanup;
14912         }
14913 
14914         /*
14915          * obtain the hardware ID - NOT related to unit, which is a
14916          * software enumeration
14917          */
14918         reg = read_csr(dd, CCE_REVISION2);
14919         dd->hfi1_id = (reg >> CCE_REVISION2_HFI_ID_SHIFT)
14920                                         & CCE_REVISION2_HFI_ID_MASK;
14921         /* the variable size will remove unwanted bits */
14922         dd->icode = reg >> CCE_REVISION2_IMPL_CODE_SHIFT;
14923         dd->irev = reg >> CCE_REVISION2_IMPL_REVISION_SHIFT;
14924         dd_dev_info(dd, "Implementation: %s, revision 0x%x\n",
14925                     dd->icode < ARRAY_SIZE(inames) ?
14926                     inames[dd->icode] : "unknown", (int)dd->irev);
14927 
14928         /* speeds the hardware can support */
14929         dd->pport->link_speed_supported = OPA_LINK_SPEED_25G;
14930         /* speeds allowed to run at */
14931         dd->pport->link_speed_enabled = dd->pport->link_speed_supported;
14932         /* give a reasonable active value, will be set on link up */
14933         dd->pport->link_speed_active = OPA_LINK_SPEED_25G;
14934 
14935         /* fix up link widths for emulation _p */
14936         ppd = dd->pport;
14937         if (dd->icode == ICODE_FPGA_EMULATION && is_emulator_p(dd)) {
14938                 ppd->link_width_supported =
14939                         ppd->link_width_enabled =
14940                         ppd->link_width_downgrade_supported =
14941                         ppd->link_width_downgrade_enabled =
14942                                 OPA_LINK_WIDTH_1X;
14943         }
14944         /* insure num_vls isn't larger than number of sdma engines */
14945         if (HFI1_CAP_IS_KSET(SDMA) && num_vls > sdma_engines) {
14946                 dd_dev_err(dd, "num_vls %u too large, using %u VLs\n",
14947                            num_vls, sdma_engines);
14948                 num_vls = sdma_engines;
14949                 ppd->vls_supported = sdma_engines;
14950                 ppd->vls_operational = ppd->vls_supported;
14951         }
14952 
14953         /*
14954          * Convert the ns parameter to the 64 * cclocks used in the CSR.
14955          * Limit the max if larger than the field holds.  If timeout is
14956          * non-zero, then the calculated field will be at least 1.
14957          *
14958          * Must be after icode is set up - the cclock rate depends
14959          * on knowing the hardware being used.
14960          */
14961         dd->rcv_intr_timeout_csr = ns_to_cclock(dd, rcv_intr_timeout) / 64;
14962         if (dd->rcv_intr_timeout_csr >
14963                         RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK)
14964                 dd->rcv_intr_timeout_csr =
14965                         RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK;
14966         else if (dd->rcv_intr_timeout_csr == 0 && rcv_intr_timeout)
14967                 dd->rcv_intr_timeout_csr = 1;
14968 
14969         /* needs to be done before we look for the peer device */
14970         read_guid(dd);
14971 
14972         /* set up shared ASIC data with peer device */
14973         ret = init_asic_data(dd);
14974         if (ret)
14975                 goto bail_cleanup;
14976 
14977         /* obtain chip sizes, reset chip CSRs */
14978         ret = init_chip(dd);
14979         if (ret)
14980                 goto bail_cleanup;
14981 
14982         /* read in the PCIe link speed information */
14983         ret = pcie_speeds(dd);
14984         if (ret)
14985                 goto bail_cleanup;
14986 
14987         /* call before get_platform_config(), after init_chip_resources() */
14988         ret = eprom_init(dd);
14989         if (ret)
14990                 goto bail_free_rcverr;
14991 
14992         /* Needs to be called before hfi1_firmware_init */
14993         get_platform_config(dd);
14994 
14995         /* read in firmware */
14996         ret = hfi1_firmware_init(dd);
14997         if (ret)
14998                 goto bail_cleanup;
14999 
15000         /*
15001          * In general, the PCIe Gen3 transition must occur after the
15002          * chip has been idled (so it won't initiate any PCIe transactions
15003          * e.g. an interrupt) and before the driver changes any registers
15004          * (the transition will reset the registers).
15005          *
15006          * In particular, place this call after:
15007          * - init_chip()     - the chip will not initiate any PCIe transactions
15008          * - pcie_speeds()   - reads the current link speed
15009          * - hfi1_firmware_init() - the needed firmware is ready to be
15010          *                          downloaded
15011          */
15012         ret = do_pcie_gen3_transition(dd);
15013         if (ret)
15014                 goto bail_cleanup;
15015 
15016         /*
15017          * This should probably occur in hfi1_pcie_init(), but historically
15018          * occurs after the do_pcie_gen3_transition() code.
15019          */
15020         tune_pcie_caps(dd);
15021 
15022         /* start setting dd values and adjusting CSRs */
15023         init_early_variables(dd);
15024 
15025         parse_platform_config(dd);
15026 
15027         ret = obtain_boardname(dd);
15028         if (ret)
15029                 goto bail_cleanup;
15030 
15031         snprintf(dd->boardversion, BOARD_VERS_MAX,
15032                  "ChipABI %u.%u, ChipRev %u.%u, SW Compat %llu\n",
15033                  HFI1_CHIP_VERS_MAJ, HFI1_CHIP_VERS_MIN,
15034                  (u32)dd->majrev,
15035                  (u32)dd->minrev,
15036                  (dd->revision >> CCE_REVISION_SW_SHIFT)
15037                     & CCE_REVISION_SW_MASK);
15038 
15039         ret = set_up_context_variables(dd);
15040         if (ret)
15041                 goto bail_cleanup;
15042 
15043         /* set initial RXE CSRs */
15044         ret = init_rxe(dd);
15045         if (ret)
15046                 goto bail_cleanup;
15047 
15048         /* set initial TXE CSRs */
15049         init_txe(dd);
15050         /* set initial non-RXE, non-TXE CSRs */
15051         init_other(dd);
15052         /* set up KDETH QP prefix in both RX and TX CSRs */
15053         init_kdeth_qp(dd);
15054 
15055         ret = hfi1_dev_affinity_init(dd);
15056         if (ret)
15057                 goto bail_cleanup;
15058 
15059         /* send contexts must be set up before receive contexts */
15060         ret = init_send_contexts(dd);
15061         if (ret)
15062                 goto bail_cleanup;
15063 
15064         ret = hfi1_create_kctxts(dd);
15065         if (ret)
15066                 goto bail_cleanup;
15067 
15068         /*
15069          * Initialize aspm, to be done after gen3 transition and setting up
15070          * contexts and before enabling interrupts
15071          */
15072         aspm_init(dd);
15073 
15074         ret = init_pervl_scs(dd);
15075         if (ret)
15076                 goto bail_cleanup;
15077 
15078         /* sdma init */
15079         for (i = 0; i < dd->num_pports; ++i) {
15080                 ret = sdma_init(dd, i);
15081                 if (ret)
15082                         goto bail_cleanup;
15083         }
15084 
15085         /* use contexts created by hfi1_create_kctxts */
15086         ret = set_up_interrupts(dd);
15087         if (ret)
15088                 goto bail_cleanup;
15089 
15090         ret = hfi1_comp_vectors_set_up(dd);
15091         if (ret)
15092                 goto bail_clear_intr;
15093 
15094         /* set up LCB access - must be after set_up_interrupts() */
15095         init_lcb_access(dd);
15096 
15097         /*
15098          * Serial number is created from the base guid:
15099          * [27:24] = base guid [38:35]
15100          * [23: 0] = base guid [23: 0]
15101          */
15102         snprintf(dd->serial, SERIAL_MAX, "0x%08llx\n",
15103                  (dd->base_guid & 0xFFFFFF) |
15104                      ((dd->base_guid >> 11) & 0xF000000));
15105 
15106         dd->oui1 = dd->base_guid >> 56 & 0xFF;
15107         dd->oui2 = dd->base_guid >> 48 & 0xFF;
15108         dd->oui3 = dd->base_guid >> 40 & 0xFF;
15109 
15110         ret = load_firmware(dd); /* asymmetric with dispose_firmware() */
15111         if (ret)
15112                 goto bail_clear_intr;
15113 
15114         thermal_init(dd);
15115 
15116         ret = init_cntrs(dd);
15117         if (ret)
15118                 goto bail_clear_intr;
15119 
15120         ret = init_rcverr(dd);
15121         if (ret)
15122                 goto bail_free_cntrs;
15123 
15124         init_completion(&dd->user_comp);
15125 
15126         /* The user refcount starts with one to inidicate an active device */
15127         atomic_set(&dd->user_refcount, 1);
15128 
15129         goto bail;
15130 
15131 bail_free_rcverr:
15132         free_rcverr(dd);
15133 bail_free_cntrs:
15134         free_cntrs(dd);
15135 bail_clear_intr:
15136         hfi1_comp_vectors_clean_up(dd);
15137         msix_clean_up_interrupts(dd);
15138 bail_cleanup:
15139         hfi1_pcie_ddcleanup(dd);
15140 bail_free:
15141         hfi1_free_devdata(dd);
15142 bail:
15143         return ret;
15144 }
15145 
15146 static u16 delay_cycles(struct hfi1_pportdata *ppd, u32 desired_egress_rate,
15147                         u32 dw_len)
15148 {
15149         u32 delta_cycles;
15150         u32 current_egress_rate = ppd->current_egress_rate;
15151         /* rates here are in units of 10^6 bits/sec */
15152 
15153         if (desired_egress_rate == -1)
15154                 return 0; /* shouldn't happen */
15155 
15156         if (desired_egress_rate >= current_egress_rate)
15157                 return 0; /* we can't help go faster, only slower */
15158 
15159         delta_cycles = egress_cycles(dw_len * 4, desired_egress_rate) -
15160                         egress_cycles(dw_len * 4, current_egress_rate);
15161 
15162         return (u16)delta_cycles;
15163 }
15164 
15165 /**
15166  * create_pbc - build a pbc for transmission
15167  * @flags: special case flags or-ed in built pbc
15168  * @srate: static rate
15169  * @vl: vl
15170  * @dwlen: dword length (header words + data words + pbc words)
15171  *
15172  * Create a PBC with the given flags, rate, VL, and length.
15173  *
15174  * NOTE: The PBC created will not insert any HCRC - all callers but one are
15175  * for verbs, which does not use this PSM feature.  The lone other caller
15176  * is for the diagnostic interface which calls this if the user does not
15177  * supply their own PBC.
15178  */
15179 u64 create_pbc(struct hfi1_pportdata *ppd, u64 flags, int srate_mbs, u32 vl,
15180                u32 dw_len)
15181 {
15182         u64 pbc, delay = 0;
15183 
15184         if (unlikely(srate_mbs))
15185                 delay = delay_cycles(ppd, srate_mbs, dw_len);
15186 
15187         pbc = flags
15188                 | (delay << PBC_STATIC_RATE_CONTROL_COUNT_SHIFT)
15189                 | ((u64)PBC_IHCRC_NONE << PBC_INSERT_HCRC_SHIFT)
15190                 | (vl & PBC_VL_MASK) << PBC_VL_SHIFT
15191                 | (dw_len & PBC_LENGTH_DWS_MASK)
15192                         << PBC_LENGTH_DWS_SHIFT;
15193 
15194         return pbc;
15195 }
15196 
15197 #define SBUS_THERMAL    0x4f
15198 #define SBUS_THERM_MONITOR_MODE 0x1
15199 
15200 #define THERM_FAILURE(dev, ret, reason) \
15201         dd_dev_err((dd),                                                \
15202                    "Thermal sensor initialization failed: %s (%d)\n",   \
15203                    (reason), (ret))
15204 
15205 /*
15206  * Initialize the thermal sensor.
15207  *
15208  * After initialization, enable polling of thermal sensor through
15209  * SBus interface. In order for this to work, the SBus Master
15210  * firmware has to be loaded due to the fact that the HW polling
15211  * logic uses SBus interrupts, which are not supported with
15212  * default firmware. Otherwise, no data will be returned through
15213  * the ASIC_STS_THERM CSR.
15214  */
15215 static int thermal_init(struct hfi1_devdata *dd)
15216 {
15217         int ret = 0;
15218 
15219         if (dd->icode != ICODE_RTL_SILICON ||
15220             check_chip_resource(dd, CR_THERM_INIT, NULL))
15221                 return ret;
15222 
15223         ret = acquire_chip_resource(dd, CR_SBUS, SBUS_TIMEOUT);
15224         if (ret) {
15225                 THERM_FAILURE(dd, ret, "Acquire SBus");
15226                 return ret;
15227         }
15228 
15229         dd_dev_info(dd, "Initializing thermal sensor\n");
15230         /* Disable polling of thermal readings */
15231         write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x0);
15232         msleep(100);
15233         /* Thermal Sensor Initialization */
15234         /*    Step 1: Reset the Thermal SBus Receiver */
15235         ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
15236                                 RESET_SBUS_RECEIVER, 0);
15237         if (ret) {
15238                 THERM_FAILURE(dd, ret, "Bus Reset");
15239                 goto done;
15240         }
15241         /*    Step 2: Set Reset bit in Thermal block */
15242         ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
15243                                 WRITE_SBUS_RECEIVER, 0x1);
15244         if (ret) {
15245                 THERM_FAILURE(dd, ret, "Therm Block Reset");
15246                 goto done;
15247         }
15248         /*    Step 3: Write clock divider value (100MHz -> 2MHz) */
15249         ret = sbus_request_slow(dd, SBUS_THERMAL, 0x1,
15250                                 WRITE_SBUS_RECEIVER, 0x32);
15251         if (ret) {
15252                 THERM_FAILURE(dd, ret, "Write Clock Div");
15253                 goto done;
15254         }
15255         /*    Step 4: Select temperature mode */
15256         ret = sbus_request_slow(dd, SBUS_THERMAL, 0x3,
15257                                 WRITE_SBUS_RECEIVER,
15258                                 SBUS_THERM_MONITOR_MODE);
15259         if (ret) {
15260                 THERM_FAILURE(dd, ret, "Write Mode Sel");
15261                 goto done;
15262         }
15263         /*    Step 5: De-assert block reset and start conversion */
15264         ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
15265                                 WRITE_SBUS_RECEIVER, 0x2);
15266         if (ret) {
15267                 THERM_FAILURE(dd, ret, "Write Reset Deassert");
15268                 goto done;
15269         }
15270         /*    Step 5.1: Wait for first conversion (21.5ms per spec) */
15271         msleep(22);
15272 
15273         /* Enable polling of thermal readings */
15274         write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x1);
15275 
15276         /* Set initialized flag */
15277         ret = acquire_chip_resource(dd, CR_THERM_INIT, 0);
15278         if (ret)
15279                 THERM_FAILURE(dd, ret, "Unable to set thermal init flag");
15280 
15281 done:
15282         release_chip_resource(dd, CR_SBUS);
15283         return ret;
15284 }
15285 
15286 static void handle_temp_err(struct hfi1_devdata *dd)
15287 {
15288         struct hfi1_pportdata *ppd = &dd->pport[0];
15289         /*
15290          * Thermal Critical Interrupt
15291          * Put the device into forced freeze mode, take link down to
15292          * offline, and put DC into reset.
15293          */
15294         dd_dev_emerg(dd,
15295                      "Critical temperature reached! Forcing device into freeze mode!\n");
15296         dd->flags |= HFI1_FORCED_FREEZE;
15297         start_freeze_handling(ppd, FREEZE_SELF | FREEZE_ABORT);
15298         /*
15299          * Shut DC down as much and as quickly as possible.
15300          *
15301          * Step 1: Take the link down to OFFLINE. This will cause the
15302          *         8051 to put the Serdes in reset. However, we don't want to
15303          *         go through the entire link state machine since we want to
15304          *         shutdown ASAP. Furthermore, this is not a graceful shutdown
15305          *         but rather an attempt to save the chip.
15306          *         Code below is almost the same as quiet_serdes() but avoids
15307          *         all the extra work and the sleeps.
15308          */
15309         ppd->driver_link_ready = 0;
15310         ppd->link_enabled = 0;
15311         set_physical_link_state(dd, (OPA_LINKDOWN_REASON_SMA_DISABLED << 8) |
15312                                 PLS_OFFLINE);
15313         /*
15314          * Step 2: Shutdown LCB and 8051
15315          *         After shutdown, do not restore DC_CFG_RESET value.
15316          */
15317         dc_shutdown(dd);
15318 }

/* [<][>][^][v][top][bottom][index][help] */