Searched refs:p_hwfn (Results 1 - 21 of 21) sorted by relevance

/linux-4.4.14/drivers/net/ethernet/qlogic/qed/
qed_dev.c
43 struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; qed_init_dp() local
45 p_hwfn->dp_level = dp_level; qed_init_dp()
46 p_hwfn->dp_module = dp_module; qed_init_dp()
55 struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; qed_init_struct() local
57 p_hwfn->cdev = cdev; qed_init_struct()
58 p_hwfn->my_id = i; qed_init_struct()
59 p_hwfn->b_active = false; qed_init_struct()
61 mutex_init(&p_hwfn->dmae_info.mutex); qed_init_struct()
71 static void qed_qm_info_free(struct qed_hwfn *p_hwfn) qed_qm_info_free() argument
73 struct qed_qm_info *qm_info = &p_hwfn->qm_info; qed_qm_info_free()
93 struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; for_each_hwfn() local
95 kfree(p_hwfn->p_tx_cids); for_each_hwfn()
96 p_hwfn->p_tx_cids = NULL; for_each_hwfn()
97 kfree(p_hwfn->p_rx_cids); for_each_hwfn()
98 p_hwfn->p_rx_cids = NULL; for_each_hwfn()
102 struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; for_each_hwfn() local
104 qed_cxt_mngr_free(p_hwfn); for_each_hwfn()
105 qed_qm_info_free(p_hwfn); for_each_hwfn()
106 qed_spq_free(p_hwfn); for_each_hwfn()
107 qed_eq_free(p_hwfn, p_hwfn->p_eq); for_each_hwfn()
108 qed_consq_free(p_hwfn, p_hwfn->p_consq); for_each_hwfn()
109 qed_int_free(p_hwfn); for_each_hwfn()
110 qed_dmae_info_free(p_hwfn); for_each_hwfn()
114 static int qed_init_qm_info(struct qed_hwfn *p_hwfn) qed_init_qm_info() argument
116 struct qed_qm_info *qm_info = &p_hwfn->qm_info; qed_init_qm_info()
124 num_vports = (u8)RESC_NUM(p_hwfn, QED_VPORT); qed_init_qm_info()
127 if (num_pqs > RESC_NUM(p_hwfn, QED_PQ)) { qed_init_qm_info()
128 DP_ERR(p_hwfn, qed_init_qm_info()
130 num_pqs, RESC_NUM(p_hwfn, QED_PQ)); qed_init_qm_info()
151 vport_id = (u8)RESC_START(p_hwfn, QED_VPORT); qed_init_qm_info()
158 params->tc_id = p_hwfn->hw_info.non_offload_tc; qed_init_qm_info()
164 qm_info->qm_pq_params[i].vport_id = (u8)RESC_START(p_hwfn, QED_VPORT); qed_init_qm_info()
174 num_ports = p_hwfn->cdev->num_ports_in_engines; qed_init_qm_info()
185 qm_info->start_pq = (u16)RESC_START(p_hwfn, QED_PQ); qed_init_qm_info()
187 qm_info->start_vport = (u8)RESC_START(p_hwfn, QED_VPORT); qed_init_qm_info()
196 DP_NOTICE(p_hwfn, "Failed to allocate memory for QM params\n"); qed_init_qm_info()
216 struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; for_each_hwfn() local
218 RESC_NUM(p_hwfn, QED_L2_QUEUE); for_each_hwfn()
220 RESC_NUM(p_hwfn, QED_L2_QUEUE); for_each_hwfn()
222 p_hwfn->p_tx_cids = kzalloc(tx_size, GFP_KERNEL); for_each_hwfn()
223 if (!p_hwfn->p_tx_cids) { for_each_hwfn()
224 DP_NOTICE(p_hwfn, for_each_hwfn()
230 p_hwfn->p_rx_cids = kzalloc(rx_size, GFP_KERNEL); for_each_hwfn()
231 if (!p_hwfn->p_rx_cids) { for_each_hwfn()
232 DP_NOTICE(p_hwfn, for_each_hwfn()
240 struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; for_each_hwfn() local
243 rc = qed_cxt_mngr_alloc(p_hwfn); for_each_hwfn()
250 rc = qed_cxt_set_pf_params(p_hwfn); for_each_hwfn()
255 rc = qed_init_qm_info(p_hwfn); for_each_hwfn()
260 rc = qed_cxt_cfg_ilt_compute(p_hwfn); for_each_hwfn()
267 rc = qed_cxt_tables_alloc(p_hwfn); for_each_hwfn()
272 rc = qed_spq_alloc(p_hwfn); for_each_hwfn()
277 p_hwfn->p_dpc_ptt = qed_get_reserved_ptt(p_hwfn, for_each_hwfn()
280 rc = qed_int_alloc(p_hwfn, p_hwfn->p_main_ptt); for_each_hwfn()
285 p_eq = qed_eq_alloc(p_hwfn, 256); for_each_hwfn()
290 p_hwfn->p_eq = p_eq; for_each_hwfn()
292 p_consq = qed_consq_alloc(p_hwfn); for_each_hwfn()
297 p_hwfn->p_consq = p_consq; for_each_hwfn()
300 rc = qed_dmae_info_alloc(p_hwfn); for_each_hwfn()
302 DP_NOTICE(p_hwfn, for_each_hwfn()
327 struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; for_each_hwfn() local
329 qed_cxt_mngr_setup(p_hwfn); for_each_hwfn()
330 qed_spq_setup(p_hwfn); for_each_hwfn()
331 qed_eq_setup(p_hwfn, p_hwfn->p_eq); for_each_hwfn()
332 qed_consq_setup(p_hwfn, p_hwfn->p_consq); for_each_hwfn()
335 qed_mcp_read_mb(p_hwfn, p_hwfn->p_main_ptt); for_each_hwfn()
336 memcpy(p_hwfn->mcp_info->mfw_mb_shadow, for_each_hwfn()
337 p_hwfn->mcp_info->mfw_mb_cur, for_each_hwfn()
338 p_hwfn->mcp_info->mfw_mb_length); for_each_hwfn()
340 qed_int_setup(p_hwfn, p_hwfn->p_main_ptt); for_each_hwfn()
351 int qed_final_cleanup(struct qed_hwfn *p_hwfn, qed_final_cleanup() argument
366 if (REG_RD(p_hwfn, addr)) { qed_final_cleanup()
368 p_hwfn, qed_final_cleanup()
370 REG_WR(p_hwfn, addr, 0); qed_final_cleanup()
373 DP_VERBOSE(p_hwfn, QED_MSG_IOV, qed_final_cleanup()
377 qed_wr(p_hwfn, p_ptt, XSDM_REG_OPERATION_GEN, command); qed_final_cleanup()
380 while (!REG_RD(p_hwfn, addr) && count--) qed_final_cleanup()
383 if (REG_RD(p_hwfn, addr)) qed_final_cleanup()
386 DP_NOTICE(p_hwfn, qed_final_cleanup()
390 REG_WR(p_hwfn, addr, 0); qed_final_cleanup()
395 static void qed_calc_hw_mode(struct qed_hwfn *p_hwfn) qed_calc_hw_mode() argument
401 switch (p_hwfn->cdev->num_ports_in_engines) { qed_calc_hw_mode()
412 DP_NOTICE(p_hwfn, "num_ports_in_engine = %d not supported\n", qed_calc_hw_mode()
413 p_hwfn->cdev->num_ports_in_engines); qed_calc_hw_mode()
417 switch (p_hwfn->cdev->mf_mode) { qed_calc_hw_mode()
428 DP_NOTICE(p_hwfn, "Unsupported MF mode, init as SF\n"); qed_calc_hw_mode()
434 p_hwfn->hw_info.hw_mode = hw_mode; qed_calc_hw_mode()
444 struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; for_each_hwfn() local
449 p_igu_info = p_hwfn->hw_info.p_igu_info; for_each_hwfn()
457 qed_init_cau_sb_entry(p_hwfn, &sb_entry, for_each_hwfn()
460 STORE_RT_REG_AGG(p_hwfn, offset + sb_id * 2, for_each_hwfn()
466 static int qed_hw_init_common(struct qed_hwfn *p_hwfn, qed_hw_init_common() argument
470 struct qed_qm_info *qm_info = &p_hwfn->qm_info; qed_hw_init_common()
472 struct qed_dev *cdev = p_hwfn->cdev; qed_hw_init_common()
478 qed_gtt_init(p_hwfn); qed_hw_init_common()
480 if (p_hwfn->mcp_info) { qed_hw_init_common()
481 if (p_hwfn->mcp_info->func_info.bandwidth_max) qed_hw_init_common()
483 if (p_hwfn->mcp_info->func_info.bandwidth_min) qed_hw_init_common()
488 params.max_ports_per_engine = p_hwfn->cdev->num_ports_in_engines; qed_hw_init_common()
496 qed_qm_common_rt_init(p_hwfn, &params); qed_hw_init_common()
498 qed_cxt_hw_init_common(p_hwfn); qed_hw_init_common()
505 qed_wr(p_hwfn, p_ptt, NIG_REG_RX_BRB_OUT_EN, 0); qed_hw_init_common()
506 qed_wr(p_hwfn, p_ptt, NIG_REG_STORM_OUT_EN, 0); qed_hw_init_common()
507 qed_port_pretend(p_hwfn, p_ptt, p_hwfn->port_id ^ 1); qed_hw_init_common()
508 qed_wr(p_hwfn, p_ptt, NIG_REG_RX_BRB_OUT_EN, 0); qed_hw_init_common()
509 qed_wr(p_hwfn, p_ptt, NIG_REG_STORM_OUT_EN, 0); qed_hw_init_common()
510 qed_port_unpretend(p_hwfn, p_ptt); qed_hw_init_common()
512 rc = qed_init_run(p_hwfn, p_ptt, PHASE_ENGINE, ANY_PHASE_ID, hw_mode); qed_hw_init_common()
516 qed_wr(p_hwfn, p_ptt, PSWRQ2_REG_L2P_VALIDATE_VFID, 0); qed_hw_init_common()
517 qed_wr(p_hwfn, p_ptt, PGLUE_B_REG_USE_CLIENTID_IN_TAG, 1); qed_hw_init_common()
520 qed_wr(p_hwfn, p_ptt, 0x20b4, qed_hw_init_common()
521 qed_rd(p_hwfn, p_ptt, 0x20b4) & ~0x10); qed_hw_init_common()
526 static int qed_hw_init_port(struct qed_hwfn *p_hwfn, qed_hw_init_port() argument
532 rc = qed_init_run(p_hwfn, p_ptt, PHASE_PORT, p_hwfn->port_id, qed_hw_init_port()
537 static int qed_hw_init_pf(struct qed_hwfn *p_hwfn, qed_hw_init_pf() argument
544 u8 rel_pf_id = p_hwfn->rel_pf_id; qed_hw_init_pf()
547 if (p_hwfn->mcp_info) { qed_hw_init_pf()
550 p_info = &p_hwfn->mcp_info->func_info; qed_hw_init_pf()
552 p_hwfn->qm_info.pf_wfq = p_info->bandwidth_min; qed_hw_init_pf()
555 p_hwfn->qm_info.pf_rl = 100; qed_hw_init_pf()
558 qed_cxt_hw_init_pf(p_hwfn); qed_hw_init_pf()
560 qed_int_igu_init_rt(p_hwfn); qed_hw_init_pf()
564 DP_VERBOSE(p_hwfn, NETIF_MSG_HW, "Configuring LLH_FUNC_TAG\n"); qed_hw_init_pf()
565 STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET, 1); qed_hw_init_pf()
566 STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET, qed_hw_init_pf()
567 p_hwfn->hw_info.ovlan); qed_hw_init_pf()
572 DP_VERBOSE(p_hwfn, NETIF_MSG_HW, qed_hw_init_pf()
574 STORE_RT_REG(p_hwfn, qed_hw_init_pf()
579 STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_TCP_RT_OFFSET, 0); qed_hw_init_pf()
580 STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_FCOE_RT_OFFSET, 0); qed_hw_init_pf()
581 STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_ROCE_RT_OFFSET, 0); qed_hw_init_pf()
584 rc = qed_final_cleanup(p_hwfn, p_ptt, rel_pf_id); qed_hw_init_pf()
589 rc = qed_init_run(p_hwfn, p_ptt, PHASE_PF, rel_pf_id, hw_mode); qed_hw_init_pf()
594 rc = qed_init_run(p_hwfn, p_ptt, PHASE_QM_PF, rel_pf_id, hw_mode); qed_hw_init_pf()
599 qed_int_igu_init_pure_rt(p_hwfn, p_ptt, true, true); qed_hw_init_pf()
603 qed_int_igu_enable(p_hwfn, p_ptt, int_mode); qed_hw_init_pf()
606 rc = qed_sp_pf_start(p_hwfn, p_hwfn->cdev->mf_mode); qed_hw_init_pf()
608 DP_NOTICE(p_hwfn, "Function start ramrod failed\n"); qed_hw_init_pf()
613 static int qed_change_pci_hwfn(struct qed_hwfn *p_hwfn, qed_change_pci_hwfn() argument
620 qed_wr(p_hwfn, p_ptt, qed_change_pci_hwfn()
625 val = qed_rd(p_hwfn, p_ptt, qed_change_pci_hwfn()
634 DP_NOTICE(p_hwfn, qed_change_pci_hwfn()
642 static void qed_reset_mb_shadow(struct qed_hwfn *p_hwfn, qed_reset_mb_shadow() argument
646 qed_mcp_read_mb(p_hwfn, p_main_ptt); qed_reset_mb_shadow()
647 memcpy(p_hwfn->mcp_info->mfw_mb_shadow, qed_reset_mb_shadow()
648 p_hwfn->mcp_info->mfw_mb_cur, qed_reset_mb_shadow()
649 p_hwfn->mcp_info->mfw_mb_length); qed_reset_mb_shadow()
668 struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; for_each_hwfn() local
670 rc = qed_fw_vport(p_hwfn, 0, &fw_vport); for_each_hwfn()
675 rc = qed_change_pci_hwfn(p_hwfn, p_hwfn->p_main_ptt, true); for_each_hwfn()
677 qed_calc_hw_mode(p_hwfn); for_each_hwfn()
679 rc = qed_mcp_load_req(p_hwfn, p_hwfn->p_main_ptt, for_each_hwfn()
682 DP_NOTICE(p_hwfn, "Failed sending LOAD_REQ command\n"); for_each_hwfn()
686 qed_reset_mb_shadow(p_hwfn, p_hwfn->p_main_ptt); for_each_hwfn()
688 DP_VERBOSE(p_hwfn, QED_MSG_SP, for_each_hwfn()
692 p_hwfn->first_on_engine = (load_code == for_each_hwfn()
697 rc = qed_hw_init_common(p_hwfn, p_hwfn->p_main_ptt, for_each_hwfn()
698 p_hwfn->hw_info.hw_mode); for_each_hwfn()
703 rc = qed_hw_init_port(p_hwfn, p_hwfn->p_main_ptt, for_each_hwfn()
704 p_hwfn->hw_info.hw_mode); for_each_hwfn()
710 rc = qed_hw_init_pf(p_hwfn, p_hwfn->p_main_ptt, for_each_hwfn()
711 p_hwfn->hw_info.hw_mode, for_each_hwfn()
721 DP_NOTICE(p_hwfn, for_each_hwfn()
726 mfw_rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt, for_each_hwfn()
732 DP_NOTICE(p_hwfn, "Failed sending LOAD_DONE command\n"); for_each_hwfn()
736 p_hwfn->hw_init_done = true; for_each_hwfn()
739 p_stat = &p_hwfn->storm_stats; for_each_hwfn()
754 TSTORM_PORT_STAT_OFFSET(MFW_PORT(p_hwfn)); for_each_hwfn()
768 struct qed_hwfn *p_hwfn = &cdev->hwfns[j]; for_each_hwfn() local
769 struct qed_ptt *p_ptt = p_hwfn->p_main_ptt; for_each_hwfn()
771 DP_VERBOSE(p_hwfn, NETIF_MSG_IFDOWN, "Stopping hw/fw\n"); for_each_hwfn()
774 p_hwfn->hw_init_done = false; for_each_hwfn()
776 rc = qed_sp_pf_stop(p_hwfn); for_each_hwfn()
780 qed_wr(p_hwfn, p_ptt, for_each_hwfn()
783 qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0); for_each_hwfn()
784 qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP, 0x0); for_each_hwfn()
785 qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE, 0x0); for_each_hwfn()
786 qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0); for_each_hwfn()
787 qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0); for_each_hwfn()
789 qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_CONN, 0x0); for_each_hwfn()
790 qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK, 0x0); for_each_hwfn()
792 if ((!qed_rd(p_hwfn, p_ptt, for_each_hwfn()
794 (!qed_rd(p_hwfn, p_ptt, for_each_hwfn()
801 DP_NOTICE(p_hwfn, for_each_hwfn()
803 (u8)qed_rd(p_hwfn, p_ptt, for_each_hwfn()
805 (u8)qed_rd(p_hwfn, p_ptt, for_each_hwfn()
809 qed_int_igu_disable_int(p_hwfn, p_ptt); for_each_hwfn()
811 qed_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0); for_each_hwfn()
812 qed_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0); for_each_hwfn()
814 qed_int_igu_init_pure_rt(p_hwfn, p_ptt, false, true); for_each_hwfn()
838 struct qed_hwfn *p_hwfn = &cdev->hwfns[j]; for_each_hwfn() local
839 struct qed_ptt *p_ptt = p_hwfn->p_main_ptt; for_each_hwfn()
841 DP_VERBOSE(p_hwfn, for_each_hwfn()
845 qed_wr(p_hwfn, p_ptt, for_each_hwfn()
848 qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0); for_each_hwfn()
849 qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP, 0x0); for_each_hwfn()
850 qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE, 0x0); for_each_hwfn()
851 qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0); for_each_hwfn()
852 qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0); for_each_hwfn()
854 qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_CONN, 0x0); for_each_hwfn()
855 qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK, 0x0); for_each_hwfn()
857 if ((!qed_rd(p_hwfn, p_ptt, for_each_hwfn()
859 (!qed_rd(p_hwfn, p_ptt, for_each_hwfn()
866 DP_NOTICE(p_hwfn, for_each_hwfn()
868 (u8)qed_rd(p_hwfn, p_ptt, for_each_hwfn()
870 (u8)qed_rd(p_hwfn, p_ptt, for_each_hwfn()
873 qed_int_igu_init_pure_rt(p_hwfn, p_ptt, false, false); for_each_hwfn()
880 void qed_hw_start_fastpath(struct qed_hwfn *p_hwfn) qed_hw_start_fastpath() argument
883 qed_wr(p_hwfn, p_hwfn->p_main_ptt, qed_hw_start_fastpath()
909 struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; for_each_hwfn() local
911 DP_VERBOSE(p_hwfn, NETIF_MSG_IFDOWN, "Resetting hw/fw\n"); for_each_hwfn()
914 qed_reg_assert(p_hwfn, p_hwfn->p_main_ptt, for_each_hwfn()
916 qed_reg_assert(p_hwfn, p_hwfn->p_main_ptt, for_each_hwfn()
920 qed_wr(p_hwfn, p_hwfn->p_main_ptt, DORQ_REG_PF_DB_ENABLE, 0); for_each_hwfn()
921 qed_wr(p_hwfn, p_hwfn->p_main_ptt, QM_REG_PF_EN, 0); for_each_hwfn()
922 qed_wr(p_hwfn, p_hwfn->p_main_ptt, for_each_hwfn()
924 qed_wr(p_hwfn, p_hwfn->p_main_ptt, for_each_hwfn()
928 rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt, for_each_hwfn()
933 DP_NOTICE(p_hwfn, "qed_hw_reset: UNLOAD_REQ failed\n"); for_each_hwfn()
937 rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt, for_each_hwfn()
941 DP_NOTICE(p_hwfn, "qed_hw_reset: UNLOAD_DONE failed\n"); for_each_hwfn()
950 static void qed_hw_hwfn_free(struct qed_hwfn *p_hwfn) qed_hw_hwfn_free() argument
952 qed_ptt_pool_free(p_hwfn); qed_hw_hwfn_free()
953 kfree(p_hwfn->hw_info.p_igu_info); qed_hw_hwfn_free()
957 static int qed_hw_hwfn_prepare(struct qed_hwfn *p_hwfn) qed_hw_hwfn_prepare() argument
962 rc = qed_ptt_pool_alloc(p_hwfn); qed_hw_hwfn_prepare()
967 p_hwfn->p_main_ptt = qed_get_reserved_ptt(p_hwfn, RESERVED_PTT_MAIN); qed_hw_hwfn_prepare()
970 qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_88_F0, 0); qed_hw_hwfn_prepare()
971 qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_8C_F0, 0); qed_hw_hwfn_prepare()
972 qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_90_F0, 0); qed_hw_hwfn_prepare()
973 qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_94_F0, 0); qed_hw_hwfn_prepare()
976 qed_wr(p_hwfn, p_hwfn->p_main_ptt, qed_hw_hwfn_prepare()
978 1 << p_hwfn->abs_pf_id); qed_hw_hwfn_prepare()
981 qed_wr(p_hwfn, p_hwfn->p_main_ptt, qed_hw_hwfn_prepare()
987 static void get_function_id(struct qed_hwfn *p_hwfn) get_function_id() argument
990 p_hwfn->hw_info.opaque_fid = (u16)REG_RD(p_hwfn, PXP_PF_ME_OPAQUE_ADDR); get_function_id()
992 p_hwfn->hw_info.concrete_fid = REG_RD(p_hwfn, PXP_PF_ME_CONCRETE_ADDR); get_function_id()
994 p_hwfn->abs_pf_id = (p_hwfn->hw_info.concrete_fid >> 16) & 0xf; get_function_id()
995 p_hwfn->rel_pf_id = GET_FIELD(p_hwfn->hw_info.concrete_fid, get_function_id()
997 p_hwfn->port_id = GET_FIELD(p_hwfn->hw_info.concrete_fid, get_function_id()
1001 static void qed_hw_set_feat(struct qed_hwfn *p_hwfn) qed_hw_set_feat() argument
1003 u32 *feat_num = p_hwfn->hw_info.feat_num; qed_hw_set_feat()
1006 feat_num[QED_PF_L2_QUE] = min_t(u32, RESC_NUM(p_hwfn, QED_SB) / qed_hw_set_feat()
1008 RESC_NUM(p_hwfn, QED_L2_QUEUE)); qed_hw_set_feat()
1009 DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE, qed_hw_set_feat()
1011 feat_num[QED_PF_L2_QUE], RESC_NUM(p_hwfn, QED_SB), qed_hw_set_feat()
1015 static void qed_hw_get_resc(struct qed_hwfn *p_hwfn) qed_hw_get_resc() argument
1017 u32 *resc_start = p_hwfn->hw_info.resc_start; qed_hw_get_resc()
1018 u32 *resc_num = p_hwfn->hw_info.resc_num; qed_hw_get_resc()
1021 num_funcs = IS_MF(p_hwfn) ? MAX_NUM_PFS_BB qed_hw_get_resc()
1022 : p_hwfn->cdev->num_ports_in_engines; qed_hw_get_resc()
1026 qed_int_get_num_sbs(p_hwfn, NULL)); qed_hw_get_resc()
1038 resc_start[i] = resc_num[i] * p_hwfn->rel_pf_id; qed_hw_get_resc()
1040 qed_hw_set_feat(p_hwfn); qed_hw_get_resc()
1042 DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE, qed_hw_get_resc()
1052 p_hwfn->hw_info.resc_num[QED_SB], qed_hw_get_resc()
1053 p_hwfn->hw_info.resc_start[QED_SB], qed_hw_get_resc()
1054 p_hwfn->hw_info.resc_num[QED_L2_QUEUE], qed_hw_get_resc()
1055 p_hwfn->hw_info.resc_start[QED_L2_QUEUE], qed_hw_get_resc()
1056 p_hwfn->hw_info.resc_num[QED_VPORT], qed_hw_get_resc()
1057 p_hwfn->hw_info.resc_start[QED_VPORT], qed_hw_get_resc()
1058 p_hwfn->hw_info.resc_num[QED_PQ], qed_hw_get_resc()
1059 p_hwfn->hw_info.resc_start[QED_PQ], qed_hw_get_resc()
1060 p_hwfn->hw_info.resc_num[QED_RL], qed_hw_get_resc()
1061 p_hwfn->hw_info.resc_start[QED_RL], qed_hw_get_resc()
1062 p_hwfn->hw_info.resc_num[QED_MAC], qed_hw_get_resc()
1063 p_hwfn->hw_info.resc_start[QED_MAC], qed_hw_get_resc()
1064 p_hwfn->hw_info.resc_num[QED_VLAN], qed_hw_get_resc()
1065 p_hwfn->hw_info.resc_start[QED_VLAN], qed_hw_get_resc()
1066 p_hwfn->hw_info.resc_num[QED_ILT], qed_hw_get_resc()
1067 p_hwfn->hw_info.resc_start[QED_ILT]); qed_hw_get_resc()
1070 static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, qed_hw_get_nvm_info() argument
1078 nvm_cfg_addr = qed_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0); qed_hw_get_nvm_info()
1082 DP_NOTICE(p_hwfn, "Shared memory not initialized\n"); qed_hw_get_nvm_info()
1087 nvm_cfg1_offset = qed_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4); qed_hw_get_nvm_info()
1093 p_hwfn->hw_info.vendor_id = qed_rd(p_hwfn, p_ptt, addr) & qed_hw_get_nvm_info()
1100 core_cfg = qed_rd(p_hwfn, p_ptt, addr); qed_hw_get_nvm_info()
1105 p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X40G; qed_hw_get_nvm_info()
1108 p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X50G; qed_hw_get_nvm_info()
1111 p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X100G; qed_hw_get_nvm_info()
1114 p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X10G_F; qed_hw_get_nvm_info()
1117 p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X10G_E; qed_hw_get_nvm_info()
1120 p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X20G; qed_hw_get_nvm_info()
1123 p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X40G; qed_hw_get_nvm_info()
1126 p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X25G; qed_hw_get_nvm_info()
1129 p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X25G; qed_hw_get_nvm_info()
1132 DP_NOTICE(p_hwfn, "Unknown port mode in 0x%08x\n", qed_hw_get_nvm_info()
1138 offsetof(struct nvm_cfg1, func[MCP_PF_ID(p_hwfn)]) + qed_hw_get_nvm_info()
1140 val = qed_rd(p_hwfn, p_ptt, addr); qed_hw_get_nvm_info()
1142 if (IS_MF(p_hwfn)) { qed_hw_get_nvm_info()
1143 p_hwfn->hw_info.device_id = qed_hw_get_nvm_info()
1147 p_hwfn->hw_info.device_id = qed_hw_get_nvm_info()
1153 link = &p_hwfn->mcp_info->link_input; qed_hw_get_nvm_info()
1155 offsetof(struct nvm_cfg1, port[MFW_PORT(p_hwfn)]); qed_hw_get_nvm_info()
1156 link_temp = qed_rd(p_hwfn, p_ptt, qed_hw_get_nvm_info()
1162 p_hwfn->mcp_info->link_capabilities.speed_capabilities = qed_hw_get_nvm_info()
1165 link_temp = qed_rd(p_hwfn, p_ptt, qed_hw_get_nvm_info()
1192 DP_NOTICE(p_hwfn, "Unknown Speed in 0x%08x\n", qed_hw_get_nvm_info()
1206 DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, qed_hw_get_nvm_info()
1216 generic_cont0 = qed_rd(p_hwfn, p_ptt, addr); qed_hw_get_nvm_info()
1223 p_hwfn->cdev->mf_mode = MF_OVLAN; qed_hw_get_nvm_info()
1226 p_hwfn->cdev->mf_mode = MF_NPAR; qed_hw_get_nvm_info()
1229 p_hwfn->cdev->mf_mode = SF; qed_hw_get_nvm_info()
1232 DP_INFO(p_hwfn, "Multi function mode is %08x\n", qed_hw_get_nvm_info()
1233 p_hwfn->cdev->mf_mode); qed_hw_get_nvm_info()
1235 return qed_mcp_fill_shmem_func_info(p_hwfn, p_ptt); qed_hw_get_nvm_info()
1239 qed_get_hw_info(struct qed_hwfn *p_hwfn, qed_get_hw_info() argument
1247 port_mode = qed_rd(p_hwfn, p_ptt, qed_get_hw_info()
1251 p_hwfn->cdev->num_ports_in_engines = 1; qed_get_hw_info()
1253 p_hwfn->cdev->num_ports_in_engines = 2; qed_get_hw_info()
1255 DP_NOTICE(p_hwfn, "PORT MODE: %d not supported\n", qed_get_hw_info()
1256 p_hwfn->cdev->num_ports_in_engines); qed_get_hw_info()
1259 p_hwfn->cdev->num_ports_in_engines = 1; qed_get_hw_info()
1262 qed_hw_get_nvm_info(p_hwfn, p_ptt); qed_get_hw_info()
1264 rc = qed_int_igu_read_cam(p_hwfn, p_ptt); qed_get_hw_info()
1268 if (qed_mcp_is_init(p_hwfn)) qed_get_hw_info()
1269 ether_addr_copy(p_hwfn->hw_info.hw_mac_addr, qed_get_hw_info()
1270 p_hwfn->mcp_info->func_info.mac); qed_get_hw_info()
1272 eth_random_addr(p_hwfn->hw_info.hw_mac_addr); qed_get_hw_info()
1274 if (qed_mcp_is_init(p_hwfn)) { qed_get_hw_info()
1275 if (p_hwfn->mcp_info->func_info.ovlan != QED_MCP_VLAN_UNSET) qed_get_hw_info()
1276 p_hwfn->hw_info.ovlan = qed_get_hw_info()
1277 p_hwfn->mcp_info->func_info.ovlan; qed_get_hw_info()
1279 qed_mcp_cmd_port_init(p_hwfn, p_ptt); qed_get_hw_info()
1282 if (qed_mcp_is_init(p_hwfn)) { qed_get_hw_info()
1285 protocol = p_hwfn->mcp_info->func_info.protocol; qed_get_hw_info()
1286 p_hwfn->hw_info.personality = protocol; qed_get_hw_info()
1289 qed_hw_get_resc(p_hwfn); qed_get_hw_info()
1328 static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn, qed_hw_prepare_single() argument
1336 p_hwfn->regview = p_regview; qed_hw_prepare_single()
1337 p_hwfn->doorbells = p_doorbells; qed_hw_prepare_single()
1340 if (REG_RD(p_hwfn, PXP_PF_ME_OPAQUE_ADDR) == 0xffffffff) { qed_hw_prepare_single()
1341 DP_ERR(p_hwfn, qed_hw_prepare_single()
1346 get_function_id(p_hwfn); qed_hw_prepare_single()
1348 rc = qed_hw_hwfn_prepare(p_hwfn); qed_hw_prepare_single()
1350 DP_NOTICE(p_hwfn, "Failed to prepare hwfn's hw\n"); qed_hw_prepare_single()
1355 if (!p_hwfn->my_id) qed_hw_prepare_single()
1356 qed_get_dev_info(p_hwfn->cdev); qed_hw_prepare_single()
1359 rc = qed_mcp_cmd_init(p_hwfn, p_hwfn->p_main_ptt); qed_hw_prepare_single()
1361 DP_NOTICE(p_hwfn, "Failed initializing mcp command\n"); qed_hw_prepare_single()
1366 rc = qed_get_hw_info(p_hwfn, p_hwfn->p_main_ptt, personality); qed_hw_prepare_single()
1368 DP_NOTICE(p_hwfn, "Failed to get HW information\n"); qed_hw_prepare_single()
1373 rc = qed_init_alloc(p_hwfn); qed_hw_prepare_single()
1375 DP_NOTICE(p_hwfn, "Failed to allocate the init array\n"); qed_hw_prepare_single()
1381 qed_mcp_free(p_hwfn); qed_hw_prepare_single()
1383 qed_hw_hwfn_free(p_hwfn); qed_hw_prepare_single()
1388 static u32 qed_hw_bar_size(struct qed_hwfn *p_hwfn, qed_hw_bar_size() argument
1393 u32 val = qed_rd(p_hwfn, p_hwfn->p_main_ptt, bar_reg); qed_hw_bar_size()
1402 struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); qed_hw_prepare() local
1409 rc = qed_hw_prepare_single(p_hwfn, qed_hw_prepare()
1415 personality = p_hwfn->hw_info.personality; qed_hw_prepare()
1423 addr = cdev->regview + qed_hw_bar_size(p_hwfn, 0) / 2; qed_hw_prepare()
1427 addr = cdev->doorbells + qed_hw_bar_size(p_hwfn, 1) / 2; qed_hw_prepare()
1438 qed_init_free(p_hwfn); qed_hw_prepare()
1439 qed_mcp_free(p_hwfn); qed_hw_prepare()
1440 qed_hw_hwfn_free(p_hwfn); qed_hw_prepare()
1452 struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; for_each_hwfn() local
1454 qed_init_free(p_hwfn); for_each_hwfn()
1455 qed_hw_hwfn_free(p_hwfn); for_each_hwfn()
1456 qed_mcp_free(p_hwfn); for_each_hwfn()
1547 struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; for_each_hwfn() local
1553 struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn); for_each_hwfn()
1556 DP_ERR(p_hwfn, "Failed to acquire ptt\n"); for_each_hwfn()
1561 qed_memcpy_from(p_hwfn, p_ptt, &mstats, for_each_hwfn()
1562 p_hwfn->storm_stats.mstats.address, for_each_hwfn()
1563 p_hwfn->storm_stats.mstats.len); for_each_hwfn()
1566 qed_memcpy_from(p_hwfn, p_ptt, &ustats, for_each_hwfn()
1567 p_hwfn->storm_stats.ustats.address, for_each_hwfn()
1568 p_hwfn->storm_stats.ustats.len); for_each_hwfn()
1571 qed_memcpy_from(p_hwfn, p_ptt, &pstats, for_each_hwfn()
1572 p_hwfn->storm_stats.pstats.address, for_each_hwfn()
1573 p_hwfn->storm_stats.pstats.len); for_each_hwfn()
1576 qed_memcpy_from(p_hwfn, p_ptt, &tstats, for_each_hwfn()
1577 p_hwfn->storm_stats.tstats.address, for_each_hwfn()
1578 p_hwfn->storm_stats.tstats.len); for_each_hwfn()
1582 if (p_hwfn->mcp_info) for_each_hwfn()
1583 qed_memcpy_from(p_hwfn, p_ptt, &port_stats, for_each_hwfn()
1584 p_hwfn->mcp_info->port_addr + for_each_hwfn()
1587 qed_ptt_release(p_hwfn, p_ptt); for_each_hwfn()
1715 struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; for_each_hwfn() local
1719 struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn); for_each_hwfn()
1722 DP_ERR(p_hwfn, "Failed to acquire ptt\n"); for_each_hwfn()
1727 qed_memcpy_to(p_hwfn, p_ptt, for_each_hwfn()
1728 p_hwfn->storm_stats.mstats.address, for_each_hwfn()
1730 p_hwfn->storm_stats.mstats.len); for_each_hwfn()
1733 qed_memcpy_to(p_hwfn, p_ptt, for_each_hwfn()
1734 p_hwfn->storm_stats.ustats.address, for_each_hwfn()
1736 p_hwfn->storm_stats.ustats.len); for_each_hwfn()
1739 qed_memcpy_to(p_hwfn, p_ptt, for_each_hwfn()
1740 p_hwfn->storm_stats.pstats.address, for_each_hwfn()
1742 p_hwfn->storm_stats.pstats.len); for_each_hwfn()
1744 qed_ptt_release(p_hwfn, p_ptt); for_each_hwfn()
1756 int qed_fw_l2_queue(struct qed_hwfn *p_hwfn, qed_fw_l2_queue() argument
1759 if (src_id >= RESC_NUM(p_hwfn, QED_L2_QUEUE)) { qed_fw_l2_queue()
1762 min = (u16)RESC_START(p_hwfn, QED_L2_QUEUE); qed_fw_l2_queue()
1763 max = min + RESC_NUM(p_hwfn, QED_L2_QUEUE); qed_fw_l2_queue()
1764 DP_NOTICE(p_hwfn, qed_fw_l2_queue()
1771 *dst_id = RESC_START(p_hwfn, QED_L2_QUEUE) + src_id; qed_fw_l2_queue()
1776 int qed_fw_vport(struct qed_hwfn *p_hwfn, qed_fw_vport() argument
1779 if (src_id >= RESC_NUM(p_hwfn, QED_VPORT)) { qed_fw_vport()
1782 min = (u8)RESC_START(p_hwfn, QED_VPORT); qed_fw_vport()
1783 max = min + RESC_NUM(p_hwfn, QED_VPORT); qed_fw_vport()
1784 DP_NOTICE(p_hwfn, qed_fw_vport()
1791 *dst_id = RESC_START(p_hwfn, QED_VPORT) + src_id; qed_fw_vport()
1796 int qed_fw_rss_eng(struct qed_hwfn *p_hwfn, qed_fw_rss_eng() argument
1799 if (src_id >= RESC_NUM(p_hwfn, QED_RSS_ENG)) { qed_fw_rss_eng()
1802 min = (u8)RESC_START(p_hwfn, QED_RSS_ENG); qed_fw_rss_eng()
1803 max = min + RESC_NUM(p_hwfn, QED_RSS_ENG); qed_fw_rss_eng()
1804 DP_NOTICE(p_hwfn, qed_fw_rss_eng()
1811 *dst_id = RESC_START(p_hwfn, QED_RSS_ENG) + src_id; qed_fw_rss_eng()
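A note on the qed_fw_l2_queue()/qed_fw_vport()/qed_fw_rss_eng() helpers above: each one bounds-checks a PF-relative index against the resource count and then offsets it by the PF's resource base. Below is a minimal sketch of that remap pattern, using the RESC_NUM()/RESC_START() accessors seen in these results; the helper name and the -EINVAL return are illustrative, not taken from the driver.

static int example_fw_resc_remap(struct qed_hwfn *p_hwfn,
                                 u8 src_id, u8 *dst_id)
{
        /* Reject indices outside this PF's VPORT allocation. */
        if (src_id >= RESC_NUM(p_hwfn, QED_VPORT))
                return -EINVAL; /* illustrative error code */

        /* Translate the PF-relative index to the absolute HW index. */
        *dst_id = RESC_START(p_hwfn, QED_VPORT) + src_id;
        return 0;
}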
qed_cxt.h
27 * @param p_hwfn
33 int qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn,
41 * @param p_hwfn
46 int qed_cxt_get_cid_info(struct qed_hwfn *p_hwfn,
57 * @param p_hwfn
61 int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn);
66 * @param p_hwfn
70 int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn);
75 * @param p_hwfn
79 int qed_cxt_mngr_alloc(struct qed_hwfn *p_hwfn);
84 * @param p_hwfn
86 void qed_cxt_mngr_free(struct qed_hwfn *p_hwfn);
91 * @param p_hwfn
95 int qed_cxt_tables_alloc(struct qed_hwfn *p_hwfn);
100 * @param p_hwfn
102 void qed_cxt_mngr_setup(struct qed_hwfn *p_hwfn);
109 * @param p_hwfn
111 void qed_cxt_hw_init_common(struct qed_hwfn *p_hwfn);
118 * @param p_hwfn
120 void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn);
125 * @param p_hwfn
128 void qed_qm_init_pf(struct qed_hwfn *p_hwfn);
133 * @param p_hwfn
136 void qed_cxt_release_cid(struct qed_hwfn *p_hwfn,
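The qed_cxt.h prototypes above are driven from the per-hwfn allocation path shown in the qed_dev.c results (alloc around source lines 243-267, free at line 104, setup at line 329). Below is a hedged sketch of that sequencing; the wrapper name and the error path are illustrative, not the driver's actual code.

static int example_cxt_bringup(struct qed_hwfn *p_hwfn)
{
        int rc;

        rc = qed_cxt_mngr_alloc(p_hwfn);      /* allocate the context manager */
        if (rc)
                return rc;

        rc = qed_cxt_set_pf_params(p_hwfn);   /* PF-specific parameters */
        if (rc)
                goto err;

        rc = qed_cxt_cfg_ilt_compute(p_hwfn); /* compute ILT configuration */
        if (rc)
                goto err;

        rc = qed_cxt_tables_alloc(p_hwfn);    /* allocate ILT/context tables */
        if (rc)
                goto err;

        qed_cxt_mngr_setup(p_hwfn);           /* setup pass (qed_dev.c line 329 above) */
        return 0;

err:
        qed_cxt_mngr_free(p_hwfn);
        return rc;
}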
qed_mcp.c
35 DRV_INNER_WR(p_hwfn, _p_ptt, drv_mb_addr, \
47 bool qed_mcp_is_init(struct qed_hwfn *p_hwfn) qed_mcp_is_init() argument
49 if (!p_hwfn->mcp_info || !p_hwfn->mcp_info->public_base) qed_mcp_is_init()
54 void qed_mcp_cmd_port_init(struct qed_hwfn *p_hwfn, qed_mcp_cmd_port_init() argument
57 u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base, qed_mcp_cmd_port_init()
59 u32 mfw_mb_offsize = qed_rd(p_hwfn, p_ptt, addr); qed_mcp_cmd_port_init()
61 p_hwfn->mcp_info->port_addr = SECTION_ADDR(mfw_mb_offsize, qed_mcp_cmd_port_init()
62 MFW_PORT(p_hwfn)); qed_mcp_cmd_port_init()
63 DP_VERBOSE(p_hwfn, QED_MSG_SP, qed_mcp_cmd_port_init()
65 p_hwfn->mcp_info->port_addr, MFW_PORT(p_hwfn)); qed_mcp_cmd_port_init()
68 void qed_mcp_read_mb(struct qed_hwfn *p_hwfn, qed_mcp_read_mb() argument
71 u32 length = MFW_DRV_MSG_MAX_DWORDS(p_hwfn->mcp_info->mfw_mb_length); qed_mcp_read_mb()
74 if (!p_hwfn->mcp_info->public_base) qed_mcp_read_mb()
78 tmp = qed_rd(p_hwfn, p_ptt, qed_mcp_read_mb()
79 p_hwfn->mcp_info->mfw_mb_addr + qed_mcp_read_mb()
83 ((u32 *)p_hwfn->mcp_info->mfw_mb_cur)[i] = qed_mcp_read_mb()
88 int qed_mcp_free(struct qed_hwfn *p_hwfn) qed_mcp_free() argument
90 if (p_hwfn->mcp_info) { qed_mcp_free()
91 kfree(p_hwfn->mcp_info->mfw_mb_cur); qed_mcp_free()
92 kfree(p_hwfn->mcp_info->mfw_mb_shadow); qed_mcp_free()
94 kfree(p_hwfn->mcp_info); qed_mcp_free()
99 static int qed_load_mcp_offsets(struct qed_hwfn *p_hwfn, qed_load_mcp_offsets() argument
102 struct qed_mcp_info *p_info = p_hwfn->mcp_info; qed_load_mcp_offsets()
104 u32 mcp_pf_id = MCP_PF_ID(p_hwfn); qed_load_mcp_offsets()
106 p_info->public_base = qed_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR); qed_load_mcp_offsets()
113 drv_mb_offsize = qed_rd(p_hwfn, p_ptt, qed_load_mcp_offsets()
117 DP_VERBOSE(p_hwfn, QED_MSG_SP, qed_load_mcp_offsets()
122 mfw_mb_offsize = qed_rd(p_hwfn, p_ptt, qed_load_mcp_offsets()
126 p_info->mfw_mb_length = (u16)qed_rd(p_hwfn, p_ptt, p_info->mfw_mb_addr); qed_load_mcp_offsets()
131 p_info->drv_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) & qed_load_mcp_offsets()
135 p_info->drv_pulse_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_pulse_mb) & qed_load_mcp_offsets()
138 p_info->mcp_hist = (u16)qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0); qed_load_mcp_offsets()
143 int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn, qed_mcp_cmd_init() argument
150 p_hwfn->mcp_info = kzalloc(sizeof(*p_hwfn->mcp_info), GFP_ATOMIC); qed_mcp_cmd_init()
151 if (!p_hwfn->mcp_info) qed_mcp_cmd_init()
153 p_info = p_hwfn->mcp_info; qed_mcp_cmd_init()
155 if (qed_load_mcp_offsets(p_hwfn, p_ptt) != 0) { qed_mcp_cmd_init()
156 DP_NOTICE(p_hwfn, "MCP is not initialized\n"); qed_mcp_cmd_init()
177 DP_NOTICE(p_hwfn, "Failed to allocate mcp memory\n"); qed_mcp_cmd_init()
178 qed_mcp_free(p_hwfn); qed_mcp_cmd_init()
182 int qed_mcp_reset(struct qed_hwfn *p_hwfn, qed_mcp_reset() argument
185 u32 seq = ++p_hwfn->mcp_info->drv_mb_seq; qed_mcp_reset()
191 org_mcp_reset_seq = qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0); qed_mcp_reset()
192 DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, qed_mcp_reset()
199 } while ((org_mcp_reset_seq == qed_rd(p_hwfn, p_ptt, qed_mcp_reset()
204 qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) { qed_mcp_reset()
205 DP_VERBOSE(p_hwfn, QED_MSG_SP, qed_mcp_reset()
208 DP_ERR(p_hwfn, "Failed to reset MCP\n"); qed_mcp_reset()
215 static int qed_do_mcp_cmd(struct qed_hwfn *p_hwfn, qed_do_mcp_cmd() argument
227 actual_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) & qed_do_mcp_cmd()
233 if (p_hwfn->mcp_info->mcp_hist != qed_do_mcp_cmd()
234 qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) { qed_do_mcp_cmd()
235 DP_VERBOSE(p_hwfn, QED_MSG_SP, "Rereading MCP offsets\n"); qed_do_mcp_cmd()
236 qed_load_mcp_offsets(p_hwfn, p_ptt); qed_do_mcp_cmd()
237 qed_mcp_cmd_port_init(p_hwfn, p_ptt); qed_do_mcp_cmd()
239 seq = ++p_hwfn->mcp_info->drv_mb_seq; qed_do_mcp_cmd()
242 DRV_MB_WR(p_hwfn, p_ptt, drv_mb_param, param); qed_do_mcp_cmd()
245 DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (cmd | seq)); qed_do_mcp_cmd()
247 DP_VERBOSE(p_hwfn, QED_MSG_SP, qed_do_mcp_cmd()
254 *o_mcp_resp = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_header); qed_do_mcp_cmd()
260 DP_VERBOSE(p_hwfn, QED_MSG_SP, qed_do_mcp_cmd()
268 *o_mcp_param = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_param); qed_do_mcp_cmd()
271 DP_ERR(p_hwfn, "MFW failed to respond!\n"); qed_do_mcp_cmd()
278 int qed_mcp_cmd(struct qed_hwfn *p_hwfn, qed_mcp_cmd() argument
288 if (!qed_mcp_is_init(p_hwfn)) { qed_mcp_cmd()
289 DP_NOTICE(p_hwfn, "MFW is not initialized !\n"); qed_mcp_cmd()
296 mutex_lock(&p_hwfn->mcp_info->mutex); qed_mcp_cmd()
297 rc = qed_do_mcp_cmd(p_hwfn, p_ptt, cmd, param, qed_mcp_cmd()
300 mutex_unlock(&p_hwfn->mcp_info->mutex); qed_mcp_cmd()
306 struct qed_hwfn *p_hwfn, qed_mcp_set_drv_ver()
313 DRV_MB_WR(p_hwfn, p_ptt, union_data.ver_str[i], qed_mcp_set_drv_ver()
317 int qed_mcp_load_req(struct qed_hwfn *p_hwfn, qed_mcp_load_req() argument
321 struct qed_dev *cdev = p_hwfn->cdev; qed_mcp_load_req()
325 if (!qed_mcp_is_init(p_hwfn)) { qed_mcp_load_req()
326 DP_NOTICE(p_hwfn, "MFW is not initialized !\n"); qed_mcp_load_req()
331 qed_mcp_set_drv_ver(cdev, p_hwfn, p_ptt); qed_mcp_load_req()
333 DP_VERBOSE(p_hwfn, QED_MSG_SP, "fw_seq 0x%08x, drv_pulse 0x%x\n", qed_mcp_load_req()
334 p_hwfn->mcp_info->drv_mb_seq, qed_mcp_load_req()
335 p_hwfn->mcp_info->drv_pulse_seq); qed_mcp_load_req()
338 rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_LOAD_REQ, qed_mcp_load_req()
345 DP_ERR(p_hwfn, "MCP response failure, aborting\n"); qed_mcp_load_req()
361 DP_ERR(p_hwfn, "MCP refused load request, aborting\n"); qed_mcp_load_req()
368 static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn, qed_mcp_handle_link_change() argument
375 p_link = &p_hwfn->mcp_info->link_output; qed_mcp_handle_link_change()
378 status = qed_rd(p_hwfn, p_ptt, qed_mcp_handle_link_change()
379 p_hwfn->mcp_info->port_addr + qed_mcp_handle_link_change()
381 DP_VERBOSE(p_hwfn, (NETIF_MSG_LINK | QED_MSG_SP), qed_mcp_handle_link_change()
384 (u32)(p_hwfn->mcp_info->port_addr + qed_mcp_handle_link_change()
388 DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, qed_mcp_handle_link_change()
426 if (p_hwfn->mcp_info->func_info.bandwidth_max && p_link->speed) { qed_mcp_handle_link_change()
428 p_hwfn->mcp_info->func_info.bandwidth_max / qed_mcp_handle_link_change()
430 qed_init_pf_rl(p_hwfn, p_ptt, p_hwfn->rel_pf_id, qed_mcp_handle_link_change()
432 DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, qed_mcp_handle_link_change()
487 qed_link_update(p_hwfn); qed_mcp_handle_link_change()
490 int qed_mcp_set_link(struct qed_hwfn *p_hwfn, qed_mcp_set_link() argument
494 struct qed_mcp_link_params *params = &p_hwfn->mcp_info->link_input; qed_mcp_set_link()
500 if (!qed_mcp_is_init(p_hwfn)) { qed_mcp_set_link()
501 DP_NOTICE(p_hwfn, "MFW is not initialized !\n"); qed_mcp_set_link()
518 qed_wr(p_hwfn, p_ptt, qed_mcp_set_link()
519 p_hwfn->mcp_info->drv_mb_addr + qed_mcp_set_link()
524 DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, qed_mcp_set_link()
532 DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, qed_mcp_set_link()
536 DP_VERBOSE(p_hwfn, QED_MSG_SP, "fw_seq 0x%08x, drv_pulse 0x%x\n", qed_mcp_set_link()
537 p_hwfn->mcp_info->drv_mb_seq, qed_mcp_set_link()
538 p_hwfn->mcp_info->drv_pulse_seq); qed_mcp_set_link()
541 rc = qed_mcp_cmd(p_hwfn, p_ptt, cmd, 0, &reply, &param); qed_mcp_set_link()
545 DP_ERR(p_hwfn, "MCP response failure, aborting\n"); qed_mcp_set_link()
551 qed_mcp_handle_link_change(p_hwfn, p_ptt, true); qed_mcp_set_link()
556 int qed_mcp_handle_events(struct qed_hwfn *p_hwfn, qed_mcp_handle_events() argument
559 struct qed_mcp_info *info = p_hwfn->mcp_info; qed_mcp_handle_events()
564 DP_VERBOSE(p_hwfn, QED_MSG_SP, "Received message from MFW\n"); qed_mcp_handle_events()
567 qed_mcp_read_mb(p_hwfn, p_ptt); qed_mcp_handle_events()
576 DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, qed_mcp_handle_events()
582 qed_mcp_handle_link_change(p_hwfn, p_ptt, false); qed_mcp_handle_events()
585 DP_NOTICE(p_hwfn, "Unimplemented MFW message %d\n", i); qed_mcp_handle_events()
595 qed_wr(p_hwfn, p_ptt, qed_mcp_handle_events()
603 DP_NOTICE(p_hwfn, qed_mcp_handle_events()
617 struct qed_hwfn *p_hwfn = &cdev->hwfns[0]; qed_mcp_get_mfw_ver() local
621 p_ptt = qed_ptt_acquire(p_hwfn); qed_mcp_get_mfw_ver()
625 global_offsize = qed_rd(p_hwfn, p_ptt, qed_mcp_get_mfw_ver()
626 SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info-> qed_mcp_get_mfw_ver()
629 *p_mfw_ver = qed_rd(p_hwfn, p_ptt, qed_mcp_get_mfw_ver()
633 qed_ptt_release(p_hwfn, p_ptt); qed_mcp_get_mfw_ver()
641 struct qed_hwfn *p_hwfn = &cdev->hwfns[0]; qed_mcp_get_media_type() local
644 if (!qed_mcp_is_init(p_hwfn)) { qed_mcp_get_media_type()
645 DP_NOTICE(p_hwfn, "MFW is not initialized !\n"); qed_mcp_get_media_type()
651 p_ptt = qed_ptt_acquire(p_hwfn); qed_mcp_get_media_type()
655 *p_media_type = qed_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr + qed_mcp_get_media_type()
658 qed_ptt_release(p_hwfn, p_ptt); qed_mcp_get_media_type()
663 static u32 qed_mcp_get_shmem_func(struct qed_hwfn *p_hwfn, qed_mcp_get_shmem_func() argument
668 u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base, qed_mcp_get_shmem_func()
670 u32 mfw_path_offsize = qed_rd(p_hwfn, p_ptt, addr); qed_mcp_get_shmem_func()
679 ((u32 *)p_data)[i] = qed_rd(p_hwfn, p_ptt, qed_mcp_get_shmem_func()
686 qed_mcp_get_shmem_proto(struct qed_hwfn *p_hwfn, qed_mcp_get_shmem_proto() argument
703 int qed_mcp_fill_shmem_func_info(struct qed_hwfn *p_hwfn, qed_mcp_fill_shmem_func_info() argument
709 qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, qed_mcp_fill_shmem_func_info()
710 MCP_PF_ID(p_hwfn)); qed_mcp_fill_shmem_func_info()
711 info = &p_hwfn->mcp_info->func_info; qed_mcp_fill_shmem_func_info()
716 if (qed_mcp_get_shmem_proto(p_hwfn, &shmem_info, qed_mcp_fill_shmem_func_info()
718 DP_ERR(p_hwfn, "Unknown personality %08x\n", qed_mcp_fill_shmem_func_info()
723 if (p_hwfn->cdev->mf_mode != SF) { qed_mcp_fill_shmem_func_info()
728 DP_INFO(p_hwfn, qed_mcp_fill_shmem_func_info()
738 DP_INFO(p_hwfn, qed_mcp_fill_shmem_func_info()
753 DP_NOTICE(p_hwfn, "MAC is 0 in shmem\n"); qed_mcp_fill_shmem_func_info()
763 DP_VERBOSE(p_hwfn, (QED_MSG_SP | NETIF_MSG_IFUP), qed_mcp_fill_shmem_func_info()
775 *qed_mcp_get_link_params(struct qed_hwfn *p_hwfn) qed_mcp_get_link_params() argument
777 if (!p_hwfn || !p_hwfn->mcp_info) qed_mcp_get_link_params()
779 return &p_hwfn->mcp_info->link_input; qed_mcp_get_link_params()
783 *qed_mcp_get_link_state(struct qed_hwfn *p_hwfn) qed_mcp_get_link_state() argument
785 if (!p_hwfn || !p_hwfn->mcp_info) qed_mcp_get_link_state()
787 return &p_hwfn->mcp_info->link_output; qed_mcp_get_link_state()
791 *qed_mcp_get_link_capabilities(struct qed_hwfn *p_hwfn) qed_mcp_get_link_capabilities() argument
793 if (!p_hwfn || !p_hwfn->mcp_info) qed_mcp_get_link_capabilities()
795 return &p_hwfn->mcp_info->link_capabilities; qed_mcp_get_link_capabilities()
798 int qed_mcp_drain(struct qed_hwfn *p_hwfn, qed_mcp_drain() argument
804 rc = qed_mcp_cmd(p_hwfn, p_ptt, qed_mcp_drain()
814 int qed_mcp_get_flash_size(struct qed_hwfn *p_hwfn, qed_mcp_get_flash_size() argument
820 flash_size = qed_rd(p_hwfn, p_ptt, MCP_REG_NVM_CFG4); qed_mcp_get_flash_size()
831 qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn, qed_mcp_send_drv_version() argument
838 if (!qed_mcp_is_init(p_hwfn)) { qed_mcp_send_drv_version()
839 DP_NOTICE(p_hwfn, "MFW is not initialized !\n"); qed_mcp_send_drv_version()
843 DRV_MB_WR(p_hwfn, p_ptt, union_data.drv_version.version, qed_mcp_send_drv_version()
847 DRV_MB_WR(p_hwfn, p_ptt, qed_mcp_send_drv_version()
852 rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_VERSION, 0, &reply, qed_mcp_send_drv_version()
855 DP_ERR(p_hwfn, "MCP response failure, aborting\n"); qed_mcp_send_drv_version()
305 qed_mcp_set_drv_ver(struct qed_dev *cdev, struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) qed_mcp_set_drv_ver() argument
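The qed_mcp_cmd() wrapper above checks qed_mcp_is_init() and serializes mailbox access (mutex_lock around qed_do_mcp_cmd()). Below is a sketch of a caller, assuming the six-argument prototype implied by the call sites in the qed_dev.c results (command and parameter in, response and parameter out); the wrapper name and the error code are illustrative.

static int example_mcp_cmd(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
                           u32 cmd, u32 param)
{
        u32 resp = 0, out_param = 0;
        int rc;

        /* The MFW mailbox is unusable before qed_mcp_cmd_init() succeeds. */
        if (!qed_mcp_is_init(p_hwfn))
                return -EINVAL; /* illustrative error code */

        rc = qed_mcp_cmd(p_hwfn, p_ptt, cmd, param, &resp, &out_param);
        if (rc)
                DP_NOTICE(p_hwfn, "MCP command 0x%08x failed\n", cmd);

        return rc;
}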
qed_sp_commands.c
24 int qed_sp_init_request(struct qed_hwfn *p_hwfn, qed_sp_init_request() argument
39 rc = qed_spq_get_entry(p_hwfn, pp_ent); qed_sp_init_request()
74 DP_NOTICE(p_hwfn, "Unknown SPQE completion mode %d\n", qed_sp_init_request()
79 DP_VERBOSE(p_hwfn, QED_MSG_SPQ, qed_sp_init_request()
92 int qed_sp_pf_start(struct qed_hwfn *p_hwfn, qed_sp_pf_start() argument
97 u16 sb = qed_int_get_sp_sb_id(p_hwfn); qed_sp_pf_start()
98 u8 sb_index = p_hwfn->p_eq->eq_sb_index; qed_sp_pf_start()
103 qed_eq_prod_update(p_hwfn, qed_sp_pf_start()
104 qed_chain_get_prod_idx(&p_hwfn->p_eq->chain)); qed_sp_pf_start()
110 rc = qed_sp_init_request(p_hwfn, qed_sp_pf_start()
112 qed_spq_get_cid(p_hwfn), qed_sp_pf_start()
113 p_hwfn->hw_info.opaque_fid, qed_sp_pf_start()
124 p_ramrod->path_id = QED_PATH_ID(p_hwfn); qed_sp_pf_start()
128 p_ramrod->outer_tag = p_hwfn->hw_info.ovlan; qed_sp_pf_start()
132 DMA_HI_LE(p_hwfn->p_eq->chain.pbl.p_phys_table); qed_sp_pf_start()
134 DMA_LO_LE(p_hwfn->p_eq->chain.pbl.p_phys_table); qed_sp_pf_start()
135 p_ramrod->event_ring_num_pages = (u8)p_hwfn->p_eq->chain.page_cnt; qed_sp_pf_start()
138 DMA_HI_LE(p_hwfn->p_consq->chain.pbl.p_phys_table); qed_sp_pf_start()
140 DMA_LO_LE(p_hwfn->p_consq->chain.pbl.p_phys_table); qed_sp_pf_start()
142 p_hwfn->hw_info.personality = PERSONALITY_ETH; qed_sp_pf_start()
144 DP_VERBOSE(p_hwfn, QED_MSG_SPQ, qed_sp_pf_start()
150 return qed_spq_post(p_hwfn, p_ent, NULL); qed_sp_pf_start()
153 int qed_sp_pf_stop(struct qed_hwfn *p_hwfn) qed_sp_pf_stop() argument
162 rc = qed_sp_init_request(p_hwfn, &p_ent, qed_spq_get_cid(p_hwfn), qed_sp_pf_stop()
163 p_hwfn->hw_info.opaque_fid, qed_sp_pf_stop()
169 return qed_spq_post(p_hwfn, p_ent, NULL); qed_sp_pf_stop()
qed_int.c
42 #define SB_ATTN_ALIGNED_SIZE(p_hwfn) \
43 ALIGNED_TYPE_SIZE(struct atten_status_block, p_hwfn)
62 static inline u16 qed_attn_update_idx(struct qed_hwfn *p_hwfn, qed_attn_update_idx() argument
86 * @param p_hwfn
90 static int qed_int_assertion(struct qed_hwfn *p_hwfn, qed_int_assertion() argument
93 struct qed_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn; qed_int_assertion()
97 igu_mask = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, qed_int_assertion()
99 DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "IGU mask: 0x%08x --> 0x%08x\n", qed_int_assertion()
102 qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE, igu_mask); qed_int_assertion()
104 DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, qed_int_assertion()
112 qed_mcp_handle_events(p_hwfn, p_hwfn->p_dpc_ptt); qed_int_assertion()
114 qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, qed_int_assertion()
118 DIRECT_REG_WR((u8 __iomem *)p_hwfn->regview + qed_int_assertion()
124 DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "set cmd IGU: 0x%04x\n", qed_int_assertion()
133 * @param p_hwfn
138 static int qed_int_deassertion(struct qed_hwfn *p_hwfn, qed_int_deassertion() argument
141 struct qed_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn; qed_int_deassertion()
145 DP_ERR(p_hwfn, "Unexpected - non-link deassertion\n"); qed_int_deassertion()
148 DIRECT_REG_WR((u8 __iomem *)p_hwfn->regview + qed_int_deassertion()
155 aeu_mask = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, qed_int_deassertion()
158 qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE, aeu_mask); qed_int_deassertion()
166 static int qed_int_attentions(struct qed_hwfn *p_hwfn) qed_int_attentions() argument
168 struct qed_sb_attn_info *p_sb_attn_sw = p_hwfn->p_sb_attn; qed_int_attentions()
196 DP_INFO(p_hwfn, qed_int_attentions()
201 DP_INFO(p_hwfn, qed_int_attentions()
204 DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, qed_int_attentions()
209 rc = qed_int_assertion(p_hwfn, asserted_bits); qed_int_attentions()
215 rc = qed_int_deassertion(p_hwfn, deasserted_bits); qed_int_attentions()
223 static void qed_sb_ack_attn(struct qed_hwfn *p_hwfn, qed_sb_ack_attn() argument
247 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)hwfn_cookie; qed_int_sp_dpc() local
254 if (!p_hwfn->p_sp_sb) { qed_int_sp_dpc()
255 DP_ERR(p_hwfn->cdev, "DPC called - no p_sp_sb\n"); qed_int_sp_dpc()
259 sb_info = &p_hwfn->p_sp_sb->sb_info; qed_int_sp_dpc()
260 arr_size = ARRAY_SIZE(p_hwfn->p_sp_sb->pi_info_arr); qed_int_sp_dpc()
262 DP_ERR(p_hwfn->cdev, qed_int_sp_dpc()
267 if (!p_hwfn->p_sb_attn) { qed_int_sp_dpc()
268 DP_ERR(p_hwfn->cdev, "DPC called - no p_sb_attn"); qed_int_sp_dpc()
271 sb_attn = p_hwfn->p_sb_attn; qed_int_sp_dpc()
273 DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "DPC Called! (hwfn %p %d)\n", qed_int_sp_dpc()
274 p_hwfn, p_hwfn->my_id); qed_int_sp_dpc()
284 p_hwfn->cdev, qed_int_sp_dpc()
290 DP_VERBOSE(p_hwfn->cdev, NETIF_MSG_INTR, qed_int_sp_dpc()
297 p_hwfn->cdev, qed_int_sp_dpc()
302 rc |= qed_attn_update_idx(p_hwfn, sb_attn); qed_int_sp_dpc()
303 DP_VERBOSE(p_hwfn->cdev, NETIF_MSG_INTR, qed_int_sp_dpc()
315 if (!p_hwfn->p_dpc_ptt) { qed_int_sp_dpc()
316 DP_NOTICE(p_hwfn->cdev, "Failed to allocate PTT\n"); qed_int_sp_dpc()
322 qed_int_attentions(p_hwfn); qed_int_sp_dpc()
329 pi_info = &p_hwfn->p_sp_sb->pi_info_arr[pi]; qed_int_sp_dpc()
331 pi_info->comp_cb(p_hwfn, pi_info->cookie); qed_int_sp_dpc()
339 qed_sb_ack_attn(p_hwfn, sb_info->igu_addr, sb_attn->index); qed_int_sp_dpc()
344 static void qed_int_sb_attn_free(struct qed_hwfn *p_hwfn) qed_int_sb_attn_free() argument
346 struct qed_dev *cdev = p_hwfn->cdev; qed_int_sb_attn_free()
347 struct qed_sb_attn_info *p_sb = p_hwfn->p_sb_attn; qed_int_sb_attn_free()
352 SB_ATTN_ALIGNED_SIZE(p_hwfn), qed_int_sb_attn_free()
359 static void qed_int_sb_attn_setup(struct qed_hwfn *p_hwfn, qed_int_sb_attn_setup() argument
362 struct qed_sb_attn_info *sb_info = p_hwfn->p_sb_attn; qed_int_sb_attn_setup()
370 qed_wr(p_hwfn, p_ptt, IGU_REG_ATTN_MSG_ADDR_L, qed_int_sb_attn_setup()
371 lower_32_bits(p_hwfn->p_sb_attn->sb_phys)); qed_int_sb_attn_setup()
372 qed_wr(p_hwfn, p_ptt, IGU_REG_ATTN_MSG_ADDR_H, qed_int_sb_attn_setup()
373 upper_32_bits(p_hwfn->p_sb_attn->sb_phys)); qed_int_sb_attn_setup()
376 static void qed_int_sb_attn_init(struct qed_hwfn *p_hwfn, qed_int_sb_attn_init() argument
381 struct qed_sb_attn_info *sb_info = p_hwfn->p_sb_attn; qed_int_sb_attn_init()
387 sb_info->mfw_attn_addr = (p_hwfn->rel_pf_id << 3) + qed_int_sb_attn_init()
390 qed_int_sb_attn_setup(p_hwfn, p_ptt); qed_int_sb_attn_init()
393 static int qed_int_sb_attn_alloc(struct qed_hwfn *p_hwfn, qed_int_sb_attn_alloc() argument
396 struct qed_dev *cdev = p_hwfn->cdev; qed_int_sb_attn_alloc()
410 SB_ATTN_ALIGNED_SIZE(p_hwfn), qed_int_sb_attn_alloc()
420 p_hwfn->p_sb_attn = p_sb; qed_int_sb_attn_alloc()
421 qed_int_sb_attn_init(p_hwfn, p_ptt, p_virt, p_phys); qed_int_sb_attn_alloc()
430 void qed_init_cau_sb_entry(struct qed_hwfn *p_hwfn, qed_init_cau_sb_entry() argument
454 if (p_hwfn->cdev->int_coalescing_mode == QED_COAL_MODE_ENABLE) { qed_init_cau_sb_entry()
456 if (!p_hwfn->cdev->rx_coalesce_usecs) qed_init_cau_sb_entry()
457 p_hwfn->cdev->rx_coalesce_usecs = qed_init_cau_sb_entry()
459 if (!p_hwfn->cdev->tx_coalesce_usecs) qed_init_cau_sb_entry()
460 p_hwfn->cdev->tx_coalesce_usecs = qed_init_cau_sb_entry()
468 void qed_int_cau_conf_sb(struct qed_hwfn *p_hwfn, qed_int_cau_conf_sb() argument
478 qed_init_cau_sb_entry(p_hwfn, &sb_entry, p_hwfn->rel_pf_id, qed_int_cau_conf_sb()
481 if (p_hwfn->hw_init_done) { qed_int_cau_conf_sb()
483 qed_wr(p_hwfn, p_ptt, val, lower_32_bits(sb_phys)); qed_int_cau_conf_sb()
484 qed_wr(p_hwfn, p_ptt, val + sizeof(u32), qed_int_cau_conf_sb()
488 qed_wr(p_hwfn, p_ptt, val, sb_entry.data); qed_int_cau_conf_sb()
489 qed_wr(p_hwfn, p_ptt, val + sizeof(u32), sb_entry.params); qed_int_cau_conf_sb()
492 STORE_RT_REG_AGG(p_hwfn, qed_int_cau_conf_sb()
497 STORE_RT_REG_AGG(p_hwfn, qed_int_cau_conf_sb()
504 if (p_hwfn->cdev->int_coalescing_mode == QED_COAL_MODE_ENABLE) { qed_int_cau_conf_sb()
505 u8 timeset = p_hwfn->cdev->rx_coalesce_usecs >> qed_int_cau_conf_sb()
509 qed_int_cau_conf_pi(p_hwfn, p_ptt, igu_sb_id, RX_PI, qed_int_cau_conf_sb()
513 timeset = p_hwfn->cdev->tx_coalesce_usecs >> qed_int_cau_conf_sb()
517 qed_int_cau_conf_pi(p_hwfn, p_ptt, qed_int_cau_conf_sb()
525 void qed_int_cau_conf_pi(struct qed_hwfn *p_hwfn, qed_int_cau_conf_pi() argument
546 if (p_hwfn->hw_init_done) { qed_int_cau_conf_pi()
547 qed_wr(p_hwfn, p_ptt, qed_int_cau_conf_pi()
551 STORE_RT_REG(p_hwfn, qed_int_cau_conf_pi()
557 void qed_int_sb_setup(struct qed_hwfn *p_hwfn, qed_int_sb_setup() argument
565 qed_int_cau_conf_sb(p_hwfn, p_ptt, sb_info->sb_phys, qed_int_sb_setup()
573 * @param p_hwfn
578 static u16 qed_get_igu_sb_id(struct qed_hwfn *p_hwfn, qed_get_igu_sb_id() argument
585 igu_sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id; qed_get_igu_sb_id()
587 igu_sb_id = sb_id + p_hwfn->hw_info.p_igu_info->igu_base_sb; qed_get_igu_sb_id()
589 DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "SB [%s] index is 0x%04x\n", qed_get_igu_sb_id()
595 int qed_int_sb_init(struct qed_hwfn *p_hwfn, qed_int_sb_init() argument
605 sb_info->igu_sb_id = qed_get_igu_sb_id(p_hwfn, sb_id); qed_int_sb_init()
608 p_hwfn->sbs_info[sb_id] = sb_info; qed_int_sb_init()
609 p_hwfn->num_sbs++; qed_int_sb_init()
612 sb_info->cdev = p_hwfn->cdev; qed_int_sb_init()
617 sb_info->igu_addr = (u8 __iomem *)p_hwfn->regview + qed_int_sb_init()
623 qed_int_sb_setup(p_hwfn, p_ptt, sb_info); qed_int_sb_init()
628 int qed_int_sb_release(struct qed_hwfn *p_hwfn, qed_int_sb_release() argument
633 DP_ERR(p_hwfn, "Do Not free sp sb using this function"); qed_int_sb_release()
641 p_hwfn->sbs_info[sb_id] = NULL; qed_int_sb_release()
642 p_hwfn->num_sbs--; qed_int_sb_release()
647 static void qed_int_sp_sb_free(struct qed_hwfn *p_hwfn) qed_int_sp_sb_free() argument
649 struct qed_sb_sp_info *p_sb = p_hwfn->p_sp_sb; qed_int_sp_sb_free()
653 dma_free_coherent(&p_hwfn->cdev->pdev->dev, qed_int_sp_sb_free()
654 SB_ALIGNED_SIZE(p_hwfn), qed_int_sp_sb_free()
661 static int qed_int_sp_sb_alloc(struct qed_hwfn *p_hwfn, qed_int_sp_sb_alloc() argument
671 DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_sb_info'\n"); qed_int_sp_sb_alloc()
676 p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, qed_int_sp_sb_alloc()
677 SB_ALIGNED_SIZE(p_hwfn), qed_int_sp_sb_alloc()
680 DP_NOTICE(p_hwfn, "Failed to allocate status block\n"); qed_int_sp_sb_alloc()
686 p_hwfn->p_sp_sb = p_sb; qed_int_sp_sb_alloc()
687 qed_int_sb_init(p_hwfn, p_ptt, &p_sb->sb_info, p_virt, qed_int_sp_sb_alloc()
695 static void qed_int_sp_sb_setup(struct qed_hwfn *p_hwfn, qed_int_sp_sb_setup() argument
698 if (!p_hwfn) qed_int_sp_sb_setup()
701 if (p_hwfn->p_sp_sb) qed_int_sp_sb_setup()
702 qed_int_sb_setup(p_hwfn, p_ptt, &p_hwfn->p_sp_sb->sb_info); qed_int_sp_sb_setup()
704 DP_NOTICE(p_hwfn->cdev, qed_int_sp_sb_setup()
707 if (p_hwfn->p_sb_attn) qed_int_sp_sb_setup()
708 qed_int_sb_attn_setup(p_hwfn, p_ptt); qed_int_sp_sb_setup()
710 DP_NOTICE(p_hwfn->cdev, qed_int_sp_sb_setup()
714 int qed_int_register_cb(struct qed_hwfn *p_hwfn, qed_int_register_cb() argument
720 struct qed_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb; qed_int_register_cb()
739 int qed_int_unregister_cb(struct qed_hwfn *p_hwfn, u8 pi) qed_int_unregister_cb() argument
741 struct qed_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb; qed_int_unregister_cb()
753 u16 qed_int_get_sp_sb_id(struct qed_hwfn *p_hwfn) qed_int_get_sp_sb_id() argument
755 return p_hwfn->p_sp_sb->sb_info.igu_sb_id; qed_int_get_sp_sb_id()
758 void qed_int_igu_enable_int(struct qed_hwfn *p_hwfn, qed_int_igu_enable_int() argument
764 p_hwfn->cdev->int_mode = int_mode; qed_int_igu_enable_int()
765 switch (p_hwfn->cdev->int_mode) { qed_int_igu_enable_int()
783 qed_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, igu_pf_conf); qed_int_igu_enable_int()
786 int qed_int_igu_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, qed_int_igu_enable() argument
793 qed_wr(p_hwfn, p_ptt, qed_int_igu_enable()
797 qed_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0xfff); qed_int_igu_enable()
798 qed_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0xfff); qed_int_igu_enable()
804 qed_wr(p_hwfn, p_ptt, MISC_REG_AEU_MASK_ATTN_IGU, 0xff); qed_int_igu_enable()
805 if ((int_mode != QED_INT_MODE_INTA) || IS_LEAD_HWFN(p_hwfn)) { qed_int_igu_enable()
806 rc = qed_slowpath_irq_req(p_hwfn); qed_int_igu_enable()
808 DP_NOTICE(p_hwfn, "Slowpath IRQ request failed\n"); qed_int_igu_enable()
811 p_hwfn->b_int_requested = true; qed_int_igu_enable()
814 qed_int_igu_enable_int(p_hwfn, p_ptt, int_mode); qed_int_igu_enable()
815 p_hwfn->b_int_enabled = 1; qed_int_igu_enable()
820 void qed_int_igu_disable_int(struct qed_hwfn *p_hwfn, qed_int_igu_disable_int() argument
823 p_hwfn->b_int_enabled = 0; qed_int_igu_disable_int()
825 qed_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, 0); qed_int_igu_disable_int()
829 void qed_int_igu_cleanup_sb(struct qed_hwfn *p_hwfn, qed_int_igu_cleanup_sb() argument
854 qed_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_32LSB_DATA, data); qed_int_igu_cleanup_sb()
858 qed_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_CTRL, cmd_ctrl); qed_int_igu_cleanup_sb()
871 val = qed_rd(p_hwfn, p_ptt, sb_bit_addr); qed_int_igu_cleanup_sb()
880 DP_NOTICE(p_hwfn, qed_int_igu_cleanup_sb()
885 void qed_int_igu_init_pure_rt_single(struct qed_hwfn *p_hwfn, qed_int_igu_init_pure_rt_single() argument
895 qed_int_igu_cleanup_sb(p_hwfn, p_ptt, sb_id, 1, opaque); qed_int_igu_init_pure_rt_single()
898 qed_int_igu_cleanup_sb(p_hwfn, p_ptt, sb_id, 0, opaque); qed_int_igu_init_pure_rt_single()
902 qed_wr(p_hwfn, p_ptt, qed_int_igu_init_pure_rt_single()
906 void qed_int_igu_init_pure_rt(struct qed_hwfn *p_hwfn, qed_int_igu_init_pure_rt() argument
911 u32 igu_base_sb = p_hwfn->hw_info.p_igu_info->igu_base_sb; qed_int_igu_init_pure_rt()
912 u32 igu_sb_cnt = p_hwfn->hw_info.p_igu_info->igu_sb_cnt; qed_int_igu_init_pure_rt()
916 val = qed_rd(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION); qed_int_igu_init_pure_rt()
919 qed_wr(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION, val); qed_int_igu_init_pure_rt()
921 DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, qed_int_igu_init_pure_rt()
926 qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt, sb_id, qed_int_igu_init_pure_rt()
927 p_hwfn->hw_info.opaque_fid, qed_int_igu_init_pure_rt()
931 sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id; qed_int_igu_init_pure_rt()
932 DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, qed_int_igu_init_pure_rt()
934 qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt, sb_id, qed_int_igu_init_pure_rt()
935 p_hwfn->hw_info.opaque_fid, qed_int_igu_init_pure_rt()
940 int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn, qed_int_igu_read_cam() argument
949 p_hwfn->hw_info.p_igu_info = kzalloc(sizeof(*p_igu_info), GFP_ATOMIC); qed_int_igu_read_cam()
951 if (!p_hwfn->hw_info.p_igu_info) qed_int_igu_read_cam()
954 p_igu_info = p_hwfn->hw_info.p_igu_info; qed_int_igu_read_cam()
962 for (sb_id = 0; sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev); qed_int_igu_read_cam()
966 val = qed_rd(p_hwfn, p_ptt, qed_int_igu_read_cam()
981 DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, qed_int_igu_read_cam()
987 if (blk->function_id == p_hwfn->rel_pf_id) { qed_int_igu_read_cam()
998 DP_NOTICE(p_hwfn->cdev, qed_int_igu_read_cam()
1000 p_hwfn->rel_pf_id); qed_int_igu_read_cam()
1011 DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, qed_int_igu_read_cam()
1020 DP_NOTICE(p_hwfn, qed_int_igu_read_cam()
1034 * @param p_hwfn
1036 void qed_int_igu_init_rt(struct qed_hwfn *p_hwfn) qed_int_igu_init_rt() argument
1042 STORE_RT_REG(p_hwfn, IGU_REG_PF_CONFIGURATION_RT_OFFSET, igu_pf_conf); qed_int_igu_init_rt()
1045 u64 qed_int_igu_read_sisr_reg(struct qed_hwfn *p_hwfn) qed_int_igu_read_sisr_reg() argument
1055 intr_status_lo = REG_RD(p_hwfn, qed_int_igu_read_sisr_reg()
1058 intr_status_hi = REG_RD(p_hwfn, qed_int_igu_read_sisr_reg()
1066 static void qed_int_sp_dpc_setup(struct qed_hwfn *p_hwfn) qed_int_sp_dpc_setup() argument
1068 tasklet_init(p_hwfn->sp_dpc, qed_int_sp_dpc_setup()
1069 qed_int_sp_dpc, (unsigned long)p_hwfn); qed_int_sp_dpc_setup()
1070 p_hwfn->b_sp_dpc_enabled = true; qed_int_sp_dpc_setup()
1073 static int qed_int_sp_dpc_alloc(struct qed_hwfn *p_hwfn) qed_int_sp_dpc_alloc() argument
1075 p_hwfn->sp_dpc = kmalloc(sizeof(*p_hwfn->sp_dpc), GFP_ATOMIC); qed_int_sp_dpc_alloc()
1076 if (!p_hwfn->sp_dpc) qed_int_sp_dpc_alloc()
1082 static void qed_int_sp_dpc_free(struct qed_hwfn *p_hwfn) qed_int_sp_dpc_free() argument
1084 kfree(p_hwfn->sp_dpc); qed_int_sp_dpc_free()
1087 int qed_int_alloc(struct qed_hwfn *p_hwfn, qed_int_alloc() argument
1092 rc = qed_int_sp_dpc_alloc(p_hwfn); qed_int_alloc()
1094 DP_ERR(p_hwfn->cdev, "Failed to allocate sp dpc mem\n"); qed_int_alloc()
1097 rc = qed_int_sp_sb_alloc(p_hwfn, p_ptt); qed_int_alloc()
1099 DP_ERR(p_hwfn->cdev, "Failed to allocate sp sb mem\n"); qed_int_alloc()
1102 rc = qed_int_sb_attn_alloc(p_hwfn, p_ptt); qed_int_alloc()
1104 DP_ERR(p_hwfn->cdev, "Failed to allocate sb attn mem\n"); qed_int_alloc()
1110 void qed_int_free(struct qed_hwfn *p_hwfn) qed_int_free() argument
1112 qed_int_sp_sb_free(p_hwfn); qed_int_free()
1113 qed_int_sb_attn_free(p_hwfn); qed_int_free()
1114 qed_int_sp_dpc_free(p_hwfn); qed_int_free()
1117 void qed_int_setup(struct qed_hwfn *p_hwfn, qed_int_setup() argument
1120 qed_int_sp_sb_setup(p_hwfn, p_ptt); qed_int_setup()
1121 qed_int_sp_dpc_setup(p_hwfn); qed_int_setup()
1124 int qed_int_get_num_sbs(struct qed_hwfn *p_hwfn, qed_int_get_num_sbs() argument
1127 struct qed_igu_info *info = p_hwfn->hw_info.p_igu_info; qed_int_get_num_sbs()
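Taken together with their callers in the qed_dev.c results, the qed_int.c routines above split into an allocation step (slowpath DPC, slowpath status block, attention status block) and a setup step that programs them; qed_int_free() undoes the allocation. A minimal, hedged sketch of that pairing follows (the wrapper name is illustrative).

static int example_int_bringup(struct qed_hwfn *p_hwfn,
                               struct qed_ptt *p_ptt)
{
        int rc;

        rc = qed_int_alloc(p_hwfn, p_ptt);   /* sp DPC + sp SB + attn SB */
        if (rc)
                return rc;

        qed_int_setup(p_hwfn, p_ptt);        /* program SBs, init the DPC tasklet */
        return 0;                            /* teardown path calls qed_int_free() */
}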
qed_hw.h
60 * @param p_hwfn
62 void qed_gtt_init(struct qed_hwfn *p_hwfn);
67 * @param p_hwfn
69 void qed_ptt_invalidate(struct qed_hwfn *p_hwfn);
74 * @param p_hwfn
78 int qed_ptt_pool_alloc(struct qed_hwfn *p_hwfn);
83 * @param p_hwfn
85 void qed_ptt_pool_free(struct qed_hwfn *p_hwfn);
90 * @param p_hwfn
95 u32 qed_ptt_get_hw_addr(struct qed_hwfn *p_hwfn,
101 * @param p_hwfn
111 * @param p_hwfn
115 void qed_ptt_set_win(struct qed_hwfn *p_hwfn,
122 * @param p_hwfn
127 struct qed_ptt *qed_get_reserved_ptt(struct qed_hwfn *p_hwfn,
133 * @param p_hwfn
138 void qed_wr(struct qed_hwfn *p_hwfn,
146 * @param p_hwfn
151 u32 qed_rd(struct qed_hwfn *p_hwfn,
159 * @param p_hwfn
165 void qed_memcpy_from(struct qed_hwfn *p_hwfn,
175 * @param p_hwfn
181 void qed_memcpy_to(struct qed_hwfn *p_hwfn,
192 * @param p_hwfn
197 void qed_fid_pretend(struct qed_hwfn *p_hwfn,
205 * @param p_hwfn
209 void qed_port_pretend(struct qed_hwfn *p_hwfn,
217 * @param p_hwfn
220 void qed_port_unpretend(struct qed_hwfn *p_hwfn,
232 * which is part of p_hwfn.
233 * @param p_hwfn
235 int qed_dmae_info_alloc(struct qed_hwfn *p_hwfn);
239 * which is part of p_hwfn
241 * @param p_hwfn
243 void qed_dmae_info_free(struct qed_hwfn *p_hwfn);
257 u16 qed_get_qm_pq(struct qed_hwfn *p_hwfn,
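
qed_hw.h declares the PTT (per-PF translation table) window helpers that most qed_rd()/qed_wr() calls in this listing go through. A hedged usage sketch, assuming the acquire/release pair declared in qed_dev_api.h further down and the (p_hwfn, p_ptt, hw_addr[, val]) argument order implied by qed_hw.c:

        static u32 example_reg_rmw(struct qed_hwfn *p_hwfn, u32 hw_addr, u32 set_bits)
        {
                struct qed_ptt *p_ptt;
                u32 val;

                p_ptt = qed_ptt_acquire(p_hwfn);        /* may time out under PTT pressure */
                if (!p_ptt)
                        return 0;

                val = qed_rd(p_hwfn, p_ptt, hw_addr);   /* read through the PTT window */
                qed_wr(p_hwfn, p_ptt, hw_addr, val | set_bits);

                qed_ptt_release(p_hwfn, p_ptt);         /* return the window to the pool */
                return val;
        }
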
H A Dqed_init_ops.h28 * @param p_hwfn
35 int qed_init_run(struct qed_hwfn *p_hwfn,
45 * @param p_hwfn
49 int qed_init_alloc(struct qed_hwfn *p_hwfn);
55 * @param p_hwfn
57 void qed_init_free(struct qed_hwfn *p_hwfn);
63 * @param p_hwfn
65 void qed_init_clear_rt_data(struct qed_hwfn *p_hwfn);
71 * @param p_hwfn
75 void qed_init_store_rt_reg(struct qed_hwfn *p_hwfn,
89 * @param p_hwfn
94 void qed_init_store_rt_agg(struct qed_hwfn *p_hwfn,
107 * @param p_hwfn
109 void qed_gtt_init(struct qed_hwfn *p_hwfn);
H A Dqed_init_ops.c53 void qed_init_clear_rt_data(struct qed_hwfn *p_hwfn) qed_init_clear_rt_data() argument
58 p_hwfn->rt_data[i].b_valid = false; qed_init_clear_rt_data()
61 void qed_init_store_rt_reg(struct qed_hwfn *p_hwfn, qed_init_store_rt_reg() argument
65 p_hwfn->rt_data[rt_offset].init_val = val; qed_init_store_rt_reg()
66 p_hwfn->rt_data[rt_offset].b_valid = true; qed_init_store_rt_reg()
69 void qed_init_store_rt_agg(struct qed_hwfn *p_hwfn, qed_init_store_rt_agg() argument
77 p_hwfn->rt_data[rt_offset + i].init_val = val[i]; qed_init_store_rt_agg()
78 p_hwfn->rt_data[rt_offset + i].b_valid = true; qed_init_store_rt_agg()
82 static void qed_init_rt(struct qed_hwfn *p_hwfn, qed_init_rt() argument
88 struct qed_rt_data *rt_data = p_hwfn->rt_data + rt_offset; qed_init_rt()
94 qed_wr(p_hwfn, p_ptt, addr + (i << 2), rt_data[i].init_val); qed_init_rt()
98 int qed_init_alloc(struct qed_hwfn *p_hwfn) qed_init_alloc() argument
106 p_hwfn->rt_data = rt_data; qed_init_alloc()
111 void qed_init_free(struct qed_hwfn *p_hwfn) qed_init_free() argument
113 kfree(p_hwfn->rt_data); qed_init_free()
114 p_hwfn->rt_data = NULL; qed_init_free()
117 static int qed_init_array_dmae(struct qed_hwfn *p_hwfn, qed_init_array_dmae() argument
134 qed_wr(p_hwfn, p_ptt, addr + (i << 2), data[i]); qed_init_array_dmae()
136 rc = qed_dmae_host2grc(p_hwfn, p_ptt, qed_init_array_dmae()
144 static int qed_init_fill_dmae(struct qed_hwfn *p_hwfn, qed_init_fill_dmae() argument
161 return qed_dmae_host2grc(p_hwfn, p_ptt, qed_init_fill_dmae()
167 static void qed_init_fill(struct qed_hwfn *p_hwfn, qed_init_fill() argument
176 qed_wr(p_hwfn, p_ptt, addr, fill); qed_init_fill()
179 static int qed_init_cmd_array(struct qed_hwfn *p_hwfn, qed_init_cmd_array() argument
189 struct qed_dev *cdev = p_hwfn->cdev; qed_init_cmd_array()
206 memset(p_hwfn->unzip_buf, 0, max_size); qed_init_cmd_array()
208 output_len = qed_unzip_data(p_hwfn, input_len, qed_init_cmd_array()
210 max_size, (u8 *)p_hwfn->unzip_buf); qed_init_cmd_array()
212 rc = qed_init_array_dmae(p_hwfn, p_ptt, addr, 0, qed_init_cmd_array()
214 p_hwfn->unzip_buf, qed_init_cmd_array()
217 DP_NOTICE(p_hwfn, "Failed to unzip dmae data\n"); qed_init_cmd_array()
230 rc = qed_init_array_dmae(p_hwfn, p_ptt, addr, qed_init_cmd_array()
241 rc = qed_init_array_dmae(p_hwfn, p_ptt, addr, qed_init_cmd_array()
252 static int qed_init_cmd_wr(struct qed_hwfn *p_hwfn, qed_init_cmd_wr() argument
265 DP_NOTICE(p_hwfn, qed_init_cmd_wr()
273 qed_wr(p_hwfn, p_ptt, addr, qed_init_cmd_wr()
279 rc = qed_init_fill_dmae(p_hwfn, p_ptt, addr, 0, qed_init_cmd_wr()
282 qed_init_fill(p_hwfn, p_ptt, addr, 0, qed_init_cmd_wr()
286 rc = qed_init_cmd_array(p_hwfn, p_ptt, cmd, qed_init_cmd_wr()
290 qed_init_rt(p_hwfn, p_ptt, addr, qed_init_cmd_wr()
315 static void qed_init_cmd_rd(struct qed_hwfn *p_hwfn, qed_init_cmd_rd() argument
326 val = qed_rd(p_hwfn, p_ptt, addr); qed_init_cmd_rd()
344 DP_ERR(p_hwfn, "Invalid poll comparison type %08x\n", qed_init_cmd_rd()
354 val = qed_rd(p_hwfn, p_ptt, addr); qed_init_cmd_rd()
358 DP_ERR(p_hwfn, qed_init_cmd_rd()
366 static void qed_init_cmd_cb(struct qed_hwfn *p_hwfn, qed_init_cmd_cb() argument
370 DP_NOTICE(p_hwfn, "Currently init values have no need of callbacks\n"); qed_init_cmd_cb()
373 static u8 qed_init_cmd_mode_match(struct qed_hwfn *p_hwfn, qed_init_cmd_mode_match() argument
377 struct qed_dev *cdev = p_hwfn->cdev; qed_init_cmd_mode_match()
385 return qed_init_cmd_mode_match(p_hwfn, offset, modes) ^ 1; qed_init_cmd_mode_match()
387 arg1 = qed_init_cmd_mode_match(p_hwfn, offset, modes); qed_init_cmd_mode_match()
388 arg2 = qed_init_cmd_mode_match(p_hwfn, offset, modes); qed_init_cmd_mode_match()
391 arg1 = qed_init_cmd_mode_match(p_hwfn, offset, modes); qed_init_cmd_mode_match()
392 arg2 = qed_init_cmd_mode_match(p_hwfn, offset, modes); qed_init_cmd_mode_match()
400 static u32 qed_init_cmd_mode(struct qed_hwfn *p_hwfn, qed_init_cmd_mode() argument
406 if (qed_init_cmd_mode_match(p_hwfn, &offset, modes)) qed_init_cmd_mode()
413 static u32 qed_init_cmd_phase(struct qed_hwfn *p_hwfn, qed_init_cmd_phase() argument
429 int qed_init_run(struct qed_hwfn *p_hwfn, qed_init_run() argument
435 struct qed_dev *cdev = p_hwfn->cdev; qed_init_run()
444 p_hwfn->unzip_buf = kzalloc(MAX_ZIPPED_SIZE * 4, GFP_ATOMIC); qed_init_run()
445 if (!p_hwfn->unzip_buf) { qed_init_run()
446 DP_NOTICE(p_hwfn, "Failed to allocate unzip buffer\n"); qed_init_run()
456 rc = qed_init_cmd_wr(p_hwfn, p_ptt, &cmd->write, qed_init_run()
460 qed_init_cmd_rd(p_hwfn, p_ptt, &cmd->read); qed_init_run()
463 cmd_num += qed_init_cmd_mode(p_hwfn, &cmd->if_mode, qed_init_run()
467 cmd_num += qed_init_cmd_phase(p_hwfn, &cmd->if_phase, qed_init_run()
479 qed_init_cmd_cb(p_hwfn, p_ptt, &cmd->callback); qed_init_run()
487 kfree(p_hwfn->unzip_buf); qed_init_run()
491 void qed_gtt_init(struct qed_hwfn *p_hwfn) qed_gtt_init() argument
501 REG_WR(p_hwfn, gtt_base + i * PXP_GLOBAL_ENTRY_SIZE, qed_gtt_init()
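
qed_init_ops.c shows the runtime-array staging model: values are parked in p_hwfn->rt_data by qed_init_store_rt_reg()/STORE_RT_REG() and only reach the chip when an init phase is replayed. A rough sketch of that flow; the phase/mode arguments of qed_init_run() are not visible here, so they are left as a comment rather than asserted:

        static int example_rt_staging(struct qed_hwfn *p_hwfn, u32 rt_offset, u32 val)
        {
                int rc;

                rc = qed_init_alloc(p_hwfn);            /* allocates the rt_data shadow */
                if (rc)
                        return rc;

                qed_init_clear_rt_data(p_hwfn);         /* mark every entry !b_valid */
                qed_init_store_rt_reg(p_hwfn, rt_offset, val);  /* stage only, no MMIO yet */

                /* a later qed_init_run() pass writes the valid entries to the chip;
                 * its phase/mode arguments are not part of this listing */

                qed_init_free(p_hwfn);
                return 0;
        }
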
H A Dqed_cxt.c59 #define CONN_CXT_SIZE(p_hwfn) \
60 ALIGNED_TYPE_SIZE(union conn_context, p_hwfn)
144 static void qed_cxt_qm_iids(struct qed_hwfn *p_hwfn, qed_cxt_qm_iids() argument
147 struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; qed_cxt_qm_iids()
153 DP_VERBOSE(p_hwfn, QED_MSG_ILT, "iids: CIDS %08x\n", iids->cids); qed_cxt_qm_iids()
157 static void qed_cxt_set_proto_cid_count(struct qed_hwfn *p_hwfn, qed_cxt_set_proto_cid_count() argument
161 struct qed_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr; qed_cxt_set_proto_cid_count()
185 static void qed_ilt_cli_adv_line(struct qed_hwfn *p_hwfn, qed_ilt_cli_adv_line() argument
201 DP_VERBOSE(p_hwfn, QED_MSG_ILT, qed_ilt_cli_adv_line()
208 int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn) qed_cxt_cfg_ilt_compute() argument
210 struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; qed_cxt_cfg_ilt_compute()
218 p_mngr->pf_start_line = RESC_START(p_hwfn, QED_ILT); qed_cxt_cfg_ilt_compute()
220 DP_VERBOSE(p_hwfn, QED_MSG_ILT, qed_cxt_cfg_ilt_compute()
222 p_hwfn->my_id, p_hwfn->p_cxt_mngr->pf_start_line); qed_cxt_cfg_ilt_compute()
234 total = pf_cids * CONN_CXT_SIZE(p_hwfn); qed_cxt_cfg_ilt_compute()
237 total, CONN_CXT_SIZE(p_hwfn)); qed_cxt_cfg_ilt_compute()
239 qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_CDUC); qed_cxt_cfg_ilt_compute()
246 qed_cxt_qm_iids(p_hwfn, &qm_iids); qed_cxt_cfg_ilt_compute()
247 total = qed_qm_pf_mem_size(p_hwfn->rel_pf_id, qm_iids.cids, 0, 0, qed_cxt_cfg_ilt_compute()
248 p_hwfn->qm_info.num_pqs, 0); qed_cxt_cfg_ilt_compute()
250 DP_VERBOSE(p_hwfn, QED_MSG_ILT, qed_cxt_cfg_ilt_compute()
252 qm_iids.cids, p_hwfn->qm_info.num_pqs, total); qed_cxt_cfg_ilt_compute()
258 qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_QM); qed_cxt_cfg_ilt_compute()
261 if (curr_line - p_hwfn->p_cxt_mngr->pf_start_line > qed_cxt_cfg_ilt_compute()
262 RESC_NUM(p_hwfn, QED_ILT)) { qed_cxt_cfg_ilt_compute()
263 DP_ERR(p_hwfn, "too many ilt lines...#lines=%d\n", qed_cxt_cfg_ilt_compute()
264 curr_line - p_hwfn->p_cxt_mngr->pf_start_line); qed_cxt_cfg_ilt_compute()
290 static void qed_ilt_shadow_free(struct qed_hwfn *p_hwfn) qed_ilt_shadow_free() argument
292 struct qed_ilt_client_cfg *p_cli = p_hwfn->p_cxt_mngr->clients; qed_ilt_shadow_free()
293 struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; qed_ilt_shadow_free()
302 dma_free_coherent(&p_hwfn->cdev->pdev->dev, qed_ilt_shadow_free()
310 static int qed_ilt_blk_alloc(struct qed_hwfn *p_hwfn, qed_ilt_blk_alloc() argument
315 struct qed_dma_mem *ilt_shadow = p_hwfn->p_cxt_mngr->ilt_shadow; qed_ilt_blk_alloc()
324 p_hwfn->p_cxt_mngr->pf_start_line; qed_ilt_blk_alloc()
333 p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, qed_ilt_blk_alloc()
345 DP_VERBOSE(p_hwfn, QED_MSG_ILT, qed_ilt_blk_alloc()
356 static int qed_ilt_shadow_alloc(struct qed_hwfn *p_hwfn) qed_ilt_shadow_alloc() argument
358 struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; qed_ilt_shadow_alloc()
368 DP_NOTICE(p_hwfn, "Failed to allocate ilt shadow table\n"); qed_ilt_shadow_alloc()
373 DP_VERBOSE(p_hwfn, QED_MSG_ILT, qed_ilt_shadow_alloc()
382 rc = qed_ilt_blk_alloc(p_hwfn, p_blk, i, 0); for_each_ilt_valid_client()
391 qed_ilt_shadow_free(p_hwfn);
395 static void qed_cid_map_free(struct qed_hwfn *p_hwfn) qed_cid_map_free() argument
397 struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; qed_cid_map_free()
407 static int qed_cid_map_alloc(struct qed_hwfn *p_hwfn) qed_cid_map_alloc() argument
409 struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; qed_cid_map_alloc()
414 u32 cid_cnt = p_hwfn->p_cxt_mngr->conn_cfg[type].cid_count; qed_cid_map_alloc()
430 p_hwfn->p_cxt_mngr->conn_cfg[type].cid_start = start_cid; qed_cid_map_alloc()
432 DP_VERBOSE(p_hwfn, QED_MSG_CXT, qed_cid_map_alloc()
442 qed_cid_map_free(p_hwfn); qed_cid_map_alloc()
446 int qed_cxt_mngr_alloc(struct qed_hwfn *p_hwfn) qed_cxt_mngr_alloc() argument
453 DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_cxt_mngr'\n"); qed_cxt_mngr_alloc()
471 p_hwfn->p_cxt_mngr = p_mngr; qed_cxt_mngr_alloc()
476 int qed_cxt_tables_alloc(struct qed_hwfn *p_hwfn) qed_cxt_tables_alloc() argument
481 rc = qed_ilt_shadow_alloc(p_hwfn); qed_cxt_tables_alloc()
483 DP_NOTICE(p_hwfn, "Failed to allocate ilt memory\n"); qed_cxt_tables_alloc()
488 rc = qed_cid_map_alloc(p_hwfn); qed_cxt_tables_alloc()
490 DP_NOTICE(p_hwfn, "Failed to allocate cid maps\n"); qed_cxt_tables_alloc()
497 qed_cxt_mngr_free(p_hwfn); qed_cxt_tables_alloc()
501 void qed_cxt_mngr_free(struct qed_hwfn *p_hwfn) qed_cxt_mngr_free() argument
503 if (!p_hwfn->p_cxt_mngr) qed_cxt_mngr_free()
506 qed_cid_map_free(p_hwfn); qed_cxt_mngr_free()
507 qed_ilt_shadow_free(p_hwfn); qed_cxt_mngr_free()
508 kfree(p_hwfn->p_cxt_mngr); qed_cxt_mngr_free()
510 p_hwfn->p_cxt_mngr = NULL; qed_cxt_mngr_free()
513 void qed_cxt_mngr_setup(struct qed_hwfn *p_hwfn) qed_cxt_mngr_setup() argument
515 struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; qed_cxt_mngr_setup()
520 u32 cid_cnt = p_hwfn->p_cxt_mngr->conn_cfg[type].cid_count; qed_cxt_mngr_setup()
551 static void qed_cdu_init_common(struct qed_hwfn *p_hwfn) qed_cdu_init_common() argument
556 page_sz = p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC].p_size.val; qed_cdu_init_common()
557 cxt_size = CONN_CXT_SIZE(p_hwfn); qed_cdu_init_common()
564 STORE_RT_REG(p_hwfn, CDU_REG_CID_ADDR_PARAMS_RT_OFFSET, cdu_params); qed_cdu_init_common()
567 void qed_qm_init_pf(struct qed_hwfn *p_hwfn) qed_qm_init_pf() argument
570 struct qed_qm_info *qm_info = &p_hwfn->qm_info; qed_qm_init_pf()
574 qed_cxt_qm_iids(p_hwfn, &iids); qed_qm_init_pf()
577 params.port_id = p_hwfn->port_id; qed_qm_init_pf()
578 params.pf_id = p_hwfn->rel_pf_id; qed_qm_init_pf()
580 params.is_first_pf = p_hwfn->first_on_engine; qed_qm_init_pf()
590 qed_qm_pf_rt_init(p_hwfn, p_hwfn->p_main_ptt, &params); qed_qm_init_pf()
594 static int qed_cm_init_pf(struct qed_hwfn *p_hwfn) qed_cm_init_pf() argument
602 pq = qed_get_qm_pq(p_hwfn, PROTOCOLID_CORE, &pq_params); qed_cm_init_pf()
603 STORE_RT_REG(p_hwfn, XCM_REG_CON_PHY_Q3_RT_OFFSET, pq); qed_cm_init_pf()
609 static void qed_dq_init_pf(struct qed_hwfn *p_hwfn) qed_dq_init_pf() argument
611 struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; qed_dq_init_pf()
615 STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_0_RT_OFFSET, dq_pf_max_cid); qed_dq_init_pf()
618 STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_1_RT_OFFSET, dq_pf_max_cid); qed_dq_init_pf()
621 STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_2_RT_OFFSET, dq_pf_max_cid); qed_dq_init_pf()
624 STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_3_RT_OFFSET, dq_pf_max_cid); qed_dq_init_pf()
627 STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_4_RT_OFFSET, dq_pf_max_cid); qed_dq_init_pf()
631 STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_5_RT_OFFSET, dq_pf_max_cid); qed_dq_init_pf()
634 static void qed_ilt_bounds_init(struct qed_hwfn *p_hwfn) qed_ilt_bounds_init() argument
639 ilt_clients = p_hwfn->p_cxt_mngr->clients; for_each_ilt_valid_client()
643 STORE_RT_REG(p_hwfn, for_each_ilt_valid_client()
646 STORE_RT_REG(p_hwfn, for_each_ilt_valid_client()
649 STORE_RT_REG(p_hwfn, for_each_ilt_valid_client()
656 static void qed_ilt_init_pf(struct qed_hwfn *p_hwfn) qed_ilt_init_pf() argument
663 qed_ilt_bounds_init(p_hwfn); qed_ilt_init_pf()
665 p_mngr = p_hwfn->p_cxt_mngr; qed_ilt_init_pf()
667 clients = p_hwfn->p_cxt_mngr->clients; qed_ilt_init_pf()
692 DP_VERBOSE(p_hwfn, QED_MSG_ILT, for_each_ilt_valid_client()
698 STORE_RT_REG_AGG(p_hwfn, rt_offst, ilt_hw_entry); for_each_ilt_valid_client()
703 void qed_cxt_hw_init_common(struct qed_hwfn *p_hwfn) qed_cxt_hw_init_common() argument
705 qed_cdu_init_common(p_hwfn); qed_cxt_hw_init_common()
708 void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn) qed_cxt_hw_init_pf() argument
710 qed_qm_init_pf(p_hwfn); qed_cxt_hw_init_pf()
711 qed_cm_init_pf(p_hwfn); qed_cxt_hw_init_pf()
712 qed_dq_init_pf(p_hwfn); qed_cxt_hw_init_pf()
713 qed_ilt_init_pf(p_hwfn); qed_cxt_hw_init_pf()
716 int qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn, qed_cxt_acquire_cid() argument
720 struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; qed_cxt_acquire_cid()
724 DP_NOTICE(p_hwfn, "Invalid protocol type %d", type); qed_cxt_acquire_cid()
732 DP_NOTICE(p_hwfn, "no CID available for protocol %d\n", qed_cxt_acquire_cid()
744 static bool qed_cxt_test_cid_acquired(struct qed_hwfn *p_hwfn, qed_cxt_test_cid_acquired() argument
748 struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; qed_cxt_test_cid_acquired()
766 DP_NOTICE(p_hwfn, "Invalid CID %d", cid); qed_cxt_test_cid_acquired()
772 DP_NOTICE(p_hwfn, "CID %d not acquired", cid); qed_cxt_test_cid_acquired()
778 void qed_cxt_release_cid(struct qed_hwfn *p_hwfn, qed_cxt_release_cid() argument
781 struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; qed_cxt_release_cid()
787 b_acquired = qed_cxt_test_cid_acquired(p_hwfn, cid, &type); qed_cxt_release_cid()
796 int qed_cxt_get_cid_info(struct qed_hwfn *p_hwfn, qed_cxt_get_cid_info() argument
799 struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; qed_cxt_get_cid_info()
805 b_acquired = qed_cxt_test_cid_acquired(p_hwfn, p_info->iid, &type); qed_cxt_get_cid_info()
814 hw_p_size = p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC].p_size.val; qed_cxt_get_cid_info()
816 conn_cxt_size = CONN_CXT_SIZE(p_hwfn); qed_cxt_get_cid_info()
827 DP_VERBOSE(p_hwfn, (QED_MSG_ILT | QED_MSG_CXT), qed_cxt_get_cid_info()
834 int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn) qed_cxt_set_pf_params() argument
836 struct qed_eth_pf_params *p_params = &p_hwfn->pf_params.eth_pf_params; qed_cxt_set_pf_params()
841 qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_CORE, core_cids); qed_cxt_set_pf_params()
843 qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_ETH, qed_cxt_set_pf_params()
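
qed_cxt.c manages connection IDs (CIDs) and the ILT shadow behind them. The acquire/release pair visible above composes into a simple lifecycle; this is a sketch only, with the ramrod work that would normally use the CID elided:

        static int example_cid_lifecycle(struct qed_hwfn *p_hwfn)
        {
                u32 cid;
                int rc;

                rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH, &cid);
                if (rc)
                        return rc;      /* "no CID available for protocol" path above */

                /* ... program a queue/ramrod against this cid ... */

                qed_cxt_release_cid(p_hwfn, cid);       /* returns the bit to the cid map */
                return 0;
        }
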
H A Dqed_hw.c44 int qed_ptt_pool_alloc(struct qed_hwfn *p_hwfn) qed_ptt_pool_alloc() argument
63 p_hwfn->p_ptt_pool = p_pool; qed_ptt_pool_alloc()
69 void qed_ptt_invalidate(struct qed_hwfn *p_hwfn) qed_ptt_invalidate() argument
75 p_ptt = &p_hwfn->p_ptt_pool->ptts[i]; qed_ptt_invalidate()
80 void qed_ptt_pool_free(struct qed_hwfn *p_hwfn) qed_ptt_pool_free() argument
82 kfree(p_hwfn->p_ptt_pool); qed_ptt_pool_free()
83 p_hwfn->p_ptt_pool = NULL; qed_ptt_pool_free()
86 struct qed_ptt *qed_ptt_acquire(struct qed_hwfn *p_hwfn) qed_ptt_acquire() argument
93 spin_lock_bh(&p_hwfn->p_ptt_pool->lock); qed_ptt_acquire()
95 if (!list_empty(&p_hwfn->p_ptt_pool->free_list)) { qed_ptt_acquire()
96 p_ptt = list_first_entry(&p_hwfn->p_ptt_pool->free_list, qed_ptt_acquire()
100 spin_unlock_bh(&p_hwfn->p_ptt_pool->lock); qed_ptt_acquire()
102 DP_VERBOSE(p_hwfn, NETIF_MSG_HW, qed_ptt_acquire()
107 spin_unlock_bh(&p_hwfn->p_ptt_pool->lock); qed_ptt_acquire()
111 DP_NOTICE(p_hwfn, "PTT acquire timeout - failed to allocate PTT\n"); qed_ptt_acquire()
115 void qed_ptt_release(struct qed_hwfn *p_hwfn, qed_ptt_release() argument
118 spin_lock_bh(&p_hwfn->p_ptt_pool->lock); qed_ptt_release()
119 list_add(&p_ptt->list_entry, &p_hwfn->p_ptt_pool->free_list); qed_ptt_release()
120 spin_unlock_bh(&p_hwfn->p_ptt_pool->lock); qed_ptt_release()
123 u32 qed_ptt_get_hw_addr(struct qed_hwfn *p_hwfn, qed_ptt_get_hw_addr() argument
142 void qed_ptt_set_win(struct qed_hwfn *p_hwfn, qed_ptt_set_win() argument
148 prev_hw_addr = qed_ptt_get_hw_addr(p_hwfn, p_ptt); qed_ptt_set_win()
154 DP_VERBOSE(p_hwfn, NETIF_MSG_HW, qed_ptt_set_win()
161 REG_WR(p_hwfn, qed_ptt_set_win()
167 static u32 qed_set_ptt(struct qed_hwfn *p_hwfn, qed_set_ptt() argument
171 u32 win_hw_addr = qed_ptt_get_hw_addr(p_hwfn, p_ptt); qed_set_ptt()
179 qed_ptt_set_win(p_hwfn, p_ptt, hw_addr); qed_set_ptt()
186 struct qed_ptt *qed_get_reserved_ptt(struct qed_hwfn *p_hwfn, qed_get_reserved_ptt() argument
190 DP_NOTICE(p_hwfn, qed_get_reserved_ptt()
195 return &p_hwfn->p_ptt_pool->ptts[ptt_idx]; qed_get_reserved_ptt()
198 void qed_wr(struct qed_hwfn *p_hwfn, qed_wr() argument
202 u32 bar_addr = qed_set_ptt(p_hwfn, p_ptt, hw_addr); qed_wr()
204 REG_WR(p_hwfn, bar_addr, val); qed_wr()
205 DP_VERBOSE(p_hwfn, NETIF_MSG_HW, qed_wr()
210 u32 qed_rd(struct qed_hwfn *p_hwfn, qed_rd() argument
214 u32 bar_addr = qed_set_ptt(p_hwfn, p_ptt, hw_addr); qed_rd()
215 u32 val = REG_RD(p_hwfn, bar_addr); qed_rd()
217 DP_VERBOSE(p_hwfn, NETIF_MSG_HW, qed_rd()
224 static void qed_memcpy_hw(struct qed_hwfn *p_hwfn, qed_memcpy_hw() argument
239 qed_ptt_set_win(p_hwfn, p_ptt, hw_addr + done); qed_memcpy_hw()
244 reg_addr = (u32 __iomem *)REG_ADDR(p_hwfn, hw_offset); qed_memcpy_hw()
256 void qed_memcpy_from(struct qed_hwfn *p_hwfn, qed_memcpy_from() argument
260 DP_VERBOSE(p_hwfn, NETIF_MSG_HW, qed_memcpy_from()
264 qed_memcpy_hw(p_hwfn, p_ptt, dest, hw_addr, n, false); qed_memcpy_from()
267 void qed_memcpy_to(struct qed_hwfn *p_hwfn, qed_memcpy_to() argument
271 DP_VERBOSE(p_hwfn, NETIF_MSG_HW, qed_memcpy_to()
275 qed_memcpy_hw(p_hwfn, p_ptt, src, hw_addr, n, true); qed_memcpy_to()
278 void qed_fid_pretend(struct qed_hwfn *p_hwfn, qed_fid_pretend() argument
300 REG_WR(p_hwfn, qed_fid_pretend()
306 void qed_port_pretend(struct qed_hwfn *p_hwfn, qed_port_pretend() argument
318 REG_WR(p_hwfn, qed_port_pretend()
324 void qed_port_unpretend(struct qed_hwfn *p_hwfn, qed_port_unpretend() argument
335 REG_WR(p_hwfn, qed_port_unpretend()
342 static void qed_dmae_opcode(struct qed_hwfn *p_hwfn, qed_dmae_opcode() argument
357 opcode |= ((p_hwfn->rel_pf_id & DMAE_CMD_SRC_PF_ID_MASK) << qed_dmae_opcode()
364 opcode |= ((p_hwfn->rel_pf_id & DMAE_CMD_DST_PF_ID_MASK) << qed_dmae_opcode()
380 opcode |= ((p_hwfn->port_id) << DMAE_CMD_PORT_ID_SHIFT); qed_dmae_opcode()
396 p_hwfn->dmae_info.p_dmae_cmd->opcode = cpu_to_le32(opcode); qed_dmae_opcode()
397 p_hwfn->dmae_info.p_dmae_cmd->opcode_b = cpu_to_le16(opcodeB); qed_dmae_opcode()
407 qed_dmae_post_command(struct qed_hwfn *p_hwfn, qed_dmae_post_command() argument
410 struct dmae_cmd *command = p_hwfn->dmae_info.p_dmae_cmd; qed_dmae_post_command()
411 u8 idx_cmd = p_hwfn->dmae_info.channel, i; qed_dmae_post_command()
417 DP_NOTICE(p_hwfn, qed_dmae_post_command()
432 DP_VERBOSE(p_hwfn, qed_dmae_post_command()
454 qed_wr(p_hwfn, p_ptt, qed_dmae_post_command()
460 qed_wr(p_hwfn, p_ptt, qed_dmae_post_command()
467 int qed_dmae_info_alloc(struct qed_hwfn *p_hwfn) qed_dmae_info_alloc() argument
469 dma_addr_t *p_addr = &p_hwfn->dmae_info.completion_word_phys_addr; qed_dmae_info_alloc()
470 struct dmae_cmd **p_cmd = &p_hwfn->dmae_info.p_dmae_cmd; qed_dmae_info_alloc()
471 u32 **p_buff = &p_hwfn->dmae_info.p_intermediate_buffer; qed_dmae_info_alloc()
472 u32 **p_comp = &p_hwfn->dmae_info.p_completion_word; qed_dmae_info_alloc()
474 *p_comp = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, qed_dmae_info_alloc()
479 DP_NOTICE(p_hwfn, "Failed to allocate `p_completion_word'\n"); qed_dmae_info_alloc()
483 p_addr = &p_hwfn->dmae_info.dmae_cmd_phys_addr; qed_dmae_info_alloc()
484 *p_cmd = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, qed_dmae_info_alloc()
488 DP_NOTICE(p_hwfn, "Failed to allocate `struct dmae_cmd'\n"); qed_dmae_info_alloc()
492 p_addr = &p_hwfn->dmae_info.intermediate_buffer_phys_addr; qed_dmae_info_alloc()
493 *p_buff = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, qed_dmae_info_alloc()
497 DP_NOTICE(p_hwfn, "Failed to allocate `intermediate_buffer'\n"); qed_dmae_info_alloc()
501 p_hwfn->dmae_info.channel = p_hwfn->rel_pf_id; qed_dmae_info_alloc()
505 qed_dmae_info_free(p_hwfn); qed_dmae_info_alloc()
509 void qed_dmae_info_free(struct qed_hwfn *p_hwfn) qed_dmae_info_free() argument
514 mutex_lock(&p_hwfn->dmae_info.mutex); qed_dmae_info_free()
516 if (p_hwfn->dmae_info.p_completion_word) { qed_dmae_info_free()
517 p_phys = p_hwfn->dmae_info.completion_word_phys_addr; qed_dmae_info_free()
518 dma_free_coherent(&p_hwfn->cdev->pdev->dev, qed_dmae_info_free()
520 p_hwfn->dmae_info.p_completion_word, qed_dmae_info_free()
522 p_hwfn->dmae_info.p_completion_word = NULL; qed_dmae_info_free()
525 if (p_hwfn->dmae_info.p_dmae_cmd) { qed_dmae_info_free()
526 p_phys = p_hwfn->dmae_info.dmae_cmd_phys_addr; qed_dmae_info_free()
527 dma_free_coherent(&p_hwfn->cdev->pdev->dev, qed_dmae_info_free()
529 p_hwfn->dmae_info.p_dmae_cmd, qed_dmae_info_free()
531 p_hwfn->dmae_info.p_dmae_cmd = NULL; qed_dmae_info_free()
534 if (p_hwfn->dmae_info.p_intermediate_buffer) { qed_dmae_info_free()
535 p_phys = p_hwfn->dmae_info.intermediate_buffer_phys_addr; qed_dmae_info_free()
536 dma_free_coherent(&p_hwfn->cdev->pdev->dev, qed_dmae_info_free()
538 p_hwfn->dmae_info.p_intermediate_buffer, qed_dmae_info_free()
540 p_hwfn->dmae_info.p_intermediate_buffer = NULL; qed_dmae_info_free()
543 mutex_unlock(&p_hwfn->dmae_info.mutex); qed_dmae_info_free()
546 static int qed_dmae_operation_wait(struct qed_hwfn *p_hwfn) qed_dmae_operation_wait() argument
554 while (*p_hwfn->dmae_info.p_completion_word != DMAE_COMPLETION_VAL) { qed_dmae_operation_wait()
557 DP_NOTICE(p_hwfn->cdev, qed_dmae_operation_wait()
559 *p_hwfn->dmae_info.p_completion_word, qed_dmae_operation_wait()
572 *p_hwfn->dmae_info.p_completion_word = 0; qed_dmae_operation_wait()
577 static int qed_dmae_execute_sub_operation(struct qed_hwfn *p_hwfn, qed_dmae_execute_sub_operation() argument
585 dma_addr_t phys = p_hwfn->dmae_info.intermediate_buffer_phys_addr; qed_dmae_execute_sub_operation()
586 struct dmae_cmd *cmd = p_hwfn->dmae_info.p_dmae_cmd; qed_dmae_execute_sub_operation()
599 memcpy(&p_hwfn->dmae_info.p_intermediate_buffer[0], qed_dmae_execute_sub_operation()
624 qed_dmae_post_command(p_hwfn, p_ptt); qed_dmae_execute_sub_operation()
626 qed_status = qed_dmae_operation_wait(p_hwfn); qed_dmae_execute_sub_operation()
629 DP_NOTICE(p_hwfn, qed_dmae_execute_sub_operation()
639 &p_hwfn->dmae_info.p_intermediate_buffer[0], qed_dmae_execute_sub_operation()
645 static int qed_dmae_execute_command(struct qed_hwfn *p_hwfn, qed_dmae_execute_command() argument
652 dma_addr_t phys = p_hwfn->dmae_info.completion_word_phys_addr; qed_dmae_execute_command()
654 struct dmae_cmd *cmd = p_hwfn->dmae_info.p_dmae_cmd; qed_dmae_execute_command()
660 qed_dmae_opcode(p_hwfn, qed_dmae_execute_command()
697 qed_status = qed_dmae_execute_sub_operation(p_hwfn, qed_dmae_execute_command()
705 DP_NOTICE(p_hwfn, qed_dmae_execute_command()
718 int qed_dmae_host2grc(struct qed_hwfn *p_hwfn, qed_dmae_host2grc() argument
732 mutex_lock(&p_hwfn->dmae_info.mutex); qed_dmae_host2grc()
734 rc = qed_dmae_execute_command(p_hwfn, p_ptt, source_addr, qed_dmae_host2grc()
740 mutex_unlock(&p_hwfn->dmae_info.mutex); qed_dmae_host2grc()
745 u16 qed_get_qm_pq(struct qed_hwfn *p_hwfn, qed_get_qm_pq() argument
753 DP_NOTICE(p_hwfn, qed_get_qm_pq()
762 pq_id = p_hwfn->qm_info.pure_lb_pq; qed_get_qm_pq()
764 pq_id = p_hwfn->qm_info.offload_pq; qed_get_qm_pq()
773 pq_id = CM_TX_PQ_BASE + pq_id + RESC_START(p_hwfn, QED_PQ); qed_get_qm_pq()
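
qed_hw.c ends with the DMAE engine: qed_dmae_info_alloc() sets up the per-hwfn command buffer, completion word and intermediate buffer, and qed_dmae_host2grc() copies a host buffer into GRC under dmae_info.mutex. A hedged sketch of a host-to-GRC push; only the first parameters of qed_dmae_host2grc() appear in this listing, so the trailing (size, flags) layout is an assumption:

        static int example_dmae_push(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
                                     u32 grc_addr, u32 size_in_dwords)
        {
                dma_addr_t phys;
                void *virt;
                int rc;

                virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
                                          size_in_dwords * sizeof(u32), &phys, GFP_KERNEL);
                if (!virt)
                        return -ENOMEM;

                /* fill virt[], then DMA it into GRC space (argument tail assumed) */
                rc = qed_dmae_host2grc(p_hwfn, p_ptt, (u64)phys, grc_addr,
                                       size_in_dwords, 0 /* flags */);

                dma_free_coherent(&p_hwfn->cdev->pdev->dev,
                                  size_in_dwords * sizeof(u32), virt, phys);
                return rc;
        }
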
H A Dqed_spq.c41 static void qed_spq_blocking_cb(struct qed_hwfn *p_hwfn, qed_spq_blocking_cb() argument
57 static int qed_spq_block(struct qed_hwfn *p_hwfn, qed_spq_block() argument
78 DP_INFO(p_hwfn, "Ramrod is stuck, requesting MCP drain\n"); qed_spq_block()
79 rc = qed_mcp_drain(p_hwfn, p_hwfn->p_main_ptt); qed_spq_block()
81 DP_NOTICE(p_hwfn, "MCP drain failed\n"); qed_spq_block()
103 DP_NOTICE(p_hwfn, "Ramrod is stuck, MCP drain failed\n"); qed_spq_block()
112 qed_spq_fill_entry(struct qed_hwfn *p_hwfn, qed_spq_fill_entry() argument
125 DP_NOTICE(p_hwfn, "Unknown SPQE completion mode %d\n", qed_spq_fill_entry()
130 DP_VERBOSE(p_hwfn, QED_MSG_SPQ, qed_spq_fill_entry()
147 static void qed_spq_hw_initialize(struct qed_hwfn *p_hwfn, qed_spq_hw_initialize() argument
158 rc = qed_cxt_get_cid_info(p_hwfn, &cxt_info); qed_spq_hw_initialize()
161 DP_NOTICE(p_hwfn, "Cannot find context info for cid=%d\n", qed_spq_hw_initialize()
178 pq = qed_get_qm_pq(p_hwfn, PROTOCOLID_CORE, &pq_params); qed_spq_hw_initialize()
187 DMA_LO_LE(p_hwfn->p_consq->chain.p_phys_addr); qed_spq_hw_initialize()
189 DMA_HI_LE(p_hwfn->p_consq->chain.p_phys_addr); qed_spq_hw_initialize()
192 static int qed_spq_hw_post(struct qed_hwfn *p_hwfn, qed_spq_hw_post() argument
196 struct qed_chain *p_chain = &p_hwfn->p_spq->chain; qed_spq_hw_post()
204 DP_NOTICE(p_hwfn, "Failed to produce from SPQ chain\n"); qed_spq_hw_post()
226 DOORBELL(p_hwfn, qed_db_addr(p_spq->cid, DQ_DEMS_LEGACY), *(u32 *)&db); qed_spq_hw_post()
231 DP_VERBOSE(p_hwfn, QED_MSG_SPQ, qed_spq_hw_post()
244 qed_async_event_completion(struct qed_hwfn *p_hwfn, qed_async_event_completion() argument
247 DP_NOTICE(p_hwfn, qed_async_event_completion()
256 void qed_eq_prod_update(struct qed_hwfn *p_hwfn, qed_eq_prod_update() argument
260 USTORM_EQE_CONS_OFFSET(p_hwfn->rel_pf_id); qed_eq_prod_update()
262 REG_WR16(p_hwfn, addr, prod); qed_eq_prod_update()
268 int qed_eq_completion(struct qed_hwfn *p_hwfn, qed_eq_completion() argument
279 DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "fw_cons_idx %x\n", fw_cons_idx); qed_eq_completion()
297 DP_VERBOSE(p_hwfn, QED_MSG_SPQ, qed_eq_completion()
307 if (qed_async_event_completion(p_hwfn, p_eqe)) qed_eq_completion()
309 } else if (qed_spq_completion(p_hwfn, qed_eq_completion()
319 qed_eq_prod_update(p_hwfn, qed_chain_get_prod_idx(p_chain)); qed_eq_completion()
324 struct qed_eq *qed_eq_alloc(struct qed_hwfn *p_hwfn, qed_eq_alloc() argument
332 DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_eq'\n"); qed_eq_alloc()
337 if (qed_chain_alloc(p_hwfn->cdev, qed_eq_alloc()
343 DP_NOTICE(p_hwfn, "Failed to allocate eq chain\n"); qed_eq_alloc()
348 qed_int_register_cb(p_hwfn, qed_eq_alloc()
357 qed_eq_free(p_hwfn, p_eq); qed_eq_alloc()
361 void qed_eq_setup(struct qed_hwfn *p_hwfn, qed_eq_setup() argument
367 void qed_eq_free(struct qed_hwfn *p_hwfn, qed_eq_free() argument
372 qed_chain_free(p_hwfn->cdev, &p_eq->chain); qed_eq_free()
380 struct qed_hwfn *p_hwfn, qed_cqe_completion()
388 return qed_spq_completion(p_hwfn, cqe->echo, 0, NULL); qed_cqe_completion()
391 int qed_eth_cqe_completion(struct qed_hwfn *p_hwfn, qed_eth_cqe_completion() argument
396 rc = qed_cqe_completion(p_hwfn, cqe, PROTOCOLID_ETH); qed_eth_cqe_completion()
398 DP_NOTICE(p_hwfn, qed_eth_cqe_completion()
408 void qed_spq_setup(struct qed_hwfn *p_hwfn) qed_spq_setup() argument
410 struct qed_spq *p_spq = p_hwfn->p_spq; qed_spq_setup()
445 qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_spq->cid); qed_spq_setup()
446 qed_spq_hw_initialize(p_hwfn, p_spq); qed_spq_setup()
452 int qed_spq_alloc(struct qed_hwfn *p_hwfn) qed_spq_alloc() argument
462 DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_spq'\n"); qed_spq_alloc()
467 if (qed_chain_alloc(p_hwfn->cdev, qed_spq_alloc()
473 DP_NOTICE(p_hwfn, "Failed to allocate spq chain\n"); qed_spq_alloc()
478 p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, qed_spq_alloc()
489 p_hwfn->p_spq = p_spq; qed_spq_alloc()
494 qed_chain_free(p_hwfn->cdev, &p_spq->chain); qed_spq_alloc()
499 void qed_spq_free(struct qed_hwfn *p_hwfn) qed_spq_free() argument
501 struct qed_spq *p_spq = p_hwfn->p_spq; qed_spq_free()
507 dma_free_coherent(&p_hwfn->cdev->pdev->dev, qed_spq_free()
513 qed_chain_free(p_hwfn->cdev, &p_spq->chain); qed_spq_free()
519 qed_spq_get_entry(struct qed_hwfn *p_hwfn, qed_spq_get_entry() argument
522 struct qed_spq *p_spq = p_hwfn->p_spq; qed_spq_get_entry()
551 static void __qed_spq_return_entry(struct qed_hwfn *p_hwfn, __qed_spq_return_entry() argument
554 list_add_tail(&p_ent->list, &p_hwfn->p_spq->free_pool); __qed_spq_return_entry()
557 void qed_spq_return_entry(struct qed_hwfn *p_hwfn, qed_spq_return_entry() argument
560 spin_lock_bh(&p_hwfn->p_spq->lock); qed_spq_return_entry()
561 __qed_spq_return_entry(p_hwfn, p_ent); qed_spq_return_entry()
562 spin_unlock_bh(&p_hwfn->p_spq->lock); qed_spq_return_entry()
573 * @param p_hwfn
580 qed_spq_add_entry(struct qed_hwfn *p_hwfn, qed_spq_add_entry() argument
584 struct qed_spq *p_spq = p_hwfn->p_spq; qed_spq_add_entry()
635 u32 qed_spq_get_cid(struct qed_hwfn *p_hwfn) qed_spq_get_cid() argument
637 if (!p_hwfn->p_spq) qed_spq_get_cid()
639 return p_hwfn->p_spq->cid; qed_spq_get_cid()
645 static int qed_spq_post_list(struct qed_hwfn *p_hwfn, qed_spq_post_list() argument
649 struct qed_spq *p_spq = p_hwfn->p_spq; qed_spq_post_list()
660 rc = qed_spq_hw_post(p_hwfn, p_spq, p_ent); qed_spq_post_list()
663 __qed_spq_return_entry(p_hwfn, p_ent); qed_spq_post_list()
671 static int qed_spq_pend_post(struct qed_hwfn *p_hwfn) qed_spq_pend_post() argument
673 struct qed_spq *p_spq = p_hwfn->p_spq; qed_spq_pend_post()
688 qed_spq_add_entry(p_hwfn, p_ent, p_ent->priority); qed_spq_pend_post()
691 return qed_spq_post_list(p_hwfn, &p_spq->pending, qed_spq_pend_post()
695 int qed_spq_post(struct qed_hwfn *p_hwfn, qed_spq_post() argument
700 struct qed_spq *p_spq = p_hwfn ? p_hwfn->p_spq : NULL; qed_spq_post()
703 if (!p_hwfn) qed_spq_post()
707 DP_NOTICE(p_hwfn, "Got a NULL pointer\n"); qed_spq_post()
712 rc = qed_spq_fill_entry(p_hwfn, p_ent); qed_spq_post()
721 rc = qed_spq_add_entry(p_hwfn, p_ent, p_ent->priority); qed_spq_post()
725 rc = qed_spq_pend_post(p_hwfn); qed_spq_post()
743 rc = qed_spq_block(p_hwfn, p_ent, fw_return_code); qed_spq_post()
748 qed_spq_return_entry(p_hwfn, p_ent); qed_spq_post()
760 __qed_spq_return_entry(p_hwfn, p_ent); qed_spq_post()
766 int qed_spq_completion(struct qed_hwfn *p_hwfn, qed_spq_completion() argument
777 if (!p_hwfn) qed_spq_completion()
780 p_spq = p_hwfn->p_spq; qed_spq_completion()
816 DP_VERBOSE(p_hwfn, QED_MSG_SPQ, qed_spq_completion()
828 DP_NOTICE(p_hwfn, qed_spq_completion()
833 DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "Complete: func %p cookie %p)\n", qed_spq_completion()
836 found->comp_cb.function(p_hwfn, found->comp_cb.cookie, p_data, qed_spq_completion()
841 qed_spq_return_entry(p_hwfn, found); qed_spq_completion()
845 rc = qed_spq_pend_post(p_hwfn); qed_spq_completion()
851 struct qed_consq *qed_consq_alloc(struct qed_hwfn *p_hwfn) qed_consq_alloc() argument
858 DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_consq'\n"); qed_consq_alloc()
863 if (qed_chain_alloc(p_hwfn->cdev, qed_consq_alloc()
869 DP_NOTICE(p_hwfn, "Failed to allocate consq chain"); qed_consq_alloc()
876 qed_consq_free(p_hwfn, p_consq); qed_consq_alloc()
880 void qed_consq_setup(struct qed_hwfn *p_hwfn, qed_consq_setup() argument
886 void qed_consq_free(struct qed_hwfn *p_hwfn, qed_consq_free() argument
891 qed_chain_free(p_hwfn->cdev, &p_consq->chain); qed_consq_free()
379 qed_cqe_completion( struct qed_hwfn *p_hwfn, struct eth_slow_path_rx_cqe *cqe, enum protocol_type protocol) qed_cqe_completion() argument
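
qed_spq.c implements the slow-path queue: entries come from qed_spq_get_entry(), are posted with qed_spq_post() (optionally blocking on the EQE completion via qed_spq_block(), with an MCP drain as the stuck-ramrod fallback), and go back to the free pool through qed_spq_return_entry(). A minimal sketch of that round trip; the ramrod-specific fields of the entry are elided because they are not part of this listing:

        static int example_spq_roundtrip(struct qed_hwfn *p_hwfn)
        {
                struct qed_spq_entry *p_ent = NULL;
                u8 fw_return_code = 0;
                int rc;

                rc = qed_spq_get_entry(p_hwfn, &p_ent); /* pull from the SPQ free pool */
                if (rc)
                        return rc;

                /* ... fill the ramrod header/union on p_ent (not shown in the listing) ... */

                rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code);
                /* on failure qed_spq_post() recycles the entry itself (lines 748/760 above) */
                return rc;
        }
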
H A Dqed_sp.h39 * @param p_hwfn
44 int qed_eth_cqe_completion(struct qed_hwfn *p_hwfn,
148 * @param p_hwfn
153 int qed_spq_post(struct qed_hwfn *p_hwfn,
160 * @param p_hwfn
164 int qed_spq_alloc(struct qed_hwfn *p_hwfn);
169 * @param p_hwfn
171 void qed_spq_setup(struct qed_hwfn *p_hwfn);
176 * @param p_hwfn
178 void qed_spq_free(struct qed_hwfn *p_hwfn);
186 * @param p_hwfn
192 qed_spq_get_entry(struct qed_hwfn *p_hwfn,
199 * @param p_hwfn
202 void qed_spq_return_entry(struct qed_hwfn *p_hwfn,
207 * @param p_hwfn
212 struct qed_eq *qed_eq_alloc(struct qed_hwfn *p_hwfn,
218 * @param p_hwfn
221 void qed_eq_setup(struct qed_hwfn *p_hwfn,
227 * @param p_hwfn
230 void qed_eq_free(struct qed_hwfn *p_hwfn,
236 * @param p_hwfn
239 void qed_eq_prod_update(struct qed_hwfn *p_hwfn,
245 * @param p_hwfn
250 int qed_eq_completion(struct qed_hwfn *p_hwfn,
256 * @param p_hwfn
262 int qed_spq_completion(struct qed_hwfn *p_hwfn,
268 * @brief qed_spq_get_cid - Given p_hwfn, return cid for the hwfn's SPQ
270 * @param p_hwfn
274 u32 qed_spq_get_cid(struct qed_hwfn *p_hwfn);
280 * @param p_hwfn
284 struct qed_consq *qed_consq_alloc(struct qed_hwfn *p_hwfn);
290 * @param p_hwfn
293 void qed_consq_setup(struct qed_hwfn *p_hwfn,
299 * @param p_hwfn
302 void qed_consq_free(struct qed_hwfn *p_hwfn,
320 int qed_sp_init_request(struct qed_hwfn *p_hwfn,
339 * @param p_hwfn
345 int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
357 * @param p_hwfn
362 int qed_sp_pf_stop(struct qed_hwfn *p_hwfn);
H A Dqed_init_fw_funcs.c123 static void qed_enable_pf_rl(struct qed_hwfn *p_hwfn, qed_enable_pf_rl() argument
126 STORE_RT_REG(p_hwfn, QM_REG_RLPFENABLE_RT_OFFSET, pf_rl_en ? 1 : 0); qed_enable_pf_rl()
129 STORE_RT_REG(p_hwfn, QM_REG_RLPFVOQENABLE_RT_OFFSET, qed_enable_pf_rl()
132 STORE_RT_REG(p_hwfn, qed_enable_pf_rl()
135 STORE_RT_REG(p_hwfn, qed_enable_pf_rl()
140 STORE_RT_REG(p_hwfn, qed_enable_pf_rl()
147 static void qed_enable_pf_wfq(struct qed_hwfn *p_hwfn, qed_enable_pf_wfq() argument
150 STORE_RT_REG(p_hwfn, QM_REG_WFQPFENABLE_RT_OFFSET, pf_wfq_en ? 1 : 0); qed_enable_pf_wfq()
153 STORE_RT_REG(p_hwfn, qed_enable_pf_wfq()
159 static void qed_enable_vport_rl(struct qed_hwfn *p_hwfn, qed_enable_vport_rl() argument
162 STORE_RT_REG(p_hwfn, QM_REG_RLGLBLENABLE_RT_OFFSET, qed_enable_vport_rl()
166 STORE_RT_REG(p_hwfn, qed_enable_vport_rl()
169 STORE_RT_REG(p_hwfn, qed_enable_vport_rl()
174 STORE_RT_REG(p_hwfn, qed_enable_vport_rl()
181 static void qed_enable_vport_wfq(struct qed_hwfn *p_hwfn, qed_enable_vport_wfq() argument
184 STORE_RT_REG(p_hwfn, QM_REG_WFQVPENABLE_RT_OFFSET, qed_enable_vport_wfq()
188 STORE_RT_REG(p_hwfn, qed_enable_vport_wfq()
196 static void qed_cmdq_lines_voq_rt_init(struct qed_hwfn *p_hwfn, qed_cmdq_lines_voq_rt_init() argument
205 bool is_bb_a0 = QED_IS_BB_A0(p_hwfn->cdev); qed_cmdq_lines_voq_rt_init()
210 OVERWRITE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(voq), qed_cmdq_lines_voq_rt_init()
212 STORE_RT_REG(p_hwfn, QM_REG_VOQCRDLINE_RT_OFFSET + voq, qm_line_crd); qed_cmdq_lines_voq_rt_init()
213 STORE_RT_REG(p_hwfn, QM_REG_VOQINITCRDLINE_RT_OFFSET + voq, qed_cmdq_lines_voq_rt_init()
219 struct qed_hwfn *p_hwfn, qed_cmdq_lines_rt_init()
228 STORE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(voq), 0); qed_cmdq_lines_rt_init()
245 qed_cmdq_lines_voq_rt_init(p_hwfn, voq, qed_cmdq_lines_rt_init()
249 qed_cmdq_lines_voq_rt_init(p_hwfn, LB_VOQ(port_id), qed_cmdq_lines_rt_init()
256 struct qed_hwfn *p_hwfn, qed_btb_blocks_rt_init()
290 STORE_RT_REG(p_hwfn, PBF_BTB_GUARANTEED_RT_OFFSET(voq), qed_btb_blocks_rt_init()
296 STORE_RT_REG(p_hwfn, PBF_BTB_GUARANTEED_RT_OFFSET(temp), qed_btb_blocks_rt_init()
303 struct qed_hwfn *p_hwfn, qed_tx_pq_map_rt_init()
313 bool is_bb_a0 = QED_IS_BB_A0(p_hwfn->cdev); qed_tx_pq_map_rt_init()
326 STORE_RT_REG(p_hwfn, QM_REG_PQTX2PF_0_RT_OFFSET + pq_group, qed_tx_pq_map_rt_init()
329 STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_0_RT_OFFSET, qed_tx_pq_map_rt_init()
331 STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_1_RT_OFFSET, qed_tx_pq_map_rt_init()
352 STORE_RT_REG(p_hwfn, qed_tx_pq_map_rt_init()
371 STORE_RT_REG(p_hwfn, QM_REG_TXPQMAP_RT_OFFSET + pq_id, qed_tx_pq_map_rt_init()
374 STORE_RT_REG(p_hwfn, qed_tx_pq_map_rt_init()
398 curr_mask = qed_rd(p_hwfn, p_ptt, qed_tx_pq_map_rt_init()
403 STORE_RT_REG(p_hwfn, addr, qed_tx_pq_map_rt_init()
409 STORE_RT_REG(p_hwfn, addr, qed_tx_pq_map_rt_init()
417 static void qed_other_pq_map_rt_init(struct qed_hwfn *p_hwfn, qed_other_pq_map_rt_init() argument
435 STORE_RT_REG(p_hwfn, QM_REG_PQOTHER2PF_0_RT_OFFSET + pq_group, qed_other_pq_map_rt_init()
438 STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_2_RT_OFFSET, qed_other_pq_map_rt_init()
443 STORE_RT_REG(p_hwfn, qed_other_pq_map_rt_init()
453 static int qed_pf_wfq_rt_init(struct qed_hwfn *p_hwfn, qed_pf_wfq_rt_init() argument
469 DP_NOTICE(p_hwfn, "Invalid PF WFQ weight configuration"); qed_pf_wfq_rt_init()
472 STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + p_params->pf_id, qed_pf_wfq_rt_init()
474 STORE_RT_REG(p_hwfn, qed_pf_wfq_rt_init()
482 OVERWRITE_RT_REG(p_hwfn, qed_pf_wfq_rt_init()
494 static int qed_pf_rl_rt_init(struct qed_hwfn *p_hwfn, qed_pf_rl_rt_init() argument
501 DP_NOTICE(p_hwfn, "Invalid PF rate limit configuration"); qed_pf_rl_rt_init()
504 STORE_RT_REG(p_hwfn, QM_REG_RLPFCRD_RT_OFFSET + pf_id, qed_pf_rl_rt_init()
506 STORE_RT_REG(p_hwfn, QM_REG_RLPFUPPERBOUND_RT_OFFSET + pf_id, qed_pf_rl_rt_init()
508 STORE_RT_REG(p_hwfn, QM_REG_RLPFINCVAL_RT_OFFSET + pf_id, inc_val); qed_pf_rl_rt_init()
515 static int qed_vp_wfq_rt_init(struct qed_hwfn *p_hwfn, qed_vp_wfq_rt_init() argument
533 DP_NOTICE(p_hwfn, qed_vp_wfq_rt_init()
545 STORE_RT_REG(p_hwfn, qed_vp_wfq_rt_init()
548 STORE_RT_REG(p_hwfn, temp + vport_pq_id, qed_vp_wfq_rt_init()
551 STORE_RT_REG(p_hwfn, qed_vp_wfq_rt_init()
563 static int qed_vport_rl_rt_init(struct qed_hwfn *p_hwfn, qed_vport_rl_rt_init() argument
575 DP_NOTICE(p_hwfn, qed_vport_rl_rt_init()
580 STORE_RT_REG(p_hwfn, qed_vport_rl_rt_init()
583 STORE_RT_REG(p_hwfn, qed_vport_rl_rt_init()
586 STORE_RT_REG(p_hwfn, qed_vport_rl_rt_init()
594 static bool qed_poll_on_qm_cmd_ready(struct qed_hwfn *p_hwfn, qed_poll_on_qm_cmd_ready() argument
602 reg_val = qed_rd(p_hwfn, p_ptt, QM_REG_SDMCMDREADY); qed_poll_on_qm_cmd_ready()
607 DP_VERBOSE(p_hwfn, NETIF_MSG_HW, qed_poll_on_qm_cmd_ready()
615 static bool qed_send_qm_cmd(struct qed_hwfn *p_hwfn, qed_send_qm_cmd() argument
621 if (!qed_poll_on_qm_cmd_ready(p_hwfn, p_ptt)) qed_send_qm_cmd()
624 qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDADDR, cmd_addr); qed_send_qm_cmd()
625 qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDDATALSB, cmd_data_lsb); qed_send_qm_cmd()
626 qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDDATAMSB, cmd_data_msb); qed_send_qm_cmd()
627 qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDGO, 1); qed_send_qm_cmd()
628 qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDGO, 0); qed_send_qm_cmd()
630 return qed_poll_on_qm_cmd_ready(p_hwfn, p_ptt); qed_send_qm_cmd()
647 struct qed_hwfn *p_hwfn, qed_qm_common_rt_init()
667 STORE_RT_REG(p_hwfn, QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET, mask); qed_qm_common_rt_init()
668 qed_enable_pf_rl(p_hwfn, p_params->pf_rl_en); qed_qm_common_rt_init()
669 qed_enable_pf_wfq(p_hwfn, p_params->pf_wfq_en); qed_qm_common_rt_init()
670 qed_enable_vport_rl(p_hwfn, p_params->vport_rl_en); qed_qm_common_rt_init()
671 qed_enable_vport_wfq(p_hwfn, p_params->vport_wfq_en); qed_qm_common_rt_init()
672 qed_cmdq_lines_rt_init(p_hwfn, qed_qm_common_rt_init()
676 qed_btb_blocks_rt_init(p_hwfn, qed_qm_common_rt_init()
683 int qed_qm_pf_rt_init(struct qed_hwfn *p_hwfn, qed_qm_pf_rt_init() argument
699 qed_other_pq_map_rt_init(p_hwfn, p_params->port_id, p_params->pf_id, qed_qm_pf_rt_init()
703 qed_tx_pq_map_rt_init(p_hwfn, p_ptt, p_params, other_mem_size_4kb); qed_qm_pf_rt_init()
706 if (qed_pf_wfq_rt_init(p_hwfn, p_params)) qed_qm_pf_rt_init()
709 if (qed_pf_rl_rt_init(p_hwfn, p_params->pf_id, p_params->pf_rl)) qed_qm_pf_rt_init()
712 if (qed_vp_wfq_rt_init(p_hwfn, p_params->start_vport, qed_qm_pf_rt_init()
716 if (qed_vport_rl_rt_init(p_hwfn, p_params->start_vport, qed_qm_pf_rt_init()
723 int qed_init_pf_rl(struct qed_hwfn *p_hwfn, qed_init_pf_rl() argument
731 DP_NOTICE(p_hwfn, "Invalid PF rate limit configuration"); qed_init_pf_rl()
735 qed_wr(p_hwfn, p_ptt, qed_init_pf_rl()
738 qed_wr(p_hwfn, p_ptt, QM_REG_RLPFINCVAL + pf_id * 4, inc_val); qed_init_pf_rl()
743 int qed_init_vport_rl(struct qed_hwfn *p_hwfn, qed_init_vport_rl() argument
751 DP_NOTICE(p_hwfn, "Invalid VPORT rate-limit configuration"); qed_init_vport_rl()
755 qed_wr(p_hwfn, p_ptt, qed_init_vport_rl()
758 qed_wr(p_hwfn, p_ptt, QM_REG_RLGLBLINCVAL + vport_id * 4, inc_val); qed_init_vport_rl()
763 bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn, qed_send_qm_stop_cmd() argument
790 if (!qed_send_qm_cmd(p_hwfn, p_ptt, QM_STOP_CMD_ADDR, qed_send_qm_stop_cmd()
218 qed_cmdq_lines_rt_init( struct qed_hwfn *p_hwfn, u8 max_ports_per_engine, u8 max_phys_tcs_per_port, struct init_qm_port_params port_params[MAX_NUM_PORTS]) qed_cmdq_lines_rt_init() argument
255 qed_btb_blocks_rt_init( struct qed_hwfn *p_hwfn, u8 max_ports_per_engine, u8 max_phys_tcs_per_port, struct init_qm_port_params port_params[MAX_NUM_PORTS]) qed_btb_blocks_rt_init() argument
302 qed_tx_pq_map_rt_init( struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_qm_pf_rt_init_params *p_params, u32 base_mem_addr_4kb) qed_tx_pq_map_rt_init() argument
646 qed_qm_common_rt_init( struct qed_hwfn *p_hwfn, struct qed_qm_common_rt_init_params *p_params) qed_qm_common_rt_init() argument
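
qed_init_fw_funcs.c programs the QM: the *_rt_init() helpers stage runtime registers during PF init, while qed_init_pf_rl()/qed_init_vport_rl() write the rate-limiter registers directly through a PTT at runtime. A sketch of a runtime PF rate-limit update, assuming the (p_hwfn, p_ptt, pf_id, pf_rl) ordering suggested by the register writes above:

        static int example_set_pf_rate(struct qed_hwfn *p_hwfn, u8 pf_id, u32 pf_rl)
        {
                struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
                int rc;

                if (!p_ptt)
                        return -EBUSY;

                /* out-of-range values hit "Invalid PF rate limit configuration" above */
                rc = qed_init_pf_rl(p_hwfn, p_ptt, pf_id, pf_rl);

                qed_ptt_release(p_hwfn, p_ptt);
                return rc;
        }
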
H A Dqed_int.h55 * @param p_hwfn
62 void qed_int_cau_conf_pi(struct qed_hwfn *p_hwfn,
72 * @param p_hwfn
76 void qed_int_igu_enable_int(struct qed_hwfn *p_hwfn,
83 * @param p_hwfn
86 void qed_int_igu_disable_int(struct qed_hwfn *p_hwfn,
93 * @param p_hwfn
97 u64 qed_int_igu_read_sisr_reg(struct qed_hwfn *p_hwfn);
105 * @param p_hwfn
116 int qed_int_sb_init(struct qed_hwfn *p_hwfn,
125 * @param p_hwfn
129 void qed_int_sb_setup(struct qed_hwfn *p_hwfn,
138 * @param p_hwfn
146 int qed_int_sb_release(struct qed_hwfn *p_hwfn,
154 * @param p_hwfn - pointer to hwfn
163 * @param p_hwfn
168 int qed_int_get_num_sbs(struct qed_hwfn *p_hwfn,
187 #define SB_ALIGNED_SIZE(p_hwfn) \
188 ALIGNED_TYPE_SIZE(struct status_block, p_hwfn)
216 void qed_int_igu_init_pure_rt(struct qed_hwfn *p_hwfn,
221 void qed_int_igu_init_rt(struct qed_hwfn *p_hwfn);
229 * @param p_hwfn
234 int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn,
237 typedef int (*qed_int_comp_cb_t)(struct qed_hwfn *p_hwfn,
247 * @param p_hwfn
259 int qed_int_register_cb(struct qed_hwfn *p_hwfn,
271 * @param p_hwfn
276 int qed_int_unregister_cb(struct qed_hwfn *p_hwfn,
282 * @param p_hwfn
286 u16 qed_int_get_sp_sb_id(struct qed_hwfn *p_hwfn);
292 * @param p_hwfn
300 void qed_int_igu_cleanup_sb(struct qed_hwfn *p_hwfn,
310 * @param p_hwfn
316 void qed_int_igu_init_pure_rt_single(struct qed_hwfn *p_hwfn,
326 * @param p_hwfn
333 void qed_int_cau_conf_sb(struct qed_hwfn *p_hwfn,
343 * @param p_hwfn
348 int qed_int_alloc(struct qed_hwfn *p_hwfn,
354 * @param p_hwfn
356 void qed_int_free(struct qed_hwfn *p_hwfn);
361 * @param p_hwfn
364 void qed_int_setup(struct qed_hwfn *p_hwfn,
370 * @param p_hwfn
376 int qed_int_igu_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
382 * @param p_hwfn
388 void qed_init_cau_sb_entry(struct qed_hwfn *p_hwfn,
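
qed_int.h covers status-block management. Based on the call visible in qed_main.c's qed_sb_init() further down (p_hwfn, its main PTT, the sb_info, then the SB memory and id), a hedged sketch of registering one status block could look like this; the argument tail is an assumption mirrored from that call site:

        static int example_sb_register(struct qed_hwfn *p_hwfn,
                                       struct qed_sb_info *sb_info, u16 rel_sb_id)
        {
                dma_addr_t sb_phys;
                void *sb_virt;
                int rc;

                sb_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
                                             sizeof(struct status_block), &sb_phys,
                                             GFP_KERNEL);
                if (!sb_virt)
                        return -ENOMEM;

                rc = qed_int_sb_init(p_hwfn, p_hwfn->p_main_ptt, sb_info,
                                     sb_virt, sb_phys, rel_sb_id);
                if (rc)
                        dma_free_coherent(&p_hwfn->cdev->pdev->dev,
                                          sizeof(struct status_block), sb_virt, sb_phys);
                return rc;
        }
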
H A Dqed_l2.c135 static int qed_sp_vport_start(struct qed_hwfn *p_hwfn, qed_sp_vport_start() argument
150 rc = qed_fw_vport(p_hwfn, vport_id, &abs_vport_id); qed_sp_vport_start()
158 rc = qed_sp_init_request(p_hwfn, &p_ent, qed_sp_vport_start()
159 qed_spq_get_cid(p_hwfn), qed_sp_vport_start()
184 p_ramrod->sw_fid = qed_concrete_to_sw_fid(p_hwfn->cdev, qed_sp_vport_start()
187 return qed_spq_post(p_hwfn, p_ent, NULL); qed_sp_vport_start()
191 qed_sp_vport_update_rss(struct qed_hwfn *p_hwfn, qed_sp_vport_update_rss() argument
207 rc = qed_fw_rss_eng(p_hwfn, p_params->rss_eng_id, &rss->rss_id); qed_sp_vport_update_rss()
242 DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP, qed_sp_vport_update_rss()
250 rc = qed_fw_l2_queue(p_hwfn, qed_sp_vport_update_rss()
257 DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP, "i= %d, queue = %d\n", qed_sp_vport_update_rss()
268 qed_sp_update_accept_mode(struct qed_hwfn *p_hwfn, qed_sp_update_accept_mode() argument
302 DP_VERBOSE(p_hwfn, QED_MSG_SP, qed_sp_update_accept_mode()
329 DP_VERBOSE(p_hwfn, QED_MSG_SP, qed_sp_update_accept_mode()
335 qed_sp_update_mcast_bin(struct qed_hwfn *p_hwfn, qed_sp_update_mcast_bin() argument
356 qed_sp_vport_update(struct qed_hwfn *p_hwfn, qed_sp_vport_update() argument
369 rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id); qed_sp_vport_update()
378 rc = qed_sp_init_request(p_hwfn, &p_ent, qed_sp_vport_update()
379 qed_spq_get_cid(p_hwfn), qed_sp_vport_update()
397 rc = qed_sp_vport_update_rss(p_hwfn, p_ramrod, p_rss_params); qed_sp_vport_update()
400 qed_spq_return_entry(p_hwfn, p_ent); qed_sp_vport_update()
405 qed_sp_update_mcast_bin(p_hwfn, p_ramrod, p_params); qed_sp_vport_update()
407 qed_sp_update_accept_mode(p_hwfn, p_ramrod, p_params->accept_flags); qed_sp_vport_update()
408 return qed_spq_post(p_hwfn, p_ent, NULL); qed_sp_vport_update()
411 static int qed_sp_vport_stop(struct qed_hwfn *p_hwfn, qed_sp_vport_stop() argument
421 rc = qed_fw_vport(p_hwfn, vport_id, &abs_vport_id); qed_sp_vport_stop()
429 rc = qed_sp_init_request(p_hwfn, &p_ent, qed_sp_vport_stop()
430 qed_spq_get_cid(p_hwfn), qed_sp_vport_stop()
441 return qed_spq_post(p_hwfn, p_ent, NULL); qed_sp_vport_stop()
459 struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; for_each_hwfn() local
461 vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid; for_each_hwfn()
463 rc = qed_sp_vport_update(p_hwfn, &vport_update_params, for_each_hwfn()
470 DP_VERBOSE(p_hwfn, QED_MSG_SP, for_each_hwfn()
480 struct qed_hwfn *p_hwfn, qed_sp_release_queue_cid()
486 qed_cxt_release_cid(p_hwfn, p_cid_data->cid); qed_sp_release_queue_cid()
494 qed_sp_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn, qed_sp_eth_rxq_start_ramrod() argument
513 p_rx_cid = &p_hwfn->p_rx_cids[params->queue_id]; qed_sp_eth_rxq_start_ramrod()
518 rc = qed_fw_vport(p_hwfn, params->vport_id, &abs_vport_id); qed_sp_eth_rxq_start_ramrod()
522 rc = qed_fw_l2_queue(p_hwfn, params->queue_id, &abs_rx_q_id); qed_sp_eth_rxq_start_ramrod()
526 DP_VERBOSE(p_hwfn, QED_MSG_SP, qed_sp_eth_rxq_start_ramrod()
535 rc = qed_sp_init_request(p_hwfn, &p_ent, qed_sp_eth_rxq_start_ramrod()
561 rc = qed_spq_post(p_hwfn, p_ent, NULL); qed_sp_eth_rxq_start_ramrod()
567 qed_sp_eth_rx_queue_start(struct qed_hwfn *p_hwfn, qed_sp_eth_rx_queue_start() argument
582 rc = qed_fw_l2_queue(p_hwfn, params->queue_id, &abs_l2_queue); qed_sp_eth_rx_queue_start()
586 rc = qed_fw_vport(p_hwfn, params->vport_id, &abs_stats_id); qed_sp_eth_rx_queue_start()
590 *pp_prod = (u8 __iomem *)p_hwfn->regview + qed_sp_eth_rx_queue_start()
595 __internal_ram_wr(p_hwfn, *pp_prod, sizeof(u64), qed_sp_eth_rx_queue_start()
599 p_rx_cid = &p_hwfn->p_rx_cids[params->queue_id]; qed_sp_eth_rx_queue_start()
600 rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH, qed_sp_eth_rx_queue_start()
603 DP_NOTICE(p_hwfn, "Failed to acquire cid\n"); qed_sp_eth_rx_queue_start()
608 rc = qed_sp_eth_rxq_start_ramrod(p_hwfn, qed_sp_eth_rx_queue_start()
619 qed_sp_release_queue_cid(p_hwfn, p_rx_cid); qed_sp_eth_rx_queue_start()
624 static int qed_sp_eth_rx_queue_stop(struct qed_hwfn *p_hwfn, qed_sp_eth_rx_queue_stop() argument
629 struct qed_hw_cid_data *p_rx_cid = &p_hwfn->p_rx_cids[rx_queue_id]; qed_sp_eth_rx_queue_stop()
640 rc = qed_sp_init_request(p_hwfn, &p_ent, qed_sp_eth_rx_queue_stop()
651 qed_fw_vport(p_hwfn, p_rx_cid->vport_id, &p_ramrod->vport_id); qed_sp_eth_rx_queue_stop()
652 qed_fw_l2_queue(p_hwfn, rx_queue_id, &abs_rx_q_id); qed_sp_eth_rx_queue_stop()
659 (!!(p_rx_cid->opaque_fid == p_hwfn->hw_info.opaque_fid) && qed_sp_eth_rx_queue_stop()
662 !(p_rx_cid->opaque_fid == p_hwfn->hw_info.opaque_fid) || qed_sp_eth_rx_queue_stop()
665 rc = qed_spq_post(p_hwfn, p_ent, NULL); qed_sp_eth_rx_queue_stop()
669 return qed_sp_release_queue_cid(p_hwfn, p_rx_cid); qed_sp_eth_rx_queue_stop()
673 qed_sp_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn, qed_sp_eth_txq_start_ramrod() argument
691 p_tx_cid = &p_hwfn->p_tx_cids[p_params->queue_id]; qed_sp_eth_txq_start_ramrod()
695 rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id); qed_sp_eth_txq_start_ramrod()
703 rc = qed_sp_init_request(p_hwfn, &p_ent, cid, qed_sp_eth_txq_start_ramrod()
723 pq_id = qed_get_qm_pq(p_hwfn, qed_sp_eth_txq_start_ramrod()
728 return qed_spq_post(p_hwfn, p_ent, NULL); qed_sp_eth_txq_start_ramrod()
732 qed_sp_eth_tx_queue_start(struct qed_hwfn *p_hwfn, qed_sp_eth_tx_queue_start() argument
744 rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_stats_id); qed_sp_eth_tx_queue_start()
748 p_tx_cid = &p_hwfn->p_tx_cids[p_params->queue_id]; qed_sp_eth_tx_queue_start()
753 rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH, qed_sp_eth_tx_queue_start()
756 DP_NOTICE(p_hwfn, "Failed to acquire cid\n"); qed_sp_eth_tx_queue_start()
761 DP_VERBOSE(p_hwfn, QED_MSG_SP, qed_sp_eth_tx_queue_start()
766 rc = qed_sp_eth_txq_start_ramrod(p_hwfn, qed_sp_eth_tx_queue_start()
775 *pp_doorbell = (u8 __iomem *)p_hwfn->doorbells + qed_sp_eth_tx_queue_start()
779 qed_sp_release_queue_cid(p_hwfn, p_tx_cid); qed_sp_eth_tx_queue_start()
784 static int qed_sp_eth_tx_queue_stop(struct qed_hwfn *p_hwfn, qed_sp_eth_tx_queue_stop() argument
787 struct qed_hw_cid_data *p_tx_cid = &p_hwfn->p_tx_cids[tx_queue_id]; qed_sp_eth_tx_queue_stop()
796 rc = qed_sp_init_request(p_hwfn, &p_ent, qed_sp_eth_tx_queue_stop()
805 rc = qed_spq_post(p_hwfn, p_ent, NULL); qed_sp_eth_tx_queue_stop()
809 return qed_sp_release_queue_cid(p_hwfn, p_tx_cid); qed_sp_eth_tx_queue_stop()
849 qed_filter_ucast_common(struct qed_hwfn *p_hwfn, qed_filter_ucast_common() argument
865 rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_remove_from, qed_filter_ucast_common()
870 rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_add_to, qed_filter_ucast_common()
880 rc = qed_sp_init_request(p_hwfn, pp_ent, qed_filter_ucast_common()
881 qed_spq_get_cid(p_hwfn), qed_filter_ucast_common()
969 DP_NOTICE(p_hwfn, qed_filter_ucast_common()
985 static int qed_sp_eth_filter_ucast(struct qed_hwfn *p_hwfn, qed_sp_eth_filter_ucast() argument
996 rc = qed_filter_ucast_common(p_hwfn, opaque_fid, p_filter_cmd, qed_sp_eth_filter_ucast()
1000 DP_ERR(p_hwfn, "Uni. filter command failed %d\n", rc); qed_sp_eth_filter_ucast()
1006 rc = qed_spq_post(p_hwfn, p_ent, NULL); qed_sp_eth_filter_ucast()
1008 DP_ERR(p_hwfn, qed_sp_eth_filter_ucast()
1014 DP_VERBOSE(p_hwfn, QED_MSG_SP, qed_sp_eth_filter_ucast()
1027 DP_VERBOSE(p_hwfn, QED_MSG_SP, qed_sp_eth_filter_ucast()
1096 qed_sp_eth_filter_mcast(struct qed_hwfn *p_hwfn, qed_sp_eth_filter_mcast() argument
1110 rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_add_to, qed_sp_eth_filter_mcast()
1115 rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_remove_from, qed_sp_eth_filter_mcast()
1126 rc = qed_sp_init_request(p_hwfn, &p_ent, qed_sp_eth_filter_mcast()
1127 qed_spq_get_cid(p_hwfn), qed_sp_eth_filter_mcast()
1128 p_hwfn->hw_info.opaque_fid, qed_sp_eth_filter_mcast()
1134 DP_ERR(p_hwfn, "Multi-cast command failed %d\n", rc); qed_sp_eth_filter_mcast()
1169 return qed_spq_post(p_hwfn, p_ent, NULL); qed_sp_eth_filter_mcast()
1188 struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; for_each_hwfn() local
1195 opaque_fid = p_hwfn->hw_info.opaque_fid; for_each_hwfn()
1197 rc = qed_sp_eth_filter_mcast(p_hwfn, for_each_hwfn()
1215 struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; for_each_hwfn() local
1221 opaque_fid = p_hwfn->hw_info.opaque_fid; for_each_hwfn()
1223 rc = qed_sp_eth_filter_ucast(p_hwfn, for_each_hwfn()
1279 struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; for_each_hwfn() local
1281 rc = qed_sp_vport_start(p_hwfn, for_each_hwfn()
1282 p_hwfn->hw_info.concrete_fid, for_each_hwfn()
1283 p_hwfn->hw_info.opaque_fid, for_each_hwfn()
1294 qed_hw_start_fastpath(p_hwfn); for_each_hwfn()
1312 struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; for_each_hwfn() local
1314 rc = qed_sp_vport_stop(p_hwfn, for_each_hwfn()
1315 p_hwfn->hw_info.opaque_fid, for_each_hwfn()
1402 struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; for_each_hwfn() local
1404 sp_params.opaque_fid = p_hwfn->hw_info.opaque_fid; for_each_hwfn()
1405 rc = qed_sp_vport_update(p_hwfn, &sp_params, for_each_hwfn()
1431 struct qed_hwfn *p_hwfn; qed_start_rxq() local
1434 p_hwfn = &cdev->hwfns[hwfn_index]; qed_start_rxq()
1439 rc = qed_sp_eth_rx_queue_start(p_hwfn, qed_start_rxq()
1440 p_hwfn->hw_info.opaque_fid, qed_start_rxq()
1465 struct qed_hwfn *p_hwfn; qed_stop_rxq() local
1468 p_hwfn = &cdev->hwfns[hwfn_index]; qed_stop_rxq()
1470 rc = qed_sp_eth_rx_queue_stop(p_hwfn, qed_stop_rxq()
1488 struct qed_hwfn *p_hwfn; qed_start_txq() local
1492 p_hwfn = &cdev->hwfns[hwfn_index]; qed_start_txq()
1497 rc = qed_sp_eth_tx_queue_start(p_hwfn, qed_start_txq()
1498 p_hwfn->hw_info.opaque_fid, qed_start_txq()
1528 struct qed_hwfn *p_hwfn; qed_stop_txq() local
1532 p_hwfn = &cdev->hwfns[hwfn_index]; qed_stop_txq()
1534 rc = qed_sp_eth_tx_queue_stop(p_hwfn, qed_stop_txq()
479 qed_sp_release_queue_cid( struct qed_hwfn *p_hwfn, struct qed_hw_cid_data *p_cid_data) qed_sp_release_queue_cid() argument
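
qed_l2.c repeats one pattern for every cdev-level operation: resolve the owning hwfn and hand its opaque_fid to the slow-path helper. The skeleton of that pattern, with the actual vport/queue work elided:

        static void example_per_hwfn(struct qed_dev *cdev)
        {
                int i;

                for_each_hwfn(cdev, i) {
                        struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

                        /* each slow-path helper above takes p_hwfn plus its fid,
                         * e.g. qed_sp_vport_update(p_hwfn, ...) with
                         * params.opaque_fid = p_hwfn->hw_info.opaque_fid */
                }
        }
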
H A Dqed_mcp.h103 * @param p_hwfn
112 * @param p_hwfn
121 * @param p_hwfn
126 *qed_mcp_get_link_capabilities(struct qed_hwfn *p_hwfn);
131 * @param p_hwfn
137 int qed_mcp_set_link(struct qed_hwfn *p_hwfn,
172 * @param p_hwfn - hw function
182 int qed_mcp_cmd(struct qed_hwfn *p_hwfn,
193 * @param p_hwfn
196 int qed_mcp_drain(struct qed_hwfn *p_hwfn,
202 * @param p_hwfn
208 int qed_mcp_get_flash_size(struct qed_hwfn *p_hwfn,
215 * @param p_hwfn
223 qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn,
233 #define MCP_PF_ID_BY_REL(p_hwfn, rel_pfid) (QED_IS_BB((p_hwfn)->cdev) ? \
235 ((p_hwfn)->abs_pf_id & 1) << 3) : \
237 #define MCP_PF_ID(p_hwfn) MCP_PF_ID_BY_REL(p_hwfn, (p_hwfn)->rel_pf_id)
265 * @param p_hwfn - HW func
270 int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn,
276 * @param p_hwfn
280 void qed_mcp_cmd_port_init(struct qed_hwfn *p_hwfn,
285 * @param p_hwfn - HW func
291 int qed_mcp_free(struct qed_hwfn *p_hwfn);
300 * @param p_hwfn - HW function
305 int qed_mcp_handle_events(struct qed_hwfn *p_hwfn,
315 * @param p_hwfn - hw function
326 int qed_mcp_load_req(struct qed_hwfn *p_hwfn,
333 * @param p_hwfn
336 void qed_mcp_read_mb(struct qed_hwfn *p_hwfn,
342 * @param p_hwfn
346 int qed_mcp_fill_shmem_func_info(struct qed_hwfn *p_hwfn,
352 * @param p_hwfn
357 int qed_mcp_reset(struct qed_hwfn *p_hwfn,
363 * @param p_hwfn
367 bool qed_mcp_is_init(struct qed_hwfn *p_hwfn);
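
qed_mcp.h is the management-firmware mailbox API: qed_mcp_cmd() sends one command/parameter pair and returns the MFW response, while qed_mcp_drain() is the recovery path used by qed_spq_block() above when a ramrod gets stuck. A hedged sketch, assuming the usual cmd/param in, resp/param out layout (only the leading parameters are visible in this listing):

        static int example_mcp_query(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
                                     u32 cmd, u32 param)
        {
                u32 mcp_resp = 0, mcp_param = 0;
                int rc;

                if (!qed_mcp_is_init(p_hwfn))   /* MFW mailbox not ready yet */
                        return -EINVAL;

                /* argument layout beyond (p_hwfn, p_ptt) is an assumption */
                rc = qed_mcp_cmd(p_hwfn, p_ptt, cmd, param, &mcp_resp, &mcp_param);
                if (rc)
                        return rc;

                DP_VERBOSE(p_hwfn, NETIF_MSG_HW, "resp 0x%08x param 0x%08x\n",
                           mcp_resp, mcp_param);
                return 0;
        }
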
H A Dqed_dev_api.h106 void qed_hw_start_fastpath(struct qed_hwfn *p_hwfn);
141 * @param p_hwfn
145 struct qed_ptt *qed_ptt_acquire(struct qed_hwfn *p_hwfn);
154 * @param p_hwfn
157 void qed_ptt_release(struct qed_hwfn *p_hwfn,
187 * @param p_hwfn
195 qed_dmae_host2grc(struct qed_hwfn *p_hwfn,
205 * @param p_hwfn
225 * @param p_hwfn
234 * @param p_hwfn
235 * @param src_id - relative to p_hwfn
240 int qed_fw_l2_queue(struct qed_hwfn *p_hwfn,
247 * @param p_hwfn
248 * @param src_id - relative to p_hwfn
253 int qed_fw_vport(struct qed_hwfn *p_hwfn,
260 * @param p_hwfn
261 * @param src_id - relative to p_hwfn
266 int qed_fw_rss_eng(struct qed_hwfn *p_hwfn,
273 * @param p_hwfn
279 int qed_final_cleanup(struct qed_hwfn *p_hwfn,
H A Dqed_main.c538 struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; qed_nic_stop() local
540 if (p_hwfn->b_sp_dpc_enabled) { qed_nic_stop()
541 tasklet_disable(p_hwfn->sp_dpc); qed_nic_stop()
542 p_hwfn->b_sp_dpc_enabled = false; qed_nic_stop()
545 i, p_hwfn->sp_dpc); qed_nic_stop()
650 u32 qed_unzip_data(struct qed_hwfn *p_hwfn, u32 input_len, qed_unzip_data() argument
655 p_hwfn->stream->next_in = input_buf; qed_unzip_data()
656 p_hwfn->stream->avail_in = input_len; qed_unzip_data()
657 p_hwfn->stream->next_out = unzip_buf; qed_unzip_data()
658 p_hwfn->stream->avail_out = max_size; qed_unzip_data()
660 rc = zlib_inflateInit2(p_hwfn->stream, MAX_WBITS); qed_unzip_data()
663 DP_VERBOSE(p_hwfn, NETIF_MSG_DRV, "zlib init failed, rc = %d\n", qed_unzip_data()
668 rc = zlib_inflate(p_hwfn->stream, Z_FINISH); qed_unzip_data()
669 zlib_inflateEnd(p_hwfn->stream); qed_unzip_data()
672 DP_VERBOSE(p_hwfn, NETIF_MSG_DRV, "FW unzip error: %s, rc=%d\n", qed_unzip_data()
673 p_hwfn->stream->msg, rc); qed_unzip_data()
677 return p_hwfn->stream->total_out / 4; qed_unzip_data()
686 struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; for_each_hwfn() local
688 p_hwfn->stream = kzalloc(sizeof(*p_hwfn->stream), GFP_KERNEL); for_each_hwfn()
689 if (!p_hwfn->stream) for_each_hwfn()
695 p_hwfn->stream->workspace = workspace; for_each_hwfn()
706 struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; for_each_hwfn() local
708 if (!p_hwfn->stream) for_each_hwfn()
711 vfree(p_hwfn->stream->workspace); for_each_hwfn()
712 kfree(p_hwfn->stream); for_each_hwfn()
722 struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; qed_update_pf_params() local
724 p_hwfn->pf_params = *params; qed_update_pf_params()
837 struct qed_hwfn *p_hwfn; qed_sb_init() local
852 p_hwfn = &cdev->hwfns[hwfn_index]; qed_sb_init()
859 rc = qed_int_sb_init(p_hwfn, p_hwfn->p_main_ptt, sb_info, qed_sb_init()
869 struct qed_hwfn *p_hwfn; qed_sb_release() local
875 p_hwfn = &cdev->hwfns[hwfn_index]; qed_sb_release()
882 rc = qed_int_sb_release(p_hwfn, sb_info, rel_sb_id); qed_sb_release()
H A Dqed.h53 #define ALIGNED_TYPE_SIZE(type_name, p_hwfn) \
54 ((sizeof(type_name) + (u32)(1 << (p_hwfn->cdev->cache_shift)) - 1) & \
55 ~((1 << (p_hwfn->cdev->cache_shift)) - 1))
409 #define IRO (p_hwfn->cdev->iro_arr)
491 u32 qed_unzip_data(struct qed_hwfn *p_hwfn,
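
The ALIGNED_TYPE_SIZE() macro above rounds a structure size up to the hwfn's cache-line size: with cache_shift = 6 (a 64-byte line, assumed here purely for illustration), a 52-byte type becomes (52 + 63) & ~63 = 64. CONN_CXT_SIZE() in qed_cxt.c and SB_ALIGNED_SIZE() in qed_int.h derive their per-hwfn sizes the same way.
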
H A Dqed_hsi.h1589 * @param p_hwfn
1602 struct qed_hwfn *p_hwfn,
1624 int qed_qm_pf_rt_init(struct qed_hwfn *p_hwfn,
1631 * @param p_hwfn
1638 int qed_init_pf_rl(struct qed_hwfn *p_hwfn,
1646 * @param p_hwfn
1654 int qed_init_vport_rl(struct qed_hwfn *p_hwfn,
1661 * @param p_hwfn
1672 bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn,
/linux-4.4.14/include/linux/qed/
H A Dqed_if.h479 static inline void __internal_ram_wr(void *p_hwfn, __internal_ram_wr() argument
