Lines matching references to hw in the ixgbevf virtual-function driver (vf.c)
39 static s32 ixgbevf_start_hw_vf(struct ixgbe_hw *hw) in ixgbevf_start_hw_vf() argument
42 hw->adapter_stopped = false; in ixgbevf_start_hw_vf()
54 static s32 ixgbevf_init_hw_vf(struct ixgbe_hw *hw) in ixgbevf_init_hw_vf() argument
56 s32 status = hw->mac.ops.start_hw(hw); in ixgbevf_init_hw_vf()
58 hw->mac.ops.get_mac_addr(hw, hw->mac.addr); in ixgbevf_init_hw_vf()
70 static s32 ixgbevf_reset_hw_vf(struct ixgbe_hw *hw) in ixgbevf_reset_hw_vf() argument
72 struct ixgbe_mbx_info *mbx = &hw->mbx; in ixgbevf_reset_hw_vf()
79 hw->mac.ops.stop_adapter(hw); in ixgbevf_reset_hw_vf()
82 hw->api_version = ixgbe_mbox_api_10; in ixgbevf_reset_hw_vf()
84 IXGBE_WRITE_REG(hw, IXGBE_VFCTRL, IXGBE_CTRL_RST); in ixgbevf_reset_hw_vf()
85 IXGBE_WRITE_FLUSH(hw); in ixgbevf_reset_hw_vf()
88 while (!mbx->ops.check_for_rst(hw) && timeout) { in ixgbevf_reset_hw_vf()
100 mbx->ops.write_posted(hw, msgbuf, 1); in ixgbevf_reset_hw_vf()
108 ret_val = mbx->ops.read_posted(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN); in ixgbevf_reset_hw_vf()
120 ether_addr_copy(hw->mac.perm_addr, addr); in ixgbevf_reset_hw_vf()
121 hw->mac.mc_filter_type = msgbuf[IXGBE_VF_MC_TYPE_WORD]; in ixgbevf_reset_hw_vf()
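The ixgbevf_reset_hw_vf() fragments above form a three-step handshake: stop the adapter, drop back to mailbox API 1.0 (negotiation state does not survive a reset), assert the reset bit in VFCTRL, wait for the PF to finish the reset, then send a reset message and read back the permanent MAC address and multicast filter type. A minimal userspace model of that control flow follows; reg_write()/reg_flush(), the mbx_* helpers, and all constant values are illustrative stand-ins, not the driver's real API.

    #include <stdint.h>
    #include <string.h>
    #include <unistd.h>

    /* Hypothetical stand-ins for the register and mailbox plumbing. */
    void reg_write(uint32_t reg, uint32_t val);
    void reg_flush(void);
    int  mbx_reset_in_progress(void);        /* nonzero while RSTI/RSTD set */
    int  mbx_write_posted(uint32_t *msg, int n);
    int  mbx_read_posted(uint32_t *msg, int n);

    #define VFCTRL          0x0000           /* illustrative offset */
    #define CTRL_RST        (1u << 26)       /* illustrative reset bit */
    #define VF_RESET        0x01             /* assumed opcode */
    #define PERMADDR_WORDS  4                /* cmd + MAC + mc filter type */

    static int vf_reset(uint8_t perm_addr[6], uint32_t *mc_filter_type)
    {
        uint32_t msgbuf[PERMADDR_WORDS] = { VF_RESET };
        int timeout = 200;

        reg_write(VFCTRL, CTRL_RST);         /* kick the VF soft reset */
        reg_flush();

        while (mbx_reset_in_progress() && --timeout)
            usleep(5);                       /* PF must finish the reset */
        if (!timeout)
            return -1;

        mbx_write_posted(msgbuf, 1);         /* announce the reset to the PF */
        if (mbx_read_posted(msgbuf, PERMADDR_WORDS))
            return -1;

        memcpy(perm_addr, &msgbuf[1], 6);    /* MAC rides in words 1..2 */
        *mc_filter_type = msgbuf[3];         /* filter type in word 3 */
        return 0;
    }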
135 static s32 ixgbevf_stop_hw_vf(struct ixgbe_hw *hw) in ixgbevf_stop_hw_vf() argument
144 hw->adapter_stopped = true; in ixgbevf_stop_hw_vf()
147 number_of_queues = hw->mac.max_rx_queues; in ixgbevf_stop_hw_vf()
149 reg_val = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)); in ixgbevf_stop_hw_vf()
152 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), reg_val); in ixgbevf_stop_hw_vf()
156 IXGBE_WRITE_FLUSH(hw); in ixgbevf_stop_hw_vf()
159 IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, IXGBE_VF_IRQ_CLEAR_MASK); in ixgbevf_stop_hw_vf()
162 IXGBE_READ_REG(hw, IXGBE_VTEICR); in ixgbevf_stop_hw_vf()
165 number_of_queues = hw->mac.max_tx_queues; in ixgbevf_stop_hw_vf()
167 reg_val = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i)); in ixgbevf_stop_hw_vf()
170 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), reg_val); in ixgbevf_stop_hw_vf()
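ixgbevf_stop_hw_vf() quiesces the VF in a deliberate order: receive queues first, then interrupts, then transmit queues, with a flush after the RX disables so they reach hardware before interrupts are masked. A sketch of that sequence; the register offsets and bit positions here are assumptions for illustration.

    #include <stdint.h>

    uint32_t reg_read(uint32_t reg);
    void     reg_write(uint32_t reg, uint32_t val);
    void     reg_flush(void);

    #define VFRXDCTL(i)     (0x1028 + 0x40 * (i))  /* illustrative offsets */
    #define VFTXDCTL(i)     (0x3028 + 0x40 * (i))
    #define VTEIMC          0x010C
    #define VTEICR          0x0100
    #define DCTL_ENABLE     (1u << 25)             /* illustrative enable bit */
    #define IRQ_CLEAR_MASK  0x7u

    static void vf_stop_adapter(unsigned int rx_queues, unsigned int tx_queues)
    {
        unsigned int i;

        for (i = 0; i < rx_queues; i++)      /* stop receive first */
            reg_write(VFRXDCTL(i), reg_read(VFRXDCTL(i)) & ~DCTL_ENABLE);
        reg_flush();                         /* push RX disables to hardware */

        reg_write(VTEIMC, IRQ_CLEAR_MASK);   /* mask all VF interrupts */
        reg_read(VTEICR);                    /* read-to-clear anything pending */

        for (i = 0; i < tx_queues; i++)      /* finally stop transmit */
            reg_write(VFTXDCTL(i), reg_read(VFTXDCTL(i)) & ~DCTL_ENABLE);
    }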
189 static s32 ixgbevf_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr) in ixgbevf_mta_vector() argument
193 switch (hw->mac.mc_filter_type) { in ixgbevf_mta_vector()
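The switch on hw->mac.mc_filter_type picks which 12-bit window of a multicast address indexes the filter table. A standalone model of that hash is below; the bit windows ([47:36], [46:35], [45:34], [43:32] for types 0-3) follow the long-standing convention in Intel's drivers and should be treated as an assumption here.

    #include <stdint.h>

    /* Return the 12-bit multicast table vector for mc_addr, selecting the
     * bit window according to filter_type (0..3). */
    static uint32_t mta_vector(int filter_type, const uint8_t mc_addr[6])
    {
        uint32_t vector = 0;

        switch (filter_type) {
        case 0: /* bits [47:36] of the address */
            vector = (mc_addr[4] >> 4) | ((uint16_t)mc_addr[5] << 4);
            break;
        case 1: /* bits [46:35] */
            vector = (mc_addr[4] >> 3) | ((uint16_t)mc_addr[5] << 5);
            break;
        case 2: /* bits [45:34] */
            vector = (mc_addr[4] >> 2) | ((uint16_t)mc_addr[5] << 6);
            break;
        case 3: /* bits [43:32] */
            vector = mc_addr[4] | ((uint16_t)mc_addr[5] << 8);
            break;
        }
        return vector & 0xFFF;               /* 4096-entry table */
    }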
220 static s32 ixgbevf_get_mac_addr_vf(struct ixgbe_hw *hw, u8 *mac_addr) in ixgbevf_get_mac_addr_vf() argument
222 ether_addr_copy(mac_addr, hw->mac.perm_addr); in ixgbevf_get_mac_addr_vf()
227 static s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr) in ixgbevf_set_uc_addr_vf() argument
229 struct ixgbe_mbx_info *mbx = &hw->mbx; in ixgbevf_set_uc_addr_vf()
244 ret_val = mbx->ops.write_posted(hw, msgbuf, 3); in ixgbevf_set_uc_addr_vf()
247 ret_val = mbx->ops.read_posted(hw, msgbuf, 3); in ixgbevf_set_uc_addr_vf()
270 int ixgbevf_get_reta_locked(struct ixgbe_hw *hw, u32 *reta, int num_rx_queues) in ixgbevf_get_reta_locked() argument
288 if (hw->api_version != ixgbe_mbox_api_12 || in ixgbevf_get_reta_locked()
289 hw->mac.type >= ixgbe_mac_X550_vf) in ixgbevf_get_reta_locked()
294 err = hw->mbx.ops.write_posted(hw, msgbuf, 1); in ixgbevf_get_reta_locked()
299 err = hw->mbx.ops.read_posted(hw, msgbuf, dwords + 1); in ixgbevf_get_reta_locked()
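ixgbevf_get_reta_locked() gates the query on mailbox API 1.2 and on pre-X550 parts (newer VFs read their RSS registers directly), then sends one command word and reads back dwords + 1 words. Each payload dword packs sixteen 2-bit redirection entries, so a 64-entry table fits in four dwords. A sketch of the unpacking under that assumed layout:

    #include <stdint.h>

    #define RETA_SIZE 64                     /* assumed 82599/x540 VF table */

    /* hw_reta points just past the echoed command word. The mask keeps
     * entries within the queue count; with at most 2 RX queues it is 0 or 1. */
    static void unpack_reta(const uint32_t *hw_reta, uint32_t *reta,
                            int num_rx_queues)
    {
        uint32_t mask = (num_rx_queues > 1) ? 0x1 : 0x0;
        int dwords = RETA_SIZE / 16;

        for (int i = 0; i < dwords; i++)
            for (int j = 0; j < 16; j++)
                reta[i * 16 + j] = (hw_reta[i] >> (2 * j)) & mask;
    }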
338 int ixgbevf_get_rss_key_locked(struct ixgbe_hw *hw, u8 *rss_key) in ixgbevf_get_rss_key_locked() argument
349 if (hw->api_version != ixgbe_mbox_api_12 || in ixgbevf_get_rss_key_locked()
350 hw->mac.type >= ixgbe_mac_X550_vf) in ixgbevf_get_rss_key_locked()
354 err = hw->mbx.ops.write_posted(hw, msgbuf, 1); in ixgbevf_get_rss_key_locked()
359 err = hw->mbx.ops.read_posted(hw, msgbuf, 11); in ixgbevf_get_rss_key_locked()
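The read of 11 words in ixgbevf_get_rss_key_locked() is one echoed command word plus ten payload dwords, i.e. a 40-byte RSS key. A small sketch of consuming that reply; the NACK bit value is an assumption.

    #include <stdint.h>
    #include <string.h>

    #define RSS_KEY_LEN   40                 /* 10 dwords => read of 11 words */
    #define MSGTYPE_NACK  0x40000000u        /* assumed reply bit */

    static int copy_rss_key(const uint32_t *msgbuf, uint8_t *rss_key)
    {
        if (msgbuf[0] & MSGTYPE_NACK)        /* PF refused the query */
            return -1;
        memcpy(rss_key, &msgbuf[1], RSS_KEY_LEN);
        return 0;
    }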
389 static s32 ixgbevf_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr, in ixgbevf_set_rar_vf() argument
392 struct ixgbe_mbx_info *mbx = &hw->mbx; in ixgbevf_set_rar_vf()
400 ret_val = mbx->ops.write_posted(hw, msgbuf, 3); in ixgbevf_set_rar_vf()
403 ret_val = mbx->ops.read_posted(hw, msgbuf, 3); in ixgbevf_set_rar_vf()
410 ixgbevf_get_mac_addr_vf(hw, hw->mac.addr); in ixgbevf_set_rar_vf()
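ixgbevf_set_rar_vf() packs the 6-byte address after the command word and, notably, reloads the permanent address if the PF NACKs the change, so the driver's cached MAC stays truthful. A sketch under assumed opcode and reply-bit values:

    #include <stdint.h>
    #include <string.h>

    #define SET_MAC_ADDR  0x02               /* assumed opcode */
    #define MSGTYPE_NACK  0x40000000u        /* assumed reply bit */

    int  mbx_write_posted(uint32_t *msg, int n);
    int  mbx_read_posted(uint32_t *msg, int n);
    void get_perm_mac(uint8_t mac[6]);       /* stand-in for get_mac_addr_vf */

    static int vf_set_rar(const uint8_t addr[6], uint8_t current_mac[6])
    {
        uint32_t msgbuf[3] = { SET_MAC_ADDR };
        int err;

        memcpy(&msgbuf[1], addr, 6);         /* address rides in words 1..2 */

        err = mbx_write_posted(msgbuf, 3);
        if (!err)
            err = mbx_read_posted(msgbuf, 3);

        if (!err && (msgbuf[0] & MSGTYPE_NACK)) {
            get_perm_mac(current_mac);       /* PF refused: fall back */
            return -1;
        }
        return err ? -1 : 0;
    }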
415 static void ixgbevf_write_msg_read_ack(struct ixgbe_hw *hw, in ixgbevf_write_msg_read_ack() argument
418 struct ixgbe_mbx_info *mbx = &hw->mbx; in ixgbevf_write_msg_read_ack()
420 s32 retval = mbx->ops.write_posted(hw, msg, size); in ixgbevf_write_msg_read_ack()
423 mbx->ops.read_posted(hw, retmsg, size); in ixgbevf_write_msg_read_ack()
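This helper is the driver's fire-and-forget mailbox idiom: post the message and, if the write succeeded, read the PF's reply purely to drain the ACK so the mailbox is clean for the next exchange. The pattern, with the same hypothetical mbx_* stubs as above:

    #include <stdint.h>

    #define MAILBOX_WORDS 16                 /* assumed mailbox size in dwords */

    int mbx_write_posted(uint32_t *msg, int n);
    int mbx_read_posted(uint32_t *msg, int n);

    /* The reply is discarded; callers that care about the answer use
     * read_posted on their own buffer instead. */
    static void write_msg_read_ack(uint32_t *msg, int size)
    {
        uint32_t retmsg[MAILBOX_WORDS];

        if (!mbx_write_posted(msg, size))
            mbx_read_posted(retmsg, size);
    }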
433 static s32 ixgbevf_update_mc_addr_list_vf(struct ixgbe_hw *hw, in ixgbevf_update_mc_addr_list_vf() argument
463 vector_list[i++] = ixgbevf_mta_vector(hw, ha->addr); in ixgbevf_update_mc_addr_list_vf()
466 ixgbevf_write_msg_read_ack(hw, msgbuf, IXGBE_VFMAILBOX_SIZE); in ixgbevf_update_mc_addr_list_vf()
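ixgbevf_update_mc_addr_list_vf() batches the 12-bit hashes from ixgbevf_mta_vector() into one message: word 0 carries the opcode with the entry count in its info bits, and the vectors follow packed as 16-bit values, which is how up to 30 addresses fit in a single mailbox. A sketch of the packing; the opcode, shift, and 30-entry limit are assumptions consistent with the fragments.

    #include <stdint.h>

    #define SET_MULTICAST   0x03             /* assumed opcode */
    #define MSGINFO_SHIFT   16               /* assumed info-bits position */
    #define MAX_MC_ENTRIES  30               /* what one message can carry */

    uint32_t mta_vector(int filter_type, const uint8_t mc_addr[6]); /* as sketched earlier */

    static void build_mc_msg(uint32_t *msgbuf, const uint8_t (*mc)[6],
                             int cnt, int filter_type)
    {
        uint16_t *vector_list = (uint16_t *)&msgbuf[1];

        if (cnt > MAX_MC_ENTRIES)
            cnt = MAX_MC_ENTRIES;            /* silently truncate the rest */
        msgbuf[0] = SET_MULTICAST | ((uint32_t)cnt << MSGINFO_SHIFT);

        for (int i = 0; i < cnt; i++)        /* 12-bit hashes, 16 bits apart */
            vector_list[i] = (uint16_t)mta_vector(filter_type, mc[i]);
    }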
479 static s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw, in ixgbevf_update_xcast_mode() argument
482 struct ixgbe_mbx_info *mbx = &hw->mbx; in ixgbevf_update_xcast_mode()
486 switch (hw->api_version) { in ixgbevf_update_xcast_mode()
496 err = mbx->ops.write_posted(hw, msgbuf, 2); in ixgbevf_update_xcast_mode()
500 err = mbx->ops.read_posted(hw, msgbuf, 2); in ixgbevf_update_xcast_mode()
518 static s32 ixgbevf_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind, in ixgbevf_set_vfta_vf() argument
521 struct ixgbe_mbx_info *mbx = &hw->mbx; in ixgbevf_set_vfta_vf()
530 err = mbx->ops.write_posted(hw, msgbuf, 2); in ixgbevf_set_vfta_vf()
534 err = mbx->ops.read_posted(hw, msgbuf, 2); in ixgbevf_set_vfta_vf()
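The VLAN filter update is a two-word exchange: the VLAN ID in word 1 with the add/remove flag folded into the info bits of word 0, after which the PF's echo is checked for an ACK. A sketch under assumed opcode and reply-bit values:

    #include <stdbool.h>
    #include <stdint.h>

    #define SET_VLAN       0x04              /* assumed opcode */
    #define MSGINFO_SHIFT  16
    #define MSGTYPE_ACK    0x80000000u       /* assumed reply bit */

    int mbx_write_posted(uint32_t *msg, int n);
    int mbx_read_posted(uint32_t *msg, int n);

    static int vf_set_vlan(uint16_t vlan, bool vlan_on)
    {
        uint32_t msgbuf[2];

        msgbuf[0] = SET_VLAN | ((uint32_t)vlan_on << MSGINFO_SHIFT);
        msgbuf[1] = vlan;

        if (mbx_write_posted(msgbuf, 2) || mbx_read_posted(msgbuf, 2))
            return -1;

        /* The PF echoes the opcode with ACK or NACK bits set on top. */
        return (msgbuf[0] & MSGTYPE_ACK) ? 0 : -1;
    }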
559 static s32 ixgbevf_setup_mac_link_vf(struct ixgbe_hw *hw, in ixgbevf_setup_mac_link_vf() argument
575 static s32 ixgbevf_check_mac_link_vf(struct ixgbe_hw *hw, in ixgbevf_check_mac_link_vf() argument
580 struct ixgbe_mbx_info *mbx = &hw->mbx; in ixgbevf_check_mac_link_vf()
581 struct ixgbe_mac_info *mac = &hw->mac; in ixgbevf_check_mac_link_vf()
587 if (!mbx->ops.check_for_rst(hw) || !mbx->timeout) in ixgbevf_check_mac_link_vf()
594 links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS); in ixgbevf_check_mac_link_vf()
606 links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS); in ixgbevf_check_mac_link_vf()
628 if (mbx->ops.read(hw, &in_msg, 1)) in ixgbevf_check_mac_link_vf()
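ixgbevf_check_mac_link_vf() reads VFLINKS for the up bit and speed field, but a VF cannot fully trust that register: if a PF reset is pending or the mailbox has timed out, link state is treated as unknown, and the mbx read of in_msg lets the VF notice PF control messages while polling. A sketch of the speed decode; the offset and bit encodings are assumptions.

    #include <stdbool.h>
    #include <stdint.h>

    uint32_t reg_read(uint32_t reg);

    #define VFLINKS           0x0010         /* illustrative offset */
    #define LINKS_UP          (1u << 30)     /* assumed encodings below */
    #define LINKS_SPEED_MASK  (3u << 28)
    #define LINKS_SPEED_10G   (3u << 28)
    #define LINKS_SPEED_1G    (1u << 29)
    #define LINKS_SPEED_100   (1u << 28)

    static bool vf_check_link(uint32_t *speed_mbps)
    {
        uint32_t links = reg_read(VFLINKS);

        if (!(links & LINKS_UP))
            return false;

        switch (links & LINKS_SPEED_MASK) {
        case LINKS_SPEED_10G: *speed_mbps = 10000; break;
        case LINKS_SPEED_1G:  *speed_mbps = 1000;  break;
        case LINKS_SPEED_100: *speed_mbps = 100;   break;
        default:              *speed_mbps = 0;     break;
        }
        return true;
    }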
659 void ixgbevf_rlpml_set_vf(struct ixgbe_hw *hw, u16 max_size) in ixgbevf_rlpml_set_vf() argument
665 ixgbevf_write_msg_read_ack(hw, msgbuf, 2); in ixgbevf_rlpml_set_vf()
673 int ixgbevf_negotiate_api_version(struct ixgbe_hw *hw, int api) in ixgbevf_negotiate_api_version() argument
682 err = hw->mbx.ops.write_posted(hw, msg, 3); in ixgbevf_negotiate_api_version()
685 err = hw->mbx.ops.read_posted(hw, msg, 3); in ixgbevf_negotiate_api_version()
692 hw->api_version = api; in ixgbevf_negotiate_api_version()
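API negotiation reuses the request/echo pattern: the proposed version goes in word 1 of a three-word message, and only an ACKed echo commits hw->api_version; otherwise the VF stays on the 1.0 baseline set at reset. A sketch with assumed opcode and reply bits:

    #include <stdint.h>

    #define API_NEGOTIATE  0x08              /* assumed opcode */
    #define MSGTYPE_ACK    0x80000000u       /* assumed reply bits */
    #define MSGTYPE_CTS    0x20000000u

    int mbx_write_posted(uint32_t *msg, int n);
    int mbx_read_posted(uint32_t *msg, int n);

    static int negotiate_api(uint32_t api, uint32_t *hw_api_version)
    {
        uint32_t msg[3] = { API_NEGOTIATE, api, 0 };

        if (mbx_write_posted(msg, 3) || mbx_read_posted(msg, 3))
            return -1;

        /* Only an ACKed echo of the request commits the new version. */
        if ((msg[0] & ~MSGTYPE_CTS) != (API_NEGOTIATE | MSGTYPE_ACK))
            return -1;

        *hw_api_version = api;
        return 0;
    }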
702 int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs, in ixgbevf_get_queues() argument
709 switch (hw->api_version) { in ixgbevf_get_queues()
720 err = hw->mbx.ops.write_posted(hw, msg, 5); in ixgbevf_get_queues()
723 err = hw->mbx.ops.read_posted(hw, msg, 5); in ixgbevf_get_queues()
736 hw->mac.max_tx_queues = msg[IXGBE_VF_TX_QUEUES]; in ixgbevf_get_queues()
737 if (hw->mac.max_tx_queues == 0 || in ixgbevf_get_queues()
738 hw->mac.max_tx_queues > IXGBE_VF_MAX_TX_QUEUES) in ixgbevf_get_queues()
739 hw->mac.max_tx_queues = IXGBE_VF_MAX_TX_QUEUES; in ixgbevf_get_queues()
741 hw->mac.max_rx_queues = msg[IXGBE_VF_RX_QUEUES]; in ixgbevf_get_queues()
742 if (hw->mac.max_rx_queues == 0 || in ixgbevf_get_queues()
743 hw->mac.max_rx_queues > IXGBE_VF_MAX_RX_QUEUES) in ixgbevf_get_queues()
744 hw->mac.max_rx_queues = IXGBE_VF_MAX_RX_QUEUES; in ixgbevf_get_queues()
748 if (*num_tcs > hw->mac.max_rx_queues) in ixgbevf_get_queues()
753 if (*default_tc >= hw->mac.max_tx_queues) in ixgbevf_get_queues()
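The closing fragments sanitize whatever the PF reports rather than failing outright: queue counts are forced into a sane range, a TC count larger than the RX queue count falls back to 1 (no tagging), and an out-of-range default TC falls back to queue 0. A sketch of that validation; the limit of 4 queues and the fallback values are assumptions consistent with the truncated conditions shown.

    #include <stdint.h>

    #define VF_MAX_TX_QUEUES 4               /* assumed hardware limit */
    #define VF_MAX_RX_QUEUES 4

    static void sanitize_queues(uint32_t *max_tx, uint32_t *max_rx,
                                uint32_t *num_tcs, uint32_t *default_tc)
    {
        if (*max_tx == 0 || *max_tx > VF_MAX_TX_QUEUES)
            *max_tx = VF_MAX_TX_QUEUES;      /* distrust 0 or oversized counts */
        if (*max_rx == 0 || *max_rx > VF_MAX_RX_QUEUES)
            *max_rx = VF_MAX_RX_QUEUES;

        if (*num_tcs > *max_rx)              /* more TCs than RX queues: */
            *num_tcs = 1;                    /* assume we cannot tag frames */
        if (*default_tc >= *max_tx)          /* out-of-range default queue: */
            *default_tc = 0;                 /* fall back to queue 0 */
    }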