Lines matching refs:hw — uses of struct e1000_hw *hw in the igb i210 NVM/flash support code (drivers/net/ethernet/intel/igb/e1000_i210.c); "argument" marks definition lines where hw is a function parameter.
34 static s32 igb_update_flash_i210(struct e1000_hw *hw);
42 static s32 igb_get_hw_semaphore_i210(struct e1000_hw *hw) in igb_get_hw_semaphore_i210() argument
45 s32 timeout = hw->nvm.word_size + 1; in igb_get_hw_semaphore_i210()
62 if (hw->dev_spec._82575.clear_semaphore_once) { in igb_get_hw_semaphore_i210()
63 hw->dev_spec._82575.clear_semaphore_once = false; in igb_get_hw_semaphore_i210()
64 igb_put_hw_semaphore(hw); in igb_get_hw_semaphore_i210()
95 igb_put_hw_semaphore(hw); in igb_get_hw_semaphore_i210()
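The fragments above are from the i210 hardware-semaphore helper: it polls for the software semaphore for up to nvm.word_size + 1 iterations, and on the first timeout it may release a stale semaphore once (clear_semaphore_once) before polling again. A minimal sketch of that SMBI polling pattern follows, assuming igb's rd32() accessor, the E1000_SWSM/E1000_SWSM_SMBI definitions, and kernel udelay(); the sketch_* name is mine, and the real helper additionally claims the SW/FW (SWESMBI) bit before returning:

/* Sketch, not the driver's exact code: bounded poll for the SW
 * semaphore (SMBI), with a one-shot release of a stale semaphore.
 */
static s32 sketch_get_hw_semaphore_i210(struct e1000_hw *hw)
{
        s32 timeout = hw->nvm.word_size + 1;
        s32 i;

        for (i = 0; i < timeout; i++) {
                if (!(rd32(E1000_SWSM) & E1000_SWSM_SMBI))
                        break;                  /* semaphore is free */
                udelay(50);
        }

        if (i == timeout && hw->dev_spec._82575.clear_semaphore_once) {
                /* A semaphore left set by a dirty shutdown would block
                 * forever; release it once, then poll again.
                 */
                hw->dev_spec._82575.clear_semaphore_once = false;
                igb_put_hw_semaphore(hw);
                for (i = 0; i < timeout; i++) {
                        if (!(rd32(E1000_SWSM) & E1000_SWSM_SMBI))
                                break;
                        udelay(50);
                }
        }

        return (i == timeout) ? -E1000_ERR_NVM : 0;
}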
112 static s32 igb_acquire_nvm_i210(struct e1000_hw *hw) in igb_acquire_nvm_i210() argument
114 return igb_acquire_swfw_sync_i210(hw, E1000_SWFW_EEP_SM); in igb_acquire_nvm_i210()
124 static void igb_release_nvm_i210(struct e1000_hw *hw) in igb_release_nvm_i210() argument
126 igb_release_swfw_sync_i210(hw, E1000_SWFW_EEP_SM); in igb_release_nvm_i210()
137 s32 igb_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask) in igb_acquire_swfw_sync_i210() argument
146 if (igb_get_hw_semaphore_i210(hw)) { in igb_acquire_swfw_sync_i210()
156 igb_put_hw_semaphore(hw); in igb_acquire_swfw_sync_i210()
170 igb_put_hw_semaphore(hw); in igb_acquire_swfw_sync_i210()
183 void igb_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask) in igb_release_swfw_sync_i210() argument
187 while (igb_get_hw_semaphore_i210(hw)) in igb_release_swfw_sync_i210()
194 igb_put_hw_semaphore(hw); in igb_release_swfw_sync_i210()
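igb_acquire_nvm_i210()/igb_release_nvm_i210() above are thin wrappers that pass E1000_SWFW_EEP_SM to the SW/FW sync helpers. Those helpers take the HW semaphore only long enough to test and update the E1000_SW_FW_SYNC ownership register, then drop it; the release path spins on the semaphore, clears the SW mask bit, and puts the semaphore back. A minimal sketch of the acquire side, assuming rd32()/wr32() and the error codes from e1000_defines.h; not the driver's exact code:

s32 sketch_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask)
{
        u32 swfw_sync;
        u32 swmask = mask;              /* SW ownership bits */
        u32 fwmask = mask << 16;        /* FW owns the upper half */
        s32 i, timeout = 200;

        for (i = 0; i < timeout; i++) {
                if (igb_get_hw_semaphore_i210(hw))
                        return -E1000_ERR_SWFW_SYNC;

                swfw_sync = rd32(E1000_SW_FW_SYNC);
                if (!(swfw_sync & (fwmask | swmask)))
                        break;          /* resource free, claim it below */

                /* Firmware currently owns the resource: drop the
                 * semaphore so FW can finish, back off, retry.
                 */
                igb_put_hw_semaphore(hw);
                mdelay(5);
        }
        if (i == timeout)
                return -E1000_ERR_SWFW_SYNC;

        wr32(E1000_SW_FW_SYNC, swfw_sync | swmask);
        igb_put_hw_semaphore(hw);
        return 0;
}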
207 static s32 igb_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, u16 words, in igb_read_nvm_srrd_i210() argument
220 if (!(hw->nvm.ops.acquire(hw))) { in igb_read_nvm_srrd_i210()
221 status = igb_read_nvm_eerd(hw, offset, count, in igb_read_nvm_srrd_i210()
223 hw->nvm.ops.release(hw); in igb_read_nvm_srrd_i210()
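igb_read_nvm_srrd_i210() reads the Shadow RAM in chunks of at most E1000_EERD_EEWR_MAX_COUNT words, taking the EEPROM semaphore around each chunk so hardware/firmware get a window in between. A sketch of that loop, reusing the driver's generic EERD reader; not the exact code:

static s32 sketch_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset,
                                     u16 words, u16 *data)
{
        s32 status = 0;
        u16 i, count;

        for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) {
                count = (words - i) > E1000_EERD_EEWR_MAX_COUNT ?
                        E1000_EERD_EEWR_MAX_COUNT : (words - i);

                /* hold the EEPROM semaphore only for this chunk */
                if (hw->nvm.ops.acquire(hw))
                        return -E1000_ERR_SWFW_SYNC;
                status = igb_read_nvm_eerd(hw, offset + i, count, data + i);
                hw->nvm.ops.release(hw);
                if (status)
                        break;
        }
        return status;
}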
247 static s32 igb_write_nvm_srwr(struct e1000_hw *hw, u16 offset, u16 words, in igb_write_nvm_srwr() argument
250 struct e1000_nvm_info *nvm = &hw->nvm; in igb_write_nvm_srwr()
307 static s32 igb_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words, in igb_write_nvm_srwr_i210() argument
320 if (!(hw->nvm.ops.acquire(hw))) { in igb_write_nvm_srwr_i210()
321 status = igb_write_nvm_srwr(hw, offset, count, in igb_write_nvm_srwr_i210()
323 hw->nvm.ops.release(hw); in igb_write_nvm_srwr_i210()
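igb_write_nvm_srwr_i210() wraps igb_write_nvm_srwr() with the same chunked acquire/release pattern as the read path. Note that srwr writes touch only the Shadow RAM; nothing reaches the flash until igb_update_nvm_checksum_i210() commits it. A sketch of the per-word write srwr performs through the E1000_SRWR register, with field names assumed from e1000_defines.h; not the driver's exact code:

/* Sketch: program one word into Shadow RAM via E1000_SRWR and poll
 * the DONE bit. The caller is assumed to already hold the semaphore.
 */
static s32 sketch_srwr_write_word(struct e1000_hw *hw, u16 offset, u16 word)
{
        u32 attempts = 100000;
        u32 k, srwr;

        srwr = ((u32)offset << E1000_NVM_RW_ADDR_SHIFT) |
               ((u32)word << E1000_NVM_RW_REG_DATA) |
               E1000_NVM_RW_REG_START;
        wr32(E1000_SRWR, srwr);

        for (k = 0; k < attempts; k++) {
                if (rd32(E1000_SRWR) & E1000_NVM_RW_REG_DONE)
                        return 0;
                udelay(5);
        }
        return -E1000_ERR_NVM;
}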
344 static s32 igb_read_invm_word_i210(struct e1000_hw *hw, u8 address, u16 *data) in igb_read_invm_word_i210() argument
385 static s32 igb_read_invm_i210(struct e1000_hw *hw, u16 offset, in igb_read_invm_i210() argument
393 ret_val = igb_read_invm_word_i210(hw, (u8)offset, &data[0]); in igb_read_invm_i210()
394 ret_val |= igb_read_invm_word_i210(hw, (u8)offset+1, in igb_read_invm_i210()
396 ret_val |= igb_read_invm_word_i210(hw, (u8)offset+2, in igb_read_invm_i210()
402 ret_val = igb_read_invm_word_i210(hw, (u8)offset, data); in igb_read_invm_i210()
409 ret_val = igb_read_invm_word_i210(hw, (u8)offset, data); in igb_read_invm_i210()
416 ret_val = igb_read_invm_word_i210(hw, (u8)offset, data); in igb_read_invm_i210()
423 ret_val = igb_read_invm_word_i210(hw, (u8)offset, data); in igb_read_invm_i210()
430 ret_val = igb_read_invm_word_i210(hw, (u8)offset, data); in igb_read_invm_i210()
437 *data = hw->subsystem_device_id; in igb_read_invm_i210()
440 *data = hw->subsystem_vendor_id; in igb_read_invm_i210()
443 *data = hw->device_id; in igb_read_invm_i210()
446 *data = hw->vendor_id; in igb_read_invm_i210()
463 s32 igb_read_invm_version(struct e1000_hw *hw, in igb_read_invm_version() argument
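The igb_read_invm_* fragments above serve flash-less parts: words come from 64 dwords of OTP (iNVM) rather than a Shadow RAM. Each dword is a typed record; word-autoload records carry an address/data pair, and a few words (the vendor/device/subsystem ID cases above) are not stored as records at all but served from fields the driver cached from PCI config space. A sketch of the record scan, with the record-type macros assumed from e1000_i210.h; not the driver's exact code:

static s32 sketch_read_invm_word(struct e1000_hw *hw, u8 address, u16 *data)
{
        u32 invm_dword;
        u8 record_type;
        u16 i;

        for (i = 0; i < E1000_INVM_SIZE; i++) {
                invm_dword = rd32(E1000_INVM_DATA_REG(i));
                record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
                if (record_type == E1000_INVM_UNINITIALIZED_STRUCTURE)
                        break;          /* end of programmed OTP data */
                if (record_type == E1000_INVM_CSR_AUTOLOAD_STRUCTURE)
                        i += E1000_INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
                if (record_type == E1000_INVM_RSA_KEY_SHA256_STRUCTURE)
                        i += E1000_INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
                if (record_type == E1000_INVM_WORD_AUTOLOAD_STRUCTURE &&
                    INVM_DWORD_TO_WORD_ADDRESS(invm_dword) == address) {
                        *data = INVM_DWORD_TO_WORD_DATA(invm_dword);
                        return 0;
                }
        }
        return -E1000_ERR_INVM_VALUE_NOT_FOUND;
}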
557 static s32 igb_validate_nvm_checksum_i210(struct e1000_hw *hw) in igb_validate_nvm_checksum_i210() argument
562 if (!(hw->nvm.ops.acquire(hw))) { in igb_validate_nvm_checksum_i210()
568 read_op_ptr = hw->nvm.ops.read; in igb_validate_nvm_checksum_i210()
569 hw->nvm.ops.read = igb_read_nvm_eerd; in igb_validate_nvm_checksum_i210()
571 status = igb_validate_nvm_checksum(hw); in igb_validate_nvm_checksum_i210()
574 hw->nvm.ops.read = read_op_ptr; in igb_validate_nvm_checksum_i210()
576 hw->nvm.ops.release(hw); in igb_validate_nvm_checksum_i210()
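igb_validate_nvm_checksum_i210() temporarily swaps hw->nvm.ops.read to the raw EERD reader so the generic validator sums the Shadow RAM directly, then restores the saved pointer before releasing the semaphore. The invariant the generic validator enforces is simple; a sketch, with NVM_CHECKSUM_REG (0x3F) and NVM_SUM (0xBABA) taken from e1000_defines.h:

/* Sketch: the 16-bit sum of words 0x00..0x3F, including the checksum
 * word itself, must equal NVM_SUM (0xBABA).
 */
static s32 sketch_checksum_rule(struct e1000_hw *hw)
{
        u16 checksum = 0, word, i;

        for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) {
                if (hw->nvm.ops.read(hw, i, 1, &word))
                        return -E1000_ERR_NVM;
                checksum += word;
        }
        return (checksum == (u16)NVM_SUM) ? 0 : -E1000_ERR_NVM;
}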
592 static s32 igb_update_nvm_checksum_i210(struct e1000_hw *hw) in igb_update_nvm_checksum_i210() argument
602 ret_val = igb_read_nvm_eerd(hw, 0, 1, &nvm_data); in igb_update_nvm_checksum_i210()
608 if (!(hw->nvm.ops.acquire(hw))) { in igb_update_nvm_checksum_i210()
615 ret_val = igb_read_nvm_eerd(hw, i, 1, &nvm_data); in igb_update_nvm_checksum_i210()
617 hw->nvm.ops.release(hw); in igb_update_nvm_checksum_i210()
624 ret_val = igb_write_nvm_srwr(hw, NVM_CHECKSUM_REG, 1, in igb_update_nvm_checksum_i210()
627 hw->nvm.ops.release(hw); in igb_update_nvm_checksum_i210()
632 hw->nvm.ops.release(hw); in igb_update_nvm_checksum_i210()
634 ret_val = igb_update_flash_i210(hw); in igb_update_nvm_checksum_i210()
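The update path first reads word 0 via EERD (the fragment at the top of this group) to confirm the device is out of reset, then recomputes the checksum from words 0x00..0x3E, writes the complement word through the Shadow RAM writer, and only then commits to flash with igb_update_flash_i210(). A condensed sketch with error handling trimmed; not the driver's exact code:

static s32 sketch_update_checksum_i210(struct e1000_hw *hw)
{
        u16 checksum = 0, nvm_data, i;
        s32 ret_val;

        if (hw->nvm.ops.acquire(hw))
                return -E1000_ERR_SWFW_SYNC;

        for (i = 0; i < NVM_CHECKSUM_REG; i++) {
                ret_val = igb_read_nvm_eerd(hw, i, 1, &nvm_data);
                if (ret_val)
                        goto release;
                checksum += nvm_data;
        }
        checksum = (u16)NVM_SUM - checksum;
        ret_val = igb_write_nvm_srwr(hw, NVM_CHECKSUM_REG, 1, &checksum);
release:
        hw->nvm.ops.release(hw);
        if (!ret_val)
                ret_val = igb_update_flash_i210(hw);    /* commit to flash */
        return ret_val;
}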
647 static s32 igb_pool_flash_update_done_i210(struct e1000_hw *hw) in igb_pool_flash_update_done_i210() argument
669 bool igb_get_flash_presence_i210(struct e1000_hw *hw) in igb_get_flash_presence_i210() argument
686 static s32 igb_update_flash_i210(struct e1000_hw *hw) in igb_update_flash_i210() argument
691 ret_val = igb_pool_flash_update_done_i210(hw); in igb_update_flash_i210()
700 ret_val = igb_pool_flash_update_done_i210(hw); in igb_update_flash_i210()
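igb_update_flash_i210() first waits for any in-progress flash cycle (the "pool" helper, spelled that way in the source, polls E1000_EECD for the FLUDONE bit), then sets the FLUPD bit to start the Shadow-RAM-to-flash copy and polls for completion again. A sketch, with bit and attempt-count names assumed from the driver headers:

static s32 sketch_poll_fludone(struct e1000_hw *hw)
{
        u32 i;

        for (i = 0; i < E1000_FLUDONE_ATTEMPTS; i++) {
                if (rd32(E1000_EECD) & E1000_EECD_FLUDONE_I210)
                        return 0;       /* flash cycle complete */
                udelay(5);
        }
        return -E1000_ERR_NVM;
}

static s32 sketch_update_flash_i210(struct e1000_hw *hw)
{
        s32 ret_val;

        ret_val = sketch_poll_fludone(hw);      /* prior cycle done? */
        if (ret_val)
                return ret_val;

        wr32(E1000_EECD, rd32(E1000_EECD) | E1000_EECD_FLUPD_I210);
        return sketch_poll_fludone(hw);         /* wait for this one */
}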
718 s32 igb_valid_led_default_i210(struct e1000_hw *hw, u16 *data) in igb_valid_led_default_i210() argument
722 ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data); in igb_valid_led_default_i210()
729 switch (hw->phy.media_type) { in igb_valid_led_default_i210()
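igb_valid_led_default_i210() reads the LED configuration word from the NVM and, if the word is blank or reserved, substitutes a media-dependent default. A sketch of that fallback, assuming the ID_LED_* constants from the driver headers; not the exact code:

s32 sketch_valid_led_default_i210(struct e1000_hw *hw, u16 *data)
{
        s32 ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);

        if (ret_val)
                return ret_val;         /* NVM read error */

        if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) {
                /* blank NVM word: pick a default for the media type */
                if (hw->phy.media_type == e1000_media_type_internal_serdes)
                        *data = ID_LED_DEFAULT_I210_SERDES;
                else
                        *data = ID_LED_DEFAULT_I210;
        }
        return 0;
}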
751 static s32 __igb_access_xmdio_reg(struct e1000_hw *hw, u16 address, in __igb_access_xmdio_reg() argument
756 ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, dev_addr); in __igb_access_xmdio_reg()
760 ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAAD, address); in __igb_access_xmdio_reg()
764 ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, E1000_MMDAC_FUNC_DATA | in __igb_access_xmdio_reg()
770 ret_val = hw->phy.ops.read_reg(hw, E1000_MMDAAD, data); in __igb_access_xmdio_reg()
772 ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAAD, *data); in __igb_access_xmdio_reg()
777 ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, 0); in __igb_access_xmdio_reg()
791 s32 igb_read_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 *data) in igb_read_xmdio_reg() argument
793 return __igb_access_xmdio_reg(hw, addr, dev_addr, data, true); in igb_read_xmdio_reg()
803 s32 igb_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 data) in igb_write_xmdio_reg() argument
805 return __igb_access_xmdio_reg(hw, addr, dev_addr, &data, false); in igb_write_xmdio_reg()
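__igb_access_xmdio_reg() is the usual IEEE 802.3 clause-45-over-clause-22 indirection: MMDAC selects the MMD device and function (address vs. data), and MMDAAD carries first the register address, then the data. The read and write entry points differ only in the direction flag. A sketch of the five-step sequence mirroring the fragments above; not the driver's exact code:

static s32 sketch_access_xmdio_reg(struct e1000_hw *hw, u16 address,
                                   u8 dev_addr, u16 *data, bool read)
{
        s32 ret_val;

        /* 1. Select the MMD device, address function */
        ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, dev_addr);
        if (ret_val)
                return ret_val;

        /* 2. Latch the register address within that device */
        ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAAD, address);
        if (ret_val)
                return ret_val;

        /* 3. Switch MMDAC to the data function, same device */
        ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC,
                                        E1000_MMDAC_FUNC_DATA | dev_addr);
        if (ret_val)
                return ret_val;

        /* 4. The next MMDAAD access moves the data */
        if (read)
                ret_val = hw->phy.ops.read_reg(hw, E1000_MMDAAD, data);
        else
                ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAAD, *data);
        if (ret_val)
                return ret_val;

        /* 5. Return MMDAC to its default state */
        return hw->phy.ops.write_reg(hw, E1000_MMDAC, 0);
}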
812 s32 igb_init_nvm_params_i210(struct e1000_hw *hw) in igb_init_nvm_params_i210() argument
815 struct e1000_nvm_info *nvm = &hw->nvm; in igb_init_nvm_params_i210()
822 if (igb_get_flash_presence_i210(hw)) { in igb_init_nvm_params_i210()
823 hw->nvm.type = e1000_nvm_flash_hw; in igb_init_nvm_params_i210()
829 hw->nvm.type = e1000_nvm_invm; in igb_init_nvm_params_i210()
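igb_init_nvm_params_i210() wires up the nvm.ops table based on igb_get_flash_presence_i210(), which tests the flash-detected bit (E1000_EECD_FLASH_DETECTED_I210) in E1000_EECD: with flash present, the Shadow RAM read/write/validate/update ops are installed; on flash-less parts the read-only iNVM reader is used and the write/validate/update ops stay NULL. A sketch of that dispatch, treating the exact op assignments as assumptions:

s32 sketch_init_nvm_params_i210(struct e1000_hw *hw)
{
        struct e1000_nvm_info *nvm = &hw->nvm;

        nvm->ops.acquire = igb_acquire_nvm_i210;
        nvm->ops.release = igb_release_nvm_i210;

        if (igb_get_flash_presence_i210(hw)) {
                /* flash behind the Shadow RAM: full read/write support */
                hw->nvm.type = e1000_nvm_flash_hw;
                nvm->ops.read = igb_read_nvm_srrd_i210;
                nvm->ops.write = igb_write_nvm_srwr_i210;
                nvm->ops.validate = igb_validate_nvm_checksum_i210;
                nvm->ops.update = igb_update_nvm_checksum_i210;
        } else {
                /* flash-less part: OTP iNVM is read-only */
                hw->nvm.type = e1000_nvm_invm;
                nvm->ops.read = igb_read_invm_i210;
                nvm->ops.write = NULL;
                nvm->ops.validate = NULL;
                nvm->ops.update = NULL;
        }
        return 0;
}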
845 s32 igb_pll_workaround_i210(struct e1000_hw *hw) in igb_pll_workaround_i210() argument
859 ret_val = igb_read_invm_word_i210(hw, E1000_INVM_AUTOLOAD, in igb_pll_workaround_i210()
866 igb_read_phy_reg_gs40g(hw, (E1000_PHY_PLL_FREQ_PAGE | in igb_pll_workaround_i210()
887 igb_read_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word); in igb_pll_workaround_i210()
889 igb_write_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word); in igb_pll_workaround_i210()
892 igb_write_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word); in igb_pll_workaround_i210()
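igb_pll_workaround_i210() handles an i210 erratum in which the PHY PLL can come up unconfigured: it reads the autoload word from iNVM (falling back to E1000_INVM_DEFAULT_AL on failure), then, for up to E1000_MAX_PLL_TRIES attempts, checks the PLL state through igb_read_phy_reg_gs40g() and, while still unconfigured, resets the PHY and power-cycles the device through the PCI PMCSR register. A sketch of the D3/D0 power-management cycle that the last three fragments show, assuming the E1000_PCI_PMCSR and E1000_PCI_PMCSR_D3 definitions from the driver headers:

/* Sketch: one retry step of the workaround - drop to D3hot, wait,
 * return to D0; the caller then re-checks the PHY PLL state.
 */
static void sketch_pll_power_cycle(struct e1000_hw *hw)
{
        u16 pci_word;

        igb_read_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);
        pci_word |= E1000_PCI_PMCSR_D3;                 /* enter D3hot */
        igb_write_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);
        usleep_range(1000, 2000);
        pci_word &= ~E1000_PCI_PMCSR_D3;                /* back to D0 */
        igb_write_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);
}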