drivers/net/ethernet/intel/ice/ice_main.c


DEFINITIONS

This source file includes the following definitions.
  1. ice_get_tx_pending
  2. ice_check_for_hang_subtask
  3. ice_init_mac_fltr
  4. ice_add_mac_to_sync_list
  5. ice_add_mac_to_unsync_list
  6. ice_vsi_fltr_changed
  7. ice_cfg_promisc
  8. ice_vsi_sync_fltr
  9. ice_sync_fltr_subtask
  10. ice_dis_vsi
  11. ice_pf_dis_all_vsi
  12. ice_prepare_for_reset
  13. ice_do_reset
  14. ice_reset_subtask
  15. ice_print_topo_conflict
  16. ice_print_link_msg
  17. ice_vsi_link_event
  18. ice_link_event
  19. ice_watchdog_subtask
  20. ice_init_link_events
  21. ice_handle_link_event
  22. __ice_clean_ctrlq
  23. ice_ctrlq_pending
  24. ice_clean_adminq_subtask
  25. ice_clean_mailboxq_subtask
  26. ice_service_task_schedule
  27. ice_service_task_complete
  28. ice_service_task_stop
  29. ice_service_task_restart
  30. ice_service_timer
  31. ice_handle_mdd_event
  32. ice_force_phys_link_state
  33. ice_check_media_subtask
  34. ice_service_task
  35. ice_set_ctrlq_len
  36. ice_irq_affinity_notify
  37. ice_irq_affinity_release
  38. ice_vsi_ena_irq
  39. ice_vsi_req_irq_msix
  40. ice_ena_misc_vector
  41. ice_misc_intr
  42. ice_dis_ctrlq_interrupts
  43. ice_free_irq_msix_misc
  44. ice_ena_ctrlq_interrupts
  45. ice_req_irq_msix_misc
  46. ice_napi_add
  47. ice_set_ops
  48. ice_set_netdev_features
  49. ice_cfg_netdev
  50. ice_fill_rss_lut
  51. ice_pf_vsi_setup
  52. ice_lb_vsi_setup
  53. ice_vlan_rx_add_vid
  54. ice_vlan_rx_kill_vid
  55. ice_setup_pf_sw
  56. ice_get_avail_q_count
  57. ice_get_avail_txq_count
  58. ice_get_avail_rxq_count
  59. ice_deinit_pf
  60. ice_set_pf_caps
  61. ice_init_pf
  62. ice_ena_msix_range
  63. ice_dis_msix
  64. ice_clear_interrupt_scheme
  65. ice_init_interrupt_scheme
  66. ice_log_pkg_init
  67. ice_load_pkg
  68. ice_verify_cacheline_size
  69. ice_send_version
  70. ice_get_opt_fw_name
  71. ice_request_fw
  72. ice_probe
  73. ice_remove
  74. ice_pci_err_detected
  75. ice_pci_err_slot_reset
  76. ice_pci_err_resume
  77. ice_pci_err_reset_prepare
  78. ice_pci_err_reset_done
  79. ice_module_init
  80. ice_module_exit
  81. ice_set_mac_address
  82. ice_set_rx_mode
  83. ice_fdb_add
  84. ice_fdb_del
  85. ice_set_features
  86. ice_vsi_vlan_setup
  87. ice_vsi_cfg
  88. ice_napi_enable_all
  89. ice_up_complete
  90. ice_up
  91. ice_fetch_u64_stats_per_ring
  92. ice_update_vsi_ring_stats
  93. ice_update_vsi_stats
  94. ice_update_pf_stats
  95. ice_get_stats64
  96. ice_napi_disable_all
  97. ice_down
  98. ice_vsi_setup_tx_rings
  99. ice_vsi_setup_rx_rings
  100. ice_vsi_open
  101. ice_vsi_release_all
  102. ice_ena_vsi
  103. ice_pf_ena_all_vsi
  104. ice_vsi_rebuild_by_type
  105. ice_update_pf_netdev_link
  106. ice_rebuild
  107. ice_change_mtu
  108. ice_set_rss
  109. ice_get_rss
  110. ice_bridge_getlink
  111. ice_vsi_update_bridge_mode
  112. ice_bridge_setlink
  113. ice_tx_timeout
  114. ice_open
  115. ice_stop
  116. ice_features_check

   1 // SPDX-License-Identifier: GPL-2.0
   2 /* Copyright (c) 2018, Intel Corporation. */
   3 
   4 /* Intel(R) Ethernet Connection E800 Series Linux Driver */
   5 
   6 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
   7 
   8 #include "ice.h"
   9 #include "ice_lib.h"
  10 #include "ice_dcb_lib.h"
  11 
  12 #define DRV_VERSION_MAJOR 0
  13 #define DRV_VERSION_MINOR 8
  14 #define DRV_VERSION_BUILD 1
  15 
  16 #define DRV_VERSION     __stringify(DRV_VERSION_MAJOR) "." \
  17                         __stringify(DRV_VERSION_MINOR) "." \
  18                         __stringify(DRV_VERSION_BUILD) "-k"
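/* With the values above, DRV_VERSION expands to the string "0.8.1-k"; this is
 * the version string exported via MODULE_VERSION() below.
 */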
  19 #define DRV_SUMMARY     "Intel(R) Ethernet Connection E800 Series Linux Driver"
  20 const char ice_drv_ver[] = DRV_VERSION;
  21 static const char ice_driver_string[] = DRV_SUMMARY;
  22 static const char ice_copyright[] = "Copyright (c) 2018, Intel Corporation.";
  23 
  24 /* DDP Package file located in firmware search paths (e.g. /lib/firmware/) */
  25 #define ICE_DDP_PKG_PATH        "intel/ice/ddp/"
  26 #define ICE_DDP_PKG_FILE        ICE_DDP_PKG_PATH "ice.pkg"
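/* With the default firmware search path mentioned above, ICE_DDP_PKG_FILE
 * typically resolves to /lib/firmware/intel/ice/ddp/ice.pkg.
 */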
  27 
  28 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
  29 MODULE_DESCRIPTION(DRV_SUMMARY);
  30 MODULE_LICENSE("GPL v2");
  31 MODULE_VERSION(DRV_VERSION);
  32 MODULE_FIRMWARE(ICE_DDP_PKG_FILE);
  33 
  34 static int debug = -1;
  35 module_param(debug, int, 0644);
  36 #ifndef CONFIG_DYNAMIC_DEBUG
  37 MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all), hw debug_mask (0x8XXXXXXX)");
  38 #else
  39 MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all)");
  40 #endif /* !CONFIG_DYNAMIC_DEBUG */
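/* Illustrative usage: "modprobe ice debug=16" enables all netif message
 * levels, and (without CONFIG_DYNAMIC_DEBUG) a value of the form 0x8XXXXXXX
 * is treated as a hw debug_mask, as described above. Because the parameter is
 * registered with mode 0644, it can also be changed at runtime through
 * /sys/module/ice/parameters/debug.
 */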
  41 
  42 static struct workqueue_struct *ice_wq;
  43 static const struct net_device_ops ice_netdev_safe_mode_ops;
  44 static const struct net_device_ops ice_netdev_ops;
  45 
  46 static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type);
  47 
  48 static void ice_vsi_release_all(struct ice_pf *pf);
  49 
  50 /**
  51  * ice_get_tx_pending - returns number of Tx descriptors not processed
  52  * @ring: the ring of descriptors
  53  */
  54 static u16 ice_get_tx_pending(struct ice_ring *ring)
  55 {
  56         u16 head, tail;
  57 
  58         head = ring->next_to_clean;
  59         tail = ring->next_to_use;
  60 
  61         if (head != tail)
  62                 return (head < tail) ?
  63                         tail - head : (tail + ring->count - head);
  64         return 0;
  65 }
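/* Worked example with illustrative values: for a ring with count = 8,
 * next_to_clean = 6 and next_to_use = 2, head > tail, so the function above
 * returns tail + count - head = 2 + 8 - 6 = 4 descriptors still pending.
 */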
  66 
  67 /**
  68  * ice_check_for_hang_subtask - check for and recover hung queues
  69  * @pf: pointer to PF struct
  70  */
  71 static void ice_check_for_hang_subtask(struct ice_pf *pf)
  72 {
  73         struct ice_vsi *vsi = NULL;
  74         struct ice_hw *hw;
  75         unsigned int i;
  76         int packets;
  77         u32 v;
  78 
  79         ice_for_each_vsi(pf, v)
  80                 if (pf->vsi[v] && pf->vsi[v]->type == ICE_VSI_PF) {
  81                         vsi = pf->vsi[v];
  82                         break;
  83                 }
  84 
  85         if (!vsi || test_bit(__ICE_DOWN, vsi->state))
  86                 return;
  87 
  88         if (!(vsi->netdev && netif_carrier_ok(vsi->netdev)))
  89                 return;
  90 
  91         hw = &vsi->back->hw;
  92 
  93         for (i = 0; i < vsi->num_txq; i++) {
  94                 struct ice_ring *tx_ring = vsi->tx_rings[i];
  95 
  96                 if (tx_ring && tx_ring->desc) {
  97                         /* If the packet counter has not changed, the queue is
  98                          * likely stalled, so force an interrupt for this
  99                          * queue.
 100                          *
 101                          * prev_pkt would be negative if there was no
 102                          * pending work.
 103                          */
 104                         packets = tx_ring->stats.pkts & INT_MAX;
 105                         if (tx_ring->tx_stats.prev_pkt == packets) {
 106                                 /* Trigger sw interrupt to revive the queue */
 107                                 ice_trigger_sw_intr(hw, tx_ring->q_vector);
 108                                 continue;
 109                         }
 110 
 111                         /* Memory barrier between read of packet count and call
 112                          * to ice_get_tx_pending()
 113                          */
 114                         smp_rmb();
 115                         tx_ring->tx_stats.prev_pkt =
 116                             ice_get_tx_pending(tx_ring) ? packets : -1;
 117                 }
 118         }
 119 }
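/* Note on the logic above: prev_pkt is set to -1 whenever a ring has no
 * pending work, and the packet counter is masked with INT_MAX so it is never
 * negative; an idle queue therefore never matches prev_pkt and never gets a
 * spurious software interrupt.
 */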
 120 
 121 /**
 122  * ice_init_mac_fltr - Set initial MAC filters
 123  * @pf: board private structure
 124  *
 125  * Set initial set of MAC filters for PF VSI; configure filters for permanent
 126  * address and broadcast address. If an error is encountered, netdevice will be
 127  * unregistered.
 128  */
 129 static int ice_init_mac_fltr(struct ice_pf *pf)
 130 {
 131         enum ice_status status;
 132         u8 broadcast[ETH_ALEN];
 133         struct ice_vsi *vsi;
 134 
 135         vsi = ice_get_main_vsi(pf);
 136         if (!vsi)
 137                 return -EINVAL;
 138 
 139         /* To add a MAC filter, first add the MAC to a list and then
 140          * pass the list to ice_add_mac.
 141          */
 142 
 143          /* Add a unicast MAC filter so the VSI can get its packets */
 144         status = ice_vsi_cfg_mac_fltr(vsi, vsi->port_info->mac.perm_addr, true);
 145         if (status)
 146                 goto unregister;
 147 
 148         /* VSI needs to receive broadcast traffic, so add the broadcast
 149          * MAC address to the list as well.
 150          */
 151         eth_broadcast_addr(broadcast);
 152         status = ice_vsi_cfg_mac_fltr(vsi, broadcast, true);
 153         if (status)
 154                 goto unregister;
 155 
 156         return 0;
 157 unregister:
 158         /* We aren't useful with no MAC filters, so unregister if we
 159          * had an error
 160          */
 161         if (status && vsi->netdev->reg_state == NETREG_REGISTERED) {
 162                 dev_err(&pf->pdev->dev,
 163                         "Could not add MAC filters error %d. Unregistering device\n",
 164                         status);
 165                 unregister_netdev(vsi->netdev);
 166                 free_netdev(vsi->netdev);
 167                 vsi->netdev = NULL;
 168         }
 169 
 170         return -EIO;
 171 }
 172 
 173 /**
 174  * ice_add_mac_to_sync_list - creates list of MAC addresses to be synced
 175  * @netdev: the net device on which the sync is happening
 176  * @addr: MAC address to sync
 177  *
 178  * This is a callback function which is called by the in kernel device sync
 179  * functions (like __dev_uc_sync, __dev_mc_sync, etc). This function only
 180  * populates the tmp_sync_list, which is later used by ice_add_mac to add the
 181  * MAC filters to the hardware.
 182  */
 183 static int ice_add_mac_to_sync_list(struct net_device *netdev, const u8 *addr)
 184 {
 185         struct ice_netdev_priv *np = netdev_priv(netdev);
 186         struct ice_vsi *vsi = np->vsi;
 187 
 188         if (ice_add_mac_to_list(vsi, &vsi->tmp_sync_list, addr))
 189                 return -EINVAL;
 190 
 191         return 0;
 192 }
 193 
 194 /**
 195  * ice_add_mac_to_unsync_list - creates list of MAC addresses to be unsynced
 196  * @netdev: the net device on which the unsync is happening
 197  * @addr: MAC address to unsync
 198  *
 199  * This is a callback function which is called by the in kernel device unsync
 200  * functions (like __dev_uc_unsync, __dev_mc_unsync, etc). This function only
 201  * populates the tmp_unsync_list, which is later used by ice_remove_mac to
 202  * delete the MAC filters from the hardware.
 203  */
 204 static int ice_add_mac_to_unsync_list(struct net_device *netdev, const u8 *addr)
 205 {
 206         struct ice_netdev_priv *np = netdev_priv(netdev);
 207         struct ice_vsi *vsi = np->vsi;
 208 
 209         if (ice_add_mac_to_list(vsi, &vsi->tmp_unsync_list, addr))
 210                 return -EINVAL;
 211 
 212         return 0;
 213 }
 214 
 215 /**
 216  * ice_vsi_fltr_changed - check if filter state changed
 217  * @vsi: VSI to be checked
 218  *
 219  * returns true if filter state has changed, false otherwise.
 220  */
 221 static bool ice_vsi_fltr_changed(struct ice_vsi *vsi)
 222 {
 223         return test_bit(ICE_VSI_FLAG_UMAC_FLTR_CHANGED, vsi->flags) ||
 224                test_bit(ICE_VSI_FLAG_MMAC_FLTR_CHANGED, vsi->flags) ||
 225                test_bit(ICE_VSI_FLAG_VLAN_FLTR_CHANGED, vsi->flags);
 226 }
 227 
 228 /**
 229  * ice_cfg_promisc - Enable or disable promiscuous mode for a given PF
 230  * @vsi: the VSI being configured
 231  * @promisc_m: mask of promiscuous config bits
 232  * @set_promisc: enable or disable promisc flag request
 233  *
 234  */
 235 static int ice_cfg_promisc(struct ice_vsi *vsi, u8 promisc_m, bool set_promisc)
 236 {
 237         struct ice_hw *hw = &vsi->back->hw;
 238         enum ice_status status = 0;
 239 
 240         if (vsi->type != ICE_VSI_PF)
 241                 return 0;
 242 
 243         if (vsi->vlan_ena) {
 244                 status = ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_m,
 245                                                   set_promisc);
 246         } else {
 247                 if (set_promisc)
 248                         status = ice_set_vsi_promisc(hw, vsi->idx, promisc_m,
 249                                                      0);
 250                 else
 251                         status = ice_clear_vsi_promisc(hw, vsi->idx, promisc_m,
 252                                                        0);
 253         }
 254 
 255         if (status)
 256                 return -EIO;
 257 
 258         return 0;
 259 }
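/* Callers such as ice_vsi_sync_fltr() below pass ICE_MCAST_PROMISC_BITS or
 * ICE_MCAST_VLAN_PROMISC_BITS in promisc_m, depending on whether VLANs are
 * enabled on the VSI.
 */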
 260 
 261 /**
 262  * ice_vsi_sync_fltr - Update the VSI filter list to the HW
 263  * @vsi: ptr to the VSI
 264  *
 265  * Push any outstanding VSI filter changes through the AdminQ.
 266  */
 267 static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
 268 {
 269         struct device *dev = &vsi->back->pdev->dev;
 270         struct net_device *netdev = vsi->netdev;
 271         bool promisc_forced_on = false;
 272         struct ice_pf *pf = vsi->back;
 273         struct ice_hw *hw = &pf->hw;
 274         enum ice_status status = 0;
 275         u32 changed_flags = 0;
 276         u8 promisc_m;
 277         int err = 0;
 278 
 279         if (!vsi->netdev)
 280                 return -EINVAL;
 281 
 282         while (test_and_set_bit(__ICE_CFG_BUSY, vsi->state))
 283                 usleep_range(1000, 2000);
 284 
 285         changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
 286         vsi->current_netdev_flags = vsi->netdev->flags;
 287 
 288         INIT_LIST_HEAD(&vsi->tmp_sync_list);
 289         INIT_LIST_HEAD(&vsi->tmp_unsync_list);
 290 
 291         if (ice_vsi_fltr_changed(vsi)) {
 292                 clear_bit(ICE_VSI_FLAG_UMAC_FLTR_CHANGED, vsi->flags);
 293                 clear_bit(ICE_VSI_FLAG_MMAC_FLTR_CHANGED, vsi->flags);
 294                 clear_bit(ICE_VSI_FLAG_VLAN_FLTR_CHANGED, vsi->flags);
 295 
 296                 /* grab the netdev's addr_list_lock */
 297                 netif_addr_lock_bh(netdev);
 298                 __dev_uc_sync(netdev, ice_add_mac_to_sync_list,
 299                               ice_add_mac_to_unsync_list);
 300                 __dev_mc_sync(netdev, ice_add_mac_to_sync_list,
 301                               ice_add_mac_to_unsync_list);
 302                 /* our temp lists are populated. release lock */
 303                 netif_addr_unlock_bh(netdev);
 304         }
 305 
 306         /* Remove MAC addresses in the unsync list */
 307         status = ice_remove_mac(hw, &vsi->tmp_unsync_list);
 308         ice_free_fltr_list(dev, &vsi->tmp_unsync_list);
 309         if (status) {
 310                 netdev_err(netdev, "Failed to delete MAC filters\n");
 311                 /* if we failed because of alloc failures, just bail */
 312                 if (status == ICE_ERR_NO_MEMORY) {
 313                         err = -ENOMEM;
 314                         goto out;
 315                 }
 316         }
 317 
 318         /* Add MAC addresses in the sync list */
 319         status = ice_add_mac(hw, &vsi->tmp_sync_list);
 320         ice_free_fltr_list(dev, &vsi->tmp_sync_list);
 321         /* If the filter was added successfully or already exists, do not
 322          * treat it as an error; instead, continue processing the rest of
 323          * the function.
 324          */
 325         if (status && status != ICE_ERR_ALREADY_EXISTS) {
 326                 netdev_err(netdev, "Failed to add MAC filters\n");
 327                 /* If there is no more space for new umac filters, VSI
 328                  * should go into promiscuous mode. There should be some
 329                  * space reserved for promiscuous filters.
 330                  */
 331                 if (hw->adminq.sq_last_status == ICE_AQ_RC_ENOSPC &&
 332                     !test_and_set_bit(__ICE_FLTR_OVERFLOW_PROMISC,
 333                                       vsi->state)) {
 334                         promisc_forced_on = true;
 335                         netdev_warn(netdev,
 336                                     "Reached MAC filter limit, forcing promisc mode on VSI %d\n",
 337                                     vsi->vsi_num);
 338                 } else {
 339                         err = -EIO;
 340                         goto out;
 341                 }
 342         }
 343         /* check for changes in promiscuous modes */
 344         if (changed_flags & IFF_ALLMULTI) {
 345                 if (vsi->current_netdev_flags & IFF_ALLMULTI) {
 346                         if (vsi->vlan_ena)
 347                                 promisc_m = ICE_MCAST_VLAN_PROMISC_BITS;
 348                         else
 349                                 promisc_m = ICE_MCAST_PROMISC_BITS;
 350 
 351                         err = ice_cfg_promisc(vsi, promisc_m, true);
 352                         if (err) {
 353                                 netdev_err(netdev, "Error setting Multicast promiscuous mode on VSI %i\n",
 354                                            vsi->vsi_num);
 355                                 vsi->current_netdev_flags &= ~IFF_ALLMULTI;
 356                                 goto out_promisc;
 357                         }
 358                 } else if (!(vsi->current_netdev_flags & IFF_ALLMULTI)) {
 359                         if (vsi->vlan_ena)
 360                                 promisc_m = ICE_MCAST_VLAN_PROMISC_BITS;
 361                         else
 362                                 promisc_m = ICE_MCAST_PROMISC_BITS;
 363 
 364                         err = ice_cfg_promisc(vsi, promisc_m, false);
 365                         if (err) {
 366                                 netdev_err(netdev, "Error clearing Multicast promiscuous mode on VSI %i\n",
 367                                            vsi->vsi_num);
 368                                 vsi->current_netdev_flags |= IFF_ALLMULTI;
 369                                 goto out_promisc;
 370                         }
 371                 }
 372         }
 373 
 374         if (((changed_flags & IFF_PROMISC) || promisc_forced_on) ||
 375             test_bit(ICE_VSI_FLAG_PROMISC_CHANGED, vsi->flags)) {
 376                 clear_bit(ICE_VSI_FLAG_PROMISC_CHANGED, vsi->flags);
 377                 if (vsi->current_netdev_flags & IFF_PROMISC) {
 378                         /* Apply Rx filter rule to get traffic from wire */
 379                         status = ice_cfg_dflt_vsi(hw, vsi->idx, true,
 380                                                   ICE_FLTR_RX);
 381                         if (status) {
 382                                 netdev_err(netdev, "Error setting default VSI %i Rx rule\n",
 383                                            vsi->vsi_num);
 384                                 vsi->current_netdev_flags &= ~IFF_PROMISC;
 385                                 err = -EIO;
 386                                 goto out_promisc;
 387                         }
 388                 } else {
 389                         /* Clear Rx filter to remove traffic from wire */
 390                         status = ice_cfg_dflt_vsi(hw, vsi->idx, false,
 391                                                   ICE_FLTR_RX);
 392                         if (status) {
 393                                 netdev_err(netdev, "Error clearing default VSI %i Rx rule\n",
 394                                            vsi->vsi_num);
 395                                 vsi->current_netdev_flags |= IFF_PROMISC;
 396                                 err = -EIO;
 397                                 goto out_promisc;
 398                         }
 399                 }
 400         }
 401         goto exit;
 402 
 403 out_promisc:
 404         set_bit(ICE_VSI_FLAG_PROMISC_CHANGED, vsi->flags);
 405         goto exit;
 406 out:
 407         /* if something went wrong then set the changed flag so we try again */
 408         set_bit(ICE_VSI_FLAG_UMAC_FLTR_CHANGED, vsi->flags);
 409         set_bit(ICE_VSI_FLAG_MMAC_FLTR_CHANGED, vsi->flags);
 410 exit:
 411         clear_bit(__ICE_CFG_BUSY, vsi->state);
 412         return err;
 413 }
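/* Summary of the flow above: snapshot the netdev flag changes, build the
 * temporary sync/unsync lists under the netdev address lock, push MAC
 * deletions and then additions to hardware, and finally reconcile multicast
 * and unicast promiscuous mode. On failure the relevant *_FLTR_CHANGED bits
 * are set again so ice_sync_fltr_subtask() retries on a later service-task
 * pass.
 */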
 414 
 415 /**
 416  * ice_sync_fltr_subtask - Sync the VSI filter list with HW
 417  * @pf: board private structure
 418  */
 419 static void ice_sync_fltr_subtask(struct ice_pf *pf)
 420 {
 421         int v;
 422 
 423         if (!pf || !(test_bit(ICE_FLAG_FLTR_SYNC, pf->flags)))
 424                 return;
 425 
 426         clear_bit(ICE_FLAG_FLTR_SYNC, pf->flags);
 427 
 428         ice_for_each_vsi(pf, v)
 429                 if (pf->vsi[v] && ice_vsi_fltr_changed(pf->vsi[v]) &&
 430                     ice_vsi_sync_fltr(pf->vsi[v])) {
 431                         /* come back and try again later */
 432                         set_bit(ICE_FLAG_FLTR_SYNC, pf->flags);
 433                         break;
 434                 }
 435 }
 436 
 437 /**
 438  * ice_dis_vsi - pause a VSI
 439  * @vsi: the VSI being paused
 440  * @locked: is the rtnl_lock already held
 441  */
 442 static void ice_dis_vsi(struct ice_vsi *vsi, bool locked)
 443 {
 444         if (test_bit(__ICE_DOWN, vsi->state))
 445                 return;
 446 
 447         set_bit(__ICE_NEEDS_RESTART, vsi->state);
 448 
 449         if (vsi->type == ICE_VSI_PF && vsi->netdev) {
 450                 if (netif_running(vsi->netdev)) {
 451                         if (!locked)
 452                                 rtnl_lock();
 453 
 454                         ice_stop(vsi->netdev);
 455 
 456                         if (!locked)
 457                                 rtnl_unlock();
 458                 } else {
 459                         ice_vsi_close(vsi);
 460                 }
 461         }
 462 }
 463 
 464 /**
 465  * ice_pf_dis_all_vsi - Pause all VSIs on a PF
 466  * @pf: the PF
 467  * @locked: is the rtnl_lock already held
 468  */
 469 #ifdef CONFIG_DCB
 470 void ice_pf_dis_all_vsi(struct ice_pf *pf, bool locked)
 471 #else
 472 static void ice_pf_dis_all_vsi(struct ice_pf *pf, bool locked)
 473 #endif /* CONFIG_DCB */
 474 {
 475         int v;
 476 
 477         ice_for_each_vsi(pf, v)
 478                 if (pf->vsi[v])
 479                         ice_dis_vsi(pf->vsi[v], locked);
 480 }
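/* The non-static variant is built when CONFIG_DCB is enabled, presumably so
 * the DCB configuration code can pause every VSI around a reconfiguration;
 * otherwise the helper stays local to this file.
 */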
 481 
 482 /**
 483  * ice_prepare_for_reset - prep for the core to reset
 484  * @pf: board private structure
 485  *
 486  * Inform or close all dependent features in prep for reset.
 487  */
 488 static void
 489 ice_prepare_for_reset(struct ice_pf *pf)
 490 {
 491         struct ice_hw *hw = &pf->hw;
 492         int i;
 493 
 494         /* already prepared for reset */
 495         if (test_bit(__ICE_PREPARED_FOR_RESET, pf->state))
 496                 return;
 497 
 498         /* Notify VFs of impending reset */
 499         if (ice_check_sq_alive(hw, &hw->mailboxq))
 500                 ice_vc_notify_reset(pf);
 501 
 502         /* Disable VFs until reset is completed */
 503         for (i = 0; i < pf->num_alloc_vfs; i++)
 504                 ice_set_vf_state_qs_dis(&pf->vf[i]);
 505 
 506         /* clear SW filtering DB */
 507         ice_clear_hw_tbls(hw);
 508         /* disable the VSIs and their queues that are not already DOWN */
 509         ice_pf_dis_all_vsi(pf, false);
 510 
 511         if (hw->port_info)
 512                 ice_sched_clear_port(hw->port_info);
 513 
 514         ice_shutdown_all_ctrlq(hw);
 515 
 516         set_bit(__ICE_PREPARED_FOR_RESET, pf->state);
 517 }
 518 
 519 /**
 520  * ice_do_reset - Initiate one of many types of resets
 521  * @pf: board private structure
 522  * @reset_type: reset type requested
 523  * before this function was called.
 524  */
 525 static void ice_do_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
 526 {
 527         struct device *dev = &pf->pdev->dev;
 528         struct ice_hw *hw = &pf->hw;
 529 
 530         dev_dbg(dev, "reset_type 0x%x requested\n", reset_type);
 531         WARN_ON(in_interrupt());
 532 
 533         ice_prepare_for_reset(pf);
 534 
 535         /* trigger the reset */
 536         if (ice_reset(hw, reset_type)) {
 537                 dev_err(dev, "reset %d failed\n", reset_type);
 538                 set_bit(__ICE_RESET_FAILED, pf->state);
 539                 clear_bit(__ICE_RESET_OICR_RECV, pf->state);
 540                 clear_bit(__ICE_PREPARED_FOR_RESET, pf->state);
 541                 clear_bit(__ICE_PFR_REQ, pf->state);
 542                 clear_bit(__ICE_CORER_REQ, pf->state);
 543                 clear_bit(__ICE_GLOBR_REQ, pf->state);
 544                 return;
 545         }
 546 
 547         /* PFR is a bit of a special case because it doesn't result in an OICR
 548          * interrupt. So for PFR, rebuild after the reset and clear the reset-
 549          * associated state bits.
 550          */
 551         if (reset_type == ICE_RESET_PFR) {
 552                 pf->pfr_count++;
 553                 ice_rebuild(pf, reset_type);
 554                 clear_bit(__ICE_PREPARED_FOR_RESET, pf->state);
 555                 clear_bit(__ICE_PFR_REQ, pf->state);
 556                 ice_reset_all_vfs(pf, true);
 557         }
 558 }
 559 
 560 /**
 561  * ice_reset_subtask - Set up for resetting the device and driver
 562  * @pf: board private structure
 563  */
 564 static void ice_reset_subtask(struct ice_pf *pf)
 565 {
 566         enum ice_reset_req reset_type = ICE_RESET_INVAL;
 567 
 568         /* When a CORER/GLOBR/EMPR is about to happen, the hardware triggers an
 569          * OICR interrupt. The OICR handler (ice_misc_intr) determines what type
 570          * of reset is pending and sets bits in pf->state indicating the reset
  571          * type and __ICE_RESET_OICR_RECV. So, if the latter bit is set,
 572          * prepare for pending reset if not already (for PF software-initiated
 573          * global resets the software should already be prepared for it as
 574          * indicated by __ICE_PREPARED_FOR_RESET; for global resets initiated
 575          * by firmware or software on other PFs, that bit is not set so prepare
 576          * for the reset now), poll for reset done, rebuild and return.
 577          */
 578         if (test_bit(__ICE_RESET_OICR_RECV, pf->state)) {
 579                 /* Perform the largest reset requested */
 580                 if (test_and_clear_bit(__ICE_CORER_RECV, pf->state))
 581                         reset_type = ICE_RESET_CORER;
 582                 if (test_and_clear_bit(__ICE_GLOBR_RECV, pf->state))
 583                         reset_type = ICE_RESET_GLOBR;
 584                 if (test_and_clear_bit(__ICE_EMPR_RECV, pf->state))
 585                         reset_type = ICE_RESET_EMPR;
 586                 /* return if no valid reset type requested */
 587                 if (reset_type == ICE_RESET_INVAL)
 588                         return;
 589                 ice_prepare_for_reset(pf);
 590 
 591                 /* make sure we are ready to rebuild */
 592                 if (ice_check_reset(&pf->hw)) {
 593                         set_bit(__ICE_RESET_FAILED, pf->state);
 594                 } else {
 595                         /* done with reset. start rebuild */
 596                         pf->hw.reset_ongoing = false;
 597                         ice_rebuild(pf, reset_type);
 598                         /* clear bit to resume normal operations, but
 599                          * ICE_NEEDS_RESTART bit is set in case rebuild failed
 600                          */
 601                         clear_bit(__ICE_RESET_OICR_RECV, pf->state);
 602                         clear_bit(__ICE_PREPARED_FOR_RESET, pf->state);
 603                         clear_bit(__ICE_PFR_REQ, pf->state);
 604                         clear_bit(__ICE_CORER_REQ, pf->state);
 605                         clear_bit(__ICE_GLOBR_REQ, pf->state);
 606                         ice_reset_all_vfs(pf, true);
 607                 }
 608 
 609                 return;
 610         }
 611 
 612         /* No pending resets to finish processing. Check for new resets */
 613         if (test_bit(__ICE_PFR_REQ, pf->state))
 614                 reset_type = ICE_RESET_PFR;
 615         if (test_bit(__ICE_CORER_REQ, pf->state))
 616                 reset_type = ICE_RESET_CORER;
 617         if (test_bit(__ICE_GLOBR_REQ, pf->state))
 618                 reset_type = ICE_RESET_GLOBR;
 619         /* If no valid reset type requested just return */
 620         if (reset_type == ICE_RESET_INVAL)
 621                 return;
 622 
 623         /* reset if not already down or busy */
 624         if (!test_bit(__ICE_DOWN, pf->state) &&
 625             !test_bit(__ICE_CFG_BUSY, pf->state)) {
 626                 ice_do_reset(pf, reset_type);
 627         }
 628 }
 629 
 630 /**
 631  * ice_print_topo_conflict - print topology conflict message
 632  * @vsi: the VSI whose topology status is being checked
 633  */
 634 static void ice_print_topo_conflict(struct ice_vsi *vsi)
 635 {
 636         switch (vsi->port_info->phy.link_info.topo_media_conflict) {
 637         case ICE_AQ_LINK_TOPO_CONFLICT:
 638         case ICE_AQ_LINK_MEDIA_CONFLICT:
 639                 netdev_info(vsi->netdev, "Possible mis-configuration of the Ethernet port detected, please use the Intel(R) Ethernet Port Configuration Tool application to address the issue.\n");
 640                 break;
 641         default:
 642                 break;
 643         }
 644 }
 645 
 646 /**
 647  * ice_print_link_msg - print link up or down message
 648  * @vsi: the VSI whose link status is being queried
 649  * @isup: boolean for if the link is now up or down
 650  */
 651 void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
 652 {
 653         struct ice_aqc_get_phy_caps_data *caps;
 654         enum ice_status status;
 655         const char *fec_req;
 656         const char *speed;
 657         const char *fec;
 658         const char *fc;
 659         const char *an;
 660 
 661         if (!vsi)
 662                 return;
 663 
 664         if (vsi->current_isup == isup)
 665                 return;
 666 
 667         vsi->current_isup = isup;
 668 
 669         if (!isup) {
 670                 netdev_info(vsi->netdev, "NIC Link is Down\n");
 671                 return;
 672         }
 673 
 674         switch (vsi->port_info->phy.link_info.link_speed) {
 675         case ICE_AQ_LINK_SPEED_100GB:
 676                 speed = "100 G";
 677                 break;
 678         case ICE_AQ_LINK_SPEED_50GB:
 679                 speed = "50 G";
 680                 break;
 681         case ICE_AQ_LINK_SPEED_40GB:
 682                 speed = "40 G";
 683                 break;
 684         case ICE_AQ_LINK_SPEED_25GB:
 685                 speed = "25 G";
 686                 break;
 687         case ICE_AQ_LINK_SPEED_20GB:
 688                 speed = "20 G";
 689                 break;
 690         case ICE_AQ_LINK_SPEED_10GB:
 691                 speed = "10 G";
 692                 break;
 693         case ICE_AQ_LINK_SPEED_5GB:
 694                 speed = "5 G";
 695                 break;
 696         case ICE_AQ_LINK_SPEED_2500MB:
 697                 speed = "2.5 G";
 698                 break;
 699         case ICE_AQ_LINK_SPEED_1000MB:
 700                 speed = "1 G";
 701                 break;
 702         case ICE_AQ_LINK_SPEED_100MB:
 703                 speed = "100 M";
 704                 break;
 705         default:
 706                 speed = "Unknown";
 707                 break;
 708         }
 709 
 710         switch (vsi->port_info->fc.current_mode) {
 711         case ICE_FC_FULL:
 712                 fc = "Rx/Tx";
 713                 break;
 714         case ICE_FC_TX_PAUSE:
 715                 fc = "Tx";
 716                 break;
 717         case ICE_FC_RX_PAUSE:
 718                 fc = "Rx";
 719                 break;
 720         case ICE_FC_NONE:
 721                 fc = "None";
 722                 break;
 723         default:
 724                 fc = "Unknown";
 725                 break;
 726         }
 727 
 728         /* Get FEC mode based on negotiated link info */
 729         switch (vsi->port_info->phy.link_info.fec_info) {
 730         case ICE_AQ_LINK_25G_RS_528_FEC_EN:
 731                 /* fall through */
 732         case ICE_AQ_LINK_25G_RS_544_FEC_EN:
 733                 fec = "RS-FEC";
 734                 break;
 735         case ICE_AQ_LINK_25G_KR_FEC_EN:
 736                 fec = "FC-FEC/BASE-R";
 737                 break;
 738         default:
 739                 fec = "NONE";
 740                 break;
 741         }
 742 
  743         /* check if autoneg completed; may be false if autoneg is not supported */
 744         if (vsi->port_info->phy.link_info.an_info & ICE_AQ_AN_COMPLETED)
 745                 an = "True";
 746         else
 747                 an = "False";
 748 
  749         /* Get the FEC mode requested by the last SW configuration (from PHY caps) */
 750         caps = devm_kzalloc(&vsi->back->pdev->dev, sizeof(*caps), GFP_KERNEL);
 751         if (!caps) {
 752                 fec_req = "Unknown";
 753                 goto done;
 754         }
 755 
 756         status = ice_aq_get_phy_caps(vsi->port_info, false,
 757                                      ICE_AQC_REPORT_SW_CFG, caps, NULL);
 758         if (status)
 759                 netdev_info(vsi->netdev, "Get phy capability failed.\n");
 760 
 761         if (caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_528_REQ ||
 762             caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_544_REQ)
 763                 fec_req = "RS-FEC";
 764         else if (caps->link_fec_options & ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ ||
 765                  caps->link_fec_options & ICE_AQC_PHY_FEC_25G_KR_REQ)
 766                 fec_req = "FC-FEC/BASE-R";
 767         else
 768                 fec_req = "NONE";
 769 
 770         devm_kfree(&vsi->back->pdev->dev, caps);
 771 
 772 done:
 773         netdev_info(vsi->netdev, "NIC Link is up %sbps, Requested FEC: %s, FEC: %s, Autoneg: %s, Flow Control: %s\n",
 774                     speed, fec_req, fec, an, fc);
 775         ice_print_topo_conflict(vsi);
 776 }
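/* Example output with illustrative values (100G link, RS-FEC requested and
 * negotiated, autoneg completed, no pause frames):
 * "NIC Link is up 100 Gbps, Requested FEC: RS-FEC, FEC: RS-FEC, Autoneg: True, Flow Control: None"
 */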
 777 
 778 /**
 779  * ice_vsi_link_event - update the VSI's netdev
 780  * @vsi: the VSI on which the link event occurred
 781  * @link_up: whether or not the VSI needs to be set up or down
 782  */
 783 static void ice_vsi_link_event(struct ice_vsi *vsi, bool link_up)
 784 {
 785         if (!vsi)
 786                 return;
 787 
 788         if (test_bit(__ICE_DOWN, vsi->state) || !vsi->netdev)
 789                 return;
 790 
 791         if (vsi->type == ICE_VSI_PF) {
 792                 if (link_up == netif_carrier_ok(vsi->netdev))
 793                         return;
 794 
 795                 if (link_up) {
 796                         netif_carrier_on(vsi->netdev);
 797                         netif_tx_wake_all_queues(vsi->netdev);
 798                 } else {
 799                         netif_carrier_off(vsi->netdev);
 800                         netif_tx_stop_all_queues(vsi->netdev);
 801                 }
 802         }
 803 }
 804 
 805 /**
 806  * ice_link_event - process the link event
 807  * @pf: PF that the link event is associated with
 808  * @pi: port_info for the port that the link event is associated with
 809  * @link_up: true if the physical link is up and false if it is down
 810  * @link_speed: current link speed received from the link event
 811  *
 812  * Returns 0 on success and negative on failure
 813  */
 814 static int
 815 ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up,
 816                u16 link_speed)
 817 {
 818         struct ice_phy_info *phy_info;
 819         struct ice_vsi *vsi;
 820         u16 old_link_speed;
 821         bool old_link;
 822         int result;
 823 
 824         phy_info = &pi->phy;
 825         phy_info->link_info_old = phy_info->link_info;
 826 
 827         old_link = !!(phy_info->link_info_old.link_info & ICE_AQ_LINK_UP);
 828         old_link_speed = phy_info->link_info_old.link_speed;
 829 
  830         /* update the link info structures and re-enable link events;
  831          * don't bail on failure since other bookkeeping is still needed
 832          */
 833         result = ice_update_link_info(pi);
 834         if (result)
 835                 dev_dbg(&pf->pdev->dev,
 836                         "Failed to update link status and re-enable link events for port %d\n",
 837                         pi->lport);
 838 
 839         /* if the old link up/down and speed is the same as the new */
 840         if (link_up == old_link && link_speed == old_link_speed)
 841                 return result;
 842 
 843         vsi = ice_get_main_vsi(pf);
 844         if (!vsi || !vsi->port_info)
 845                 return -EINVAL;
 846 
 847         /* turn off PHY if media was removed */
 848         if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags) &&
 849             !(pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE)) {
 850                 set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
 851 
 852                 result = ice_aq_set_link_restart_an(pi, false, NULL);
 853                 if (result) {
 854                         dev_dbg(&pf->pdev->dev,
 855                                 "Failed to set link down, VSI %d error %d\n",
 856                                 vsi->vsi_num, result);
 857                         return result;
 858                 }
 859         }
 860 
 861         ice_vsi_link_event(vsi, link_up);
 862         ice_print_link_msg(vsi, link_up);
 863 
 864         if (pf->num_alloc_vfs)
 865                 ice_vc_notify_link_state(pf);
 866 
 867         return result;
 868 }
 869 
 870 /**
 871  * ice_watchdog_subtask - periodic tasks not using event driven scheduling
 872  * @pf: board private structure
 873  */
 874 static void ice_watchdog_subtask(struct ice_pf *pf)
 875 {
 876         int i;
 877 
 878         /* if interface is down do nothing */
 879         if (test_bit(__ICE_DOWN, pf->state) ||
 880             test_bit(__ICE_CFG_BUSY, pf->state))
 881                 return;
 882 
 883         /* make sure we don't do these things too often */
 884         if (time_before(jiffies,
 885                         pf->serv_tmr_prev + pf->serv_tmr_period))
 886                 return;
 887 
 888         pf->serv_tmr_prev = jiffies;
 889 
 890         /* Update the stats for active netdevs so the network stack
 891          * can look at updated numbers whenever it cares to
 892          */
 893         ice_update_pf_stats(pf);
 894         ice_for_each_vsi(pf, i)
 895                 if (pf->vsi[i] && pf->vsi[i]->netdev)
 896                         ice_update_vsi_stats(pf->vsi[i]);
 897 }
 898 
 899 /**
 900  * ice_init_link_events - enable/initialize link events
 901  * @pi: pointer to the port_info instance
 902  *
 903  * Returns -EIO on failure, 0 on success
 904  */
 905 static int ice_init_link_events(struct ice_port_info *pi)
 906 {
 907         u16 mask;
 908 
 909         mask = ~((u16)(ICE_AQ_LINK_EVENT_UPDOWN | ICE_AQ_LINK_EVENT_MEDIA_NA |
 910                        ICE_AQ_LINK_EVENT_MODULE_QUAL_FAIL));
 911 
 912         if (ice_aq_set_event_mask(pi->hw, pi->lport, mask, NULL)) {
 913                 dev_dbg(ice_hw_to_dev(pi->hw),
 914                         "Failed to set link event mask for port %d\n",
 915                         pi->lport);
 916                 return -EIO;
 917         }
 918 
 919         if (ice_aq_get_link_info(pi, true, NULL, NULL)) {
 920                 dev_dbg(ice_hw_to_dev(pi->hw),
 921                         "Failed to enable link events for port %d\n",
 922                         pi->lport);
 923                 return -EIO;
 924         }
 925 
 926         return 0;
 927 }
 928 
 929 /**
 930  * ice_handle_link_event - handle link event via ARQ
 931  * @pf: PF that the link event is associated with
 932  * @event: event structure containing link status info
 933  */
 934 static int
 935 ice_handle_link_event(struct ice_pf *pf, struct ice_rq_event_info *event)
 936 {
 937         struct ice_aqc_get_link_status_data *link_data;
 938         struct ice_port_info *port_info;
 939         int status;
 940 
 941         link_data = (struct ice_aqc_get_link_status_data *)event->msg_buf;
 942         port_info = pf->hw.port_info;
 943         if (!port_info)
 944                 return -EINVAL;
 945 
 946         status = ice_link_event(pf, port_info,
 947                                 !!(link_data->link_info & ICE_AQ_LINK_UP),
 948                                 le16_to_cpu(link_data->link_speed));
 949         if (status)
 950                 dev_dbg(&pf->pdev->dev,
 951                         "Could not process link event, error %d\n", status);
 952 
 953         return status;
 954 }
 955 
 956 /**
 957  * __ice_clean_ctrlq - helper function to clean controlq rings
 958  * @pf: ptr to struct ice_pf
 959  * @q_type: specific Control queue type
 960  */
 961 static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
 962 {
 963         struct ice_rq_event_info event;
 964         struct ice_hw *hw = &pf->hw;
 965         struct ice_ctl_q_info *cq;
 966         u16 pending, i = 0;
 967         const char *qtype;
 968         u32 oldval, val;
 969 
 970         /* Do not clean control queue if/when PF reset fails */
 971         if (test_bit(__ICE_RESET_FAILED, pf->state))
 972                 return 0;
 973 
 974         switch (q_type) {
 975         case ICE_CTL_Q_ADMIN:
 976                 cq = &hw->adminq;
 977                 qtype = "Admin";
 978                 break;
 979         case ICE_CTL_Q_MAILBOX:
 980                 cq = &hw->mailboxq;
 981                 qtype = "Mailbox";
 982                 break;
 983         default:
 984                 dev_warn(&pf->pdev->dev, "Unknown control queue type 0x%x\n",
 985                          q_type);
 986                 return 0;
 987         }
 988 
  989         /* check for error indications - the PF_xx_AxQLEN register layout is
  990          * the same for FW/MBX/SB, so just use the PF_FW_AxQLEN defines.
 991          */
 992         val = rd32(hw, cq->rq.len);
 993         if (val & (PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
 994                    PF_FW_ARQLEN_ARQCRIT_M)) {
 995                 oldval = val;
 996                 if (val & PF_FW_ARQLEN_ARQVFE_M)
 997                         dev_dbg(&pf->pdev->dev,
 998                                 "%s Receive Queue VF Error detected\n", qtype);
 999                 if (val & PF_FW_ARQLEN_ARQOVFL_M) {
1000                         dev_dbg(&pf->pdev->dev,
1001                                 "%s Receive Queue Overflow Error detected\n",
1002                                 qtype);
1003                 }
1004                 if (val & PF_FW_ARQLEN_ARQCRIT_M)
1005                         dev_dbg(&pf->pdev->dev,
1006                                 "%s Receive Queue Critical Error detected\n",
1007                                 qtype);
1008                 val &= ~(PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
1009                          PF_FW_ARQLEN_ARQCRIT_M);
1010                 if (oldval != val)
1011                         wr32(hw, cq->rq.len, val);
1012         }
1013 
1014         val = rd32(hw, cq->sq.len);
1015         if (val & (PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
1016                    PF_FW_ATQLEN_ATQCRIT_M)) {
1017                 oldval = val;
1018                 if (val & PF_FW_ATQLEN_ATQVFE_M)
1019                         dev_dbg(&pf->pdev->dev,
1020                                 "%s Send Queue VF Error detected\n", qtype);
1021                 if (val & PF_FW_ATQLEN_ATQOVFL_M) {
1022                         dev_dbg(&pf->pdev->dev,
1023                                 "%s Send Queue Overflow Error detected\n",
1024                                 qtype);
1025                 }
1026                 if (val & PF_FW_ATQLEN_ATQCRIT_M)
1027                         dev_dbg(&pf->pdev->dev,
1028                                 "%s Send Queue Critical Error detected\n",
1029                                 qtype);
1030                 val &= ~(PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
1031                          PF_FW_ATQLEN_ATQCRIT_M);
1032                 if (oldval != val)
1033                         wr32(hw, cq->sq.len, val);
1034         }
1035 
1036         event.buf_len = cq->rq_buf_size;
1037         event.msg_buf = devm_kzalloc(&pf->pdev->dev, event.buf_len,
1038                                      GFP_KERNEL);
1039         if (!event.msg_buf)
1040                 return 0;
1041 
1042         do {
1043                 enum ice_status ret;
1044                 u16 opcode;
1045 
1046                 ret = ice_clean_rq_elem(hw, cq, &event, &pending);
1047                 if (ret == ICE_ERR_AQ_NO_WORK)
1048                         break;
1049                 if (ret) {
1050                         dev_err(&pf->pdev->dev,
1051                                 "%s Receive Queue event error %d\n", qtype,
1052                                 ret);
1053                         break;
1054                 }
1055 
1056                 opcode = le16_to_cpu(event.desc.opcode);
1057 
1058                 switch (opcode) {
1059                 case ice_aqc_opc_get_link_status:
1060                         if (ice_handle_link_event(pf, &event))
1061                                 dev_err(&pf->pdev->dev,
1062                                         "Could not handle link event\n");
1063                         break;
1064                 case ice_mbx_opc_send_msg_to_pf:
1065                         ice_vc_process_vf_msg(pf, &event);
1066                         break;
1067                 case ice_aqc_opc_fw_logging:
1068                         ice_output_fw_log(hw, &event.desc, event.msg_buf);
1069                         break;
1070                 case ice_aqc_opc_lldp_set_mib_change:
1071                         ice_dcb_process_lldp_set_mib_change(pf, &event);
1072                         break;
1073                 default:
1074                         dev_dbg(&pf->pdev->dev,
1075                                 "%s Receive Queue unknown event 0x%04x ignored\n",
1076                                 qtype, opcode);
1077                         break;
1078                 }
1079         } while (pending && (i++ < ICE_DFLT_IRQ_WORK));
1080 
1081         devm_kfree(&pf->pdev->dev, event.msg_buf);
1082 
1083         return pending && (i == ICE_DFLT_IRQ_WORK);
1084 }
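/* When this helper reports leftover work, ice_clean_adminq_subtask() and
 * ice_clean_mailboxq_subtask() below return early without clearing their
 * *_EVENT_PENDING bit, so the queue is drained again on the next service-task
 * pass.
 */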
1085 
1086 /**
1087  * ice_ctrlq_pending - check if there is a difference between ntc and ntu
1088  * @hw: pointer to hardware info
1089  * @cq: control queue information
1090  *
1091  * returns true if there are pending messages in a queue, false if there aren't
1092  */
1093 static bool ice_ctrlq_pending(struct ice_hw *hw, struct ice_ctl_q_info *cq)
1094 {
1095         u16 ntu;
1096 
1097         ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
1098         return cq->rq.next_to_clean != ntu;
1099 }
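/* For example, if cq->rq.next_to_clean is 3 and the masked head register
 * reads 5, this returns true and the caller runs __ice_clean_ctrlq() again.
 */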
1100 
1101 /**
1102  * ice_clean_adminq_subtask - clean the AdminQ rings
1103  * @pf: board private structure
1104  */
1105 static void ice_clean_adminq_subtask(struct ice_pf *pf)
1106 {
1107         struct ice_hw *hw = &pf->hw;
1108 
1109         if (!test_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state))
1110                 return;
1111 
1112         if (__ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN))
1113                 return;
1114 
1115         clear_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state);
1116 
1117         /* There might be a situation where new messages arrive to a control
1118          * queue between processing the last message and clearing the
1119          * EVENT_PENDING bit. So before exiting, check queue head again (using
1120          * ice_ctrlq_pending) and process new messages if any.
1121          */
1122         if (ice_ctrlq_pending(hw, &hw->adminq))
1123                 __ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN);
1124 
1125         ice_flush(hw);
1126 }
1127 
1128 /**
1129  * ice_clean_mailboxq_subtask - clean the MailboxQ rings
1130  * @pf: board private structure
1131  */
1132 static void ice_clean_mailboxq_subtask(struct ice_pf *pf)
1133 {
1134         struct ice_hw *hw = &pf->hw;
1135 
1136         if (!test_bit(__ICE_MAILBOXQ_EVENT_PENDING, pf->state))
1137                 return;
1138 
1139         if (__ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX))
1140                 return;
1141 
1142         clear_bit(__ICE_MAILBOXQ_EVENT_PENDING, pf->state);
1143 
1144         if (ice_ctrlq_pending(hw, &hw->mailboxq))
1145                 __ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX);
1146 
1147         ice_flush(hw);
1148 }
1149 
1150 /**
1151  * ice_service_task_schedule - schedule the service task to wake up
1152  * @pf: board private structure
1153  *
1154  * If not already scheduled, this puts the task into the work queue.
1155  */
1156 static void ice_service_task_schedule(struct ice_pf *pf)
1157 {
1158         if (!test_bit(__ICE_SERVICE_DIS, pf->state) &&
1159             !test_and_set_bit(__ICE_SERVICE_SCHED, pf->state) &&
1160             !test_bit(__ICE_NEEDS_RESTART, pf->state))
1161                 queue_work(ice_wq, &pf->serv_task);
1162 }
1163 
1164 /**
1165  * ice_service_task_complete - finish up the service task
1166  * @pf: board private structure
1167  */
1168 static void ice_service_task_complete(struct ice_pf *pf)
1169 {
1170         WARN_ON(!test_bit(__ICE_SERVICE_SCHED, pf->state));
1171 
1172         /* force memory (pf->state) to sync before next service task */
1173         smp_mb__before_atomic();
1174         clear_bit(__ICE_SERVICE_SCHED, pf->state);
1175 }
1176 
1177 /**
1178  * ice_service_task_stop - stop service task and cancel works
1179  * @pf: board private structure
1180  */
1181 static void ice_service_task_stop(struct ice_pf *pf)
1182 {
1183         set_bit(__ICE_SERVICE_DIS, pf->state);
1184 
1185         if (pf->serv_tmr.function)
1186                 del_timer_sync(&pf->serv_tmr);
1187         if (pf->serv_task.func)
1188                 cancel_work_sync(&pf->serv_task);
1189 
1190         clear_bit(__ICE_SERVICE_SCHED, pf->state);
1191 }
1192 
1193 /**
1194  * ice_service_task_restart - restart service task and schedule works
1195  * @pf: board private structure
1196  *
1197  * This function is needed for suspend and resume works (e.g WoL scenario)
1198  */
1199 static void ice_service_task_restart(struct ice_pf *pf)
1200 {
1201         clear_bit(__ICE_SERVICE_DIS, pf->state);
1202         ice_service_task_schedule(pf);
1203 }
1204 
1205 /**
1206  * ice_service_timer - timer callback to schedule service task
1207  * @t: pointer to timer_list
1208  */
1209 static void ice_service_timer(struct timer_list *t)
1210 {
1211         struct ice_pf *pf = from_timer(pf, t, serv_tmr);
1212 
1213         mod_timer(&pf->serv_tmr, round_jiffies(pf->serv_tmr_period + jiffies));
1214         ice_service_task_schedule(pf);
1215 }
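/* The timer re-arms itself every serv_tmr_period jiffies (rounded by
 * round_jiffies()), so the service task keeps running periodically even when
 * nothing explicitly schedules it.
 */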
1216 
1217 /**
1218  * ice_handle_mdd_event - handle malicious driver detect event
1219  * @pf: pointer to the PF structure
1220  *
1221  * Called from service task. OICR interrupt handler indicates MDD event
1222  */
1223 static void ice_handle_mdd_event(struct ice_pf *pf)
1224 {
1225         struct ice_hw *hw = &pf->hw;
1226         bool mdd_detected = false;
1227         u32 reg;
1228         int i;
1229 
1230         if (!test_and_clear_bit(__ICE_MDD_EVENT_PENDING, pf->state))
1231                 return;
1232 
1233         /* find what triggered the MDD event */
1234         reg = rd32(hw, GL_MDET_TX_PQM);
1235         if (reg & GL_MDET_TX_PQM_VALID_M) {
1236                 u8 pf_num = (reg & GL_MDET_TX_PQM_PF_NUM_M) >>
1237                                 GL_MDET_TX_PQM_PF_NUM_S;
1238                 u16 vf_num = (reg & GL_MDET_TX_PQM_VF_NUM_M) >>
1239                                 GL_MDET_TX_PQM_VF_NUM_S;
1240                 u8 event = (reg & GL_MDET_TX_PQM_MAL_TYPE_M) >>
1241                                 GL_MDET_TX_PQM_MAL_TYPE_S;
1242                 u16 queue = ((reg & GL_MDET_TX_PQM_QNUM_M) >>
1243                                 GL_MDET_TX_PQM_QNUM_S);
1244 
1245                 if (netif_msg_tx_err(pf))
1246                         dev_info(&pf->pdev->dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
1247                                  event, queue, pf_num, vf_num);
1248                 wr32(hw, GL_MDET_TX_PQM, 0xffffffff);
1249                 mdd_detected = true;
1250         }
1251 
1252         reg = rd32(hw, GL_MDET_TX_TCLAN);
1253         if (reg & GL_MDET_TX_TCLAN_VALID_M) {
1254                 u8 pf_num = (reg & GL_MDET_TX_TCLAN_PF_NUM_M) >>
1255                                 GL_MDET_TX_TCLAN_PF_NUM_S;
1256                 u16 vf_num = (reg & GL_MDET_TX_TCLAN_VF_NUM_M) >>
1257                                 GL_MDET_TX_TCLAN_VF_NUM_S;
1258                 u8 event = (reg & GL_MDET_TX_TCLAN_MAL_TYPE_M) >>
1259                                 GL_MDET_TX_TCLAN_MAL_TYPE_S;
1260                 u16 queue = ((reg & GL_MDET_TX_TCLAN_QNUM_M) >>
1261                                 GL_MDET_TX_TCLAN_QNUM_S);
1262 
1263                 if (netif_msg_tx_err(pf))
1264                         dev_info(&pf->pdev->dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
1265                                  event, queue, pf_num, vf_num);
1266                 wr32(hw, GL_MDET_TX_TCLAN, 0xffffffff);
1267                 mdd_detected = true;
1268         }
1269 
1270         reg = rd32(hw, GL_MDET_RX);
1271         if (reg & GL_MDET_RX_VALID_M) {
1272                 u8 pf_num = (reg & GL_MDET_RX_PF_NUM_M) >>
1273                                 GL_MDET_RX_PF_NUM_S;
1274                 u16 vf_num = (reg & GL_MDET_RX_VF_NUM_M) >>
1275                                 GL_MDET_RX_VF_NUM_S;
1276                 u8 event = (reg & GL_MDET_RX_MAL_TYPE_M) >>
1277                                 GL_MDET_RX_MAL_TYPE_S;
1278                 u16 queue = ((reg & GL_MDET_RX_QNUM_M) >>
1279                                 GL_MDET_RX_QNUM_S);
1280 
1281                 if (netif_msg_rx_err(pf))
1282                         dev_info(&pf->pdev->dev, "Malicious Driver Detection event %d on RX queue %d PF# %d VF# %d\n",
1283                                  event, queue, pf_num, vf_num);
1284                 wr32(hw, GL_MDET_RX, 0xffffffff);
1285                 mdd_detected = true;
1286         }
1287 
1288         if (mdd_detected) {
1289                 bool pf_mdd_detected = false;
1290 
1291                 reg = rd32(hw, PF_MDET_TX_PQM);
1292                 if (reg & PF_MDET_TX_PQM_VALID_M) {
1293                         wr32(hw, PF_MDET_TX_PQM, 0xFFFF);
1294                         dev_info(&pf->pdev->dev, "TX driver issue detected, PF reset issued\n");
1295                         pf_mdd_detected = true;
1296                 }
1297 
1298                 reg = rd32(hw, PF_MDET_TX_TCLAN);
1299                 if (reg & PF_MDET_TX_TCLAN_VALID_M) {
1300                         wr32(hw, PF_MDET_TX_TCLAN, 0xFFFF);
1301                         dev_info(&pf->pdev->dev, "TX driver issue detected, PF reset issued\n");
1302                         pf_mdd_detected = true;
1303                 }
1304 
1305                 reg = rd32(hw, PF_MDET_RX);
1306                 if (reg & PF_MDET_RX_VALID_M) {
1307                         wr32(hw, PF_MDET_RX, 0xFFFF);
1308                         dev_info(&pf->pdev->dev, "RX driver issue detected, PF reset issued\n");
1309                         pf_mdd_detected = true;
1310                 }
1311                 /* Queue belongs to the PF, so initiate a reset */
1312                 if (pf_mdd_detected) {
1313                         set_bit(__ICE_NEEDS_RESTART, pf->state);
1314                         ice_service_task_schedule(pf);
1315                 }
1316         }
1317 
1318         /* check to see if one of the VFs caused the MDD */
1319         for (i = 0; i < pf->num_alloc_vfs; i++) {
1320                 struct ice_vf *vf = &pf->vf[i];
1321 
1322                 bool vf_mdd_detected = false;
1323 
1324                 reg = rd32(hw, VP_MDET_TX_PQM(i));
1325                 if (reg & VP_MDET_TX_PQM_VALID_M) {
1326                         wr32(hw, VP_MDET_TX_PQM(i), 0xFFFF);
1327                         vf_mdd_detected = true;
1328                         dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
1329                                  i);
1330                 }
1331 
1332                 reg = rd32(hw, VP_MDET_TX_TCLAN(i));
1333                 if (reg & VP_MDET_TX_TCLAN_VALID_M) {
1334                         wr32(hw, VP_MDET_TX_TCLAN(i), 0xFFFF);
1335                         vf_mdd_detected = true;
1336                         dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
1337                                  i);
1338                 }
1339 
1340                 reg = rd32(hw, VP_MDET_TX_TDPU(i));
1341                 if (reg & VP_MDET_TX_TDPU_VALID_M) {
1342                         wr32(hw, VP_MDET_TX_TDPU(i), 0xFFFF);
1343                         vf_mdd_detected = true;
1344                         dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
1345                                  i);
1346                 }
1347 
1348                 reg = rd32(hw, VP_MDET_RX(i));
1349                 if (reg & VP_MDET_RX_VALID_M) {
1350                         wr32(hw, VP_MDET_RX(i), 0xFFFF);
1351                         vf_mdd_detected = true;
1352                         dev_info(&pf->pdev->dev, "RX driver issue detected on VF %d\n",
1353                                  i);
1354                 }
1355 
1356                 if (vf_mdd_detected) {
1357                         vf->num_mdd_events++;
1358                         if (vf->num_mdd_events &&
1359                             vf->num_mdd_events <= ICE_MDD_EVENTS_THRESHOLD)
1360                                 dev_info(&pf->pdev->dev,
1361                                          "VF %d has had %llu MDD events since last boot, Admin might need to reload AVF driver with this number of events\n",
1362                                          i, vf->num_mdd_events);
1363                 }
1364         }
1365 }
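
/* Note on the flow above: the global GL_MDET_TX_PQM/GL_MDET_TX_TCLAN/
 * GL_MDET_RX registers only report that some function triggered an MDD event;
 * the per-PF PF_MDET_* and per-VF VP_MDET_*(i) registers are then read to
 * attribute the event and cleared with an all-ones write, and a PF reset is
 * requested via __ICE_NEEDS_RESTART plus a service task kick when the
 * offending queue belongs to this PF.
 */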
1366 
1367 /**
1368  * ice_force_phys_link_state - Force the physical link state
1369  * @vsi: VSI to force the physical link state to up/down
1370  * @link_up: true/false indicates to set the physical link to up/down
1371  *
1372  * Force the physical link state by getting the current PHY capabilities from
1373  * hardware and setting the PHY config based on the determined capabilities. If
1374  * link changes a link event will be triggered because both the Enable Automatic
1375  * link changes, a link event will be triggered because both the Enable Automatic
1376  *
1377  * Returns 0 on success, negative on failure
1378  */
1379 static int ice_force_phys_link_state(struct ice_vsi *vsi, bool link_up)
1380 {
1381         struct ice_aqc_get_phy_caps_data *pcaps;
1382         struct ice_aqc_set_phy_cfg_data *cfg;
1383         struct ice_port_info *pi;
1384         struct device *dev;
1385         int retcode;
1386 
1387         if (!vsi || !vsi->port_info || !vsi->back)
1388                 return -EINVAL;
1389         if (vsi->type != ICE_VSI_PF)
1390                 return 0;
1391 
1392         dev = &vsi->back->pdev->dev;
1393 
1394         pi = vsi->port_info;
1395 
1396         pcaps = devm_kzalloc(dev, sizeof(*pcaps), GFP_KERNEL);
1397         if (!pcaps)
1398                 return -ENOMEM;
1399 
1400         retcode = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps,
1401                                       NULL);
1402         if (retcode) {
1403                 dev_err(dev,
1404                         "Failed to get phy capabilities, VSI %d error %d\n",
1405                         vsi->vsi_num, retcode);
1406                 retcode = -EIO;
1407                 goto out;
1408         }
1409 
1410         /* No change in link */
1411         if (link_up == !!(pcaps->caps & ICE_AQC_PHY_EN_LINK) &&
1412             link_up == !!(pi->phy.link_info.link_info & ICE_AQ_LINK_UP))
1413                 goto out;
1414 
1415         cfg = devm_kzalloc(dev, sizeof(*cfg), GFP_KERNEL);
1416         if (!cfg) {
1417                 retcode = -ENOMEM;
1418                 goto out;
1419         }
1420 
1421         cfg->phy_type_low = pcaps->phy_type_low;
1422         cfg->phy_type_high = pcaps->phy_type_high;
1423         cfg->caps = pcaps->caps | ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
1424         cfg->low_power_ctrl = pcaps->low_power_ctrl;
1425         cfg->eee_cap = pcaps->eee_cap;
1426         cfg->eeer_value = pcaps->eeer_value;
1427         cfg->link_fec_opt = pcaps->link_fec_options;
1428         if (link_up)
1429                 cfg->caps |= ICE_AQ_PHY_ENA_LINK;
1430         else
1431                 cfg->caps &= ~ICE_AQ_PHY_ENA_LINK;
1432 
1433         retcode = ice_aq_set_phy_cfg(&vsi->back->hw, pi->lport, cfg, NULL);
1434         if (retcode) {
1435                 dev_err(dev, "Failed to set phy config, VSI %d error %d\n",
1436                         vsi->vsi_num, retcode);
1437                 retcode = -EIO;
1438         }
1439 
1440         devm_kfree(dev, cfg);
1441 out:
1442         devm_kfree(dev, pcaps);
1443         return retcode;
1444 }
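
/* Illustrative usage (not part of the driver): a caller that wants to take
 * the port down administratively could do
 *
 *	err = ice_force_phys_link_state(vsi, false);
 *
 * while ice_check_media_subtask() below makes the matching "true" call to
 * bring the link back up once media is detected.
 */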
1445 
1446 /**
1447  * ice_check_media_subtask - Check for media; bring link up if detected.
1448  * @pf: pointer to PF struct
1449  */
1450 static void ice_check_media_subtask(struct ice_pf *pf)
1451 {
1452         struct ice_port_info *pi;
1453         struct ice_vsi *vsi;
1454         int err;
1455 
1456         vsi = ice_get_main_vsi(pf);
1457         if (!vsi)
1458                 return;
1459 
1460         /* No need to check for media if it's already present or the interface
1461          * is down
1462          */
1463         if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags) ||
1464             test_bit(__ICE_DOWN, vsi->state))
1465                 return;
1466 
1467         /* Refresh link info and check if media is present */
1468         pi = vsi->port_info;
1469         err = ice_update_link_info(pi);
1470         if (err)
1471                 return;
1472 
1473         if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
1474                 err = ice_force_phys_link_state(vsi, true);
1475                 if (err)
1476                         return;
1477                 clear_bit(ICE_FLAG_NO_MEDIA, pf->flags);
1478 
1479                 /* A Link Status Event will be generated; the event handler
1480                  * will complete bringing the interface up
1481                  */
1482         }
1483 }
1484 
1485 /**
1486  * ice_service_task - manage and run subtasks
1487  * @work: pointer to work_struct contained by the PF struct
1488  */
1489 static void ice_service_task(struct work_struct *work)
1490 {
1491         struct ice_pf *pf = container_of(work, struct ice_pf, serv_task);
1492         unsigned long start_time = jiffies;
1493 
1494         /* subtasks */
1495 
1496         /* process reset requests first */
1497         ice_reset_subtask(pf);
1498 
1499         /* bail if a reset/recovery cycle is pending or rebuild failed */
1500         if (ice_is_reset_in_progress(pf->state) ||
1501             test_bit(__ICE_SUSPENDED, pf->state) ||
1502             test_bit(__ICE_NEEDS_RESTART, pf->state)) {
1503                 ice_service_task_complete(pf);
1504                 return;
1505         }
1506 
1507         ice_clean_adminq_subtask(pf);
1508         ice_check_media_subtask(pf);
1509         ice_check_for_hang_subtask(pf);
1510         ice_sync_fltr_subtask(pf);
1511         ice_handle_mdd_event(pf);
1512         ice_watchdog_subtask(pf);
1513 
1514         if (ice_is_safe_mode(pf)) {
1515                 ice_service_task_complete(pf);
1516                 return;
1517         }
1518 
1519         ice_process_vflr_event(pf);
1520         ice_clean_mailboxq_subtask(pf);
1521 
1522         /* Clear __ICE_SERVICE_SCHED flag to allow scheduling next event */
1523         ice_service_task_complete(pf);
1524 
1525         /* If the tasks have taken longer than one service timer period
1526          * or there is more work to be done, reset the service timer to
1527          * schedule the service task now.
1528          */
1529         if (time_after(jiffies, (start_time + pf->serv_tmr_period)) ||
1530             test_bit(__ICE_MDD_EVENT_PENDING, pf->state) ||
1531             test_bit(__ICE_VFLR_EVENT_PENDING, pf->state) ||
1532             test_bit(__ICE_MAILBOXQ_EVENT_PENDING, pf->state) ||
1533             test_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state))
1534                 mod_timer(&pf->serv_tmr, jiffies);
1535 }
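
/* Scheduling note: pf->serv_tmr_period is initialized to HZ (~1 second) in
 * ice_init_pf(), so the check above effectively re-arms the timer to fire
 * immediately whenever the subtasks overran one period or any *_EVENT_PENDING
 * bit shows there is still work queued.
 */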
1536 
1537 /**
1538  * ice_set_ctrlq_len - helper function to set controlq length
1539  * @hw: pointer to the HW instance
1540  */
1541 static void ice_set_ctrlq_len(struct ice_hw *hw)
1542 {
1543         hw->adminq.num_rq_entries = ICE_AQ_LEN;
1544         hw->adminq.num_sq_entries = ICE_AQ_LEN;
1545         hw->adminq.rq_buf_size = ICE_AQ_MAX_BUF_LEN;
1546         hw->adminq.sq_buf_size = ICE_AQ_MAX_BUF_LEN;
1547         hw->mailboxq.num_rq_entries = ICE_MBXRQ_LEN;
1548         hw->mailboxq.num_sq_entries = ICE_MBXSQ_LEN;
1549         hw->mailboxq.rq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
1550         hw->mailboxq.sq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
1551 }
1552 
1553 /**
1554  * ice_irq_affinity_notify - Callback for affinity changes
1555  * @notify: context as to what irq was changed
1556  * @mask: the new affinity mask
1557  *
1558  * This is a callback function used by the irq_set_affinity_notifier function
1559  * so that we may register to receive changes to the irq affinity masks.
1560  */
1561 static void
1562 ice_irq_affinity_notify(struct irq_affinity_notify *notify,
1563                         const cpumask_t *mask)
1564 {
1565         struct ice_q_vector *q_vector =
1566                 container_of(notify, struct ice_q_vector, affinity_notify);
1567 
1568         cpumask_copy(&q_vector->affinity_mask, mask);
1569 }
1570 
1571 /**
1572  * ice_irq_affinity_release - Callback for affinity notifier release
1573  * @ref: internal core kernel usage
1574  *
1575  * This is a callback function used by the irq_set_affinity_notifier function
1576  * to inform the current notification subscriber that they will no longer
1577  * receive notifications.
1578  */
1579 static void ice_irq_affinity_release(struct kref __always_unused *ref) {}
1580 
1581 /**
1582  * ice_vsi_ena_irq - Enable IRQ for the given VSI
1583  * @vsi: the VSI being configured
1584  */
1585 static int ice_vsi_ena_irq(struct ice_vsi *vsi)
1586 {
1587         struct ice_hw *hw = &vsi->back->hw;
1588         int i;
1589 
1590         ice_for_each_q_vector(vsi, i)
1591                 ice_irq_dynamic_ena(hw, vsi, vsi->q_vectors[i]);
1592 
1593         ice_flush(hw);
1594         return 0;
1595 }
1596 
1597 /**
1598  * ice_vsi_req_irq_msix - get MSI-X vectors from the OS for the VSI
1599  * @vsi: the VSI being configured
1600  * @basename: name for the vector
1601  */
1602 static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename)
1603 {
1604         int q_vectors = vsi->num_q_vectors;
1605         struct ice_pf *pf = vsi->back;
1606         int base = vsi->base_vector;
1607         int rx_int_idx = 0;
1608         int tx_int_idx = 0;
1609         int vector, err;
1610         int irq_num;
1611 
1612         for (vector = 0; vector < q_vectors; vector++) {
1613                 struct ice_q_vector *q_vector = vsi->q_vectors[vector];
1614 
1615                 irq_num = pf->msix_entries[base + vector].vector;
1616 
1617                 if (q_vector->tx.ring && q_vector->rx.ring) {
1618                         snprintf(q_vector->name, sizeof(q_vector->name) - 1,
1619                                  "%s-%s-%d", basename, "TxRx", rx_int_idx++);
1620                         tx_int_idx++;
1621                 } else if (q_vector->rx.ring) {
1622                         snprintf(q_vector->name, sizeof(q_vector->name) - 1,
1623                                  "%s-%s-%d", basename, "rx", rx_int_idx++);
1624                 } else if (q_vector->tx.ring) {
1625                         snprintf(q_vector->name, sizeof(q_vector->name) - 1,
1626                                  "%s-%s-%d", basename, "tx", tx_int_idx++);
1627                 } else {
1628                         /* skip this unused q_vector */
1629                         continue;
1630                 }
1631                 err = devm_request_irq(&pf->pdev->dev, irq_num,
1632                                        vsi->irq_handler, 0,
1633                                        q_vector->name, q_vector);
1634                 if (err) {
1635                         netdev_err(vsi->netdev,
1636                                    "MSIX request_irq failed, error: %d\n", err);
1637                         goto free_q_irqs;
1638                 }
1639 
1640                 /* register for affinity change notifications */
1641                 q_vector->affinity_notify.notify = ice_irq_affinity_notify;
1642                 q_vector->affinity_notify.release = ice_irq_affinity_release;
1643                 irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);
1644 
1645                 /* assign the mask for this irq */
1646                 irq_set_affinity_hint(irq_num, &q_vector->affinity_mask);
1647         }
1648 
1649         vsi->irqs_ready = true;
1650         return 0;
1651 
1652 free_q_irqs:
1653         while (vector) {
1654                 vector--;
1655                 irq_num = pf->msix_entries[base + vector].vector;
1656                 irq_set_affinity_notifier(irq_num, NULL);
1657                 irq_set_affinity_hint(irq_num, NULL);
1658                 devm_free_irq(&pf->pdev->dev, irq_num, vsi->q_vectors[vector]);
1659         }
1660         return err;
1661 }
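
/* For reference, the vector names built above look like "<basename>-TxRx-0",
 * "<basename>-TxRx-1", ... when a vector services both a Tx and an Rx ring,
 * or "<basename>-rx-N"/"<basename>-tx-N" for single-direction vectors, with
 * basename supplied by the caller.
 */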
1662 
1663 /**
1664  * ice_ena_misc_vector - enable the non-queue interrupts
1665  * @pf: board private structure
1666  */
1667 static void ice_ena_misc_vector(struct ice_pf *pf)
1668 {
1669         struct ice_hw *hw = &pf->hw;
1670         u32 val;
1671 
1672         /* clear things first */
1673         wr32(hw, PFINT_OICR_ENA, 0);    /* disable all */
1674         rd32(hw, PFINT_OICR);           /* read to clear */
1675 
1676         val = (PFINT_OICR_ECC_ERR_M |
1677                PFINT_OICR_MAL_DETECT_M |
1678                PFINT_OICR_GRST_M |
1679                PFINT_OICR_PCI_EXCEPTION_M |
1680                PFINT_OICR_VFLR_M |
1681                PFINT_OICR_HMC_ERR_M |
1682                PFINT_OICR_PE_CRITERR_M);
1683 
1684         wr32(hw, PFINT_OICR_ENA, val);
1685 
1686         /* SW_ITR_IDX = 0, but don't change INTENA */
1687         wr32(hw, GLINT_DYN_CTL(pf->oicr_idx),
1688              GLINT_DYN_CTL_SW_ITR_INDX_M | GLINT_DYN_CTL_INTENA_MSK_M);
1689 }
1690 
1691 /**
1692  * ice_misc_intr - misc interrupt handler
1693  * @irq: interrupt number
1694  * @data: pointer to a q_vector
1695  */
1696 static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
1697 {
1698         struct ice_pf *pf = (struct ice_pf *)data;
1699         struct ice_hw *hw = &pf->hw;
1700         irqreturn_t ret = IRQ_NONE;
1701         u32 oicr, ena_mask;
1702 
1703         set_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state);
1704         set_bit(__ICE_MAILBOXQ_EVENT_PENDING, pf->state);
1705 
1706         oicr = rd32(hw, PFINT_OICR);
1707         ena_mask = rd32(hw, PFINT_OICR_ENA);
1708 
1709         if (oicr & PFINT_OICR_SWINT_M) {
1710                 ena_mask &= ~PFINT_OICR_SWINT_M;
1711                 pf->sw_int_count++;
1712         }
1713 
1714         if (oicr & PFINT_OICR_MAL_DETECT_M) {
1715                 ena_mask &= ~PFINT_OICR_MAL_DETECT_M;
1716                 set_bit(__ICE_MDD_EVENT_PENDING, pf->state);
1717         }
1718         if (oicr & PFINT_OICR_VFLR_M) {
1719                 ena_mask &= ~PFINT_OICR_VFLR_M;
1720                 set_bit(__ICE_VFLR_EVENT_PENDING, pf->state);
1721         }
1722 
1723         if (oicr & PFINT_OICR_GRST_M) {
1724                 u32 reset;
1725 
1726                 /* we have a reset warning */
1727                 ena_mask &= ~PFINT_OICR_GRST_M;
1728                 reset = (rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_RESET_TYPE_M) >>
1729                         GLGEN_RSTAT_RESET_TYPE_S;
1730 
1731                 if (reset == ICE_RESET_CORER)
1732                         pf->corer_count++;
1733                 else if (reset == ICE_RESET_GLOBR)
1734                         pf->globr_count++;
1735                 else if (reset == ICE_RESET_EMPR)
1736                         pf->empr_count++;
1737                 else
1738                         dev_dbg(&pf->pdev->dev, "Invalid reset type %d\n",
1739                                 reset);
1740 
1741                 /* If a reset cycle isn't already in progress, we set a bit in
1742                  * pf->state so that the service task can start a reset/rebuild.
1743                  * We also make note of which reset happened so that peer
1744                  * devices/drivers can be informed.
1745                  */
1746                 if (!test_and_set_bit(__ICE_RESET_OICR_RECV, pf->state)) {
1747                         if (reset == ICE_RESET_CORER)
1748                                 set_bit(__ICE_CORER_RECV, pf->state);
1749                         else if (reset == ICE_RESET_GLOBR)
1750                                 set_bit(__ICE_GLOBR_RECV, pf->state);
1751                         else
1752                                 set_bit(__ICE_EMPR_RECV, pf->state);
1753 
1754                          /* There are a couple of different bits at play here.
1755                          * hw->reset_ongoing indicates whether the hardware is
1756                          * in reset. This is set to true when a reset interrupt
1757                          * is received and set back to false after the driver
1758                          * has determined that the hardware is out of reset.
1759                          *
1760                          * __ICE_RESET_OICR_RECV in pf->state indicates
1761                          * that a post reset rebuild is required before the
1762                          * driver is operational again. This is set above.
1763                          *
1764                          * As this is the start of the reset/rebuild cycle, set
1765                          * both to indicate that.
1766                          */
1767                         hw->reset_ongoing = true;
1768                 }
1769         }
1770 
1771         if (oicr & PFINT_OICR_HMC_ERR_M) {
1772                 ena_mask &= ~PFINT_OICR_HMC_ERR_M;
1773                 dev_dbg(&pf->pdev->dev,
1774                         "HMC Error interrupt - info 0x%x, data 0x%x\n",
1775                         rd32(hw, PFHMC_ERRORINFO),
1776                         rd32(hw, PFHMC_ERRORDATA));
1777         }
1778 
1779         /* Report any remaining unexpected interrupts */
1780         oicr &= ena_mask;
1781         if (oicr) {
1782                 dev_dbg(&pf->pdev->dev, "unhandled interrupt oicr=0x%08x\n",
1783                         oicr);
1784                 /* If a critical error is pending there is no choice but to
1785                  * reset the device.
1786                  */
1787                 if (oicr & (PFINT_OICR_PE_CRITERR_M |
1788                             PFINT_OICR_PCI_EXCEPTION_M |
1789                             PFINT_OICR_ECC_ERR_M)) {
1790                         set_bit(__ICE_PFR_REQ, pf->state);
1791                         ice_service_task_schedule(pf);
1792                 }
1793         }
1794         ret = IRQ_HANDLED;
1795 
1796         if (!test_bit(__ICE_DOWN, pf->state)) {
1797                 ice_service_task_schedule(pf);
1798                 ice_irq_dynamic_ena(hw, NULL, NULL);
1799         }
1800 
1801         return ret;
1802 }
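
/* Note: the handler returns IRQ_HANDLED after only recording most causes in
 * pf->state; the actual work (admin queue cleanup, MDD handling, resets) is
 * deferred to ice_service_task(), which is scheduled at the end unless the
 * PF is already marked __ICE_DOWN.
 */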
1803 
1804 /**
1805  * ice_dis_ctrlq_interrupts - disable control queue interrupts
1806  * @hw: pointer to HW structure
1807  */
1808 static void ice_dis_ctrlq_interrupts(struct ice_hw *hw)
1809 {
1810         /* disable Admin queue Interrupt causes */
1811         wr32(hw, PFINT_FW_CTL,
1812              rd32(hw, PFINT_FW_CTL) & ~PFINT_FW_CTL_CAUSE_ENA_M);
1813 
1814         /* disable Mailbox queue Interrupt causes */
1815         wr32(hw, PFINT_MBX_CTL,
1816              rd32(hw, PFINT_MBX_CTL) & ~PFINT_MBX_CTL_CAUSE_ENA_M);
1817 
1818         /* disable Control queue Interrupt causes */
1819         wr32(hw, PFINT_OICR_CTL,
1820              rd32(hw, PFINT_OICR_CTL) & ~PFINT_OICR_CTL_CAUSE_ENA_M);
1821 
1822         ice_flush(hw);
1823 }
1824 
1825 /**
1826  * ice_free_irq_msix_misc - Unroll misc vector setup
1827  * @pf: board private structure
1828  */
1829 static void ice_free_irq_msix_misc(struct ice_pf *pf)
1830 {
1831         struct ice_hw *hw = &pf->hw;
1832 
1833         ice_dis_ctrlq_interrupts(hw);
1834 
1835         /* disable OICR interrupt */
1836         wr32(hw, PFINT_OICR_ENA, 0);
1837         ice_flush(hw);
1838 
1839         if (pf->msix_entries) {
1840                 synchronize_irq(pf->msix_entries[pf->oicr_idx].vector);
1841                 devm_free_irq(&pf->pdev->dev,
1842                               pf->msix_entries[pf->oicr_idx].vector, pf);
1843         }
1844 
1845         pf->num_avail_sw_msix += 1;
1846         ice_free_res(pf->irq_tracker, pf->oicr_idx, ICE_RES_MISC_VEC_ID);
1847 }
1848 
1849 /**
1850  * ice_ena_ctrlq_interrupts - enable control queue interrupts
1851  * @hw: pointer to HW structure
1852  * @reg_idx: HW vector index to associate the control queue interrupts with
1853  */
1854 static void ice_ena_ctrlq_interrupts(struct ice_hw *hw, u16 reg_idx)
1855 {
1856         u32 val;
1857 
1858         val = ((reg_idx & PFINT_OICR_CTL_MSIX_INDX_M) |
1859                PFINT_OICR_CTL_CAUSE_ENA_M);
1860         wr32(hw, PFINT_OICR_CTL, val);
1861 
1862         /* enable Admin queue Interrupt causes */
1863         val = ((reg_idx & PFINT_FW_CTL_MSIX_INDX_M) |
1864                PFINT_FW_CTL_CAUSE_ENA_M);
1865         wr32(hw, PFINT_FW_CTL, val);
1866 
1867         /* enable Mailbox queue Interrupt causes */
1868         val = ((reg_idx & PFINT_MBX_CTL_MSIX_INDX_M) |
1869                PFINT_MBX_CTL_CAUSE_ENA_M);
1870         wr32(hw, PFINT_MBX_CTL, val);
1871 
1872         ice_flush(hw);
1873 }
1874 
1875 /**
1876  * ice_req_irq_msix_misc - Setup the misc vector to handle non queue events
1877  * @pf: board private structure
1878  *
1879  * This sets up the handler for MSIX 0, which is used to manage the
1880  * non-queue interrupts, e.g. AdminQ and errors. This is not used
1881  * when in MSI or Legacy interrupt mode.
1882  */
1883 static int ice_req_irq_msix_misc(struct ice_pf *pf)
1884 {
1885         struct ice_hw *hw = &pf->hw;
1886         int oicr_idx, err = 0;
1887 
1888         if (!pf->int_name[0])
1889                 snprintf(pf->int_name, sizeof(pf->int_name) - 1, "%s-%s:misc",
1890                          dev_driver_string(&pf->pdev->dev),
1891                          dev_name(&pf->pdev->dev));
1892 
1893         /* Do not request IRQ but do enable OICR interrupt since settings are
1894          * lost during reset. Note that this function is called only during
1895          * rebuild path and not while reset is in progress.
1896          */
1897         if (ice_is_reset_in_progress(pf->state))
1898                 goto skip_req_irq;
1899 
1900         /* reserve one vector in irq_tracker for misc interrupts */
1901         oicr_idx = ice_get_res(pf, pf->irq_tracker, 1, ICE_RES_MISC_VEC_ID);
1902         if (oicr_idx < 0)
1903                 return oicr_idx;
1904 
1905         pf->num_avail_sw_msix -= 1;
1906         pf->oicr_idx = oicr_idx;
1907 
1908         err = devm_request_irq(&pf->pdev->dev,
1909                                pf->msix_entries[pf->oicr_idx].vector,
1910                                ice_misc_intr, 0, pf->int_name, pf);
1911         if (err) {
1912                 dev_err(&pf->pdev->dev,
1913                         "devm_request_irq for %s failed: %d\n",
1914                         pf->int_name, err);
1915                 ice_free_res(pf->irq_tracker, 1, ICE_RES_MISC_VEC_ID);
1916                 pf->num_avail_sw_msix += 1;
1917                 return err;
1918         }
1919 
1920 skip_req_irq:
1921         ice_ena_misc_vector(pf);
1922 
1923         ice_ena_ctrlq_interrupts(hw, pf->oicr_idx);
1924         wr32(hw, GLINT_ITR(ICE_RX_ITR, pf->oicr_idx),
1925              ITR_REG_ALIGN(ICE_ITR_8K) >> ICE_ITR_GRAN_S);
1926 
1927         ice_flush(hw);
1928         ice_irq_dynamic_ena(hw, NULL, NULL);
1929 
1930         return 0;
1931 }
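
/* The GLINT_ITR write above programs the Rx ITR of the misc/OICR vector with
 * ICE_ITR_8K, which (assuming the usual interpretation of the _8K name) caps
 * that vector at roughly 8K interrupts per second.
 */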
1932 
1933 /**
1934  * ice_napi_add - register NAPI handler for the VSI
1935  * @vsi: VSI for which NAPI handler is to be registered
1936  *
1937  * This function is only called in the driver's load path. Registering the NAPI
1938  * handler is done in ice_vsi_alloc_q_vector() for all other cases (i.e. resume,
1939  * reset/rebuild, etc.)
1940  */
1941 static void ice_napi_add(struct ice_vsi *vsi)
1942 {
1943         int v_idx;
1944 
1945         if (!vsi->netdev)
1946                 return;
1947 
1948         ice_for_each_q_vector(vsi, v_idx)
1949                 netif_napi_add(vsi->netdev, &vsi->q_vectors[v_idx]->napi,
1950                                ice_napi_poll, NAPI_POLL_WEIGHT);
1951 }
1952 
1953 /**
1954  * ice_set_ops - set netdev and ethtool ops for the given netdev
1955  * @netdev: netdev instance
1956  */
1957 static void ice_set_ops(struct net_device *netdev)
1958 {
1959         struct ice_pf *pf = ice_netdev_to_pf(netdev);
1960 
1961         if (ice_is_safe_mode(pf)) {
1962                 netdev->netdev_ops = &ice_netdev_safe_mode_ops;
1963                 ice_set_ethtool_safe_mode_ops(netdev);
1964                 return;
1965         }
1966 
1967         netdev->netdev_ops = &ice_netdev_ops;
1968         ice_set_ethtool_ops(netdev);
1969 }
1970 
1971 /**
1972  * ice_set_netdev_features - set features for the given netdev
1973  * @netdev: netdev instance
1974  */
1975 static void ice_set_netdev_features(struct net_device *netdev)
1976 {
1977         struct ice_pf *pf = ice_netdev_to_pf(netdev);
1978         netdev_features_t csumo_features;
1979         netdev_features_t vlano_features;
1980         netdev_features_t dflt_features;
1981         netdev_features_t tso_features;
1982 
1983         if (ice_is_safe_mode(pf)) {
1984                 /* safe mode */
1985                 netdev->features = NETIF_F_SG | NETIF_F_HIGHDMA;
1986                 netdev->hw_features = netdev->features;
1987                 return;
1988         }
1989 
1990         dflt_features = NETIF_F_SG      |
1991                         NETIF_F_HIGHDMA |
1992                         NETIF_F_RXHASH;
1993 
1994         csumo_features = NETIF_F_RXCSUM   |
1995                          NETIF_F_IP_CSUM  |
1996                          NETIF_F_SCTP_CRC |
1997                          NETIF_F_IPV6_CSUM;
1998 
1999         vlano_features = NETIF_F_HW_VLAN_CTAG_FILTER |
2000                          NETIF_F_HW_VLAN_CTAG_TX     |
2001                          NETIF_F_HW_VLAN_CTAG_RX;
2002 
2003         tso_features = NETIF_F_TSO;
2004 
2005         /* set features that user can change */
2006         netdev->hw_features = dflt_features | csumo_features |
2007                               vlano_features | tso_features;
2008 
2009         /* enable features */
2010         netdev->features |= netdev->hw_features;
2011         /* encap and VLAN devices inherit default, csumo and tso features */
2012         netdev->hw_enc_features |= dflt_features | csumo_features |
2013                                    tso_features;
2014         netdev->vlan_features |= dflt_features | csumo_features |
2015                                  tso_features;
2016 }
2017 
2018 /**
2019  * ice_cfg_netdev - Allocate, configure and register a netdev
2020  * @vsi: the VSI associated with the new netdev
2021  *
2022  * Returns 0 on success, negative value on failure
2023  */
2024 static int ice_cfg_netdev(struct ice_vsi *vsi)
2025 {
2026         struct ice_pf *pf = vsi->back;
2027         struct ice_netdev_priv *np;
2028         struct net_device *netdev;
2029         u8 mac_addr[ETH_ALEN];
2030         int err;
2031 
2032         netdev = alloc_etherdev_mqs(sizeof(*np), vsi->alloc_txq,
2033                                     vsi->alloc_rxq);
2034         if (!netdev)
2035                 return -ENOMEM;
2036 
2037         vsi->netdev = netdev;
2038         np = netdev_priv(netdev);
2039         np->vsi = vsi;
2040 
2041         ice_set_netdev_features(netdev);
2042 
2043         ice_set_ops(netdev);
2044 
2045         if (vsi->type == ICE_VSI_PF) {
2046                 SET_NETDEV_DEV(netdev, &pf->pdev->dev);
2047                 ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr);
2048                 ether_addr_copy(netdev->dev_addr, mac_addr);
2049                 ether_addr_copy(netdev->perm_addr, mac_addr);
2050         }
2051 
2052         netdev->priv_flags |= IFF_UNICAST_FLT;
2053 
2054         /* Setup netdev TC information */
2055         ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc);
2056 
2057         /* setup watchdog timeout value to be 5 second */
2058         netdev->watchdog_timeo = 5 * HZ;
2059 
2060         netdev->min_mtu = ETH_MIN_MTU;
2061         netdev->max_mtu = ICE_MAX_MTU;
2062 
2063         err = register_netdev(vsi->netdev);
2064         if (err)
2065                 return err;
2066 
2067         netif_carrier_off(vsi->netdev);
2068 
2069         /* make sure transmit queues start off as stopped */
2070         netif_tx_stop_all_queues(vsi->netdev);
2071 
2072         return 0;
2073 }
2074 
2075 /**
2076  * ice_fill_rss_lut - Fill the RSS lookup table with default values
2077  * @lut: Lookup table
2078  * @rss_table_size: Lookup table size
2079  * @rss_size: Range of queue number for hashing
2080  * @rss_size: Number of queues to spread the hash across
2081 void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size)
2082 {
2083         u16 i;
2084 
2085         for (i = 0; i < rss_table_size; i++)
2086                 lut[i] = i % rss_size;
2087 }
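
/* Example (illustrative values): with rss_table_size = 8 and rss_size = 3 the
 * resulting LUT is {0, 1, 2, 0, 1, 2, 0, 1}, i.e. hash buckets are spread
 * round-robin across the first rss_size queues.
 */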
2088 
2089 /**
2090  * ice_pf_vsi_setup - Set up a PF VSI
2091  * @pf: board private structure
2092  * @pi: pointer to the port_info instance
2093  *
2094  * Returns pointer to the successfully allocated VSI software struct
2095  * on success, otherwise returns NULL on failure.
2096  */
2097 static struct ice_vsi *
2098 ice_pf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
2099 {
2100         return ice_vsi_setup(pf, pi, ICE_VSI_PF, ICE_INVAL_VFID);
2101 }
2102 
2103 /**
2104  * ice_lb_vsi_setup - Set up a loopback VSI
2105  * @pf: board private structure
2106  * @pi: pointer to the port_info instance
2107  *
2108  * Returns pointer to the successfully allocated VSI software struct
2109  * on success, otherwise returns NULL on failure.
2110  */
2111 struct ice_vsi *
2112 ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
2113 {
2114         return ice_vsi_setup(pf, pi, ICE_VSI_LB, ICE_INVAL_VFID);
2115 }
2116 
2117 /**
2118  * ice_vlan_rx_add_vid - Add a VLAN ID filter to HW offload
2119  * @netdev: network interface to be adjusted
2120  * @proto: unused protocol
2121  * @vid: VLAN ID to be added
2122  *
2123  * net_device_ops implementation for adding VLAN IDs
2124  */
2125 static int
2126 ice_vlan_rx_add_vid(struct net_device *netdev, __always_unused __be16 proto,
2127                     u16 vid)
2128 {
2129         struct ice_netdev_priv *np = netdev_priv(netdev);
2130         struct ice_vsi *vsi = np->vsi;
2131         int ret;
2132 
2133         if (vid >= VLAN_N_VID) {
2134                 netdev_err(netdev, "VLAN id requested %d is out of range %d\n",
2135                            vid, VLAN_N_VID);
2136                 return -EINVAL;
2137         }
2138 
2139         if (vsi->info.pvid)
2140                 return -EINVAL;
2141 
2142         /* Enable VLAN pruning when VLAN 0 is added */
2143         if (unlikely(!vid)) {
2144                 ret = ice_cfg_vlan_pruning(vsi, true, false);
2145                 if (ret)
2146                         return ret;
2147         }
2148 
2149         /* Add all VLAN IDs including 0 to the switch filter. VLAN ID 0 is
2150          * needed to continue allowing all untagged packets since VLAN prune
2151          * list is applied to all packets by the switch
2152          */
2153         ret = ice_vsi_add_vlan(vsi, vid);
2154         if (!ret) {
2155                 vsi->vlan_ena = true;
2156                 set_bit(ICE_VSI_FLAG_VLAN_FLTR_CHANGED, vsi->flags);
2157         }
2158 
2159         return ret;
2160 }
2161 
2162 /**
2163  * ice_vlan_rx_kill_vid - Remove a VLAN ID filter from HW offload
2164  * @netdev: network interface to be adjusted
2165  * @proto: unused protocol
2166  * @vid: VLAN ID to be removed
2167  *
2168  * net_device_ops implementation for removing VLAN IDs
2169  */
2170 static int
2171 ice_vlan_rx_kill_vid(struct net_device *netdev, __always_unused __be16 proto,
2172                      u16 vid)
2173 {
2174         struct ice_netdev_priv *np = netdev_priv(netdev);
2175         struct ice_vsi *vsi = np->vsi;
2176         int ret;
2177 
2178         if (vsi->info.pvid)
2179                 return -EINVAL;
2180 
2181         /* Make sure ice_vsi_kill_vlan is successful before updating VLAN
2182          * information
2183          */
2184         ret = ice_vsi_kill_vlan(vsi, vid);
2185         if (ret)
2186                 return ret;
2187 
2188         /* Disable VLAN pruning when VLAN 0 is removed */
2189         if (unlikely(!vid))
2190                 ret = ice_cfg_vlan_pruning(vsi, false, false);
2191 
2192         vsi->vlan_ena = false;
2193         set_bit(ICE_VSI_FLAG_VLAN_FLTR_CHANGED, vsi->flags);
2194         return ret;
2195 }
2196 
2197 /**
2198  * ice_setup_pf_sw - Setup the HW switch on startup or after reset
2199  * @pf: board private structure
2200  *
2201  * Returns 0 on success, negative value on failure
2202  */
2203 static int ice_setup_pf_sw(struct ice_pf *pf)
2204 {
2205         struct ice_vsi *vsi;
2206         int status = 0;
2207 
2208         if (ice_is_reset_in_progress(pf->state))
2209                 return -EBUSY;
2210 
2211         vsi = ice_pf_vsi_setup(pf, pf->hw.port_info);
2212         if (!vsi) {
2213                 status = -ENOMEM;
2214                 goto unroll_vsi_setup;
2215         }
2216 
2217         status = ice_cfg_netdev(vsi);
2218         if (status) {
2219                 status = -ENODEV;
2220                 goto unroll_vsi_setup;
2221         }
2222 
2223         /* registering the NAPI handler requires both the queues and
2224          * netdev to be created, which are done in ice_pf_vsi_setup()
2225          * and ice_cfg_netdev() respectively
2226          */
2227         ice_napi_add(vsi);
2228 
2229         status = ice_init_mac_fltr(pf);
2230         if (status)
2231                 goto unroll_napi_add;
2232 
2233         return status;
2234 
2235 unroll_napi_add:
2236         if (vsi) {
2237                 ice_napi_del(vsi);
2238                 if (vsi->netdev) {
2239                         if (vsi->netdev->reg_state == NETREG_REGISTERED)
2240                                 unregister_netdev(vsi->netdev);
2241                         free_netdev(vsi->netdev);
2242                         vsi->netdev = NULL;
2243                 }
2244         }
2245 
2246 unroll_vsi_setup:
2247         if (vsi) {
2248                 ice_vsi_free_q_vectors(vsi);
2249                 ice_vsi_delete(vsi);
2250                 ice_vsi_put_qs(vsi);
2251                 ice_vsi_clear(vsi);
2252         }
2253         return status;
2254 }
2255 
2256 /**
2257  * ice_get_avail_q_count - Get count of queues available for use
2258  * @pf_qmap: bitmap to get queue use count from
2259  * @lock: pointer to a mutex that protects access to pf_qmap
2260  * @size: size of the bitmap
2261  */
2262 static u16
2263 ice_get_avail_q_count(unsigned long *pf_qmap, struct mutex *lock, u16 size)
2264 {
2265         u16 count = 0, bit;
2266 
2267         mutex_lock(lock);
2268         for_each_clear_bit(bit, pf_qmap, size)
2269                 count++;
2270         mutex_unlock(lock);
2271 
2272         return count;
2273 }
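
/* Example (illustrative values): with size = 16 and queues 0-3 marked in use
 * in pf_qmap, the loop above counts the 12 remaining clear bits, i.e. the
 * number of queues still available for allocation.
 */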
2274 
2275 /**
2276  * ice_get_avail_txq_count - Get count of Tx queues available for use
2277  * @pf: pointer to an ice_pf instance
2278  */
2279 u16 ice_get_avail_txq_count(struct ice_pf *pf)
2280 {
2281         return ice_get_avail_q_count(pf->avail_txqs, &pf->avail_q_mutex,
2282                                      pf->max_pf_txqs);
2283 }
2284 
2285 /**
2286  * ice_get_avail_rxq_count - Get count of Rx queues available for use
2287  * @pf: pointer to an ice_pf instance
2288  */
2289 u16 ice_get_avail_rxq_count(struct ice_pf *pf)
2290 {
2291         return ice_get_avail_q_count(pf->avail_rxqs, &pf->avail_q_mutex,
2292                                      pf->max_pf_rxqs);
2293 }
2294 
2295 /**
2296  * ice_deinit_pf - Unrolls initializations done by ice_init_pf
2297  * @pf: board private structure to initialize
2298  */
2299 static void ice_deinit_pf(struct ice_pf *pf)
2300 {
2301         ice_service_task_stop(pf);
2302         mutex_destroy(&pf->sw_mutex);
2303         mutex_destroy(&pf->avail_q_mutex);
2304 
2305         if (pf->avail_txqs) {
2306                 bitmap_free(pf->avail_txqs);
2307                 pf->avail_txqs = NULL;
2308         }
2309 
2310         if (pf->avail_rxqs) {
2311                 bitmap_free(pf->avail_rxqs);
2312                 pf->avail_rxqs = NULL;
2313         }
2314 }
2315 
2316 /**
2317  * ice_set_pf_caps - set PFs capability flags
2318  * @pf: pointer to the PF instance
2319  */
2320 static void ice_set_pf_caps(struct ice_pf *pf)
2321 {
2322         struct ice_hw_func_caps *func_caps = &pf->hw.func_caps;
2323 
2324         clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
2325         if (func_caps->common_cap.dcb)
2326                 set_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
2327 #ifdef CONFIG_PCI_IOV
2328         clear_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
2329         if (func_caps->common_cap.sr_iov_1_1) {
2330                 set_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
2331                 pf->num_vfs_supported = min_t(int, func_caps->num_allocd_vfs,
2332                                               ICE_MAX_VF_COUNT);
2333         }
2334 #endif /* CONFIG_PCI_IOV */
2335         clear_bit(ICE_FLAG_RSS_ENA, pf->flags);
2336         if (func_caps->common_cap.rss_table_size)
2337                 set_bit(ICE_FLAG_RSS_ENA, pf->flags);
2338 
2339         pf->max_pf_txqs = func_caps->common_cap.num_txq;
2340         pf->max_pf_rxqs = func_caps->common_cap.num_rxq;
2341 }
2342 
2343 /**
2344  * ice_init_pf - Initialize general software structures (struct ice_pf)
2345  * @pf: board private structure to initialize
2346  */
2347 static int ice_init_pf(struct ice_pf *pf)
2348 {
2349         ice_set_pf_caps(pf);
2350 
2351         mutex_init(&pf->sw_mutex);
2352 
2353         /* setup service timer and periodic service task */
2354         timer_setup(&pf->serv_tmr, ice_service_timer, 0);
2355         pf->serv_tmr_period = HZ;
2356         INIT_WORK(&pf->serv_task, ice_service_task);
2357         clear_bit(__ICE_SERVICE_SCHED, pf->state);
2358 
2359         mutex_init(&pf->avail_q_mutex);
2360         pf->avail_txqs = bitmap_zalloc(pf->max_pf_txqs, GFP_KERNEL);
2361         if (!pf->avail_txqs)
2362                 return -ENOMEM;
2363 
2364         pf->avail_rxqs = bitmap_zalloc(pf->max_pf_rxqs, GFP_KERNEL);
2365         if (!pf->avail_rxqs) {
2366                 bitmap_free(pf->avail_txqs);
2367                 pf->avail_txqs = NULL;
2368                 return -ENOMEM;
2369         }
2370 
2371         return 0;
2372 }
2373 
2374 /**
2375  * ice_ena_msix_range - Request a range of MSIX vectors from the OS
2376  * @pf: board private structure
2377  *
2378  * Compute the number of MSI-X vectors required (v_budget) and request them
2379  * from the OS. Returns the number of vectors reserved, or negative on failure.
2380  */
2381 static int ice_ena_msix_range(struct ice_pf *pf)
2382 {
2383         int v_left, v_actual, v_budget = 0;
2384         int needed, err, i;
2385 
2386         v_left = pf->hw.func_caps.common_cap.num_msix_vectors;
2387 
2388         /* reserve one vector for miscellaneous handler */
2389         needed = 1;
2390         if (v_left < needed)
2391                 goto no_hw_vecs_left_err;
2392         v_budget += needed;
2393         v_left -= needed;
2394 
2395         /* reserve vectors for LAN traffic */
2396         needed = min_t(int, num_online_cpus(), v_left);
2397         if (v_left < needed)
2398                 goto no_hw_vecs_left_err;
2399         pf->num_lan_msix = needed;
2400         v_budget += needed;
2401         v_left -= needed;
2402 
2403         pf->msix_entries = devm_kcalloc(&pf->pdev->dev, v_budget,
2404                                         sizeof(*pf->msix_entries), GFP_KERNEL);
2405 
2406         if (!pf->msix_entries) {
2407                 err = -ENOMEM;
2408                 goto exit_err;
2409         }
2410 
2411         for (i = 0; i < v_budget; i++)
2412                 pf->msix_entries[i].entry = i;
2413 
2414         /* actually reserve the vectors */
2415         v_actual = pci_enable_msix_range(pf->pdev, pf->msix_entries,
2416                                          ICE_MIN_MSIX, v_budget);
2417 
2418         if (v_actual < 0) {
2419                 dev_err(&pf->pdev->dev, "unable to reserve MSI-X vectors\n");
2420                 err = v_actual;
2421                 goto msix_err;
2422         }
2423 
2424         if (v_actual < v_budget) {
2425                 dev_warn(&pf->pdev->dev,
2426                          "not enough OS MSI-X vectors. requested = %d, obtained = %d\n",
2427                          v_budget, v_actual);
2428 /* 2 vectors for LAN (traffic + OICR) */
2429 #define ICE_MIN_LAN_VECS 2
2430 
2431                 if (v_actual < ICE_MIN_LAN_VECS) {
2432                         /* error if we can't get minimum vectors */
2433                         pci_disable_msix(pf->pdev);
2434                         err = -ERANGE;
2435                         goto msix_err;
2436                 } else {
2437                         pf->num_lan_msix = ICE_MIN_LAN_VECS;
2438                 }
2439         }
2440 
2441         return v_actual;
2442 
2443 msix_err:
2444         devm_kfree(&pf->pdev->dev, pf->msix_entries);
2445         goto exit_err;
2446 
2447 no_hw_vecs_left_err:
2448         dev_err(&pf->pdev->dev,
2449                 "not enough device MSI-X vectors. requested = %d, available = %d\n",
2450                 needed, v_left);
2451         err = -ERANGE;
2452 exit_err:
2453         pf->num_lan_msix = 0;
2454         return err;
2455 }
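
/* Worked example (hypothetical host): with 8 online CPUs and enough device
 * vectors, v_budget = 1 (misc/OICR) + 8 (LAN) = 9. If the OS grants fewer
 * than requested but at least ICE_MIN_LAN_VECS, the driver falls back to
 * pf->num_lan_msix = ICE_MIN_LAN_VECS; below that, MSI-X is disabled and the
 * function fails with -ERANGE.
 */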
2456 
2457 /**
2458  * ice_dis_msix - Disable MSI-X interrupt setup in OS
2459  * @pf: board private structure
2460  */
2461 static void ice_dis_msix(struct ice_pf *pf)
2462 {
2463         pci_disable_msix(pf->pdev);
2464         devm_kfree(&pf->pdev->dev, pf->msix_entries);
2465         pf->msix_entries = NULL;
2466 }
2467 
2468 /**
2469  * ice_clear_interrupt_scheme - Undo things done by ice_init_interrupt_scheme
2470  * @pf: board private structure
2471  */
2472 static void ice_clear_interrupt_scheme(struct ice_pf *pf)
2473 {
2474         ice_dis_msix(pf);
2475 
2476         if (pf->irq_tracker) {
2477                 devm_kfree(&pf->pdev->dev, pf->irq_tracker);
2478                 pf->irq_tracker = NULL;
2479         }
2480 }
2481 
2482 /**
2483  * ice_init_interrupt_scheme - Determine proper interrupt scheme
2484  * @pf: board private structure to initialize
2485  */
2486 static int ice_init_interrupt_scheme(struct ice_pf *pf)
2487 {
2488         int vectors;
2489 
2490         vectors = ice_ena_msix_range(pf);
2491 
2492         if (vectors < 0)
2493                 return vectors;
2494 
2495         /* set up vector assignment tracking */
2496         pf->irq_tracker =
2497                 devm_kzalloc(&pf->pdev->dev, sizeof(*pf->irq_tracker) +
2498                              (sizeof(u16) * vectors), GFP_KERNEL);
2499         if (!pf->irq_tracker) {
2500                 ice_dis_msix(pf);
2501                 return -ENOMEM;
2502         }
2503 
2504         /* populate SW interrupts pool with number of OS granted IRQs. */
2505         pf->num_avail_sw_msix = vectors;
2506         pf->irq_tracker->num_entries = vectors;
2507         pf->irq_tracker->end = pf->irq_tracker->num_entries;
2508 
2509         return 0;
2510 }
2511 
2512 /**
2513  * ice_log_pkg_init - log result of DDP package load
2514  * @hw: pointer to hardware info
2515  * @status: status of package load
2516  */
2517 static void
2518 ice_log_pkg_init(struct ice_hw *hw, enum ice_status *status)
2519 {
2520         struct ice_pf *pf = (struct ice_pf *)hw->back;
2521         struct device *dev = &pf->pdev->dev;
2522 
2523         switch (*status) {
2524         case ICE_SUCCESS:
2525                 /* The package download AdminQ command returned success either
2526                  * because this download succeeded or because it returned
2527                  * ICE_ERR_AQ_NO_WORK, meaning a package is already loaded on the device.
2528                  */
2529                 if (hw->pkg_ver.major == hw->active_pkg_ver.major &&
2530                     hw->pkg_ver.minor == hw->active_pkg_ver.minor &&
2531                     hw->pkg_ver.update == hw->active_pkg_ver.update &&
2532                     hw->pkg_ver.draft == hw->active_pkg_ver.draft &&
2533                     !memcmp(hw->pkg_name, hw->active_pkg_name,
2534                             sizeof(hw->pkg_name))) {
2535                         if (hw->pkg_dwnld_status == ICE_AQ_RC_EEXIST)
2536                                 dev_info(dev,
2537                                          "DDP package already present on device: %s version %d.%d.%d.%d\n",
2538                                          hw->active_pkg_name,
2539                                          hw->active_pkg_ver.major,
2540                                          hw->active_pkg_ver.minor,
2541                                          hw->active_pkg_ver.update,
2542                                          hw->active_pkg_ver.draft);
2543                         else
2544                                 dev_info(dev,
2545                                          "The DDP package was successfully loaded: %s version %d.%d.%d.%d\n",
2546                                          hw->active_pkg_name,
2547                                          hw->active_pkg_ver.major,
2548                                          hw->active_pkg_ver.minor,
2549                                          hw->active_pkg_ver.update,
2550                                          hw->active_pkg_ver.draft);
2551                 } else if (hw->active_pkg_ver.major != ICE_PKG_SUPP_VER_MAJ ||
2552                            hw->active_pkg_ver.minor != ICE_PKG_SUPP_VER_MNR) {
2553                         dev_err(dev,
2554                                 "The device has a DDP package that is not supported by the driver.  The device has package '%s' version %d.%d.x.x.  The driver requires version %d.%d.x.x.  Entering Safe Mode.\n",
2555                                 hw->active_pkg_name,
2556                                 hw->active_pkg_ver.major,
2557                                 hw->active_pkg_ver.minor,
2558                                 ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR);
2559                         *status = ICE_ERR_NOT_SUPPORTED;
2560                 } else if (hw->active_pkg_ver.major == ICE_PKG_SUPP_VER_MAJ &&
2561                            hw->active_pkg_ver.minor == ICE_PKG_SUPP_VER_MNR) {
2562                         dev_info(dev,
2563                                  "The driver could not load the DDP package file because a compatible DDP package is already present on the device.  The device has package '%s' version %d.%d.%d.%d.  The package file found by the driver: '%s' version %d.%d.%d.%d.\n",
2564                                  hw->active_pkg_name,
2565                                  hw->active_pkg_ver.major,
2566                                  hw->active_pkg_ver.minor,
2567                                  hw->active_pkg_ver.update,
2568                                  hw->active_pkg_ver.draft,
2569                                  hw->pkg_name,
2570                                  hw->pkg_ver.major,
2571                                  hw->pkg_ver.minor,
2572                                  hw->pkg_ver.update,
2573                                  hw->pkg_ver.draft);
2574                 } else {
2575                         dev_err(dev,
2576                                 "An unknown error occurred when loading the DDP package, please reboot the system.  If the problem persists, update the NVM.  Entering Safe Mode.\n");
2577                         *status = ICE_ERR_NOT_SUPPORTED;
2578                 }
2579                 break;
2580         case ICE_ERR_BUF_TOO_SHORT:
2581                 /* fall-through */
2582         case ICE_ERR_CFG:
2583                 dev_err(dev,
2584                         "The DDP package file is invalid. Entering Safe Mode.\n");
2585                 break;
2586         case ICE_ERR_NOT_SUPPORTED:
2587                 /* Package File version not supported */
2588                 if (hw->pkg_ver.major > ICE_PKG_SUPP_VER_MAJ ||
2589                     (hw->pkg_ver.major == ICE_PKG_SUPP_VER_MAJ &&
2590                      hw->pkg_ver.minor > ICE_PKG_SUPP_VER_MNR))
2591                         dev_err(dev,
2592                                 "The DDP package file version is higher than the driver supports.  Please use an updated driver.  Entering Safe Mode.\n");
2593                 else if (hw->pkg_ver.major < ICE_PKG_SUPP_VER_MAJ ||
2594                          (hw->pkg_ver.major == ICE_PKG_SUPP_VER_MAJ &&
2595                           hw->pkg_ver.minor < ICE_PKG_SUPP_VER_MNR))
2596                         dev_err(dev,
2597                                 "The DDP package file version is lower than the driver supports.  The driver requires version %d.%d.x.x.  Please use an updated DDP Package file.  Entering Safe Mode.\n",
2598                                 ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR);
2599                 break;
2600         case ICE_ERR_AQ_ERROR:
2601                 switch (hw->adminq.sq_last_status) {
2602                 case ICE_AQ_RC_ENOSEC:
2603                 case ICE_AQ_RC_EBADSIG:
2604                         dev_err(dev,
2605                                 "The DDP package could not be loaded because its signature is not valid.  Please use a valid DDP Package.  Entering Safe Mode.\n");
2606                         return;
2607                 case ICE_AQ_RC_ESVN:
2608                         dev_err(dev,
2609                                 "The DDP Package could not be loaded because its security revision is too low.  Please use an updated DDP Package.  Entering Safe Mode.\n");
2610                         return;
2611                 case ICE_AQ_RC_EBADMAN:
2612                 case ICE_AQ_RC_EBADBUF:
2613                         dev_err(dev,
2614                                 "An error occurred on the device while loading the DDP package.  The device will be reset.\n");
2615                         return;
2616                 default:
2617                         break;
2618                 }
2619                 /* fall-through */
2620         default:
2621                 dev_err(dev,
2622                         "An unknown error (%d) occurred when loading the DDP package.  Entering Safe Mode.\n",
2623                         *status);
2624                 break;
2625         }
2626 }
2627 
2628 /**
2629  * ice_load_pkg - load/reload the DDP Package file
2630  * @firmware: firmware structure when firmware requested or NULL for reload
2631  * @pf: pointer to the PF instance
2632  *
2633  * Called on probe and post CORER/GLOBR rebuild to load DDP Package and
2634  * initialize HW tables.
2635  */
2636 static void
2637 ice_load_pkg(const struct firmware *firmware, struct ice_pf *pf)
2638 {
2639         enum ice_status status = ICE_ERR_PARAM;
2640         struct device *dev = &pf->pdev->dev;
2641         struct ice_hw *hw = &pf->hw;
2642 
2643         /* Load DDP Package */
2644         if (firmware && !hw->pkg_copy) {
2645                 status = ice_copy_and_init_pkg(hw, firmware->data,
2646                                                firmware->size);
2647                 ice_log_pkg_init(hw, &status);
2648         } else if (!firmware && hw->pkg_copy) {
2649                 /* Reload package during rebuild after CORER/GLOBR reset */
2650                 status = ice_init_pkg(hw, hw->pkg_copy, hw->pkg_size);
2651                 ice_log_pkg_init(hw, &status);
2652         } else {
2653                 dev_err(dev,
2654                         "The DDP package file failed to load. Entering Safe Mode.\n");
2655         }
2656 
2657         if (status) {
2658                 /* Safe Mode */
2659                 clear_bit(ICE_FLAG_ADV_FEATURES, pf->flags);
2660                 return;
2661         }
2662 
2663         /* A successful package download is the precondition for advanced
2664          * features, hence setting the ICE_FLAG_ADV_FEATURES flag
2665          */
2666         set_bit(ICE_FLAG_ADV_FEATURES, pf->flags);
2667 }
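
/* When ICE_FLAG_ADV_FEATURES stays cleared the driver runs in Safe Mode:
 * ice_set_ops() and ice_set_netdev_features() install the reduced safe-mode
 * netdev ops and a minimal feature set, and ice_service_task() skips the
 * VFLR/mailbox subtasks.
 */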
2668 
2669 /**
2670  * ice_verify_cacheline_size - verify driver's assumption of 64 Byte cache lines
2671  * @pf: pointer to the PF structure
2672  *
2673  * There is no error returned here because the driver should be able to handle
2674  * 128 Byte cache lines, so we only print a warning in case issues are seen,
2675  * specifically with Tx.
2676  */
2677 static void ice_verify_cacheline_size(struct ice_pf *pf)
2678 {
2679         if (rd32(&pf->hw, GLPCI_CNF2) & GLPCI_CNF2_CACHELINE_SIZE_M)
2680                 dev_warn(&pf->pdev->dev,
2681                          "%d Byte cache line assumption is invalid, driver may have Tx timeouts!\n",
2682                          ICE_CACHE_LINE_BYTES);
2683 }
2684 
2685 /**
2686  * ice_send_version - update firmware with driver version
2687  * @pf: PF struct
2688  *
2689  * Returns ICE_SUCCESS on success, else error code
2690  */
2691 static enum ice_status ice_send_version(struct ice_pf *pf)
2692 {
2693         struct ice_driver_ver dv;
2694 
2695         dv.major_ver = DRV_VERSION_MAJOR;
2696         dv.minor_ver = DRV_VERSION_MINOR;
2697         dv.build_ver = DRV_VERSION_BUILD;
2698         dv.subbuild_ver = 0;
2699         strscpy((char *)dv.driver_string, DRV_VERSION,
2700                 sizeof(dv.driver_string));
2701         return ice_aq_send_driver_ver(&pf->hw, &dv, NULL);
2702 }
2703 
2704 /**
2705  * ice_get_opt_fw_name - return optional firmware file name or NULL
2706  * @pf: pointer to the PF instance
2707  */
2708 static char *ice_get_opt_fw_name(struct ice_pf *pf)
2709 {
2710         /* Optional firmware name is the same as the default, with an additional
2711          * dash followed by an EUI-64 identifier (PCIe Device Serial Number)
2712          */
2713         struct pci_dev *pdev = pf->pdev;
2714         char *opt_fw_filename = NULL;
2715         u32 dword;
2716         u8 dsn[8];
2717         int pos;
2718 
2719         /* Determine the name of the optional file using the DSN (two
2720          * dwords following the start of the DSN Capability).
2721          */
2722         pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DSN);
2723         if (pos) {
2724                 opt_fw_filename = kzalloc(NAME_MAX, GFP_KERNEL);
2725                 if (!opt_fw_filename)
2726                         return NULL;
2727 
2728                 pci_read_config_dword(pdev, pos + 4, &dword);
2729                 put_unaligned_le32(dword, &dsn[0]);
2730                 pci_read_config_dword(pdev, pos + 8, &dword);
2731                 put_unaligned_le32(dword, &dsn[4]);
2732                 snprintf(opt_fw_filename, NAME_MAX,
2733                          "%sice-%02x%02x%02x%02x%02x%02x%02x%02x.pkg",
2734                          ICE_DDP_PKG_PATH,
2735                          dsn[7], dsn[6], dsn[5], dsn[4],
2736                          dsn[3], dsn[2], dsn[1], dsn[0]);
2737         }
2738 
2739         return opt_fw_filename;
2740 }
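
/* Example (hypothetical DSN): a device serial number of 0x0102030405060708
 * would produce an optional package name of the form
 * "<ICE_DDP_PKG_PATH>ice-0102030405060708.pkg"; if that file is absent,
 * ice_request_fw() below simply falls back to the default ICE_DDP_PKG_FILE.
 */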
2741 
2742 /**
2743  * ice_request_fw - request the DDP package file and download it to the device
2744  * @pf: pointer to the PF instance
2745  */
2746 static void ice_request_fw(struct ice_pf *pf)
2747 {
2748         char *opt_fw_filename = ice_get_opt_fw_name(pf);
2749         const struct firmware *firmware = NULL;
2750         struct device *dev = &pf->pdev->dev;
2751         int err = 0;
2752 
2753         /* The optional device-specific DDP (if present) overrides the default DDP
2754          * package file. The kernel logs a debug message if the file doesn't exist,
2755          * and warning messages for other errors.
2756          */
2757         if (opt_fw_filename) {
2758                 err = firmware_request_nowarn(&firmware, opt_fw_filename, dev);
2759                 if (err) {
2760                         kfree(opt_fw_filename);
2761                         goto dflt_pkg_load;
2762                 }
2763 
2764                 /* request for firmware was successful. Download to device */
2765                 ice_load_pkg(firmware, pf);
2766                 kfree(opt_fw_filename);
2767                 release_firmware(firmware);
2768                 return;
2769         }
2770 
2771 dflt_pkg_load:
2772         err = request_firmware(&firmware, ICE_DDP_PKG_FILE, dev);
2773         if (err) {
2774                 dev_err(dev,
2775                         "The DDP package file was not found or could not be read. Entering Safe Mode\n");
2776                 return;
2777         }
2778 
2779         /* request for firmware was successful. Download to device */
2780         ice_load_pkg(firmware, pf);
2781         release_firmware(firmware);
2782 }
2783 
2784 /**
2785  * ice_probe - Device initialization routine
2786  * @pdev: PCI device information struct
2787  * @ent: entry in ice_pci_tbl
2788  *
2789  * Returns 0 on success, negative on failure
2790  */
2791 static int
2792 ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
2793 {
2794         struct device *dev = &pdev->dev;
2795         struct ice_pf *pf;
2796         struct ice_hw *hw;
2797         int err;
2798 
2799         /* this driver uses devres, see Documentation/driver-api/driver-model/devres.rst */
2800         err = pcim_enable_device(pdev);
2801         if (err)
2802                 return err;
2803 
2804         err = pcim_iomap_regions(pdev, BIT(ICE_BAR0), pci_name(pdev));
2805         if (err) {
2806                 dev_err(dev, "BAR0 I/O map error %d\n", err);
2807                 return err;
2808         }
2809 
2810         pf = devm_kzalloc(dev, sizeof(*pf), GFP_KERNEL);
2811         if (!pf)
2812                 return -ENOMEM;
2813 
2814         /* set up for high or low DMA */
2815         err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
2816         if (err)
2817                 err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
2818         if (err) {
2819                 dev_err(dev, "DMA configuration failed: 0x%x\n", err);
2820                 return err;
2821         }
2822 
2823         pci_enable_pcie_error_reporting(pdev);
2824         pci_set_master(pdev);
2825 
2826         pf->pdev = pdev;
2827         pci_set_drvdata(pdev, pf);
2828         set_bit(__ICE_DOWN, pf->state);
2829         /* Disable service task until DOWN bit is cleared */
2830         set_bit(__ICE_SERVICE_DIS, pf->state);
2831 
2832         hw = &pf->hw;
2833         hw->hw_addr = pcim_iomap_table(pdev)[ICE_BAR0];
2834         hw->back = pf;
2835         hw->vendor_id = pdev->vendor;
2836         hw->device_id = pdev->device;
2837         pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
2838         hw->subsystem_vendor_id = pdev->subsystem_vendor;
2839         hw->subsystem_device_id = pdev->subsystem_device;
2840         hw->bus.device = PCI_SLOT(pdev->devfn);
2841         hw->bus.func = PCI_FUNC(pdev->devfn);
2842         ice_set_ctrlq_len(hw);
2843 
2844         pf->msg_enable = netif_msg_init(debug, ICE_DFLT_NETIF_M);
2845 
2846 #ifndef CONFIG_DYNAMIC_DEBUG
2847         if (debug < -1)
2848                 hw->debug_mask = debug;
2849 #endif
2850 
2851         err = ice_init_hw(hw);
2852         if (err) {
2853                 dev_err(dev, "ice_init_hw failed: %d\n", err);
2854                 err = -EIO;
2855                 goto err_exit_unroll;
2856         }
2857 
2858         dev_info(dev, "firmware %d.%d.%d api %d.%d.%d nvm %s build 0x%08x\n",
2859                  hw->fw_maj_ver, hw->fw_min_ver, hw->fw_patch,
2860                  hw->api_maj_ver, hw->api_min_ver, hw->api_patch,
2861                  ice_nvm_version_str(hw), hw->fw_build);
2862 
2863         ice_request_fw(pf);
2864 
2865         /* if ice_request_fw fails, the ICE_FLAG_ADV_FEATURES bit won't be
2866          * set in pf->flags, which will cause ice_is_safe_mode to return
2867          * true
2868          */
2869         if (ice_is_safe_mode(pf)) {
2870                 dev_err(dev,
2871                         "Package download failed. Advanced features disabled - Device now in Safe Mode\n");
2872                 /* we already got function/device capabilities but these don't
2873                  * reflect what the driver needs to do in safe mode. Instead of
2874                  * adding conditional logic everywhere to ignore these
2875                  * device/function capabilities, override them.
2876                  */
2877                 ice_set_safe_mode_caps(hw);
2878         }
2879 
2880         err = ice_init_pf(pf);
2881         if (err) {
2882                 dev_err(dev, "ice_init_pf failed: %d\n", err);
2883                 goto err_init_pf_unroll;
2884         }
2885 
2886         pf->num_alloc_vsi = hw->func_caps.guar_num_vsi;
2887         if (!pf->num_alloc_vsi) {
2888                 err = -EIO;
2889                 goto err_init_pf_unroll;
2890         }
2891 
2892         pf->vsi = devm_kcalloc(dev, pf->num_alloc_vsi, sizeof(*pf->vsi),
2893                                GFP_KERNEL);
2894         if (!pf->vsi) {
2895                 err = -ENOMEM;
2896                 goto err_init_pf_unroll;
2897         }
2898 
2899         err = ice_init_interrupt_scheme(pf);
2900         if (err) {
2901                 dev_err(dev, "ice_init_interrupt_scheme failed: %d\n", err);
2902                 err = -EIO;
2903                 goto err_init_interrupt_unroll;
2904         }
2905 
2906         /* Driver is mostly up */
2907         clear_bit(__ICE_DOWN, pf->state);
2908 
2909         /* In case of MSIX we are going to set up the misc vector right here
2910          * to handle admin queue events etc. In case of legacy and MSI,
2911          * the misc functionality and queue processing are combined in
2912          * the same vector, and that gets set up at open.
2913          */
2914         err = ice_req_irq_msix_misc(pf);
2915         if (err) {
2916                 dev_err(dev, "setup of misc vector failed: %d\n", err);
2917                 goto err_init_interrupt_unroll;
2918         }
2919 
2920         /* create switch struct for the switch element created by FW on boot */
2921         pf->first_sw = devm_kzalloc(dev, sizeof(*pf->first_sw), GFP_KERNEL);
2922         if (!pf->first_sw) {
2923                 err = -ENOMEM;
2924                 goto err_msix_misc_unroll;
2925         }
2926 
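             /* default bridge mode: VEB (local switching between VSIs behind the
              * port) when the firmware reports VEB capability, otherwise VEPA
              * (traffic is hairpinned through the external switch)
              */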
2927         if (hw->evb_veb)
2928                 pf->first_sw->bridge_mode = BRIDGE_MODE_VEB;
2929         else
2930                 pf->first_sw->bridge_mode = BRIDGE_MODE_VEPA;
2931 
2932         pf->first_sw->pf = pf;
2933 
2934         /* record the sw_id available for later use */
2935         pf->first_sw->sw_id = hw->port_info->sw_id;
2936 
2937         err = ice_setup_pf_sw(pf);
2938         if (err) {
2939                 dev_err(dev, "probe failed due to setup PF switch:%d\n", err);
2940                 goto err_alloc_sw_unroll;
2941         }
2942 
2943         clear_bit(__ICE_SERVICE_DIS, pf->state);
2944 
2945         /* tell the firmware we are up */
2946         err = ice_send_version(pf);
2947         if (err) {
2948                 dev_err(dev,
2949                         "probe failed sending driver version %s. error: %d\n",
2950                         ice_drv_ver, err);
2951                 goto err_alloc_sw_unroll;
2952         }
2953 
2954         /* since everything is good, start the service timer */
2955         mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
2956 
2957         err = ice_init_link_events(pf->hw.port_info);
2958         if (err) {
2959                 dev_err(dev, "ice_init_link_events failed: %d\n", err);
2960                 goto err_alloc_sw_unroll;
2961         }
2962 
2963         ice_verify_cacheline_size(pf);
2964 
2965         /* If no DDP-driven features need to be set up, return here */
2966         if (ice_is_safe_mode(pf))
2967                 return 0;
2968 
2969         /* initialize DDP driven features */
2970 
2971         /* Note: DCB init failure is non-fatal to load */
2972         if (ice_init_pf_dcb(pf, false)) {
2973                 clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
2974                 clear_bit(ICE_FLAG_DCB_ENA, pf->flags);
2975         } else {
2976                 ice_cfg_lldp_mib_change(&pf->hw, true);
2977         }
2978 
2979         return 0;
2980 
2981 err_alloc_sw_unroll:
2982         set_bit(__ICE_SERVICE_DIS, pf->state);
2983         set_bit(__ICE_DOWN, pf->state);
2984         devm_kfree(&pf->pdev->dev, pf->first_sw);
2985 err_msix_misc_unroll:
2986         ice_free_irq_msix_misc(pf);
2987 err_init_interrupt_unroll:
2988         ice_clear_interrupt_scheme(pf);
2989         devm_kfree(dev, pf->vsi);
2990 err_init_pf_unroll:
2991         ice_deinit_pf(pf);
2992         ice_deinit_hw(hw);
2993 err_exit_unroll:
2994         pci_disable_pcie_error_reporting(pdev);
2995         return err;
2996 }
2997 
2998 /**
2999  * ice_remove - Device removal routine
3000  * @pdev: PCI device information struct
3001  */
3002 static void ice_remove(struct pci_dev *pdev)
3003 {
3004         struct ice_pf *pf = pci_get_drvdata(pdev);
3005         int i;
3006 
3007         if (!pf)
3008                 return;
3009 
3010         for (i = 0; i < ICE_MAX_RESET_WAIT; i++) {
3011                 if (!ice_is_reset_in_progress(pf->state))
3012                         break;
3013                 msleep(100);
3014         }
3015 
3016         set_bit(__ICE_DOWN, pf->state);
3017         ice_service_task_stop(pf);
3018 
3019         if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags))
3020                 ice_free_vfs(pf);
3021         ice_vsi_release_all(pf);
3022         ice_free_irq_msix_misc(pf);
3023         ice_for_each_vsi(pf, i) {
3024                 if (!pf->vsi[i])
3025                         continue;
3026                 ice_vsi_free_q_vectors(pf->vsi[i]);
3027         }
3028         ice_deinit_pf(pf);
3029         ice_deinit_hw(&pf->hw);
3030         ice_clear_interrupt_scheme(pf);
3031         /* Issue a PFR as part of the prescribed driver unload flow.  Do not
3032          * do it via ice_schedule_reset() since there is no need to rebuild
3033          * and the service task is already stopped.
3034          */
3035         ice_reset(&pf->hw, ICE_RESET_PFR);
3036         pci_disable_pcie_error_reporting(pdev);
3037 }
3038 
3039 /**
3040  * ice_pci_err_detected - warning that PCI error has been detected
3041  * @pdev: PCI device information struct
3042  * @err: the type of PCI error
3043  *
3044  * Called to warn that something happened on the PCI bus and the error handling
3045  * is in progress.  Allows the driver to gracefully prepare/handle PCI errors.
3046  */
3047 static pci_ers_result_t
3048 ice_pci_err_detected(struct pci_dev *pdev, enum pci_channel_state err)
3049 {
3050         struct ice_pf *pf = pci_get_drvdata(pdev);
3051 
3052         if (!pf) {
3053                 dev_err(&pdev->dev, "%s: unrecoverable device error %d\n",
3054                         __func__, err);
3055                 return PCI_ERS_RESULT_DISCONNECT;
3056         }
3057 
3058         if (!test_bit(__ICE_SUSPENDED, pf->state)) {
3059                 ice_service_task_stop(pf);
3060 
3061                 if (!test_bit(__ICE_PREPARED_FOR_RESET, pf->state)) {
3062                         set_bit(__ICE_PFR_REQ, pf->state);
3063                         ice_prepare_for_reset(pf);
3064                 }
3065         }
3066 
3067         return PCI_ERS_RESULT_NEED_RESET;
3068 }
3069 
3070 /**
3071  * ice_pci_err_slot_reset - a PCI slot reset has just happened
3072  * @pdev: PCI device information struct
3073  *
3074  * Called to determine if the driver can recover from the PCI slot reset by
3075  * using a register read to determine if the device is recoverable.
3076  */
3077 static pci_ers_result_t ice_pci_err_slot_reset(struct pci_dev *pdev)
3078 {
3079         struct ice_pf *pf = pci_get_drvdata(pdev);
3080         pci_ers_result_t result;
3081         int err;
3082         u32 reg;
3083 
3084         err = pci_enable_device_mem(pdev);
3085         if (err) {
3086                 dev_err(&pdev->dev,
3087                         "Cannot re-enable PCI device after reset, error %d\n",
3088                         err);
3089                 result = PCI_ERS_RESULT_DISCONNECT;
3090         } else {
3091                 pci_set_master(pdev);
3092                 pci_restore_state(pdev);
3093                 pci_save_state(pdev);
3094                 pci_wake_from_d3(pdev, false);
3095 
3096                 /* Check for life */
3097                 reg = rd32(&pf->hw, GLGEN_RTRIG);
3098                 if (!reg)
3099                         result = PCI_ERS_RESULT_RECOVERED;
3100                 else
3101                         result = PCI_ERS_RESULT_DISCONNECT;
3102         }
3103 
3104         err = pci_cleanup_aer_uncorrect_error_status(pdev);
3105         if (err)
3106                 dev_dbg(&pdev->dev,
3107                         "pci_cleanup_aer_uncorrect_error_status failed, error %d\n",
3108                         err);
3109         /* non-fatal, continue */
3110 
3111         return result;
3112 }
3113 
3114 /**
3115  * ice_pci_err_resume - restart operations after PCI error recovery
3116  * @pdev: PCI device information struct
3117  *
3118  * Called to allow the driver to bring things back up after PCI error and/or
3119  * reset recovery have finished
3120  */
3121 static void ice_pci_err_resume(struct pci_dev *pdev)
3122 {
3123         struct ice_pf *pf = pci_get_drvdata(pdev);
3124 
3125         if (!pf) {
3126                 dev_err(&pdev->dev,
3127                         "%s failed, device is unrecoverable\n", __func__);
3128                 return;
3129         }
3130 
3131         if (test_bit(__ICE_SUSPENDED, pf->state)) {
3132                 dev_dbg(&pdev->dev, "%s failed to resume normal operations!\n",
3133                         __func__);
3134                 return;
3135         }
3136 
3137         ice_do_reset(pf, ICE_RESET_PFR);
3138         ice_service_task_restart(pf);
3139         mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
3140 }
3141 
3142 /**
3143  * ice_pci_err_reset_prepare - prepare device driver for PCI reset
3144  * @pdev: PCI device information struct
3145  */
3146 static void ice_pci_err_reset_prepare(struct pci_dev *pdev)
3147 {
3148         struct ice_pf *pf = pci_get_drvdata(pdev);
3149 
3150         if (!test_bit(__ICE_SUSPENDED, pf->state)) {
3151                 ice_service_task_stop(pf);
3152 
3153                 if (!test_bit(__ICE_PREPARED_FOR_RESET, pf->state)) {
3154                         set_bit(__ICE_PFR_REQ, pf->state);
3155                         ice_prepare_for_reset(pf);
3156                 }
3157         }
3158 }
3159 
3160 /**
3161  * ice_pci_err_reset_done - PCI reset done, device driver reset can begin
3162  * @pdev: PCI device information struct
3163  */
3164 static void ice_pci_err_reset_done(struct pci_dev *pdev)
3165 {
3166         ice_pci_err_resume(pdev);
3167 }
3168 
3169 /* ice_pci_tbl - PCI Device ID Table
3170  *
3171  * Wildcard entries (PCI_ANY_ID) should come last
3172  * Last entry must be all 0s
3173  *
3174  * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
3175  *   Class, Class Mask, private data (not used) }
3176  */
3177 static const struct pci_device_id ice_pci_tbl[] = {
3178         { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_BACKPLANE), 0 },
3179         { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_QSFP), 0 },
3180         { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_SFP), 0 },
3181         /* required last entry */
3182         { 0, }
3183 };
3184 MODULE_DEVICE_TABLE(pci, ice_pci_tbl);
3185 
3186 static const struct pci_error_handlers ice_pci_err_handler = {
3187         .error_detected = ice_pci_err_detected,
3188         .slot_reset = ice_pci_err_slot_reset,
3189         .reset_prepare = ice_pci_err_reset_prepare,
3190         .reset_done = ice_pci_err_reset_done,
3191         .resume = ice_pci_err_resume
3192 };
3193 
3194 static struct pci_driver ice_driver = {
3195         .name = KBUILD_MODNAME,
3196         .id_table = ice_pci_tbl,
3197         .probe = ice_probe,
3198         .remove = ice_remove,
3199         .sriov_configure = ice_sriov_configure,
3200         .err_handler = &ice_pci_err_handler
3201 };
3202 
3203 /**
3204  * ice_module_init - Driver registration routine
3205  *
3206  * ice_module_init is the first routine called when the driver is
3207  * loaded. All it does is register with the PCI subsystem.
3208  */
3209 static int __init ice_module_init(void)
3210 {
3211         int status;
3212 
3213         pr_info("%s - version %s\n", ice_driver_string, ice_drv_ver);
3214         pr_info("%s\n", ice_copyright);
3215 
3216         ice_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, KBUILD_MODNAME);
3217         if (!ice_wq) {
3218                 pr_err("Failed to create workqueue\n");
3219                 return -ENOMEM;
3220         }
3221 
3222         status = pci_register_driver(&ice_driver);
3223         if (status) {
3224                 pr_err("failed to register PCI driver, err %d\n", status);
3225                 destroy_workqueue(ice_wq);
3226         }
3227 
3228         return status;
3229 }
3230 module_init(ice_module_init);
3231 
3232 /**
3233  * ice_module_exit - Driver exit cleanup routine
3234  *
3235  * ice_module_exit is called just before the driver is removed
3236  * from memory.
3237  */
3238 static void __exit ice_module_exit(void)
3239 {
3240         pci_unregister_driver(&ice_driver);
3241         destroy_workqueue(ice_wq);
3242         pr_info("module unloaded\n");
3243 }
3244 module_exit(ice_module_exit);
3245 
3246 /**
3247  * ice_set_mac_address - NDO callback to set MAC address
3248  * @netdev: network interface device structure
3249  * @pi: pointer to an address structure
3250  *
3251  * Returns 0 on success, negative on failure
3252  */
3253 static int ice_set_mac_address(struct net_device *netdev, void *pi)
3254 {
3255         struct ice_netdev_priv *np = netdev_priv(netdev);
3256         struct ice_vsi *vsi = np->vsi;
3257         struct ice_pf *pf = vsi->back;
3258         struct ice_hw *hw = &pf->hw;
3259         struct sockaddr *addr = pi;
3260         enum ice_status status;
3261         u8 flags = 0;
3262         int err = 0;
3263         u8 *mac;
3264 
3265         mac = (u8 *)addr->sa_data;
3266 
3267         if (!is_valid_ether_addr(mac))
3268                 return -EADDRNOTAVAIL;
3269 
3270         if (ether_addr_equal(netdev->dev_addr, mac)) {
3271                 netdev_warn(netdev, "already using mac %pM\n", mac);
3272                 return 0;
3273         }
3274 
3275         if (test_bit(__ICE_DOWN, pf->state) ||
3276             ice_is_reset_in_progress(pf->state)) {
3277                 netdev_err(netdev, "can't set mac %pM. device not ready\n",
3278                            mac);
3279                 return -EBUSY;
3280         }
3281 
3282         /* Changing the MAC address also requires updating the MAC-address
3283          * based filter rules created for the old address. So first remove the
3284          * old filter rule (ice_remove_mac) and then create a new filter rule
3285          * (ice_add_mac) for the new address; both operations go through the
3286          * ice_vsi_cfg_mac_fltr() wrapper, which handles add and remove
3287          * depending on its last argument.
3288          */
3289         status = ice_vsi_cfg_mac_fltr(vsi, netdev->dev_addr, false);
3290         if (status) {
3291                 err = -EADDRNOTAVAIL;
3292                 goto err_update_filters;
3293         }
3294 
3295         status = ice_vsi_cfg_mac_fltr(vsi, mac, true);
3296         if (status) {
3297                 err = -EADDRNOTAVAIL;
3298                 goto err_update_filters;
3299         }
3300 
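             /* note: this label is also reached on the success path with err == 0,
              * so the check below only reports the failure cases above
              */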
3301 err_update_filters:
3302         if (err) {
3303                 netdev_err(netdev, "can't set MAC %pM. filter update failed\n",
3304                            mac);
3305                 return err;
3306         }
3307 
3308         /* change the netdev's MAC address */
3309         memcpy(netdev->dev_addr, mac, netdev->addr_len);
3310         netdev_dbg(vsi->netdev, "updated MAC address to %pM\n",
3311                    netdev->dev_addr);
3312 
3313         /* write new MAC address to the firmware */
3314         flags = ICE_AQC_MAN_MAC_UPDATE_LAA_WOL;
3315         status = ice_aq_manage_mac_write(hw, mac, flags, NULL);
3316         if (status) {
3317                 netdev_err(netdev, "can't set MAC %pM. write to firmware failed error %d\n",
3318                            mac, status);
3319         }
3320         return 0;
3321 }
3322 
3323 /**
3324  * ice_set_rx_mode - NDO callback to set the netdev filters
3325  * @netdev: network interface device structure
3326  */
3327 static void ice_set_rx_mode(struct net_device *netdev)
3328 {
3329         struct ice_netdev_priv *np = netdev_priv(netdev);
3330         struct ice_vsi *vsi = np->vsi;
3331 
3332         if (!vsi)
3333                 return;
3334 
3335         /* Set the flags to synchronize filters
3336          * ndo_set_rx_mode may be triggered even without a change in netdev
3337          * flags
3338          */
3339         set_bit(ICE_VSI_FLAG_UMAC_FLTR_CHANGED, vsi->flags);
3340         set_bit(ICE_VSI_FLAG_MMAC_FLTR_CHANGED, vsi->flags);
3341         set_bit(ICE_FLAG_FLTR_SYNC, vsi->back->flags);
3342 
3343         /* schedule our worker thread which will take care of
3344          * applying the new filter changes
3345          */
3346         ice_service_task_schedule(vsi->back);
3347 }
3348 
3349 /**
3350  * ice_fdb_add - add an entry to the hardware database
3351  * @ndm: the input from the stack
3352  * @tb: pointer to array of nladdr (unused)
3353  * @dev: the net device pointer
3354  * @addr: the MAC address entry being added
3355  * @vid: VLAN ID
3356  * @flags: instructions from stack about fdb operation
3357  * @extack: netlink extended ack
3358  */
3359 static int
3360 ice_fdb_add(struct ndmsg *ndm, struct nlattr __always_unused *tb[],
3361             struct net_device *dev, const unsigned char *addr, u16 vid,
3362             u16 flags, struct netlink_ext_ack __always_unused *extack)
3363 {
3364         int err;
3365 
3366         if (vid) {
3367                 netdev_err(dev, "VLANs aren't supported yet for dev_uc|mc_add()\n");
3368                 return -EINVAL;
3369         }
3370         if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
3371                 netdev_err(dev, "FDB only supports static addresses\n");
3372                 return -EINVAL;
3373         }
3374 
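             /* add the address to the netdev's own UC/MC list here; the hardware
              * filter itself is programmed later via the rx-mode sync path
              */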
3375         if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
3376                 err = dev_uc_add_excl(dev, addr);
3377         else if (is_multicast_ether_addr(addr))
3378                 err = dev_mc_add_excl(dev, addr);
3379         else
3380                 err = -EINVAL;
3381 
3382         /* Only return duplicate errors if NLM_F_EXCL is set */
3383         if (err == -EEXIST && !(flags & NLM_F_EXCL))
3384                 err = 0;
3385 
3386         return err;
3387 }
3388 
3389 /**
3390  * ice_fdb_del - delete an entry from the hardware database
3391  * @ndm: the input from the stack
3392  * @tb: pointer to array of nladdr (unused)
3393  * @dev: the net device pointer
3394  * @addr: the MAC address entry being removed
3395  * @vid: VLAN ID
3396  */
3397 static int
3398 ice_fdb_del(struct ndmsg *ndm, __always_unused struct nlattr *tb[],
3399             struct net_device *dev, const unsigned char *addr,
3400             __always_unused u16 vid)
3401 {
3402         int err;
3403 
3404         if (ndm->ndm_state & NUD_PERMANENT) {
3405                 netdev_err(dev, "FDB only supports static addresses\n");
3406                 return -EINVAL;
3407         }
3408 
3409         if (is_unicast_ether_addr(addr))
3410                 err = dev_uc_del(dev, addr);
3411         else if (is_multicast_ether_addr(addr))
3412                 err = dev_mc_del(dev, addr);
3413         else
3414                 err = -EINVAL;
3415 
3416         return err;
3417 }
3418 
3419 /**
3420  * ice_set_features - set the netdev feature flags
3421  * @netdev: ptr to the netdev being adjusted
3422  * @features: the feature set that the stack is suggesting
3423  */
3424 static int
3425 ice_set_features(struct net_device *netdev, netdev_features_t features)
3426 {
3427         struct ice_netdev_priv *np = netdev_priv(netdev);
3428         struct ice_vsi *vsi = np->vsi;
3429         int ret = 0;
3430 
3431         /* Don't set any netdev advanced features with device in Safe Mode */
3432         if (ice_is_safe_mode(vsi->back)) {
3433                 dev_err(&vsi->back->pdev->dev,
3434                         "Device is in Safe Mode - not enabling advanced netdev features\n");
3435                 return ret;
3436         }
3437 
3438         /* Multiple features can be changed in one call so keep features in
3439          * separate if/else statements to guarantee each feature is checked
3440          */
3441         if (features & NETIF_F_RXHASH && !(netdev->features & NETIF_F_RXHASH))
3442                 ret = ice_vsi_manage_rss_lut(vsi, true);
3443         else if (!(features & NETIF_F_RXHASH) &&
3444                  netdev->features & NETIF_F_RXHASH)
3445                 ret = ice_vsi_manage_rss_lut(vsi, false);
3446 
3447         if ((features & NETIF_F_HW_VLAN_CTAG_RX) &&
3448             !(netdev->features & NETIF_F_HW_VLAN_CTAG_RX))
3449                 ret = ice_vsi_manage_vlan_stripping(vsi, true);
3450         else if (!(features & NETIF_F_HW_VLAN_CTAG_RX) &&
3451                  (netdev->features & NETIF_F_HW_VLAN_CTAG_RX))
3452                 ret = ice_vsi_manage_vlan_stripping(vsi, false);
3453 
3454         if ((features & NETIF_F_HW_VLAN_CTAG_TX) &&
3455             !(netdev->features & NETIF_F_HW_VLAN_CTAG_TX))
3456                 ret = ice_vsi_manage_vlan_insertion(vsi);
3457         else if (!(features & NETIF_F_HW_VLAN_CTAG_TX) &&
3458                  (netdev->features & NETIF_F_HW_VLAN_CTAG_TX))
3459                 ret = ice_vsi_manage_vlan_insertion(vsi);
3460 
3461         if ((features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
3462             !(netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
3463                 ret = ice_cfg_vlan_pruning(vsi, true, false);
3464         else if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
3465                  (netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
3466                 ret = ice_cfg_vlan_pruning(vsi, false, false);
3467 
3468         return ret;
3469 }
3470 
3471 /**
3472  * ice_vsi_vlan_setup - Setup VLAN offload properties on a VSI
3473  * @vsi: VSI to setup VLAN properties for
3474  */
3475 static int ice_vsi_vlan_setup(struct ice_vsi *vsi)
3476 {
3477         int ret = 0;
3478 
3479         if (vsi->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
3480                 ret = ice_vsi_manage_vlan_stripping(vsi, true);
3481         if (vsi->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)
3482                 ret = ice_vsi_manage_vlan_insertion(vsi);
3483 
3484         return ret;
3485 }
3486 
3487 /**
3488  * ice_vsi_cfg - Setup the VSI
3489  * @vsi: the VSI being configured
3490  *
3491  * Return 0 on success and negative value on error
3492  */
3493 int ice_vsi_cfg(struct ice_vsi *vsi)
3494 {
3495         int err;
3496 
3497         if (vsi->netdev) {
3498                 ice_set_rx_mode(vsi->netdev);
3499 
3500                 err = ice_vsi_vlan_setup(vsi);
3501 
3502                 if (err)
3503                         return err;
3504         }
3505         ice_vsi_cfg_dcb_rings(vsi);
3506 
3507         err = ice_vsi_cfg_lan_txqs(vsi);
3508         if (!err)
3509                 err = ice_vsi_cfg_rxqs(vsi);
3510 
3511         return err;
3512 }
3513 
3514 /**
3515  * ice_napi_enable_all - Enable NAPI for all q_vectors in the VSI
3516  * @vsi: the VSI being configured
3517  */
3518 static void ice_napi_enable_all(struct ice_vsi *vsi)
3519 {
3520         int q_idx;
3521 
3522         if (!vsi->netdev)
3523                 return;
3524 
3525         ice_for_each_q_vector(vsi, q_idx) {
3526                 struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
3527 
3528                 if (q_vector->rx.ring || q_vector->tx.ring)
3529                         napi_enable(&q_vector->napi);
3530         }
3531 }
3532 
3533 /**
3534  * ice_up_complete - Finish the last steps of bringing up a connection
3535  * @vsi: The VSI being configured
3536  *
3537  * Return 0 on success and negative value on error
3538  */
3539 static int ice_up_complete(struct ice_vsi *vsi)
3540 {
3541         struct ice_pf *pf = vsi->back;
3542         int err;
3543 
3544         ice_vsi_cfg_msix(vsi);
3545 
3546         /* Enable only Rx rings, Tx rings were enabled by the FW when the
3547          * Tx queue group list was configured and the context bits were
3548          * programmed using ice_vsi_cfg_txqs
3549          */
3550         err = ice_vsi_start_rx_rings(vsi);
3551         if (err)
3552                 return err;
3553 
3554         clear_bit(__ICE_DOWN, vsi->state);
3555         ice_napi_enable_all(vsi);
3556         ice_vsi_ena_irq(vsi);
3557 
3558         if (vsi->port_info &&
3559             (vsi->port_info->phy.link_info.link_info & ICE_AQ_LINK_UP) &&
3560             vsi->netdev) {
3561                 ice_print_link_msg(vsi, true);
3562                 netif_tx_start_all_queues(vsi->netdev);
3563                 netif_carrier_on(vsi->netdev);
3564         }
3565 
3566         ice_service_task_schedule(pf);
3567 
3568         return 0;
3569 }
3570 
3571 /**
3572  * ice_up - Bring the connection back up after being down
3573  * @vsi: VSI being configured
3574  */
3575 int ice_up(struct ice_vsi *vsi)
3576 {
3577         int err;
3578 
3579         err = ice_vsi_cfg(vsi);
3580         if (!err)
3581                 err = ice_up_complete(vsi);
3582 
3583         return err;
3584 }
3585 
3586 /**
3587  * ice_fetch_u64_stats_per_ring - get packets and bytes stats per ring
3588  * @ring: Tx or Rx ring to read stats from
3589  * @pkts: packets stats counter
3590  * @bytes: bytes stats counter
3591  *
3592  * This function fetches stats from the ring using the u64_stats sync
3593  * primitives needed to read u64 values consistently on 32-bit machines.
3594  */
3595 static void
3596 ice_fetch_u64_stats_per_ring(struct ice_ring *ring, u64 *pkts, u64 *bytes)
3597 {
3598         unsigned int start;
3599         *pkts = 0;
3600         *bytes = 0;
3601 
3602         if (!ring)
3603                 return;
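             /* retry the snapshot if the writer side updated the counters while
              * we were reading them (u64_stats_fetch_retry_irq returns true)
              */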
3604         do {
3605                 start = u64_stats_fetch_begin_irq(&ring->syncp);
3606                 *pkts = ring->stats.pkts;
3607                 *bytes = ring->stats.bytes;
3608         } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
3609 }
3610 
3611 /**
3612  * ice_update_vsi_ring_stats - Update VSI stats counters
3613  * @vsi: the VSI to be updated
3614  */
3615 static void ice_update_vsi_ring_stats(struct ice_vsi *vsi)
3616 {
3617         struct rtnl_link_stats64 *vsi_stats = &vsi->net_stats;
3618         struct ice_ring *ring;
3619         u64 pkts, bytes;
3620         int i;
3621 
3622         /* reset netdev stats */
3623         vsi_stats->tx_packets = 0;
3624         vsi_stats->tx_bytes = 0;
3625         vsi_stats->rx_packets = 0;
3626         vsi_stats->rx_bytes = 0;
3627 
3628         /* reset non-netdev (extended) stats */
3629         vsi->tx_restart = 0;
3630         vsi->tx_busy = 0;
3631         vsi->tx_linearize = 0;
3632         vsi->rx_buf_failed = 0;
3633         vsi->rx_page_failed = 0;
3634 
3635         rcu_read_lock();
3636 
3637         /* update Tx rings counters */
3638         ice_for_each_txq(vsi, i) {
3639                 ring = READ_ONCE(vsi->tx_rings[i]);
3640                 ice_fetch_u64_stats_per_ring(ring, &pkts, &bytes);
3641                 vsi_stats->tx_packets += pkts;
3642                 vsi_stats->tx_bytes += bytes;
3643                 vsi->tx_restart += ring->tx_stats.restart_q;
3644                 vsi->tx_busy += ring->tx_stats.tx_busy;
3645                 vsi->tx_linearize += ring->tx_stats.tx_linearize;
3646         }
3647 
3648         /* update Rx rings counters */
3649         ice_for_each_rxq(vsi, i) {
3650                 ring = READ_ONCE(vsi->rx_rings[i]);
3651                 ice_fetch_u64_stats_per_ring(ring, &pkts, &bytes);
3652                 vsi_stats->rx_packets += pkts;
3653                 vsi_stats->rx_bytes += bytes;
3654                 vsi->rx_buf_failed += ring->rx_stats.alloc_buf_failed;
3655                 vsi->rx_page_failed += ring->rx_stats.alloc_page_failed;
3656         }
3657 
3658         rcu_read_unlock();
3659 }
3660 
3661 /**
3662  * ice_update_vsi_stats - Update VSI stats counters
3663  * @vsi: the VSI to be updated
3664  */
3665 void ice_update_vsi_stats(struct ice_vsi *vsi)
3666 {
3667         struct rtnl_link_stats64 *cur_ns = &vsi->net_stats;
3668         struct ice_eth_stats *cur_es = &vsi->eth_stats;
3669         struct ice_pf *pf = vsi->back;
3670 
3671         if (test_bit(__ICE_DOWN, vsi->state) ||
3672             test_bit(__ICE_CFG_BUSY, pf->state))
3673                 return;
3674 
3675         /* get stats as recorded by Tx/Rx rings */
3676         ice_update_vsi_ring_stats(vsi);
3677 
3678         /* get VSI stats as recorded by the hardware */
3679         ice_update_eth_stats(vsi);
3680 
3681         cur_ns->tx_errors = cur_es->tx_errors;
3682         cur_ns->rx_dropped = cur_es->rx_discards;
3683         cur_ns->tx_dropped = cur_es->tx_discards;
3684         cur_ns->multicast = cur_es->rx_multicast;
3685 
3686         /* update some more netdev stats if this is main VSI */
3687         if (vsi->type == ICE_VSI_PF) {
3688                 cur_ns->rx_crc_errors = pf->stats.crc_errors;
3689                 cur_ns->rx_errors = pf->stats.crc_errors +
3690                                     pf->stats.illegal_bytes;
3691                 cur_ns->rx_length_errors = pf->stats.rx_len_errors;
3692                 /* record drops from the port level */
3693                 cur_ns->rx_missed_errors = pf->stats.eth.rx_discards;
3694         }
3695 }
3696 
3697 /**
3698  * ice_update_pf_stats - Update PF port stats counters
3699  * @pf: PF whose stats need to be updated
3700  */
3701 void ice_update_pf_stats(struct ice_pf *pf)
3702 {
3703         struct ice_hw_port_stats *prev_ps, *cur_ps;
3704         struct ice_hw *hw = &pf->hw;
3705         u8 port;
3706 
3707         port = hw->port_info->lport;
3708         prev_ps = &pf->stats_prev;
3709         cur_ps = &pf->stats;
3710 
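             /* each ice_stat_update40/32 call below reads a hardware counter and
              * accumulates the delta since the previous snapshot; stat_prev_loaded
              * tells the helpers whether a valid previous value exists (it is
              * false on the first update after probe/reset)
              */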
3711         ice_stat_update40(hw, GLPRT_GORCL(port), pf->stat_prev_loaded,
3712                           &prev_ps->eth.rx_bytes,
3713                           &cur_ps->eth.rx_bytes);
3714 
3715         ice_stat_update40(hw, GLPRT_UPRCL(port), pf->stat_prev_loaded,
3716                           &prev_ps->eth.rx_unicast,
3717                           &cur_ps->eth.rx_unicast);
3718 
3719         ice_stat_update40(hw, GLPRT_MPRCL(port), pf->stat_prev_loaded,
3720                           &prev_ps->eth.rx_multicast,
3721                           &cur_ps->eth.rx_multicast);
3722 
3723         ice_stat_update40(hw, GLPRT_BPRCL(port), pf->stat_prev_loaded,
3724                           &prev_ps->eth.rx_broadcast,
3725                           &cur_ps->eth.rx_broadcast);
3726 
3727         ice_stat_update32(hw, PRTRPB_RDPC, pf->stat_prev_loaded,
3728                           &prev_ps->eth.rx_discards,
3729                           &cur_ps->eth.rx_discards);
3730 
3731         ice_stat_update40(hw, GLPRT_GOTCL(port), pf->stat_prev_loaded,
3732                           &prev_ps->eth.tx_bytes,
3733                           &cur_ps->eth.tx_bytes);
3734 
3735         ice_stat_update40(hw, GLPRT_UPTCL(port), pf->stat_prev_loaded,
3736                           &prev_ps->eth.tx_unicast,
3737                           &cur_ps->eth.tx_unicast);
3738 
3739         ice_stat_update40(hw, GLPRT_MPTCL(port), pf->stat_prev_loaded,
3740                           &prev_ps->eth.tx_multicast,
3741                           &cur_ps->eth.tx_multicast);
3742 
3743         ice_stat_update40(hw, GLPRT_BPTCL(port), pf->stat_prev_loaded,
3744                           &prev_ps->eth.tx_broadcast,
3745                           &cur_ps->eth.tx_broadcast);
3746 
3747         ice_stat_update32(hw, GLPRT_TDOLD(port), pf->stat_prev_loaded,
3748                           &prev_ps->tx_dropped_link_down,
3749                           &cur_ps->tx_dropped_link_down);
3750 
3751         ice_stat_update40(hw, GLPRT_PRC64L(port), pf->stat_prev_loaded,
3752                           &prev_ps->rx_size_64, &cur_ps->rx_size_64);
3753 
3754         ice_stat_update40(hw, GLPRT_PRC127L(port), pf->stat_prev_loaded,
3755                           &prev_ps->rx_size_127, &cur_ps->rx_size_127);
3756 
3757         ice_stat_update40(hw, GLPRT_PRC255L(port), pf->stat_prev_loaded,
3758                           &prev_ps->rx_size_255, &cur_ps->rx_size_255);
3759 
3760         ice_stat_update40(hw, GLPRT_PRC511L(port), pf->stat_prev_loaded,
3761                           &prev_ps->rx_size_511, &cur_ps->rx_size_511);
3762 
3763         ice_stat_update40(hw, GLPRT_PRC1023L(port), pf->stat_prev_loaded,
3764                           &prev_ps->rx_size_1023, &cur_ps->rx_size_1023);
3765 
3766         ice_stat_update40(hw, GLPRT_PRC1522L(port), pf->stat_prev_loaded,
3767                           &prev_ps->rx_size_1522, &cur_ps->rx_size_1522);
3768 
3769         ice_stat_update40(hw, GLPRT_PRC9522L(port), pf->stat_prev_loaded,
3770                           &prev_ps->rx_size_big, &cur_ps->rx_size_big);
3771 
3772         ice_stat_update40(hw, GLPRT_PTC64L(port), pf->stat_prev_loaded,
3773                           &prev_ps->tx_size_64, &cur_ps->tx_size_64);
3774 
3775         ice_stat_update40(hw, GLPRT_PTC127L(port), pf->stat_prev_loaded,
3776                           &prev_ps->tx_size_127, &cur_ps->tx_size_127);
3777 
3778         ice_stat_update40(hw, GLPRT_PTC255L(port), pf->stat_prev_loaded,
3779                           &prev_ps->tx_size_255, &cur_ps->tx_size_255);
3780 
3781         ice_stat_update40(hw, GLPRT_PTC511L(port), pf->stat_prev_loaded,
3782                           &prev_ps->tx_size_511, &cur_ps->tx_size_511);
3783 
3784         ice_stat_update40(hw, GLPRT_PTC1023L(port), pf->stat_prev_loaded,
3785                           &prev_ps->tx_size_1023, &cur_ps->tx_size_1023);
3786 
3787         ice_stat_update40(hw, GLPRT_PTC1522L(port), pf->stat_prev_loaded,
3788                           &prev_ps->tx_size_1522, &cur_ps->tx_size_1522);
3789 
3790         ice_stat_update40(hw, GLPRT_PTC9522L(port), pf->stat_prev_loaded,
3791                           &prev_ps->tx_size_big, &cur_ps->tx_size_big);
3792 
3793         ice_stat_update32(hw, GLPRT_LXONRXC(port), pf->stat_prev_loaded,
3794                           &prev_ps->link_xon_rx, &cur_ps->link_xon_rx);
3795 
3796         ice_stat_update32(hw, GLPRT_LXOFFRXC(port), pf->stat_prev_loaded,
3797                           &prev_ps->link_xoff_rx, &cur_ps->link_xoff_rx);
3798 
3799         ice_stat_update32(hw, GLPRT_LXONTXC(port), pf->stat_prev_loaded,
3800                           &prev_ps->link_xon_tx, &cur_ps->link_xon_tx);
3801 
3802         ice_stat_update32(hw, GLPRT_LXOFFTXC(port), pf->stat_prev_loaded,
3803                           &prev_ps->link_xoff_tx, &cur_ps->link_xoff_tx);
3804 
3805         ice_update_dcb_stats(pf);
3806 
3807         ice_stat_update32(hw, GLPRT_CRCERRS(port), pf->stat_prev_loaded,
3808                           &prev_ps->crc_errors, &cur_ps->crc_errors);
3809 
3810         ice_stat_update32(hw, GLPRT_ILLERRC(port), pf->stat_prev_loaded,
3811                           &prev_ps->illegal_bytes, &cur_ps->illegal_bytes);
3812 
3813         ice_stat_update32(hw, GLPRT_MLFC(port), pf->stat_prev_loaded,
3814                           &prev_ps->mac_local_faults,
3815                           &cur_ps->mac_local_faults);
3816 
3817         ice_stat_update32(hw, GLPRT_MRFC(port), pf->stat_prev_loaded,
3818                           &prev_ps->mac_remote_faults,
3819                           &cur_ps->mac_remote_faults);
3820 
3821         ice_stat_update32(hw, GLPRT_RLEC(port), pf->stat_prev_loaded,
3822                           &prev_ps->rx_len_errors, &cur_ps->rx_len_errors);
3823 
3824         ice_stat_update32(hw, GLPRT_RUC(port), pf->stat_prev_loaded,
3825                           &prev_ps->rx_undersize, &cur_ps->rx_undersize);
3826 
3827         ice_stat_update32(hw, GLPRT_RFC(port), pf->stat_prev_loaded,
3828                           &prev_ps->rx_fragments, &cur_ps->rx_fragments);
3829 
3830         ice_stat_update32(hw, GLPRT_ROC(port), pf->stat_prev_loaded,
3831                           &prev_ps->rx_oversize, &cur_ps->rx_oversize);
3832 
3833         ice_stat_update32(hw, GLPRT_RJC(port), pf->stat_prev_loaded,
3834                           &prev_ps->rx_jabber, &cur_ps->rx_jabber);
3835 
3836         pf->stat_prev_loaded = true;
3837 }
3838 
3839 /**
3840  * ice_get_stats64 - get statistics for network device structure
3841  * @netdev: network interface device structure
3842  * @stats: main device statistics structure
3843  */
3844 static
3845 void ice_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
3846 {
3847         struct ice_netdev_priv *np = netdev_priv(netdev);
3848         struct rtnl_link_stats64 *vsi_stats;
3849         struct ice_vsi *vsi = np->vsi;
3850 
3851         vsi_stats = &vsi->net_stats;
3852 
3853         if (!vsi->num_txq || !vsi->num_rxq)
3854                 return;
3855 
3856         /* netdev packet/byte stats come from the per-ring counters and are
3857          * obtained by summing them up (done by ice_update_vsi_ring_stats).
3858          * Only call the update routine and read the registers if the VSI is
3859          * not down.
3860          */
3861         if (!test_bit(__ICE_DOWN, vsi->state))
3862                 ice_update_vsi_ring_stats(vsi);
3863         stats->tx_packets = vsi_stats->tx_packets;
3864         stats->tx_bytes = vsi_stats->tx_bytes;
3865         stats->rx_packets = vsi_stats->rx_packets;
3866         stats->rx_bytes = vsi_stats->rx_bytes;
3867 
3868         /* The rest of the stats can be read from the hardware but instead we
3869          * just return values that the watchdog task has already obtained from
3870          * the hardware.
3871          */
3872         stats->multicast = vsi_stats->multicast;
3873         stats->tx_errors = vsi_stats->tx_errors;
3874         stats->tx_dropped = vsi_stats->tx_dropped;
3875         stats->rx_errors = vsi_stats->rx_errors;
3876         stats->rx_dropped = vsi_stats->rx_dropped;
3877         stats->rx_crc_errors = vsi_stats->rx_crc_errors;
3878         stats->rx_length_errors = vsi_stats->rx_length_errors;
3879 }
3880 
3881 /**
3882  * ice_napi_disable_all - Disable NAPI for all q_vectors in the VSI
3883  * @vsi: VSI having NAPI disabled
3884  */
3885 static void ice_napi_disable_all(struct ice_vsi *vsi)
3886 {
3887         int q_idx;
3888 
3889         if (!vsi->netdev)
3890                 return;
3891 
3892         ice_for_each_q_vector(vsi, q_idx) {
3893                 struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
3894 
3895                 if (q_vector->rx.ring || q_vector->tx.ring)
3896                         napi_disable(&q_vector->napi);
3897         }
3898 }
3899 
3900 /**
3901  * ice_down - Shutdown the connection
3902  * @vsi: The VSI being stopped
3903  */
3904 int ice_down(struct ice_vsi *vsi)
3905 {
3906         int i, tx_err, rx_err, link_err = 0;
3907 
3908         /* Caller of this function is expected to set the
3909          * vsi->state __ICE_DOWN bit
3910          */
3911         if (vsi->netdev) {
3912                 netif_carrier_off(vsi->netdev);
3913                 netif_tx_disable(vsi->netdev);
3914         }
3915 
3916         ice_vsi_dis_irq(vsi);
3917 
3918         tx_err = ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, 0);
3919         if (tx_err)
3920                 netdev_err(vsi->netdev,
3921                            "Failed stop Tx rings, VSI %d error %d\n",
3922                            vsi->vsi_num, tx_err);
3923 
3924         rx_err = ice_vsi_stop_rx_rings(vsi);
3925         if (rx_err)
3926                 netdev_err(vsi->netdev,
3927                            "Failed stop Rx rings, VSI %d error %d\n",
3928                            vsi->vsi_num, rx_err);
3929 
3930         ice_napi_disable_all(vsi);
3931 
3932         if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags)) {
3933                 link_err = ice_force_phys_link_state(vsi, false);
3934                 if (link_err)
3935                         netdev_err(vsi->netdev,
3936                                    "Failed to set physical link down, VSI %d error %d\n",
3937                                    vsi->vsi_num, link_err);
3938         }
3939 
3940         ice_for_each_txq(vsi, i)
3941                 ice_clean_tx_ring(vsi->tx_rings[i]);
3942 
3943         ice_for_each_rxq(vsi, i)
3944                 ice_clean_rx_ring(vsi->rx_rings[i]);
3945 
3946         if (tx_err || rx_err || link_err) {
3947                 netdev_err(vsi->netdev,
3948                            "Failed to close VSI 0x%04X on switch 0x%04X\n",
3949                            vsi->vsi_num, vsi->vsw->sw_id);
3950                 return -EIO;
3951         }
3952 
3953         return 0;
3954 }
3955 
3956 /**
3957  * ice_vsi_setup_tx_rings - Allocate VSI Tx queue resources
3958  * @vsi: VSI having resources allocated
3959  *
3960  * Return 0 on success, negative on failure
3961  */
3962 int ice_vsi_setup_tx_rings(struct ice_vsi *vsi)
3963 {
3964         int i, err = 0;
3965 
3966         if (!vsi->num_txq) {
3967                 dev_err(&vsi->back->pdev->dev, "VSI %d has 0 Tx queues\n",
3968                         vsi->vsi_num);
3969                 return -EINVAL;
3970         }
3971 
3972         ice_for_each_txq(vsi, i) {
3973                 struct ice_ring *ring = vsi->tx_rings[i];
3974 
3975                 if (!ring)
3976                         return -EINVAL;
3977 
3978                 ring->netdev = vsi->netdev;
3979                 err = ice_setup_tx_ring(ring);
3980                 if (err)
3981                         break;
3982         }
3983 
3984         return err;
3985 }
3986 
3987 /**
3988  * ice_vsi_setup_rx_rings - Allocate VSI Rx queue resources
3989  * @vsi: VSI having resources allocated
3990  *
3991  * Return 0 on success, negative on failure
3992  */
3993 int ice_vsi_setup_rx_rings(struct ice_vsi *vsi)
3994 {
3995         int i, err = 0;
3996 
3997         if (!vsi->num_rxq) {
3998                 dev_err(&vsi->back->pdev->dev, "VSI %d has 0 Rx queues\n",
3999                         vsi->vsi_num);
4000                 return -EINVAL;
4001         }
4002 
4003         ice_for_each_rxq(vsi, i) {
4004                 struct ice_ring *ring = vsi->rx_rings[i];
4005 
4006                 if (!ring)
4007                         return -EINVAL;
4008 
4009                 ring->netdev = vsi->netdev;
4010                 err = ice_setup_rx_ring(ring);
4011                 if (err)
4012                         break;
4013         }
4014 
4015         return err;
4016 }
4017 
4018 /**
4019  * ice_vsi_open - Called when a network interface is made active
4020  * @vsi: the VSI to open
4021  *
4022  * Initialization of the VSI
4023  *
4024  * Returns 0 on success, negative value on error
4025  */
4026 static int ice_vsi_open(struct ice_vsi *vsi)
4027 {
4028         char int_name[ICE_INT_NAME_STR_LEN];
4029         struct ice_pf *pf = vsi->back;
4030         int err;
4031 
4032         /* allocate descriptors */
4033         err = ice_vsi_setup_tx_rings(vsi);
4034         if (err)
4035                 goto err_setup_tx;
4036 
4037         err = ice_vsi_setup_rx_rings(vsi);
4038         if (err)
4039                 goto err_setup_rx;
4040 
4041         err = ice_vsi_cfg(vsi);
4042         if (err)
4043                 goto err_setup_rx;
4044 
4045         snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
4046                  dev_driver_string(&pf->pdev->dev), vsi->netdev->name);
4047         err = ice_vsi_req_irq_msix(vsi, int_name);
4048         if (err)
4049                 goto err_setup_rx;
4050 
4051         /* Notify the stack of the actual queue counts. */
4052         err = netif_set_real_num_tx_queues(vsi->netdev, vsi->num_txq);
4053         if (err)
4054                 goto err_set_qs;
4055 
4056         err = netif_set_real_num_rx_queues(vsi->netdev, vsi->num_rxq);
4057         if (err)
4058                 goto err_set_qs;
4059 
4060         err = ice_up_complete(vsi);
4061         if (err)
4062                 goto err_up_complete;
4063 
4064         return 0;
4065 
4066 err_up_complete:
4067         ice_down(vsi);
4068 err_set_qs:
4069         ice_vsi_free_irq(vsi);
4070 err_setup_rx:
4071         ice_vsi_free_rx_rings(vsi);
4072 err_setup_tx:
4073         ice_vsi_free_tx_rings(vsi);
4074 
4075         return err;
4076 }
4077 
4078 /**
4079  * ice_vsi_release_all - Delete all VSIs
4080  * @pf: PF from which all VSIs are being removed
4081  */
4082 static void ice_vsi_release_all(struct ice_pf *pf)
4083 {
4084         int err, i;
4085 
4086         if (!pf->vsi)
4087                 return;
4088 
4089         ice_for_each_vsi(pf, i) {
4090                 if (!pf->vsi[i])
4091                         continue;
4092 
4093                 err = ice_vsi_release(pf->vsi[i]);
4094                 if (err)
4095                         dev_dbg(&pf->pdev->dev,
4096                                 "Failed to release pf->vsi[%d], err %d, vsi_num = %d\n",
4097                                 i, err, pf->vsi[i]->vsi_num);
4098         }
4099 }
4100 
4101 /**
4102  * ice_ena_vsi - resume a VSI
4103  * @vsi: the VSI being resumed
4104  * @locked: is the rtnl_lock already held
4105  */
4106 static int ice_ena_vsi(struct ice_vsi *vsi, bool locked)
4107 {
4108         int err = 0;
4109 
4110         if (!test_bit(__ICE_NEEDS_RESTART, vsi->state))
4111                 return 0;
4112 
4113         clear_bit(__ICE_NEEDS_RESTART, vsi->state);
4114 
4115         if (vsi->netdev && vsi->type == ICE_VSI_PF) {
4116                 if (netif_running(vsi->netdev)) {
4117                         if (!locked)
4118                                 rtnl_lock();
4119 
4120                         err = ice_open(vsi->netdev);
4121 
4122                         if (!locked)
4123                                 rtnl_unlock();
4124                 }
4125         }
4126 
4127         return err;
4128 }
4129 
4130 /**
4131  * ice_pf_ena_all_vsi - Resume all VSIs on a PF
4132  * @pf: the PF
4133  * @locked: is the rtnl_lock already held
4134  */
4135 #ifdef CONFIG_DCB
4136 int ice_pf_ena_all_vsi(struct ice_pf *pf, bool locked)
4137 {
4138         int v;
4139 
4140         ice_for_each_vsi(pf, v)
4141                 if (pf->vsi[v])
4142                         if (ice_ena_vsi(pf->vsi[v], locked))
4143                                 return -EIO;
4144 
4145         return 0;
4146 }
4147 #endif /* CONFIG_DCB */
4148 
4149 /**
4150  * ice_vsi_rebuild_by_type - Rebuild VSI of a given type
4151  * @pf: pointer to the PF instance
4152  * @type: VSI type to rebuild
4153  *
4154  * Iterates through the pf->vsi array and rebuilds VSIs of the requested type
4155  */
4156 static int ice_vsi_rebuild_by_type(struct ice_pf *pf, enum ice_vsi_type type)
4157 {
4158         enum ice_status status;
4159         int i, err;
4160 
4161         ice_for_each_vsi(pf, i) {
4162                 struct ice_vsi *vsi = pf->vsi[i];
4163 
4164                 if (!vsi || vsi->type != type)
4165                         continue;
4166 
4167                 /* rebuild the VSI */
4168                 err = ice_vsi_rebuild(vsi);
4169                 if (err) {
4170                         dev_err(&pf->pdev->dev,
4171                                 "rebuild VSI failed, err %d, VSI index %d, type %d\n",
4172                                 err, vsi->idx, type);
4173                         return err;
4174                 }
4175 
4176                 /* replay filters for the VSI */
4177                 status = ice_replay_vsi(&pf->hw, vsi->idx);
4178                 if (status) {
4179                         dev_err(&pf->pdev->dev,
4180                                 "replay VSI failed, status %d, VSI index %d, type %d\n",
4181                                 status, vsi->idx, type);
4182                         return -EIO;
4183                 }
4184 
4185                 /* Re-map HW VSI number, using VSI handle that has been
4186                  * previously validated in ice_replay_vsi() call above
4187                  */
4188                 vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx);
4189 
4190                 /* enable the VSI */
4191                 err = ice_ena_vsi(vsi, false);
4192                 if (err) {
4193                         dev_err(&pf->pdev->dev,
4194                                 "enable VSI failed, err %d, VSI index %d, type %d\n",
4195                                 err, vsi->idx, type);
4196                         return err;
4197                 }
4198 
4199                 dev_info(&pf->pdev->dev, "VSI rebuilt. VSI index %d, type %d\n",
4200                          vsi->idx, type);
4201         }
4202 
4203         return 0;
4204 }
4205 
4206 /**
4207  * ice_update_pf_netdev_link - Update PF netdev link status
4208  * @pf: pointer to the PF instance
4209  */
4210 static void ice_update_pf_netdev_link(struct ice_pf *pf)
4211 {
4212         bool link_up;
4213         int i;
4214 
4215         ice_for_each_vsi(pf, i) {
4216                 struct ice_vsi *vsi = pf->vsi[i];
4217 
4218                 if (!vsi || vsi->type != ICE_VSI_PF)
4219                         return;
4220 
4221                 ice_get_link_status(pf->vsi[i]->port_info, &link_up);
4222                 if (link_up) {
4223                         netif_carrier_on(pf->vsi[i]->netdev);
4224                         netif_tx_wake_all_queues(pf->vsi[i]->netdev);
4225                 } else {
4226                         netif_carrier_off(pf->vsi[i]->netdev);
4227                         netif_tx_stop_all_queues(pf->vsi[i]->netdev);
4228                 }
4229         }
4230 }
4231 
4232 /**
4233  * ice_rebuild - rebuild after reset
4234  * @pf: PF to rebuild
4235  * @reset_type: type of reset
4236  */
4237 static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
4238 {
4239         struct device *dev = &pf->pdev->dev;
4240         struct ice_hw *hw = &pf->hw;
4241         enum ice_status ret;
4242         int err;
4243 
4244         if (test_bit(__ICE_DOWN, pf->state))
4245                 goto clear_recovery;
4246 
4247         dev_dbg(dev, "rebuilding PF after reset_type=%d\n", reset_type);
4248 
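             /* rough rebuild sequence: bring the control queues back up, reload or
              * refresh the DDP tables and device capabilities, re-init the Tx
              * scheduler and misc IRQ vector, then rebuild and replay each VSI
              * before reporting the driver version to firmware again
              */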
4249         ret = ice_init_all_ctrlq(hw);
4250         if (ret) {
4251                 dev_err(dev, "control queues init failed %d\n", ret);
4252                 goto err_init_ctrlq;
4253         }
4254 
4255         /* if DDP was previously loaded successfully */
4256         if (!ice_is_safe_mode(pf)) {
4257                 /* reload the SW DB of filter tables */
4258                 if (reset_type == ICE_RESET_PFR)
4259                         ice_fill_blk_tbls(hw);
4260                 else
4261                         /* Reload DDP Package after CORER/GLOBR reset */
4262                         ice_load_pkg(NULL, pf);
4263         }
4264 
4265         ret = ice_clear_pf_cfg(hw);
4266         if (ret) {
4267                 dev_err(dev, "clear PF configuration failed %d\n", ret);
4268                 goto err_init_ctrlq;
4269         }
4270 
4271         ice_clear_pxe_mode(hw);
4272 
4273         ret = ice_get_caps(hw);
4274         if (ret) {
4275                 dev_err(dev, "ice_get_caps failed %d\n", ret);
4276                 goto err_init_ctrlq;
4277         }
4278 
4279         err = ice_sched_init_port(hw->port_info);
4280         if (err)
4281                 goto err_sched_init_port;
4282 
4283         err = ice_update_link_info(hw->port_info);
4284         if (err)
4285                 dev_err(&pf->pdev->dev, "Get link status error %d\n", err);
4286 
4287         /* start misc vector */
4288         err = ice_req_irq_msix_misc(pf);
4289         if (err) {
4290                 dev_err(dev, "misc vector setup failed: %d\n", err);
4291                 goto err_sched_init_port;
4292         }
4293 
4294         if (test_bit(ICE_FLAG_DCB_ENA, pf->flags))
4295                 ice_dcb_rebuild(pf);
4296 
4297         /* rebuild PF VSI */
4298         err = ice_vsi_rebuild_by_type(pf, ICE_VSI_PF);
4299         if (err) {
4300                 dev_err(dev, "PF VSI rebuild failed: %d\n", err);
4301                 goto err_vsi_rebuild;
4302         }
4303 
4304         if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags)) {
4305                 err = ice_vsi_rebuild_by_type(pf, ICE_VSI_VF);
4306                 if (err) {
4307                         dev_err(dev, "VF VSI rebuild failed: %d\n", err);
4308                         goto err_vsi_rebuild;
4309                 }
4310         }
4311 
4312         ice_update_pf_netdev_link(pf);
4313 
4314         /* tell the firmware we are up */
4315         ret = ice_send_version(pf);
4316         if (ret) {
4317                 dev_err(dev,
4318                         "Rebuild failed due to error sending driver version: %d\n",
4319                         ret);
4320                 goto err_vsi_rebuild;
4321         }
4322 
4323         ice_replay_post(hw);
4324 
4325         /* if we get here, reset flow is successful */
4326         clear_bit(__ICE_RESET_FAILED, pf->state);
4327         return;
4328 
4329 err_vsi_rebuild:
4330 err_sched_init_port:
4331         ice_sched_cleanup_all(hw);
4332 err_init_ctrlq:
4333         ice_shutdown_all_ctrlq(hw);
4334         set_bit(__ICE_RESET_FAILED, pf->state);
4335 clear_recovery:
4336         /* set this bit in PF state to control service task scheduling */
4337         set_bit(__ICE_NEEDS_RESTART, pf->state);
4338         dev_err(dev, "Rebuild failed, unload and reload driver\n");
4339 }
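
/* Illustrative sketch (not part of the driver): how a PF-level reset is
 * typically sequenced around ice_rebuild(). The real flow lives in
 * ice_prepare_for_reset()/ice_do_reset()/ice_reset_subtask() earlier in this
 * file; the ordering below is a simplified assumption for illustration only.
 */
#if 0
static void example_pf_reset_flow(struct ice_pf *pf)
{
	/* quiesce VSIs and stop Tx/Rx before touching the hardware */
	ice_prepare_for_reset(pf);

	/* ... the PFR/CORER/GLOBR reset itself is triggered here ... */

	/* bring the PF back up; on failure __ICE_RESET_FAILED gets set */
	ice_rebuild(pf, ICE_RESET_PFR);
}
#endif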
4340 
4341 /**
4342  * ice_change_mtu - NDO callback to change the MTU
4343  * @netdev: network interface device structure
4344  * @new_mtu: new value for maximum frame size
4345  *
4346  * Returns 0 on success, negative on failure
4347  */
4348 static int ice_change_mtu(struct net_device *netdev, int new_mtu)
4349 {
4350         struct ice_netdev_priv *np = netdev_priv(netdev);
4351         struct ice_vsi *vsi = np->vsi;
4352         struct ice_pf *pf = vsi->back;
4353         u8 count = 0;
4354 
4355         if (new_mtu == netdev->mtu) {
4356                 netdev_warn(netdev, "MTU is already %u\n", netdev->mtu);
4357                 return 0;
4358         }
4359 
4360         if (new_mtu < netdev->min_mtu) {
4361                 netdev_err(netdev, "new MTU invalid. min_mtu is %d\n",
4362                            netdev->min_mtu);
4363                 return -EINVAL;
4364         } else if (new_mtu > netdev->max_mtu) {
4365                 netdev_err(netdev, "new MTU invalid. max_mtu is %d\n",
4366                            netdev->max_mtu);
4367                 return -EINVAL;
4368         }
4369         /* if a reset is in progress, wait for some time for it to complete */
4370         do {
4371                 if (ice_is_reset_in_progress(pf->state)) {
4372                         count++;
4373                         usleep_range(1000, 2000);
4374                 } else {
4375                         break;
4376                 }
4377 
4378         } while (count < 100);
4379 
4380         if (count == 100) {
4381                 netdev_err(netdev, "can't change MTU. Device is busy\n");
4382                 return -EBUSY;
4383         }
4384 
4385         netdev->mtu = new_mtu;
4386 
4387         /* if VSI is up, bring it down and then back up */
4388         if (!test_and_set_bit(__ICE_DOWN, vsi->state)) {
4389                 int err;
4390 
4391                 err = ice_down(vsi);
4392                 if (err) {
4393                         netdev_err(netdev, "change MTU if_down err %d\n", err);
4394                         return err;
4395                 }
4396 
4397                 err = ice_up(vsi);
4398                 if (err) {
4399                         netdev_err(netdev, "change MTU if_up err %d\n", err);
4400                         return err;
4401                 }
4402         }
4403 
4404         netdev_info(netdev, "changed MTU to %d\n", new_mtu);
4405         return 0;
4406 }
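
/* Illustrative sketch (not part of the driver): ice_change_mtu() is not
 * called directly; the networking core reaches it through .ndo_change_mtu,
 * e.g. from dev_set_mtu() while holding the RTNL lock. The 9000-byte value
 * below is only an example.
 */
#if 0
static int example_set_jumbo_mtu(struct net_device *netdev)
{
	int err;

	rtnl_lock();
	err = dev_set_mtu(netdev, 9000);	/* lands in ice_change_mtu() */
	rtnl_unlock();

	return err;
}
#endif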
4407 
4408 /**
4409  * ice_set_rss - Set RSS keys and lut
4410  * @vsi: Pointer to VSI structure
4411  * @seed: RSS hash seed
4412  * @lut: Lookup table
4413  * @lut_size: Lookup table size
4414  *
4415  * Returns 0 on success, negative on failure
4416  */
4417 int ice_set_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
4418 {
4419         struct ice_pf *pf = vsi->back;
4420         struct ice_hw *hw = &pf->hw;
4421         enum ice_status status;
4422 
4423         if (seed) {
4424                 struct ice_aqc_get_set_rss_keys *buf =
4425                                   (struct ice_aqc_get_set_rss_keys *)seed;
4426 
4427                 status = ice_aq_set_rss_key(hw, vsi->idx, buf);
4428 
4429                 if (status) {
4430                         dev_err(&pf->pdev->dev,
4431                                 "Cannot set RSS key, err %d aq_err %d\n",
4432                                 status, hw->adminq.sq_last_status);
4433                         return -EIO;
4434                 }
4435         }
4436 
4437         if (lut) {
4438                 status = ice_aq_set_rss_lut(hw, vsi->idx, vsi->rss_lut_type,
4439                                             lut, lut_size);
4440                 if (status) {
4441                         dev_err(&pf->pdev->dev,
4442                                 "Cannot set RSS lut, err %d aq_err %d\n",
4443                                 status, hw->adminq.sq_last_status);
4444                         return -EIO;
4445                 }
4446         }
4447 
4448         return 0;
4449 }
4450 
4451 /**
4452  * ice_get_rss - Get RSS keys and lut
4453  * @vsi: Pointer to VSI structure
4454  * @seed: Buffer to store the keys
4455  * @lut: Buffer to store the lookup table entries
4456  * @lut_size: Size of buffer to store the lookup table entries
4457  *
4458  * Returns 0 on success, negative on failure
4459  */
4460 int ice_get_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
4461 {
4462         struct ice_pf *pf = vsi->back;
4463         struct ice_hw *hw = &pf->hw;
4464         enum ice_status status;
4465 
4466         if (seed) {
4467                 struct ice_aqc_get_set_rss_keys *buf =
4468                                   (struct ice_aqc_get_set_rss_keys *)seed;
4469 
4470                 status = ice_aq_get_rss_key(hw, vsi->idx, buf);
4471                 if (status) {
4472                         dev_err(&pf->pdev->dev,
4473                                 "Cannot get RSS key, err %d aq_err %d\n",
4474                                 status, hw->adminq.sq_last_status);
4475                         return -EIO;
4476                 }
4477         }
4478 
4479         if (lut) {
4480                 status = ice_aq_get_rss_lut(hw, vsi->idx, vsi->rss_lut_type,
4481                                             lut, lut_size);
4482                 if (status) {
4483                         dev_err(&pf->pdev->dev,
4484                                 "Cannot get RSS lut, err %d aq_err %d\n",
4485                                 status, hw->adminq.sq_last_status);
4486                         return -EIO;
4487                 }
4488         }
4489 
4490         return 0;
4491 }
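
/* Illustrative sketch (not part of the driver): one way a configuration path
 * (for example an ethtool-style handler) might program RSS through
 * ice_set_rss(). It assumes vsi->rss_table_size and vsi->rss_size are already
 * populated and reuses ice_fill_rss_lut() defined earlier in this file.
 */
#if 0
static int example_program_rss(struct ice_vsi *vsi)
{
	struct ice_aqc_get_set_rss_keys keys = { 0 };
	u8 *lut;
	int err;

	lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
	if (!lut)
		return -ENOMEM;

	/* spread the VSI's Rx queues evenly across the lookup table */
	ice_fill_rss_lut(lut, vsi->rss_table_size, vsi->rss_size);
	/* fill the hash key from the host's RSS key */
	netdev_rss_key_fill(&keys, sizeof(keys));

	err = ice_set_rss(vsi, (u8 *)&keys, lut, vsi->rss_table_size);

	kfree(lut);
	return err;
}
#endif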
4492 
4493 /**
4494  * ice_bridge_getlink - Get the hardware bridge mode
4495  * @skb: skb buff
4496  * @pid: process ID
4497  * @seq: RTNL message seq
4498  * @dev: the netdev being configured
4499  * @filter_mask: filter mask passed in
4500  * @nlflags: netlink flags passed in
4501  *
4502  * Return the bridge mode (VEB/VEPA)
4503  */
4504 static int
4505 ice_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
4506                    struct net_device *dev, u32 filter_mask, int nlflags)
4507 {
4508         struct ice_netdev_priv *np = netdev_priv(dev);
4509         struct ice_vsi *vsi = np->vsi;
4510         struct ice_pf *pf = vsi->back;
4511         u16 bmode;
4512 
4513         bmode = pf->first_sw->bridge_mode;
4514 
4515         return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bmode, 0, 0, nlflags,
4516                                        filter_mask, NULL);
4517 }
4518 
4519 /**
4520  * ice_vsi_update_bridge_mode - Update VSI for switching bridge mode (VEB/VEPA)
4521  * @vsi: Pointer to VSI structure
4522  * @bmode: Hardware bridge mode (VEB/VEPA)
4523  *
4524  * Returns 0 on success, negative on failure
4525  */
4526 static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode)
4527 {
4528         struct device *dev = &vsi->back->pdev->dev;
4529         struct ice_aqc_vsi_props *vsi_props;
4530         struct ice_hw *hw = &vsi->back->hw;
4531         struct ice_vsi_ctx *ctxt;
4532         enum ice_status status;
4533         int ret = 0;
4534 
4535         vsi_props = &vsi->info;
4536 
4537         ctxt = devm_kzalloc(dev, sizeof(*ctxt), GFP_KERNEL);
4538         if (!ctxt)
4539                 return -ENOMEM;
4540 
4541         ctxt->info = vsi->info;
4542 
4543         if (bmode == BRIDGE_MODE_VEB)
4544                 /* change from VEPA to VEB mode */
4545                 ctxt->info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
4546         else
4547                 /* change from VEB to VEPA mode */
4548                 ctxt->info.sw_flags &= ~ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
4549         ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SW_VALID);
4550 
4551         status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
4552         if (status) {
4553                 dev_err(dev, "update VSI for bridge mode failed, bmode = %d err %d aq_err %d\n",
4554                         bmode, status, hw->adminq.sq_last_status);
4555                 ret = -EIO;
4556                 goto out;
4557         }
4558         /* Update sw flags for bookkeeping */
4559         vsi_props->sw_flags = ctxt->info.sw_flags;
4560 
4561 out:
4562         devm_kfree(dev, ctxt);
4563         return ret;
4564 }
4565 
4566 /**
4567  * ice_bridge_setlink - Set the hardware bridge mode
4568  * @dev: the netdev being configured
4569  * @nlh: RTNL message
4570  * @flags: bridge setlink flags
4571  * @extack: netlink extended ack
4572  *
4573  * Sets the bridge mode (VEB/VEPA) of the switch to which the netdev (VSI) is
4574  * attached. Iterates through the PF VSI list and sets the loopback mode (if
4575  * not already set) for all VSIs connected to this switch. It also updates the
4576  * unicast switch filter rules for the corresponding switch of the netdev.
4577  */
4578 static int
4579 ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
4580                    u16 __always_unused flags,
4581                    struct netlink_ext_ack __always_unused *extack)
4582 {
4583         struct ice_netdev_priv *np = netdev_priv(dev);
4584         struct ice_pf *pf = np->vsi->back;
4585         struct nlattr *attr, *br_spec;
4586         struct ice_hw *hw = &pf->hw;
4587         enum ice_status status;
4588         struct ice_sw *pf_sw;
4589         int rem, v, err = 0;
4590 
4591         pf_sw = pf->first_sw;
4592         /* find the attribute in the netlink message */
4593         br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
4594 
4595         nla_for_each_nested(attr, br_spec, rem) {
4596                 __u16 mode;
4597 
4598                 if (nla_type(attr) != IFLA_BRIDGE_MODE)
4599                         continue;
4600                 mode = nla_get_u16(attr);
4601                 if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
4602                         return -EINVAL;
4603                 /* Continue if bridge mode is not being flipped */
4604                 if (mode == pf_sw->bridge_mode)
4605                         continue;
4606                 /* Iterates through the PF VSI list and update the loopback
4607                  * mode of the VSI
4608                  */
4609                 ice_for_each_vsi(pf, v) {
4610                         if (!pf->vsi[v])
4611                                 continue;
4612                         err = ice_vsi_update_bridge_mode(pf->vsi[v], mode);
4613                         if (err)
4614                                 return err;
4615                 }
4616 
4617                 hw->evb_veb = (mode == BRIDGE_MODE_VEB);
4618                 /* Update the unicast switch filter rules for the corresponding
4619                  * switch of the netdev
4620                  */
4621                 status = ice_update_sw_rule_bridge_mode(hw);
4622                 if (status) {
4623                         netdev_err(dev, "switch rule update failed, mode = %d err %d aq_err %d\n",
4624                                    mode, status, hw->adminq.sq_last_status);
4625                         /* revert hw->evb_veb */
4626                         hw->evb_veb = (pf_sw->bridge_mode == BRIDGE_MODE_VEB);
4627                         return -EIO;
4628                 }
4629 
4630                 pf_sw->bridge_mode = mode;
4631         }
4632 
4633         return 0;
4634 }
4635 
4636 /**
4637  * ice_tx_timeout - Respond to a Tx Hang
4638  * @netdev: network interface device structure
4639  */
4640 static void ice_tx_timeout(struct net_device *netdev)
4641 {
4642         struct ice_netdev_priv *np = netdev_priv(netdev);
4643         struct ice_ring *tx_ring = NULL;
4644         struct ice_vsi *vsi = np->vsi;
4645         struct ice_pf *pf = vsi->back;
4646         int hung_queue = -1;
4647         u32 i;
4648 
4649         pf->tx_timeout_count++;
4650 
4651         /* find the stopped queue the same way dev_watchdog() does */
4652         for (i = 0; i < netdev->num_tx_queues; i++) {
4653                 unsigned long trans_start;
4654                 struct netdev_queue *q;
4655 
4656                 q = netdev_get_tx_queue(netdev, i);
4657                 trans_start = q->trans_start;
4658                 if (netif_xmit_stopped(q) &&
4659                     time_after(jiffies,
4660                                trans_start + netdev->watchdog_timeo)) {
4661                         hung_queue = i;
4662                         break;
4663                 }
4664         }
4665 
4666         if (i == netdev->num_tx_queues)
4667                 netdev_info(netdev, "tx_timeout: no netdev hung queue found\n");
4668         else
4669                 /* now that we have an index, find the tx_ring struct */
4670                 for (i = 0; i < vsi->num_txq; i++)
4671                         if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
4672                                 if (hung_queue == vsi->tx_rings[i]->q_index) {
4673                                         tx_ring = vsi->tx_rings[i];
4674                                         break;
4675                                 }
4676 
4677         /* Reset recovery level if enough time has elapsed after last timeout.
4678          * Also ensure no new reset action happens before next timeout period.
4679          */
4680         if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ * 20)))
4681                 pf->tx_timeout_recovery_level = 1;
4682         else if (time_before(jiffies, (pf->tx_timeout_last_recovery +
4683                                        netdev->watchdog_timeo)))
4684                 return;
4685 
4686         if (tx_ring) {
4687                 struct ice_hw *hw = &pf->hw;
4688                 u32 head, val = 0;
4689 
4690                 head = (rd32(hw, QTX_COMM_HEAD(vsi->txq_map[hung_queue])) &
4691                         QTX_COMM_HEAD_HEAD_M) >> QTX_COMM_HEAD_HEAD_S;
4692                 /* Read interrupt register */
4693                 val = rd32(hw, GLINT_DYN_CTL(tx_ring->q_vector->reg_idx));
4694 
4695                 netdev_info(netdev, "tx_timeout: VSI_num: %d, Q %d, NTC: 0x%x, HW_HEAD: 0x%x, NTU: 0x%x, INT: 0x%x\n",
4696                             vsi->vsi_num, hung_queue, tx_ring->next_to_clean,
4697                             head, tx_ring->next_to_use, val);
4698         }
4699 
4700         pf->tx_timeout_last_recovery = jiffies;
4701         netdev_info(netdev, "tx_timeout recovery level %d, hung_queue %d\n",
4702                     pf->tx_timeout_recovery_level, hung_queue);
4703 
4704         switch (pf->tx_timeout_recovery_level) {
4705         case 1:
4706                 set_bit(__ICE_PFR_REQ, pf->state);
4707                 break;
4708         case 2:
4709                 set_bit(__ICE_CORER_REQ, pf->state);
4710                 break;
4711         case 3:
4712                 set_bit(__ICE_GLOBR_REQ, pf->state);
4713                 break;
4714         default:
4715                 netdev_err(netdev, "tx_timeout recovery unsuccessful, device is in unrecoverable state.\n");
4716                 set_bit(__ICE_DOWN, pf->state);
4717                 set_bit(__ICE_NEEDS_RESTART, vsi->state);
4718                 set_bit(__ICE_SERVICE_DIS, pf->state);
4719                 break;
4720         }
4721 
4722         ice_service_task_schedule(pf);
4723         pf->tx_timeout_recovery_level++;
4724 }
4725 
4726 /**
4727  * ice_open - Called when a network interface becomes active
4728  * @netdev: network interface device structure
4729  *
4730  * The open entry point is called when a network interface is made
4731  * active by the system (IFF_UP). At this point all resources needed
4732  * for transmit and receive operations are allocated, the interrupt
4733  * handler is registered with the OS, the netdev watchdog is enabled,
4734  * and the stack is notified that the interface is ready.
4735  *
4736  * Returns 0 on success, negative value on failure
4737  */
4738 int ice_open(struct net_device *netdev)
4739 {
4740         struct ice_netdev_priv *np = netdev_priv(netdev);
4741         struct ice_vsi *vsi = np->vsi;
4742         struct ice_port_info *pi;
4743         int err;
4744 
4745         if (test_bit(__ICE_NEEDS_RESTART, vsi->back->state)) {
4746                 netdev_err(netdev, "driver needs to be unloaded and reloaded\n");
4747                 return -EIO;
4748         }
4749 
4750         netif_carrier_off(netdev);
4751 
4752         pi = vsi->port_info;
4753         err = ice_update_link_info(pi);
4754         if (err) {
4755                 netdev_err(netdev, "Failed to get link info, error %d\n",
4756                            err);
4757                 return err;
4758         }
4759 
4760         /* Set PHY if there is media; otherwise turn off the PHY */
4761         if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
4762                 err = ice_force_phys_link_state(vsi, true);
4763                 if (err) {
4764                         netdev_err(netdev,
4765                                    "Failed to set physical link up, error %d\n",
4766                                    err);
4767                         return err;
4768                 }
4769         } else {
4770                 err = ice_aq_set_link_restart_an(pi, false, NULL);
4771                 if (err) {
4772                         netdev_err(netdev, "Failed to set PHY state, VSI %d error %d\n",
4773                                    vsi->vsi_num, err);
4774                         return err;
4775                 }
4776                 set_bit(ICE_FLAG_NO_MEDIA, vsi->back->flags);
4777         }
4778 
4779         err = ice_vsi_open(vsi);
4780         if (err)
4781                 netdev_err(netdev, "Failed to open VSI 0x%04X on switch 0x%04X\n",
4782                            vsi->vsi_num, vsi->vsw->sw_id);
4783         return err;
4784 }
4785 
4786 /**
4787  * ice_stop - Disables a network interface
4788  * @netdev: network interface device structure
4789  *
4790  * The stop entry point is called when an interface is de-activated by the OS,
4791  * and the netdevice enters the DOWN state. The hardware is still under the
4792  * driver's control, but the netdev interface is disabled.
4793  *
4794  * Returns success only - not allowed to fail
4795  */
4796 int ice_stop(struct net_device *netdev)
4797 {
4798         struct ice_netdev_priv *np = netdev_priv(netdev);
4799         struct ice_vsi *vsi = np->vsi;
4800 
4801         ice_vsi_close(vsi);
4802 
4803         return 0;
4804 }
4805 
4806 /**
4807  * ice_features_check - Validate encapsulated packet conforms to limits
4808  * @skb: skb buffer
4809  * @netdev: This port's netdev
4810  * @features: Offload features that the stack believes apply
4811  */
4812 static netdev_features_t
4813 ice_features_check(struct sk_buff *skb,
4814                    struct net_device __always_unused *netdev,
4815                    netdev_features_t features)
4816 {
4817         size_t len;
4818 
4819         /* No point in doing any of this if neither checksum nor GSO are
4820          * being requested for this frame. We can rule out both by just
4821          * checking for CHECKSUM_PARTIAL
4822          */
4823         if (skb->ip_summed != CHECKSUM_PARTIAL)
4824                 return features;
4825 
4826         /* We cannot support GSO if the MSS is going to be less than
4827          * 64 bytes. If it is then we need to drop support for GSO.
4828          */
4829         if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64))
4830                 features &= ~NETIF_F_GSO_MASK;
4831 
4832         len = skb_network_header(skb) - skb->data;
4833         if (len & ~(ICE_TXD_MACLEN_MAX))
4834                 goto out_rm_features;
4835 
4836         len = skb_transport_header(skb) - skb_network_header(skb);
4837         if (len & ~(ICE_TXD_IPLEN_MAX))
4838                 goto out_rm_features;
4839 
4840         if (skb->encapsulation) {
4841                 len = skb_inner_network_header(skb) - skb_transport_header(skb);
4842                 if (len & ~(ICE_TXD_L4LEN_MAX))
4843                         goto out_rm_features;
4844 
4845                 len = skb_inner_transport_header(skb) -
4846                       skb_inner_network_header(skb);
4847                 if (len & ~(ICE_TXD_IPLEN_MAX))
4848                         goto out_rm_features;
4849         }
4850 
4851         return features;
4852 out_rm_features:
4853         return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
4854 }
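
/* Illustrative note (not part of the driver): the "len & ~MAX" tests above
 * work because each ICE_TXD_*_MAX is a contiguous low-bit mask covering the
 * largest header length the Tx descriptor field can encode; any set bit
 * outside the mask means the header is too long, so the offloads are
 * dropped. The 7-bit mask below is hypothetical, purely to show the
 * arithmetic; the real values live in the ice Tx descriptor definitions.
 */
#if 0
#define EXAMPLE_LEN_MAX 0x7F	/* hypothetical 7-bit length field */

static bool example_hdr_len_fits(size_t len)
{
	/* 0x50 & ~0x7F == 0 -> fits; 0x90 & ~0x7F != 0 -> too long */
	return !(len & ~EXAMPLE_LEN_MAX);
}
#endif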
4855 
4856 static const struct net_device_ops ice_netdev_safe_mode_ops = {
4857         .ndo_open = ice_open,
4858         .ndo_stop = ice_stop,
4859         .ndo_start_xmit = ice_start_xmit,
4860         .ndo_set_mac_address = ice_set_mac_address,
4861         .ndo_validate_addr = eth_validate_addr,
4862         .ndo_change_mtu = ice_change_mtu,
4863         .ndo_get_stats64 = ice_get_stats64,
4864         .ndo_tx_timeout = ice_tx_timeout,
4865 };
4866 
4867 static const struct net_device_ops ice_netdev_ops = {
4868         .ndo_open = ice_open,
4869         .ndo_stop = ice_stop,
4870         .ndo_start_xmit = ice_start_xmit,
4871         .ndo_features_check = ice_features_check,
4872         .ndo_set_rx_mode = ice_set_rx_mode,
4873         .ndo_set_mac_address = ice_set_mac_address,
4874         .ndo_validate_addr = eth_validate_addr,
4875         .ndo_change_mtu = ice_change_mtu,
4876         .ndo_get_stats64 = ice_get_stats64,
4877         .ndo_set_vf_spoofchk = ice_set_vf_spoofchk,
4878         .ndo_set_vf_mac = ice_set_vf_mac,
4879         .ndo_get_vf_config = ice_get_vf_cfg,
4880         .ndo_set_vf_trust = ice_set_vf_trust,
4881         .ndo_set_vf_vlan = ice_set_vf_port_vlan,
4882         .ndo_set_vf_link_state = ice_set_vf_link_state,
4883         .ndo_vlan_rx_add_vid = ice_vlan_rx_add_vid,
4884         .ndo_vlan_rx_kill_vid = ice_vlan_rx_kill_vid,
4885         .ndo_set_features = ice_set_features,
4886         .ndo_bridge_getlink = ice_bridge_getlink,
4887         .ndo_bridge_setlink = ice_bridge_setlink,
4888         .ndo_fdb_add = ice_fdb_add,
4889         .ndo_fdb_del = ice_fdb_del,
4890         .ndo_tx_timeout = ice_tx_timeout,
4891 };
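
/* Illustrative sketch (not part of the driver): the two ndo tables above are
 * selected when the netdev is configured, depending on whether the DDP
 * package loaded (safe mode uses the reduced table). The real selection is
 * done in ice_set_ops()/ice_cfg_netdev() earlier in this file; the function
 * below is a hypothetical stand-in.
 */
#if 0
static void example_assign_netdev_ops(struct net_device *netdev,
				      struct ice_pf *pf)
{
	if (ice_is_safe_mode(pf))
		netdev->netdev_ops = &ice_netdev_safe_mode_ops;
	else
		netdev->netdev_ops = &ice_netdev_ops;
}
#endif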

/* [<][>][^][v][top][bottom][index][help] */