root/drivers/net/ethernet/sfc/siena_sriov.c

DEFINITIONS

This source file includes the following definitions.
  1. abs_index
  2. efx_siena_sriov_cmd
  3. efx_siena_sriov_usrev
  4. efx_siena_sriov_memcpy
  5. efx_siena_sriov_reset_tx_filter
  6. efx_siena_sriov_reset_rx_filter
  7. __efx_siena_sriov_update_vf_addr
  8. __efx_siena_sriov_push_vf_status
  9. efx_siena_sriov_bufs
  10. bad_vf_index
  11. bad_buf_count
  12. map_vi_index
  13. efx_vfdi_init_evq
  14. efx_vfdi_init_rxq
  15. efx_vfdi_init_txq
  16. efx_vfdi_flush_wake
  17. efx_vfdi_flush_clear
  18. efx_vfdi_fini_all_queues
  19. efx_vfdi_insert_filter
  20. efx_vfdi_remove_all_filters
  21. efx_vfdi_set_status_page
  22. efx_vfdi_clear_status_page
  23. efx_siena_sriov_vfdi
  24. efx_siena_sriov_reset_vf
  25. efx_siena_sriov_reset_vf_work
  26. efx_siena_sriov_handle_no_channel
  27. efx_siena_sriov_probe_channel
  28. efx_siena_sriov_get_channel_name
  29. efx_siena_sriov_probe
  30. efx_siena_sriov_peer_work
  31. efx_siena_sriov_free_local
  32. efx_siena_sriov_vf_alloc
  33. efx_siena_sriov_vfs_fini
  34. efx_siena_sriov_vfs_init
  35. efx_siena_sriov_init
  36. efx_siena_sriov_fini
  37. efx_siena_sriov_event
  38. efx_siena_sriov_flr
  39. efx_siena_sriov_mac_address_changed
  40. efx_siena_sriov_tx_flush_done
  41. efx_siena_sriov_rx_flush_done
  42. efx_siena_sriov_desc_fetch_err
  43. efx_siena_sriov_reset
  44. efx_init_sriov
  45. efx_fini_sriov
  46. efx_siena_sriov_set_vf_mac
  47. efx_siena_sriov_set_vf_vlan
  48. efx_siena_sriov_set_vf_spoofchk
  49. efx_siena_sriov_get_vf_config
  50. efx_siena_sriov_wanted
  51. efx_siena_sriov_configure

   1 // SPDX-License-Identifier: GPL-2.0-only
   2 /****************************************************************************
   3  * Driver for Solarflare network controllers and boards
   4  * Copyright 2010-2012 Solarflare Communications Inc.
   5  */
   6 #include <linux/pci.h>
   7 #include <linux/module.h>
   8 #include "net_driver.h"
   9 #include "efx.h"
  10 #include "nic.h"
  11 #include "io.h"
  12 #include "mcdi.h"
  13 #include "filter.h"
  14 #include "mcdi_pcol.h"
  15 #include "farch_regs.h"
  16 #include "siena_sriov.h"
  17 #include "vfdi.h"
  18 
  19 /* Number of longs required to track all the VIs in a VF */
  20 #define VI_MASK_LENGTH BITS_TO_LONGS(1 << EFX_VI_SCALE_MAX)
  21 
  22 /* Maximum number of RX queues supported */
  23 #define VF_MAX_RX_QUEUES 63
  24 
  25 /**
  26  * enum efx_vf_tx_filter_mode - TX MAC filtering behaviour
  27  * @VF_TX_FILTER_OFF: Disabled
  28  * @VF_TX_FILTER_AUTO: Enabled if MAC address assigned to VF and only
  29  *      2 TX queues allowed per VF.
  30  * @VF_TX_FILTER_ON: Enabled
  31  */
  32 enum efx_vf_tx_filter_mode {
  33         VF_TX_FILTER_OFF,
  34         VF_TX_FILTER_AUTO,
  35         VF_TX_FILTER_ON,
  36 };
  37 
  38 /**
  39  * struct siena_vf - Back-end resource and protocol state for a PCI VF
  40  * @efx: The Efx NIC owning this VF
  41  * @pci_rid: The PCI requester ID for this VF
  42  * @pci_name: The PCI name (formatted address) of this VF
  43  * @index: Index of VF within its port and PF.
  44  * @req: VFDI incoming request work item. Incoming USR_EV events are received
  45  *      by the NAPI handler, but must be handled by executing MCDI requests
  46  *      inside a work item.
  47  * @req_addr: VFDI incoming request DMA address (in VF's PCI address space).
  48  * @req_type: Expected next incoming (from VF) %VFDI_EV_TYPE member.
  49  * @req_seqno: Expected next incoming (from VF) %VFDI_EV_SEQ member.
  50  * @msg_seqno: Next %VFDI_EV_SEQ member to reply to VF. Protected by
  51  *      @status_lock
  52  * @busy: VFDI request queued to be processed or being processed. Receiving
  53  *      a VFDI request when @busy is set is an error condition.
  54  * @buf: Incoming VFDI requests are DMA from the VF into this buffer.
  55  * @buftbl_base: Buffer table entries for this VF start at this index.
  56  * @rx_filtering: Receive filtering has been requested by the VF driver.
  57  * @rx_filter_flags: The flags sent in the %VFDI_OP_INSERT_FILTER request.
  58  * @rx_filter_qid: VF relative qid for RX filter requested by VF.
  59  * @rx_filter_id: Receive MAC filter ID. Only one filter per VF is supported.
  60  * @tx_filter_mode: Transmit MAC filtering mode.
  61  * @tx_filter_id: Transmit MAC filter ID.
  62  * @addr: The MAC address and outer vlan tag of the VF.
  63  * @status_addr: VF DMA address of page for &struct vfdi_status updates.
  64  * @status_lock: Mutex protecting @msg_seqno, @status_addr, @addr,
  65  *      @peer_page_addrs and @peer_page_count from simultaneous
  66  *      updates by the VM and consumption by
  67  *      efx_siena_sriov_update_vf_addr()
  68  * @peer_page_addrs: Pointer to an array of guest pages for local addresses.
   69  * @peer_page_count: Number of entries in @peer_page_addrs.
  70  * @evq0_addrs: Array of guest pages backing evq0.
  71  * @evq0_count: Number of entries in @evq0_addrs.
  72  * @flush_waitq: wait queue used by %VFDI_OP_FINI_ALL_QUEUES handler
  73  *      to wait for flush completions.
  74  * @txq_lock: Mutex for TX queue allocation.
  75  * @txq_mask: Mask of initialized transmit queues.
  76  * @txq_count: Number of initialized transmit queues.
  77  * @rxq_mask: Mask of initialized receive queues.
  78  * @rxq_count: Number of initialized receive queues.
   79  * @rxq_retry_mask: Mask of receive queues that need to be flushed again
  80  *      due to flush failure.
  81  * @rxq_retry_count: Number of receive queues in @rxq_retry_mask.
  82  * @reset_work: Work item to schedule a VF reset.
  83  */
  84 struct siena_vf {
  85         struct efx_nic *efx;
  86         unsigned int pci_rid;
  87         char pci_name[13]; /* dddd:bb:dd.f */
  88         unsigned int index;
  89         struct work_struct req;
  90         u64 req_addr;
  91         int req_type;
  92         unsigned req_seqno;
  93         unsigned msg_seqno;
  94         bool busy;
  95         struct efx_buffer buf;
  96         unsigned buftbl_base;
  97         bool rx_filtering;
  98         enum efx_filter_flags rx_filter_flags;
  99         unsigned rx_filter_qid;
 100         int rx_filter_id;
 101         enum efx_vf_tx_filter_mode tx_filter_mode;
 102         int tx_filter_id;
 103         struct vfdi_endpoint addr;
 104         u64 status_addr;
 105         struct mutex status_lock;
 106         u64 *peer_page_addrs;
 107         unsigned peer_page_count;
 108         u64 evq0_addrs[EFX_MAX_VF_EVQ_SIZE * sizeof(efx_qword_t) /
 109                        EFX_BUF_SIZE];
 110         unsigned evq0_count;
 111         wait_queue_head_t flush_waitq;
 112         struct mutex txq_lock;
 113         unsigned long txq_mask[VI_MASK_LENGTH];
 114         unsigned txq_count;
 115         unsigned long rxq_mask[VI_MASK_LENGTH];
 116         unsigned rxq_count;
 117         unsigned long rxq_retry_mask[VI_MASK_LENGTH];
 118         atomic_t rxq_retry_count;
 119         struct work_struct reset_work;
 120 };
 121 
 122 struct efx_memcpy_req {
 123         unsigned int from_rid;
 124         void *from_buf;
 125         u64 from_addr;
 126         unsigned int to_rid;
 127         u64 to_addr;
 128         unsigned length;
 129 };
 130 
 131 /**
 132  * struct efx_local_addr - A MAC address on the vswitch without a VF.
 133  *
 134  * Siena does not have a switch, so VFs can't transmit data to each
 135  * other. Instead the VFs must be made aware of the local addresses
 136  * on the vswitch, so that they can arrange for an alternative
 137  * software datapath to be used.
 138  *
 139  * @link: List head for insertion into efx->local_addr_list.
 140  * @addr: Ethernet address
 141  */
 142 struct efx_local_addr {
 143         struct list_head link;
 144         u8 addr[ETH_ALEN];
 145 };
 146 
 147 /**
 148  * struct efx_endpoint_page - Page of vfdi_endpoint structures
 149  *
 150  * @link: List head for insertion into efx->local_page_list.
 151  * @ptr: Pointer to page.
 152  * @addr: DMA address of page.
 153  */
 154 struct efx_endpoint_page {
 155         struct list_head link;
 156         void *ptr;
 157         dma_addr_t addr;
 158 };
 159 
 160 /* Buffer table entries are reserved txq0,rxq0,evq0,txq1,rxq1,evq1 */
 161 #define EFX_BUFTBL_TXQ_BASE(_vf, _qid)                                  \
 162         ((_vf)->buftbl_base + EFX_VF_BUFTBL_PER_VI * (_qid))
 163 #define EFX_BUFTBL_RXQ_BASE(_vf, _qid)                                  \
 164         (EFX_BUFTBL_TXQ_BASE(_vf, _qid) +                               \
 165          (EFX_MAX_DMAQ_SIZE * sizeof(efx_qword_t) / EFX_BUF_SIZE))
 166 #define EFX_BUFTBL_EVQ_BASE(_vf, _qid)                                  \
 167         (EFX_BUFTBL_TXQ_BASE(_vf, _qid) +                               \
 168          (2 * EFX_MAX_DMAQ_SIZE * sizeof(efx_qword_t) / EFX_BUF_SIZE))
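/* Worked example (editorial illustration, not part of the original source):
 * assuming the driver's usual EFX_MAX_DMAQ_SIZE of 4096 descriptors and
 * EFX_BUF_SIZE of 4096 bytes, each TX or RX descriptor queue spans
 * 4096 * sizeof(efx_qword_t) / 4096 = 8 buffer table entries, so for queue 0
 * of a VF whose buftbl_base is B the TXQ entries start at B, the RXQ entries
 * at B + 8 and the EVQ entries at B + 16.
 */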
 169 
 170 #define EFX_FIELD_MASK(_field)                  \
 171         ((1 << _field ## _WIDTH) - 1)
 172 
 173 /* VFs can only use this many transmit channels */
 174 static unsigned int vf_max_tx_channels = 2;
 175 module_param(vf_max_tx_channels, uint, 0444);
 176 MODULE_PARM_DESC(vf_max_tx_channels,
 177                  "Limit the number of TX channels VFs can use");
 178 
 179 static int max_vfs = -1;
 180 module_param(max_vfs, int, 0444);
 181 MODULE_PARM_DESC(max_vfs,
 182                  "Reduce the number of VFs initialized by the driver");
 183 
 184 /* Workqueue used by VFDI communication.  We can't use the global
 185  * workqueue because it may be running the VF driver's probe()
 186  * routine, which will be blocked there waiting for a VFDI response.
 187  */
 188 static struct workqueue_struct *vfdi_workqueue;
 189 
 190 static unsigned abs_index(struct siena_vf *vf, unsigned index)
 191 {
 192         return EFX_VI_BASE + vf->index * efx_vf_size(vf->efx) + index;
 193 }
 194 
 195 static int efx_siena_sriov_cmd(struct efx_nic *efx, bool enable,
 196                                unsigned *vi_scale_out, unsigned *vf_total_out)
 197 {
 198         MCDI_DECLARE_BUF(inbuf, MC_CMD_SRIOV_IN_LEN);
 199         MCDI_DECLARE_BUF(outbuf, MC_CMD_SRIOV_OUT_LEN);
 200         unsigned vi_scale, vf_total;
 201         size_t outlen;
 202         int rc;
 203 
 204         MCDI_SET_DWORD(inbuf, SRIOV_IN_ENABLE, enable ? 1 : 0);
 205         MCDI_SET_DWORD(inbuf, SRIOV_IN_VI_BASE, EFX_VI_BASE);
 206         MCDI_SET_DWORD(inbuf, SRIOV_IN_VF_COUNT, efx->vf_count);
 207 
 208         rc = efx_mcdi_rpc_quiet(efx, MC_CMD_SRIOV, inbuf, MC_CMD_SRIOV_IN_LEN,
 209                                 outbuf, MC_CMD_SRIOV_OUT_LEN, &outlen);
 210         if (rc)
 211                 return rc;
 212         if (outlen < MC_CMD_SRIOV_OUT_LEN)
 213                 return -EIO;
 214 
 215         vf_total = MCDI_DWORD(outbuf, SRIOV_OUT_VF_TOTAL);
 216         vi_scale = MCDI_DWORD(outbuf, SRIOV_OUT_VI_SCALE);
 217         if (vi_scale > EFX_VI_SCALE_MAX)
 218                 return -EOPNOTSUPP;
 219 
 220         if (vi_scale_out)
 221                 *vi_scale_out = vi_scale;
 222         if (vf_total_out)
 223                 *vf_total_out = vf_total;
 224 
 225         return 0;
 226 }
 227 
 228 static void efx_siena_sriov_usrev(struct efx_nic *efx, bool enabled)
 229 {
 230         struct siena_nic_data *nic_data = efx->nic_data;
 231         efx_oword_t reg;
 232 
 233         EFX_POPULATE_OWORD_2(reg,
 234                              FRF_CZ_USREV_DIS, enabled ? 0 : 1,
 235                              FRF_CZ_DFLT_EVQ, nic_data->vfdi_channel->channel);
 236         efx_writeo(efx, &reg, FR_CZ_USR_EV_CFG);
 237 }
 238 
 239 static int efx_siena_sriov_memcpy(struct efx_nic *efx,
 240                                   struct efx_memcpy_req *req,
 241                                   unsigned int count)
 242 {
 243         MCDI_DECLARE_BUF(inbuf, MCDI_CTL_SDU_LEN_MAX_V1);
 244         MCDI_DECLARE_STRUCT_PTR(record);
 245         unsigned int index, used;
 246         u64 from_addr;
 247         u32 from_rid;
 248         int rc;
 249 
 250         mb();   /* Finish writing source/reading dest before DMA starts */
 251 
 252         if (WARN_ON(count > MC_CMD_MEMCPY_IN_RECORD_MAXNUM))
 253                 return -ENOBUFS;
 254         used = MC_CMD_MEMCPY_IN_LEN(count);
 255 
 256         for (index = 0; index < count; index++) {
 257                 record = MCDI_ARRAY_STRUCT_PTR(inbuf, MEMCPY_IN_RECORD, index);
 258                 MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_NUM_RECORDS,
 259                                count);
 260                 MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_TO_RID,
 261                                req->to_rid);
 262                 MCDI_SET_QWORD(record, MEMCPY_RECORD_TYPEDEF_TO_ADDR,
 263                                req->to_addr);
 264                 if (req->from_buf == NULL) {
 265                         from_rid = req->from_rid;
 266                         from_addr = req->from_addr;
 267                 } else {
 268                         if (WARN_ON(used + req->length >
 269                                     MCDI_CTL_SDU_LEN_MAX_V1)) {
 270                                 rc = -ENOBUFS;
 271                                 goto out;
 272                         }
 273 
 274                         from_rid = MC_CMD_MEMCPY_RECORD_TYPEDEF_RID_INLINE;
 275                         from_addr = used;
 276                         memcpy(_MCDI_PTR(inbuf, used), req->from_buf,
 277                                req->length);
 278                         used += req->length;
 279                 }
 280 
 281                 MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_FROM_RID, from_rid);
 282                 MCDI_SET_QWORD(record, MEMCPY_RECORD_TYPEDEF_FROM_ADDR,
 283                                from_addr);
 284                 MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_LENGTH,
 285                                req->length);
 286 
 287                 ++req;
 288         }
 289 
 290         rc = efx_mcdi_rpc(efx, MC_CMD_MEMCPY, inbuf, used, NULL, 0, NULL);
 291 out:
 292         mb();   /* Don't write source/read dest before DMA is complete */
 293 
 294         return rc;
 295 }
 296 
 297 /* The TX filter is entirely controlled by this driver, and is modified
 298  * underneath the feet of the VF
 299  */
 300 static void efx_siena_sriov_reset_tx_filter(struct siena_vf *vf)
 301 {
 302         struct efx_nic *efx = vf->efx;
 303         struct efx_filter_spec filter;
 304         u16 vlan;
 305         int rc;
 306 
 307         if (vf->tx_filter_id != -1) {
 308                 efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED,
 309                                           vf->tx_filter_id);
 310                 netif_dbg(efx, hw, efx->net_dev, "Removed vf %s tx filter %d\n",
 311                           vf->pci_name, vf->tx_filter_id);
 312                 vf->tx_filter_id = -1;
 313         }
 314 
 315         if (is_zero_ether_addr(vf->addr.mac_addr))
 316                 return;
 317 
 318         /* Turn on TX filtering automatically if not explicitly
 319          * enabled or disabled.
 320          */
 321         if (vf->tx_filter_mode == VF_TX_FILTER_AUTO && vf_max_tx_channels <= 2)
 322                 vf->tx_filter_mode = VF_TX_FILTER_ON;
 323 
 324         vlan = ntohs(vf->addr.tci) & VLAN_VID_MASK;
 325         efx_filter_init_tx(&filter, abs_index(vf, 0));
 326         rc = efx_filter_set_eth_local(&filter,
 327                                       vlan ? vlan : EFX_FILTER_VID_UNSPEC,
 328                                       vf->addr.mac_addr);
 329         BUG_ON(rc);
 330 
 331         rc = efx_filter_insert_filter(efx, &filter, true);
 332         if (rc < 0) {
 333                 netif_warn(efx, hw, efx->net_dev,
 334                            "Unable to migrate tx filter for vf %s\n",
 335                            vf->pci_name);
 336         } else {
 337                 netif_dbg(efx, hw, efx->net_dev, "Inserted vf %s tx filter %d\n",
 338                           vf->pci_name, rc);
 339                 vf->tx_filter_id = rc;
 340         }
 341 }
 342 
 343 /* The RX filter is managed here on behalf of the VF driver */
 344 static void efx_siena_sriov_reset_rx_filter(struct siena_vf *vf)
 345 {
 346         struct efx_nic *efx = vf->efx;
 347         struct efx_filter_spec filter;
 348         u16 vlan;
 349         int rc;
 350 
 351         if (vf->rx_filter_id != -1) {
 352                 efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED,
 353                                           vf->rx_filter_id);
 354                 netif_dbg(efx, hw, efx->net_dev, "Removed vf %s rx filter %d\n",
 355                           vf->pci_name, vf->rx_filter_id);
 356                 vf->rx_filter_id = -1;
 357         }
 358 
 359         if (!vf->rx_filtering || is_zero_ether_addr(vf->addr.mac_addr))
 360                 return;
 361 
 362         vlan = ntohs(vf->addr.tci) & VLAN_VID_MASK;
 363         efx_filter_init_rx(&filter, EFX_FILTER_PRI_REQUIRED,
 364                            vf->rx_filter_flags,
 365                            abs_index(vf, vf->rx_filter_qid));
 366         rc = efx_filter_set_eth_local(&filter,
 367                                       vlan ? vlan : EFX_FILTER_VID_UNSPEC,
 368                                       vf->addr.mac_addr);
 369         BUG_ON(rc);
 370 
 371         rc = efx_filter_insert_filter(efx, &filter, true);
 372         if (rc < 0) {
 373                 netif_warn(efx, hw, efx->net_dev,
 374                            "Unable to insert rx filter for vf %s\n",
 375                            vf->pci_name);
 376         } else {
 377                 netif_dbg(efx, hw, efx->net_dev, "Inserted vf %s rx filter %d\n",
 378                           vf->pci_name, rc);
 379                 vf->rx_filter_id = rc;
 380         }
 381 }
 382 
 383 static void __efx_siena_sriov_update_vf_addr(struct siena_vf *vf)
 384 {
 385         struct efx_nic *efx = vf->efx;
 386         struct siena_nic_data *nic_data = efx->nic_data;
 387 
 388         efx_siena_sriov_reset_tx_filter(vf);
 389         efx_siena_sriov_reset_rx_filter(vf);
 390         queue_work(vfdi_workqueue, &nic_data->peer_work);
 391 }
 392 
 393 /* Push the peer list to this VF. The caller must hold status_lock to interlock
 394  * with VFDI requests, and they must be serialised against manipulation of
 395  * local_page_list, either by acquiring local_lock or by running from
 396  * efx_siena_sriov_peer_work()
 397  */
 398 static void __efx_siena_sriov_push_vf_status(struct siena_vf *vf)
 399 {
 400         struct efx_nic *efx = vf->efx;
 401         struct siena_nic_data *nic_data = efx->nic_data;
 402         struct vfdi_status *status = nic_data->vfdi_status.addr;
 403         struct efx_memcpy_req copy[4];
 404         struct efx_endpoint_page *epp;
 405         unsigned int pos, count;
 406         unsigned data_offset;
 407         efx_qword_t event;
 408 
 409         WARN_ON(!mutex_is_locked(&vf->status_lock));
 410         WARN_ON(!vf->status_addr);
 411 
 412         status->local = vf->addr;
 413         status->generation_end = ++status->generation_start;
 414 
 415         memset(copy, '\0', sizeof(copy));
 416         /* Write generation_start */
 417         copy[0].from_buf = &status->generation_start;
 418         copy[0].to_rid = vf->pci_rid;
 419         copy[0].to_addr = vf->status_addr + offsetof(struct vfdi_status,
 420                                                      generation_start);
 421         copy[0].length = sizeof(status->generation_start);
 422         /* DMA the rest of the structure (excluding the generations). This
 423          * assumes that the non-generation portion of vfdi_status is in
 424          * one chunk starting at the version member.
 425          */
 426         data_offset = offsetof(struct vfdi_status, version);
 427         copy[1].from_rid = efx->pci_dev->devfn;
 428         copy[1].from_addr = nic_data->vfdi_status.dma_addr + data_offset;
 429         copy[1].to_rid = vf->pci_rid;
 430         copy[1].to_addr = vf->status_addr + data_offset;
  431         copy[1].length = status->length - data_offset;
 432 
 433         /* Copy the peer pages */
 434         pos = 2;
 435         count = 0;
 436         list_for_each_entry(epp, &nic_data->local_page_list, link) {
 437                 if (count == vf->peer_page_count) {
 438                         /* The VF driver will know they need to provide more
 439                          * pages because peer_addr_count is too large.
 440                          */
 441                         break;
 442                 }
 443                 copy[pos].from_buf = NULL;
 444                 copy[pos].from_rid = efx->pci_dev->devfn;
 445                 copy[pos].from_addr = epp->addr;
 446                 copy[pos].to_rid = vf->pci_rid;
 447                 copy[pos].to_addr = vf->peer_page_addrs[count];
 448                 copy[pos].length = EFX_PAGE_SIZE;
 449 
 450                 if (++pos == ARRAY_SIZE(copy)) {
 451                         efx_siena_sriov_memcpy(efx, copy, ARRAY_SIZE(copy));
 452                         pos = 0;
 453                 }
 454                 ++count;
 455         }
 456 
 457         /* Write generation_end */
 458         copy[pos].from_buf = &status->generation_end;
 459         copy[pos].to_rid = vf->pci_rid;
 460         copy[pos].to_addr = vf->status_addr + offsetof(struct vfdi_status,
 461                                                        generation_end);
 462         copy[pos].length = sizeof(status->generation_end);
 463         efx_siena_sriov_memcpy(efx, copy, pos + 1);
 464 
 465         /* Notify the guest */
 466         EFX_POPULATE_QWORD_3(event,
 467                              FSF_AZ_EV_CODE, FSE_CZ_EV_CODE_USER_EV,
 468                              VFDI_EV_SEQ, (vf->msg_seqno & 0xff),
 469                              VFDI_EV_TYPE, VFDI_EV_TYPE_STATUS);
 470         ++vf->msg_seqno;
 471         efx_farch_generate_event(efx,
 472                                  EFX_VI_BASE + vf->index * efx_vf_size(efx),
 473                                  &event);
 474 }
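/* Editorial note (not part of the original source): generation_start is
 * DMAed to the VF first and generation_end last, so the VF driver can treat
 * the status page as consistent only when the two generation counts match,
 * in the style of a seqlock; unequal counts indicate a push in progress.
 */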
 475 
 476 static void efx_siena_sriov_bufs(struct efx_nic *efx, unsigned offset,
 477                                  u64 *addr, unsigned count)
 478 {
 479         efx_qword_t buf;
 480         unsigned pos;
 481 
 482         for (pos = 0; pos < count; ++pos) {
 483                 EFX_POPULATE_QWORD_3(buf,
 484                                      FRF_AZ_BUF_ADR_REGION, 0,
 485                                      FRF_AZ_BUF_ADR_FBUF,
 486                                      addr ? addr[pos] >> 12 : 0,
 487                                      FRF_AZ_BUF_OWNER_ID_FBUF, 0);
 488                 efx_sram_writeq(efx, efx->membase + FR_BZ_BUF_FULL_TBL,
 489                                 &buf, offset + pos);
 490         }
 491 }
 492 
 493 static bool bad_vf_index(struct efx_nic *efx, unsigned index)
 494 {
 495         return index >= efx_vf_size(efx);
 496 }
 497 
 498 static bool bad_buf_count(unsigned buf_count, unsigned max_entry_count)
 499 {
 500         unsigned max_buf_count = max_entry_count *
 501                 sizeof(efx_qword_t) / EFX_BUF_SIZE;
 502 
 503         return ((buf_count & (buf_count - 1)) || buf_count > max_buf_count);
 504 }
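/* Editorial note (not part of the original source): buf_count must be a
 * power of two; (buf_count & (buf_count - 1)) is non-zero otherwise, e.g.
 * 6 & 5 == 4 so a request for 6 buffers is rejected, while 8 & 7 == 0 so
 * 8 buffers is accepted provided it does not exceed max_buf_count.
 */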
 505 
 506 /* Check that VI specified by per-port index belongs to a VF.
 507  * Optionally set VF index and VI index within the VF.
 508  */
 509 static bool map_vi_index(struct efx_nic *efx, unsigned abs_index,
 510                          struct siena_vf **vf_out, unsigned *rel_index_out)
 511 {
 512         struct siena_nic_data *nic_data = efx->nic_data;
 513         unsigned vf_i;
 514 
 515         if (abs_index < EFX_VI_BASE)
 516                 return true;
 517         vf_i = (abs_index - EFX_VI_BASE) / efx_vf_size(efx);
 518         if (vf_i >= efx->vf_init_count)
 519                 return true;
 520 
 521         if (vf_out)
 522                 *vf_out = nic_data->vf + vf_i;
 523         if (rel_index_out)
 524                 *rel_index_out = abs_index % efx_vf_size(efx);
 525         return false;
 526 }
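/* Worked example (editorial illustration, not part of the original source):
 * with the same assumed EFX_VI_BASE of 128 and 4 VIs per VF, absolute VI 137
 * maps back to VF 2, relative VI 1, and the function returns false; absolute
 * VI 5 is below EFX_VI_BASE, so it belongs to the PF and the function
 * returns true.
 */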
 527 
 528 static int efx_vfdi_init_evq(struct siena_vf *vf)
 529 {
 530         struct efx_nic *efx = vf->efx;
 531         struct vfdi_req *req = vf->buf.addr;
 532         unsigned vf_evq = req->u.init_evq.index;
 533         unsigned buf_count = req->u.init_evq.buf_count;
 534         unsigned abs_evq = abs_index(vf, vf_evq);
 535         unsigned buftbl = EFX_BUFTBL_EVQ_BASE(vf, vf_evq);
 536         efx_oword_t reg;
 537 
 538         if (bad_vf_index(efx, vf_evq) ||
 539             bad_buf_count(buf_count, EFX_MAX_VF_EVQ_SIZE)) {
 540                 if (net_ratelimit())
 541                         netif_err(efx, hw, efx->net_dev,
 542                                   "ERROR: Invalid INIT_EVQ from %s: evq %d bufs %d\n",
 543                                   vf->pci_name, vf_evq, buf_count);
 544                 return VFDI_RC_EINVAL;
 545         }
 546 
 547         efx_siena_sriov_bufs(efx, buftbl, req->u.init_evq.addr, buf_count);
 548 
 549         EFX_POPULATE_OWORD_3(reg,
 550                              FRF_CZ_TIMER_Q_EN, 1,
 551                              FRF_CZ_HOST_NOTIFY_MODE, 0,
 552                              FRF_CZ_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS);
 553         efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, abs_evq);
 554         EFX_POPULATE_OWORD_3(reg,
 555                              FRF_AZ_EVQ_EN, 1,
 556                              FRF_AZ_EVQ_SIZE, __ffs(buf_count),
 557                              FRF_AZ_EVQ_BUF_BASE_ID, buftbl);
 558         efx_writeo_table(efx, &reg, FR_BZ_EVQ_PTR_TBL, abs_evq);
 559 
 560         if (vf_evq == 0) {
 561                 memcpy(vf->evq0_addrs, req->u.init_evq.addr,
 562                        buf_count * sizeof(u64));
 563                 vf->evq0_count = buf_count;
 564         }
 565 
 566         return VFDI_RC_SUCCESS;
 567 }
 568 
 569 static int efx_vfdi_init_rxq(struct siena_vf *vf)
 570 {
 571         struct efx_nic *efx = vf->efx;
 572         struct vfdi_req *req = vf->buf.addr;
 573         unsigned vf_rxq = req->u.init_rxq.index;
 574         unsigned vf_evq = req->u.init_rxq.evq;
 575         unsigned buf_count = req->u.init_rxq.buf_count;
 576         unsigned buftbl = EFX_BUFTBL_RXQ_BASE(vf, vf_rxq);
 577         unsigned label;
 578         efx_oword_t reg;
 579 
 580         if (bad_vf_index(efx, vf_evq) || bad_vf_index(efx, vf_rxq) ||
 581             vf_rxq >= VF_MAX_RX_QUEUES ||
 582             bad_buf_count(buf_count, EFX_MAX_DMAQ_SIZE)) {
 583                 if (net_ratelimit())
 584                         netif_err(efx, hw, efx->net_dev,
 585                                   "ERROR: Invalid INIT_RXQ from %s: rxq %d evq %d "
 586                                   "buf_count %d\n", vf->pci_name, vf_rxq,
 587                                   vf_evq, buf_count);
 588                 return VFDI_RC_EINVAL;
 589         }
 590         if (__test_and_set_bit(req->u.init_rxq.index, vf->rxq_mask))
 591                 ++vf->rxq_count;
 592         efx_siena_sriov_bufs(efx, buftbl, req->u.init_rxq.addr, buf_count);
 593 
 594         label = req->u.init_rxq.label & EFX_FIELD_MASK(FRF_AZ_RX_DESCQ_LABEL);
 595         EFX_POPULATE_OWORD_6(reg,
 596                              FRF_AZ_RX_DESCQ_BUF_BASE_ID, buftbl,
 597                              FRF_AZ_RX_DESCQ_EVQ_ID, abs_index(vf, vf_evq),
 598                              FRF_AZ_RX_DESCQ_LABEL, label,
 599                              FRF_AZ_RX_DESCQ_SIZE, __ffs(buf_count),
 600                              FRF_AZ_RX_DESCQ_JUMBO,
 601                              !!(req->u.init_rxq.flags &
 602                                 VFDI_RXQ_FLAG_SCATTER_EN),
 603                              FRF_AZ_RX_DESCQ_EN, 1);
 604         efx_writeo_table(efx, &reg, FR_BZ_RX_DESC_PTR_TBL,
 605                          abs_index(vf, vf_rxq));
 606 
 607         return VFDI_RC_SUCCESS;
 608 }
 609 
 610 static int efx_vfdi_init_txq(struct siena_vf *vf)
 611 {
 612         struct efx_nic *efx = vf->efx;
 613         struct vfdi_req *req = vf->buf.addr;
 614         unsigned vf_txq = req->u.init_txq.index;
 615         unsigned vf_evq = req->u.init_txq.evq;
 616         unsigned buf_count = req->u.init_txq.buf_count;
 617         unsigned buftbl = EFX_BUFTBL_TXQ_BASE(vf, vf_txq);
 618         unsigned label, eth_filt_en;
 619         efx_oword_t reg;
 620 
 621         if (bad_vf_index(efx, vf_evq) || bad_vf_index(efx, vf_txq) ||
 622             vf_txq >= vf_max_tx_channels ||
 623             bad_buf_count(buf_count, EFX_MAX_DMAQ_SIZE)) {
 624                 if (net_ratelimit())
 625                         netif_err(efx, hw, efx->net_dev,
 626                                   "ERROR: Invalid INIT_TXQ from %s: txq %d evq %d "
 627                                   "buf_count %d\n", vf->pci_name, vf_txq,
 628                                   vf_evq, buf_count);
 629                 return VFDI_RC_EINVAL;
 630         }
 631 
 632         mutex_lock(&vf->txq_lock);
 633         if (__test_and_set_bit(req->u.init_txq.index, vf->txq_mask))
 634                 ++vf->txq_count;
 635         mutex_unlock(&vf->txq_lock);
 636         efx_siena_sriov_bufs(efx, buftbl, req->u.init_txq.addr, buf_count);
 637 
 638         eth_filt_en = vf->tx_filter_mode == VF_TX_FILTER_ON;
 639 
 640         label = req->u.init_txq.label & EFX_FIELD_MASK(FRF_AZ_TX_DESCQ_LABEL);
 641         EFX_POPULATE_OWORD_8(reg,
 642                              FRF_CZ_TX_DPT_Q_MASK_WIDTH, min(efx->vi_scale, 1U),
 643                              FRF_CZ_TX_DPT_ETH_FILT_EN, eth_filt_en,
 644                              FRF_AZ_TX_DESCQ_EN, 1,
 645                              FRF_AZ_TX_DESCQ_BUF_BASE_ID, buftbl,
 646                              FRF_AZ_TX_DESCQ_EVQ_ID, abs_index(vf, vf_evq),
 647                              FRF_AZ_TX_DESCQ_LABEL, label,
 648                              FRF_AZ_TX_DESCQ_SIZE, __ffs(buf_count),
 649                              FRF_BZ_TX_NON_IP_DROP_DIS, 1);
 650         efx_writeo_table(efx, &reg, FR_BZ_TX_DESC_PTR_TBL,
 651                          abs_index(vf, vf_txq));
 652 
 653         return VFDI_RC_SUCCESS;
 654 }
 655 
 656 /* Returns true when efx_vfdi_fini_all_queues should wake */
 657 static bool efx_vfdi_flush_wake(struct siena_vf *vf)
 658 {
 659         /* Ensure that all updates are visible to efx_vfdi_fini_all_queues() */
 660         smp_mb();
 661 
 662         return (!vf->txq_count && !vf->rxq_count) ||
 663                 atomic_read(&vf->rxq_retry_count);
 664 }
 665 
 666 static void efx_vfdi_flush_clear(struct siena_vf *vf)
 667 {
 668         memset(vf->txq_mask, 0, sizeof(vf->txq_mask));
 669         vf->txq_count = 0;
 670         memset(vf->rxq_mask, 0, sizeof(vf->rxq_mask));
 671         vf->rxq_count = 0;
 672         memset(vf->rxq_retry_mask, 0, sizeof(vf->rxq_retry_mask));
 673         atomic_set(&vf->rxq_retry_count, 0);
 674 }
 675 
 676 static int efx_vfdi_fini_all_queues(struct siena_vf *vf)
 677 {
 678         struct efx_nic *efx = vf->efx;
 679         efx_oword_t reg;
 680         unsigned count = efx_vf_size(efx);
 681         unsigned vf_offset = EFX_VI_BASE + vf->index * efx_vf_size(efx);
 682         unsigned timeout = HZ;
 683         unsigned index, rxqs_count;
 684         MCDI_DECLARE_BUF(inbuf, MC_CMD_FLUSH_RX_QUEUES_IN_LENMAX);
 685         int rc;
 686 
 687         BUILD_BUG_ON(VF_MAX_RX_QUEUES >
 688                      MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MAXNUM);
 689 
 690         rtnl_lock();
 691         siena_prepare_flush(efx);
 692         rtnl_unlock();
 693 
 694         /* Flush all the initialized queues */
 695         rxqs_count = 0;
 696         for (index = 0; index < count; ++index) {
 697                 if (test_bit(index, vf->txq_mask)) {
 698                         EFX_POPULATE_OWORD_2(reg,
 699                                              FRF_AZ_TX_FLUSH_DESCQ_CMD, 1,
 700                                              FRF_AZ_TX_FLUSH_DESCQ,
 701                                              vf_offset + index);
 702                         efx_writeo(efx, &reg, FR_AZ_TX_FLUSH_DESCQ);
 703                 }
 704                 if (test_bit(index, vf->rxq_mask)) {
 705                         MCDI_SET_ARRAY_DWORD(
 706                                 inbuf, FLUSH_RX_QUEUES_IN_QID_OFST,
 707                                 rxqs_count, vf_offset + index);
 708                         rxqs_count++;
 709                 }
 710         }
 711 
 712         atomic_set(&vf->rxq_retry_count, 0);
 713         while (timeout && (vf->rxq_count || vf->txq_count)) {
 714                 rc = efx_mcdi_rpc(efx, MC_CMD_FLUSH_RX_QUEUES, inbuf,
 715                                   MC_CMD_FLUSH_RX_QUEUES_IN_LEN(rxqs_count),
 716                                   NULL, 0, NULL);
 717                 WARN_ON(rc < 0);
 718 
 719                 timeout = wait_event_timeout(vf->flush_waitq,
 720                                              efx_vfdi_flush_wake(vf),
 721                                              timeout);
 722                 rxqs_count = 0;
 723                 for (index = 0; index < count; ++index) {
 724                         if (test_and_clear_bit(index, vf->rxq_retry_mask)) {
 725                                 atomic_dec(&vf->rxq_retry_count);
 726                                 MCDI_SET_ARRAY_DWORD(
 727                                         inbuf, FLUSH_RX_QUEUES_IN_QID_OFST,
 728                                         rxqs_count, vf_offset + index);
 729                                 rxqs_count++;
 730                         }
 731                 }
 732         }
 733 
 734         rtnl_lock();
 735         siena_finish_flush(efx);
 736         rtnl_unlock();
 737 
 738         /* Irrespective of success/failure, fini the queues */
 739         EFX_ZERO_OWORD(reg);
 740         for (index = 0; index < count; ++index) {
 741                 efx_writeo_table(efx, &reg, FR_BZ_RX_DESC_PTR_TBL,
 742                                  vf_offset + index);
 743                 efx_writeo_table(efx, &reg, FR_BZ_TX_DESC_PTR_TBL,
 744                                  vf_offset + index);
 745                 efx_writeo_table(efx, &reg, FR_BZ_EVQ_PTR_TBL,
 746                                  vf_offset + index);
 747                 efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL,
 748                                  vf_offset + index);
 749         }
 750         efx_siena_sriov_bufs(efx, vf->buftbl_base, NULL,
 751                              EFX_VF_BUFTBL_PER_VI * efx_vf_size(efx));
 752         efx_vfdi_flush_clear(vf);
 753 
 754         vf->evq0_count = 0;
 755 
 756         return timeout ? 0 : VFDI_RC_ETIMEDOUT;
 757 }
 758 
 759 static int efx_vfdi_insert_filter(struct siena_vf *vf)
 760 {
 761         struct efx_nic *efx = vf->efx;
 762         struct siena_nic_data *nic_data = efx->nic_data;
 763         struct vfdi_req *req = vf->buf.addr;
 764         unsigned vf_rxq = req->u.mac_filter.rxq;
 765         unsigned flags;
 766 
 767         if (bad_vf_index(efx, vf_rxq) || vf->rx_filtering) {
 768                 if (net_ratelimit())
 769                         netif_err(efx, hw, efx->net_dev,
 770                                   "ERROR: Invalid INSERT_FILTER from %s: rxq %d "
 771                                   "flags 0x%x\n", vf->pci_name, vf_rxq,
 772                                   req->u.mac_filter.flags);
 773                 return VFDI_RC_EINVAL;
 774         }
 775 
 776         flags = 0;
 777         if (req->u.mac_filter.flags & VFDI_MAC_FILTER_FLAG_RSS)
 778                 flags |= EFX_FILTER_FLAG_RX_RSS;
 779         if (req->u.mac_filter.flags & VFDI_MAC_FILTER_FLAG_SCATTER)
 780                 flags |= EFX_FILTER_FLAG_RX_SCATTER;
 781         vf->rx_filter_flags = flags;
 782         vf->rx_filter_qid = vf_rxq;
 783         vf->rx_filtering = true;
 784 
 785         efx_siena_sriov_reset_rx_filter(vf);
 786         queue_work(vfdi_workqueue, &nic_data->peer_work);
 787 
 788         return VFDI_RC_SUCCESS;
 789 }
 790 
 791 static int efx_vfdi_remove_all_filters(struct siena_vf *vf)
 792 {
 793         struct efx_nic *efx = vf->efx;
 794         struct siena_nic_data *nic_data = efx->nic_data;
 795 
 796         vf->rx_filtering = false;
 797         efx_siena_sriov_reset_rx_filter(vf);
 798         queue_work(vfdi_workqueue, &nic_data->peer_work);
 799 
 800         return VFDI_RC_SUCCESS;
 801 }
 802 
 803 static int efx_vfdi_set_status_page(struct siena_vf *vf)
 804 {
 805         struct efx_nic *efx = vf->efx;
 806         struct siena_nic_data *nic_data = efx->nic_data;
 807         struct vfdi_req *req = vf->buf.addr;
 808         u64 page_count = req->u.set_status_page.peer_page_count;
 809         u64 max_page_count =
 810                 (EFX_PAGE_SIZE -
 811                  offsetof(struct vfdi_req, u.set_status_page.peer_page_addr[0]))
 812                 / sizeof(req->u.set_status_page.peer_page_addr[0]);
 813 
 814         if (!req->u.set_status_page.dma_addr || page_count > max_page_count) {
 815                 if (net_ratelimit())
 816                         netif_err(efx, hw, efx->net_dev,
 817                                   "ERROR: Invalid SET_STATUS_PAGE from %s\n",
 818                                   vf->pci_name);
 819                 return VFDI_RC_EINVAL;
 820         }
 821 
 822         mutex_lock(&nic_data->local_lock);
 823         mutex_lock(&vf->status_lock);
 824         vf->status_addr = req->u.set_status_page.dma_addr;
 825 
 826         kfree(vf->peer_page_addrs);
 827         vf->peer_page_addrs = NULL;
 828         vf->peer_page_count = 0;
 829 
 830         if (page_count) {
 831                 vf->peer_page_addrs = kcalloc(page_count, sizeof(u64),
 832                                               GFP_KERNEL);
 833                 if (vf->peer_page_addrs) {
 834                         memcpy(vf->peer_page_addrs,
 835                                req->u.set_status_page.peer_page_addr,
 836                                page_count * sizeof(u64));
 837                         vf->peer_page_count = page_count;
 838                 }
 839         }
 840 
 841         __efx_siena_sriov_push_vf_status(vf);
 842         mutex_unlock(&vf->status_lock);
 843         mutex_unlock(&nic_data->local_lock);
 844 
 845         return VFDI_RC_SUCCESS;
 846 }
 847 
 848 static int efx_vfdi_clear_status_page(struct siena_vf *vf)
 849 {
 850         mutex_lock(&vf->status_lock);
 851         vf->status_addr = 0;
 852         mutex_unlock(&vf->status_lock);
 853 
 854         return VFDI_RC_SUCCESS;
 855 }
 856 
 857 typedef int (*efx_vfdi_op_t)(struct siena_vf *vf);
 858 
 859 static const efx_vfdi_op_t vfdi_ops[VFDI_OP_LIMIT] = {
 860         [VFDI_OP_INIT_EVQ] = efx_vfdi_init_evq,
 861         [VFDI_OP_INIT_TXQ] = efx_vfdi_init_txq,
 862         [VFDI_OP_INIT_RXQ] = efx_vfdi_init_rxq,
 863         [VFDI_OP_FINI_ALL_QUEUES] = efx_vfdi_fini_all_queues,
 864         [VFDI_OP_INSERT_FILTER] = efx_vfdi_insert_filter,
 865         [VFDI_OP_REMOVE_ALL_FILTERS] = efx_vfdi_remove_all_filters,
 866         [VFDI_OP_SET_STATUS_PAGE] = efx_vfdi_set_status_page,
 867         [VFDI_OP_CLEAR_STATUS_PAGE] = efx_vfdi_clear_status_page,
 868 };
 869 
 870 static void efx_siena_sriov_vfdi(struct work_struct *work)
 871 {
 872         struct siena_vf *vf = container_of(work, struct siena_vf, req);
 873         struct efx_nic *efx = vf->efx;
 874         struct vfdi_req *req = vf->buf.addr;
 875         struct efx_memcpy_req copy[2];
 876         int rc;
 877 
 878         /* Copy this page into the local address space */
 879         memset(copy, '\0', sizeof(copy));
 880         copy[0].from_rid = vf->pci_rid;
 881         copy[0].from_addr = vf->req_addr;
 882         copy[0].to_rid = efx->pci_dev->devfn;
 883         copy[0].to_addr = vf->buf.dma_addr;
 884         copy[0].length = EFX_PAGE_SIZE;
 885         rc = efx_siena_sriov_memcpy(efx, copy, 1);
 886         if (rc) {
 887                 /* If we can't get the request, we can't reply to the caller */
 888                 if (net_ratelimit())
 889                         netif_err(efx, hw, efx->net_dev,
 890                                   "ERROR: Unable to fetch VFDI request from %s rc %d\n",
 891                                   vf->pci_name, -rc);
 892                 vf->busy = false;
 893                 return;
 894         }
 895 
 896         if (req->op < VFDI_OP_LIMIT && vfdi_ops[req->op] != NULL) {
 897                 rc = vfdi_ops[req->op](vf);
 898                 if (rc == 0) {
 899                         netif_dbg(efx, hw, efx->net_dev,
 900                                   "vfdi request %d from %s ok\n",
 901                                   req->op, vf->pci_name);
 902                 }
 903         } else {
 904                 netif_dbg(efx, hw, efx->net_dev,
 905                           "ERROR: Unrecognised request %d from VF %s addr "
 906                           "%llx\n", req->op, vf->pci_name,
 907                           (unsigned long long)vf->req_addr);
 908                 rc = VFDI_RC_EOPNOTSUPP;
 909         }
 910 
 911         /* Allow subsequent VF requests */
 912         vf->busy = false;
 913         smp_wmb();
 914 
 915         /* Respond to the request */
 916         req->rc = rc;
 917         req->op = VFDI_OP_RESPONSE;
 918 
 919         memset(copy, '\0', sizeof(copy));
 920         copy[0].from_buf = &req->rc;
 921         copy[0].to_rid = vf->pci_rid;
 922         copy[0].to_addr = vf->req_addr + offsetof(struct vfdi_req, rc);
 923         copy[0].length = sizeof(req->rc);
 924         copy[1].from_buf = &req->op;
 925         copy[1].to_rid = vf->pci_rid;
 926         copy[1].to_addr = vf->req_addr + offsetof(struct vfdi_req, op);
 927         copy[1].length = sizeof(req->op);
 928 
 929         (void)efx_siena_sriov_memcpy(efx, copy, ARRAY_SIZE(copy));
 930 }
 931 
 932 
 933 
 934 /* After a reset the event queues inside the guests no longer exist. Fill the
  935  * event ring in guest memory with VFDI reset events, then re-initialise the
 936  * event queue to raise an interrupt. The guest driver will then recover.
 937  */
 938 
 939 static void efx_siena_sriov_reset_vf(struct siena_vf *vf,
 940                                      struct efx_buffer *buffer)
 941 {
 942         struct efx_nic *efx = vf->efx;
 943         struct efx_memcpy_req copy_req[4];
 944         efx_qword_t event;
 945         unsigned int pos, count, k, buftbl, abs_evq;
 946         efx_oword_t reg;
 947         efx_dword_t ptr;
 948         int rc;
 949 
 950         BUG_ON(buffer->len != EFX_PAGE_SIZE);
 951 
 952         if (!vf->evq0_count)
 953                 return;
 954         BUG_ON(vf->evq0_count & (vf->evq0_count - 1));
 955 
 956         mutex_lock(&vf->status_lock);
 957         EFX_POPULATE_QWORD_3(event,
 958                              FSF_AZ_EV_CODE, FSE_CZ_EV_CODE_USER_EV,
 959                              VFDI_EV_SEQ, vf->msg_seqno,
 960                              VFDI_EV_TYPE, VFDI_EV_TYPE_RESET);
 961         vf->msg_seqno++;
 962         for (pos = 0; pos < EFX_PAGE_SIZE; pos += sizeof(event))
 963                 memcpy(buffer->addr + pos, &event, sizeof(event));
 964 
 965         for (pos = 0; pos < vf->evq0_count; pos += count) {
 966                 count = min_t(unsigned, vf->evq0_count - pos,
 967                               ARRAY_SIZE(copy_req));
 968                 for (k = 0; k < count; k++) {
 969                         copy_req[k].from_buf = NULL;
 970                         copy_req[k].from_rid = efx->pci_dev->devfn;
 971                         copy_req[k].from_addr = buffer->dma_addr;
 972                         copy_req[k].to_rid = vf->pci_rid;
 973                         copy_req[k].to_addr = vf->evq0_addrs[pos + k];
 974                         copy_req[k].length = EFX_PAGE_SIZE;
 975                 }
 976                 rc = efx_siena_sriov_memcpy(efx, copy_req, count);
 977                 if (rc) {
 978                         if (net_ratelimit())
 979                                 netif_err(efx, hw, efx->net_dev,
 980                                           "ERROR: Unable to notify %s of reset"
 981                                           ": %d\n", vf->pci_name, -rc);
 982                         break;
 983                 }
 984         }
 985 
 986         /* Reinitialise, arm and trigger evq0 */
 987         abs_evq = abs_index(vf, 0);
 988         buftbl = EFX_BUFTBL_EVQ_BASE(vf, 0);
 989         efx_siena_sriov_bufs(efx, buftbl, vf->evq0_addrs, vf->evq0_count);
 990 
 991         EFX_POPULATE_OWORD_3(reg,
 992                              FRF_CZ_TIMER_Q_EN, 1,
 993                              FRF_CZ_HOST_NOTIFY_MODE, 0,
 994                              FRF_CZ_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS);
 995         efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, abs_evq);
 996         EFX_POPULATE_OWORD_3(reg,
 997                              FRF_AZ_EVQ_EN, 1,
 998                              FRF_AZ_EVQ_SIZE, __ffs(vf->evq0_count),
 999                              FRF_AZ_EVQ_BUF_BASE_ID, buftbl);
1000         efx_writeo_table(efx, &reg, FR_BZ_EVQ_PTR_TBL, abs_evq);
1001         EFX_POPULATE_DWORD_1(ptr, FRF_AZ_EVQ_RPTR, 0);
1002         efx_writed(efx, &ptr, FR_BZ_EVQ_RPTR + FR_BZ_EVQ_RPTR_STEP * abs_evq);
1003 
1004         mutex_unlock(&vf->status_lock);
1005 }
1006 
1007 static void efx_siena_sriov_reset_vf_work(struct work_struct *work)
1008 {
 1009         struct siena_vf *vf = container_of(work, struct siena_vf, reset_work);
1010         struct efx_nic *efx = vf->efx;
1011         struct efx_buffer buf;
1012 
1013         if (!efx_nic_alloc_buffer(efx, &buf, EFX_PAGE_SIZE, GFP_NOIO)) {
1014                 efx_siena_sriov_reset_vf(vf, &buf);
1015                 efx_nic_free_buffer(efx, &buf);
1016         }
1017 }
1018 
1019 static void efx_siena_sriov_handle_no_channel(struct efx_nic *efx)
1020 {
1021         netif_err(efx, drv, efx->net_dev,
 1022                   "ERROR: IOV requires MSI-X and 1 additional interrupt "
1023                   "vector. IOV disabled\n");
1024         efx->vf_count = 0;
1025 }
1026 
1027 static int efx_siena_sriov_probe_channel(struct efx_channel *channel)
1028 {
1029         struct siena_nic_data *nic_data = channel->efx->nic_data;
1030         nic_data->vfdi_channel = channel;
1031 
1032         return 0;
1033 }
1034 
1035 static void
1036 efx_siena_sriov_get_channel_name(struct efx_channel *channel,
1037                                  char *buf, size_t len)
1038 {
1039         snprintf(buf, len, "%s-iov", channel->efx->name);
1040 }
1041 
1042 static const struct efx_channel_type efx_siena_sriov_channel_type = {
1043         .handle_no_channel      = efx_siena_sriov_handle_no_channel,
1044         .pre_probe              = efx_siena_sriov_probe_channel,
1045         .post_remove            = efx_channel_dummy_op_void,
1046         .get_name               = efx_siena_sriov_get_channel_name,
1047         /* no copy operation; channel must not be reallocated */
1048         .keep_eventq            = true,
1049 };
1050 
1051 void efx_siena_sriov_probe(struct efx_nic *efx)
1052 {
1053         unsigned count;
1054 
1055         if (!max_vfs)
1056                 return;
1057 
1058         if (efx_siena_sriov_cmd(efx, false, &efx->vi_scale, &count)) {
1059                 netif_info(efx, probe, efx->net_dev, "no SR-IOV VFs probed\n");
1060                 return;
1061         }
1062         if (count > 0 && count > max_vfs)
1063                 count = max_vfs;
1064 
 1065         /* efx_nic_dimension_resources() will reduce vf_count as appropriate */
1066         efx->vf_count = count;
1067 
1068         efx->extra_channel_type[EFX_EXTRA_CHANNEL_IOV] = &efx_siena_sriov_channel_type;
1069 }
1070 
1071 /* Copy the list of individual addresses into the vfdi_status.peers
1072  * array and auxiliary pages, protected by %local_lock. Drop that lock
1073  * and then broadcast the address list to every VF.
1074  */
1075 static void efx_siena_sriov_peer_work(struct work_struct *data)
1076 {
1077         struct siena_nic_data *nic_data = container_of(data,
1078                                                        struct siena_nic_data,
1079                                                        peer_work);
1080         struct efx_nic *efx = nic_data->efx;
1081         struct vfdi_status *vfdi_status = nic_data->vfdi_status.addr;
1082         struct siena_vf *vf;
1083         struct efx_local_addr *local_addr;
1084         struct vfdi_endpoint *peer;
1085         struct efx_endpoint_page *epp;
1086         struct list_head pages;
1087         unsigned int peer_space;
1088         unsigned int peer_count;
1089         unsigned int pos;
1090 
1091         mutex_lock(&nic_data->local_lock);
1092 
1093         /* Move the existing peer pages off %local_page_list */
1094         INIT_LIST_HEAD(&pages);
1095         list_splice_tail_init(&nic_data->local_page_list, &pages);
1096 
1097         /* Populate the VF addresses starting from entry 1 (entry 0 is
1098          * the PF address)
1099          */
1100         peer = vfdi_status->peers + 1;
1101         peer_space = ARRAY_SIZE(vfdi_status->peers) - 1;
1102         peer_count = 1;
1103         for (pos = 0; pos < efx->vf_count; ++pos) {
1104                 vf = nic_data->vf + pos;
1105 
1106                 mutex_lock(&vf->status_lock);
1107                 if (vf->rx_filtering && !is_zero_ether_addr(vf->addr.mac_addr)) {
1108                         *peer++ = vf->addr;
1109                         ++peer_count;
1110                         --peer_space;
1111                         BUG_ON(peer_space == 0);
1112                 }
1113                 mutex_unlock(&vf->status_lock);
1114         }
1115 
1116         /* Fill the remaining addresses */
1117         list_for_each_entry(local_addr, &nic_data->local_addr_list, link) {
1118                 ether_addr_copy(peer->mac_addr, local_addr->addr);
1119                 peer->tci = 0;
1120                 ++peer;
1121                 ++peer_count;
1122                 if (--peer_space == 0) {
1123                         if (list_empty(&pages)) {
1124                                 epp = kmalloc(sizeof(*epp), GFP_KERNEL);
1125                                 if (!epp)
1126                                         break;
1127                                 epp->ptr = dma_alloc_coherent(
1128                                         &efx->pci_dev->dev, EFX_PAGE_SIZE,
1129                                         &epp->addr, GFP_KERNEL);
1130                                 if (!epp->ptr) {
1131                                         kfree(epp);
1132                                         break;
1133                                 }
1134                         } else {
1135                                 epp = list_first_entry(
1136                                         &pages, struct efx_endpoint_page, link);
1137                                 list_del(&epp->link);
1138                         }
1139 
1140                         list_add_tail(&epp->link, &nic_data->local_page_list);
1141                         peer = (struct vfdi_endpoint *)epp->ptr;
1142                         peer_space = EFX_PAGE_SIZE / sizeof(struct vfdi_endpoint);
1143                 }
1144         }
1145         vfdi_status->peer_count = peer_count;
1146         mutex_unlock(&nic_data->local_lock);
1147 
1148         /* Free any now unused endpoint pages */
1149         while (!list_empty(&pages)) {
1150                 epp = list_first_entry(
1151                         &pages, struct efx_endpoint_page, link);
1152                 list_del(&epp->link);
1153                 dma_free_coherent(&efx->pci_dev->dev, EFX_PAGE_SIZE,
1154                                   epp->ptr, epp->addr);
1155                 kfree(epp);
1156         }
1157 
1158         /* Finally, push the pages */
1159         for (pos = 0; pos < efx->vf_count; ++pos) {
1160                 vf = nic_data->vf + pos;
1161 
1162                 mutex_lock(&vf->status_lock);
1163                 if (vf->status_addr)
1164                         __efx_siena_sriov_push_vf_status(vf);
1165                 mutex_unlock(&vf->status_lock);
1166         }
1167 }
1168 
1169 static void efx_siena_sriov_free_local(struct efx_nic *efx)
1170 {
1171         struct siena_nic_data *nic_data = efx->nic_data;
1172         struct efx_local_addr *local_addr;
1173         struct efx_endpoint_page *epp;
1174 
1175         while (!list_empty(&nic_data->local_addr_list)) {
1176                 local_addr = list_first_entry(&nic_data->local_addr_list,
1177                                               struct efx_local_addr, link);
1178                 list_del(&local_addr->link);
1179                 kfree(local_addr);
1180         }
1181 
1182         while (!list_empty(&nic_data->local_page_list)) {
1183                 epp = list_first_entry(&nic_data->local_page_list,
1184                                        struct efx_endpoint_page, link);
1185                 list_del(&epp->link);
1186                 dma_free_coherent(&efx->pci_dev->dev, EFX_PAGE_SIZE,
1187                                   epp->ptr, epp->addr);
1188                 kfree(epp);
1189         }
1190 }
1191 
1192 static int efx_siena_sriov_vf_alloc(struct efx_nic *efx)
1193 {
1194         unsigned index;
1195         struct siena_vf *vf;
1196         struct siena_nic_data *nic_data = efx->nic_data;
1197 
1198         nic_data->vf = kcalloc(efx->vf_count, sizeof(*nic_data->vf),
1199                                GFP_KERNEL);
1200         if (!nic_data->vf)
1201                 return -ENOMEM;
1202 
1203         for (index = 0; index < efx->vf_count; ++index) {
1204                 vf = nic_data->vf + index;
1205 
1206                 vf->efx = efx;
1207                 vf->index = index;
1208                 vf->rx_filter_id = -1;
1209                 vf->tx_filter_mode = VF_TX_FILTER_AUTO;
1210                 vf->tx_filter_id = -1;
1211                 INIT_WORK(&vf->req, efx_siena_sriov_vfdi);
1212                 INIT_WORK(&vf->reset_work, efx_siena_sriov_reset_vf_work);
1213                 init_waitqueue_head(&vf->flush_waitq);
1214                 mutex_init(&vf->status_lock);
1215                 mutex_init(&vf->txq_lock);
1216         }
1217 
1218         return 0;
1219 }
1220 
1221 static void efx_siena_sriov_vfs_fini(struct efx_nic *efx)
1222 {
1223         struct siena_nic_data *nic_data = efx->nic_data;
1224         struct siena_vf *vf;
1225         unsigned int pos;
1226 
1227         for (pos = 0; pos < efx->vf_count; ++pos) {
1228                 vf = nic_data->vf + pos;
1229 
1230                 efx_nic_free_buffer(efx, &vf->buf);
1231                 kfree(vf->peer_page_addrs);
1232                 vf->peer_page_addrs = NULL;
1233                 vf->peer_page_count = 0;
1234 
1235                 vf->evq0_count = 0;
1236         }
1237 }
1238 
1239 static int efx_siena_sriov_vfs_init(struct efx_nic *efx)
1240 {
1241         struct pci_dev *pci_dev = efx->pci_dev;
1242         struct siena_nic_data *nic_data = efx->nic_data;
1243         unsigned index, devfn, sriov, buftbl_base;
1244         u16 offset, stride;
1245         struct siena_vf *vf;
1246         int rc;
1247 
1248         sriov = pci_find_ext_capability(pci_dev, PCI_EXT_CAP_ID_SRIOV);
1249         if (!sriov)
1250                 return -ENOENT;
1251 
1252         pci_read_config_word(pci_dev, sriov + PCI_SRIOV_VF_OFFSET, &offset);
1253         pci_read_config_word(pci_dev, sriov + PCI_SRIOV_VF_STRIDE, &stride);
1254 
1255         buftbl_base = nic_data->vf_buftbl_base;
1256         devfn = pci_dev->devfn + offset;
1257         for (index = 0; index < efx->vf_count; ++index) {
1258                 vf = nic_data->vf + index;
1259 
1260                 /* Reserve buffer entries */
1261                 vf->buftbl_base = buftbl_base;
1262                 buftbl_base += EFX_VF_BUFTBL_PER_VI * efx_vf_size(efx);
1263 
1264                 vf->pci_rid = devfn;
1265                 snprintf(vf->pci_name, sizeof(vf->pci_name),
1266                          "%04x:%02x:%02x.%d",
1267                          pci_domain_nr(pci_dev->bus), pci_dev->bus->number,
1268                          PCI_SLOT(devfn), PCI_FUNC(devfn));
1269 
1270                 rc = efx_nic_alloc_buffer(efx, &vf->buf, EFX_PAGE_SIZE,
1271                                           GFP_KERNEL);
1272                 if (rc)
1273                         goto fail;
1274 
1275                 devfn += stride;
1276         }
1277 
1278         return 0;
1279 
1280 fail:
1281         efx_siena_sriov_vfs_fini(efx);
1282         return rc;
1283 }
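
The devfn arithmetic above follows the SR-IOV capability: each VF's routing ID
is the PF's devfn plus the capability's First VF Offset plus the VF index times
VF Stride (the loop assumes the VFs remain on the PF's bus, which holds as long
as the devfn does not overflow). A worked example with illustrative register
values:

/* Illustrative values only -- not read from real hardware:
 *   PF at 0000:04:00.0 (devfn 0x00), First VF Offset = 128, VF Stride = 2
 *
 *   VF0: devfn = 0x00 + 128 + 0 * 2 = 0x80  ->  "0000:04:10.0"
 *   VF1: devfn = 0x00 + 128 + 1 * 2 = 0x82  ->  "0000:04:10.2"
 *   VF2: devfn = 0x00 + 128 + 2 * 2 = 0x84  ->  "0000:04:10.4"
 *
 * PCI_SLOT(devfn) is (devfn >> 3) & 0x1f and PCI_FUNC(devfn) is devfn & 0x7,
 * which is exactly how the pci_name strings are formatted in the loop above.
 */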
1284 
1285 int efx_siena_sriov_init(struct efx_nic *efx)
1286 {
1287         struct net_device *net_dev = efx->net_dev;
1288         struct siena_nic_data *nic_data = efx->nic_data;
1289         struct vfdi_status *vfdi_status;
1290         int rc;
1291 
1292         /* Ensure there's room for vf_channel */
1293         BUILD_BUG_ON(EFX_MAX_CHANNELS + 1 >= EFX_VI_BASE);
1294         /* Ensure that VI_BASE is aligned on VI_SCALE */
1295         BUILD_BUG_ON(EFX_VI_BASE & ((1 << EFX_VI_SCALE_MAX) - 1));
1296 
1297         if (efx->vf_count == 0)
1298                 return 0;
1299 
1300         rc = efx_siena_sriov_cmd(efx, true, NULL, NULL);
1301         if (rc)
1302                 goto fail_cmd;
1303 
1304         rc = efx_nic_alloc_buffer(efx, &nic_data->vfdi_status,
1305                                   sizeof(*vfdi_status), GFP_KERNEL);
1306         if (rc)
1307                 goto fail_status;
1308         vfdi_status = nic_data->vfdi_status.addr;
1309         memset(vfdi_status, 0, sizeof(*vfdi_status));
1310         vfdi_status->version = 1;
1311         vfdi_status->length = sizeof(*vfdi_status);
1312         vfdi_status->max_tx_channels = vf_max_tx_channels;
1313         vfdi_status->vi_scale = efx->vi_scale;
1314         vfdi_status->rss_rxq_count = efx->rss_spread;
1315         vfdi_status->peer_count = 1 + efx->vf_count;
1316         vfdi_status->timer_quantum_ns = efx->timer_quantum_ns;
1317 
1318         rc = efx_siena_sriov_vf_alloc(efx);
1319         if (rc)
1320                 goto fail_alloc;
1321 
1322         mutex_init(&nic_data->local_lock);
1323         INIT_WORK(&nic_data->peer_work, efx_siena_sriov_peer_work);
1324         INIT_LIST_HEAD(&nic_data->local_addr_list);
1325         INIT_LIST_HEAD(&nic_data->local_page_list);
1326 
1327         rc = efx_siena_sriov_vfs_init(efx);
1328         if (rc)
1329                 goto fail_vfs;
1330 
1331         rtnl_lock();
1332         ether_addr_copy(vfdi_status->peers[0].mac_addr, net_dev->dev_addr);
1333         efx->vf_init_count = efx->vf_count;
1334         rtnl_unlock();
1335 
1336         efx_siena_sriov_usrev(efx, true);
1337 
1338         /* At this point we must be ready to accept VFDI requests */
1339 
1340         rc = pci_enable_sriov(efx->pci_dev, efx->vf_count);
1341         if (rc)
1342                 goto fail_pci;
1343 
1344         netif_info(efx, probe, net_dev,
1345                    "enabled SR-IOV for %d VFs, %d VIs per VF\n",
1346                    efx->vf_count, efx_vf_size(efx));
1347         return 0;
1348 
1349 fail_pci:
1350         efx_siena_sriov_usrev(efx, false);
1351         rtnl_lock();
1352         efx->vf_init_count = 0;
1353         rtnl_unlock();
1354         efx_siena_sriov_vfs_fini(efx);
1355 fail_vfs:
1356         cancel_work_sync(&nic_data->peer_work);
1357         efx_siena_sriov_free_local(efx);
1358         kfree(nic_data->vf);
1359 fail_alloc:
1360         efx_nic_free_buffer(efx, &nic_data->vfdi_status);
1361 fail_status:
1362         efx_siena_sriov_cmd(efx, false, NULL, NULL);
1363 fail_cmd:
1364         return rc;
1365 }
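
The vfdi_status structure filled in above is later copied into each VF's own
status page (see __efx_siena_sriov_push_vf_status() earlier in this file), so
it is how VF drivers learn the VFDI parameters and the peer address list. A
short note on the peer accounting, using an illustrative VF count:

/* Illustrative example: with efx->vf_count == 7, peer_count is set to 8.
 * peers[0] always carries the PF's own station address (copied from
 * net_dev->dev_addr under the RTNL lock above); the remaining entries are
 * filled in later by efx_siena_sriov_peer_work() as VF and local addresses
 * become known.
 */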
1366 
1367 void efx_siena_sriov_fini(struct efx_nic *efx)
1368 {
1369         struct siena_vf *vf;
1370         unsigned int pos;
1371         struct siena_nic_data *nic_data = efx->nic_data;
1372 
1373         if (efx->vf_init_count == 0)
1374                 return;
1375 
1376         /* Disable all interfaces by which reconfiguration can be requested */
1377         BUG_ON(nic_data->vfdi_channel->enabled);
1378         efx_siena_sriov_usrev(efx, false);
1379         rtnl_lock();
1380         efx->vf_init_count = 0;
1381         rtnl_unlock();
1382 
1383         /* Flush all reconfiguration work */
1384         for (pos = 0; pos < efx->vf_count; ++pos) {
1385                 vf = nic_data->vf + pos;
1386                 cancel_work_sync(&vf->req);
1387                 cancel_work_sync(&vf->reset_work);
1388         }
1389         cancel_work_sync(&nic_data->peer_work);
1390 
1391         pci_disable_sriov(efx->pci_dev);
1392 
1393         /* Tear down back-end state */
1394         efx_siena_sriov_vfs_fini(efx);
1395         efx_siena_sriov_free_local(efx);
1396         kfree(nic_data->vf);
1397         efx_nic_free_buffer(efx, &nic_data->vfdi_status);
1398         efx_siena_sriov_cmd(efx, false, NULL, NULL);
1399 }
1400 
1401 void efx_siena_sriov_event(struct efx_channel *channel, efx_qword_t *event)
1402 {
1403         struct efx_nic *efx = channel->efx;
1404         struct siena_vf *vf;
1405         unsigned int qid, seq, type, data;
1406 
1407         qid = EFX_QWORD_FIELD(*event, FSF_CZ_USER_QID);
1408 
1409         /* USR_EV_REG_VALUE is dword0, so access the VFDI_EV fields directly */
1410         BUILD_BUG_ON(FSF_CZ_USER_EV_REG_VALUE_LBN != 0);
1411         seq = EFX_QWORD_FIELD(*event, VFDI_EV_SEQ);
1412         type = EFX_QWORD_FIELD(*event, VFDI_EV_TYPE);
1413         data = EFX_QWORD_FIELD(*event, VFDI_EV_DATA);
1414 
1415         netif_vdbg(efx, hw, efx->net_dev,
1416                    "USR_EV event from qid %d seq 0x%x type %d data 0x%x\n",
1417                    qid, seq, type, data);
1418 
1419         if (map_vi_index(efx, qid, &vf, NULL))
1420                 return;
1421         if (vf->busy)
1422                 goto error;
1423 
1424         if (type == VFDI_EV_TYPE_REQ_WORD0) {
1425                 /* Resynchronise */
1426                 vf->req_type = VFDI_EV_TYPE_REQ_WORD0;
1427                 vf->req_seqno = seq + 1;
1428                 vf->req_addr = 0;
1429         } else if (seq != (vf->req_seqno++ & 0xff) || type != vf->req_type)
1430                 goto error;
1431 
1432         switch (vf->req_type) {
1433         case VFDI_EV_TYPE_REQ_WORD0:
1434         case VFDI_EV_TYPE_REQ_WORD1:
1435         case VFDI_EV_TYPE_REQ_WORD2:
1436                 vf->req_addr |= (u64)data << (vf->req_type << 4);
1437                 ++vf->req_type;
1438                 return;
1439 
1440         case VFDI_EV_TYPE_REQ_WORD3:
1441                 vf->req_addr |= (u64)data << 48;
1442                 vf->req_type = VFDI_EV_TYPE_REQ_WORD0;
1443                 vf->busy = true;
1444                 queue_work(vfdi_workqueue, &vf->req);
1445                 return;
1446         }
1447 
1448 error:
1449         if (net_ratelimit())
1450                 netif_err(efx, hw, efx->net_dev,
1451                           "ERROR: Screaming VFDI request from %s\n",
1452                           vf->pci_name);
1453         /* Reset the request and sequence number */
1454         vf->req_type = VFDI_EV_TYPE_REQ_WORD0;
1455         vf->req_seqno = seq + 1;
1456 }
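
The request DMA address arrives 16 bits per event, least-significant word
first, so for WORD0..WORD2 the shift can be computed directly from the event
type (req_type << 4). A worked example with an illustrative address:

/* Illustrative address only: a VF publishing the request address
 * 0x0000123456789abc sends four consecutive USR_EV events:
 *
 *   REQ_WORD0: data = 0x9abc  ->  req_addr |= 0x9abc << 0
 *   REQ_WORD1: data = 0x5678  ->  req_addr |= 0x5678 << 16
 *   REQ_WORD2: data = 0x1234  ->  req_addr |= 0x1234 << 32
 *   REQ_WORD3: data = 0x0000  ->  req_addr |= 0x0000 << 48
 *
 * After WORD3 the address is complete, vf->busy is set and the request is
 * handed to efx_siena_sriov_vfdi() on the VFDI workqueue. An out-of-sequence
 * event either resynchronises (if it is a fresh WORD0) or trips the
 * rate-limited "screaming VFDI" error path above.
 */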
1457 
1458 void efx_siena_sriov_flr(struct efx_nic *efx, unsigned vf_i)
1459 {
1460         struct siena_nic_data *nic_data = efx->nic_data;
1461         struct siena_vf *vf;
1462 
1463         if (vf_i >= efx->vf_init_count)
1464                 return;
1465         vf = nic_data->vf + vf_i;
1466         netif_info(efx, hw, efx->net_dev,
1467                    "FLR on VF %s\n", vf->pci_name);
1468 
1469         vf->status_addr = 0;
1470         efx_vfdi_remove_all_filters(vf);
1471         efx_vfdi_flush_clear(vf);
1472 
1473         vf->evq0_count = 0;
1474 }
1475 
1476 int efx_siena_sriov_mac_address_changed(struct efx_nic *efx)
1477 {
1478         struct siena_nic_data *nic_data = efx->nic_data;
1479         struct vfdi_status *vfdi_status = nic_data->vfdi_status.addr;
1480 
1481         if (!efx->vf_init_count)
1482                 return 0;
1483         ether_addr_copy(vfdi_status->peers[0].mac_addr,
1484                         efx->net_dev->dev_addr);
1485         queue_work(vfdi_workqueue, &nic_data->peer_work);
1486 
1487         return 0;
1488 }
1489 
1490 void efx_siena_sriov_tx_flush_done(struct efx_nic *efx, efx_qword_t *event)
1491 {
1492         struct siena_vf *vf;
1493         unsigned int queue, qid;
1494 
1495         queue = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);
1496         if (map_vi_index(efx, queue, &vf, &qid))
1497                 return;
1498         /* Ignore flush completions triggered by an FLR */
1499         if (!test_bit(qid, vf->txq_mask))
1500                 return;
1501 
1502         __clear_bit(qid, vf->txq_mask);
1503         --vf->txq_count;
1504 
1505         if (efx_vfdi_flush_wake(vf))
1506                 wake_up(&vf->flush_waitq);
1507 }
1508 
1509 void efx_siena_sriov_rx_flush_done(struct efx_nic *efx, efx_qword_t *event)
1510 {
1511         struct siena_vf *vf;
1512         unsigned int ev_failed, queue, qid;
1513 
1514         queue = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID);
1515         ev_failed = EFX_QWORD_FIELD(*event,
1516                                     FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL);
1517         if (map_vi_index(efx, queue, &vf, &qid))
1518                 return;
1519         if (!test_bit(qid, vf->rxq_mask))
1520                 return;
1521 
1522         if (ev_failed) {
1523                 set_bit(qid, vf->rxq_retry_mask);
1524                 atomic_inc(&vf->rxq_retry_count);
1525         } else {
1526                 __clear_bit(qid, vf->rxq_mask);
1527                 --vf->rxq_count;
1528         }
1529         if (efx_vfdi_flush_wake(vf))
1530                 wake_up(&vf->flush_waitq);
1531 }
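
Both flush handlers feed the accounting that efx_vfdi_fini_all_queues() sleeps
on. An illustrative walk-through of that accounting (queue counts chosen for
the example only):

/* Illustrative example: a VF torn down with txq_count == 2 and rxq_count == 2
 * expects two TX and two RX flush-done events. Each completion clears the
 * queue's bit in txq_mask/rxq_mask and decrements the matching count; a
 * failed RX flush instead records the queue in rxq_retry_mask and bumps
 * rxq_retry_count so the waiter can re-issue it. After every state change
 * efx_vfdi_flush_wake() is consulted and, if progress is possible, the task
 * sleeping on vf->flush_waitq is woken.
 */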
1532 
1533 /* Called from NAPI context. Schedule the reset work item. */
1534 void efx_siena_sriov_desc_fetch_err(struct efx_nic *efx, unsigned dmaq)
1535 {
1536         struct siena_vf *vf;
1537         unsigned int rel;
1538 
1539         if (map_vi_index(efx, dmaq, &vf, &rel))
1540                 return;
1541 
1542         if (net_ratelimit())
1543                 netif_err(efx, hw, efx->net_dev,
1544                           "VF %d DMA Q %d reports descriptor fetch error.\n",
1545                           vf->index, rel);
1546         queue_work(vfdi_workqueue, &vf->reset_work);
1547 }
1548 
1549 /* Reset all VFs */
1550 void efx_siena_sriov_reset(struct efx_nic *efx)
1551 {
1552         struct siena_nic_data *nic_data = efx->nic_data;
1553         unsigned int vf_i;
1554         struct efx_buffer buf;
1555         struct siena_vf *vf;
1556 
1557         ASSERT_RTNL();
1558 
1559         if (efx->vf_init_count == 0)
1560                 return;
1561 
1562         efx_siena_sriov_usrev(efx, true);
1563         (void)efx_siena_sriov_cmd(efx, true, NULL, NULL);
1564 
1565         if (efx_nic_alloc_buffer(efx, &buf, EFX_PAGE_SIZE, GFP_NOIO))
1566                 return;
1567 
1568         for (vf_i = 0; vf_i < efx->vf_init_count; ++vf_i) {
1569                 vf = nic_data->vf + vf_i;
1570                 efx_siena_sriov_reset_vf(vf, &buf);
1571         }
1572 
1573         efx_nic_free_buffer(efx, &buf);
1574 }
1575 
1576 int efx_init_sriov(void)
1577 {
1578         /* A single-threaded workqueue is sufficient: efx_siena_sriov_vfdi() and
1579          * efx_siena_sriov_peer_work() spend almost all their time sleeping for
1580          * MCDI to complete anyway.
1581          */
1582         vfdi_workqueue = create_singlethread_workqueue("sfc_vfdi");
1583         if (!vfdi_workqueue)
1584                 return -ENOMEM;
1585         return 0;
1586 }
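
A note on the concurrency this gives: create_singlethread_workqueue() creates
an ordered workqueue (max_active == 1), so the VFDI request, peer-list and
VF-reset work items queued on vfdi_workqueue run strictly one at a time, in
queueing order. A rough modern equivalent, shown only as a sketch:

/* Sketch: on current kernels create_singlethread_workqueue("sfc_vfdi") is
 * roughly equivalent to
 *
 *   vfdi_workqueue = alloc_ordered_workqueue("sfc_vfdi", WQ_MEM_RECLAIM);
 *
 * i.e. an ordered, memory-reclaim-capable workqueue.
 */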
1587 
1588 void efx_fini_sriov(void)
1589 {
1590         destroy_workqueue(vfdi_workqueue);
1591 }
1592 
1593 int efx_siena_sriov_set_vf_mac(struct efx_nic *efx, int vf_i, u8 *mac)
1594 {
1595         struct siena_nic_data *nic_data = efx->nic_data;
1596         struct siena_vf *vf;
1597 
1598         if (vf_i >= efx->vf_init_count)
1599                 return -EINVAL;
1600         vf = nic_data->vf + vf_i;
1601 
1602         mutex_lock(&vf->status_lock);
1603         ether_addr_copy(vf->addr.mac_addr, mac);
1604         __efx_siena_sriov_update_vf_addr(vf);
1605         mutex_unlock(&vf->status_lock);
1606 
1607         return 0;
1608 }
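
This function, together with the VLAN, spoof-check and get-config helpers
below, is the back end of the standard "ip link ... vf N ..." administration
interface. The exact wiring lives outside this file; the sketch below assumes
the usual sfc pattern of dispatching from a generic netdev callback through
the per-NIC-type method table, so treat the intermediate names as illustrative.

/* Illustrative wiring sketch (simplified, not verbatim from the driver):
 *
 *   .ndo_set_vf_mac = efx_sriov_set_vf_mac   (generic wrapper, efx.c)
 *       -> efx->type->sriov_set_vf_mac(efx, vf_i, mac)
 *           -> efx_siena_sriov_set_vf_mac()  on Siena
 *
 * so "ip link set eth0 vf 1 mac 02:00:00:00:00:01" ends up here with
 * vf_i == 1.
 */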
1609 
1610 int efx_siena_sriov_set_vf_vlan(struct efx_nic *efx, int vf_i,
1611                                 u16 vlan, u8 qos)
1612 {
1613         struct siena_nic_data *nic_data = efx->nic_data;
1614         struct siena_vf *vf;
1615         u16 tci;
1616 
1617         if (vf_i >= efx->vf_init_count)
1618                 return -EINVAL;
1619         vf = nic_data->vf + vf_i;
1620 
1621         mutex_lock(&vf->status_lock);
1622         tci = (vlan & VLAN_VID_MASK) | ((qos & 0x7) << VLAN_PRIO_SHIFT);
1623         vf->addr.tci = htons(tci);
1624         __efx_siena_sriov_update_vf_addr(vf);
1625         mutex_unlock(&vf->status_lock);
1626 
1627         return 0;
1628 }
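
The TCI packing above uses the standard 802.1Q layout: the low 12 bits carry
the VLAN ID and the top 3 bits the priority. A worked example with
illustrative values:

/* VLAN_VID_MASK is 0x0fff and VLAN_PRIO_SHIFT is 13, so an illustrative
 * request of vlan = 100, qos = 5 packs as
 *
 *   tci = (100 & 0x0fff) | ((5 & 0x7) << 13)
 *       = 0x0064 | 0xa000
 *       = 0xa064
 *
 * stored in network byte order in vf->addr.tci.
 * efx_siena_sriov_get_vf_config() below reverses the same packing when
 * reporting the configuration.
 */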
1629 
1630 int efx_siena_sriov_set_vf_spoofchk(struct efx_nic *efx, int vf_i,
1631                                     bool spoofchk)
1632 {
1633         struct siena_nic_data *nic_data = efx->nic_data;
1634         struct siena_vf *vf;
1635         int rc;
1636 
1637         if (vf_i >= efx->vf_init_count)
1638                 return -EINVAL;
1639         vf = nic_data->vf + vf_i;
1640 
1641         mutex_lock(&vf->txq_lock);
1642         if (vf->txq_count == 0) {
1643                 vf->tx_filter_mode =
1644                         spoofchk ? VF_TX_FILTER_ON : VF_TX_FILTER_OFF;
1645                 rc = 0;
1646         } else {
1647                 /* This cannot be changed while TX queues are running */
1648                 rc = -EBUSY;
1649         }
1650         mutex_unlock(&vf->txq_lock);
1651         return rc;
1652 }
1653 
1654 int efx_siena_sriov_get_vf_config(struct efx_nic *efx, int vf_i,
1655                                   struct ifla_vf_info *ivi)
1656 {
1657         struct siena_nic_data *nic_data = efx->nic_data;
1658         struct siena_vf *vf;
1659         u16 tci;
1660 
1661         if (vf_i >= efx->vf_init_count)
1662                 return -EINVAL;
1663         vf = nic_data->vf + vf_i;
1664 
1665         ivi->vf = vf_i;
1666         ether_addr_copy(ivi->mac, vf->addr.mac_addr);
1667         ivi->max_tx_rate = 0;
1668         ivi->min_tx_rate = 0;
1669         tci = ntohs(vf->addr.tci);
1670         ivi->vlan = tci & VLAN_VID_MASK;
1671         ivi->qos = (tci >> VLAN_PRIO_SHIFT) & 0x7;
1672         ivi->spoofchk = vf->tx_filter_mode == VF_TX_FILTER_ON;
1673 
1674         return 0;
1675 }
1676 
1677 bool efx_siena_sriov_wanted(struct efx_nic *efx)
1678 {
1679         return efx->vf_count != 0;
1680 }
1681 
1682 int efx_siena_sriov_configure(struct efx_nic *efx, int num_vfs)
1683 {
1684         return 0;
1685 }
