root/drivers/net/vmxnet3/vmxnet3_ethtool.c


DEFINITIONS

This source file includes the following definitions:
  1. vmxnet3_get_stats64
  2. vmxnet3_get_sset_count
  3. vmxnet3_get_regs_len
  4. vmxnet3_get_drvinfo
  5. vmxnet3_get_strings
  6. vmxnet3_fix_features
  7. vmxnet3_set_features
  8. vmxnet3_get_ethtool_stats
  9. vmxnet3_get_regs
  10. vmxnet3_get_wol
  11. vmxnet3_set_wol
  12. vmxnet3_get_link_ksettings
  13. vmxnet3_get_ringparam
  14. vmxnet3_set_ringparam
  15. vmxnet3_get_rxnfc
  16. vmxnet3_get_rss_indir_size
  17. vmxnet3_get_rss
  18. vmxnet3_set_rss
  19. vmxnet3_get_coalesce
  20. vmxnet3_set_coalesce
  21. vmxnet3_set_ethtool_ops

/*
 * Linux driver for VMware's vmxnet3 ethernet NIC.
 *
 * Copyright (C) 2008-2016, VMware, Inc. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; version 2 of the License and no later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Maintained by: pv-drivers@vmware.com
 *
 */


#include "vmxnet3_int.h"

struct vmxnet3_stat_desc {
        char desc[ETH_GSTRING_LEN];
        int  offset;
};


/* per tq stats maintained by the device */
static const struct vmxnet3_stat_desc
vmxnet3_tq_dev_stats[] = {
        /* description,         offset */
        { "Tx Queue#",        0 },
        { "  TSO pkts tx",      offsetof(struct UPT1_TxStats, TSOPktsTxOK) },
        { "  TSO bytes tx",     offsetof(struct UPT1_TxStats, TSOBytesTxOK) },
        { "  ucast pkts tx",    offsetof(struct UPT1_TxStats, ucastPktsTxOK) },
        { "  ucast bytes tx",   offsetof(struct UPT1_TxStats, ucastBytesTxOK) },
        { "  mcast pkts tx",    offsetof(struct UPT1_TxStats, mcastPktsTxOK) },
        { "  mcast bytes tx",   offsetof(struct UPT1_TxStats, mcastBytesTxOK) },
        { "  bcast pkts tx",    offsetof(struct UPT1_TxStats, bcastPktsTxOK) },
        { "  bcast bytes tx",   offsetof(struct UPT1_TxStats, bcastBytesTxOK) },
        { "  pkts tx err",      offsetof(struct UPT1_TxStats, pktsTxError) },
        { "  pkts tx discard",  offsetof(struct UPT1_TxStats, pktsTxDiscard) },
};

/* per tq stats maintained by the driver */
static const struct vmxnet3_stat_desc
vmxnet3_tq_driver_stats[] = {
        /* description,         offset */
        { "  drv dropped tx total",     offsetof(struct vmxnet3_tq_driver_stats,
                                                 drop_total) },
        { "     too many frags", offsetof(struct vmxnet3_tq_driver_stats,
                                          drop_too_many_frags) },
        { "     giant hdr",     offsetof(struct vmxnet3_tq_driver_stats,
                                         drop_oversized_hdr) },
        { "     hdr err",       offsetof(struct vmxnet3_tq_driver_stats,
                                         drop_hdr_inspect_err) },
        { "     tso",           offsetof(struct vmxnet3_tq_driver_stats,
                                         drop_tso) },
        { "  ring full",        offsetof(struct vmxnet3_tq_driver_stats,
                                         tx_ring_full) },
        { "  pkts linearized",  offsetof(struct vmxnet3_tq_driver_stats,
                                         linearized) },
        { "  hdr cloned",       offsetof(struct vmxnet3_tq_driver_stats,
                                         copy_skb_header) },
        { "  giant hdr",        offsetof(struct vmxnet3_tq_driver_stats,
                                         oversized_hdr) },
};

/* per rq stats maintained by the device */
static const struct vmxnet3_stat_desc
vmxnet3_rq_dev_stats[] = {
        { "Rx Queue#",        0 },
        { "  LRO pkts rx",      offsetof(struct UPT1_RxStats, LROPktsRxOK) },
        { "  LRO byte rx",      offsetof(struct UPT1_RxStats, LROBytesRxOK) },
        { "  ucast pkts rx",    offsetof(struct UPT1_RxStats, ucastPktsRxOK) },
        { "  ucast bytes rx",   offsetof(struct UPT1_RxStats, ucastBytesRxOK) },
        { "  mcast pkts rx",    offsetof(struct UPT1_RxStats, mcastPktsRxOK) },
        { "  mcast bytes rx",   offsetof(struct UPT1_RxStats, mcastBytesRxOK) },
        { "  bcast pkts rx",    offsetof(struct UPT1_RxStats, bcastPktsRxOK) },
        { "  bcast bytes rx",   offsetof(struct UPT1_RxStats, bcastBytesRxOK) },
        { "  pkts rx OOB",      offsetof(struct UPT1_RxStats, pktsRxOutOfBuf) },
        { "  pkts rx err",      offsetof(struct UPT1_RxStats, pktsRxError) },
};

/* per rq stats maintained by the driver */
static const struct vmxnet3_stat_desc
vmxnet3_rq_driver_stats[] = {
        /* description,         offset */
        { "  drv dropped rx total", offsetof(struct vmxnet3_rq_driver_stats,
                                             drop_total) },
        { "     err",           offsetof(struct vmxnet3_rq_driver_stats,
                                         drop_err) },
        { "     fcs",           offsetof(struct vmxnet3_rq_driver_stats,
                                         drop_fcs) },
        { "  rx buf alloc fail", offsetof(struct vmxnet3_rq_driver_stats,
                                          rx_buf_alloc_failure) },
};

/* global stats maintained by the driver */
static const struct vmxnet3_stat_desc
vmxnet3_global_stats[] = {
        /* description,         offset */
        { "tx timeout count",   offsetof(struct vmxnet3_adapter,
                                         tx_timeout_count) }
};


void
vmxnet3_get_stats64(struct net_device *netdev,
                    struct rtnl_link_stats64 *stats)
{
        struct vmxnet3_adapter *adapter;
        struct vmxnet3_tq_driver_stats *drvTxStats;
        struct vmxnet3_rq_driver_stats *drvRxStats;
        struct UPT1_TxStats *devTxStats;
        struct UPT1_RxStats *devRxStats;
        unsigned long flags;
        int i;

        adapter = netdev_priv(netdev);

        /* Collect the dev stats into the shared area */
        spin_lock_irqsave(&adapter->cmd_lock, flags);
        VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS);
        spin_unlock_irqrestore(&adapter->cmd_lock, flags);

        for (i = 0; i < adapter->num_tx_queues; i++) {
                devTxStats = &adapter->tqd_start[i].stats;
                drvTxStats = &adapter->tx_queue[i].stats;
                stats->tx_packets += devTxStats->ucastPktsTxOK +
                                     devTxStats->mcastPktsTxOK +
                                     devTxStats->bcastPktsTxOK;
                stats->tx_bytes += devTxStats->ucastBytesTxOK +
                                   devTxStats->mcastBytesTxOK +
                                   devTxStats->bcastBytesTxOK;
                stats->tx_errors += devTxStats->pktsTxError;
                stats->tx_dropped += drvTxStats->drop_total;
        }

        for (i = 0; i < adapter->num_rx_queues; i++) {
                devRxStats = &adapter->rqd_start[i].stats;
                drvRxStats = &adapter->rx_queue[i].stats;
                stats->rx_packets += devRxStats->ucastPktsRxOK +
                                     devRxStats->mcastPktsRxOK +
                                     devRxStats->bcastPktsRxOK;

                stats->rx_bytes += devRxStats->ucastBytesRxOK +
                                   devRxStats->mcastBytesRxOK +
                                   devRxStats->bcastBytesRxOK;

                stats->rx_errors += devRxStats->pktsRxError;
                stats->rx_dropped += drvRxStats->drop_total;
                stats->multicast += devRxStats->mcastPktsRxOK;
        }
}

static int
vmxnet3_get_sset_count(struct net_device *netdev, int sset)
{
        struct vmxnet3_adapter *adapter = netdev_priv(netdev);

        switch (sset) {
        case ETH_SS_STATS:
                return (ARRAY_SIZE(vmxnet3_tq_dev_stats) +
                        ARRAY_SIZE(vmxnet3_tq_driver_stats)) *
                       adapter->num_tx_queues +
                       (ARRAY_SIZE(vmxnet3_rq_dev_stats) +
                        ARRAY_SIZE(vmxnet3_rq_driver_stats)) *
                       adapter->num_rx_queues +
                       ARRAY_SIZE(vmxnet3_global_stats);
        default:
                return -EOPNOTSUPP;
        }
}
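
/* Worked example (illustrative, hypothetical queue counts): each Tx queue
 * contributes ARRAY_SIZE(vmxnet3_tq_dev_stats) +
 * ARRAY_SIZE(vmxnet3_tq_driver_stats) = 11 + 9 = 20 strings, each Rx queue
 * contributes 11 + 4 = 15, plus one global entry; an adapter with 4 Tx and
 * 4 Rx queues would therefore report 20 * 4 + 15 * 4 + 1 = 141 stats.
 */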


/* This is version 2 of the vmxnet3 ethtool_regs which goes hand in hand with
 * version 2 of the vmxnet3 support for ethtool(8) --register-dump.
 * Therefore, if any registers are added, removed or modified, then a version
 * bump and a corresponding change in the vmxnet3 support for ethtool(8)
 * --register-dump would be required.
 */
static int
vmxnet3_get_regs_len(struct net_device *netdev)
{
        struct vmxnet3_adapter *adapter = netdev_priv(netdev);

        return ((9 /* BAR1 registers */ +
                (1 + adapter->intr.num_intrs) +
                (1 + adapter->num_tx_queues * 17 /* Tx queue registers */) +
                (1 + adapter->num_rx_queues * 23 /* Rx queue registers */)) *
                sizeof(u32));
}
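
/* Worked example (illustrative, hypothetical configuration): with 4 Tx
 * queues, 4 Rx queues and 5 interrupt vectors the dump holds
 * 9 + (1 + 5) + (1 + 4 * 17) + (1 + 4 * 23) = 177 u32 words, so
 * vmxnet3_get_regs_len() returns 177 * 4 = 708 bytes.
 */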


static void
vmxnet3_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
{
        struct vmxnet3_adapter *adapter = netdev_priv(netdev);

        strlcpy(drvinfo->driver, vmxnet3_driver_name, sizeof(drvinfo->driver));

        strlcpy(drvinfo->version, VMXNET3_DRIVER_VERSION_REPORT,
                sizeof(drvinfo->version));

        strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
                sizeof(drvinfo->bus_info));
}


static void
vmxnet3_get_strings(struct net_device *netdev, u32 stringset, u8 *buf)
{
        struct vmxnet3_adapter *adapter = netdev_priv(netdev);

        if (stringset == ETH_SS_STATS) {
                int i, j;

                for (j = 0; j < adapter->num_tx_queues; j++) {
                        for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_dev_stats); i++) {
                                memcpy(buf, vmxnet3_tq_dev_stats[i].desc,
                                       ETH_GSTRING_LEN);
                                buf += ETH_GSTRING_LEN;
                        }
                        for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_driver_stats);
                             i++) {
                                memcpy(buf, vmxnet3_tq_driver_stats[i].desc,
                                       ETH_GSTRING_LEN);
                                buf += ETH_GSTRING_LEN;
                        }
                }

                for (j = 0; j < adapter->num_rx_queues; j++) {
                        for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_dev_stats); i++) {
                                memcpy(buf, vmxnet3_rq_dev_stats[i].desc,
                                       ETH_GSTRING_LEN);
                                buf += ETH_GSTRING_LEN;
                        }
                        for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_driver_stats);
                             i++) {
                                memcpy(buf, vmxnet3_rq_driver_stats[i].desc,
                                       ETH_GSTRING_LEN);
                                buf += ETH_GSTRING_LEN;
                        }
                }

                for (i = 0; i < ARRAY_SIZE(vmxnet3_global_stats); i++) {
                        memcpy(buf, vmxnet3_global_stats[i].desc,
                               ETH_GSTRING_LEN);
                        buf += ETH_GSTRING_LEN;
                }
        }
}

netdev_features_t vmxnet3_fix_features(struct net_device *netdev,
                                       netdev_features_t features)
{
        /* If Rx checksum is disabled, then LRO should also be disabled */
        if (!(features & NETIF_F_RXCSUM))
                features &= ~NETIF_F_LRO;

        return features;
}
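
/* Example (assuming standard ethtool(8) feature names, "ethX" being a
 * placeholder): "ethtool -K ethX rx off" clears NETIF_F_RXCSUM, so the
 * fixup above silently drops NETIF_F_LRO as well.
 */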

int vmxnet3_set_features(struct net_device *netdev, netdev_features_t features)
{
        struct vmxnet3_adapter *adapter = netdev_priv(netdev);
        unsigned long flags;
        netdev_features_t changed = features ^ netdev->features;

        if (changed & (NETIF_F_RXCSUM | NETIF_F_LRO |
                       NETIF_F_HW_VLAN_CTAG_RX)) {
                if (features & NETIF_F_RXCSUM)
                        adapter->shared->devRead.misc.uptFeatures |=
                        UPT1_F_RXCSUM;
                else
                        adapter->shared->devRead.misc.uptFeatures &=
                        ~UPT1_F_RXCSUM;

                /* update hardware LRO capability accordingly */
                if (features & NETIF_F_LRO)
                        adapter->shared->devRead.misc.uptFeatures |=
                                                        UPT1_F_LRO;
                else
                        adapter->shared->devRead.misc.uptFeatures &=
                                                        ~UPT1_F_LRO;

                if (features & NETIF_F_HW_VLAN_CTAG_RX)
                        adapter->shared->devRead.misc.uptFeatures |=
                        UPT1_F_RXVLAN;
                else
                        adapter->shared->devRead.misc.uptFeatures &=
                        ~UPT1_F_RXVLAN;

                spin_lock_irqsave(&adapter->cmd_lock, flags);
                VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
                                       VMXNET3_CMD_UPDATE_FEATURE);
                spin_unlock_irqrestore(&adapter->cmd_lock, flags);
        }
        return 0;
}
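
/* Example (illustrative, "ethX" is a placeholder): "ethtool -K ethX rxvlan
 * off" clears NETIF_F_HW_VLAN_CTAG_RX, so the code above removes
 * UPT1_F_RXVLAN from devRead.misc.uptFeatures and issues
 * VMXNET3_CMD_UPDATE_FEATURE so the device re-reads its configuration.
 */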

static void
vmxnet3_get_ethtool_stats(struct net_device *netdev,
                          struct ethtool_stats *stats, u64  *buf)
{
        struct vmxnet3_adapter *adapter = netdev_priv(netdev);
        unsigned long flags;
        u8 *base;
        int i;
        int j = 0;

        spin_lock_irqsave(&adapter->cmd_lock, flags);
        VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS);
        spin_unlock_irqrestore(&adapter->cmd_lock, flags);

        /* this does assume each counter is 64-bit wide */
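        /* Note: slot 0 of each per-queue block below carries the queue
         * number, so the device-stat loops start at index 1; the overall
         * layout must stay in step with vmxnet3_get_strings().
         */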
        for (j = 0; j < adapter->num_tx_queues; j++) {
                base = (u8 *)&adapter->tqd_start[j].stats;
                *buf++ = (u64)j;
                for (i = 1; i < ARRAY_SIZE(vmxnet3_tq_dev_stats); i++)
                        *buf++ = *(u64 *)(base +
                                          vmxnet3_tq_dev_stats[i].offset);

                base = (u8 *)&adapter->tx_queue[j].stats;
                for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_driver_stats); i++)
                        *buf++ = *(u64 *)(base +
                                          vmxnet3_tq_driver_stats[i].offset);
        }

        for (j = 0; j < adapter->num_rx_queues; j++) {
                base = (u8 *)&adapter->rqd_start[j].stats;
                *buf++ = (u64)j;
                for (i = 1; i < ARRAY_SIZE(vmxnet3_rq_dev_stats); i++)
                        *buf++ = *(u64 *)(base +
                                          vmxnet3_rq_dev_stats[i].offset);

                base = (u8 *)&adapter->rx_queue[j].stats;
                for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_driver_stats); i++)
                        *buf++ = *(u64 *)(base +
                                          vmxnet3_rq_driver_stats[i].offset);
        }

        base = (u8 *)adapter;
        for (i = 0; i < ARRAY_SIZE(vmxnet3_global_stats); i++)
                *buf++ = *(u64 *)(base + vmxnet3_global_stats[i].offset);
}


/* This is version 2 of the vmxnet3 ethtool_regs which goes hand in hand with
 * version 2 of the vmxnet3 support for ethtool(8) --register-dump.
 * Therefore, if any registers are added, removed or modified, then a version
 * bump and a corresponding change in the vmxnet3 support for ethtool(8)
 * --register-dump would be required.
 */
static void
vmxnet3_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p)
{
        struct vmxnet3_adapter *adapter = netdev_priv(netdev);
        u32 *buf = p;
        int i = 0, j = 0;

        memset(p, 0, vmxnet3_get_regs_len(netdev));

        regs->version = 2;

        /* Update vmxnet3_get_regs_len if we want to dump more registers */

        buf[j++] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_VRRS);
        buf[j++] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_UVRS);
        buf[j++] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_DSAL);
        buf[j++] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_DSAH);
        buf[j++] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
        buf[j++] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_MACL);
        buf[j++] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_MACH);
        buf[j++] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_ICR);
        buf[j++] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_ECR);

        buf[j++] = adapter->intr.num_intrs;
        for (i = 0; i < adapter->intr.num_intrs; i++) {
                buf[j++] = VMXNET3_READ_BAR0_REG(adapter, VMXNET3_REG_IMR
                                                 + i * VMXNET3_REG_ALIGN);
        }

        buf[j++] = adapter->num_tx_queues;
        for (i = 0; i < adapter->num_tx_queues; i++) {
                struct vmxnet3_tx_queue *tq = &adapter->tx_queue[i];

                buf[j++] = VMXNET3_READ_BAR0_REG(adapter, VMXNET3_REG_TXPROD +
                                                 i * VMXNET3_REG_ALIGN);

                buf[j++] = VMXNET3_GET_ADDR_LO(tq->tx_ring.basePA);
                buf[j++] = VMXNET3_GET_ADDR_HI(tq->tx_ring.basePA);
                buf[j++] = tq->tx_ring.size;
                buf[j++] = tq->tx_ring.next2fill;
                buf[j++] = tq->tx_ring.next2comp;
                buf[j++] = tq->tx_ring.gen;

                buf[j++] = VMXNET3_GET_ADDR_LO(tq->data_ring.basePA);
                buf[j++] = VMXNET3_GET_ADDR_HI(tq->data_ring.basePA);
                buf[j++] = tq->data_ring.size;
                buf[j++] = tq->txdata_desc_size;

                buf[j++] = VMXNET3_GET_ADDR_LO(tq->comp_ring.basePA);
                buf[j++] = VMXNET3_GET_ADDR_HI(tq->comp_ring.basePA);
                buf[j++] = tq->comp_ring.size;
                buf[j++] = tq->comp_ring.next2proc;
                buf[j++] = tq->comp_ring.gen;

                buf[j++] = tq->stopped;
        }

        buf[j++] = adapter->num_rx_queues;
        for (i = 0; i < adapter->num_rx_queues; i++) {
                struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];

                buf[j++] = VMXNET3_READ_BAR0_REG(adapter, VMXNET3_REG_RXPROD +
                                                 i * VMXNET3_REG_ALIGN);
                buf[j++] = VMXNET3_READ_BAR0_REG(adapter, VMXNET3_REG_RXPROD2 +
                                                 i * VMXNET3_REG_ALIGN);

                buf[j++] = VMXNET3_GET_ADDR_LO(rq->rx_ring[0].basePA);
                buf[j++] = VMXNET3_GET_ADDR_HI(rq->rx_ring[0].basePA);
                buf[j++] = rq->rx_ring[0].size;
                buf[j++] = rq->rx_ring[0].next2fill;
                buf[j++] = rq->rx_ring[0].next2comp;
                buf[j++] = rq->rx_ring[0].gen;

                buf[j++] = VMXNET3_GET_ADDR_LO(rq->rx_ring[1].basePA);
                buf[j++] = VMXNET3_GET_ADDR_HI(rq->rx_ring[1].basePA);
                buf[j++] = rq->rx_ring[1].size;
                buf[j++] = rq->rx_ring[1].next2fill;
                buf[j++] = rq->rx_ring[1].next2comp;
                buf[j++] = rq->rx_ring[1].gen;

                buf[j++] = VMXNET3_GET_ADDR_LO(rq->data_ring.basePA);
                buf[j++] = VMXNET3_GET_ADDR_HI(rq->data_ring.basePA);
                buf[j++] = rq->rx_ring[0].size;
                buf[j++] = rq->data_ring.desc_size;

                buf[j++] = VMXNET3_GET_ADDR_LO(rq->comp_ring.basePA);
                buf[j++] = VMXNET3_GET_ADDR_HI(rq->comp_ring.basePA);
                buf[j++] = rq->comp_ring.size;
                buf[j++] = rq->comp_ring.next2proc;
                buf[j++] = rq->comp_ring.gen;
        }
}


static void
vmxnet3_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
        struct vmxnet3_adapter *adapter = netdev_priv(netdev);

        wol->supported = WAKE_UCAST | WAKE_ARP | WAKE_MAGIC;
        wol->wolopts = adapter->wol;
}


static int
vmxnet3_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
        struct vmxnet3_adapter *adapter = netdev_priv(netdev);

        if (wol->wolopts & (WAKE_PHY | WAKE_MCAST | WAKE_BCAST |
                            WAKE_MAGICSECURE)) {
                return -EOPNOTSUPP;
        }

        adapter->wol = wol->wolopts;

        device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);

        return 0;
}
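
/* Example (assuming the usual ethtool(8) wake-on-LAN letters, "ethX" being
 * a placeholder): "ethtool -s ethX wol uag" (unicast, ARP and magic packet)
 * is accepted, while any request including, say, WAKE_MCAST is rejected
 * with -EOPNOTSUPP above.
 */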


static int
vmxnet3_get_link_ksettings(struct net_device *netdev,
                           struct ethtool_link_ksettings *ecmd)
{
        struct vmxnet3_adapter *adapter = netdev_priv(netdev);

        ethtool_link_ksettings_zero_link_mode(ecmd, supported);
        ethtool_link_ksettings_add_link_mode(ecmd, supported, 10000baseT_Full);
        ethtool_link_ksettings_add_link_mode(ecmd, supported, 1000baseT_Full);
        ethtool_link_ksettings_add_link_mode(ecmd, supported, TP);
        ethtool_link_ksettings_zero_link_mode(ecmd, advertising);
        ethtool_link_ksettings_add_link_mode(ecmd, advertising, TP);
        ecmd->base.port = PORT_TP;

        if (adapter->link_speed) {
                ecmd->base.speed = adapter->link_speed;
                ecmd->base.duplex = DUPLEX_FULL;
        } else {
                ecmd->base.speed = SPEED_UNKNOWN;
                ecmd->base.duplex = DUPLEX_UNKNOWN;
        }
        return 0;
}


static void
vmxnet3_get_ringparam(struct net_device *netdev,
                      struct ethtool_ringparam *param)
{
        struct vmxnet3_adapter *adapter = netdev_priv(netdev);

        param->rx_max_pending = VMXNET3_RX_RING_MAX_SIZE;
        param->tx_max_pending = VMXNET3_TX_RING_MAX_SIZE;
        param->rx_mini_max_pending = VMXNET3_VERSION_GE_3(adapter) ?
                VMXNET3_RXDATA_DESC_MAX_SIZE : 0;
        param->rx_jumbo_max_pending = VMXNET3_RX_RING2_MAX_SIZE;

        param->rx_pending = adapter->rx_ring_size;
        param->tx_pending = adapter->tx_ring_size;
        param->rx_mini_pending = VMXNET3_VERSION_GE_3(adapter) ?
                adapter->rxdata_desc_size : 0;
        param->rx_jumbo_pending = adapter->rx_ring2_size;
}
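
/* Mapping for "ethtool -g ethX" (illustrative, "ethX" is a placeholder):
 * the "RX" and "RX Jumbo" entries report ring 0 and ring 2 respectively,
 * while "RX Mini" is reused to expose the Rx data-ring descriptor size on
 * version 3 and newer devices.
 */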


static int
vmxnet3_set_ringparam(struct net_device *netdev,
                      struct ethtool_ringparam *param)
{
        struct vmxnet3_adapter *adapter = netdev_priv(netdev);
        u32 new_tx_ring_size, new_rx_ring_size, new_rx_ring2_size;
        u16 new_rxdata_desc_size;
        u32 sz;
        int err = 0;

        if (param->tx_pending == 0 || param->tx_pending >
                                                VMXNET3_TX_RING_MAX_SIZE)
                return -EINVAL;

        if (param->rx_pending == 0 || param->rx_pending >
                                                VMXNET3_RX_RING_MAX_SIZE)
                return -EINVAL;

        if (param->rx_jumbo_pending == 0 ||
            param->rx_jumbo_pending > VMXNET3_RX_RING2_MAX_SIZE)
                return -EINVAL;

        /* if adapter not yet initialized, do nothing */
        if (adapter->rx_buf_per_pkt == 0) {
                netdev_err(netdev, "adapter not completely initialized, "
                           "ring size cannot be changed yet\n");
                return -EOPNOTSUPP;
        }

        if (VMXNET3_VERSION_GE_3(adapter)) {
                /* rx_mini_pending is unsigned, so only the upper bound
                 * needs checking here.
                 */
                if (param->rx_mini_pending > VMXNET3_RXDATA_DESC_MAX_SIZE)
                        return -EINVAL;
        } else if (param->rx_mini_pending != 0) {
                return -EINVAL;
        }

        /* round it up to a multiple of VMXNET3_RING_SIZE_ALIGN */
        new_tx_ring_size = (param->tx_pending + VMXNET3_RING_SIZE_MASK) &
                                                        ~VMXNET3_RING_SIZE_MASK;
        new_tx_ring_size = min_t(u32, new_tx_ring_size,
                                 VMXNET3_TX_RING_MAX_SIZE);
        if (new_tx_ring_size > VMXNET3_TX_RING_MAX_SIZE || (new_tx_ring_size %
                                                VMXNET3_RING_SIZE_ALIGN) != 0)
                return -EINVAL;

        /* ring0 has to be a multiple of
         * rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN
         */
        sz = adapter->rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN;
        new_rx_ring_size = (param->rx_pending + sz - 1) / sz * sz;
        new_rx_ring_size = min_t(u32, new_rx_ring_size,
                                 VMXNET3_RX_RING_MAX_SIZE / sz * sz);
        if (new_rx_ring_size > VMXNET3_RX_RING_MAX_SIZE || (new_rx_ring_size %
                                                           sz) != 0)
                return -EINVAL;
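
        /* Worked example of the rounding above (illustrative values only,
         * assuming the usual VMXNET3_RING_SIZE_ALIGN of 32): a request of
         * tx_pending = 1000 becomes (1000 + 31) & ~31 = 1024, and with an
         * assumed rx_buf_per_pkt of 3 (sz = 96) a request of rx_pending =
         * 1000 is rounded up to the next multiple of 96, i.e. 1056, before
         * being clamped to VMXNET3_RX_RING_MAX_SIZE / 96 * 96.
         */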

        /* ring2 has to be a multiple of VMXNET3_RING_SIZE_ALIGN */
        new_rx_ring2_size = (param->rx_jumbo_pending + VMXNET3_RING_SIZE_MASK) &
                                ~VMXNET3_RING_SIZE_MASK;
        new_rx_ring2_size = min_t(u32, new_rx_ring2_size,
                                  VMXNET3_RX_RING2_MAX_SIZE);

        /* rx data ring buffer size has to be a multiple of
         * VMXNET3_RXDATA_DESC_SIZE_ALIGN
         */
        new_rxdata_desc_size =
                (param->rx_mini_pending + VMXNET3_RXDATA_DESC_SIZE_MASK) &
                ~VMXNET3_RXDATA_DESC_SIZE_MASK;
        new_rxdata_desc_size = min_t(u16, new_rxdata_desc_size,
                                     VMXNET3_RXDATA_DESC_MAX_SIZE);

        if (new_tx_ring_size == adapter->tx_ring_size &&
            new_rx_ring_size == adapter->rx_ring_size &&
            new_rx_ring2_size == adapter->rx_ring2_size &&
            new_rxdata_desc_size == adapter->rxdata_desc_size) {
                return 0;
        }

        /*
         * Reset_work may be in the middle of resetting the device, wait for its
         * completion.
         */
        while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
                usleep_range(1000, 2000);

        if (netif_running(netdev)) {
                vmxnet3_quiesce_dev(adapter);
                vmxnet3_reset_dev(adapter);

                /* recreate the rx queue and the tx queue based on the
                 * new sizes */
                vmxnet3_tq_destroy_all(adapter);
                vmxnet3_rq_destroy_all(adapter);

                err = vmxnet3_create_queues(adapter, new_tx_ring_size,
                                            new_rx_ring_size, new_rx_ring2_size,
                                            adapter->txdata_desc_size,
                                            new_rxdata_desc_size);
                if (err) {
                        /* failed, most likely because of OOM, try default
                         * size */
                        netdev_err(netdev, "failed to apply new sizes, "
                                   "try the default ones\n");
                        new_rx_ring_size = VMXNET3_DEF_RX_RING_SIZE;
                        new_rx_ring2_size = VMXNET3_DEF_RX_RING2_SIZE;
                        new_tx_ring_size = VMXNET3_DEF_TX_RING_SIZE;
                        new_rxdata_desc_size = VMXNET3_VERSION_GE_3(adapter) ?
                                VMXNET3_DEF_RXDATA_DESC_SIZE : 0;

                        err = vmxnet3_create_queues(adapter,
                                                    new_tx_ring_size,
                                                    new_rx_ring_size,
                                                    new_rx_ring2_size,
                                                    adapter->txdata_desc_size,
                                                    new_rxdata_desc_size);
                        if (err) {
                                netdev_err(netdev, "failed to create queues "
                                           "with default sizes. Closing it\n");
                                goto out;
                        }
                }

                err = vmxnet3_activate_dev(adapter);
                if (err)
                        netdev_err(netdev, "failed to re-activate, error %d."
                                   " Closing it\n", err);
        }
        adapter->tx_ring_size = new_tx_ring_size;
        adapter->rx_ring_size = new_rx_ring_size;
        adapter->rx_ring2_size = new_rx_ring2_size;
        adapter->rxdata_desc_size = new_rxdata_desc_size;

out:
        clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
        if (err)
                vmxnet3_force_close(adapter);

        return err;
}


static int
vmxnet3_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info,
                  u32 *rules)
{
        struct vmxnet3_adapter *adapter = netdev_priv(netdev);

        switch (info->cmd) {
        case ETHTOOL_GRXRINGS:
                info->data = adapter->num_rx_queues;
                return 0;
        }
        return -EOPNOTSUPP;
}

#ifdef VMXNET3_RSS
static u32
vmxnet3_get_rss_indir_size(struct net_device *netdev)
{
        struct vmxnet3_adapter *adapter = netdev_priv(netdev);
        struct UPT1_RSSConf *rssConf = adapter->rss_conf;

        return rssConf->indTableSize;
}

static int
vmxnet3_get_rss(struct net_device *netdev, u32 *p, u8 *key, u8 *hfunc)
{
        struct vmxnet3_adapter *adapter = netdev_priv(netdev);
        struct UPT1_RSSConf *rssConf = adapter->rss_conf;
        unsigned int n = rssConf->indTableSize;

        if (hfunc)
                *hfunc = ETH_RSS_HASH_TOP;
        if (!p)
                return 0;
        while (n--)
                p[n] = rssConf->indTable[n];
        return 0;
}

static int
vmxnet3_set_rss(struct net_device *netdev, const u32 *p, const u8 *key,
                const u8 hfunc)
{
        unsigned int i;
        unsigned long flags;
        struct vmxnet3_adapter *adapter = netdev_priv(netdev);
        struct UPT1_RSSConf *rssConf = adapter->rss_conf;

        /* We do not allow change in unsupported parameters */
        if (key ||
            (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
                return -EOPNOTSUPP;
        if (!p)
                return 0;
        for (i = 0; i < rssConf->indTableSize; i++)
                rssConf->indTable[i] = p[i];

        spin_lock_irqsave(&adapter->cmd_lock, flags);
        VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
                               VMXNET3_CMD_UPDATE_RSSIDT);
        spin_unlock_irqrestore(&adapter->cmd_lock, flags);

        return 0;
}
#endif
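
/* Usage note (assuming standard ethtool(8) RSS options, "ethX" being a
 * placeholder): only the indirection table is writable here, e.g.
 * "ethtool -X ethX equal 4" spreads it over four Rx queues, and
 * "ethtool -x ethX" reads it back via vmxnet3_get_rss(); the hash key and
 * hash function cannot be changed.
 */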

static int
vmxnet3_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec)
{
        struct vmxnet3_adapter *adapter = netdev_priv(netdev);

        if (!VMXNET3_VERSION_GE_3(adapter))
                return -EOPNOTSUPP;

        switch (adapter->coal_conf->coalMode) {
        case VMXNET3_COALESCE_DISABLED:
                /* struct ethtool_coalesce is already initialized to 0 */
                break;
        case VMXNET3_COALESCE_ADAPT:
                ec->use_adaptive_rx_coalesce = true;
                break;
        case VMXNET3_COALESCE_STATIC:
                ec->tx_max_coalesced_frames =
                        adapter->coal_conf->coalPara.coalStatic.tx_comp_depth;
                ec->rx_max_coalesced_frames =
                        adapter->coal_conf->coalPara.coalStatic.rx_depth;
                break;
        case VMXNET3_COALESCE_RBC: {
                u32 rbc_rate;

                rbc_rate = adapter->coal_conf->coalPara.coalRbc.rbc_rate;
                ec->rx_coalesce_usecs = VMXNET3_COAL_RBC_USECS(rbc_rate);
        }
                break;
        default:
                return -EOPNOTSUPP;
        }

        return 0;
}

static int
vmxnet3_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec)
{
        struct vmxnet3_adapter *adapter = netdev_priv(netdev);
        struct Vmxnet3_DriverShared *shared = adapter->shared;
        union Vmxnet3_CmdInfo *cmdInfo = &shared->cu.cmdInfo;
        unsigned long flags;

        if (!VMXNET3_VERSION_GE_3(adapter))
                return -EOPNOTSUPP;

        if (ec->rx_coalesce_usecs_irq ||
            ec->rx_max_coalesced_frames_irq ||
            ec->tx_coalesce_usecs ||
            ec->tx_coalesce_usecs_irq ||
            ec->tx_max_coalesced_frames_irq ||
            ec->stats_block_coalesce_usecs ||
            ec->use_adaptive_tx_coalesce ||
            ec->pkt_rate_low ||
            ec->rx_coalesce_usecs_low ||
            ec->rx_max_coalesced_frames_low ||
            ec->tx_coalesce_usecs_low ||
            ec->tx_max_coalesced_frames_low ||
            ec->pkt_rate_high ||
            ec->rx_coalesce_usecs_high ||
            ec->rx_max_coalesced_frames_high ||
            ec->tx_coalesce_usecs_high ||
            ec->tx_max_coalesced_frames_high ||
            ec->rate_sample_interval) {
                return -EINVAL;
        }

        if ((ec->rx_coalesce_usecs == 0) &&
            (ec->use_adaptive_rx_coalesce == 0) &&
            (ec->tx_max_coalesced_frames == 0) &&
            (ec->rx_max_coalesced_frames == 0)) {
                memset(adapter->coal_conf, 0, sizeof(*adapter->coal_conf));
                adapter->coal_conf->coalMode = VMXNET3_COALESCE_DISABLED;
                goto done;
        }

        if (ec->rx_coalesce_usecs != 0) {
                u32 rbc_rate;

                if ((ec->use_adaptive_rx_coalesce != 0) ||
                    (ec->tx_max_coalesced_frames != 0) ||
                    (ec->rx_max_coalesced_frames != 0)) {
                        return -EINVAL;
                }

                rbc_rate = VMXNET3_COAL_RBC_RATE(ec->rx_coalesce_usecs);
                if (rbc_rate < VMXNET3_COAL_RBC_MIN_RATE ||
                    rbc_rate > VMXNET3_COAL_RBC_MAX_RATE) {
                        return -EINVAL;
                }

                memset(adapter->coal_conf, 0, sizeof(*adapter->coal_conf));
                adapter->coal_conf->coalMode = VMXNET3_COALESCE_RBC;
                adapter->coal_conf->coalPara.coalRbc.rbc_rate = rbc_rate;
                goto done;
        }

        if (ec->use_adaptive_rx_coalesce != 0) {
                if ((ec->rx_coalesce_usecs != 0) ||
                    (ec->tx_max_coalesced_frames != 0) ||
                    (ec->rx_max_coalesced_frames != 0)) {
                        return -EINVAL;
                }
                memset(adapter->coal_conf, 0, sizeof(*adapter->coal_conf));
                adapter->coal_conf->coalMode = VMXNET3_COALESCE_ADAPT;
                goto done;
        }

        if ((ec->tx_max_coalesced_frames != 0) ||
            (ec->rx_max_coalesced_frames != 0)) {
                if ((ec->rx_coalesce_usecs != 0) ||
                    (ec->use_adaptive_rx_coalesce != 0)) {
                        return -EINVAL;
                }

                if ((ec->tx_max_coalesced_frames >
                    VMXNET3_COAL_STATIC_MAX_DEPTH) ||
                    (ec->rx_max_coalesced_frames >
                     VMXNET3_COAL_STATIC_MAX_DEPTH)) {
                        return -EINVAL;
                }

                memset(adapter->coal_conf, 0, sizeof(*adapter->coal_conf));
                adapter->coal_conf->coalMode = VMXNET3_COALESCE_STATIC;

                adapter->coal_conf->coalPara.coalStatic.tx_comp_depth =
                        (ec->tx_max_coalesced_frames ?
                         ec->tx_max_coalesced_frames :
                         VMXNET3_COAL_STATIC_DEFAULT_DEPTH);

                adapter->coal_conf->coalPara.coalStatic.rx_depth =
                        (ec->rx_max_coalesced_frames ?
                         ec->rx_max_coalesced_frames :
                         VMXNET3_COAL_STATIC_DEFAULT_DEPTH);

                adapter->coal_conf->coalPara.coalStatic.tx_depth =
                         VMXNET3_COAL_STATIC_DEFAULT_DEPTH;
                goto done;
        }

done:
        adapter->default_coal_mode = false;
        if (netif_running(netdev)) {
                spin_lock_irqsave(&adapter->cmd_lock, flags);
                cmdInfo->varConf.confVer = 1;
                cmdInfo->varConf.confLen =
                        cpu_to_le32(sizeof(*adapter->coal_conf));
                cmdInfo->varConf.confPA  = cpu_to_le64(adapter->coal_conf_pa);
                VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
                                       VMXNET3_CMD_SET_COALESCE);
                spin_unlock_irqrestore(&adapter->cmd_lock, flags);
        }

        return 0;
}
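
/* Usage examples (assuming standard "ethtool -C" semantics, "ethX" being a
 * placeholder): on a version 3 or newer device "ethtool -C ethX adaptive-rx
 * on" selects VMXNET3_COALESCE_ADAPT, "ethtool -C ethX rx-usecs 50" selects
 * the rate-based VMXNET3_COALESCE_RBC mode, and "ethtool -C ethX rx-frames 8
 * tx-frames 8" selects VMXNET3_COALESCE_STATIC; the modes are mutually
 * exclusive, and setting all four parameters to zero disables coalescing.
 */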

static const struct ethtool_ops vmxnet3_ethtool_ops = {
        .get_drvinfo       = vmxnet3_get_drvinfo,
        .get_regs_len      = vmxnet3_get_regs_len,
        .get_regs          = vmxnet3_get_regs,
        .get_wol           = vmxnet3_get_wol,
        .set_wol           = vmxnet3_set_wol,
        .get_link          = ethtool_op_get_link,
        .get_coalesce      = vmxnet3_get_coalesce,
        .set_coalesce      = vmxnet3_set_coalesce,
        .get_strings       = vmxnet3_get_strings,
        .get_sset_count    = vmxnet3_get_sset_count,
        .get_ethtool_stats = vmxnet3_get_ethtool_stats,
        .get_ringparam     = vmxnet3_get_ringparam,
        .set_ringparam     = vmxnet3_set_ringparam,
        .get_rxnfc         = vmxnet3_get_rxnfc,
#ifdef VMXNET3_RSS
        .get_rxfh_indir_size = vmxnet3_get_rss_indir_size,
        .get_rxfh          = vmxnet3_get_rss,
        .set_rxfh          = vmxnet3_set_rss,
#endif
        .get_link_ksettings = vmxnet3_get_link_ksettings,
};

void vmxnet3_set_ethtool_ops(struct net_device *netdev)
{
        netdev->ethtool_ops = &vmxnet3_ethtool_ops;
}
