drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c


DEFINITIONS

This source file includes the following definitions:
  1. lio_vf_rep_send_soft_command
  2. lio_vf_rep_open
  3. lio_vf_rep_stop
  4. lio_vf_rep_tx_timeout
  5. lio_vf_rep_get_stats64
  6. lio_vf_rep_change_mtu
  7. lio_vf_rep_phys_port_name
  8. lio_vf_rep_get_ndev
  9. lio_vf_rep_copy_packet
  10. lio_vf_rep_pkt_recv
  11. lio_vf_rep_packet_sent_callback
  12. lio_vf_rep_pkt_xmit
  13. lio_vf_get_port_parent_id
  14. lio_vf_rep_fetch_stats
  15. lio_vf_rep_create
  16. lio_vf_rep_destroy
  17. lio_vf_rep_netdev_event
  18. lio_vf_rep_modinit
  19. lio_vf_rep_modexit

/**********************************************************************
 * Author: Cavium, Inc.
 *
 * Contact: support@cavium.com
 *          Please include "LiquidIO" in the subject.
 *
 * Copyright (c) 2003-2017 Cavium, Inc.
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT.  See the GNU General Public License for more details.
 ***********************************************************************/
#include <linux/pci.h>
#include <linux/if_vlan.h>
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
#include "octeon_nic.h"
#include "octeon_main.h"
#include "octeon_network.h"
#include "lio_vf_rep.h"

static int lio_vf_rep_open(struct net_device *ndev);
static int lio_vf_rep_stop(struct net_device *ndev);
static netdev_tx_t lio_vf_rep_pkt_xmit(struct sk_buff *skb,
                                       struct net_device *ndev);
static void lio_vf_rep_tx_timeout(struct net_device *netdev);
static int lio_vf_rep_phys_port_name(struct net_device *dev,
                                     char *buf, size_t len);
static void lio_vf_rep_get_stats64(struct net_device *dev,
                                   struct rtnl_link_stats64 *stats64);
static int lio_vf_rep_change_mtu(struct net_device *ndev, int new_mtu);
static int lio_vf_get_port_parent_id(struct net_device *dev,
                                     struct netdev_phys_item_id *ppid);

static const struct net_device_ops lio_vf_rep_ndev_ops = {
        .ndo_open = lio_vf_rep_open,
        .ndo_stop = lio_vf_rep_stop,
        .ndo_start_xmit = lio_vf_rep_pkt_xmit,
        .ndo_tx_timeout = lio_vf_rep_tx_timeout,
        .ndo_get_phys_port_name = lio_vf_rep_phys_port_name,
        .ndo_get_stats64 = lio_vf_rep_get_stats64,
        .ndo_change_mtu = lio_vf_rep_change_mtu,
        .ndo_get_port_parent_id = lio_vf_get_port_parent_id,
};

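/* Send a control request to the NIC firmware on input queue 0 and, when
 * the caller supplies a response buffer, copy back the payload that
 * follows the status word.  On timeout, wait_for_sc_completion_timeout()
 * is assumed to take ownership of (and eventually free) the soft-command
 * buffer, which is presumably why that error path returns without
 * freeing it here.
 */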
static int
lio_vf_rep_send_soft_command(struct octeon_device *oct,
                             void *req, int req_size,
                             void *resp, int resp_size)
{
        int tot_resp_size = sizeof(struct lio_vf_rep_resp) + resp_size;
        struct octeon_soft_command *sc = NULL;
        struct lio_vf_rep_resp *rep_resp;
        void *sc_req;
        int err;

        sc = (struct octeon_soft_command *)
                octeon_alloc_soft_command(oct, req_size,
                                          tot_resp_size, 0);
        if (!sc)
                return -ENOMEM;

        init_completion(&sc->complete);
        sc->sc_status = OCTEON_REQUEST_PENDING;

        sc_req = (struct lio_vf_rep_req *)sc->virtdptr;
        memcpy(sc_req, req, req_size);

        rep_resp = (struct lio_vf_rep_resp *)sc->virtrptr;
        memset(rep_resp, 0, tot_resp_size);
        WRITE_ONCE(rep_resp->status, 1);

        sc->iq_no = 0;
        octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
                                    OPCODE_NIC_VF_REP_CMD, 0, 0, 0);

        err = octeon_send_soft_command(oct, sc);
        if (err == IQ_SEND_FAILED)
                goto free_buff;

        err = wait_for_sc_completion_timeout(oct, sc, 0);
        if (err)
                return err;

        err = READ_ONCE(rep_resp->status) ? -EBUSY : 0;
        if (err)
                dev_err(&oct->pci_dev->dev, "VF rep send config failed\n");
        else if (resp)
                memcpy(resp, (rep_resp + 1), resp_size);

        WRITE_ONCE(sc->caller_is_done, true);
        return err;

free_buff:
        octeon_free_soft_command(oct, sc);

        return err;
}

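/* ndo_open handler: ask the firmware to mark the VF representor link up,
 * then mirror that state locally (RUNNING flag, carrier on, queue started).
 */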
static int
lio_vf_rep_open(struct net_device *ndev)
{
        struct lio_vf_rep_desc *vf_rep = netdev_priv(ndev);
        struct lio_vf_rep_req rep_cfg;
        struct octeon_device *oct;
        int ret;

        oct = vf_rep->oct;

        memset(&rep_cfg, 0, sizeof(rep_cfg));
        rep_cfg.req_type = LIO_VF_REP_REQ_STATE;
        rep_cfg.ifidx = vf_rep->ifidx;
        rep_cfg.rep_state.state = LIO_VF_REP_STATE_UP;

        ret = lio_vf_rep_send_soft_command(oct, &rep_cfg,
                                           sizeof(rep_cfg), NULL, 0);

        if (ret) {
                dev_err(&oct->pci_dev->dev,
                        "VF_REP open failed with err %d\n", ret);
                return -EIO;
        }

        atomic_set(&vf_rep->ifstate, (atomic_read(&vf_rep->ifstate) |
                                      LIO_IFSTATE_RUNNING));

        netif_carrier_on(ndev);
        netif_start_queue(ndev);

        return 0;
}

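/* ndo_stop handler: the mirror image of lio_vf_rep_open().  The firmware
 * is told the representor is down before the queue and carrier are
 * stopped locally.
 */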
static int
lio_vf_rep_stop(struct net_device *ndev)
{
        struct lio_vf_rep_desc *vf_rep = netdev_priv(ndev);
        struct lio_vf_rep_req rep_cfg;
        struct octeon_device *oct;
        int ret;

        oct = vf_rep->oct;

        memset(&rep_cfg, 0, sizeof(rep_cfg));
        rep_cfg.req_type = LIO_VF_REP_REQ_STATE;
        rep_cfg.ifidx = vf_rep->ifidx;
        rep_cfg.rep_state.state = LIO_VF_REP_STATE_DOWN;

        ret = lio_vf_rep_send_soft_command(oct, &rep_cfg,
                                           sizeof(rep_cfg), NULL, 0);

        if (ret) {
                dev_err(&oct->pci_dev->dev,
                        "VF_REP dev stop failed with err %d\n", ret);
                return -EIO;
        }

        atomic_set(&vf_rep->ifstate, (atomic_read(&vf_rep->ifstate) &
                                      ~LIO_IFSTATE_RUNNING));

        netif_tx_disable(ndev);
        netif_carrier_off(ndev);

        return 0;
}

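/* ndo_tx_timeout handler: there is no hardware queue of its own to reset
 * here, so recovery is simply restamping the trans time and waking the
 * queue.
 */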
static void
lio_vf_rep_tx_timeout(struct net_device *ndev)
{
        netif_trans_update(ndev);

        netif_wake_queue(ndev);
}

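/* ndo_get_stats64 handler.  A representor is the switch-side view of a
 * VF port: what the VF transmits is what the representor receives and
 * vice versa, hence the tx/rx swap below.
 */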
static void
lio_vf_rep_get_stats64(struct net_device *dev,
                       struct rtnl_link_stats64 *stats64)
{
        struct lio_vf_rep_desc *vf_rep = netdev_priv(dev);

        /* Swap tx and rx stats as VF rep is a switch port */
        stats64->tx_packets = vf_rep->stats.rx_packets;
        stats64->tx_bytes   = vf_rep->stats.rx_bytes;
        stats64->tx_dropped = vf_rep->stats.rx_dropped;

        stats64->rx_packets = vf_rep->stats.tx_packets;
        stats64->rx_bytes   = vf_rep->stats.tx_bytes;
        stats64->rx_dropped = vf_rep->stats.tx_dropped;
}

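/* ndo_change_mtu handler: push the new MTU to the firmware (as a
 * big-endian 32-bit value) and commit it to the netdev only if the
 * firmware accepts it.
 */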
static int
lio_vf_rep_change_mtu(struct net_device *ndev, int new_mtu)
{
        struct lio_vf_rep_desc *vf_rep = netdev_priv(ndev);
        struct lio_vf_rep_req rep_cfg;
        struct octeon_device *oct;
        int ret;

        oct = vf_rep->oct;

        memset(&rep_cfg, 0, sizeof(rep_cfg));
        rep_cfg.req_type = LIO_VF_REP_REQ_MTU;
        rep_cfg.ifidx = vf_rep->ifidx;
        rep_cfg.rep_mtu.mtu = cpu_to_be32(new_mtu);

        ret = lio_vf_rep_send_soft_command(oct, &rep_cfg,
                                           sizeof(rep_cfg), NULL, 0);
        if (ret) {
                dev_err(&oct->pci_dev->dev,
                        "Change MTU failed with err %d\n", ret);
                return -EIO;
        }

        ndev->mtu = new_mtu;

        return 0;
}

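/* ndo_get_phys_port_name handler: reports names of the form "pfXvfY"
 * (e.g. "pf0vf3"), derived from the PF number and the representor's
 * firmware interface index.
 */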
static int
lio_vf_rep_phys_port_name(struct net_device *dev,
                          char *buf, size_t len)
{
        struct lio_vf_rep_desc *vf_rep = netdev_priv(dev);
        struct octeon_device *oct = vf_rep->oct;
        int ret;

        ret = snprintf(buf, len, "pf%dvf%d", oct->pf_num,
                       vf_rep->ifidx - oct->pf_num * 64 - 1);
        if (ret >= len)
                return -EOPNOTSUPP;

        return 0;
}

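/* Map a firmware interface index to the representor netdev registered
 * for it.  Each PF owns a block of 64 indices, with index 0 of the block
 * apparently reserved for the PF port itself: PF0's VFs use ifidx 1-63,
 * PF1's use 65-127 (matching the ifidx assignment in lio_vf_rep_create()).
 */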
static struct net_device *
lio_vf_rep_get_ndev(struct octeon_device *oct, int ifidx)
{
        int vf_id, max_vfs = CN23XX_MAX_VFS_PER_PF + 1;
        int vfid_mask = max_vfs - 1;

        if (ifidx <= oct->pf_num * max_vfs ||
            ifidx >= oct->pf_num * max_vfs + max_vfs)
                return NULL;

        /* ifidx 1-63 for PF0 VFs
         * ifidx 65-127 for PF1 VFs
         */
        vf_id = (ifidx & vfid_mask) - 1;

        return oct->vf_rep_list.ndev[vf_id];
}

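/* Build the receive skb from the DMA page buffer.  Large packets get
 * MIN_SKB_SIZE bytes copied into the linear area and the remainder
 * attached as a page fragment; small packets are copied entirely into
 * the linear area and the page reference is dropped.
 */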
static void
lio_vf_rep_copy_packet(struct octeon_device *oct,
                       struct sk_buff *skb,
                       int len)
{
        if (likely(len > MIN_SKB_SIZE)) {
                struct octeon_skb_page_info *pg_info;
                unsigned char *va;

                pg_info = ((struct octeon_skb_page_info *)(skb->cb));
                if (pg_info->page) {
                        va = page_address(pg_info->page) +
                                pg_info->page_offset;
                        memcpy(skb->data, va, MIN_SKB_SIZE);
                        skb_put(skb, MIN_SKB_SIZE);
                }

                skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
                                pg_info->page,
                                pg_info->page_offset + MIN_SKB_SIZE,
                                len - MIN_SKB_SIZE,
                                LIO_RXBUFFER_SZ);
        } else {
                struct octeon_skb_page_info *pg_info =
                        ((struct octeon_skb_page_info *)(skb->cb));

                skb_copy_to_linear_data(skb, page_address(pg_info->page) +
                                        pg_info->page_offset, len);
                skb_put(skb, len);
                put_page(pg_info->page);
        }
}

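/* Dispatch function for OPCODE_NIC_VF_REP_PKT: packets the embedded
 * switch delivers for a VF port are pushed up the corresponding
 * representor netdev via netif_rx().  Anything that cannot be delivered
 * (unknown device, representor not running, multi-buffer packet) is
 * freed instead.
 */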
static int
lio_vf_rep_pkt_recv(struct octeon_recv_info *recv_info, void *buf)
{
        struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt;
        struct lio_vf_rep_desc *vf_rep;
        struct net_device *vf_ndev;
        struct octeon_device *oct;
        union octeon_rh *rh;
        struct sk_buff *skb;
        int i, ifidx;

        oct = lio_get_device(recv_pkt->octeon_id);
        if (!oct)
                goto free_buffers;

        skb = recv_pkt->buffer_ptr[0];
        rh = &recv_pkt->rh;
        ifidx = rh->r.ossp;

        vf_ndev = lio_vf_rep_get_ndev(oct, ifidx);
        if (!vf_ndev)
                goto free_buffers;

        vf_rep = netdev_priv(vf_ndev);
        if (!(atomic_read(&vf_rep->ifstate) & LIO_IFSTATE_RUNNING) ||
            recv_pkt->buffer_count > 1)
                goto free_buffers;

        skb->dev = vf_ndev;

        /* Multiple buffers are not used for vf_rep packets.
         * So just buffer_size[0] is valid.
         */
        lio_vf_rep_copy_packet(oct, skb, recv_pkt->buffer_size[0]);

        skb_pull(skb, rh->r_dh.len * BYTES_PER_DHLEN_UNIT);
        skb->protocol = eth_type_trans(skb, skb->dev);
        skb->ip_summed = CHECKSUM_NONE;

        netif_rx(skb);

        octeon_free_recv_info(recv_info);

        return 0;

free_buffers:
        for (i = 0; i < recv_pkt->buffer_count; i++)
                recv_buffer_free(recv_pkt->buffer_ptr[i]);

        octeon_free_recv_info(recv_info);

        return 0;
}

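/* Completion callback for transmitted representor packets: undo the DMA
 * mapping, free the skb and the soft command, then wake the queue if it
 * was stopped and the instruction queue has drained.
 */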
static void
lio_vf_rep_packet_sent_callback(struct octeon_device *oct,
                                u32 status, void *buf)
{
        struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;
        struct sk_buff *skb = sc->ctxptr;
        struct net_device *ndev = skb->dev;
        u32 iq_no;

        dma_unmap_single(&oct->pci_dev->dev, sc->dmadptr,
                         sc->datasize, DMA_TO_DEVICE);
        dev_kfree_skb_any(skb);
        iq_no = sc->iq_no;
        octeon_free_soft_command(oct, sc);

        if (octnet_iq_is_full(oct, iq_no))
                return;

        if (netif_queue_stopped(ndev))
                netif_wake_queue(ndev);
}

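/* ndo_start_xmit handler.  The packet is wrapped in a soft command and
 * injected into the parent PF's transmit queue, tagged with the
 * representor's ifidx so the firmware switch can forward it to the
 * right VF.  Only linear skbs are supported; fragmented packets are
 * dropped.
 */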
static netdev_tx_t
lio_vf_rep_pkt_xmit(struct sk_buff *skb, struct net_device *ndev)
{
        struct lio_vf_rep_desc *vf_rep = netdev_priv(ndev);
        struct net_device *parent_ndev = vf_rep->parent_ndev;
        struct octeon_device *oct = vf_rep->oct;
        struct octeon_instr_pki_ih3 *pki_ih3;
        struct octeon_soft_command *sc;
        struct lio *parent_lio;
        int status;

        parent_lio = GET_LIO(parent_ndev);

        if (!(atomic_read(&vf_rep->ifstate) & LIO_IFSTATE_RUNNING) ||
            skb->len <= 0)
                goto xmit_failed;

        if (octnet_iq_is_full(vf_rep->oct, parent_lio->txq)) {
                dev_err(&oct->pci_dev->dev, "VF rep: Device IQ full\n");
                netif_stop_queue(ndev);
                return NETDEV_TX_BUSY;
        }

        sc = (struct octeon_soft_command *)
                octeon_alloc_soft_command(oct, 0, 16, 0);
        if (!sc) {
                dev_err(&oct->pci_dev->dev, "VF rep: Soft command alloc failed\n");
                goto xmit_failed;
        }

        /* Multiple buffers are not used for vf_rep packets. */
        if (skb_shinfo(skb)->nr_frags != 0) {
                dev_err(&oct->pci_dev->dev, "VF rep: nr_frags != 0. Dropping packet\n");
                octeon_free_soft_command(oct, sc);
                goto xmit_failed;
        }

        sc->dmadptr = dma_map_single(&oct->pci_dev->dev,
                                     skb->data, skb->len, DMA_TO_DEVICE);
        if (dma_mapping_error(&oct->pci_dev->dev, sc->dmadptr)) {
                dev_err(&oct->pci_dev->dev, "VF rep: DMA mapping failed\n");
                octeon_free_soft_command(oct, sc);
                goto xmit_failed;
        }

        sc->virtdptr = skb->data;
        sc->datasize = skb->len;
        sc->ctxptr = skb;
        sc->iq_no = parent_lio->txq;

        octeon_prepare_soft_command(oct, sc, OPCODE_NIC, OPCODE_NIC_VF_REP_PKT,
                                    vf_rep->ifidx, 0, 0);
        pki_ih3 = (struct octeon_instr_pki_ih3 *)&sc->cmd.cmd3.pki_ih3;
        pki_ih3->tagtype = ORDERED_TAG;

        sc->callback = lio_vf_rep_packet_sent_callback;
        sc->callback_arg = sc;

        status = octeon_send_soft_command(oct, sc);
        if (status == IQ_SEND_FAILED) {
                dma_unmap_single(&oct->pci_dev->dev, sc->dmadptr,
                                 sc->datasize, DMA_TO_DEVICE);
                octeon_free_soft_command(oct, sc);
                goto xmit_failed;
        }

        if (status == IQ_SEND_STOP)
                netif_stop_queue(ndev);

        netif_trans_update(ndev);

        return NETDEV_TX_OK;

xmit_failed:
        dev_kfree_skb_any(skb);

        return NETDEV_TX_OK;
}

static int lio_vf_get_port_parent_id(struct net_device *dev,
                                     struct netdev_phys_item_id *ppid)
{
        struct lio_vf_rep_desc *vf_rep = netdev_priv(dev);
        struct net_device *parent_ndev = vf_rep->parent_ndev;
        struct lio *lio = GET_LIO(parent_ndev);

        ppid->id_len = ETH_ALEN;
        ether_addr_copy(ppid->id, (void *)&lio->linfo.hw_addr + 2);

        return 0;
}

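/* Delayed-work handler that polls the firmware for per-VF statistics
 * every LIO_VF_REP_STATS_POLL_TIME_MS milliseconds.  The response comes
 * back in the device's byte order as 8-byte words, hence
 * octeon_swap_8B_data() before the snapshot is cached in vf_rep->stats.
 */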
static void
lio_vf_rep_fetch_stats(struct work_struct *work)
{
        struct cavium_wk *wk = (struct cavium_wk *)work;
        struct lio_vf_rep_desc *vf_rep = wk->ctxptr;
        struct lio_vf_rep_stats stats;
        struct lio_vf_rep_req rep_cfg;
        struct octeon_device *oct;
        int ret;

        oct = vf_rep->oct;

        memset(&rep_cfg, 0, sizeof(rep_cfg));
        rep_cfg.req_type = LIO_VF_REP_REQ_STATS;
        rep_cfg.ifidx = vf_rep->ifidx;

        ret = lio_vf_rep_send_soft_command(oct, &rep_cfg, sizeof(rep_cfg),
                                           &stats, sizeof(stats));

        if (!ret) {
                octeon_swap_8B_data((u64 *)&stats, (sizeof(stats) >> 3));
                memcpy(&vf_rep->stats, &stats, sizeof(stats));
        }

        schedule_delayed_work(&vf_rep->stats_wk.work,
                              msecs_to_jiffies(LIO_VF_REP_STATS_POLL_TIME_MS));
}

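/* Create one representor netdev per allocated VF.  This only applies in
 * switchdev eswitch mode with SR-IOV enabled; each netdev gets a random
 * MAC address, a stats polling work item, and a slot in
 * oct->vf_rep_list.  Registration of the receive dispatch function comes
 * last, and any failure unwinds everything created so far.
 */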
int
lio_vf_rep_create(struct octeon_device *oct)
{
        struct lio_vf_rep_desc *vf_rep;
        struct net_device *ndev;
        int i, num_vfs;

        if (oct->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
                return 0;

        if (!oct->sriov_info.sriov_enabled)
                return 0;

        num_vfs = oct->sriov_info.num_vfs_alloced;

        oct->vf_rep_list.num_vfs = 0;
        for (i = 0; i < num_vfs; i++) {
                ndev = alloc_etherdev(sizeof(struct lio_vf_rep_desc));

                if (!ndev) {
                        dev_err(&oct->pci_dev->dev,
                                "VF rep device %d creation failed\n", i);
                        goto cleanup;
                }

                ndev->min_mtu = LIO_MIN_MTU_SIZE;
                ndev->max_mtu = LIO_MAX_MTU_SIZE;
                ndev->netdev_ops = &lio_vf_rep_ndev_ops;

                vf_rep = netdev_priv(ndev);
                memset(vf_rep, 0, sizeof(*vf_rep));

                vf_rep->ndev = ndev;
                vf_rep->oct = oct;
                vf_rep->parent_ndev = oct->props[0].netdev;
                vf_rep->ifidx = (oct->pf_num * 64) + i + 1;

                eth_hw_addr_random(ndev);

                if (register_netdev(ndev)) {
                        dev_err(&oct->pci_dev->dev, "VF rep netdev registration failed\n");

                        free_netdev(ndev);
                        goto cleanup;
                }

                netif_carrier_off(ndev);

                INIT_DELAYED_WORK(&vf_rep->stats_wk.work,
                                  lio_vf_rep_fetch_stats);
                vf_rep->stats_wk.ctxptr = (void *)vf_rep;
                schedule_delayed_work(&vf_rep->stats_wk.work,
                                      msecs_to_jiffies
                                      (LIO_VF_REP_STATS_POLL_TIME_MS));
                oct->vf_rep_list.num_vfs++;
                oct->vf_rep_list.ndev[i] = ndev;
        }

        if (octeon_register_dispatch_fn(oct, OPCODE_NIC,
                                        OPCODE_NIC_VF_REP_PKT,
                                        lio_vf_rep_pkt_recv, oct)) {
                dev_err(&oct->pci_dev->dev, "VF rep Dispatch func registration failed\n");

                goto cleanup;
        }

        return 0;

cleanup:
        for (i = 0; i < oct->vf_rep_list.num_vfs; i++) {
                ndev = oct->vf_rep_list.ndev[i];
                oct->vf_rep_list.ndev[i] = NULL;
                if (ndev) {
                        vf_rep = netdev_priv(ndev);
                        cancel_delayed_work_sync
                                (&vf_rep->stats_wk.work);
                        unregister_netdev(ndev);
                        free_netdev(ndev);
                }
        }

        oct->vf_rep_list.num_vfs = 0;

        return -1;
}

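/* Tear down all representor netdevs created by lio_vf_rep_create():
 * stop the stats work, quiesce and unregister each netdev, and clear
 * the per-device list.
 */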
void
lio_vf_rep_destroy(struct octeon_device *oct)
{
        struct lio_vf_rep_desc *vf_rep;
        struct net_device *ndev;
        int i;

        if (oct->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
                return;

        if (!oct->sriov_info.sriov_enabled)
                return;

        for (i = 0; i < oct->vf_rep_list.num_vfs; i++) {
                ndev = oct->vf_rep_list.ndev[i];
                oct->vf_rep_list.ndev[i] = NULL;
                if (ndev) {
                        vf_rep = netdev_priv(ndev);
                        cancel_delayed_work_sync
                                (&vf_rep->stats_wk.work);
                        netif_tx_disable(ndev);
                        netif_carrier_off(ndev);

                        unregister_netdev(ndev);
                        free_netdev(ndev);
                }
        }

        oct->vf_rep_list.num_vfs = 0;
}

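/* netdev notifier: when a representor is registered or renamed, push the
 * interface name to the firmware (LIO_VF_REP_REQ_DEVNAME), provided it
 * fits within LIO_IF_NAME_SIZE, so the device side stays in sync with
 * the host-visible name.
 */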
static int
lio_vf_rep_netdev_event(struct notifier_block *nb,
                        unsigned long event, void *ptr)
{
        struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
        struct lio_vf_rep_desc *vf_rep;
        struct lio_vf_rep_req rep_cfg;
        struct octeon_device *oct;
        int ret;

        switch (event) {
        case NETDEV_REGISTER:
        case NETDEV_CHANGENAME:
                break;

        default:
                return NOTIFY_DONE;
        }

        if (ndev->netdev_ops != &lio_vf_rep_ndev_ops)
                return NOTIFY_DONE;

        vf_rep = netdev_priv(ndev);
        oct = vf_rep->oct;

        if (strlen(ndev->name) > LIO_IF_NAME_SIZE) {
                dev_err(&oct->pci_dev->dev,
                        "Device name change sync failed as the size is > %d\n",
                        LIO_IF_NAME_SIZE);
                return NOTIFY_DONE;
        }

        memset(&rep_cfg, 0, sizeof(rep_cfg));
        rep_cfg.req_type = LIO_VF_REP_REQ_DEVNAME;
        rep_cfg.ifidx = vf_rep->ifidx;
        strncpy(rep_cfg.rep_name.name, ndev->name, LIO_IF_NAME_SIZE);

        ret = lio_vf_rep_send_soft_command(oct, &rep_cfg,
                                           sizeof(rep_cfg), NULL, 0);
        if (ret)
                dev_err(&oct->pci_dev->dev,
                        "vf_rep netdev name change failed with err %d\n", ret);

        return NOTIFY_DONE;
}

static struct notifier_block lio_vf_rep_netdev_notifier = {
        .notifier_call = lio_vf_rep_netdev_event,
};

int
lio_vf_rep_modinit(void)
{
        if (register_netdevice_notifier(&lio_vf_rep_netdev_notifier)) {
                pr_err("netdev notifier registration failed\n");
                return -EFAULT;
        }

        return 0;
}

void
lio_vf_rep_modexit(void)
{
        if (unregister_netdevice_notifier(&lio_vf_rep_netdev_notifier))
                pr_err("netdev notifier unregister failed\n");
}
