root/drivers/net/ethernet/netronome/nfp/nfp_net_main.c


DEFINITIONS

This source file includes the following definitions.
  1. nfp_net_get_mac_addr
  2. nfp_net_find_port
  3. nfp_net_pf_get_num_ports
  4. nfp_net_pf_get_app_id
  5. nfp_net_pf_free_vnic
  6. nfp_net_pf_free_vnics
  7. nfp_net_pf_alloc_vnic
  8. nfp_net_pf_init_vnic
  9. nfp_net_pf_alloc_vnics
  10. nfp_net_pf_clean_vnic
  11. nfp_net_pf_alloc_irqs
  12. nfp_net_pf_free_irqs
  13. nfp_net_pf_init_vnics
  14. nfp_net_pf_app_init
  15. nfp_net_pf_app_clean
  16. nfp_net_pf_app_start_ctrl
  17. nfp_net_pf_app_stop_ctrl
  18. nfp_net_pf_app_start
  19. nfp_net_pf_app_stop
  20. nfp_net_pci_unmap_mem
  21. nfp_net_pci_map_mem
  22. nfp_net_eth_port_update
  23. nfp_net_refresh_port_table_sync
  24. nfp_net_refresh_vnics
  25. nfp_net_refresh_port_table
  26. nfp_net_refresh_eth_port
  27. nfp_net_pci_probe
  28. nfp_net_pci_remove

// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2015-2018 Netronome Systems, Inc. */

/*
 * nfp_net_main.c
 * Netronome network device driver: Main entry point
 * Authors: Jakub Kicinski <jakub.kicinski@netronome.com>
 *          Alejandro Lucero <alejandro.lucero@netronome.com>
 *          Jason McMullan <jason.mcmullan@netronome.com>
 *          Rolf Neugebauer <rolf.neugebauer@netronome.com>
 */

#include <linux/etherdevice.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/lockdep.h>
#include <linux/pci.h>
#include <linux/pci_regs.h>
#include <linux/msi.h>
#include <linux/random.h>
#include <linux/rtnetlink.h>

#include "nfpcore/nfp.h"
#include "nfpcore/nfp_cpp.h"
#include "nfpcore/nfp_nffw.h"
#include "nfpcore/nfp_nsp.h"
#include "nfpcore/nfp6000_pcie.h"
#include "nfp_app.h"
#include "nfp_net_ctrl.h"
#include "nfp_net_sriov.h"
#include "nfp_net.h"
#include "nfp_main.h"
#include "nfp_port.h"

#define NFP_PF_CSR_SLICE_SIZE   (32 * 1024)

/**
 * nfp_net_get_mac_addr() - Get the MAC address.
 * @pf:       NFP PF handle
 * @netdev:   net_device to set MAC address on
 * @port:     NFP port structure
 *
 * First try to get the MAC address from NSP ETH table. If that
 * fails generate a random address.
 */
void
nfp_net_get_mac_addr(struct nfp_pf *pf, struct net_device *netdev,
                     struct nfp_port *port)
{
        struct nfp_eth_table_port *eth_port;

        eth_port = __nfp_port_get_eth_port(port);
        if (!eth_port) {
                eth_hw_addr_random(netdev);
                return;
        }

        ether_addr_copy(netdev->dev_addr, eth_port->mac_addr);
        ether_addr_copy(netdev->perm_addr, eth_port->mac_addr);
}

static struct nfp_eth_table_port *
nfp_net_find_port(struct nfp_eth_table *eth_tbl, unsigned int index)
{
        int i;

        for (i = 0; eth_tbl && i < eth_tbl->count; i++)
                if (eth_tbl->ports[i].index == index)
                        return &eth_tbl->ports[i];

        return NULL;
}

static int nfp_net_pf_get_num_ports(struct nfp_pf *pf)
{
        return nfp_pf_rtsym_read_optional(pf, "nfd_cfg_pf%u_num_ports", 1);
}

static int nfp_net_pf_get_app_id(struct nfp_pf *pf)
{
        return nfp_pf_rtsym_read_optional(pf, "_pf%u_net_app_id",
                                          NFP_APP_CORE_NIC);
}

static void nfp_net_pf_free_vnic(struct nfp_pf *pf, struct nfp_net *nn)
{
        if (nfp_net_is_data_vnic(nn))
                nfp_app_vnic_free(pf->app, nn);
        nfp_port_free(nn->port);
        list_del(&nn->vnic_list);
        pf->num_vnics--;
        nfp_net_free(nn);
}

static void nfp_net_pf_free_vnics(struct nfp_pf *pf)
{
        struct nfp_net *nn, *next;

        list_for_each_entry_safe(nn, next, &pf->vnics, vnic_list)
                if (nfp_net_is_data_vnic(nn))
                        nfp_net_pf_free_vnic(pf, nn);
}

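/* Allocate one vNIC backed by the given control BAR slice.  Queue bases
 * and maximum ring counts are read from the vNIC's control memory; vNICs
 * which need a netdev additionally get app-level per-vNIC state via
 * nfp_app_vnic_alloc().  On success the vNIC is linked into the PF's
 * vnic list.
 */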
static struct nfp_net *
nfp_net_pf_alloc_vnic(struct nfp_pf *pf, bool needs_netdev,
                      void __iomem *ctrl_bar, void __iomem *qc_bar,
                      int stride, unsigned int id)
{
        u32 tx_base, rx_base, n_tx_rings, n_rx_rings;
        struct nfp_net *nn;
        int err;

        tx_base = readl(ctrl_bar + NFP_NET_CFG_START_TXQ);
        rx_base = readl(ctrl_bar + NFP_NET_CFG_START_RXQ);
        n_tx_rings = readl(ctrl_bar + NFP_NET_CFG_MAX_TXRINGS);
        n_rx_rings = readl(ctrl_bar + NFP_NET_CFG_MAX_RXRINGS);

        /* Allocate and initialise the vNIC */
        nn = nfp_net_alloc(pf->pdev, ctrl_bar, needs_netdev,
                           n_tx_rings, n_rx_rings);
        if (IS_ERR(nn))
                return nn;

        nn->app = pf->app;
        nfp_net_get_fw_version(&nn->fw_ver, ctrl_bar);
        nn->tx_bar = qc_bar + tx_base * NFP_QCP_QUEUE_ADDR_SZ;
        nn->rx_bar = qc_bar + rx_base * NFP_QCP_QUEUE_ADDR_SZ;
        nn->dp.is_vf = 0;
        nn->stride_rx = stride;
        nn->stride_tx = stride;

        if (needs_netdev) {
                err = nfp_app_vnic_alloc(pf->app, nn, id);
                if (err) {
                        nfp_net_free(nn);
                        return ERR_PTR(err);
                }
        }

        pf->num_vnics++;
        list_add_tail(&nn->vnic_list, &pf->vnics);

        return nn;
}

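/* Finish initialization of a single vNIC: register its devlink port (if
 * any), run the common nfp_net init, add debugfs entries, and perform
 * app-specific init for data vNICs.  Errors unwind in reverse order.
 */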
static int
nfp_net_pf_init_vnic(struct nfp_pf *pf, struct nfp_net *nn, unsigned int id)
{
        int err;

        nn->id = id;

        if (nn->port) {
                err = nfp_devlink_port_register(pf->app, nn->port);
                if (err)
                        return err;
        }

        err = nfp_net_init(nn);
        if (err)
                goto err_devlink_port_clean;

        nfp_net_debugfs_vnic_add(nn, pf->ddir);

        if (nn->port)
                nfp_devlink_port_type_eth_set(nn->port);

        nfp_net_info(nn);

        if (nfp_net_is_data_vnic(nn)) {
                err = nfp_app_vnic_init(pf->app, nn);
                if (err)
                        goto err_devlink_port_type_clean;
        }

        return 0;

err_devlink_port_type_clean:
        if (nn->port)
                nfp_devlink_port_type_clear(nn->port);
        nfp_net_debugfs_dir_clean(&nn->debugfs_dir);
        nfp_net_clean(nn);
err_devlink_port_clean:
        if (nn->port)
                nfp_devlink_port_unregister(nn->port);
        return err;
}

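/* Allocate all data vNICs of the PF.  Each vNIC is given a consecutive
 * NFP_PF_CSR_SLICE_SIZE slice of the control BAR.  vNICs whose port was
 * marked invalid during app allocation are freed again immediately.
 */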
static int
nfp_net_pf_alloc_vnics(struct nfp_pf *pf, void __iomem *ctrl_bar,
                       void __iomem *qc_bar, int stride)
{
        struct nfp_net *nn;
        unsigned int i;
        int err;

        for (i = 0; i < pf->max_data_vnics; i++) {
                nn = nfp_net_pf_alloc_vnic(pf, true, ctrl_bar, qc_bar,
                                           stride, i);
                if (IS_ERR(nn)) {
                        err = PTR_ERR(nn);
                        goto err_free_prev;
                }

                ctrl_bar += NFP_PF_CSR_SLICE_SIZE;

                /* Kill the vNIC if app init marked it as invalid */
                if (nn->port && nn->port->type == NFP_PORT_INVALID)
                        nfp_net_pf_free_vnic(pf, nn);
        }

        if (list_empty(&pf->vnics))
                return -ENODEV;

        return 0;

err_free_prev:
        nfp_net_pf_free_vnics(pf);
        return err;
}

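/* Undo nfp_net_pf_init_vnic() for one vNIC: app clean-up for data vNICs,
 * debugfs and devlink port teardown, and the common nfp_net clean-up.
 */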
static void nfp_net_pf_clean_vnic(struct nfp_pf *pf, struct nfp_net *nn)
{
        if (nfp_net_is_data_vnic(nn))
                nfp_app_vnic_clean(pf->app, nn);
        if (nn->port)
                nfp_devlink_port_type_clear(nn->port);
        nfp_net_debugfs_dir_clean(&nn->debugfs_dir);
        nfp_net_clean(nn);
        if (nn->port)
                nfp_devlink_port_unregister(nn->port);
}

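/* Allocate MSI-X vectors for all vNICs and distribute them.  The driver
 * asks for enough vectors to cover every ring vector plus the non-queue
 * vectors of each vNIC, but accepts as little as NFP_NET_MIN_VNIC_IRQS
 * per vNIC.  Whatever was granted is then spread across the vNICs,
 * capped at each vNIC's requested count.
 */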
static int nfp_net_pf_alloc_irqs(struct nfp_pf *pf)
{
        unsigned int wanted_irqs, num_irqs, vnics_left, irqs_left;
        struct nfp_net *nn;

        /* Get MSI-X vectors */
        wanted_irqs = 0;
        list_for_each_entry(nn, &pf->vnics, vnic_list)
                wanted_irqs += NFP_NET_NON_Q_VECTORS + nn->dp.num_r_vecs;
        pf->irq_entries = kcalloc(wanted_irqs, sizeof(*pf->irq_entries),
                                  GFP_KERNEL);
        if (!pf->irq_entries)
                return -ENOMEM;

        num_irqs = nfp_net_irqs_alloc(pf->pdev, pf->irq_entries,
                                      NFP_NET_MIN_VNIC_IRQS * pf->num_vnics,
                                      wanted_irqs);
        if (!num_irqs) {
                nfp_warn(pf->cpp, "Unable to allocate MSI-X vectors\n");
                kfree(pf->irq_entries);
                return -ENOMEM;
        }

        /* Distribute IRQs to vNICs */
        irqs_left = num_irqs;
        vnics_left = pf->num_vnics;
        list_for_each_entry(nn, &pf->vnics, vnic_list) {
                unsigned int n;

                n = min(NFP_NET_NON_Q_VECTORS + nn->dp.num_r_vecs,
                        DIV_ROUND_UP(irqs_left, vnics_left));
                nfp_net_irqs_assign(nn, &pf->irq_entries[num_irqs - irqs_left],
                                    n);
                irqs_left -= n;
                vnics_left--;
        }

        return 0;
}

static void nfp_net_pf_free_irqs(struct nfp_pf *pf)
{
        nfp_net_irqs_disable(pf->pdev);
        kfree(pf->irq_entries);
}

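/* Run nfp_net_pf_init_vnic() for every data vNIC, assigning sequential
 * ids.  On failure, already-initialized data vNICs are cleaned up again.
 */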
static int nfp_net_pf_init_vnics(struct nfp_pf *pf)
{
        struct nfp_net *nn;
        unsigned int id;
        int err;

        /* Finish vNIC init and register */
        id = 0;
        list_for_each_entry(nn, &pf->vnics, vnic_list) {
                if (!nfp_net_is_data_vnic(nn))
                        continue;
                err = nfp_net_pf_init_vnic(pf, nn, id);
                if (err)
                        goto err_prev_deinit;

                id++;
        }

        return 0;

err_prev_deinit:
        list_for_each_entry_continue_reverse(nn, &pf->vnics, vnic_list)
                if (nfp_net_is_data_vnic(nn))
                        nfp_net_pf_clean_vnic(pf, nn);
        return err;
}

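/* Allocate the app selected by firmware (app id rtsym) and run its init
 * callback.  If the app needs a control vNIC, the control BAR symbol is
 * mapped and the control vNIC is allocated here; it is brought up later
 * by nfp_net_pf_app_start_ctrl().
 */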
static int
nfp_net_pf_app_init(struct nfp_pf *pf, u8 __iomem *qc_bar, unsigned int stride)
{
        u8 __iomem *ctrl_bar;
        int err;

        pf->app = nfp_app_alloc(pf, nfp_net_pf_get_app_id(pf));
        if (IS_ERR(pf->app))
                return PTR_ERR(pf->app);

        mutex_lock(&pf->lock);
        err = nfp_app_init(pf->app);
        mutex_unlock(&pf->lock);
        if (err)
                goto err_free;

        if (!nfp_app_needs_ctrl_vnic(pf->app))
                return 0;

        ctrl_bar = nfp_pf_map_rtsym(pf, "net.ctrl", "_pf%u_net_ctrl_bar",
                                    NFP_PF_CSR_SLICE_SIZE, &pf->ctrl_vnic_bar);
        if (IS_ERR(ctrl_bar)) {
                nfp_err(pf->cpp, "Failed to find ctrl vNIC memory symbol\n");
                err = PTR_ERR(ctrl_bar);
                goto err_app_clean;
        }

        pf->ctrl_vnic = nfp_net_pf_alloc_vnic(pf, false, ctrl_bar, qc_bar,
                                              stride, 0);
        if (IS_ERR(pf->ctrl_vnic)) {
                err = PTR_ERR(pf->ctrl_vnic);
                goto err_unmap;
        }

        return 0;

err_unmap:
        nfp_cpp_area_release_free(pf->ctrl_vnic_bar);
err_app_clean:
        mutex_lock(&pf->lock);
        nfp_app_clean(pf->app);
        mutex_unlock(&pf->lock);
err_free:
        nfp_app_free(pf->app);
        pf->app = NULL;
        return err;
}

static void nfp_net_pf_app_clean(struct nfp_pf *pf)
{
        if (pf->ctrl_vnic) {
                nfp_net_pf_free_vnic(pf, pf->ctrl_vnic);
                nfp_cpp_area_release_free(pf->ctrl_vnic_bar);
        }

        mutex_lock(&pf->lock);
        nfp_app_clean(pf->app);
        mutex_unlock(&pf->lock);

        nfp_app_free(pf->app);
        pf->app = NULL;
}

static int nfp_net_pf_app_start_ctrl(struct nfp_pf *pf)
{
        int err;

        if (!pf->ctrl_vnic)
                return 0;
        err = nfp_net_pf_init_vnic(pf, pf->ctrl_vnic, 0);
        if (err)
                return err;

        err = nfp_ctrl_open(pf->ctrl_vnic);
        if (err)
                goto err_clean_ctrl;

        return 0;

err_clean_ctrl:
        nfp_net_pf_clean_vnic(pf, pf->ctrl_vnic);
        return err;
}

static void nfp_net_pf_app_stop_ctrl(struct nfp_pf *pf)
{
        if (!pf->ctrl_vnic)
                return;
        nfp_ctrl_close(pf->ctrl_vnic);
        nfp_net_pf_clean_vnic(pf, pf->ctrl_vnic);
}

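/* Bring up the control vNIC (if the app uses one), start the app and
 * re-enable SR-IOV in the app for any already configured VFs.
 */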
static int nfp_net_pf_app_start(struct nfp_pf *pf)
{
        int err;

        err = nfp_net_pf_app_start_ctrl(pf);
        if (err)
                return err;

        err = nfp_app_start(pf->app, pf->ctrl_vnic);
        if (err)
                goto err_ctrl_stop;

        if (pf->num_vfs) {
                err = nfp_app_sriov_enable(pf->app, pf->num_vfs);
                if (err)
                        goto err_app_stop;
        }

        return 0;

err_app_stop:
        nfp_app_stop(pf->app);
err_ctrl_stop:
        nfp_net_pf_app_stop_ctrl(pf);
        return err;
}

static void nfp_net_pf_app_stop(struct nfp_pf *pf)
{
        if (pf->num_vfs)
                nfp_app_sriov_disable(pf->app);
        nfp_app_stop(pf->app);
        nfp_net_pf_app_stop_ctrl(pf);
}

static void nfp_net_pci_unmap_mem(struct nfp_pf *pf)
{
        if (pf->vfcfg_tbl2_area)
                nfp_cpp_area_release_free(pf->vfcfg_tbl2_area);
        if (pf->vf_cfg_bar)
                nfp_cpp_area_release_free(pf->vf_cfg_bar);
        if (pf->mac_stats_bar)
                nfp_cpp_area_release_free(pf->mac_stats_bar);
        nfp_cpp_area_release_free(pf->qc_area);
        nfp_cpp_area_release_free(pf->data_vnic_bar);
}

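/* Map the device memory needed by the PF netdev code: the data vNIC
 * control BAR, MAC statistics, the VF config BARs and the queue
 * controller area.  The MAC stats and VF config symbols are optional,
 * so -ENOENT from those lookups is not treated as an error.
 */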
static int nfp_net_pci_map_mem(struct nfp_pf *pf)
{
        u32 min_size, cpp_id;
        u8 __iomem *mem;
        int err;

        min_size = pf->max_data_vnics * NFP_PF_CSR_SLICE_SIZE;
        mem = nfp_pf_map_rtsym(pf, "net.bar0", "_pf%d_net_bar0",
                               min_size, &pf->data_vnic_bar);
        if (IS_ERR(mem)) {
                nfp_err(pf->cpp, "Failed to find data vNIC memory symbol\n");
                return PTR_ERR(mem);
        }

        if (pf->eth_tbl) {
                min_size = NFP_MAC_STATS_SIZE * (pf->eth_tbl->max_index + 1);
                pf->mac_stats_mem = nfp_rtsym_map(pf->rtbl, "_mac_stats",
                                                  "net.macstats", min_size,
                                                  &pf->mac_stats_bar);
                if (IS_ERR(pf->mac_stats_mem)) {
                        if (PTR_ERR(pf->mac_stats_mem) != -ENOENT) {
                                err = PTR_ERR(pf->mac_stats_mem);
                                goto err_unmap_ctrl;
                        }
                        pf->mac_stats_mem = NULL;
                }
        }

        pf->vf_cfg_mem = nfp_pf_map_rtsym(pf, "net.vfcfg", "_pf%d_net_vf_bar",
                                          NFP_NET_CFG_BAR_SZ * pf->limit_vfs,
                                          &pf->vf_cfg_bar);
        if (IS_ERR(pf->vf_cfg_mem)) {
                if (PTR_ERR(pf->vf_cfg_mem) != -ENOENT) {
                        err = PTR_ERR(pf->vf_cfg_mem);
                        goto err_unmap_mac_stats;
                }
                pf->vf_cfg_mem = NULL;
        }

        min_size = NFP_NET_VF_CFG_SZ * pf->limit_vfs + NFP_NET_VF_CFG_MB_SZ;
        pf->vfcfg_tbl2 = nfp_pf_map_rtsym(pf, "net.vfcfg_tbl2",
                                          "_pf%d_net_vf_cfg2",
                                          min_size, &pf->vfcfg_tbl2_area);
        if (IS_ERR(pf->vfcfg_tbl2)) {
                if (PTR_ERR(pf->vfcfg_tbl2) != -ENOENT) {
                        err = PTR_ERR(pf->vfcfg_tbl2);
                        goto err_unmap_vf_cfg;
                }
                pf->vfcfg_tbl2 = NULL;
        }

        cpp_id = NFP_CPP_ISLAND_ID(0, NFP_CPP_ACTION_RW, 0, 0);
        mem = nfp_cpp_map_area(pf->cpp, "net.qc", cpp_id, NFP_PCIE_QUEUE(0),
                               NFP_QCP_QUEUE_AREA_SZ, &pf->qc_area);
        if (IS_ERR(mem)) {
                nfp_err(pf->cpp, "Failed to map Queue Controller area.\n");
                err = PTR_ERR(mem);
                goto err_unmap_vfcfg_tbl2;
        }

        return 0;

err_unmap_vfcfg_tbl2:
        if (pf->vfcfg_tbl2_area)
                nfp_cpp_area_release_free(pf->vfcfg_tbl2_area);
err_unmap_vf_cfg:
        if (pf->vf_cfg_bar)
                nfp_cpp_area_release_free(pf->vf_cfg_bar);
err_unmap_mac_stats:
        if (pf->mac_stats_bar)
                nfp_cpp_area_release_free(pf->mac_stats_bar);
err_unmap_ctrl:
        nfp_cpp_area_release_free(pf->data_vnic_bar);
        return err;
}

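/* Update the cached eth_port state of @port from a freshly read ETH
 * table.  A port which is no longer present is flagged as changed and
 * -EIO is returned; a port whose configuration was overridden is marked
 * NFP_PORT_INVALID so the caller can unregister it.  Caller must hold
 * the RTNL lock.
 */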
static int
nfp_net_eth_port_update(struct nfp_cpp *cpp, struct nfp_port *port,
                        struct nfp_eth_table *eth_table)
{
        struct nfp_eth_table_port *eth_port;

        ASSERT_RTNL();

        eth_port = nfp_net_find_port(eth_table, port->eth_id);
        if (!eth_port) {
                set_bit(NFP_PORT_CHANGED, &port->flags);
                nfp_warn(cpp, "Warning: port #%d not present after reconfig\n",
                         port->eth_id);
                return -EIO;
        }
        if (eth_port->override_changed) {
                nfp_warn(cpp, "Port #%d config changed, unregistering. Driver reload required before port will be operational again.\n", port->eth_id);
                port->type = NFP_PORT_INVALID;
        }

        memcpy(port->eth_port, eth_port, sizeof(*eth_port));

        return 0;
}

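/**
 * nfp_net_refresh_port_table_sync() - Refresh port state and drop stale vNICs
 * @pf:       NFP PF handle
 *
 * Re-read the ETH port table and update the state of all PF ports, resync
 * representor state and free any vNICs whose port has become invalid.
 * Must be called with the PF lock held.
 *
 * Return: 0 on success, negative errno otherwise.
 */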
int nfp_net_refresh_port_table_sync(struct nfp_pf *pf)
{
        struct nfp_eth_table *eth_table;
        struct nfp_net *nn, *next;
        struct nfp_port *port;
        int err;

        lockdep_assert_held(&pf->lock);

        /* Check for nfp_net_pci_remove() racing against us */
        if (list_empty(&pf->vnics))
                return 0;

        /* Update state of all ports */
        rtnl_lock();
        list_for_each_entry(port, &pf->ports, port_list)
                clear_bit(NFP_PORT_CHANGED, &port->flags);

        eth_table = nfp_eth_read_ports(pf->cpp);
        if (!eth_table) {
                list_for_each_entry(port, &pf->ports, port_list)
                        if (__nfp_port_get_eth_port(port))
                                set_bit(NFP_PORT_CHANGED, &port->flags);
                rtnl_unlock();
                nfp_err(pf->cpp, "Error refreshing port config!\n");
                return -EIO;
        }

        list_for_each_entry(port, &pf->ports, port_list)
                if (__nfp_port_get_eth_port(port))
                        nfp_net_eth_port_update(pf->cpp, port, eth_table);
        rtnl_unlock();

        kfree(eth_table);

        /* Resync repr state. This may cause reprs to be removed. */
        err = nfp_reprs_resync_phys_ports(pf->app);
        if (err)
                return err;

        /* Shoot off the ports which became invalid */
        list_for_each_entry_safe(nn, next, &pf->vnics, vnic_list) {
                if (!nn->port || nn->port->type != NFP_PORT_INVALID)
                        continue;

                nfp_net_pf_clean_vnic(pf, nn);
                nfp_net_pf_free_vnic(pf, nn);
        }

        return 0;
}

static void nfp_net_refresh_vnics(struct work_struct *work)
{
        struct nfp_pf *pf = container_of(work, struct nfp_pf,
                                         port_refresh_work);

        mutex_lock(&pf->lock);
        nfp_net_refresh_port_table_sync(pf);
        mutex_unlock(&pf->lock);
}

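/**
 * nfp_net_refresh_port_table() - Schedule an async refresh of the port table
 * @port:     NFP port which triggered the refresh
 *
 * Mark @port as changed and queue the PF's port refresh work, which calls
 * nfp_net_refresh_port_table_sync() under the PF lock.
 */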
void nfp_net_refresh_port_table(struct nfp_port *port)
{
        struct nfp_pf *pf = port->app->pf;

        set_bit(NFP_PORT_CHANGED, &port->flags);

        queue_work(pf->wq, &pf->port_refresh_work);
}

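/**
 * nfp_net_refresh_eth_port() - Synchronously refresh the state of one port
 * @port:     NFP port to refresh
 *
 * Re-read the ETH port table and update only @port's cached state.
 *
 * Return: 0 on success, negative errno otherwise.
 */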
int nfp_net_refresh_eth_port(struct nfp_port *port)
{
        struct nfp_cpp *cpp = port->app->cpp;
        struct nfp_eth_table *eth_table;
        int ret;

        clear_bit(NFP_PORT_CHANGED, &port->flags);

        eth_table = nfp_eth_read_ports(cpp);
        if (!eth_table) {
                set_bit(NFP_PORT_CHANGED, &port->flags);
                nfp_err(cpp, "Error refreshing port state table!\n");
                return -EIO;
        }

        ret = nfp_net_eth_port_update(cpp, port, eth_table);

        kfree(eth_table);

        return ret;
}

/*
 * PCI device functions
 */
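/**
 * nfp_net_pci_probe() - Initialize the netdev side of the PF
 * @pf:       NFP PF handle
 *
 * Map device memory, check the firmware ABI, set up the app, devlink and
 * debugfs state, then allocate, wire up and initialize all vNICs.
 *
 * Return: 0 on success, negative errno otherwise.
 */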
int nfp_net_pci_probe(struct nfp_pf *pf)
{
        struct devlink *devlink = priv_to_devlink(pf);
        struct nfp_net_fw_version fw_ver;
        u8 __iomem *ctrl_bar, *qc_bar;
        int stride;
        int err;

        INIT_WORK(&pf->port_refresh_work, nfp_net_refresh_vnics);

        if (!pf->rtbl) {
                nfp_err(pf->cpp, "No %s, giving up.\n",
                        pf->fw_loaded ? "symbol table" : "firmware found");
                return -EINVAL;
        }

        pf->max_data_vnics = nfp_net_pf_get_num_ports(pf);
        if ((int)pf->max_data_vnics < 0)
                return pf->max_data_vnics;

        err = nfp_net_pci_map_mem(pf);
        if (err)
                return err;

        ctrl_bar = nfp_cpp_area_iomem(pf->data_vnic_bar);
        qc_bar = nfp_cpp_area_iomem(pf->qc_area);
        if (!ctrl_bar || !qc_bar) {
                err = -EIO;
                goto err_unmap;
        }

        nfp_net_get_fw_version(&fw_ver, ctrl_bar);
        if (fw_ver.resv || fw_ver.class != NFP_NET_CFG_VERSION_CLASS_GENERIC) {
                nfp_err(pf->cpp, "Unknown Firmware ABI %d.%d.%d.%d\n",
                        fw_ver.resv, fw_ver.class, fw_ver.major, fw_ver.minor);
                err = -EINVAL;
                goto err_unmap;
        }

        /* Determine stride */
        if (nfp_net_fw_ver_eq(&fw_ver, 0, 0, 0, 1)) {
                stride = 2;
                nfp_warn(pf->cpp, "OBSOLETE Firmware detected - VF isolation not available\n");
        } else {
                switch (fw_ver.major) {
                case 1 ... 5:
                        stride = 4;
                        break;
                default:
                        nfp_err(pf->cpp, "Unsupported Firmware ABI %d.%d.%d.%d\n",
                                fw_ver.resv, fw_ver.class,
                                fw_ver.major, fw_ver.minor);
                        err = -EINVAL;
                        goto err_unmap;
                }
        }

        err = nfp_net_pf_app_init(pf, qc_bar, stride);
        if (err)
                goto err_unmap;

        err = devlink_register(devlink, &pf->pdev->dev);
        if (err)
                goto err_app_clean;

        err = nfp_shared_buf_register(pf);
        if (err)
                goto err_devlink_unreg;

        err = nfp_devlink_params_register(pf);
        if (err)
                goto err_shared_buf_unreg;

        mutex_lock(&pf->lock);
        pf->ddir = nfp_net_debugfs_device_add(pf->pdev);

        /* Allocate the vnics and do basic init */
        err = nfp_net_pf_alloc_vnics(pf, ctrl_bar, qc_bar, stride);
        if (err)
                goto err_clean_ddir;

        err = nfp_net_pf_alloc_irqs(pf);
        if (err)
                goto err_free_vnics;

        err = nfp_net_pf_app_start(pf);
        if (err)
                goto err_free_irqs;

        err = nfp_net_pf_init_vnics(pf);
        if (err)
                goto err_stop_app;

        mutex_unlock(&pf->lock);

        return 0;

err_stop_app:
        nfp_net_pf_app_stop(pf);
err_free_irqs:
        nfp_net_pf_free_irqs(pf);
err_free_vnics:
        nfp_net_pf_free_vnics(pf);
err_clean_ddir:
        nfp_net_debugfs_dir_clean(&pf->ddir);
        mutex_unlock(&pf->lock);
        nfp_devlink_params_unregister(pf);
err_shared_buf_unreg:
        nfp_shared_buf_unregister(pf);
err_devlink_unreg:
        cancel_work_sync(&pf->port_refresh_work);
        devlink_unregister(devlink);
err_app_clean:
        nfp_net_pf_app_clean(pf);
err_unmap:
        nfp_net_pci_unmap_mem(pf);
        return err;
}

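/**
 * nfp_net_pci_remove() - Tear down the netdev side of the PF
 * @pf:       NFP PF handle
 *
 * Undo nfp_net_pci_probe(): clean and free all data vNICs, stop the app,
 * unregister devlink state and release IRQs and mapped memory.
 */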
void nfp_net_pci_remove(struct nfp_pf *pf)
{
        struct nfp_net *nn, *next;

        mutex_lock(&pf->lock);
        list_for_each_entry_safe(nn, next, &pf->vnics, vnic_list) {
                if (!nfp_net_is_data_vnic(nn))
                        continue;
                nfp_net_pf_clean_vnic(pf, nn);
                nfp_net_pf_free_vnic(pf, nn);
        }

        nfp_net_pf_app_stop(pf);
        /* stop app first, to avoid double free of ctrl vNIC's ddir */
        nfp_net_debugfs_dir_clean(&pf->ddir);

        mutex_unlock(&pf->lock);

        nfp_devlink_params_unregister(pf);
        nfp_shared_buf_unregister(pf);
        devlink_unregister(priv_to_devlink(pf));

        nfp_net_pf_free_irqs(pf);
        nfp_net_pf_app_clean(pf);
        nfp_net_pci_unmap_mem(pf);

        cancel_work_sync(&pf->port_refresh_work);
}
