root/drivers/net/ethernet/marvell/octeontx2/af/cgx.c


DEFINITIONS

This source file includes the following definitions:
  1. cgx_write
  2. cgx_read
  3. lmac_pdata
  4. cgx_get_cgxcnt_max
  5. cgx_get_lmac_cnt
  6. cgx_get_pdata
  7. cgx_get_link_info
  8. mac2u64
  9. cgx_lmac_addr_set
  10. cgx_lmac_addr_get
  11. cgx_set_pkind
  12. cgx_get_lmac_type
  13. cgx_lmac_internal_loopback
  14. cgx_lmac_promisc_config
  15. cgx_get_rx_stats
  16. cgx_get_tx_stats
  17. cgx_lmac_rx_tx_enable
  18. cgx_fwi_cmd_send
  19. cgx_fwi_cmd_generic
  20. cgx_link_usertable_init
  21. link_status_user_format
  22. cgx_link_change_handler
  23. cgx_cmdresp_is_linkevent
  24. cgx_event_is_linkevent
  25. cgx_fwi_get_mkex_prfl_sz
  26. cgx_fwi_get_mkex_prfl_addr
  27. cgx_get_mkex_prfl_info
  28. cgx_fwi_event_handler
  29. cgx_lmac_evh_register
  30. cgx_lmac_evh_unregister
  31. cgx_fwi_link_change
  32. cgx_fwi_read_version
  33. cgx_lmac_verify_fwi_version
  34. cgx_lmac_linkup_work
  35. cgx_lmac_linkup_start
  36. cgx_lmac_init
  37. cgx_lmac_exit
  38. cgx_probe
  39. cgx_remove

// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTx2 CGX driver
 *
 * Copyright (C) 2018 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/phy.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>

#include "cgx.h"

#define DRV_NAME        "octeontx2-cgx"
#define DRV_STRING      "Marvell OcteonTX2 CGX/MAC Driver"

/**
 * struct lmac
 * @wq_cmd_cmplt:       waitq to keep the process blocked until cmd completion
 * @cmd_lock:           lock to serialize the command interface
 * @resp:               command response
 * @link_info:          link related information
 * @event_cb:           callback for link change events
 * @event_cb_lock:      lock for serializing callback with unregister
 * @cmd_pend:           flag set before a new command is started,
 *                      cleared after the command response is received
 * @cgx:                parent cgx port
 * @lmac_id:            lmac port id
 * @name:               lmac port name
 */
struct lmac {
        wait_queue_head_t wq_cmd_cmplt;
        struct mutex cmd_lock;
        u64 resp;
        struct cgx_link_user_info link_info;
        struct cgx_event_cb event_cb;
        spinlock_t event_cb_lock;
        bool cmd_pend;
        struct cgx *cgx;
        u8 lmac_id;
        char *name;
};

struct cgx {
        void __iomem            *reg_base;
        struct pci_dev          *pdev;
        u8                      cgx_id;
        u8                      lmac_count;
        struct lmac             *lmac_idmap[MAX_LMAC_PER_CGX];
        struct work_struct      cgx_cmd_work;
        struct workqueue_struct *cgx_cmd_workq;
        struct list_head        cgx_list;
};

static LIST_HEAD(cgx_list);

/* Convert firmware speed encoding to user format (Mbps) */
static u32 cgx_speed_mbps[CGX_LINK_SPEED_MAX];

/* Convert firmware lmac type encoding to string */
static char *cgx_lmactype_string[LMAC_MODE_MAX];

/* CGX PHY management internal APIs */
static int cgx_fwi_link_change(struct cgx *cgx, int lmac_id, bool en);

/* Supported devices */
static const struct pci_device_id cgx_id_table[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_CGX) },
        { 0, }  /* end of table */
};

MODULE_DEVICE_TABLE(pci, cgx_id_table);

/* Each LMAC owns a 256KB (1 << 18) register window within the CGX BAR,
 * hence the (lmac << 18) offset in the accessors below.
 */
static void cgx_write(struct cgx *cgx, u64 lmac, u64 offset, u64 val)
{
        writeq(val, cgx->reg_base + (lmac << 18) + offset);
}

static u64 cgx_read(struct cgx *cgx, u64 lmac, u64 offset)
{
        return readq(cgx->reg_base + (lmac << 18) + offset);
}

static inline struct lmac *lmac_pdata(u8 lmac_id, struct cgx *cgx)
{
        if (!cgx || lmac_id >= MAX_LMAC_PER_CGX)
                return NULL;

        return cgx->lmac_idmap[lmac_id];
}

int cgx_get_cgxcnt_max(void)
{
        struct cgx *cgx_dev;
        int idmax = -ENODEV;

        list_for_each_entry(cgx_dev, &cgx_list, cgx_list)
                if (cgx_dev->cgx_id > idmax)
                        idmax = cgx_dev->cgx_id;

        if (idmax < 0)
                return 0;

        return idmax + 1;
}
EXPORT_SYMBOL(cgx_get_cgxcnt_max);

int cgx_get_lmac_cnt(void *cgxd)
{
        struct cgx *cgx = cgxd;

        if (!cgx)
                return -ENODEV;

        return cgx->lmac_count;
}
EXPORT_SYMBOL(cgx_get_lmac_cnt);

void *cgx_get_pdata(int cgx_id)
{
        struct cgx *cgx_dev;

        list_for_each_entry(cgx_dev, &cgx_list, cgx_list) {
                if (cgx_dev->cgx_id == cgx_id)
                        return cgx_dev;
        }
        return NULL;
}
EXPORT_SYMBOL(cgx_get_pdata);

/* The caller must hold the lock for the event queue (where asynchronous
 * events are posted) before calling this API; otherwise an asynchronous
 * event (carrying the latest link status) can reach the destination
 * before this function returns and make the returned link status stale.
 */
int cgx_get_link_info(void *cgxd, int lmac_id,
                      struct cgx_link_user_info *linfo)
{
        struct lmac *lmac = lmac_pdata(lmac_id, cgxd);

        if (!lmac)
                return -ENODEV;

        *linfo = lmac->link_info;
        return 0;
}
EXPORT_SYMBOL(cgx_get_link_info);
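
/* Illustrative usage sketch (not part of the driver): per the locking note
 * above, a consumer is expected to serialize this query against its own
 * event delivery. "consumer_lock" is a hypothetical lock owned by the
 * caller, not something this driver provides:
 *
 *      struct cgx_link_user_info linfo;
 *      int err;
 *
 *      mutex_lock(&consumer_lock);
 *      err = cgx_get_link_info(cgxd, lmac_id, &linfo);
 *      mutex_unlock(&consumer_lock);
 */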

static u64 mac2u64(u8 *mac_addr)
{
        u64 mac = 0;
        int index;

        for (index = ETH_ALEN - 1; index >= 0; index--)
                mac |= ((u64)*mac_addr++) << (8 * index);
        return mac;
}
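
/* Worked example: mac2u64() places mac_addr[0] in the most significant of
 * the six address bytes, so 00:01:02:03:04:05 packs to 0x000102030405.
 */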

int cgx_lmac_addr_set(u8 cgx_id, u8 lmac_id, u8 *mac_addr)
{
        struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
        u64 cfg;

        /* Pack the 6-byte MAC address into the DMAC CAM entry format */
        cfg = mac2u64(mac_addr);

        cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (lmac_id * 0x8)),
                  cfg | CGX_DMAC_CAM_ADDR_ENABLE | ((u64)lmac_id << 49));

        cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
        cfg |= CGX_DMAC_CTL0_CAM_ENABLE;
        cgx_write(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);

        return 0;
}
EXPORT_SYMBOL(cgx_lmac_addr_set);

u64 cgx_lmac_addr_get(u8 cgx_id, u8 lmac_id)
{
        struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
        u64 cfg;

        cfg = cgx_read(cgx_dev, 0, CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8);
        return cfg & CGX_RX_DMAC_ADR_MASK;
}
EXPORT_SYMBOL(cgx_lmac_addr_get);

int cgx_set_pkind(void *cgxd, u8 lmac_id, int pkind)
{
        struct cgx *cgx = cgxd;

        if (!cgx || lmac_id >= cgx->lmac_count)
                return -ENODEV;

        cgx_write(cgx, lmac_id, CGXX_CMRX_RX_ID_MAP, (pkind & 0x3F));
        return 0;
}
EXPORT_SYMBOL(cgx_set_pkind);

static inline u8 cgx_get_lmac_type(struct cgx *cgx, int lmac_id)
{
        u64 cfg;

        cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_CFG);
        return (cfg >> CGX_LMAC_TYPE_SHIFT) & CGX_LMAC_TYPE_MASK;
}

/* Configure CGX LMAC in internal loopback mode */
int cgx_lmac_internal_loopback(void *cgxd, int lmac_id, bool enable)
{
        struct cgx *cgx = cgxd;
        u8 lmac_type;
        u64 cfg;

        if (!cgx || lmac_id >= cgx->lmac_count)
                return -ENODEV;

        lmac_type = cgx_get_lmac_type(cgx, lmac_id);
        if (lmac_type == LMAC_MODE_SGMII || lmac_type == LMAC_MODE_QSGMII) {
                cfg = cgx_read(cgx, lmac_id, CGXX_GMP_PCS_MRX_CTL);
                if (enable)
                        cfg |= CGXX_GMP_PCS_MRX_CTL_LBK;
                else
                        cfg &= ~CGXX_GMP_PCS_MRX_CTL_LBK;
                cgx_write(cgx, lmac_id, CGXX_GMP_PCS_MRX_CTL, cfg);
        } else {
                cfg = cgx_read(cgx, lmac_id, CGXX_SPUX_CONTROL1);
                if (enable)
                        cfg |= CGXX_SPUX_CONTROL1_LBK;
                else
                        cfg &= ~CGXX_SPUX_CONTROL1_LBK;
                cgx_write(cgx, lmac_id, CGXX_SPUX_CONTROL1, cfg);
        }
        return 0;
}
EXPORT_SYMBOL(cgx_lmac_internal_loopback);

void cgx_lmac_promisc_config(int cgx_id, int lmac_id, bool enable)
{
        struct cgx *cgx = cgx_get_pdata(cgx_id);
        u64 cfg = 0;

        if (!cgx)
                return;

        if (enable) {
                /* Enable promiscuous mode on LMAC */
                cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
                cfg &= ~(CGX_DMAC_CAM_ACCEPT | CGX_DMAC_MCAST_MODE);
                cfg |= CGX_DMAC_BCAST_MODE;
                cgx_write(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);

                cfg = cgx_read(cgx, 0,
                               (CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8));
                cfg &= ~CGX_DMAC_CAM_ADDR_ENABLE;
                cgx_write(cgx, 0,
                          (CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8), cfg);
        } else {
                /* Disable promiscuous mode */
                cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
                cfg |= CGX_DMAC_CAM_ACCEPT | CGX_DMAC_MCAST_MODE;
                cgx_write(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);
                cfg = cgx_read(cgx, 0,
                               (CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8));
                cfg |= CGX_DMAC_CAM_ADDR_ENABLE;
                cgx_write(cgx, 0,
                          (CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8), cfg);
        }
}
EXPORT_SYMBOL(cgx_lmac_promisc_config);

int cgx_get_rx_stats(void *cgxd, int lmac_id, int idx, u64 *rx_stat)
{
        struct cgx *cgx = cgxd;

        if (!cgx || lmac_id >= cgx->lmac_count)
                return -ENODEV;
        *rx_stat = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_STAT0 + (idx * 8));
        return 0;
}
EXPORT_SYMBOL(cgx_get_rx_stats);

int cgx_get_tx_stats(void *cgxd, int lmac_id, int idx, u64 *tx_stat)
{
        struct cgx *cgx = cgxd;

        if (!cgx || lmac_id >= cgx->lmac_count)
                return -ENODEV;
        *tx_stat = cgx_read(cgx, lmac_id, CGXX_CMRX_TX_STAT0 + (idx * 8));
        return 0;
}
EXPORT_SYMBOL(cgx_get_tx_stats);

int cgx_lmac_rx_tx_enable(void *cgxd, int lmac_id, bool enable)
{
        struct cgx *cgx = cgxd;
        u64 cfg;

        if (!cgx || lmac_id >= cgx->lmac_count)
                return -ENODEV;

        cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_CFG);
        if (enable)
                cfg |= CMR_EN | DATA_PKT_RX_EN | DATA_PKT_TX_EN;
        else
                cfg &= ~(CMR_EN | DATA_PKT_RX_EN | DATA_PKT_TX_EN);
        cgx_write(cgx, lmac_id, CGXX_CMRX_CFG, cfg);
        return 0;
}
EXPORT_SYMBOL(cgx_lmac_rx_tx_enable);

/* CGX firmware interface low level support */
static int cgx_fwi_cmd_send(u64 req, u64 *resp, struct lmac *lmac)
{
        struct cgx *cgx = lmac->cgx;
        struct device *dev;
        int err = 0;
        u64 cmd;

        /* Ensure no other command is in progress */
        err = mutex_lock_interruptible(&lmac->cmd_lock);
        if (err)
                return err;

        /* Ensure command register is free */
        cmd = cgx_read(cgx, lmac->lmac_id, CGX_COMMAND_REG);
        if (FIELD_GET(CMDREG_OWN, cmd) != CGX_CMD_OWN_NS) {
                err = -EBUSY;
                goto unlock;
        }

        /* Update ownership in command request */
        req = FIELD_SET(CMDREG_OWN, CGX_CMD_OWN_FIRMWARE, req);

        /* Mark this lmac as pending, before we start */
        lmac->cmd_pend = true;

        /* Start command in hardware */
        cgx_write(cgx, lmac->lmac_id, CGX_COMMAND_REG, req);

        /* Ensure command is completed without errors */
        if (!wait_event_timeout(lmac->wq_cmd_cmplt, !lmac->cmd_pend,
                                msecs_to_jiffies(CGX_CMD_TIMEOUT))) {
                dev = &cgx->pdev->dev;
                dev_err(dev, "cgx port %d:%d cmd timeout\n",
                        cgx->cgx_id, lmac->lmac_id);
                err = -EIO;
                goto unlock;
        }

        /* We have a valid command response */
        smp_rmb(); /* Ensure the latest updates are visible */
        *resp = lmac->resp;

unlock:
        mutex_unlock(&lmac->cmd_lock);

        return err;
}

static inline int cgx_fwi_cmd_generic(u64 req, u64 *resp,
                                      struct cgx *cgx, int lmac_id)
{
        struct lmac *lmac;
        int err;

        lmac = lmac_pdata(lmac_id, cgx);
        if (!lmac)
                return -ENODEV;

        err = cgx_fwi_cmd_send(req, resp, lmac);
        if (err)
                return err;

        /* Check for valid response */
        if (FIELD_GET(EVTREG_STAT, *resp) == CGX_STAT_FAIL)
                return -EIO;

        return 0;
}
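
/* Illustrative sketch of the command pattern used throughout this file
 * (compare cgx_fwi_read_version() further below): compose a request by
 * setting the command ID in an otherwise zero request word, send it via
 * cgx_fwi_cmd_generic(), then decode fields from the response word:
 *
 *      u64 req = 0, resp;
 *      int major_ver, err;
 *
 *      req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_FW_VER, req);
 *      err = cgx_fwi_cmd_generic(req, &resp, cgx, 0);
 *      if (!err)
 *              major_ver = FIELD_GET(RESP_MAJOR_VER, resp);
 */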

static inline void cgx_link_usertable_init(void)
{
        cgx_speed_mbps[CGX_LINK_NONE] = 0;
        cgx_speed_mbps[CGX_LINK_10M] = 10;
        cgx_speed_mbps[CGX_LINK_100M] = 100;
        cgx_speed_mbps[CGX_LINK_1G] = 1000;
        cgx_speed_mbps[CGX_LINK_2HG] = 2500;
        cgx_speed_mbps[CGX_LINK_5G] = 5000;
        cgx_speed_mbps[CGX_LINK_10G] = 10000;
        cgx_speed_mbps[CGX_LINK_20G] = 20000;
        cgx_speed_mbps[CGX_LINK_25G] = 25000;
        cgx_speed_mbps[CGX_LINK_40G] = 40000;
        cgx_speed_mbps[CGX_LINK_50G] = 50000;
        cgx_speed_mbps[CGX_LINK_100G] = 100000;

        cgx_lmactype_string[LMAC_MODE_SGMII] = "SGMII";
        cgx_lmactype_string[LMAC_MODE_XAUI] = "XAUI";
        cgx_lmactype_string[LMAC_MODE_RXAUI] = "RXAUI";
        cgx_lmactype_string[LMAC_MODE_10G_R] = "10G_R";
        cgx_lmactype_string[LMAC_MODE_40G_R] = "40G_R";
        cgx_lmactype_string[LMAC_MODE_QSGMII] = "QSGMII";
        cgx_lmactype_string[LMAC_MODE_25G_R] = "25G_R";
        cgx_lmactype_string[LMAC_MODE_50G_R] = "50G_R";
        cgx_lmactype_string[LMAC_MODE_100G_R] = "100G_R";
        cgx_lmactype_string[LMAC_MODE_USXGMII] = "USXGMII";
}

static inline void link_status_user_format(u64 lstat,
                                           struct cgx_link_user_info *linfo,
                                           struct cgx *cgx, u8 lmac_id)
{
        char *lmac_string;

        linfo->link_up = FIELD_GET(RESP_LINKSTAT_UP, lstat);
        linfo->full_duplex = FIELD_GET(RESP_LINKSTAT_FDUPLEX, lstat);
        linfo->speed = cgx_speed_mbps[FIELD_GET(RESP_LINKSTAT_SPEED, lstat)];
        linfo->lmac_type_id = cgx_get_lmac_type(cgx, lmac_id);
        lmac_string = cgx_lmactype_string[linfo->lmac_type_id];
        /* strscpy() guarantees NUL termination; strncpy() did not */
        strscpy(linfo->lmac_type, lmac_string, LMACTYPE_STR_LEN);
}

/* Hardware event handlers */
static inline void cgx_link_change_handler(u64 lstat,
                                           struct lmac *lmac)
{
        struct cgx_link_user_info *linfo;
        struct cgx *cgx = lmac->cgx;
        struct cgx_link_event event;
        struct device *dev;
        int err_type;

        dev = &cgx->pdev->dev;

        link_status_user_format(lstat, &event.link_uinfo, cgx, lmac->lmac_id);
        err_type = FIELD_GET(RESP_LINKSTAT_ERRTYPE, lstat);

        event.cgx_id = cgx->cgx_id;
        event.lmac_id = lmac->lmac_id;

        /* Update the local copy of the link status */
        lmac->link_info = event.link_uinfo;
        linfo = &lmac->link_info;

        /* Ensure the callback doesn't get unregistered until we finish */
        spin_lock(&lmac->event_cb_lock);

        if (!lmac->event_cb.notify_link_chg) {
                dev_dbg(dev, "cgx port %d:%d Link change handler null",
                        cgx->cgx_id, lmac->lmac_id);
                if (err_type != CGX_ERR_NONE) {
                        dev_err(dev, "cgx port %d:%d Link error %d\n",
                                cgx->cgx_id, lmac->lmac_id, err_type);
                }
                dev_info(dev, "cgx port %d:%d Link is %s %d Mbps\n",
                         cgx->cgx_id, lmac->lmac_id,
                         linfo->link_up ? "UP" : "DOWN", linfo->speed);
                goto err;
        }

        if (lmac->event_cb.notify_link_chg(&event, lmac->event_cb.data))
                dev_err(dev, "event notification failure\n");
err:
        spin_unlock(&lmac->event_cb_lock);
}

static inline bool cgx_cmdresp_is_linkevent(u64 event)
{
        u8 id;

        id = FIELD_GET(EVTREG_ID, event);
        return id == CGX_CMD_LINK_BRING_UP || id == CGX_CMD_LINK_BRING_DOWN;
}

static inline bool cgx_event_is_linkevent(u64 event)
{
        return FIELD_GET(EVTREG_ID, event) == CGX_EVT_LINK_CHANGE;
}

static inline int cgx_fwi_get_mkex_prfl_sz(u64 *prfl_sz,
                                           struct cgx *cgx)
{
        u64 req = 0;
        u64 resp;
        int err;

        req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_MKEX_PRFL_SIZE, req);
        err = cgx_fwi_cmd_generic(req, &resp, cgx, 0);
        if (!err)
                *prfl_sz = FIELD_GET(RESP_MKEX_PRFL_SIZE, resp);

        return err;
}

static inline int cgx_fwi_get_mkex_prfl_addr(u64 *prfl_addr,
                                             struct cgx *cgx)
{
        u64 req = 0;
        u64 resp;
        int err;

        req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_MKEX_PRFL_ADDR, req);
        err = cgx_fwi_cmd_generic(req, &resp, cgx, 0);
        if (!err)
                *prfl_addr = FIELD_GET(RESP_MKEX_PRFL_ADDR, resp);

        return err;
}

int cgx_get_mkex_prfl_info(u64 *addr, u64 *size)
{
        struct cgx *cgx_dev;
        int err;

        if (!addr || !size)
                return -EINVAL;

        /* list_first_entry() on an empty list returns a bogus pointer and
         * the NULL check below would never fire; use the _or_null variant
         */
        cgx_dev = list_first_entry_or_null(&cgx_list, struct cgx, cgx_list);
        if (!cgx_dev)
                return -ENXIO;

        err = cgx_fwi_get_mkex_prfl_sz(size, cgx_dev);
        if (err)
                return -EIO;

        err = cgx_fwi_get_mkex_prfl_addr(addr, cgx_dev);
        if (err)
                return -EIO;

        return 0;
}
EXPORT_SYMBOL(cgx_get_mkex_prfl_info);
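
/* Illustrative caller-side sketch (not part of this file): fetch the MKEX
 * profile location handed out by firmware.
 *
 *      u64 prfl_addr, prfl_sz;
 *
 *      if (!cgx_get_mkex_prfl_info(&prfl_addr, &prfl_sz))
 *              pr_info("MKEX profile @ 0x%llx, %llu bytes\n",
 *                      prfl_addr, prfl_sz);
 */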

static irqreturn_t cgx_fwi_event_handler(int irq, void *data)
{
        struct lmac *lmac = data;
        struct cgx *cgx;
        u64 event;

        cgx = lmac->cgx;

        event = cgx_read(cgx, lmac->lmac_id, CGX_EVENT_REG);

        if (!FIELD_GET(EVTREG_ACK, event))
                return IRQ_NONE;

        switch (FIELD_GET(EVTREG_EVT_TYPE, event)) {
        case CGX_EVT_CMD_RESP:
                /* Copy the response. Since only one command is active at a
                 * time, there is no way a response can get overwritten
                 */
                lmac->resp = event;
                /* Ensure response is updated before thread context starts */
                smp_wmb();

                /* There won't be separate events for link changes initiated
                 * from software; hence report command responses as events
                 */
                if (cgx_cmdresp_is_linkevent(event))
                        cgx_link_change_handler(event, lmac);

                /* Release the thread waiting for completion. The waiter in
                 * cgx_fwi_cmd_send() sleeps uninterruptibly in
                 * wait_event_timeout(), so a plain wake_up() is required;
                 * wake_up_interruptible() would not wake it.
                 */
                lmac->cmd_pend = false;
                wake_up(&lmac->wq_cmd_cmplt);
                break;
        case CGX_EVT_ASYNC:
                if (cgx_event_is_linkevent(event))
                        cgx_link_change_handler(event, lmac);
                break;
        }

        /* Any new event or command response will be posted by firmware
         * only after the current status is acked.
         * Ack the interrupt register as well.
         */
        cgx_write(lmac->cgx, lmac->lmac_id, CGX_EVENT_REG, 0);
        cgx_write(lmac->cgx, lmac->lmac_id, CGXX_CMRX_INT, FW_CGX_INT);

        return IRQ_HANDLED;
}

/* APIs for PHY management using the CGX firmware interface */

/* Callback registration for hardware events like link change */
int cgx_lmac_evh_register(struct cgx_event_cb *cb, void *cgxd, int lmac_id)
{
        struct cgx *cgx = cgxd;
        struct lmac *lmac;

        lmac = lmac_pdata(lmac_id, cgx);
        if (!lmac)
                return -ENODEV;

        lmac->event_cb = *cb;

        return 0;
}
EXPORT_SYMBOL(cgx_lmac_evh_register);
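
/* Illustrative consumer sketch (hypothetical names): a client supplies a
 * notifier and an opaque cookie via struct cgx_event_cb. The notifier
 * signature is inferred from the call site in cgx_link_change_handler();
 * "my_link_notify" and "priv" are placeholders for the caller's own
 * handler and data.
 *
 *      static int my_link_notify(struct cgx_link_event *event, void *data)
 *      {
 *              // react to event->link_uinfo for event->cgx_id/lmac_id
 *              return 0;
 *      }
 *
 *      struct cgx_event_cb cb = {
 *              .notify_link_chg = my_link_notify,
 *              .data = priv,
 *      };
 *      err = cgx_lmac_evh_register(&cb, cgxd, lmac_id);
 */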

int cgx_lmac_evh_unregister(void *cgxd, int lmac_id)
{
        struct lmac *lmac;
        unsigned long flags;
        struct cgx *cgx = cgxd;

        lmac = lmac_pdata(lmac_id, cgx);
        if (!lmac)
                return -ENODEV;

        spin_lock_irqsave(&lmac->event_cb_lock, flags);
        lmac->event_cb.notify_link_chg = NULL;
        lmac->event_cb.data = NULL;
        spin_unlock_irqrestore(&lmac->event_cb_lock, flags);

        return 0;
}
EXPORT_SYMBOL(cgx_lmac_evh_unregister);

static int cgx_fwi_link_change(struct cgx *cgx, int lmac_id, bool enable)
{
        u64 req = 0;
        u64 resp;

        if (enable)
                req = FIELD_SET(CMDREG_ID, CGX_CMD_LINK_BRING_UP, req);
        else
                req = FIELD_SET(CMDREG_ID, CGX_CMD_LINK_BRING_DOWN, req);

        return cgx_fwi_cmd_generic(req, &resp, cgx, lmac_id);
}

static inline int cgx_fwi_read_version(u64 *resp, struct cgx *cgx)
{
        u64 req = 0;

        req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_FW_VER, req);
        return cgx_fwi_cmd_generic(req, resp, cgx, 0);
}

static int cgx_lmac_verify_fwi_version(struct cgx *cgx)
{
        struct device *dev = &cgx->pdev->dev;
        int major_ver, minor_ver;
        u64 resp;
        int err;

        if (!cgx->lmac_count)
                return 0;

        err = cgx_fwi_read_version(&resp, cgx);
        if (err)
                return err;

        major_ver = FIELD_GET(RESP_MAJOR_VER, resp);
        minor_ver = FIELD_GET(RESP_MINOR_VER, resp);
        dev_dbg(dev, "Firmware command interface version = %d.%d\n",
                major_ver, minor_ver);
        if (major_ver != CGX_FIRMWARE_MAJOR_VER ||
            minor_ver != CGX_FIRMWARE_MINOR_VER)
                return -EIO;

        return 0;
}

static void cgx_lmac_linkup_work(struct work_struct *work)
{
        struct cgx *cgx = container_of(work, struct cgx, cgx_cmd_work);
        struct device *dev = &cgx->pdev->dev;
        int i, err;

        /* Do link up for all the lmacs */
        for (i = 0; i < cgx->lmac_count; i++) {
                err = cgx_fwi_link_change(cgx, i, true);
                if (err)
                        dev_info(dev, "cgx port %d:%d Link up command failed\n",
                                 cgx->cgx_id, i);
        }
}

int cgx_lmac_linkup_start(void *cgxd)
{
        struct cgx *cgx = cgxd;

        if (!cgx)
                return -ENODEV;

        queue_work(cgx->cgx_cmd_workq, &cgx->cgx_cmd_work);

        return 0;
}
EXPORT_SYMBOL(cgx_lmac_linkup_start);

static int cgx_lmac_init(struct cgx *cgx)
{
        struct lmac *lmac;
        int i, err;

        cgx->lmac_count = cgx_read(cgx, 0, CGXX_CMRX_RX_LMACS) & 0x7;
        if (cgx->lmac_count > MAX_LMAC_PER_CGX)
                cgx->lmac_count = MAX_LMAC_PER_CGX;

        for (i = 0; i < cgx->lmac_count; i++) {
                lmac = kzalloc(sizeof(*lmac), GFP_KERNEL);
                if (!lmac)
                        return -ENOMEM;
                lmac->name = kasprintf(GFP_KERNEL, "cgx_fwi_%d_%d",
                                       cgx->cgx_id, i);
                if (!lmac->name) {
                        /* Don't leak the lmac allocated above */
                        kfree(lmac);
                        return -ENOMEM;
                }
                lmac->lmac_id = i;
                lmac->cgx = cgx;
                init_waitqueue_head(&lmac->wq_cmd_cmplt);
                mutex_init(&lmac->cmd_lock);
                spin_lock_init(&lmac->event_cb_lock);
                err = request_irq(pci_irq_vector(cgx->pdev,
                                                 CGX_LMAC_FWI + i * 9),
                                  cgx_fwi_event_handler, 0, lmac->name, lmac);
                if (err) {
                        kfree(lmac->name);
                        kfree(lmac);
                        return err;
                }

                /* Enable interrupt */
                cgx_write(cgx, lmac->lmac_id, CGXX_CMRX_INT_ENA_W1S,
                          FW_CGX_INT);

                /* Add reference; earlier entries are freed by
                 * cgx_lmac_exit() on the probe error path
                 */
                cgx->lmac_idmap[i] = lmac;
        }

        return cgx_lmac_verify_fwi_version(cgx);
}

static int cgx_lmac_exit(struct cgx *cgx)
{
        struct lmac *lmac;
        int i;

        if (cgx->cgx_cmd_workq) {
                flush_workqueue(cgx->cgx_cmd_workq);
                destroy_workqueue(cgx->cgx_cmd_workq);
                cgx->cgx_cmd_workq = NULL;
        }

        /* Free all lmac related resources */
        for (i = 0; i < cgx->lmac_count; i++) {
                lmac = cgx->lmac_idmap[i];
                if (!lmac)
                        continue;
                free_irq(pci_irq_vector(cgx->pdev, CGX_LMAC_FWI + i * 9), lmac);
                kfree(lmac->name);
                kfree(lmac);
        }

        return 0;
}

static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        struct device *dev = &pdev->dev;
        struct cgx *cgx;
        int err, nvec;

        cgx = devm_kzalloc(dev, sizeof(*cgx), GFP_KERNEL);
        if (!cgx)
                return -ENOMEM;
        cgx->pdev = pdev;

        pci_set_drvdata(pdev, cgx);

        err = pci_enable_device(pdev);
        if (err) {
                dev_err(dev, "Failed to enable PCI device\n");
                pci_set_drvdata(pdev, NULL);
                return err;
        }

        err = pci_request_regions(pdev, DRV_NAME);
        if (err) {
                dev_err(dev, "PCI request regions failed 0x%x\n", err);
                goto err_disable_device;
        }

        /* Map configuration registers */
        cgx->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
        if (!cgx->reg_base) {
                dev_err(dev, "CGX: Cannot map CSR memory space, aborting\n");
                err = -ENOMEM;
                goto err_release_regions;
        }

        nvec = CGX_NVEC;
        /* With min == max == nvec this returns either nvec or a negative
         * error, so a separate "err != nvec" check is redundant
         */
        err = pci_alloc_irq_vectors(pdev, nvec, nvec, PCI_IRQ_MSIX);
        if (err < 0) {
                dev_err(dev, "Request for %d msix vectors failed, err %d\n",
                        nvec, err);
                goto err_release_regions;
        }

        cgx->cgx_id = (pci_resource_start(pdev, PCI_CFG_REG_BAR_NUM) >> 24)
                & CGX_ID_MASK;

        /* Init wq for processing linkup requests */
        INIT_WORK(&cgx->cgx_cmd_work, cgx_lmac_linkup_work);
        cgx->cgx_cmd_workq = alloc_workqueue("cgx_cmd_workq", 0, 0);
        if (!cgx->cgx_cmd_workq) {
                dev_err(dev, "alloc workqueue failed for cgx cmd\n");
                err = -ENOMEM;
                goto err_free_irq_vectors;
        }

        list_add(&cgx->cgx_list, &cgx_list);

        cgx_link_usertable_init();

        err = cgx_lmac_init(cgx);
        if (err)
                goto err_release_lmac;

        return 0;

err_release_lmac:
        cgx_lmac_exit(cgx);
        list_del(&cgx->cgx_list);
err_free_irq_vectors:
        pci_free_irq_vectors(pdev);
err_release_regions:
        pci_release_regions(pdev);
err_disable_device:
        pci_disable_device(pdev);
        pci_set_drvdata(pdev, NULL);
        return err;
}

static void cgx_remove(struct pci_dev *pdev)
{
        struct cgx *cgx = pci_get_drvdata(pdev);

        cgx_lmac_exit(cgx);
        list_del(&cgx->cgx_list);
        pci_free_irq_vectors(pdev);
        pci_release_regions(pdev);
        pci_disable_device(pdev);
        pci_set_drvdata(pdev, NULL);
}

struct pci_driver cgx_driver = {
        .name = DRV_NAME,
        .id_table = cgx_id_table,
        .probe = cgx_probe,
        .remove = cgx_remove,
};
