1/* 2 * Copyright (C) 2015 Cavium, Inc. 3 * 4 * This program is free software; you can redistribute it and/or modify it 5 * under the terms of version 2 of the GNU General Public License 6 * as published by the Free Software Foundation. 7 */ 8 9#include <linux/acpi.h> 10#include <linux/module.h> 11#include <linux/interrupt.h> 12#include <linux/pci.h> 13#include <linux/netdevice.h> 14#include <linux/etherdevice.h> 15#include <linux/phy.h> 16#include <linux/of.h> 17#include <linux/of_mdio.h> 18#include <linux/of_net.h> 19 20#include "nic_reg.h" 21#include "nic.h" 22#include "thunder_bgx.h" 23 24#define DRV_NAME "thunder-BGX" 25#define DRV_VERSION "1.0" 26 27struct lmac { 28 struct bgx *bgx; 29 int dmac; 30 u8 mac[ETH_ALEN]; 31 bool link_up; 32 int lmacid; /* ID within BGX */ 33 int lmacid_bd; /* ID on board */ 34 struct net_device netdev; 35 struct phy_device *phydev; 36 unsigned int last_duplex; 37 unsigned int last_link; 38 unsigned int last_speed; 39 bool is_sgmii; 40 struct delayed_work dwork; 41 struct workqueue_struct *check_link; 42}; 43 44struct bgx { 45 u8 bgx_id; 46 u8 qlm_mode; 47 struct lmac lmac[MAX_LMAC_PER_BGX]; 48 int lmac_count; 49 int lmac_type; 50 int lane_to_sds; 51 int use_training; 52 void __iomem *reg_base; 53 struct pci_dev *pdev; 54}; 55 56static struct bgx *bgx_vnic[MAX_BGX_THUNDER]; 57static int lmac_count; /* Total no of LMACs in system */ 58 59static int bgx_xaui_check_link(struct lmac *lmac); 60 61/* Supported devices */ 62static const struct pci_device_id bgx_id_table[] = { 63 { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_THUNDER_BGX) }, 64 { 0, } /* end of table */ 65}; 66 67MODULE_AUTHOR("Cavium Inc"); 68MODULE_DESCRIPTION("Cavium Thunder BGX/MAC Driver"); 69MODULE_LICENSE("GPL v2"); 70MODULE_VERSION(DRV_VERSION); 71MODULE_DEVICE_TABLE(pci, bgx_id_table); 72 73/* The Cavium ThunderX network controller can *only* be found in SoCs 74 * containing the ThunderX ARM64 CPU implementation. All accesses to the device 75 * registers on this platform are implicitly strongly ordered with respect 76 * to memory accesses. So writeq_relaxed() and readq_relaxed() are safe to use 77 * with no memory barriers in this driver. The readq()/writeq() functions add 78 * explicit ordering operation which in this case are redundant, and only 79 * add overhead. 80 */ 81 82/* Register read/write APIs */ 83static u64 bgx_reg_read(struct bgx *bgx, u8 lmac, u64 offset) 84{ 85 void __iomem *addr = bgx->reg_base + ((u32)lmac << 20) + offset; 86 87 return readq_relaxed(addr); 88} 89 90static void bgx_reg_write(struct bgx *bgx, u8 lmac, u64 offset, u64 val) 91{ 92 void __iomem *addr = bgx->reg_base + ((u32)lmac << 20) + offset; 93 94 writeq_relaxed(val, addr); 95} 96 97static void bgx_reg_modify(struct bgx *bgx, u8 lmac, u64 offset, u64 val) 98{ 99 void __iomem *addr = bgx->reg_base + ((u32)lmac << 20) + offset; 100 101 writeq_relaxed(val | readq_relaxed(addr), addr); 102} 103 104static int bgx_poll_reg(struct bgx *bgx, u8 lmac, u64 reg, u64 mask, bool zero) 105{ 106 int timeout = 100; 107 u64 reg_val; 108 109 while (timeout) { 110 reg_val = bgx_reg_read(bgx, lmac, reg); 111 if (zero && !(reg_val & mask)) 112 return 0; 113 if (!zero && (reg_val & mask)) 114 return 0; 115 usleep_range(1000, 2000); 116 timeout--; 117 } 118 return 1; 119} 120 121/* Return number of BGX present in HW */ 122unsigned bgx_get_map(int node) 123{ 124 int i; 125 unsigned map = 0; 126 127 for (i = 0; i < MAX_BGX_PER_CN88XX; i++) { 128 if (bgx_vnic[(node * MAX_BGX_PER_CN88XX) + i]) 129 map |= (1 << i); 130 } 131 132 return map; 133} 134EXPORT_SYMBOL(bgx_get_map); 135 136/* Return number of LMAC configured for this BGX */ 137int bgx_get_lmac_count(int node, int bgx_idx) 138{ 139 struct bgx *bgx; 140 141 bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx]; 142 if (bgx) 143 return bgx->lmac_count; 144 145 return 0; 146} 147EXPORT_SYMBOL(bgx_get_lmac_count); 148 149/* Returns the current link status of LMAC */ 150void bgx_get_lmac_link_state(int node, int bgx_idx, int lmacid, void *status) 151{ 152 struct bgx_link_status *link = (struct bgx_link_status *)status; 153 struct bgx *bgx; 154 struct lmac *lmac; 155 156 bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx]; 157 if (!bgx) 158 return; 159 160 lmac = &bgx->lmac[lmacid]; 161 link->link_up = lmac->link_up; 162 link->duplex = lmac->last_duplex; 163 link->speed = lmac->last_speed; 164} 165EXPORT_SYMBOL(bgx_get_lmac_link_state); 166 167const u8 *bgx_get_lmac_mac(int node, int bgx_idx, int lmacid) 168{ 169 struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx]; 170 171 if (bgx) 172 return bgx->lmac[lmacid].mac; 173 174 return NULL; 175} 176EXPORT_SYMBOL(bgx_get_lmac_mac); 177 178void bgx_set_lmac_mac(int node, int bgx_idx, int lmacid, const u8 *mac) 179{ 180 struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx]; 181 182 if (!bgx) 183 return; 184 185 ether_addr_copy(bgx->lmac[lmacid].mac, mac); 186} 187EXPORT_SYMBOL(bgx_set_lmac_mac); 188 189void bgx_lmac_rx_tx_enable(int node, int bgx_idx, int lmacid, bool enable) 190{ 191 struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx]; 192 u64 cfg; 193 194 if (!bgx) 195 return; 196 197 cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG); 198 if (enable) 199 cfg |= CMR_PKT_RX_EN | CMR_PKT_TX_EN; 200 else 201 cfg &= ~(CMR_PKT_RX_EN | CMR_PKT_TX_EN); 202 bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg); 203} 204EXPORT_SYMBOL(bgx_lmac_rx_tx_enable); 205 206static void bgx_sgmii_change_link_state(struct lmac *lmac) 207{ 208 struct bgx *bgx = lmac->bgx; 209 u64 cmr_cfg; 210 u64 port_cfg = 0; 211 u64 misc_ctl = 0; 212 213 cmr_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_CMRX_CFG); 214 cmr_cfg &= ~CMR_EN; 215 bgx_reg_write(bgx, lmac->lmacid, BGX_CMRX_CFG, cmr_cfg); 216 217 port_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG); 218 misc_ctl = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_PCS_MISCX_CTL); 219 220 if (lmac->link_up) { 221 misc_ctl &= ~PCS_MISC_CTL_GMX_ENO; 222 port_cfg &= ~GMI_PORT_CFG_DUPLEX; 223 port_cfg |= (lmac->last_duplex << 2); 224 } else { 225 misc_ctl |= PCS_MISC_CTL_GMX_ENO; 226 } 227 228 switch (lmac->last_speed) { 229 case 10: 230 port_cfg &= ~GMI_PORT_CFG_SPEED; /* speed 0 */ 231 port_cfg |= GMI_PORT_CFG_SPEED_MSB; /* speed_msb 1 */ 232 port_cfg &= ~GMI_PORT_CFG_SLOT_TIME; /* slottime 0 */ 233 misc_ctl &= ~PCS_MISC_CTL_SAMP_PT_MASK; 234 misc_ctl |= 50; /* samp_pt */ 235 bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_SLOT, 64); 236 bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_BURST, 0); 237 break; 238 case 100: 239 port_cfg &= ~GMI_PORT_CFG_SPEED; /* speed 0 */ 240 port_cfg &= ~GMI_PORT_CFG_SPEED_MSB; /* speed_msb 0 */ 241 port_cfg &= ~GMI_PORT_CFG_SLOT_TIME; /* slottime 0 */ 242 misc_ctl &= ~PCS_MISC_CTL_SAMP_PT_MASK; 243 misc_ctl |= 5; /* samp_pt */ 244 bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_SLOT, 64); 245 bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_BURST, 0); 246 break; 247 case 1000: 248 port_cfg |= GMI_PORT_CFG_SPEED; /* speed 1 */ 249 port_cfg &= ~GMI_PORT_CFG_SPEED_MSB; /* speed_msb 0 */ 250 port_cfg |= GMI_PORT_CFG_SLOT_TIME; /* slottime 1 */ 251 misc_ctl &= ~PCS_MISC_CTL_SAMP_PT_MASK; 252 misc_ctl |= 1; /* samp_pt */ 253 bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_SLOT, 512); 254 if (lmac->last_duplex) 255 bgx_reg_write(bgx, lmac->lmacid, 256 BGX_GMP_GMI_TXX_BURST, 0); 257 else 258 bgx_reg_write(bgx, lmac->lmacid, 259 BGX_GMP_GMI_TXX_BURST, 8192); 260 break; 261 default: 262 break; 263 } 264 bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_PCS_MISCX_CTL, misc_ctl); 265 bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG, port_cfg); 266 267 port_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG); 268 269 /* renable lmac */ 270 cmr_cfg |= CMR_EN; 271 bgx_reg_write(bgx, lmac->lmacid, BGX_CMRX_CFG, cmr_cfg); 272} 273 274static void bgx_lmac_handler(struct net_device *netdev) 275{ 276 struct lmac *lmac = container_of(netdev, struct lmac, netdev); 277 struct phy_device *phydev = lmac->phydev; 278 int link_changed = 0; 279 280 if (!lmac) 281 return; 282 283 if (!phydev->link && lmac->last_link) 284 link_changed = -1; 285 286 if (phydev->link && 287 (lmac->last_duplex != phydev->duplex || 288 lmac->last_link != phydev->link || 289 lmac->last_speed != phydev->speed)) { 290 link_changed = 1; 291 } 292 293 lmac->last_link = phydev->link; 294 lmac->last_speed = phydev->speed; 295 lmac->last_duplex = phydev->duplex; 296 297 if (!link_changed) 298 return; 299 300 if (link_changed > 0) 301 lmac->link_up = true; 302 else 303 lmac->link_up = false; 304 305 if (lmac->is_sgmii) 306 bgx_sgmii_change_link_state(lmac); 307 else 308 bgx_xaui_check_link(lmac); 309} 310 311u64 bgx_get_rx_stats(int node, int bgx_idx, int lmac, int idx) 312{ 313 struct bgx *bgx; 314 315 bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx]; 316 if (!bgx) 317 return 0; 318 319 if (idx > 8) 320 lmac = 0; 321 return bgx_reg_read(bgx, lmac, BGX_CMRX_RX_STAT0 + (idx * 8)); 322} 323EXPORT_SYMBOL(bgx_get_rx_stats); 324 325u64 bgx_get_tx_stats(int node, int bgx_idx, int lmac, int idx) 326{ 327 struct bgx *bgx; 328 329 bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx]; 330 if (!bgx) 331 return 0; 332 333 return bgx_reg_read(bgx, lmac, BGX_CMRX_TX_STAT0 + (idx * 8)); 334} 335EXPORT_SYMBOL(bgx_get_tx_stats); 336 337static void bgx_flush_dmac_addrs(struct bgx *bgx, int lmac) 338{ 339 u64 offset; 340 341 while (bgx->lmac[lmac].dmac > 0) { 342 offset = ((bgx->lmac[lmac].dmac - 1) * sizeof(u64)) + 343 (lmac * MAX_DMAC_PER_LMAC * sizeof(u64)); 344 bgx_reg_write(bgx, 0, BGX_CMR_RX_DMACX_CAM + offset, 0); 345 bgx->lmac[lmac].dmac--; 346 } 347} 348 349/* Configure BGX LMAC in internal loopback mode */ 350void bgx_lmac_internal_loopback(int node, int bgx_idx, 351 int lmac_idx, bool enable) 352{ 353 struct bgx *bgx; 354 struct lmac *lmac; 355 u64 cfg; 356 357 bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx]; 358 if (!bgx) 359 return; 360 361 lmac = &bgx->lmac[lmac_idx]; 362 if (lmac->is_sgmii) { 363 cfg = bgx_reg_read(bgx, lmac_idx, BGX_GMP_PCS_MRX_CTL); 364 if (enable) 365 cfg |= PCS_MRX_CTL_LOOPBACK1; 366 else 367 cfg &= ~PCS_MRX_CTL_LOOPBACK1; 368 bgx_reg_write(bgx, lmac_idx, BGX_GMP_PCS_MRX_CTL, cfg); 369 } else { 370 cfg = bgx_reg_read(bgx, lmac_idx, BGX_SPUX_CONTROL1); 371 if (enable) 372 cfg |= SPU_CTL_LOOPBACK; 373 else 374 cfg &= ~SPU_CTL_LOOPBACK; 375 bgx_reg_write(bgx, lmac_idx, BGX_SPUX_CONTROL1, cfg); 376 } 377} 378EXPORT_SYMBOL(bgx_lmac_internal_loopback); 379 380static int bgx_lmac_sgmii_init(struct bgx *bgx, int lmacid) 381{ 382 u64 cfg; 383 384 bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_TXX_THRESH, 0x30); 385 /* max packet size */ 386 bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_RXX_JABBER, MAX_FRAME_SIZE); 387 388 /* Disable frame alignment if using preamble */ 389 cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_GMI_TXX_APPEND); 390 if (cfg & 1) 391 bgx_reg_write(bgx, lmacid, BGX_GMP_GMI_TXX_SGMII_CTL, 0); 392 393 /* Enable lmac */ 394 bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG, CMR_EN); 395 396 /* PCS reset */ 397 bgx_reg_modify(bgx, lmacid, BGX_GMP_PCS_MRX_CTL, PCS_MRX_CTL_RESET); 398 if (bgx_poll_reg(bgx, lmacid, BGX_GMP_PCS_MRX_CTL, 399 PCS_MRX_CTL_RESET, true)) { 400 dev_err(&bgx->pdev->dev, "BGX PCS reset not completed\n"); 401 return -1; 402 } 403 404 /* power down, reset autoneg, autoneg enable */ 405 cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_PCS_MRX_CTL); 406 cfg &= ~PCS_MRX_CTL_PWR_DN; 407 cfg |= (PCS_MRX_CTL_RST_AN | PCS_MRX_CTL_AN_EN); 408 bgx_reg_write(bgx, lmacid, BGX_GMP_PCS_MRX_CTL, cfg); 409 410 if (bgx_poll_reg(bgx, lmacid, BGX_GMP_PCS_MRX_STATUS, 411 PCS_MRX_STATUS_AN_CPT, false)) { 412 dev_err(&bgx->pdev->dev, "BGX AN_CPT not completed\n"); 413 return -1; 414 } 415 416 return 0; 417} 418 419static int bgx_lmac_xaui_init(struct bgx *bgx, int lmacid, int lmac_type) 420{ 421 u64 cfg; 422 423 /* Reset SPU */ 424 bgx_reg_modify(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_RESET); 425 if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_RESET, true)) { 426 dev_err(&bgx->pdev->dev, "BGX SPU reset not completed\n"); 427 return -1; 428 } 429 430 /* Disable LMAC */ 431 cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG); 432 cfg &= ~CMR_EN; 433 bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg); 434 435 bgx_reg_modify(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_LOW_POWER); 436 /* Set interleaved running disparity for RXAUI */ 437 if (bgx->lmac_type != BGX_MODE_RXAUI) 438 bgx_reg_modify(bgx, lmacid, 439 BGX_SPUX_MISC_CONTROL, SPU_MISC_CTL_RX_DIS); 440 else 441 bgx_reg_modify(bgx, lmacid, BGX_SPUX_MISC_CONTROL, 442 SPU_MISC_CTL_RX_DIS | SPU_MISC_CTL_INTLV_RDISP); 443 444 /* clear all interrupts */ 445 cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_RX_INT); 446 bgx_reg_write(bgx, lmacid, BGX_SMUX_RX_INT, cfg); 447 cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_INT); 448 bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_INT, cfg); 449 cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT); 450 bgx_reg_write(bgx, lmacid, BGX_SPUX_INT, cfg); 451 452 if (bgx->use_training) { 453 bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LP_CUP, 0x00); 454 bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LD_CUP, 0x00); 455 bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LD_REP, 0x00); 456 /* training enable */ 457 bgx_reg_modify(bgx, lmacid, 458 BGX_SPUX_BR_PMD_CRTL, SPU_PMD_CRTL_TRAIN_EN); 459 } 460 461 /* Append FCS to each packet */ 462 bgx_reg_modify(bgx, lmacid, BGX_SMUX_TX_APPEND, SMU_TX_APPEND_FCS_D); 463 464 /* Disable forward error correction */ 465 cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_FEC_CONTROL); 466 cfg &= ~SPU_FEC_CTL_FEC_EN; 467 bgx_reg_write(bgx, lmacid, BGX_SPUX_FEC_CONTROL, cfg); 468 469 /* Disable autoneg */ 470 cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_AN_CONTROL); 471 cfg = cfg & ~(SPU_AN_CTL_AN_EN | SPU_AN_CTL_XNP_EN); 472 bgx_reg_write(bgx, lmacid, BGX_SPUX_AN_CONTROL, cfg); 473 474 cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_AN_ADV); 475 if (bgx->lmac_type == BGX_MODE_10G_KR) 476 cfg |= (1 << 23); 477 else if (bgx->lmac_type == BGX_MODE_40G_KR) 478 cfg |= (1 << 24); 479 else 480 cfg &= ~((1 << 23) | (1 << 24)); 481 cfg = cfg & (~((1ULL << 25) | (1ULL << 22) | (1ULL << 12))); 482 bgx_reg_write(bgx, lmacid, BGX_SPUX_AN_ADV, cfg); 483 484 cfg = bgx_reg_read(bgx, 0, BGX_SPU_DBG_CONTROL); 485 cfg &= ~SPU_DBG_CTL_AN_ARB_LINK_CHK_EN; 486 bgx_reg_write(bgx, 0, BGX_SPU_DBG_CONTROL, cfg); 487 488 /* Enable lmac */ 489 bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG, CMR_EN); 490 491 cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_CONTROL1); 492 cfg &= ~SPU_CTL_LOW_POWER; 493 bgx_reg_write(bgx, lmacid, BGX_SPUX_CONTROL1, cfg); 494 495 cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_CTL); 496 cfg &= ~SMU_TX_CTL_UNI_EN; 497 cfg |= SMU_TX_CTL_DIC_EN; 498 bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_CTL, cfg); 499 500 /* take lmac_count into account */ 501 bgx_reg_modify(bgx, lmacid, BGX_SMUX_TX_THRESH, (0x100 - 1)); 502 /* max packet size */ 503 bgx_reg_modify(bgx, lmacid, BGX_SMUX_RX_JABBER, MAX_FRAME_SIZE); 504 505 return 0; 506} 507 508static int bgx_xaui_check_link(struct lmac *lmac) 509{ 510 struct bgx *bgx = lmac->bgx; 511 int lmacid = lmac->lmacid; 512 int lmac_type = bgx->lmac_type; 513 u64 cfg; 514 515 bgx_reg_modify(bgx, lmacid, BGX_SPUX_MISC_CONTROL, SPU_MISC_CTL_RX_DIS); 516 if (bgx->use_training) { 517 cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT); 518 if (!(cfg & (1ull << 13))) { 519 cfg = (1ull << 13) | (1ull << 14); 520 bgx_reg_write(bgx, lmacid, BGX_SPUX_INT, cfg); 521 cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_BR_PMD_CRTL); 522 cfg |= (1ull << 0); 523 bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_CRTL, cfg); 524 return -1; 525 } 526 } 527 528 /* wait for PCS to come out of reset */ 529 if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_RESET, true)) { 530 dev_err(&bgx->pdev->dev, "BGX SPU reset not completed\n"); 531 return -1; 532 } 533 534 if ((lmac_type == BGX_MODE_10G_KR) || (lmac_type == BGX_MODE_XFI) || 535 (lmac_type == BGX_MODE_40G_KR) || (lmac_type == BGX_MODE_XLAUI)) { 536 if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_BR_STATUS1, 537 SPU_BR_STATUS_BLK_LOCK, false)) { 538 dev_err(&bgx->pdev->dev, 539 "SPU_BR_STATUS_BLK_LOCK not completed\n"); 540 return -1; 541 } 542 } else { 543 if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_BX_STATUS, 544 SPU_BX_STATUS_RX_ALIGN, false)) { 545 dev_err(&bgx->pdev->dev, 546 "SPU_BX_STATUS_RX_ALIGN not completed\n"); 547 return -1; 548 } 549 } 550 551 /* Clear rcvflt bit (latching high) and read it back */ 552 bgx_reg_modify(bgx, lmacid, BGX_SPUX_STATUS2, SPU_STATUS2_RCVFLT); 553 if (bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) & SPU_STATUS2_RCVFLT) { 554 dev_err(&bgx->pdev->dev, "Receive fault, retry training\n"); 555 if (bgx->use_training) { 556 cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT); 557 if (!(cfg & (1ull << 13))) { 558 cfg = (1ull << 13) | (1ull << 14); 559 bgx_reg_write(bgx, lmacid, BGX_SPUX_INT, cfg); 560 cfg = bgx_reg_read(bgx, lmacid, 561 BGX_SPUX_BR_PMD_CRTL); 562 cfg |= (1ull << 0); 563 bgx_reg_write(bgx, lmacid, 564 BGX_SPUX_BR_PMD_CRTL, cfg); 565 return -1; 566 } 567 } 568 return -1; 569 } 570 571 /* Wait for MAC RX to be ready */ 572 if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_RX_CTL, 573 SMU_RX_CTL_STATUS, true)) { 574 dev_err(&bgx->pdev->dev, "SMU RX link not okay\n"); 575 return -1; 576 } 577 578 /* Wait for BGX RX to be idle */ 579 if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_CTL, SMU_CTL_RX_IDLE, false)) { 580 dev_err(&bgx->pdev->dev, "SMU RX not idle\n"); 581 return -1; 582 } 583 584 /* Wait for BGX TX to be idle */ 585 if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_CTL, SMU_CTL_TX_IDLE, false)) { 586 dev_err(&bgx->pdev->dev, "SMU TX not idle\n"); 587 return -1; 588 } 589 590 if (bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) & SPU_STATUS2_RCVFLT) { 591 dev_err(&bgx->pdev->dev, "Receive fault\n"); 592 return -1; 593 } 594 595 /* Receive link is latching low. Force it high and verify it */ 596 bgx_reg_modify(bgx, lmacid, BGX_SPUX_STATUS1, SPU_STATUS1_RCV_LNK); 597 if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_STATUS1, 598 SPU_STATUS1_RCV_LNK, false)) { 599 dev_err(&bgx->pdev->dev, "SPU receive link down\n"); 600 return -1; 601 } 602 603 cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_MISC_CONTROL); 604 cfg &= ~SPU_MISC_CTL_RX_DIS; 605 bgx_reg_write(bgx, lmacid, BGX_SPUX_MISC_CONTROL, cfg); 606 return 0; 607} 608 609static void bgx_poll_for_link(struct work_struct *work) 610{ 611 struct lmac *lmac; 612 u64 link; 613 614 lmac = container_of(work, struct lmac, dwork.work); 615 616 /* Receive link is latching low. Force it high and verify it */ 617 bgx_reg_modify(lmac->bgx, lmac->lmacid, 618 BGX_SPUX_STATUS1, SPU_STATUS1_RCV_LNK); 619 bgx_poll_reg(lmac->bgx, lmac->lmacid, BGX_SPUX_STATUS1, 620 SPU_STATUS1_RCV_LNK, false); 621 622 link = bgx_reg_read(lmac->bgx, lmac->lmacid, BGX_SPUX_STATUS1); 623 if (link & SPU_STATUS1_RCV_LNK) { 624 lmac->link_up = 1; 625 if (lmac->bgx->lmac_type == BGX_MODE_XLAUI) 626 lmac->last_speed = 40000; 627 else 628 lmac->last_speed = 10000; 629 lmac->last_duplex = 1; 630 } else { 631 lmac->link_up = 0; 632 lmac->last_speed = SPEED_UNKNOWN; 633 lmac->last_duplex = DUPLEX_UNKNOWN; 634 } 635 636 if (lmac->last_link != lmac->link_up) { 637 lmac->last_link = lmac->link_up; 638 if (lmac->link_up) 639 bgx_xaui_check_link(lmac); 640 } 641 642 queue_delayed_work(lmac->check_link, &lmac->dwork, HZ * 2); 643} 644 645static int bgx_lmac_enable(struct bgx *bgx, u8 lmacid) 646{ 647 struct lmac *lmac; 648 u64 cfg; 649 650 lmac = &bgx->lmac[lmacid]; 651 lmac->bgx = bgx; 652 653 if (bgx->lmac_type == BGX_MODE_SGMII) { 654 lmac->is_sgmii = 1; 655 if (bgx_lmac_sgmii_init(bgx, lmacid)) 656 return -1; 657 } else { 658 lmac->is_sgmii = 0; 659 if (bgx_lmac_xaui_init(bgx, lmacid, bgx->lmac_type)) 660 return -1; 661 } 662 663 if (lmac->is_sgmii) { 664 cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_GMI_TXX_APPEND); 665 cfg |= ((1ull << 2) | (1ull << 1)); /* FCS and PAD */ 666 bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_TXX_APPEND, cfg); 667 bgx_reg_write(bgx, lmacid, BGX_GMP_GMI_TXX_MIN_PKT, 60 - 1); 668 } else { 669 cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_APPEND); 670 cfg |= ((1ull << 2) | (1ull << 1)); /* FCS and PAD */ 671 bgx_reg_modify(bgx, lmacid, BGX_SMUX_TX_APPEND, cfg); 672 bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_MIN_PKT, 60 + 4); 673 } 674 675 /* Enable lmac */ 676 bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG, CMR_EN); 677 678 /* Restore default cfg, incase low level firmware changed it */ 679 bgx_reg_write(bgx, lmacid, BGX_CMRX_RX_DMAC_CTL, 0x03); 680 681 if ((bgx->lmac_type != BGX_MODE_XFI) && 682 (bgx->lmac_type != BGX_MODE_XLAUI) && 683 (bgx->lmac_type != BGX_MODE_40G_KR) && 684 (bgx->lmac_type != BGX_MODE_10G_KR)) { 685 if (!lmac->phydev) 686 return -ENODEV; 687 688 lmac->phydev->dev_flags = 0; 689 690 if (phy_connect_direct(&lmac->netdev, lmac->phydev, 691 bgx_lmac_handler, 692 PHY_INTERFACE_MODE_SGMII)) 693 return -ENODEV; 694 695 phy_start_aneg(lmac->phydev); 696 } else { 697 lmac->check_link = alloc_workqueue("check_link", WQ_UNBOUND | 698 WQ_MEM_RECLAIM, 1); 699 if (!lmac->check_link) 700 return -ENOMEM; 701 INIT_DELAYED_WORK(&lmac->dwork, bgx_poll_for_link); 702 queue_delayed_work(lmac->check_link, &lmac->dwork, 0); 703 } 704 705 return 0; 706} 707 708static void bgx_lmac_disable(struct bgx *bgx, u8 lmacid) 709{ 710 struct lmac *lmac; 711 u64 cmrx_cfg; 712 713 lmac = &bgx->lmac[lmacid]; 714 if (lmac->check_link) { 715 /* Destroy work queue */ 716 cancel_delayed_work_sync(&lmac->dwork); 717 destroy_workqueue(lmac->check_link); 718 } 719 720 cmrx_cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG); 721 cmrx_cfg &= ~(1 << 15); 722 bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cmrx_cfg); 723 bgx_flush_dmac_addrs(bgx, lmacid); 724 725 if ((bgx->lmac_type != BGX_MODE_XFI) && 726 (bgx->lmac_type != BGX_MODE_XLAUI) && 727 (bgx->lmac_type != BGX_MODE_40G_KR) && 728 (bgx->lmac_type != BGX_MODE_10G_KR) && lmac->phydev) 729 phy_disconnect(lmac->phydev); 730 731 lmac->phydev = NULL; 732} 733 734static void bgx_set_num_ports(struct bgx *bgx) 735{ 736 u64 lmac_count; 737 738 switch (bgx->qlm_mode) { 739 case QLM_MODE_SGMII: 740 bgx->lmac_count = 4; 741 bgx->lmac_type = BGX_MODE_SGMII; 742 bgx->lane_to_sds = 0; 743 break; 744 case QLM_MODE_XAUI_1X4: 745 bgx->lmac_count = 1; 746 bgx->lmac_type = BGX_MODE_XAUI; 747 bgx->lane_to_sds = 0xE4; 748 break; 749 case QLM_MODE_RXAUI_2X2: 750 bgx->lmac_count = 2; 751 bgx->lmac_type = BGX_MODE_RXAUI; 752 bgx->lane_to_sds = 0xE4; 753 break; 754 case QLM_MODE_XFI_4X1: 755 bgx->lmac_count = 4; 756 bgx->lmac_type = BGX_MODE_XFI; 757 bgx->lane_to_sds = 0; 758 break; 759 case QLM_MODE_XLAUI_1X4: 760 bgx->lmac_count = 1; 761 bgx->lmac_type = BGX_MODE_XLAUI; 762 bgx->lane_to_sds = 0xE4; 763 break; 764 case QLM_MODE_10G_KR_4X1: 765 bgx->lmac_count = 4; 766 bgx->lmac_type = BGX_MODE_10G_KR; 767 bgx->lane_to_sds = 0; 768 bgx->use_training = 1; 769 break; 770 case QLM_MODE_40G_KR4_1X4: 771 bgx->lmac_count = 1; 772 bgx->lmac_type = BGX_MODE_40G_KR; 773 bgx->lane_to_sds = 0xE4; 774 bgx->use_training = 1; 775 break; 776 default: 777 bgx->lmac_count = 0; 778 break; 779 } 780 781 /* Check if low level firmware has programmed LMAC count 782 * based on board type, if yes consider that otherwise 783 * the default static values 784 */ 785 lmac_count = bgx_reg_read(bgx, 0, BGX_CMR_RX_LMACS) & 0x7; 786 if (lmac_count != 4) 787 bgx->lmac_count = lmac_count; 788} 789 790static void bgx_init_hw(struct bgx *bgx) 791{ 792 int i; 793 794 bgx_set_num_ports(bgx); 795 796 bgx_reg_modify(bgx, 0, BGX_CMR_GLOBAL_CFG, CMR_GLOBAL_CFG_FCS_STRIP); 797 if (bgx_reg_read(bgx, 0, BGX_CMR_BIST_STATUS)) 798 dev_err(&bgx->pdev->dev, "BGX%d BIST failed\n", bgx->bgx_id); 799 800 /* Set lmac type and lane2serdes mapping */ 801 for (i = 0; i < bgx->lmac_count; i++) { 802 if (bgx->lmac_type == BGX_MODE_RXAUI) { 803 if (i) 804 bgx->lane_to_sds = 0x0e; 805 else 806 bgx->lane_to_sds = 0x04; 807 bgx_reg_write(bgx, i, BGX_CMRX_CFG, 808 (bgx->lmac_type << 8) | bgx->lane_to_sds); 809 continue; 810 } 811 bgx_reg_write(bgx, i, BGX_CMRX_CFG, 812 (bgx->lmac_type << 8) | (bgx->lane_to_sds + i)); 813 bgx->lmac[i].lmacid_bd = lmac_count; 814 lmac_count++; 815 } 816 817 bgx_reg_write(bgx, 0, BGX_CMR_TX_LMACS, bgx->lmac_count); 818 bgx_reg_write(bgx, 0, BGX_CMR_RX_LMACS, bgx->lmac_count); 819 820 /* Set the backpressure AND mask */ 821 for (i = 0; i < bgx->lmac_count; i++) 822 bgx_reg_modify(bgx, 0, BGX_CMR_CHAN_MSK_AND, 823 ((1ULL << MAX_BGX_CHANS_PER_LMAC) - 1) << 824 (i * MAX_BGX_CHANS_PER_LMAC)); 825 826 /* Disable all MAC filtering */ 827 for (i = 0; i < RX_DMAC_COUNT; i++) 828 bgx_reg_write(bgx, 0, BGX_CMR_RX_DMACX_CAM + (i * 8), 0x00); 829 830 /* Disable MAC steering (NCSI traffic) */ 831 for (i = 0; i < RX_TRAFFIC_STEER_RULE_COUNT; i++) 832 bgx_reg_write(bgx, 0, BGX_CMR_RX_STREERING + (i * 8), 0x00); 833} 834 835static void bgx_get_qlm_mode(struct bgx *bgx) 836{ 837 struct device *dev = &bgx->pdev->dev; 838 int lmac_type; 839 int train_en; 840 841 /* Read LMAC0 type to figure out QLM mode 842 * This is configured by low level firmware 843 */ 844 lmac_type = bgx_reg_read(bgx, 0, BGX_CMRX_CFG); 845 lmac_type = (lmac_type >> 8) & 0x07; 846 847 train_en = bgx_reg_read(bgx, 0, BGX_SPUX_BR_PMD_CRTL) & 848 SPU_PMD_CRTL_TRAIN_EN; 849 850 switch (lmac_type) { 851 case BGX_MODE_SGMII: 852 bgx->qlm_mode = QLM_MODE_SGMII; 853 dev_info(dev, "BGX%d QLM mode: SGMII\n", bgx->bgx_id); 854 break; 855 case BGX_MODE_XAUI: 856 bgx->qlm_mode = QLM_MODE_XAUI_1X4; 857 dev_info(dev, "BGX%d QLM mode: XAUI\n", bgx->bgx_id); 858 break; 859 case BGX_MODE_RXAUI: 860 bgx->qlm_mode = QLM_MODE_RXAUI_2X2; 861 dev_info(dev, "BGX%d QLM mode: RXAUI\n", bgx->bgx_id); 862 break; 863 case BGX_MODE_XFI: 864 if (!train_en) { 865 bgx->qlm_mode = QLM_MODE_XFI_4X1; 866 dev_info(dev, "BGX%d QLM mode: XFI\n", bgx->bgx_id); 867 } else { 868 bgx->qlm_mode = QLM_MODE_10G_KR_4X1; 869 dev_info(dev, "BGX%d QLM mode: 10G_KR\n", bgx->bgx_id); 870 } 871 break; 872 case BGX_MODE_XLAUI: 873 if (!train_en) { 874 bgx->qlm_mode = QLM_MODE_XLAUI_1X4; 875 dev_info(dev, "BGX%d QLM mode: XLAUI\n", bgx->bgx_id); 876 } else { 877 bgx->qlm_mode = QLM_MODE_40G_KR4_1X4; 878 dev_info(dev, "BGX%d QLM mode: 40G_KR4\n", bgx->bgx_id); 879 } 880 break; 881 default: 882 bgx->qlm_mode = QLM_MODE_SGMII; 883 dev_info(dev, "BGX%d QLM default mode: SGMII\n", bgx->bgx_id); 884 } 885} 886 887#ifdef CONFIG_ACPI 888 889static int acpi_get_mac_address(struct acpi_device *adev, u8 *dst) 890{ 891 u8 mac[ETH_ALEN]; 892 int ret; 893 894 ret = fwnode_property_read_u8_array(acpi_fwnode_handle(adev), 895 "mac-address", mac, ETH_ALEN); 896 if (ret) 897 goto out; 898 899 if (!is_valid_ether_addr(mac)) { 900 ret = -EINVAL; 901 goto out; 902 } 903 904 memcpy(dst, mac, ETH_ALEN); 905out: 906 return ret; 907} 908 909/* Currently only sets the MAC address. */ 910static acpi_status bgx_acpi_register_phy(acpi_handle handle, 911 u32 lvl, void *context, void **rv) 912{ 913 struct bgx *bgx = context; 914 struct acpi_device *adev; 915 916 if (acpi_bus_get_device(handle, &adev)) 917 goto out; 918 919 acpi_get_mac_address(adev, bgx->lmac[bgx->lmac_count].mac); 920 921 SET_NETDEV_DEV(&bgx->lmac[bgx->lmac_count].netdev, &bgx->pdev->dev); 922 923 bgx->lmac[bgx->lmac_count].lmacid = bgx->lmac_count; 924out: 925 bgx->lmac_count++; 926 return AE_OK; 927} 928 929static acpi_status bgx_acpi_match_id(acpi_handle handle, u32 lvl, 930 void *context, void **ret_val) 931{ 932 struct acpi_buffer string = { ACPI_ALLOCATE_BUFFER, NULL }; 933 struct bgx *bgx = context; 934 char bgx_sel[5]; 935 936 snprintf(bgx_sel, 5, "BGX%d", bgx->bgx_id); 937 if (ACPI_FAILURE(acpi_get_name(handle, ACPI_SINGLE_NAME, &string))) { 938 pr_warn("Invalid link device\n"); 939 return AE_OK; 940 } 941 942 if (strncmp(string.pointer, bgx_sel, 4)) 943 return AE_OK; 944 945 acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1, 946 bgx_acpi_register_phy, NULL, bgx, NULL); 947 948 kfree(string.pointer); 949 return AE_CTRL_TERMINATE; 950} 951 952static int bgx_init_acpi_phy(struct bgx *bgx) 953{ 954 acpi_get_devices(NULL, bgx_acpi_match_id, bgx, (void **)NULL); 955 return 0; 956} 957 958#else 959 960static int bgx_init_acpi_phy(struct bgx *bgx) 961{ 962 return -ENODEV; 963} 964 965#endif /* CONFIG_ACPI */ 966 967#if IS_ENABLED(CONFIG_OF_MDIO) 968 969static int bgx_init_of_phy(struct bgx *bgx) 970{ 971 struct device_node *np; 972 struct device_node *np_child; 973 u8 lmac = 0; 974 char bgx_sel[5]; 975 const char *mac; 976 977 /* Get BGX node from DT */ 978 snprintf(bgx_sel, 5, "bgx%d", bgx->bgx_id); 979 np = of_find_node_by_name(NULL, bgx_sel); 980 if (!np) 981 return -ENODEV; 982 983 for_each_child_of_node(np, np_child) { 984 struct device_node *phy_np = of_parse_phandle(np_child, 985 "phy-handle", 0); 986 if (!phy_np) 987 continue; 988 bgx->lmac[lmac].phydev = of_phy_find_device(phy_np); 989 990 mac = of_get_mac_address(np_child); 991 if (mac) 992 ether_addr_copy(bgx->lmac[lmac].mac, mac); 993 994 SET_NETDEV_DEV(&bgx->lmac[lmac].netdev, &bgx->pdev->dev); 995 bgx->lmac[lmac].lmacid = lmac; 996 lmac++; 997 if (lmac == MAX_LMAC_PER_BGX) { 998 of_node_put(np_child); 999 break; 1000 } 1001 } 1002 return 0; 1003} 1004 1005#else 1006 1007static int bgx_init_of_phy(struct bgx *bgx) 1008{ 1009 return -ENODEV; 1010} 1011 1012#endif /* CONFIG_OF_MDIO */ 1013 1014static int bgx_init_phy(struct bgx *bgx) 1015{ 1016 if (!acpi_disabled) 1017 return bgx_init_acpi_phy(bgx); 1018 1019 return bgx_init_of_phy(bgx); 1020} 1021 1022static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 1023{ 1024 int err; 1025 struct device *dev = &pdev->dev; 1026 struct bgx *bgx = NULL; 1027 u8 lmac; 1028 1029 /* Load octeon mdio driver */ 1030 octeon_mdiobus_force_mod_depencency(); 1031 1032 bgx = devm_kzalloc(dev, sizeof(*bgx), GFP_KERNEL); 1033 if (!bgx) 1034 return -ENOMEM; 1035 bgx->pdev = pdev; 1036 1037 pci_set_drvdata(pdev, bgx); 1038 1039 err = pci_enable_device(pdev); 1040 if (err) { 1041 dev_err(dev, "Failed to enable PCI device\n"); 1042 pci_set_drvdata(pdev, NULL); 1043 return err; 1044 } 1045 1046 err = pci_request_regions(pdev, DRV_NAME); 1047 if (err) { 1048 dev_err(dev, "PCI request regions failed 0x%x\n", err); 1049 goto err_disable_device; 1050 } 1051 1052 /* MAP configuration registers */ 1053 bgx->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0); 1054 if (!bgx->reg_base) { 1055 dev_err(dev, "BGX: Cannot map CSR memory space, aborting\n"); 1056 err = -ENOMEM; 1057 goto err_release_regions; 1058 } 1059 bgx->bgx_id = (pci_resource_start(pdev, PCI_CFG_REG_BAR_NUM) >> 24) & 1; 1060 bgx->bgx_id += nic_get_node_id(pdev) * MAX_BGX_PER_CN88XX; 1061 1062 bgx_vnic[bgx->bgx_id] = bgx; 1063 bgx_get_qlm_mode(bgx); 1064 1065 err = bgx_init_phy(bgx); 1066 if (err) 1067 goto err_enable; 1068 1069 bgx_init_hw(bgx); 1070 1071 /* Enable all LMACs */ 1072 for (lmac = 0; lmac < bgx->lmac_count; lmac++) { 1073 err = bgx_lmac_enable(bgx, lmac); 1074 if (err) { 1075 dev_err(dev, "BGX%d failed to enable lmac%d\n", 1076 bgx->bgx_id, lmac); 1077 goto err_enable; 1078 } 1079 } 1080 1081 return 0; 1082 1083err_enable: 1084 bgx_vnic[bgx->bgx_id] = NULL; 1085err_release_regions: 1086 pci_release_regions(pdev); 1087err_disable_device: 1088 pci_disable_device(pdev); 1089 pci_set_drvdata(pdev, NULL); 1090 return err; 1091} 1092 1093static void bgx_remove(struct pci_dev *pdev) 1094{ 1095 struct bgx *bgx = pci_get_drvdata(pdev); 1096 u8 lmac; 1097 1098 /* Disable all LMACs */ 1099 for (lmac = 0; lmac < bgx->lmac_count; lmac++) 1100 bgx_lmac_disable(bgx, lmac); 1101 1102 bgx_vnic[bgx->bgx_id] = NULL; 1103 pci_release_regions(pdev); 1104 pci_disable_device(pdev); 1105 pci_set_drvdata(pdev, NULL); 1106} 1107 1108static struct pci_driver bgx_driver = { 1109 .name = DRV_NAME, 1110 .id_table = bgx_id_table, 1111 .probe = bgx_probe, 1112 .remove = bgx_remove, 1113}; 1114 1115static int __init bgx_init_module(void) 1116{ 1117 pr_info("%s, ver %s\n", DRV_NAME, DRV_VERSION); 1118 1119 return pci_register_driver(&bgx_driver); 1120} 1121 1122static void __exit bgx_cleanup_module(void) 1123{ 1124 pci_unregister_driver(&bgx_driver); 1125} 1126 1127module_init(bgx_init_module); 1128module_exit(bgx_cleanup_module); 1129