/*
 * AMD 10Gb Ethernet driver
 *
 * This file is available to you under your choice of the following two
 * licenses:
 *
 * License 1: GPLv2
 *
 * Copyright (c) 2014 Advanced Micro Devices, Inc.
 *
 * This file is free software; you may copy, redistribute and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or (at
 * your option) any later version.
 *
 * This file is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * This file incorporates work covered by the following copyright and
 * permission notice:
 *     The Synopsys DWC ETHER XGMAC Software Driver and documentation
 *     (hereinafter "Software") is an unsupported proprietary work of Synopsys,
 *     Inc. unless otherwise expressly agreed to in writing between Synopsys
 *     and you.
 *
 *     The Software IS NOT an item of Licensed Software or Licensed Product
 *     under any End User Software License Agreement or Agreement for Licensed
 *     Product with Synopsys or any supplement thereto.  Permission is hereby
 *     granted, free of charge, to any person obtaining a copy of this software
 *     annotated with this license and the Software, to deal in the Software
 *     without restriction, including without limitation the rights to use,
 *     copy, modify, merge, publish, distribute, sublicense, and/or sell copies
 *     of the Software, and to permit persons to whom the Software is furnished
 *     to do so, subject to the following conditions:
 *
 *     The above copyright notice and this permission notice shall be included
 *     in all copies or substantial portions of the Software.
 *
 *     THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
 *     BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 *     TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 *     PARTICULAR PURPOSE ARE HEREBY DISCLAIMED.  IN NO EVENT SHALL SYNOPSYS
 *     BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *     CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *     SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *     INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *     ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 *     THE POSSIBILITY OF SUCH DAMAGE.
 *
 *
 * License 2: Modified BSD
 *
 * Copyright (c) 2014 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of Advanced Micro Devices, Inc. nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * This file incorporates work covered by the following copyright and
 * permission notice:
 *     The Synopsys DWC ETHER XGMAC Software Driver and documentation
 *     (hereinafter "Software") is an unsupported proprietary work of Synopsys,
 *     Inc. unless otherwise expressly agreed to in writing between Synopsys
 *     and you.
 *
 *     The Software IS NOT an item of Licensed Software or Licensed Product
 *     under any End User Software License Agreement or Agreement for Licensed
 *     Product with Synopsys or any supplement thereto.  Permission is hereby
 *     granted, free of charge, to any person obtaining a copy of this software
 *     annotated with this license and the Software, to deal in the Software
 *     without restriction, including without limitation the rights to use,
 *     copy, modify, merge, publish, distribute, sublicense, and/or sell copies
 *     of the Software, and to permit persons to whom the Software is furnished
 *     to do so, subject to the following conditions:
 *
 *     The above copyright notice and this permission notice shall be included
 *     in all copies or substantial portions of the Software.
 *
 *     THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
 *     BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 *     TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 *     PARTICULAR PURPOSE ARE HEREBY DISCLAIMED.  IN NO EVENT SHALL SYNOPSYS
 *     BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *     CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *     SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *     INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *     ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 *     THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/phy.h>
#include <linux/mdio.h>
#include <linux/clk.h>
#include <linux/bitrev.h>
#include <linux/crc32.h>

#include "xgbe.h"
#include "xgbe-common.h"

static unsigned int xgbe_usec_to_riwt(struct xgbe_prv_data *pdata,
				      unsigned int usec)
{
	unsigned long rate;
	unsigned int ret;

	DBGPR("-->xgbe_usec_to_riwt\n");

	rate = pdata->sysclk_rate;

	/*
	 * Convert the input usec value to the watchdog timer value. Each
	 * watchdog timer value is equivalent to 256 clock cycles.
	 * Calculate the required value as:
	 *   (usec * (system_clock_hz / 10^6)) / 256
	 */
	ret = (usec * (rate / 1000000)) / 256;

	DBGPR("<--xgbe_usec_to_riwt\n");

	return ret;
}

static unsigned int xgbe_riwt_to_usec(struct xgbe_prv_data *pdata,
				      unsigned int riwt)
{
	unsigned long rate;
	unsigned int ret;

	DBGPR("-->xgbe_riwt_to_usec\n");

	rate = pdata->sysclk_rate;

	/*
	 * Convert the input watchdog timer value to the usec value. Each
	 * watchdog timer value is equivalent to 256 clock cycles.
	 * Calculate the required value as:
	 *   (riwt * 256) / (system_clock_hz / 10^6)
	 */
	ret = (riwt * 256) / (rate / 1000000);

	DBGPR("<--xgbe_riwt_to_usec\n");

	return ret;
}
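/* Worked example of the conversions above (illustrative only; the actual
 * sysclk_rate is platform dependent): with a 125 MHz system clock,
 * rate / 1000000 = 125, so 200 usec -> (200 * 125) / 256 = 97 riwt units,
 * and riwt 97 -> (97 * 256) / 125 = 198 usec (integer truncation).
 */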
static int xgbe_config_pblx8(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++)
		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_CR, PBLX8,
				       pdata->pblx8);

	return 0;
}

static int xgbe_get_tx_pbl_val(struct xgbe_prv_data *pdata)
{
	return XGMAC_DMA_IOREAD_BITS(pdata->channel, DMA_CH_TCR, PBL);
}

static int xgbe_config_tx_pbl_val(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->tx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, PBL,
				       pdata->tx_pbl);
	}

	return 0;
}

static int xgbe_get_rx_pbl_val(struct xgbe_prv_data *pdata)
{
	return XGMAC_DMA_IOREAD_BITS(pdata->channel, DMA_CH_RCR, PBL);
}

static int xgbe_config_rx_pbl_val(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->rx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, PBL,
				       pdata->rx_pbl);
	}

	return 0;
}

static int xgbe_config_osp_mode(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->tx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, OSP,
				       pdata->tx_osp_mode);
	}

	return 0;
}

static int xgbe_config_rsf_mode(struct xgbe_prv_data *pdata, unsigned int val)
{
	unsigned int i;

	for (i = 0; i < pdata->rx_q_count; i++)
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RSF, val);

	return 0;
}

static int xgbe_config_tsf_mode(struct xgbe_prv_data *pdata, unsigned int val)
{
	unsigned int i;

	for (i = 0; i < pdata->tx_q_count; i++)
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TSF, val);

	return 0;
}

static int xgbe_config_rx_threshold(struct xgbe_prv_data *pdata,
				    unsigned int val)
{
	unsigned int i;

	for (i = 0; i < pdata->rx_q_count; i++)
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RTC, val);

	return 0;
}

static int xgbe_config_tx_threshold(struct xgbe_prv_data *pdata,
				    unsigned int val)
{
	unsigned int i;

	for (i = 0; i < pdata->tx_q_count; i++)
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TTC, val);

	return 0;
}

static int xgbe_config_rx_coalesce(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->rx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RIWT, RWT,
				       pdata->rx_riwt);
	}

	return 0;
}

static int xgbe_config_tx_coalesce(struct xgbe_prv_data *pdata)
{
	return 0;
}

static void xgbe_config_rx_buffer_size(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->rx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, RBSZ,
				       pdata->rx_buf_size);
	}
}

static void xgbe_config_tso_mode(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->tx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, TSE, 1);
	}
}

static void xgbe_config_sph_mode(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->rx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_CR, SPH, 1);
	}

	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, HDSMS, XGBE_SPH_HDSMS_SIZE);
}

static int xgbe_write_rss_reg(struct xgbe_prv_data *pdata, unsigned int type,
			      unsigned int index, unsigned int val)
{
	unsigned int wait;
	int ret = 0;

	mutex_lock(&pdata->rss_mutex);

	if (XGMAC_IOREAD_BITS(pdata, MAC_RSSAR, OB)) {
		ret = -EBUSY;
		goto unlock;
	}

	XGMAC_IOWRITE(pdata, MAC_RSSDR, val);

	XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, RSSIA, index);
	XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, ADDRT, type);
	XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, CT, 0);
	XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, OB, 1);

	wait = 1000;
	while (wait--) {
		if (!XGMAC_IOREAD_BITS(pdata, MAC_RSSAR, OB))
			goto unlock;

		usleep_range(1000, 1500);
	}

	ret = -EBUSY;

unlock:
	mutex_unlock(&pdata->rss_mutex);

	return ret;
}

static int xgbe_write_rss_hash_key(struct xgbe_prv_data *pdata)
{
	unsigned int key_regs = sizeof(pdata->rss_key) / sizeof(u32);
	unsigned int *key = (unsigned int *)&pdata->rss_key;
	int ret;

	while (key_regs--) {
		ret = xgbe_write_rss_reg(pdata, XGBE_RSS_HASH_KEY_TYPE,
					 key_regs, *key++);
		if (ret)
			return ret;
	}

	return 0;
}

static int xgbe_write_rss_lookup_table(struct xgbe_prv_data *pdata)
{
	unsigned int i;
	int ret;

	for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++) {
		ret = xgbe_write_rss_reg(pdata,
					 XGBE_RSS_LOOKUP_TABLE_TYPE, i,
					 pdata->rss_table[i]);
		if (ret)
			return ret;
	}

	return 0;
}

static int xgbe_set_rss_hash_key(struct xgbe_prv_data *pdata, const u8 *key)
{
	memcpy(pdata->rss_key, key, sizeof(pdata->rss_key));

	return xgbe_write_rss_hash_key(pdata);
}

static int xgbe_set_rss_lookup_table(struct xgbe_prv_data *pdata,
				     const u32 *table)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++)
		XGMAC_SET_BITS(pdata->rss_table[i], MAC_RSSDR, DMCH, table[i]);

	return xgbe_write_rss_lookup_table(pdata);
}

static int xgbe_enable_rss(struct xgbe_prv_data *pdata)
{
	int ret;

	if (!pdata->hw_feat.rss)
		return -EOPNOTSUPP;

	/* Program the hash key */
	ret = xgbe_write_rss_hash_key(pdata);
	if (ret)
		return ret;

	/* Program the lookup table */
	ret = xgbe_write_rss_lookup_table(pdata);
	if (ret)
		return ret;

	/* Set the RSS options */
	XGMAC_IOWRITE(pdata, MAC_RSSCR, pdata->rss_options);

	/* Enable RSS */
	XGMAC_IOWRITE_BITS(pdata, MAC_RSSCR, RSSE, 1);

	return 0;
}

static int xgbe_disable_rss(struct xgbe_prv_data *pdata)
{
	if (!pdata->hw_feat.rss)
		return -EOPNOTSUPP;

	XGMAC_IOWRITE_BITS(pdata, MAC_RSSCR, RSSE, 0);

	return 0;
}

static void xgbe_config_rss(struct xgbe_prv_data *pdata)
{
	int ret;

	if (!pdata->hw_feat.rss)
		return;

	if (pdata->netdev->features & NETIF_F_RXHASH)
		ret = xgbe_enable_rss(pdata);
	else
		ret = xgbe_disable_rss(pdata);

	if (ret)
		netdev_err(pdata->netdev,
			   "error configuring RSS, RSS disabled\n");
}

static int xgbe_disable_tx_flow_control(struct xgbe_prv_data *pdata)
{
	unsigned int max_q_count, q_count;
	unsigned int reg, reg_val;
	unsigned int i;

	/* Clear MTL flow control */
	for (i = 0; i < pdata->rx_q_count; i++)
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, 0);

	/* Clear MAC flow control */
	max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES;
	q_count = min_t(unsigned int, pdata->tx_q_count, max_q_count);
	reg = MAC_Q0TFCR;
	for (i = 0; i < q_count; i++) {
		reg_val = XGMAC_IOREAD(pdata, reg);
		XGMAC_SET_BITS(reg_val, MAC_Q0TFCR, TFE, 0);
		XGMAC_IOWRITE(pdata, reg, reg_val);

		reg += MAC_QTFCR_INC;
	}

	return 0;
}

static int xgbe_enable_tx_flow_control(struct xgbe_prv_data *pdata)
{
	unsigned int max_q_count, q_count;
	unsigned int reg, reg_val;
	unsigned int i;

	/* Set MTL flow control */
	for (i = 0; i < pdata->rx_q_count; i++)
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, 1);

	/* Set MAC flow control */
	max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES;
	q_count = min_t(unsigned int, pdata->tx_q_count, max_q_count);
	reg = MAC_Q0TFCR;
	for (i = 0; i < q_count; i++) {
		reg_val = XGMAC_IOREAD(pdata, reg);

		/* Enable transmit flow control */
		XGMAC_SET_BITS(reg_val, MAC_Q0TFCR, TFE, 1);
		/* Set pause time */
		XGMAC_SET_BITS(reg_val, MAC_Q0TFCR, PT, 0xffff);

		XGMAC_IOWRITE(pdata, reg, reg_val);

		reg += MAC_QTFCR_INC;
	}

	return 0;
}

static int xgbe_disable_rx_flow_control(struct xgbe_prv_data *pdata)
{
	XGMAC_IOWRITE_BITS(pdata, MAC_RFCR, RFE, 0);

	return 0;
}

static int xgbe_enable_rx_flow_control(struct xgbe_prv_data *pdata)
{
	XGMAC_IOWRITE_BITS(pdata, MAC_RFCR, RFE, 1);

	return 0;
}

static int xgbe_config_tx_flow_control(struct xgbe_prv_data *pdata)
{
	struct ieee_pfc *pfc = pdata->pfc;

	if (pdata->tx_pause || (pfc && pfc->pfc_en))
		xgbe_enable_tx_flow_control(pdata);
	else
		xgbe_disable_tx_flow_control(pdata);

	return 0;
}

static int xgbe_config_rx_flow_control(struct xgbe_prv_data *pdata)
{
	struct ieee_pfc *pfc = pdata->pfc;

	if (pdata->rx_pause || (pfc && pfc->pfc_en))
		xgbe_enable_rx_flow_control(pdata);
	else
		xgbe_disable_rx_flow_control(pdata);

	return 0;
}

static void xgbe_config_flow_control(struct xgbe_prv_data *pdata)
{
	struct ieee_pfc *pfc = pdata->pfc;

	xgbe_config_tx_flow_control(pdata);
	xgbe_config_rx_flow_control(pdata);

	XGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE,
			   (pfc && pfc->pfc_en) ? 1 : 0);
}

static void xgbe_enable_dma_interrupts(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int dma_ch_isr, dma_ch_ier;
	unsigned int i;

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		/* Clear all the interrupts which are set */
		dma_ch_isr = XGMAC_DMA_IOREAD(channel, DMA_CH_SR);
		XGMAC_DMA_IOWRITE(channel, DMA_CH_SR, dma_ch_isr);

		/* Clear all interrupt enable bits */
		dma_ch_ier = 0;

		/* Enable following interrupts
		 *   NIE  - Normal Interrupt Summary Enable
		 *   AIE  - Abnormal Interrupt Summary Enable
		 *   FBEE - Fatal Bus Error Enable
		 */
		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, NIE, 1);
		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, AIE, 1);
		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, FBEE, 1);

		if (channel->tx_ring) {
			/* Enable the following Tx interrupts
			 *   TIE  - Transmit Interrupt Enable (unless using
			 *          per channel interrupts)
			 */
			if (!pdata->per_channel_irq)
				XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 1);
		}
		if (channel->rx_ring) {
			/* Enable following Rx interrupts
			 *   RBUE - Receive Buffer Unavailable Enable
			 *   RIE  - Receive Interrupt Enable (unless using
			 *          per channel interrupts)
			 */
			XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RBUE, 1);
			if (!pdata->per_channel_irq)
				XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 1);
		}

		XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, dma_ch_ier);
	}
}

static void xgbe_enable_mtl_interrupts(struct xgbe_prv_data *pdata)
{
	unsigned int mtl_q_isr;
	unsigned int q_count, i;

	q_count = max(pdata->hw_feat.tx_q_cnt, pdata->hw_feat.rx_q_cnt);
	for (i = 0; i < q_count; i++) {
		/* Clear all the interrupts which are set */
		mtl_q_isr = XGMAC_MTL_IOREAD(pdata, i, MTL_Q_ISR);
		XGMAC_MTL_IOWRITE(pdata, i, MTL_Q_ISR, mtl_q_isr);

		/* No MTL interrupts to be enabled */
		XGMAC_MTL_IOWRITE(pdata, i, MTL_Q_IER, 0);
	}
}

static void xgbe_enable_mac_interrupts(struct xgbe_prv_data *pdata)
{
	unsigned int mac_ier = 0;

	/* Enable Timestamp interrupt */
	XGMAC_SET_BITS(mac_ier, MAC_IER, TSIE, 1);

	XGMAC_IOWRITE(pdata, MAC_IER, mac_ier);

	/* Enable all counter interrupts */
	XGMAC_IOWRITE_BITS(pdata, MMC_RIER, ALL_INTERRUPTS, 0xffffffff);
	XGMAC_IOWRITE_BITS(pdata, MMC_TIER, ALL_INTERRUPTS, 0xffffffff);
}

static int xgbe_set_gmii_speed(struct xgbe_prv_data *pdata)
{
	if (XGMAC_IOREAD_BITS(pdata, MAC_TCR, SS) == 0x3)
		return 0;

	XGMAC_IOWRITE_BITS(pdata, MAC_TCR, SS, 0x3);

	return 0;
}

static int xgbe_set_gmii_2500_speed(struct xgbe_prv_data *pdata)
{
	if (XGMAC_IOREAD_BITS(pdata, MAC_TCR, SS) == 0x2)
		return 0;

	XGMAC_IOWRITE_BITS(pdata, MAC_TCR, SS, 0x2);

	return 0;
}

static int xgbe_set_xgmii_speed(struct xgbe_prv_data *pdata)
{
	if (XGMAC_IOREAD_BITS(pdata, MAC_TCR, SS) == 0)
		return 0;

	XGMAC_IOWRITE_BITS(pdata, MAC_TCR, SS, 0);

	return 0;
}

static int xgbe_set_promiscuous_mode(struct xgbe_prv_data *pdata,
				     unsigned int enable)
{
	unsigned int val = enable ? 1 : 0;

	if (XGMAC_IOREAD_BITS(pdata, MAC_PFR, PR) == val)
		return 0;

	netif_dbg(pdata, drv, pdata->netdev, "%s promiscuous mode\n",
		  enable ? "entering" : "leaving");
	XGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, val);

	return 0;
}

static int xgbe_set_all_multicast_mode(struct xgbe_prv_data *pdata,
				       unsigned int enable)
{
	unsigned int val = enable ? 1 : 0;

	if (XGMAC_IOREAD_BITS(pdata, MAC_PFR, PM) == val)
		return 0;

	netif_dbg(pdata, drv, pdata->netdev, "%s allmulti mode\n",
		  enable ? "entering" : "leaving");
	XGMAC_IOWRITE_BITS(pdata, MAC_PFR, PM, val);

	return 0;
}

static void xgbe_set_mac_reg(struct xgbe_prv_data *pdata,
			     struct netdev_hw_addr *ha, unsigned int *mac_reg)
{
	unsigned int mac_addr_hi, mac_addr_lo;
	u8 *mac_addr;

	mac_addr_lo = 0;
	mac_addr_hi = 0;

	if (ha) {
		mac_addr = (u8 *)&mac_addr_lo;
		mac_addr[0] = ha->addr[0];
		mac_addr[1] = ha->addr[1];
		mac_addr[2] = ha->addr[2];
		mac_addr[3] = ha->addr[3];
		mac_addr = (u8 *)&mac_addr_hi;
		mac_addr[0] = ha->addr[4];
		mac_addr[1] = ha->addr[5];

		netif_dbg(pdata, drv, pdata->netdev,
			  "adding mac address %pM at %#x\n",
			  ha->addr, *mac_reg);

		XGMAC_SET_BITS(mac_addr_hi, MAC_MACA1HR, AE, 1);
	}

	XGMAC_IOWRITE(pdata, *mac_reg, mac_addr_hi);
	*mac_reg += MAC_MACA_INC;
	XGMAC_IOWRITE(pdata, *mac_reg, mac_addr_lo);
	*mac_reg += MAC_MACA_INC;
}

static void xgbe_set_mac_addn_addrs(struct xgbe_prv_data *pdata)
{
	struct net_device *netdev = pdata->netdev;
	struct netdev_hw_addr *ha;
	unsigned int mac_reg;
	unsigned int addn_macs;

	mac_reg = MAC_MACA1HR;
	addn_macs = pdata->hw_feat.addn_mac;

	if (netdev_uc_count(netdev) > addn_macs) {
		xgbe_set_promiscuous_mode(pdata, 1);
	} else {
		netdev_for_each_uc_addr(ha, netdev) {
			xgbe_set_mac_reg(pdata, ha, &mac_reg);
			addn_macs--;
		}

		if (netdev_mc_count(netdev) > addn_macs) {
			xgbe_set_all_multicast_mode(pdata, 1);
		} else {
			netdev_for_each_mc_addr(ha, netdev) {
				xgbe_set_mac_reg(pdata, ha, &mac_reg);
				addn_macs--;
			}
		}
	}

	/* Clear remaining additional MAC address entries */
	while (addn_macs--)
		xgbe_set_mac_reg(pdata, NULL, &mac_reg);
}

static void xgbe_set_mac_hash_table(struct xgbe_prv_data *pdata)
{
	struct net_device *netdev = pdata->netdev;
	struct netdev_hw_addr *ha;
	unsigned int hash_reg;
	unsigned int hash_table_shift, hash_table_count;
	u32 hash_table[XGBE_MAC_HASH_TABLE_SIZE];
	u32 crc;
	unsigned int i;

	hash_table_shift = 26 - (pdata->hw_feat.hash_table_size >> 7);
	hash_table_count = pdata->hw_feat.hash_table_size / 32;
	memset(hash_table, 0, sizeof(hash_table));

	/* Build the MAC Hash Table register values */
	netdev_for_each_uc_addr(ha, netdev) {
		crc = bitrev32(~crc32_le(~0, ha->addr, ETH_ALEN));
		crc >>= hash_table_shift;
		hash_table[crc >> 5] |= (1 << (crc & 0x1f));
	}

	netdev_for_each_mc_addr(ha, netdev) {
		crc = bitrev32(~crc32_le(~0, ha->addr, ETH_ALEN));
		crc >>= hash_table_shift;
		hash_table[crc >> 5] |= (1 << (crc & 0x1f));
	}

	/* Set the MAC Hash Table registers */
	hash_reg = MAC_HTR0;
	for (i = 0; i < hash_table_count; i++) {
		XGMAC_IOWRITE(pdata, hash_reg, hash_table[i]);
		hash_reg += MAC_HTR_INC;
	}
}
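/* Illustration of the hash table sizing above (the sizes are examples; the
 * real value comes from hw_feat.hash_table_size): a 256-entry hardware hash
 * table gives hash_table_shift = 26 - (256 >> 7) = 24 and
 * hash_table_count = 8, so the top 8 bits of the bit-reversed CRC-32 of the
 * address select one bit spread across eight 32-bit MAC_HTRn registers.
 */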
static int xgbe_add_mac_addresses(struct xgbe_prv_data *pdata)
{
	if (pdata->hw_feat.hash_table_size)
		xgbe_set_mac_hash_table(pdata);
	else
		xgbe_set_mac_addn_addrs(pdata);

	return 0;
}

static int xgbe_set_mac_address(struct xgbe_prv_data *pdata, u8 *addr)
{
	unsigned int mac_addr_hi, mac_addr_lo;

	mac_addr_hi = (addr[5] << 8) | (addr[4] << 0);
	mac_addr_lo = (addr[3] << 24) | (addr[2] << 16) |
		      (addr[1] << 8) | (addr[0] << 0);

	XGMAC_IOWRITE(pdata, MAC_MACA0HR, mac_addr_hi);
	XGMAC_IOWRITE(pdata, MAC_MACA0LR, mac_addr_lo);

	return 0;
}

static int xgbe_config_rx_mode(struct xgbe_prv_data *pdata)
{
	struct net_device *netdev = pdata->netdev;
	unsigned int pr_mode, am_mode;

	pr_mode = ((netdev->flags & IFF_PROMISC) != 0);
	am_mode = ((netdev->flags & IFF_ALLMULTI) != 0);

	xgbe_set_promiscuous_mode(pdata, pr_mode);
	xgbe_set_all_multicast_mode(pdata, am_mode);

	xgbe_add_mac_addresses(pdata);

	return 0;
}

static int xgbe_read_mmd_regs(struct xgbe_prv_data *pdata, int prtad,
			      int mmd_reg)
{
	unsigned int mmd_address;
	int mmd_data;

	if (mmd_reg & MII_ADDR_C45)
		mmd_address = mmd_reg & ~MII_ADDR_C45;
	else
		mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);

	/* The PCS registers are accessed using mmio. The underlying APB3
	 * management interface uses indirect addressing to access the MMD
	 * register sets. This requires accessing of the PCS register in two
	 * phases, an address phase and a data phase.
	 *
	 * The mmio interface is based on 32-bit offsets and values. All
	 * register offsets must therefore be adjusted by left shifting the
	 * offset 2 bits and reading 32 bits of data.
	 */
	mutex_lock(&pdata->xpcs_mutex);
	XPCS_IOWRITE(pdata, PCS_MMD_SELECT << 2, mmd_address >> 8);
	mmd_data = XPCS_IOREAD(pdata, (mmd_address & 0xff) << 2);
	mutex_unlock(&pdata->xpcs_mutex);

	return mmd_data;
}

static void xgbe_write_mmd_regs(struct xgbe_prv_data *pdata, int prtad,
				int mmd_reg, int mmd_data)
{
	unsigned int mmd_address;

	if (mmd_reg & MII_ADDR_C45)
		mmd_address = mmd_reg & ~MII_ADDR_C45;
	else
		mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);

	/* The PCS registers are accessed using mmio. The underlying APB3
	 * management interface uses indirect addressing to access the MMD
	 * register sets. This requires accessing of the PCS register in two
	 * phases, an address phase and a data phase.
	 *
	 * The mmio interface is based on 32-bit offsets and values. All
	 * register offsets must therefore be adjusted by left shifting the
	 * offset 2 bits and reading 32 bits of data.
	 */
	mutex_lock(&pdata->xpcs_mutex);
	XPCS_IOWRITE(pdata, PCS_MMD_SELECT << 2, mmd_address >> 8);
	XPCS_IOWRITE(pdata, (mmd_address & 0xff) << 2, mmd_data);
	mutex_unlock(&pdata->xpcs_mutex);
}
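/* Worked example of the indirect access above (register numbers are
 * illustrative only): for MMD 1, register 0x0005, mmd_address is
 * (1 << 16) | 0x0005 = 0x10005. The address phase writes 0x100
 * (mmd_address >> 8) to the select register and the data phase uses mmio
 * offset (0x05 << 2) = 0x14 within the selected 256-register window.
 */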
static int xgbe_tx_complete(struct xgbe_ring_desc *rdesc)
{
	return !XGMAC_GET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN);
}

static int xgbe_disable_rx_csum(struct xgbe_prv_data *pdata)
{
	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, IPC, 0);

	return 0;
}

static int xgbe_enable_rx_csum(struct xgbe_prv_data *pdata)
{
	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, IPC, 1);

	return 0;
}

static int xgbe_enable_rx_vlan_stripping(struct xgbe_prv_data *pdata)
{
	/* Put the VLAN tag in the Rx descriptor */
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLRXS, 1);

	/* Don't check the VLAN type */
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, DOVLTC, 1);

	/* Check only C-TAG (0x8100) packets */
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ERSVLM, 0);

	/* Don't consider an S-TAG (0x88A8) packet as a VLAN packet */
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ESVL, 0);

	/* Enable VLAN tag stripping */
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLS, 0x3);

	return 0;
}

static int xgbe_disable_rx_vlan_stripping(struct xgbe_prv_data *pdata)
{
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLS, 0);

	return 0;
}

static int xgbe_enable_rx_vlan_filtering(struct xgbe_prv_data *pdata)
{
	/* Enable VLAN filtering */
	XGMAC_IOWRITE_BITS(pdata, MAC_PFR, VTFE, 1);

	/* Enable VLAN Hash Table filtering */
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VTHM, 1);

	/* Disable VLAN tag inverse matching */
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VTIM, 0);

	/* Only filter on the lower 12-bits of the VLAN tag */
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ETV, 1);

	/* In order for the VLAN Hash Table filtering to be effective,
	 * the VLAN tag identifier in the VLAN Tag Register must not
	 * be zero.  Set the VLAN tag identifier to "1" to enable the
	 * VLAN Hash Table filtering.  This implies that a VLAN tag of
	 * 1 will always pass filtering.
	 */
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VL, 1);

	return 0;
}

static int xgbe_disable_rx_vlan_filtering(struct xgbe_prv_data *pdata)
{
	/* Disable VLAN filtering */
	XGMAC_IOWRITE_BITS(pdata, MAC_PFR, VTFE, 0);

	return 0;
}
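/* The two helpers below implement the VLAN hash filter: the 12-bit VID is
 * run through a bit-wise CRC-32 (polynomial 0xedb88320, LSB first), the
 * result is inverted and bit-reversed, and the top 4 bits select one of the
 * 16 bits programmed into the MAC_VLANHTR hash table register.
 */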
#ifndef CRCPOLY_LE
#define CRCPOLY_LE 0xedb88320
#endif
static u32 xgbe_vid_crc32_le(__le16 vid_le)
{
	u32 poly = CRCPOLY_LE;
	u32 crc = ~0;
	u32 temp = 0;
	unsigned char *data = (unsigned char *)&vid_le;
	unsigned char data_byte = 0;
	int i, bits;

	bits = get_bitmask_order(VLAN_VID_MASK);
	for (i = 0; i < bits; i++) {
		if ((i % 8) == 0)
			data_byte = data[i / 8];

		temp = ((crc & 1) ^ data_byte) & 1;
		crc >>= 1;
		data_byte >>= 1;

		if (temp)
			crc ^= poly;
	}

	return crc;
}

static int xgbe_update_vlan_hash_table(struct xgbe_prv_data *pdata)
{
	u32 crc;
	u16 vid;
	__le16 vid_le;
	u16 vlan_hash_table = 0;

	/* Generate the VLAN Hash Table value */
	for_each_set_bit(vid, pdata->active_vlans, VLAN_N_VID) {
		/* Get the CRC32 value of the VLAN ID */
		vid_le = cpu_to_le16(vid);
		crc = bitrev32(~xgbe_vid_crc32_le(vid_le)) >> 28;

		vlan_hash_table |= (1 << crc);
	}

	/* Set the VLAN Hash Table filtering register */
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANHTR, VLHT, vlan_hash_table);

	return 0;
}

static void xgbe_tx_desc_reset(struct xgbe_ring_data *rdata)
{
	struct xgbe_ring_desc *rdesc = rdata->rdesc;

	/* Reset the Tx descriptor
	 *   Set buffer 1 (lo) address to zero
	 *   Set buffer 1 (hi) address to zero
	 *   Reset all other control bits (IC, TTSE, B2L & B1L)
	 *   Reset all other control bits (OWN, CTXT, FD, LD, CPC, CIC, etc)
	 */
	rdesc->desc0 = 0;
	rdesc->desc1 = 0;
	rdesc->desc2 = 0;
	rdesc->desc3 = 0;

	/* Make sure ownership is written to the descriptor */
	dma_wmb();
}

static void xgbe_tx_desc_init(struct xgbe_channel *channel)
{
	struct xgbe_ring *ring = channel->tx_ring;
	struct xgbe_ring_data *rdata;
	int i;
	int start_index = ring->cur;

	DBGPR("-->tx_desc_init\n");

	/* Initialize all descriptors */
	for (i = 0; i < ring->rdesc_count; i++) {
		rdata = XGBE_GET_DESC_DATA(ring, i);

		/* Initialize Tx descriptor */
		xgbe_tx_desc_reset(rdata);
	}

	/* Update the total number of Tx descriptors */
	XGMAC_DMA_IOWRITE(channel, DMA_CH_TDRLR, ring->rdesc_count - 1);

	/* Update the starting address of descriptor ring */
	rdata = XGBE_GET_DESC_DATA(ring, start_index);
	XGMAC_DMA_IOWRITE(channel, DMA_CH_TDLR_HI,
			  upper_32_bits(rdata->rdesc_dma));
	XGMAC_DMA_IOWRITE(channel, DMA_CH_TDLR_LO,
			  lower_32_bits(rdata->rdesc_dma));

	DBGPR("<--tx_desc_init\n");
}

static void xgbe_rx_desc_reset(struct xgbe_prv_data *pdata,
			       struct xgbe_ring_data *rdata, unsigned int index)
{
	struct xgbe_ring_desc *rdesc = rdata->rdesc;
	unsigned int rx_usecs = pdata->rx_usecs;
	unsigned int rx_frames = pdata->rx_frames;
	unsigned int inte;
	dma_addr_t hdr_dma, buf_dma;

	if (!rx_usecs && !rx_frames) {
		/* No coalescing, interrupt for every descriptor */
		inte = 1;
	} else {
		/* Set interrupt based on Rx frame coalescing setting */
		if (rx_frames && !((index + 1) % rx_frames))
			inte = 1;
		else
			inte = 0;
	}

	/* Reset the Rx descriptor
	 *   Set buffer 1 (lo) address to header dma address (lo)
	 *   Set buffer 1 (hi) address to header dma address (hi)
	 *   Set buffer 2 (lo) address to buffer dma address (lo)
	 *   Set buffer 2 (hi) address to buffer dma address (hi) and
	 *     set control bits OWN and INTE
	 */
	hdr_dma = rdata->rx.hdr.dma_base + rdata->rx.hdr.dma_off;
	buf_dma = rdata->rx.buf.dma_base + rdata->rx.buf.dma_off;
	rdesc->desc0 = cpu_to_le32(lower_32_bits(hdr_dma));
	rdesc->desc1 = cpu_to_le32(upper_32_bits(hdr_dma));
	rdesc->desc2 = cpu_to_le32(lower_32_bits(buf_dma));
	rdesc->desc3 = cpu_to_le32(upper_32_bits(buf_dma));

	XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, INTE, inte);

	/* Since the Rx DMA engine is likely running, make sure everything
	 * is written to the descriptor(s) before setting the OWN bit
	 * for the descriptor
	 */
	dma_wmb();

	XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, OWN, 1);

	/* Make sure ownership is written to the descriptor */
	dma_wmb();
}
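/* Example of the INTE coalescing decision above (rx_frames is configured via
 * ethtool and is only illustrative here): with rx_frames = 16, only every
 * 16th descriptor (index 15, 31, 47, ...) is armed to raise a receive
 * interrupt; with both rx_usecs and rx_frames at 0, every descriptor is.
 */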
static void xgbe_rx_desc_init(struct xgbe_channel *channel)
{
	struct xgbe_prv_data *pdata = channel->pdata;
	struct xgbe_ring *ring = channel->rx_ring;
	struct xgbe_ring_data *rdata;
	unsigned int start_index = ring->cur;
	unsigned int i;

	DBGPR("-->rx_desc_init\n");

	/* Initialize all descriptors */
	for (i = 0; i < ring->rdesc_count; i++) {
		rdata = XGBE_GET_DESC_DATA(ring, i);

		/* Initialize Rx descriptor */
		xgbe_rx_desc_reset(pdata, rdata, i);
	}

	/* Update the total number of Rx descriptors */
	XGMAC_DMA_IOWRITE(channel, DMA_CH_RDRLR, ring->rdesc_count - 1);

	/* Update the starting address of descriptor ring */
	rdata = XGBE_GET_DESC_DATA(ring, start_index);
	XGMAC_DMA_IOWRITE(channel, DMA_CH_RDLR_HI,
			  upper_32_bits(rdata->rdesc_dma));
	XGMAC_DMA_IOWRITE(channel, DMA_CH_RDLR_LO,
			  lower_32_bits(rdata->rdesc_dma));

	/* Update the Rx Descriptor Tail Pointer */
	rdata = XGBE_GET_DESC_DATA(ring, start_index + ring->rdesc_count - 1);
	XGMAC_DMA_IOWRITE(channel, DMA_CH_RDTR_LO,
			  lower_32_bits(rdata->rdesc_dma));

	DBGPR("<--rx_desc_init\n");
}

static void xgbe_update_tstamp_addend(struct xgbe_prv_data *pdata,
				      unsigned int addend)
{
	/* Set the addend register value and tell the device */
	XGMAC_IOWRITE(pdata, MAC_TSAR, addend);
	XGMAC_IOWRITE_BITS(pdata, MAC_TSCR, TSADDREG, 1);

	/* Wait for addend update to complete */
	while (XGMAC_IOREAD_BITS(pdata, MAC_TSCR, TSADDREG))
		udelay(5);
}

static void xgbe_set_tstamp_time(struct xgbe_prv_data *pdata, unsigned int sec,
				 unsigned int nsec)
{
	/* Set the time values and tell the device */
	XGMAC_IOWRITE(pdata, MAC_STSUR, sec);
	XGMAC_IOWRITE(pdata, MAC_STNUR, nsec);
	XGMAC_IOWRITE_BITS(pdata, MAC_TSCR, TSINIT, 1);

	/* Wait for time update to complete */
	while (XGMAC_IOREAD_BITS(pdata, MAC_TSCR, TSINIT))
		udelay(5);
}

static u64 xgbe_get_tstamp_time(struct xgbe_prv_data *pdata)
{
	u64 nsec;

	nsec = XGMAC_IOREAD(pdata, MAC_STSR);
	nsec *= NSEC_PER_SEC;
	nsec += XGMAC_IOREAD(pdata, MAC_STNR);

	return nsec;
}

static u64 xgbe_get_tx_tstamp(struct xgbe_prv_data *pdata)
{
	unsigned int tx_snr;
	u64 nsec;

	tx_snr = XGMAC_IOREAD(pdata, MAC_TXSNR);
	if (XGMAC_GET_BITS(tx_snr, MAC_TXSNR, TXTSSTSMIS))
		return 0;

	nsec = XGMAC_IOREAD(pdata, MAC_TXSSR);
	nsec *= NSEC_PER_SEC;
	nsec += tx_snr;

	return nsec;
}

static void xgbe_get_rx_tstamp(struct xgbe_packet_data *packet,
			       struct xgbe_ring_desc *rdesc)
{
	u64 nsec;

	if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_CONTEXT_DESC3, TSA) &&
	    !XGMAC_GET_BITS_LE(rdesc->desc3, RX_CONTEXT_DESC3, TSD)) {
		nsec = le32_to_cpu(rdesc->desc1);
		nsec <<= 32;
		nsec |= le32_to_cpu(rdesc->desc0);
		if (nsec != 0xffffffffffffffffULL) {
			packet->rx_tstamp = nsec;
			XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
				       RX_TSTAMP, 1);
		}
	}
}

static int xgbe_config_tstamp(struct xgbe_prv_data *pdata,
			      unsigned int mac_tscr)
{
	/* Set one nano-second accuracy */
	XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSCTRLSSR, 1);

	/* Set fine timestamp update */
	XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSCFUPDT, 1);

	/* Overwrite earlier timestamps */
	XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TXTSSTSM, 1);

	XGMAC_IOWRITE(pdata, MAC_TSCR, mac_tscr);

	/* Exit if timestamping is not enabled */
	if (!XGMAC_GET_BITS(mac_tscr, MAC_TSCR, TSENA))
		return 0;

	/* Initialize time registers */
	XGMAC_IOWRITE_BITS(pdata, MAC_SSIR, SSINC, XGBE_TSTAMP_SSINC);
	XGMAC_IOWRITE_BITS(pdata, MAC_SSIR, SNSINC, XGBE_TSTAMP_SNSINC);
	xgbe_update_tstamp_addend(pdata, pdata->tstamp_addend);
	xgbe_set_tstamp_time(pdata, 0, 0);

	/* Initialize the timecounter */
	timecounter_init(&pdata->tstamp_tc, &pdata->tstamp_cc,
			 ktime_to_ns(ktime_get_real()));

	return 0;
}

static void xgbe_config_dcb_tc(struct xgbe_prv_data *pdata)
{
	struct ieee_ets *ets = pdata->ets;
	unsigned int total_weight, min_weight, weight;
	unsigned int i;

	if (!ets)
		return;

	/* Set Tx to deficit weighted round robin scheduling algorithm (when
	 * traffic class is using ETS algorithm)
	 */
	XGMAC_IOWRITE_BITS(pdata, MTL_OMR, ETSALG, MTL_ETSALG_DWRR);

	/* Set Traffic Class algorithms */
	total_weight = pdata->netdev->mtu * pdata->hw_feat.tc_cnt;
	min_weight = total_weight / 100;
	if (!min_weight)
		min_weight = 1;

	for (i = 0; i < pdata->hw_feat.tc_cnt; i++) {
		switch (ets->tc_tsa[i]) {
		case IEEE_8021QAZ_TSA_STRICT:
			netif_dbg(pdata, drv, pdata->netdev,
				  "TC%u using SP\n", i);
			XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA,
					       MTL_TSA_SP);
			break;
		case IEEE_8021QAZ_TSA_ETS:
			weight = total_weight * ets->tc_tx_bw[i] / 100;
			weight = clamp(weight, min_weight, total_weight);

			netif_dbg(pdata, drv, pdata->netdev,
				  "TC%u using DWRR (weight %u)\n", i, weight);
			XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA,
					       MTL_TSA_ETS);
			XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_QWR, QW,
					       weight);
			break;
		}
	}
}
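/* Example of the ETS weight calculation above (the MTU and bandwidth shares
 * are illustrative): with an MTU of 1500 and 4 traffic classes,
 * total_weight = 6000 and min_weight = 60, so a class granted 25% of the
 * bandwidth gets a DWRR quantum weight of 6000 * 25 / 100 = 1500.
 */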
static void xgbe_config_dcb_pfc(struct xgbe_prv_data *pdata)
{
	struct ieee_pfc *pfc = pdata->pfc;
	struct ieee_ets *ets = pdata->ets;
	unsigned int mask, reg, reg_val;
	unsigned int tc, prio;

	if (!pfc || !ets)
		return;

	for (tc = 0; tc < pdata->hw_feat.tc_cnt; tc++) {
		mask = 0;
		for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++) {
			if ((pfc->pfc_en & (1 << prio)) &&
			    (ets->prio_tc[prio] == tc))
				mask |= (1 << prio);
		}
		mask &= 0xff;

		netif_dbg(pdata, drv, pdata->netdev, "TC%u PFC mask=%#x\n",
			  tc, mask);
		reg = MTL_TCPM0R + (MTL_TCPM_INC * (tc / MTL_TCPM_TC_PER_REG));
		reg_val = XGMAC_IOREAD(pdata, reg);

		reg_val &= ~(0xff << ((tc % MTL_TCPM_TC_PER_REG) << 3));
		reg_val |= (mask << ((tc % MTL_TCPM_TC_PER_REG) << 3));

		XGMAC_IOWRITE(pdata, reg, reg_val);
	}

	xgbe_config_flow_control(pdata);
}

static void xgbe_tx_start_xmit(struct xgbe_channel *channel,
			       struct xgbe_ring *ring)
{
	struct xgbe_prv_data *pdata = channel->pdata;
	struct xgbe_ring_data *rdata;

	/* Make sure everything is written before the register write */
	wmb();

	/* Issue a poll command to Tx DMA by writing address
	 * of next immediate free descriptor */
	rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
	XGMAC_DMA_IOWRITE(channel, DMA_CH_TDTR_LO,
			  lower_32_bits(rdata->rdesc_dma));

	/* Start the Tx timer */
	if (pdata->tx_usecs && !channel->tx_timer_active) {
		channel->tx_timer_active = 1;
		mod_timer(&channel->tx_timer,
			  jiffies + usecs_to_jiffies(pdata->tx_usecs));
	}

	ring->tx.xmit_more = 0;
}

static void xgbe_dev_xmit(struct xgbe_channel *channel)
{
	struct xgbe_prv_data *pdata = channel->pdata;
	struct xgbe_ring *ring = channel->tx_ring;
	struct xgbe_ring_data *rdata;
	struct xgbe_ring_desc *rdesc;
	struct xgbe_packet_data *packet = &ring->packet_data;
	unsigned int csum, tso, vlan;
	unsigned int tso_context, vlan_context;
	unsigned int tx_set_ic;
	int start_index = ring->cur;
	int cur_index = ring->cur;
	int i;

	DBGPR("-->xgbe_dev_xmit\n");

	csum = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			      CSUM_ENABLE);
	tso = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			     TSO_ENABLE);
	vlan = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			      VLAN_CTAG);

	if (tso && (packet->mss != ring->tx.cur_mss))
		tso_context = 1;
	else
		tso_context = 0;

	if (vlan && (packet->vlan_ctag != ring->tx.cur_vlan_ctag))
		vlan_context = 1;
	else
		vlan_context = 0;

	/* Determine if an interrupt should be generated for this Tx:
	 *   Interrupt:
	 *     - Tx frame count exceeds the frame count setting
	 *     - Addition of Tx frame count to the frame count since the
	 *       last interrupt was set exceeds the frame count setting
	 *   No interrupt:
	 *     - No frame count setting specified (ethtool -C ethX tx-frames 0)
	 *     - Addition of Tx frame count to the frame count since the
	 *       last interrupt was set does not exceed the frame count setting
	 */
	ring->coalesce_count += packet->tx_packets;
	if (!pdata->tx_frames)
		tx_set_ic = 0;
	else if (packet->tx_packets > pdata->tx_frames)
		tx_set_ic = 1;
	else if ((ring->coalesce_count % pdata->tx_frames) <
		 packet->tx_packets)
		tx_set_ic = 1;
	else
		tx_set_ic = 0;

	rdata = XGBE_GET_DESC_DATA(ring, cur_index);
	rdesc = rdata->rdesc;

	/* Create a context descriptor if this is a TSO packet */
	if (tso_context || vlan_context) {
		if (tso_context) {
			netif_dbg(pdata, tx_queued, pdata->netdev,
				  "TSO context descriptor, mss=%u\n",
				  packet->mss);

			/* Set the MSS size */
			XGMAC_SET_BITS_LE(rdesc->desc2, TX_CONTEXT_DESC2,
					  MSS, packet->mss);
			/* Mark it as a CONTEXT descriptor */
			XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
					  CTXT, 1);

			/* Indicate this descriptor contains the MSS */
			XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
					  TCMSSV, 1);

			ring->tx.cur_mss = packet->mss;
		}

		if (vlan_context) {
			netif_dbg(pdata, tx_queued, pdata->netdev,
				  "VLAN context descriptor, ctag=%u\n",
				  packet->vlan_ctag);

			/* Mark it as a CONTEXT descriptor */
			XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
					  CTXT, 1);

			/* Set the VLAN tag */
			XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
					  VT, packet->vlan_ctag);

			/* Indicate this descriptor contains the VLAN tag */
			XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
					  VLTV, 1);

			ring->tx.cur_vlan_ctag = packet->vlan_ctag;
		}

		cur_index++;
		rdata = XGBE_GET_DESC_DATA(ring, cur_index);
		rdesc = rdata->rdesc;
	}

	/* Update buffer address (for TSO this is the header) */
	rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->skb_dma));
	rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->skb_dma));

	/* Update the buffer length */
	XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, HL_B1L,
			  rdata->skb_dma_len);

	/* VLAN tag insertion check */
	if (vlan)
		XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, VTIR,
				  TX_NORMAL_DESC2_VLAN_INSERT);

	/* Timestamp enablement check */
	if (XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, PTP))
		XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, TTSE, 1);

	/* Mark it as First Descriptor */
	XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, FD, 1);

	/* Mark it as a NORMAL descriptor */
	XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CTXT, 0);

	/* Set OWN bit if not the first descriptor */
	if (cur_index != start_index)
		XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1);

	if (tso) {
		/* Enable TSO */
		XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TSE, 1);
		XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TCPPL,
				  packet->tcp_payload_len);
		XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TCPHDRLEN,
				  packet->tcp_header_len / 4);

		pdata->ext_stats.tx_tso_packets++;
	} else {
		/* Enable CRC and Pad Insertion */
		XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CPC, 0);

		/* Enable HW CSUM */
		if (csum)
			XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3,
					  CIC, 0x3);

		/* Set the total length to be transmitted */
		XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, FL,
				  packet->length);
	}

	for (i = cur_index - start_index + 1; i < packet->rdesc_count; i++) {
		cur_index++;
		rdata = XGBE_GET_DESC_DATA(ring, cur_index);
		rdesc = rdata->rdesc;

		/* Update buffer address */
		rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->skb_dma));
		rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->skb_dma));

		/* Update the buffer length */
		XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, HL_B1L,
				  rdata->skb_dma_len);

		/* Set OWN bit */
		XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1);

		/* Mark it as NORMAL descriptor */
		XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CTXT, 0);

		/* Enable HW CSUM */
		if (csum)
			XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3,
					  CIC, 0x3);
	}

	/* Set LAST bit for the last descriptor */
	XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, LD, 1);

	/* Set IC bit based on Tx coalescing settings */
	if (tx_set_ic)
		XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, IC, 1);

	/* Save the Tx info to report back during cleanup */
	rdata->tx.packets = packet->tx_packets;
	rdata->tx.bytes = packet->tx_bytes;

	/* In case the Tx DMA engine is running, make sure everything
	 * is written to the descriptor(s) before setting the OWN bit
	 * for the first descriptor
	 */
	dma_wmb();

	/* Set OWN bit for the first descriptor */
	rdata = XGBE_GET_DESC_DATA(ring, start_index);
	rdesc = rdata->rdesc;
	XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1);

	if (netif_msg_tx_queued(pdata))
		xgbe_dump_tx_desc(pdata, ring, start_index,
				  packet->rdesc_count, 1);

	/* Make sure ownership is written to the descriptor */
	smp_wmb();

	ring->cur = cur_index + 1;
	if (!packet->skb->xmit_more ||
	    netif_xmit_stopped(netdev_get_tx_queue(pdata->netdev,
						   channel->queue_index)))
		xgbe_tx_start_xmit(channel, ring);
	else
		ring->tx.xmit_more = 1;

	DBGPR("  %s: descriptors %u to %u written\n",
	      channel->name, start_index & (ring->rdesc_count - 1),
	      (ring->cur - 1) & (ring->rdesc_count - 1));

	DBGPR("<--xgbe_dev_xmit\n");
}

static int xgbe_dev_read(struct xgbe_channel *channel)
{
	struct xgbe_prv_data *pdata = channel->pdata;
	struct xgbe_ring *ring = channel->rx_ring;
	struct xgbe_ring_data *rdata;
	struct xgbe_ring_desc *rdesc;
	struct xgbe_packet_data *packet = &ring->packet_data;
	struct net_device *netdev = pdata->netdev;
	unsigned int err, etlt, l34t;

	DBGPR("-->xgbe_dev_read: cur = %d\n", ring->cur);

	rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
	rdesc = rdata->rdesc;

	/* Check for data availability */
	if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, OWN))
		return 1;

	/* Make sure descriptor fields are read after reading the OWN bit */
	dma_rmb();

	if (netif_msg_rx_status(pdata))
		xgbe_dump_rx_desc(pdata, ring, ring->cur);

	if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, CTXT)) {
		/* Timestamp Context Descriptor */
		xgbe_get_rx_tstamp(packet, rdesc);

		XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
			       CONTEXT, 1);
		XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
			       CONTEXT_NEXT, 0);
		return 0;
	}

	/* Normal Descriptor, be sure Context Descriptor bit is off */
	XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, CONTEXT, 0);

	/* Indicate if a Context Descriptor is next */
	if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, CDA))
		XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
			       CONTEXT_NEXT, 1);

	/* Get the header length */
	if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, FD)) {
		rdata->rx.hdr_len = XGMAC_GET_BITS_LE(rdesc->desc2,
						      RX_NORMAL_DESC2, HL);
		if (rdata->rx.hdr_len)
			pdata->ext_stats.rx_split_header_packets++;
	}

	/* Get the RSS hash */
	if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, RSV)) {
		XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
			       RSS_HASH, 1);

		packet->rss_hash = le32_to_cpu(rdesc->desc1);

		l34t = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, L34T);
		switch (l34t) {
		case RX_DESC3_L34T_IPV4_TCP:
		case RX_DESC3_L34T_IPV4_UDP:
		case RX_DESC3_L34T_IPV6_TCP:
		case RX_DESC3_L34T_IPV6_UDP:
			packet->rss_hash_type = PKT_HASH_TYPE_L4;
			break;
		default:
			packet->rss_hash_type = PKT_HASH_TYPE_L3;
		}
	}

	/* Get the packet length */
	rdata->rx.len = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, PL);

	if (!XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, LD)) {
		/* Not all the data has been transferred for this packet */
		XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
			       INCOMPLETE, 1);
		return 0;
	}

	/* This is the last of the data for this packet */
	XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
		       INCOMPLETE, 0);

	/* Set checksum done indicator as appropriate */
	if (netdev->features & NETIF_F_RXCSUM)
		XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
			       CSUM_DONE, 1);

	/* Check for errors (only valid in last descriptor) */
	err = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, ES);
	etlt = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, ETLT);
	netif_dbg(pdata, rx_status, netdev, "err=%u, etlt=%#x\n", err, etlt);

	if (!err || !etlt) {
		/* No error if err is 0 or etlt is 0 */
		if ((etlt == 0x09) &&
		    (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
			XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
				       VLAN_CTAG, 1);
			packet->vlan_ctag = XGMAC_GET_BITS_LE(rdesc->desc0,
							      RX_NORMAL_DESC0,
							      OVT);
			netif_dbg(pdata, rx_status, netdev, "vlan-ctag=%#06x\n",
				  packet->vlan_ctag);
		}
	} else {
		if ((etlt == 0x05) || (etlt == 0x06))
			XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
				       CSUM_DONE, 0);
		else
			XGMAC_SET_BITS(packet->errors, RX_PACKET_ERRORS,
				       FRAME, 1);
	}

	DBGPR("<--xgbe_dev_read: %s - descriptor=%u (cur=%d)\n", channel->name,
	      ring->cur & (ring->rdesc_count - 1), ring->cur);

	return 0;
}

static int xgbe_is_context_desc(struct xgbe_ring_desc *rdesc)
{
	/* Rx and Tx share CTXT bit, so check TDES3.CTXT bit */
	return XGMAC_GET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CTXT);
}

static int xgbe_is_last_desc(struct xgbe_ring_desc *rdesc)
{
	/* Rx and Tx share LD bit, so check TDES3.LD bit */
	return XGMAC_GET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, LD);
}

static int xgbe_enable_int(struct xgbe_channel *channel,
			   enum xgbe_int int_id)
{
	unsigned int dma_ch_ier;

	dma_ch_ier = XGMAC_DMA_IOREAD(channel, DMA_CH_IER);

	switch (int_id) {
	case XGMAC_INT_DMA_CH_SR_TI:
		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 1);
		break;
	case XGMAC_INT_DMA_CH_SR_TPS:
		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TXSE, 1);
		break;
	case XGMAC_INT_DMA_CH_SR_TBU:
		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TBUE, 1);
		break;
	case XGMAC_INT_DMA_CH_SR_RI:
		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 1);
		break;
	case XGMAC_INT_DMA_CH_SR_RBU:
		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RBUE, 1);
		break;
	case XGMAC_INT_DMA_CH_SR_RPS:
		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RSE, 1);
		break;
	case XGMAC_INT_DMA_CH_SR_TI_RI:
		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 1);
		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 1);
		break;
	case XGMAC_INT_DMA_CH_SR_FBE:
		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, FBEE, 1);
		break;
	case XGMAC_INT_DMA_ALL:
		dma_ch_ier |= channel->saved_ier;
		break;
	default:
		return -1;
	}

	XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, dma_ch_ier);

	return 0;
}

static int xgbe_disable_int(struct xgbe_channel *channel,
			    enum xgbe_int int_id)
{
	unsigned int dma_ch_ier;

	dma_ch_ier = XGMAC_DMA_IOREAD(channel, DMA_CH_IER);

	switch (int_id) {
	case XGMAC_INT_DMA_CH_SR_TI:
		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 0);
		break;
	case XGMAC_INT_DMA_CH_SR_TPS:
		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TXSE, 0);
		break;
	case XGMAC_INT_DMA_CH_SR_TBU:
		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TBUE, 0);
		break;
	case XGMAC_INT_DMA_CH_SR_RI:
		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 0);
		break;
	case XGMAC_INT_DMA_CH_SR_RBU:
		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RBUE, 0);
		break;
	case XGMAC_INT_DMA_CH_SR_RPS:
		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RSE, 0);
		break;
	case XGMAC_INT_DMA_CH_SR_TI_RI:
		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 0);
		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 0);
		break;
	case XGMAC_INT_DMA_CH_SR_FBE:
		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, FBEE, 0);
		break;
	case XGMAC_INT_DMA_ALL:
		channel->saved_ier = dma_ch_ier & XGBE_DMA_INTERRUPT_MASK;
		dma_ch_ier &= ~XGBE_DMA_INTERRUPT_MASK;
		break;
	default:
		return -1;
	}

	XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, dma_ch_ier);

	return 0;
}

static int xgbe_exit(struct xgbe_prv_data *pdata)
{
	unsigned int count = 2000;

	DBGPR("-->xgbe_exit\n");

	/* Issue a software reset */
	XGMAC_IOWRITE_BITS(pdata, DMA_MR, SWR, 1);
	usleep_range(10, 15);

	/* Poll Until Poll Condition */
	while (--count && XGMAC_IOREAD_BITS(pdata, DMA_MR, SWR))
		usleep_range(500, 600);

	if (!count)
		return -EBUSY;

	DBGPR("<--xgbe_exit\n");

	return 0;
}

static int xgbe_flush_tx_queues(struct xgbe_prv_data *pdata)
{
	unsigned int i, count;

	if (XGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) < 0x21)
		return 0;

	for (i = 0; i < pdata->tx_q_count; i++)
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, FTQ, 1);

	/* Poll Until Poll Condition */
	for (i = 0; i < pdata->tx_q_count; i++) {
		count = 2000;
		while (--count && XGMAC_MTL_IOREAD_BITS(pdata, i,
							MTL_Q_TQOMR, FTQ))
			usleep_range(500, 600);

		if (!count)
			return -EBUSY;
	}

	return 0;
}

static void xgbe_config_dma_bus(struct xgbe_prv_data *pdata)
{
	/* Set enhanced addressing mode */
	XGMAC_IOWRITE_BITS(pdata, DMA_SBMR, EAME, 1);

	/* Set the System Bus mode */
	XGMAC_IOWRITE_BITS(pdata, DMA_SBMR, UNDEF, 1);
	XGMAC_IOWRITE_BITS(pdata, DMA_SBMR, BLEN_256, 1);
}

static void xgbe_config_dma_cache(struct xgbe_prv_data *pdata)
{
	unsigned int arcache, awcache;

	arcache = 0;
	XGMAC_SET_BITS(arcache, DMA_AXIARCR, DRC, pdata->arcache);
	XGMAC_SET_BITS(arcache, DMA_AXIARCR, DRD, pdata->axdomain);
	XGMAC_SET_BITS(arcache, DMA_AXIARCR, TEC, pdata->arcache);
	XGMAC_SET_BITS(arcache, DMA_AXIARCR, TED, pdata->axdomain);
	XGMAC_SET_BITS(arcache, DMA_AXIARCR, THC, pdata->arcache);
	XGMAC_SET_BITS(arcache, DMA_AXIARCR, THD, pdata->axdomain);
	XGMAC_IOWRITE(pdata, DMA_AXIARCR, arcache);

	awcache = 0;
	XGMAC_SET_BITS(awcache, DMA_AXIAWCR, DWC, pdata->awcache);
	XGMAC_SET_BITS(awcache, DMA_AXIAWCR, DWD, pdata->axdomain);
	XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RPC, pdata->awcache);
	XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RPD, pdata->axdomain);
	XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RHC, pdata->awcache);
	XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RHD, pdata->axdomain);
	XGMAC_SET_BITS(awcache, DMA_AXIAWCR, TDC, pdata->awcache);
	XGMAC_SET_BITS(awcache, DMA_AXIAWCR, TDD, pdata->axdomain);
	XGMAC_IOWRITE(pdata, DMA_AXIAWCR, awcache);
}

static void xgbe_config_mtl_mode(struct xgbe_prv_data *pdata)
{
	unsigned int i;

	/* Set Tx to weighted round robin scheduling algorithm */
	XGMAC_IOWRITE_BITS(pdata, MTL_OMR, ETSALG, MTL_ETSALG_WRR);

	/* Set Tx traffic classes to use WRR algorithm with equal weights */
	for (i = 0; i < pdata->hw_feat.tc_cnt; i++) {
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA,
				       MTL_TSA_ETS);
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_QWR, QW, 1);
	}

	/* Set Rx to strict priority algorithm */
	XGMAC_IOWRITE_BITS(pdata, MTL_OMR, RAA, MTL_RAA_SP);
}

static unsigned int xgbe_calculate_per_queue_fifo(unsigned int fifo_size,
						  unsigned int queue_count)
{
	unsigned int q_fifo_size;
	unsigned int p_fifo;

	/* Calculate the configured fifo size */
	q_fifo_size = 1 << (fifo_size + 7);

	/* The configured value may not be the actual amount of fifo RAM */
	q_fifo_size = min_t(unsigned int, XGBE_FIFO_MAX, q_fifo_size);

	q_fifo_size = q_fifo_size / queue_count;

	/* Each increment in the queue fifo size represents 256 bytes of
	 * fifo, with 0 representing 256 bytes.  Distribute the fifo equally
	 * between the queues.
	 */
	p_fifo = q_fifo_size / 256;
	if (p_fifo)
		p_fifo--;

	return p_fifo;
}
1957 */ 1958 p_fifo = q_fifo_size / 256; 1959 if (p_fifo) 1960 p_fifo--; 1961 1962 return p_fifo; 1963} 1964 1965static void xgbe_config_tx_fifo_size(struct xgbe_prv_data *pdata) 1966{ 1967 unsigned int fifo_size; 1968 unsigned int i; 1969 1970 fifo_size = xgbe_calculate_per_queue_fifo(pdata->hw_feat.tx_fifo_size, 1971 pdata->tx_q_count); 1972 1973 for (i = 0; i < pdata->tx_q_count; i++) 1974 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TQS, fifo_size); 1975 1976 netif_info(pdata, drv, pdata->netdev, 1977 "%d Tx hardware queues, %d byte fifo per queue\n", 1978 pdata->tx_q_count, ((fifo_size + 1) * 256)); 1979} 1980 1981static void xgbe_config_rx_fifo_size(struct xgbe_prv_data *pdata) 1982{ 1983 unsigned int fifo_size; 1984 unsigned int i; 1985 1986 fifo_size = xgbe_calculate_per_queue_fifo(pdata->hw_feat.rx_fifo_size, 1987 pdata->rx_q_count); 1988 1989 for (i = 0; i < pdata->rx_q_count; i++) 1990 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RQS, fifo_size); 1991 1992 netif_info(pdata, drv, pdata->netdev, 1993 "%d Rx hardware queues, %d byte fifo per queue\n", 1994 pdata->rx_q_count, ((fifo_size + 1) * 256)); 1995} 1996 1997static void xgbe_config_queue_mapping(struct xgbe_prv_data *pdata) 1998{ 1999 unsigned int qptc, qptc_extra, queue; 2000 unsigned int prio_queues; 2001 unsigned int ppq, ppq_extra, prio; 2002 unsigned int mask; 2003 unsigned int i, j, reg, reg_val; 2004 2005 /* Map the MTL Tx Queues to Traffic Classes 2006 * Note: Tx Queues >= Traffic Classes 2007 */ 2008 qptc = pdata->tx_q_count / pdata->hw_feat.tc_cnt; 2009 qptc_extra = pdata->tx_q_count % pdata->hw_feat.tc_cnt; 2010 2011 for (i = 0, queue = 0; i < pdata->hw_feat.tc_cnt; i++) { 2012 for (j = 0; j < qptc; j++) { 2013 netif_dbg(pdata, drv, pdata->netdev, 2014 "TXq%u mapped to TC%u\n", queue, i); 2015 XGMAC_MTL_IOWRITE_BITS(pdata, queue, MTL_Q_TQOMR, 2016 Q2TCMAP, i); 2017 pdata->q2tc_map[queue++] = i; 2018 } 2019 2020 if (i < qptc_extra) { 2021 netif_dbg(pdata, drv, pdata->netdev, 2022 "TXq%u mapped to TC%u\n", queue, i); 2023 XGMAC_MTL_IOWRITE_BITS(pdata, queue, MTL_Q_TQOMR, 2024 Q2TCMAP, i); 2025 pdata->q2tc_map[queue++] = i; 2026 } 2027 } 2028 2029 /* Map the 8 VLAN priority values to available MTL Rx queues */ 2030 prio_queues = min_t(unsigned int, IEEE_8021QAZ_MAX_TCS, 2031 pdata->rx_q_count); 2032 ppq = IEEE_8021QAZ_MAX_TCS / prio_queues; 2033 ppq_extra = IEEE_8021QAZ_MAX_TCS % prio_queues; 2034 2035 reg = MAC_RQC2R; 2036 reg_val = 0; 2037 for (i = 0, prio = 0; i < prio_queues;) { 2038 mask = 0; 2039 for (j = 0; j < ppq; j++) { 2040 netif_dbg(pdata, drv, pdata->netdev, 2041 "PRIO%u mapped to RXq%u\n", prio, i); 2042 mask |= (1 << prio); 2043 pdata->prio2q_map[prio++] = i; 2044 } 2045 2046 if (i < ppq_extra) { 2047 netif_dbg(pdata, drv, pdata->netdev, 2048 "PRIO%u mapped to RXq%u\n", prio, i); 2049 mask |= (1 << prio); 2050 pdata->prio2q_map[prio++] = i; 2051 } 2052 2053 reg_val |= (mask << ((i++ % MAC_RQC2_Q_PER_REG) << 3)); 2054 2055 if ((i % MAC_RQC2_Q_PER_REG) && (i != prio_queues)) 2056 continue; 2057 2058 XGMAC_IOWRITE(pdata, reg, reg_val); 2059 reg += MAC_RQC2_INC; 2060 reg_val = 0; 2061 } 2062 2063 /* Select dynamic mapping of MTL Rx queue to DMA Rx channel */ 2064 reg = MTL_RQDCM0R; 2065 reg_val = 0; 2066 for (i = 0; i < pdata->rx_q_count;) { 2067 reg_val |= (0x80 << ((i++ % MTL_RQDCM_Q_PER_REG) << 3)); 2068 2069 if ((i % MTL_RQDCM_Q_PER_REG) && (i != pdata->rx_q_count)) 2070 continue; 2071 2072 XGMAC_IOWRITE(pdata, reg, reg_val); 2073 2074 reg += MTL_RQDCM_INC; 2075 reg_val = 0; 2076 } 2077} 2078 
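/* Illustrative notes on the MTL fifo and queue mapping code above; the
 * numbers used here are hypothetical examples, not values read from the
 * hardware.
 *
 * Fifo partitioning (xgbe_calculate_per_queue_fifo): assuming a reported
 * fifo_size encoding of 9 and that the XGBE_FIFO_MAX cap does not apply,
 * the fifo is 1 << (9 + 7) = 65536 bytes.  Split across 8 Tx queues that
 * is 8192 bytes per queue, so p_fifo = (8192 / 256) - 1 = 31 is written to
 * TQS and the log line reports (31 + 1) * 256 = 8192 bytes per queue.
 *
 * Priority to queue mapping (xgbe_config_queue_mapping): MAC_RQC2R packs
 * one priority bitmask byte per Rx queue, MAC_RQC2_Q_PER_REG queues per
 * 32-bit register.  With 2 Rx queues, for example, the 8 VLAN priorities
 * split as 0-3 -> RXq0 and 4-7 -> RXq1, resulting in a single write of
 * MAC_RQC2R = 0x0000f00f.
 */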
2079static void xgbe_config_flow_control_threshold(struct xgbe_prv_data *pdata) 2080{ 2081 unsigned int i; 2082 2083 for (i = 0; i < pdata->rx_q_count; i++) { 2084 /* Activate flow control when less than 4k left in fifo */ 2085 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFA, 2); 2086 2087 /* De-activate flow control when more than 6k left in fifo */ 2088 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFD, 4); 2089 } 2090} 2091 2092static void xgbe_config_mac_address(struct xgbe_prv_data *pdata) 2093{ 2094 xgbe_set_mac_address(pdata, pdata->netdev->dev_addr); 2095 2096 /* Filtering is done using perfect filtering and hash filtering */ 2097 if (pdata->hw_feat.hash_table_size) { 2098 XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HPF, 1); 2099 XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HUC, 1); 2100 XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HMC, 1); 2101 } 2102} 2103 2104static void xgbe_config_jumbo_enable(struct xgbe_prv_data *pdata) 2105{ 2106 unsigned int val; 2107 2108 val = (pdata->netdev->mtu > XGMAC_STD_PACKET_MTU) ? 1 : 0; 2109 2110 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, JE, val); 2111} 2112 2113static void xgbe_config_mac_speed(struct xgbe_prv_data *pdata) 2114{ 2115 switch (pdata->phy_speed) { 2116 case SPEED_10000: 2117 xgbe_set_xgmii_speed(pdata); 2118 break; 2119 2120 case SPEED_2500: 2121 xgbe_set_gmii_2500_speed(pdata); 2122 break; 2123 2124 case SPEED_1000: 2125 xgbe_set_gmii_speed(pdata); 2126 break; 2127 } 2128} 2129 2130static void xgbe_config_checksum_offload(struct xgbe_prv_data *pdata) 2131{ 2132 if (pdata->netdev->features & NETIF_F_RXCSUM) 2133 xgbe_enable_rx_csum(pdata); 2134 else 2135 xgbe_disable_rx_csum(pdata); 2136} 2137 2138static void xgbe_config_vlan_support(struct xgbe_prv_data *pdata) 2139{ 2140 /* Indicate that VLAN Tx CTAGs come from context descriptors */ 2141 XGMAC_IOWRITE_BITS(pdata, MAC_VLANIR, CSVL, 0); 2142 XGMAC_IOWRITE_BITS(pdata, MAC_VLANIR, VLTI, 1); 2143 2144 /* Set the current VLAN Hash Table register value */ 2145 xgbe_update_vlan_hash_table(pdata); 2146 2147 if (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER) 2148 xgbe_enable_rx_vlan_filtering(pdata); 2149 else 2150 xgbe_disable_rx_vlan_filtering(pdata); 2151 2152 if (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) 2153 xgbe_enable_rx_vlan_stripping(pdata); 2154 else 2155 xgbe_disable_rx_vlan_stripping(pdata); 2156} 2157 2158static u64 xgbe_mmc_read(struct xgbe_prv_data *pdata, unsigned int reg_lo) 2159{ 2160 bool read_hi; 2161 u64 val; 2162 2163 switch (reg_lo) { 2164 /* These registers are always 64 bit */ 2165 case MMC_TXOCTETCOUNT_GB_LO: 2166 case MMC_TXOCTETCOUNT_G_LO: 2167 case MMC_RXOCTETCOUNT_GB_LO: 2168 case MMC_RXOCTETCOUNT_G_LO: 2169 read_hi = true; 2170 break; 2171 2172 default: 2173 read_hi = false; 2174 } 2175 2176 val = XGMAC_IOREAD(pdata, reg_lo); 2177 2178 if (read_hi) 2179 val |= ((u64)XGMAC_IOREAD(pdata, reg_lo + 4) << 32); 2180 2181 return val; 2182} 2183 2184static void xgbe_tx_mmc_int(struct xgbe_prv_data *pdata) 2185{ 2186 struct xgbe_mmc_stats *stats = &pdata->mmc_stats; 2187 unsigned int mmc_isr = XGMAC_IOREAD(pdata, MMC_TISR); 2188 2189 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXOCTETCOUNT_GB)) 2190 stats->txoctetcount_gb += 2191 xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_GB_LO); 2192 2193 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXFRAMECOUNT_GB)) 2194 stats->txframecount_gb += 2195 xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_GB_LO); 2196 2197 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXBROADCASTFRAMES_G)) 2198 stats->txbroadcastframes_g += 2199 xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_G_LO); 2200 2201 
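	/* Every counter below follows the same pattern: a bit set in
	 * MMC_TISR indicates that the corresponding hardware counter needs
	 * to be harvested, so it is read via xgbe_mmc_read() (which also
	 * picks up the high word for the 64-bit octet counters) and added
	 * to the software statistics.  Since ROR is set in xgbe_config_mmc()
	 * the hardware counter resets on that read.
	 */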
if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXMULTICASTFRAMES_G)) 2202 stats->txmulticastframes_g += 2203 xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_G_LO); 2204 2205 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX64OCTETS_GB)) 2206 stats->tx64octets_gb += 2207 xgbe_mmc_read(pdata, MMC_TX64OCTETS_GB_LO); 2208 2209 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX65TO127OCTETS_GB)) 2210 stats->tx65to127octets_gb += 2211 xgbe_mmc_read(pdata, MMC_TX65TO127OCTETS_GB_LO); 2212 2213 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX128TO255OCTETS_GB)) 2214 stats->tx128to255octets_gb += 2215 xgbe_mmc_read(pdata, MMC_TX128TO255OCTETS_GB_LO); 2216 2217 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX256TO511OCTETS_GB)) 2218 stats->tx256to511octets_gb += 2219 xgbe_mmc_read(pdata, MMC_TX256TO511OCTETS_GB_LO); 2220 2221 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX512TO1023OCTETS_GB)) 2222 stats->tx512to1023octets_gb += 2223 xgbe_mmc_read(pdata, MMC_TX512TO1023OCTETS_GB_LO); 2224 2225 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX1024TOMAXOCTETS_GB)) 2226 stats->tx1024tomaxoctets_gb += 2227 xgbe_mmc_read(pdata, MMC_TX1024TOMAXOCTETS_GB_LO); 2228 2229 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXUNICASTFRAMES_GB)) 2230 stats->txunicastframes_gb += 2231 xgbe_mmc_read(pdata, MMC_TXUNICASTFRAMES_GB_LO); 2232 2233 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXMULTICASTFRAMES_GB)) 2234 stats->txmulticastframes_gb += 2235 xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_GB_LO); 2236 2237 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXBROADCASTFRAMES_GB)) 2238 stats->txbroadcastframes_g += 2239 xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_GB_LO); 2240 2241 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXUNDERFLOWERROR)) 2242 stats->txunderflowerror += 2243 xgbe_mmc_read(pdata, MMC_TXUNDERFLOWERROR_LO); 2244 2245 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXOCTETCOUNT_G)) 2246 stats->txoctetcount_g += 2247 xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_G_LO); 2248 2249 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXFRAMECOUNT_G)) 2250 stats->txframecount_g += 2251 xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_G_LO); 2252 2253 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXPAUSEFRAMES)) 2254 stats->txpauseframes += 2255 xgbe_mmc_read(pdata, MMC_TXPAUSEFRAMES_LO); 2256 2257 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXVLANFRAMES_G)) 2258 stats->txvlanframes_g += 2259 xgbe_mmc_read(pdata, MMC_TXVLANFRAMES_G_LO); 2260} 2261 2262static void xgbe_rx_mmc_int(struct xgbe_prv_data *pdata) 2263{ 2264 struct xgbe_mmc_stats *stats = &pdata->mmc_stats; 2265 unsigned int mmc_isr = XGMAC_IOREAD(pdata, MMC_RISR); 2266 2267 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXFRAMECOUNT_GB)) 2268 stats->rxframecount_gb += 2269 xgbe_mmc_read(pdata, MMC_RXFRAMECOUNT_GB_LO); 2270 2271 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOCTETCOUNT_GB)) 2272 stats->rxoctetcount_gb += 2273 xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_GB_LO); 2274 2275 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOCTETCOUNT_G)) 2276 stats->rxoctetcount_g += 2277 xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_G_LO); 2278 2279 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXBROADCASTFRAMES_G)) 2280 stats->rxbroadcastframes_g += 2281 xgbe_mmc_read(pdata, MMC_RXBROADCASTFRAMES_G_LO); 2282 2283 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXMULTICASTFRAMES_G)) 2284 stats->rxmulticastframes_g += 2285 xgbe_mmc_read(pdata, MMC_RXMULTICASTFRAMES_G_LO); 2286 2287 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXCRCERROR)) 2288 stats->rxcrcerror += 2289 xgbe_mmc_read(pdata, MMC_RXCRCERROR_LO); 2290 2291 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXRUNTERROR)) 2292 stats->rxrunterror += 2293 xgbe_mmc_read(pdata, MMC_RXRUNTERROR); 2294 2295 if 
(XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXJABBERERROR)) 2296 stats->rxjabbererror += 2297 xgbe_mmc_read(pdata, MMC_RXJABBERERROR); 2298 2299 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXUNDERSIZE_G)) 2300 stats->rxundersize_g += 2301 xgbe_mmc_read(pdata, MMC_RXUNDERSIZE_G); 2302 2303 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOVERSIZE_G)) 2304 stats->rxoversize_g += 2305 xgbe_mmc_read(pdata, MMC_RXOVERSIZE_G); 2306 2307 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX64OCTETS_GB)) 2308 stats->rx64octets_gb += 2309 xgbe_mmc_read(pdata, MMC_RX64OCTETS_GB_LO); 2310 2311 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX65TO127OCTETS_GB)) 2312 stats->rx65to127octets_gb += 2313 xgbe_mmc_read(pdata, MMC_RX65TO127OCTETS_GB_LO); 2314 2315 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX128TO255OCTETS_GB)) 2316 stats->rx128to255octets_gb += 2317 xgbe_mmc_read(pdata, MMC_RX128TO255OCTETS_GB_LO); 2318 2319 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX256TO511OCTETS_GB)) 2320 stats->rx256to511octets_gb += 2321 xgbe_mmc_read(pdata, MMC_RX256TO511OCTETS_GB_LO); 2322 2323 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX512TO1023OCTETS_GB)) 2324 stats->rx512to1023octets_gb += 2325 xgbe_mmc_read(pdata, MMC_RX512TO1023OCTETS_GB_LO); 2326 2327 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX1024TOMAXOCTETS_GB)) 2328 stats->rx1024tomaxoctets_gb += 2329 xgbe_mmc_read(pdata, MMC_RX1024TOMAXOCTETS_GB_LO); 2330 2331 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXUNICASTFRAMES_G)) 2332 stats->rxunicastframes_g += 2333 xgbe_mmc_read(pdata, MMC_RXUNICASTFRAMES_G_LO); 2334 2335 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXLENGTHERROR)) 2336 stats->rxlengtherror += 2337 xgbe_mmc_read(pdata, MMC_RXLENGTHERROR_LO); 2338 2339 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOUTOFRANGETYPE)) 2340 stats->rxoutofrangetype += 2341 xgbe_mmc_read(pdata, MMC_RXOUTOFRANGETYPE_LO); 2342 2343 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXPAUSEFRAMES)) 2344 stats->rxpauseframes += 2345 xgbe_mmc_read(pdata, MMC_RXPAUSEFRAMES_LO); 2346 2347 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXFIFOOVERFLOW)) 2348 stats->rxfifooverflow += 2349 xgbe_mmc_read(pdata, MMC_RXFIFOOVERFLOW_LO); 2350 2351 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXVLANFRAMES_GB)) 2352 stats->rxvlanframes_gb += 2353 xgbe_mmc_read(pdata, MMC_RXVLANFRAMES_GB_LO); 2354 2355 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXWATCHDOGERROR)) 2356 stats->rxwatchdogerror += 2357 xgbe_mmc_read(pdata, MMC_RXWATCHDOGERROR); 2358} 2359 2360static void xgbe_read_mmc_stats(struct xgbe_prv_data *pdata) 2361{ 2362 struct xgbe_mmc_stats *stats = &pdata->mmc_stats; 2363 2364 /* Freeze counters */ 2365 XGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 1); 2366 2367 stats->txoctetcount_gb += 2368 xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_GB_LO); 2369 2370 stats->txframecount_gb += 2371 xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_GB_LO); 2372 2373 stats->txbroadcastframes_g += 2374 xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_G_LO); 2375 2376 stats->txmulticastframes_g += 2377 xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_G_LO); 2378 2379 stats->tx64octets_gb += 2380 xgbe_mmc_read(pdata, MMC_TX64OCTETS_GB_LO); 2381 2382 stats->tx65to127octets_gb += 2383 xgbe_mmc_read(pdata, MMC_TX65TO127OCTETS_GB_LO); 2384 2385 stats->tx128to255octets_gb += 2386 xgbe_mmc_read(pdata, MMC_TX128TO255OCTETS_GB_LO); 2387 2388 stats->tx256to511octets_gb += 2389 xgbe_mmc_read(pdata, MMC_TX256TO511OCTETS_GB_LO); 2390 2391 stats->tx512to1023octets_gb += 2392 xgbe_mmc_read(pdata, MMC_TX512TO1023OCTETS_GB_LO); 2393 2394 stats->tx1024tomaxoctets_gb += 2395 xgbe_mmc_read(pdata, MMC_TX1024TOMAXOCTETS_GB_LO); 2396 2397 stats->txunicastframes_gb += 
2398 xgbe_mmc_read(pdata, MMC_TXUNICASTFRAMES_GB_LO); 2399 2400 stats->txmulticastframes_gb += 2401 xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_GB_LO); 2402 2403 stats->txbroadcastframes_g += 2404 xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_GB_LO); 2405 2406 stats->txunderflowerror += 2407 xgbe_mmc_read(pdata, MMC_TXUNDERFLOWERROR_LO); 2408 2409 stats->txoctetcount_g += 2410 xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_G_LO); 2411 2412 stats->txframecount_g += 2413 xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_G_LO); 2414 2415 stats->txpauseframes += 2416 xgbe_mmc_read(pdata, MMC_TXPAUSEFRAMES_LO); 2417 2418 stats->txvlanframes_g += 2419 xgbe_mmc_read(pdata, MMC_TXVLANFRAMES_G_LO); 2420 2421 stats->rxframecount_gb += 2422 xgbe_mmc_read(pdata, MMC_RXFRAMECOUNT_GB_LO); 2423 2424 stats->rxoctetcount_gb += 2425 xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_GB_LO); 2426 2427 stats->rxoctetcount_g += 2428 xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_G_LO); 2429 2430 stats->rxbroadcastframes_g += 2431 xgbe_mmc_read(pdata, MMC_RXBROADCASTFRAMES_G_LO); 2432 2433 stats->rxmulticastframes_g += 2434 xgbe_mmc_read(pdata, MMC_RXMULTICASTFRAMES_G_LO); 2435 2436 stats->rxcrcerror += 2437 xgbe_mmc_read(pdata, MMC_RXCRCERROR_LO); 2438 2439 stats->rxrunterror += 2440 xgbe_mmc_read(pdata, MMC_RXRUNTERROR); 2441 2442 stats->rxjabbererror += 2443 xgbe_mmc_read(pdata, MMC_RXJABBERERROR); 2444 2445 stats->rxundersize_g += 2446 xgbe_mmc_read(pdata, MMC_RXUNDERSIZE_G); 2447 2448 stats->rxoversize_g += 2449 xgbe_mmc_read(pdata, MMC_RXOVERSIZE_G); 2450 2451 stats->rx64octets_gb += 2452 xgbe_mmc_read(pdata, MMC_RX64OCTETS_GB_LO); 2453 2454 stats->rx65to127octets_gb += 2455 xgbe_mmc_read(pdata, MMC_RX65TO127OCTETS_GB_LO); 2456 2457 stats->rx128to255octets_gb += 2458 xgbe_mmc_read(pdata, MMC_RX128TO255OCTETS_GB_LO); 2459 2460 stats->rx256to511octets_gb += 2461 xgbe_mmc_read(pdata, MMC_RX256TO511OCTETS_GB_LO); 2462 2463 stats->rx512to1023octets_gb += 2464 xgbe_mmc_read(pdata, MMC_RX512TO1023OCTETS_GB_LO); 2465 2466 stats->rx1024tomaxoctets_gb += 2467 xgbe_mmc_read(pdata, MMC_RX1024TOMAXOCTETS_GB_LO); 2468 2469 stats->rxunicastframes_g += 2470 xgbe_mmc_read(pdata, MMC_RXUNICASTFRAMES_G_LO); 2471 2472 stats->rxlengtherror += 2473 xgbe_mmc_read(pdata, MMC_RXLENGTHERROR_LO); 2474 2475 stats->rxoutofrangetype += 2476 xgbe_mmc_read(pdata, MMC_RXOUTOFRANGETYPE_LO); 2477 2478 stats->rxpauseframes += 2479 xgbe_mmc_read(pdata, MMC_RXPAUSEFRAMES_LO); 2480 2481 stats->rxfifooverflow += 2482 xgbe_mmc_read(pdata, MMC_RXFIFOOVERFLOW_LO); 2483 2484 stats->rxvlanframes_gb += 2485 xgbe_mmc_read(pdata, MMC_RXVLANFRAMES_GB_LO); 2486 2487 stats->rxwatchdogerror += 2488 xgbe_mmc_read(pdata, MMC_RXWATCHDOGERROR); 2489 2490 /* Un-freeze counters */ 2491 XGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 0); 2492} 2493 2494static void xgbe_config_mmc(struct xgbe_prv_data *pdata) 2495{ 2496 /* Set counters to reset on read */ 2497 XGMAC_IOWRITE_BITS(pdata, MMC_CR, ROR, 1); 2498 2499 /* Reset the counters */ 2500 XGMAC_IOWRITE_BITS(pdata, MMC_CR, CR, 1); 2501} 2502 2503static void xgbe_prepare_tx_stop(struct xgbe_prv_data *pdata, 2504 struct xgbe_channel *channel) 2505{ 2506 unsigned int tx_dsr, tx_pos, tx_qidx; 2507 unsigned int tx_status; 2508 unsigned long tx_timeout; 2509 2510 /* Calculate the status register to read and the position within */ 2511 if (channel->queue_index < DMA_DSRX_FIRST_QUEUE) { 2512 tx_dsr = DMA_DSR0; 2513 tx_pos = (channel->queue_index * DMA_DSR_Q_WIDTH) + 2514 DMA_DSR0_TPS_START; 2515 } else { 2516 tx_qidx = channel->queue_index - DMA_DSRX_FIRST_QUEUE; 2517 2518 
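		/* Queues at or above DMA_DSRX_FIRST_QUEUE do not live in
		 * DMA_DSR0; they are packed DMA_DSRX_QPR per register
		 * starting at DMA_DSR1, with the registers DMA_DSRX_INC
		 * apart.  Locate the register for this queue and then the
		 * bit position of its TPS field within that register.
		 */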
tx_dsr = DMA_DSR1 + ((tx_qidx / DMA_DSRX_QPR) * DMA_DSRX_INC); 2519 tx_pos = ((tx_qidx % DMA_DSRX_QPR) * DMA_DSR_Q_WIDTH) + 2520 DMA_DSRX_TPS_START; 2521 } 2522 2523 /* The Tx engine cannot be stopped if it is actively processing 2524 * descriptors. Wait for the Tx engine to enter the stopped or 2525 * suspended state. Don't wait forever though... 2526 */ 2527 tx_timeout = jiffies + (XGBE_DMA_STOP_TIMEOUT * HZ); 2528 while (time_before(jiffies, tx_timeout)) { 2529 tx_status = XGMAC_IOREAD(pdata, tx_dsr); 2530 tx_status = GET_BITS(tx_status, tx_pos, DMA_DSR_TPS_WIDTH); 2531 if ((tx_status == DMA_TPS_STOPPED) || 2532 (tx_status == DMA_TPS_SUSPENDED)) 2533 break; 2534 2535 usleep_range(500, 1000); 2536 } 2537 2538 if (!time_before(jiffies, tx_timeout)) 2539 netdev_info(pdata->netdev, 2540 "timed out waiting for Tx DMA channel %u to stop\n", 2541 channel->queue_index); 2542} 2543 2544static void xgbe_enable_tx(struct xgbe_prv_data *pdata) 2545{ 2546 struct xgbe_channel *channel; 2547 unsigned int i; 2548 2549 /* Enable each Tx DMA channel */ 2550 channel = pdata->channel; 2551 for (i = 0; i < pdata->channel_count; i++, channel++) { 2552 if (!channel->tx_ring) 2553 break; 2554 2555 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, ST, 1); 2556 } 2557 2558 /* Enable each Tx queue */ 2559 for (i = 0; i < pdata->tx_q_count; i++) 2560 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TXQEN, 2561 MTL_Q_ENABLED); 2562 2563 /* Enable MAC Tx */ 2564 XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 1); 2565} 2566 2567static void xgbe_disable_tx(struct xgbe_prv_data *pdata) 2568{ 2569 struct xgbe_channel *channel; 2570 unsigned int i; 2571 2572 /* Prepare for Tx DMA channel stop */ 2573 channel = pdata->channel; 2574 for (i = 0; i < pdata->channel_count; i++, channel++) { 2575 if (!channel->tx_ring) 2576 break; 2577 2578 xgbe_prepare_tx_stop(pdata, channel); 2579 } 2580 2581 /* Disable MAC Tx */ 2582 XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 0); 2583 2584 /* Disable each Tx queue */ 2585 for (i = 0; i < pdata->tx_q_count; i++) 2586 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TXQEN, 0); 2587 2588 /* Disable each Tx DMA channel */ 2589 channel = pdata->channel; 2590 for (i = 0; i < pdata->channel_count; i++, channel++) { 2591 if (!channel->tx_ring) 2592 break; 2593 2594 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, ST, 0); 2595 } 2596} 2597 2598static void xgbe_enable_rx(struct xgbe_prv_data *pdata) 2599{ 2600 struct xgbe_channel *channel; 2601 unsigned int reg_val, i; 2602 2603 /* Enable each Rx DMA channel */ 2604 channel = pdata->channel; 2605 for (i = 0; i < pdata->channel_count; i++, channel++) { 2606 if (!channel->rx_ring) 2607 break; 2608 2609 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, SR, 1); 2610 } 2611 2612 /* Enable each Rx queue */ 2613 reg_val = 0; 2614 for (i = 0; i < pdata->rx_q_count; i++) 2615 reg_val |= (0x02 << (i << 1)); 2616 XGMAC_IOWRITE(pdata, MAC_RQC0R, reg_val); 2617 2618 /* Enable MAC Rx */ 2619 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, DCRCC, 1); 2620 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, CST, 1); 2621 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, ACS, 1); 2622 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, RE, 1); 2623} 2624 2625static void xgbe_disable_rx(struct xgbe_prv_data *pdata) 2626{ 2627 struct xgbe_channel *channel; 2628 unsigned int i; 2629 2630 /* Disable MAC Rx */ 2631 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, DCRCC, 0); 2632 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, CST, 0); 2633 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, ACS, 0); 2634 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, RE, 0); 2635 2636 /* Disable each Rx queue */ 2637 XGMAC_IOWRITE(pdata, 
MAC_RQC0R, 0); 2638 2639 /* Disable each Rx DMA channel */ 2640 channel = pdata->channel; 2641 for (i = 0; i < pdata->channel_count; i++, channel++) { 2642 if (!channel->rx_ring) 2643 break; 2644 2645 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, SR, 0); 2646 } 2647} 2648 2649static void xgbe_powerup_tx(struct xgbe_prv_data *pdata) 2650{ 2651 struct xgbe_channel *channel; 2652 unsigned int i; 2653 2654 /* Enable each Tx DMA channel */ 2655 channel = pdata->channel; 2656 for (i = 0; i < pdata->channel_count; i++, channel++) { 2657 if (!channel->tx_ring) 2658 break; 2659 2660 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, ST, 1); 2661 } 2662 2663 /* Enable MAC Tx */ 2664 XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 1); 2665} 2666 2667static void xgbe_powerdown_tx(struct xgbe_prv_data *pdata) 2668{ 2669 struct xgbe_channel *channel; 2670 unsigned int i; 2671 2672 /* Prepare for Tx DMA channel stop */ 2673 channel = pdata->channel; 2674 for (i = 0; i < pdata->channel_count; i++, channel++) { 2675 if (!channel->tx_ring) 2676 break; 2677 2678 xgbe_prepare_tx_stop(pdata, channel); 2679 } 2680 2681 /* Disable MAC Tx */ 2682 XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 0); 2683 2684 /* Disable each Tx DMA channel */ 2685 channel = pdata->channel; 2686 for (i = 0; i < pdata->channel_count; i++, channel++) { 2687 if (!channel->tx_ring) 2688 break; 2689 2690 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, ST, 0); 2691 } 2692} 2693 2694static void xgbe_powerup_rx(struct xgbe_prv_data *pdata) 2695{ 2696 struct xgbe_channel *channel; 2697 unsigned int i; 2698 2699 /* Enable each Rx DMA channel */ 2700 channel = pdata->channel; 2701 for (i = 0; i < pdata->channel_count; i++, channel++) { 2702 if (!channel->rx_ring) 2703 break; 2704 2705 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, SR, 1); 2706 } 2707} 2708 2709static void xgbe_powerdown_rx(struct xgbe_prv_data *pdata) 2710{ 2711 struct xgbe_channel *channel; 2712 unsigned int i; 2713 2714 /* Disable each Rx DMA channel */ 2715 channel = pdata->channel; 2716 for (i = 0; i < pdata->channel_count; i++, channel++) { 2717 if (!channel->rx_ring) 2718 break; 2719 2720 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, SR, 0); 2721 } 2722} 2723 2724static int xgbe_init(struct xgbe_prv_data *pdata) 2725{ 2726 struct xgbe_desc_if *desc_if = &pdata->desc_if; 2727 int ret; 2728 2729 DBGPR("-->xgbe_init\n"); 2730 2731 /* Flush Tx queues */ 2732 ret = xgbe_flush_tx_queues(pdata); 2733 if (ret) 2734 return ret; 2735 2736 /* 2737 * Initialize DMA related features 2738 */ 2739 xgbe_config_dma_bus(pdata); 2740 xgbe_config_dma_cache(pdata); 2741 xgbe_config_osp_mode(pdata); 2742 xgbe_config_pblx8(pdata); 2743 xgbe_config_tx_pbl_val(pdata); 2744 xgbe_config_rx_pbl_val(pdata); 2745 xgbe_config_rx_coalesce(pdata); 2746 xgbe_config_tx_coalesce(pdata); 2747 xgbe_config_rx_buffer_size(pdata); 2748 xgbe_config_tso_mode(pdata); 2749 xgbe_config_sph_mode(pdata); 2750 xgbe_config_rss(pdata); 2751 desc_if->wrapper_tx_desc_init(pdata); 2752 desc_if->wrapper_rx_desc_init(pdata); 2753 xgbe_enable_dma_interrupts(pdata); 2754 2755 /* 2756 * Initialize MTL related features 2757 */ 2758 xgbe_config_mtl_mode(pdata); 2759 xgbe_config_queue_mapping(pdata); 2760 xgbe_config_tsf_mode(pdata, pdata->tx_sf_mode); 2761 xgbe_config_rsf_mode(pdata, pdata->rx_sf_mode); 2762 xgbe_config_tx_threshold(pdata, pdata->tx_threshold); 2763 xgbe_config_rx_threshold(pdata, pdata->rx_threshold); 2764 xgbe_config_tx_fifo_size(pdata); 2765 xgbe_config_rx_fifo_size(pdata); 2766 xgbe_config_flow_control_threshold(pdata); 2767 /*TODO: Error Packet 
and undersized good Packet forwarding enable 2768 (FEP and FUP) 2769 */ 2770 xgbe_config_dcb_tc(pdata); 2771 xgbe_config_dcb_pfc(pdata); 2772 xgbe_enable_mtl_interrupts(pdata); 2773 2774 /* 2775 * Initialize MAC related features 2776 */ 2777 xgbe_config_mac_address(pdata); 2778 xgbe_config_rx_mode(pdata); 2779 xgbe_config_jumbo_enable(pdata); 2780 xgbe_config_flow_control(pdata); 2781 xgbe_config_mac_speed(pdata); 2782 xgbe_config_checksum_offload(pdata); 2783 xgbe_config_vlan_support(pdata); 2784 xgbe_config_mmc(pdata); 2785 xgbe_enable_mac_interrupts(pdata); 2786 2787 DBGPR("<--xgbe_init\n"); 2788 2789 return 0; 2790} 2791 2792void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if) 2793{ 2794 DBGPR("-->xgbe_init_function_ptrs\n"); 2795 2796 hw_if->tx_complete = xgbe_tx_complete; 2797 2798 hw_if->set_mac_address = xgbe_set_mac_address; 2799 hw_if->config_rx_mode = xgbe_config_rx_mode; 2800 2801 hw_if->enable_rx_csum = xgbe_enable_rx_csum; 2802 hw_if->disable_rx_csum = xgbe_disable_rx_csum; 2803 2804 hw_if->enable_rx_vlan_stripping = xgbe_enable_rx_vlan_stripping; 2805 hw_if->disable_rx_vlan_stripping = xgbe_disable_rx_vlan_stripping; 2806 hw_if->enable_rx_vlan_filtering = xgbe_enable_rx_vlan_filtering; 2807 hw_if->disable_rx_vlan_filtering = xgbe_disable_rx_vlan_filtering; 2808 hw_if->update_vlan_hash_table = xgbe_update_vlan_hash_table; 2809 2810 hw_if->read_mmd_regs = xgbe_read_mmd_regs; 2811 hw_if->write_mmd_regs = xgbe_write_mmd_regs; 2812 2813 hw_if->set_gmii_speed = xgbe_set_gmii_speed; 2814 hw_if->set_gmii_2500_speed = xgbe_set_gmii_2500_speed; 2815 hw_if->set_xgmii_speed = xgbe_set_xgmii_speed; 2816 2817 hw_if->enable_tx = xgbe_enable_tx; 2818 hw_if->disable_tx = xgbe_disable_tx; 2819 hw_if->enable_rx = xgbe_enable_rx; 2820 hw_if->disable_rx = xgbe_disable_rx; 2821 2822 hw_if->powerup_tx = xgbe_powerup_tx; 2823 hw_if->powerdown_tx = xgbe_powerdown_tx; 2824 hw_if->powerup_rx = xgbe_powerup_rx; 2825 hw_if->powerdown_rx = xgbe_powerdown_rx; 2826 2827 hw_if->dev_xmit = xgbe_dev_xmit; 2828 hw_if->dev_read = xgbe_dev_read; 2829 hw_if->enable_int = xgbe_enable_int; 2830 hw_if->disable_int = xgbe_disable_int; 2831 hw_if->init = xgbe_init; 2832 hw_if->exit = xgbe_exit; 2833 2834 /* Descriptor related Sequences have to be initialized here */ 2835 hw_if->tx_desc_init = xgbe_tx_desc_init; 2836 hw_if->rx_desc_init = xgbe_rx_desc_init; 2837 hw_if->tx_desc_reset = xgbe_tx_desc_reset; 2838 hw_if->rx_desc_reset = xgbe_rx_desc_reset; 2839 hw_if->is_last_desc = xgbe_is_last_desc; 2840 hw_if->is_context_desc = xgbe_is_context_desc; 2841 hw_if->tx_start_xmit = xgbe_tx_start_xmit; 2842 2843 /* For FLOW ctrl */ 2844 hw_if->config_tx_flow_control = xgbe_config_tx_flow_control; 2845 hw_if->config_rx_flow_control = xgbe_config_rx_flow_control; 2846 2847 /* For RX coalescing */ 2848 hw_if->config_rx_coalesce = xgbe_config_rx_coalesce; 2849 hw_if->config_tx_coalesce = xgbe_config_tx_coalesce; 2850 hw_if->usec_to_riwt = xgbe_usec_to_riwt; 2851 hw_if->riwt_to_usec = xgbe_riwt_to_usec; 2852 2853 /* For RX and TX threshold config */ 2854 hw_if->config_rx_threshold = xgbe_config_rx_threshold; 2855 hw_if->config_tx_threshold = xgbe_config_tx_threshold; 2856 2857 /* For RX and TX Store and Forward Mode config */ 2858 hw_if->config_rsf_mode = xgbe_config_rsf_mode; 2859 hw_if->config_tsf_mode = xgbe_config_tsf_mode; 2860 2861 /* For TX DMA Operating on Second Frame config */ 2862 hw_if->config_osp_mode = xgbe_config_osp_mode; 2863 2864 /* For RX and TX PBL config */ 2865 hw_if->config_rx_pbl_val = 
xgbe_config_rx_pbl_val; 2866 hw_if->get_rx_pbl_val = xgbe_get_rx_pbl_val; 2867 hw_if->config_tx_pbl_val = xgbe_config_tx_pbl_val; 2868 hw_if->get_tx_pbl_val = xgbe_get_tx_pbl_val; 2869 hw_if->config_pblx8 = xgbe_config_pblx8; 2870 2871 /* For MMC statistics support */ 2872 hw_if->tx_mmc_int = xgbe_tx_mmc_int; 2873 hw_if->rx_mmc_int = xgbe_rx_mmc_int; 2874 hw_if->read_mmc_stats = xgbe_read_mmc_stats; 2875 2876 /* For PTP config */ 2877 hw_if->config_tstamp = xgbe_config_tstamp; 2878 hw_if->update_tstamp_addend = xgbe_update_tstamp_addend; 2879 hw_if->set_tstamp_time = xgbe_set_tstamp_time; 2880 hw_if->get_tstamp_time = xgbe_get_tstamp_time; 2881 hw_if->get_tx_tstamp = xgbe_get_tx_tstamp; 2882 2883 /* For Data Center Bridging config */ 2884 hw_if->config_dcb_tc = xgbe_config_dcb_tc; 2885 hw_if->config_dcb_pfc = xgbe_config_dcb_pfc; 2886 2887 /* For Receive Side Scaling */ 2888 hw_if->enable_rss = xgbe_enable_rss; 2889 hw_if->disable_rss = xgbe_disable_rss; 2890 hw_if->set_rss_hash_key = xgbe_set_rss_hash_key; 2891 hw_if->set_rss_lookup_table = xgbe_set_rss_lookup_table; 2892 2893 DBGPR("<--xgbe_init_function_ptrs\n"); 2894} 2895
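/* Illustrative usage sketch (not part of this file): since all of the
 * routines above are static, the rest of the driver is expected to reach
 * them only through the function table filled in by
 * xgbe_init_function_ptrs_dev().  The call sites are assumed to live
 * elsewhere in the driver (e.g. xgbe-main.c / xgbe-drv.c) and look roughly
 * like this:
 *
 *	struct xgbe_hw_if *hw_if = &pdata->hw_if;
 *	int ret;
 *
 *	xgbe_init_function_ptrs_dev(hw_if);
 *
 *	ret = hw_if->init(pdata);
 *	if (ret)
 *		return ret;
 *
 *	hw_if->enable_tx(pdata);
 *	hw_if->enable_rx(pdata);
 *	...
 *	hw_if->disable_rx(pdata);
 *	hw_if->disable_tx(pdata);
 *	hw_if->exit(pdata);
 *
 * Going through the table keeps the register-level knowledge in this file
 * and lets the rest of the driver remain independent of the XGMAC register
 * layout.
 */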