This source file includes the following definitions.
- t3_wait_op_done_val
- t3_write_regs
- t3_set_reg_field
- t3_read_indirect
- t3_mc7_bd_read
- mi1_init
- t3_mi1_read
- t3_mi1_write
- mi1_wr_addr
- mi1_ext_read
- mi1_ext_write
- t3_mdio_change_bits
- t3_phy_reset
- t3_phy_advertise
- t3_phy_advertise_fiber
- t3_set_phy_speed_duplex
- t3_phy_lasi_intr_enable
- t3_phy_lasi_intr_disable
- t3_phy_lasi_intr_clear
- t3_phy_lasi_intr_handler
- t3_get_adapter_info
- t3_seeprom_read
- t3_seeprom_write
- t3_seeprom_wp
- vpdstrtouint
- vpdstrtou16
- get_vpd_params
- sf1_read
- sf1_write
- flash_wait_op
- t3_read_flash
- t3_write_flash
- t3_get_tp_version
- t3_check_tpsram_version
- t3_check_tpsram
- t3_get_fw_version
- t3_check_fw_version
- t3_flash_erase_sectors
- t3_load_fw
- t3_cim_ctl_blk_read
- t3_gate_rx_traffic
- t3_open_rx_traffic
- t3_link_changed
- t3_link_fault
- t3_link_start
- t3_set_vlan_accel
- t3_handle_intr_status
- pci_intr_handler
- pcie_intr_handler
- tp_intr_handler
- cim_intr_handler
- ulprx_intr_handler
- ulptx_intr_handler
- pmtx_intr_handler
- pmrx_intr_handler
- cplsw_intr_handler
- mps_intr_handler
- mc7_intr_handler
- mac_intr_handler
- t3_phy_intr_handler
- t3_slow_intr_handler
- calc_gpio_intr
- t3_intr_enable
- t3_intr_disable
- t3_intr_clear
- t3_xgm_intr_enable
- t3_xgm_intr_disable
- t3_port_intr_enable
- t3_port_intr_disable
- t3_port_intr_clear
- t3_sge_write_context
- clear_sge_ctxt
- t3_sge_init_ecntxt
- t3_sge_init_flcntxt
- t3_sge_init_rspcntxt
- t3_sge_init_cqcntxt
- t3_sge_enable_ecntxt
- t3_sge_disable_fl
- t3_sge_disable_rspcntxt
- t3_sge_disable_cqcntxt
- t3_sge_cqcntxt_op
- t3_config_rss
- t3_tp_set_offload_mode
- pm_num_pages
- partition_mem
- tp_wr_indirect
- tp_config
- tp_set_timers
- t3_tp_set_coalescing_size
- t3_tp_set_max_rxsize
- init_mtus
- init_cong_ctrl
- t3_load_mtus
- t3_tp_get_mib_stats
- ulp_config
- t3_set_proto_sram
- t3_config_trace_filter
- t3_config_sched
- tp_init
- chan_init_hw
- calibrate_xgm
- calibrate_xgm_t3b
- wrreg_wait
- mc7_init
- config_pcie
- t3_init_hw
- get_pci_mode
- init_link_config
- mc7_calc_size
- mc7_prep
- mac_prep
- early_hw_init
- t3_reset_adapter
- init_parity
- t3_prep_adapter
- t3_led_ready
- t3_replay_prep_adapter
   1 
   2 
   3 
   4 
   5 
   6 
   7 
   8 
   9 
  10 
  11 
  12 
  13 
  14 
  15 
  16 
  17 
  18 
  19 
  20 
  21 
  22 
  23 
  24 
  25 
  26 
  27 
  28 
  29 
  30 
  31 
  32 #include "common.h"
  33 #include "regs.h"
  34 #include "sge_defs.h"
  35 #include "firmware_exports.h"
  36 
  37 static void t3_port_intr_clear(struct adapter *adapter, int idx);
  38 
  39 
  40 
  41 
  42 
  43 
  44 
  45 
  46 
  47 
  48 
  49 
  50 
  51 
  52 
  53 
  54 
  55 int t3_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
  56                         int polarity, int attempts, int delay, u32 *valp)
  57 {
  58         while (1) {
  59                 u32 val = t3_read_reg(adapter, reg);
  60 
  61                 if (!!(val & mask) == polarity) {
  62                         if (valp)
  63                                 *valp = val;
  64                         return 0;
  65                 }
  66                 if (--attempts == 0)
  67                         return -EAGAIN;
  68                 if (delay)
  69                         udelay(delay);
  70         }
  71 }
  72 
  73 
  74 
  75 
  76 
  77 
  78 
  79 
  80 
  81 
  82 
  83 
  84 void t3_write_regs(struct adapter *adapter, const struct addr_val_pair *p,
  85                    int n, unsigned int offset)
  86 {
  87         while (n--) {
  88                 t3_write_reg(adapter, p->reg_addr + offset, p->val);
  89                 p++;
  90         }
  91 }
  92 
  93 
  94 
  95 
  96 
  97 
  98 
  99 
 100 
 101 
 102 
 103 void t3_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
 104                       u32 val)
 105 {
 106         u32 v = t3_read_reg(adapter, addr) & ~mask;
 107 
 108         t3_write_reg(adapter, addr, v | val);
 109         t3_read_reg(adapter, addr);     
 110 }
 111 
 112 
 113 
 114 
 115 
 116 
 117 
 118 
 119 
 120 
 121 
 122 
 123 
 124 static void t3_read_indirect(struct adapter *adap, unsigned int addr_reg,
 125                              unsigned int data_reg, u32 *vals,
 126                              unsigned int nregs, unsigned int start_idx)
 127 {
 128         while (nregs--) {
 129                 t3_write_reg(adap, addr_reg, start_idx);
 130                 *vals++ = t3_read_reg(adap, data_reg);
 131                 start_idx++;
 132         }
 133 }
 134 
 135 
 136 
 137 
 138 
 139 
 140 
 141 
 142 
 143 
 144 
/*
 * t3_mc7_bd_read - read from MC7 through backdoor accesses
 * @mc7: identifies MC7 to read from
 * @start: index of first 64-bit word to read
 * @n: number of 64-bit words to read
 * @buf: where to store the read result
 *
 * Read n 64-bit words from MC7 starting at word start, using backdoor
 * accesses.  Returns 0 on success, -EINVAL on a bad range, -EIO if the
 * backdoor engine stays busy.
 */
int t3_mc7_bd_read(struct mc7 *mc7, unsigned int start, unsigned int n,
                   u64 *buf)
{
        /* per-width shift/step tables for reassembling narrow reads */
        static const int shift[] = { 0, 0, 16, 24 };
        static const int step[] = { 0, 32, 16, 8 };

        unsigned int size64 = mc7->size / 8;    /* # of 64-bit words */
        struct adapter *adap = mc7->adapter;

        if (start >= size64 || start + n > size64)
                return -EINVAL;

        /* convert the 64-bit word index to a byte address for this width */
        start *= (8 << mc7->width);
        while (n--) {
                int i;
                u64 val64 = 0;

                /* narrower memories need multiple backdoor reads per word */
                for (i = (1 << mc7->width) - 1; i >= 0; --i) {
                        int attempts = 10;
                        u32 val;

                        t3_write_reg(adap, mc7->offset + A_MC7_BD_ADDR, start);
                        t3_write_reg(adap, mc7->offset + A_MC7_BD_OP, 0);
                        val = t3_read_reg(adap, mc7->offset + A_MC7_BD_OP);
                        while ((val & F_BUSY) && attempts--)
                                val = t3_read_reg(adap,
                                                  mc7->offset + A_MC7_BD_OP);
                        if (val & F_BUSY)
                                return -EIO;

                        val = t3_read_reg(adap, mc7->offset + A_MC7_BD_DATA1);
                        if (mc7->width == 0) {
                                /* full-width: DATA0 holds the low 32 bits */
                                val64 = t3_read_reg(adap,
                                                    mc7->offset +
                                                    A_MC7_BD_DATA0);
                                val64 |= (u64) val << 32;
                        } else {
                                if (mc7->width > 1)
                                        val >>= shift[mc7->width];
                                val64 |= (u64) val << (step[mc7->width] * i);
                        }
                        start += 8;
                }
                *buf++ = val64;
        }
        return 0;
}
 192 
 193 
 194 
 195 
/*
 * Initialize the MI1 MDIO interface: derive the clock divisor from the
 * VPD core clock and target MDIO clock, and enable preamble (F_PREEN).
 */
static void mi1_init(struct adapter *adap, const struct adapter_info *ai)
{
        u32 clkdiv = adap->params.vpd.cclk / (2 * adap->params.vpd.mdc) - 1;
        u32 val = F_PREEN | V_CLKDIV(clkdiv);

        t3_write_reg(adap, A_MI1_CFG, val);
}
 203 
#define MDIO_ATTEMPTS 20        /* max polls for an MI1 MDIO op to finish */
 205 
 206 
 207 
 208 
/*
 * MI1 read for directly connected (clause 22) PHYs.  Selects ST=1 framing
 * before issuing the read op; serialized via adapter->mdio_lock.  Returns
 * the (non-negative) register value or a negative error.
 */
static int t3_mi1_read(struct net_device *dev, int phy_addr, int mmd_addr,
                       u16 reg_addr)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        int ret;
        u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);

        mutex_lock(&adapter->mdio_lock);
        t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), V_ST(1));
        t3_write_reg(adapter, A_MI1_ADDR, addr);
        t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(2));
        ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
        if (!ret)
                ret = t3_read_reg(adapter, A_MI1_DATA);
        mutex_unlock(&adapter->mdio_lock);
        return ret;
}
 227 
/*
 * MI1 write for directly connected (clause 22) PHYs.  Mirrors t3_mi1_read:
 * ST=1 framing, op code 1 (write), serialized via adapter->mdio_lock.
 */
static int t3_mi1_write(struct net_device *dev, int phy_addr, int mmd_addr,
                        u16 reg_addr, u16 val)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        int ret;
        u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);

        mutex_lock(&adapter->mdio_lock);
        t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), V_ST(1));
        t3_write_reg(adapter, A_MI1_ADDR, addr);
        t3_write_reg(adapter, A_MI1_DATA, val);
        t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
        ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
        mutex_unlock(&adapter->mdio_lock);
        return ret;
}
 245 
/* MDIO operations for clause-22 PHYs */
static const struct mdio_ops mi1_mdio_ops = {
        .read = t3_mi1_read,
        .write = t3_mi1_write,
        .mode_support = MDIO_SUPPORTS_C22
};
 251 
 252 
 253 
 254 
 255 
/*
 * Issue the address cycle of a clause-45 MDIO transaction (ST=0 framing).
 * Caller must hold adapter->mdio_lock.
 */
static int mi1_wr_addr(struct adapter *adapter, int phy_addr, int mmd_addr,
                       int reg_addr)
{
        u32 addr = V_REGADDR(mmd_addr) | V_PHYADDR(phy_addr);

        t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), 0);
        t3_write_reg(adapter, A_MI1_ADDR, addr);
        t3_write_reg(adapter, A_MI1_DATA, reg_addr);
        t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(0));
        return t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
                               MDIO_ATTEMPTS, 10);
}
 268 
 269 
 270 
 271 
/*
 * MI1 read for clause-45 PHYs: an address cycle followed by a read op
 * (op code 3).  Returns the register value or a negative error.
 */
static int mi1_ext_read(struct net_device *dev, int phy_addr, int mmd_addr,
                        u16 reg_addr)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        int ret;

        mutex_lock(&adapter->mdio_lock);
        ret = mi1_wr_addr(adapter, phy_addr, mmd_addr, reg_addr);
        if (!ret) {
                t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(3));
                ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
                                      MDIO_ATTEMPTS, 10);
                if (!ret)
                        ret = t3_read_reg(adapter, A_MI1_DATA);
        }
        mutex_unlock(&adapter->mdio_lock);
        return ret;
}
 291 
/*
 * MI1 write for clause-45 PHYs: an address cycle followed by a write op
 * (op code 1).
 */
static int mi1_ext_write(struct net_device *dev, int phy_addr, int mmd_addr,
                         u16 reg_addr, u16 val)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        int ret;

        mutex_lock(&adapter->mdio_lock);
        ret = mi1_wr_addr(adapter, phy_addr, mmd_addr, reg_addr);
        if (!ret) {
                t3_write_reg(adapter, A_MI1_DATA, val);
                t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
                ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
                                      MDIO_ATTEMPTS, 10);
        }
        mutex_unlock(&adapter->mdio_lock);
        return ret;
}
 310 
/* MDIO operations for clause-45 PHYs, with clause-22 emulation */
static const struct mdio_ops mi1_mdio_ext_ops = {
        .read = mi1_ext_read,
        .write = mi1_ext_write,
        .mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22
};
 316 
 317 
 318 
 319 
 320 
 321 
 322 
 323 
 324 
 325 
 326 
 327 
 328 int t3_mdio_change_bits(struct cphy *phy, int mmd, int reg, unsigned int clear,
 329                         unsigned int set)
 330 {
 331         int ret;
 332         unsigned int val;
 333 
 334         ret = t3_mdio_read(phy, mmd, reg, &val);
 335         if (!ret) {
 336                 val &= ~clear;
 337                 ret = t3_mdio_write(phy, mmd, reg, val | set);
 338         }
 339         return ret;
 340 }
 341 
 342 
 343 
 344 
 345 
 346 
 347 
 348 
 349 
 350 
 351 
/*
 * t3_phy_reset - reset a PHY block
 * @phy: the PHY to operate on
 * @mmd: the device address of the PHY block to reset
 * @wait: how long to wait for the reset to complete in ms
 *
 * Resets a PHY block and optionally waits for the reset to complete.
 * @mmd should be 0 for 10/100/1000 PHYs and the device address to reset
 * for 10G PHYs.  Also clears the low-power bit while asserting reset.
 */
int t3_phy_reset(struct cphy *phy, int mmd, int wait)
{
        int err;
        unsigned int ctl;

        err = t3_mdio_change_bits(phy, mmd, MDIO_CTRL1, MDIO_CTRL1_LPOWER,
                                  MDIO_CTRL1_RESET);
        if (err || !wait)
                return err;

        /* poll until the self-clearing reset bit drops or @wait expires */
        do {
                err = t3_mdio_read(phy, mmd, MDIO_CTRL1, &ctl);
                if (err)
                        return err;
                ctl &= MDIO_CTRL1_RESET;
                if (ctl)
                        msleep(1);
        } while (ctl && --wait);

        return ctl ? -1 : 0;
}
 373 
 374 
 375 
 376 
 377 
 378 
 379 
 380 
 381 
 382 int t3_phy_advertise(struct cphy *phy, unsigned int advert)
 383 {
 384         int err;
 385         unsigned int val = 0;
 386 
 387         err = t3_mdio_read(phy, MDIO_DEVAD_NONE, MII_CTRL1000, &val);
 388         if (err)
 389                 return err;
 390 
 391         val &= ~(ADVERTISE_1000HALF | ADVERTISE_1000FULL);
 392         if (advert & ADVERTISED_1000baseT_Half)
 393                 val |= ADVERTISE_1000HALF;
 394         if (advert & ADVERTISED_1000baseT_Full)
 395                 val |= ADVERTISE_1000FULL;
 396 
 397         err = t3_mdio_write(phy, MDIO_DEVAD_NONE, MII_CTRL1000, val);
 398         if (err)
 399                 return err;
 400 
 401         val = 1;
 402         if (advert & ADVERTISED_10baseT_Half)
 403                 val |= ADVERTISE_10HALF;
 404         if (advert & ADVERTISED_10baseT_Full)
 405                 val |= ADVERTISE_10FULL;
 406         if (advert & ADVERTISED_100baseT_Half)
 407                 val |= ADVERTISE_100HALF;
 408         if (advert & ADVERTISED_100baseT_Full)
 409                 val |= ADVERTISE_100FULL;
 410         if (advert & ADVERTISED_Pause)
 411                 val |= ADVERTISE_PAUSE_CAP;
 412         if (advert & ADVERTISED_Asym_Pause)
 413                 val |= ADVERTISE_PAUSE_ASYM;
 414         return t3_mdio_write(phy, MDIO_DEVAD_NONE, MII_ADVERTISE, val);
 415 }
 416 
 417 
 418 
 419 
 420 
 421 
 422 
 423 
 424 
 425 int t3_phy_advertise_fiber(struct cphy *phy, unsigned int advert)
 426 {
 427         unsigned int val = 0;
 428 
 429         if (advert & ADVERTISED_1000baseT_Half)
 430                 val |= ADVERTISE_1000XHALF;
 431         if (advert & ADVERTISED_1000baseT_Full)
 432                 val |= ADVERTISE_1000XFULL;
 433         if (advert & ADVERTISED_Pause)
 434                 val |= ADVERTISE_1000XPAUSE;
 435         if (advert & ADVERTISED_Asym_Pause)
 436                 val |= ADVERTISE_1000XPSE_ASYM;
 437         return t3_mdio_write(phy, MDIO_DEVAD_NONE, MII_ADVERTISE, val);
 438 }
 439 
 440 
 441 
 442 
 443 
 444 
 445 
 446 
 447 
 448 
 449 int t3_set_phy_speed_duplex(struct cphy *phy, int speed, int duplex)
 450 {
 451         int err;
 452         unsigned int ctl;
 453 
 454         err = t3_mdio_read(phy, MDIO_DEVAD_NONE, MII_BMCR, &ctl);
 455         if (err)
 456                 return err;
 457 
 458         if (speed >= 0) {
 459                 ctl &= ~(BMCR_SPEED100 | BMCR_SPEED1000 | BMCR_ANENABLE);
 460                 if (speed == SPEED_100)
 461                         ctl |= BMCR_SPEED100;
 462                 else if (speed == SPEED_1000)
 463                         ctl |= BMCR_SPEED1000;
 464         }
 465         if (duplex >= 0) {
 466                 ctl &= ~(BMCR_FULLDPLX | BMCR_ANENABLE);
 467                 if (duplex == DUPLEX_FULL)
 468                         ctl |= BMCR_FULLDPLX;
 469         }
 470         if (ctl & BMCR_SPEED1000) 
 471                 ctl |= BMCR_ANENABLE;
 472         return t3_mdio_write(phy, MDIO_DEVAD_NONE, MII_BMCR, ctl);
 473 }
 474 
/* Enable the PHY's link-status (LASI) alarm interrupt in the PMA/PMD MMD. */
int t3_phy_lasi_intr_enable(struct cphy *phy)
{
        return t3_mdio_write(phy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_CTRL,
                             MDIO_PMA_LASI_LSALARM);
}
 480 
/* Disable all LASI interrupts by clearing the LASI control register. */
int t3_phy_lasi_intr_disable(struct cphy *phy)
{
        return t3_mdio_write(phy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_CTRL, 0);
}
 485 
/*
 * Clear a pending LASI interrupt.  The latched status register is
 * read-to-clear, so a read (with the value discarded) suffices.
 */
int t3_phy_lasi_intr_clear(struct cphy *phy)
{
        u32 val;

        return t3_mdio_read(phy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_STAT, &val);
}
 492 
 493 int t3_phy_lasi_intr_handler(struct cphy *phy)
 494 {
 495         unsigned int status;
 496         int err = t3_mdio_read(phy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_STAT,
 497                                &status);
 498 
 499         if (err)
 500                 return err;
 501         return (status & MDIO_PMA_LASI_LSALARM) ? cphy_cause_link_change : 0;
 502 }
 503 
/*
 * Static per-board configuration (GPIO output enables/values, GPIO
 * interrupt pins, extra supported link modes, MDIO ops and board name),
 * indexed by board id -- see t3_get_adapter_info().
 */
static const struct adapter_info t3_adap_info[] = {
        {1, 1, 0,
         F_GPIO2_OEN | F_GPIO4_OEN |
         F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, { S_GPIO3, S_GPIO5 }, 0,
         &mi1_mdio_ops, "Chelsio PE9000"},
        {1, 1, 0,
         F_GPIO2_OEN | F_GPIO4_OEN |
         F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, { S_GPIO3, S_GPIO5 }, 0,
         &mi1_mdio_ops, "Chelsio T302"},
        {1, 0, 0,
         F_GPIO1_OEN | F_GPIO6_OEN | F_GPIO7_OEN | F_GPIO10_OEN |
         F_GPIO11_OEN | F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
         { 0 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
         &mi1_mdio_ext_ops, "Chelsio T310"},
        {1, 1, 0,
         F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO5_OEN | F_GPIO6_OEN |
         F_GPIO7_OEN | F_GPIO10_OEN | F_GPIO11_OEN | F_GPIO1_OUT_VAL |
         F_GPIO5_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
         { S_GPIO9, S_GPIO3 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
         &mi1_mdio_ext_ops, "Chelsio T320"},
        {},     /* unused board ids */
        {},
        {1, 0, 0,
         F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO6_OEN | F_GPIO7_OEN |
         F_GPIO10_OEN | F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
         { S_GPIO9 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
         &mi1_mdio_ext_ops, "Chelsio T310" },
        {1, 0, 0,
         F_GPIO1_OEN | F_GPIO6_OEN | F_GPIO7_OEN |
         F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL,
         { S_GPIO9 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
         &mi1_mdio_ext_ops, "Chelsio N320E-G2" },
};
 537 
 538 
 539 
 540 
 541 
 542 const struct adapter_info *t3_get_adapter_info(unsigned int id)
 543 {
 544         return id < ARRAY_SIZE(t3_adap_info) ? &t3_adap_info[id] : NULL;
 545 }
 546 
/* Per-port-type hook used to attach the right PHY driver. */
struct port_type_info {
        int (*phy_prep)(struct cphy *phy, struct adapter *adapter,
                        int phy_addr, const struct mdio_ops *ops);
};

/* PHY preparation routines indexed by VPD port type; NULL = unsupported. */
static const struct port_type_info port_types[] = {
        { NULL },
        { t3_ael1002_phy_prep },
        { t3_vsc8211_phy_prep },
        { NULL},
        { t3_xaui_direct_phy_prep },
        { t3_ael2005_phy_prep },
        { t3_qt2045_phy_prep },
        { t3_ael1006_phy_prep },
        { NULL },
        { t3_aq100x_phy_prep },
        { t3_ael2020_phy_prep },
};
 565 
/* One VPD-R keyword entry: 2-byte keyword, 1-byte length, fixed data. */
#define VPD_ENTRY(name, len) \
        u8 name##_kword[2]; u8 name##_len; u8 name##_data[len]

/*
 * Partial EEPROM Vital Product Data structure.  Includes only the ID and
 * VPD-R sections.
 */
struct t3_vpd {
        u8 id_tag;
        u8 id_len[2];
        u8 id_data[16];
        u8 vpdr_tag;
        u8 vpdr_len[2];
        VPD_ENTRY(pn, 16);      /* part number */
        VPD_ENTRY(ec, 16);      /* EC level */
        VPD_ENTRY(sn, SERNUM_LEN); /* serial number */
        VPD_ENTRY(na, 12);      /* MAC address base (hex string) */
        VPD_ENTRY(cclk, 6);     /* core clock */
        VPD_ENTRY(mclk, 6);     /* mem clock */
        VPD_ENTRY(uclk, 6);     /* uP clock */
        VPD_ENTRY(mdc, 6);      /* MDIO clock */
        VPD_ENTRY(mt, 2);       /* mem timing */
        VPD_ENTRY(xaui0cfg, 6); /* XAUI0 config */
        VPD_ENTRY(xaui1cfg, 6); /* XAUI1 config */
        VPD_ENTRY(port0, 2);    /* PHY0 complex */
        VPD_ENTRY(port1, 2);    /* PHY1 complex */
        VPD_ENTRY(port2, 2);    /* PHY2 complex */
        VPD_ENTRY(port3, 2);    /* PHY3 complex */
        VPD_ENTRY(rv, 1);       /* checksum */
        u32 pad;                /* pad struct size to a multiple of 4 */
};
 597 
#define EEPROM_MAX_POLL   40    /* max polls of the VPD flag bit */
#define EEPROM_STAT_ADDR  0x4000        /* EEPROM status-word address */
#define VPD_BASE          0xc00 /* usual EEPROM offset of the VPD data */
 601 
 602 
 603 
 604 
 605 
 606 
 607 
 608 
 609 
 610 
 611 
 612 
/*
 * t3_seeprom_read - read a VPD EEPROM location
 * @adapter: adapter to read
 * @addr: EEPROM address (must be 4-byte aligned)
 * @data: where to store the read data (little-endian)
 *
 * Read a 32-bit word from a location in VPD EEPROM using the card's PCI
 * VPD ROM capability.  A zero is written to the flag bit when the
 * address is written to the control register.  The hardware device will
 * set the flag to 1 when 4 bytes have been read into the data register.
 */
int t3_seeprom_read(struct adapter *adapter, u32 addr, __le32 *data)
{
        u16 val;
        int attempts = EEPROM_MAX_POLL;
        u32 v;
        unsigned int base = adapter->params.pci.vpd_cap_addr;

        if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
                return -EINVAL;

        pci_write_config_word(adapter->pdev, base + PCI_VPD_ADDR, addr);
        do {
                udelay(10);
                pci_read_config_word(adapter->pdev, base + PCI_VPD_ADDR, &val);
        } while (!(val & PCI_VPD_ADDR_F) && --attempts);

        if (!(val & PCI_VPD_ADDR_F)) {
                CH_ERR(adapter, "reading EEPROM address 0x%x failed\n", addr);
                return -EIO;
        }
        pci_read_config_dword(adapter->pdev, base + PCI_VPD_DATA, &v);
        *data = cpu_to_le32(v);
        return 0;
}
 637 
 638 
 639 
 640 
 641 
 642 
 643 
 644 
 645 
 646 
/*
 * t3_seeprom_write - write a VPD EEPROM location
 * @adapter: adapter to write
 * @addr: EEPROM address (must be 4-byte aligned)
 * @data: value to write (little-endian)
 *
 * Write a 32-bit word to a location in VPD EEPROM using the card's PCI
 * VPD ROM capability.  The flag bit is set with the address write and
 * cleared by the hardware when the write has completed.
 */
int t3_seeprom_write(struct adapter *adapter, u32 addr, __le32 data)
{
        u16 val;
        int attempts = EEPROM_MAX_POLL;
        unsigned int base = adapter->params.pci.vpd_cap_addr;

        if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
                return -EINVAL;

        pci_write_config_dword(adapter->pdev, base + PCI_VPD_DATA,
                               le32_to_cpu(data));
        pci_write_config_word(adapter->pdev,base + PCI_VPD_ADDR,
                              addr | PCI_VPD_ADDR_F);
        do {
                msleep(1);
                pci_read_config_word(adapter->pdev, base + PCI_VPD_ADDR, &val);
        } while ((val & PCI_VPD_ADDR_F) && --attempts);

        if (val & PCI_VPD_ADDR_F) {
                CH_ERR(adapter, "write to EEPROM address 0x%x failed\n", addr);
                return -EIO;
        }
        return 0;
}
 671 
 672 
 673 
 674 
 675 
 676 
 677 
 678 
 679 int t3_seeprom_wp(struct adapter *adapter, int enable)
 680 {
 681         return t3_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
 682 }
 683 
/*
 * Copy a fixed-length, non NUL-terminated VPD field to a local buffer,
 * trim whitespace and parse it as an unsigned int in the given base.
 * @len is a u8 (max 255), so the 256-byte buffer cannot overflow.
 */
static int vpdstrtouint(char *s, u8 len, unsigned int base, unsigned int *val)
{
        char tok[256];

        memcpy(tok, s, len);
        tok[len] = 0;
        return kstrtouint(strim(tok), base, val);
}
 692 
/* Same as vpdstrtouint() but parses the VPD field into a u16. */
static int vpdstrtou16(char *s, u8 len, unsigned int base, u16 *val)
{
        char tok[256];

        memcpy(tok, s, len);
        tok[len] = 0;
        return kstrtou16(strim(tok), base, val);
}
 701 
 702 
 703 
 704 
 705 
 706 
 707 
 708 
/*
 * get_vpd_params - read VPD parameters from VPD EEPROM
 * @adapter: adapter to read
 * @p: where to store the parameters
 *
 * Reads card parameters stored in VPD EEPROM: clocks, memory timing,
 * serial number, port types/configs and the base MAC address.
 */
static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
{
        int i, addr, ret;
        struct t3_vpd vpd;

        /*
         * Card information is normally at VPD_BASE, but some early cards
         * had it at 0; probe for the 0x82 ID string tag to decide.
         */
        ret = t3_seeprom_read(adapter, VPD_BASE, (__le32 *)&vpd);
        if (ret)
                return ret;
        addr = vpd.id_tag == 0x82 ? VPD_BASE : 0;

        for (i = 0; i < sizeof(vpd); i += 4) {
                ret = t3_seeprom_read(adapter, addr + i,
                                      (__le32 *)((u8 *)&vpd + i));
                if (ret)
                        return ret;
        }

        ret = vpdstrtouint(vpd.cclk_data, vpd.cclk_len, 10, &p->cclk);
        if (ret)
                return ret;
        ret = vpdstrtouint(vpd.mclk_data, vpd.mclk_len, 10, &p->mclk);
        if (ret)
                return ret;
        ret = vpdstrtouint(vpd.uclk_data, vpd.uclk_len, 10, &p->uclk);
        if (ret)
                return ret;
        ret = vpdstrtouint(vpd.mdc_data, vpd.mdc_len, 10, &p->mdc);
        if (ret)
                return ret;
        ret = vpdstrtouint(vpd.mt_data, vpd.mt_len, 10, &p->mem_timing);
        if (ret)
                return ret;
        memcpy(p->sn, vpd.sn_data, SERNUM_LEN);

        /* Old eeproms didn't have port information */
        if (adapter->params.rev == 0 && !vpd.port0_data[0]) {
                p->port_type[0] = uses_xaui(adapter) ? 1 : 2;
                p->port_type[1] = uses_xaui(adapter) ? 6 : 2;
        } else {
                p->port_type[0] = hex_to_bin(vpd.port0_data[0]);
                p->port_type[1] = hex_to_bin(vpd.port1_data[0]);
                ret = vpdstrtou16(vpd.xaui0cfg_data, vpd.xaui0cfg_len, 16,
                                  &p->xauicfg[0]);
                if (ret)
                        return ret;
                ret = vpdstrtou16(vpd.xaui1cfg_data, vpd.xaui1cfg_len, 16,
                                  &p->xauicfg[1]);
                if (ret)
                        return ret;
        }

        /* Base MAC address is stored as a 12-char hex string in 'na'. */
        ret = hex2bin(p->eth_base, vpd.na_data, 6);
        if (ret < 0)
                return -EINVAL;
        return 0;
}
 769 
 770 
/* serial flash and firmware constants */
enum {
        SF_ATTEMPTS = 5,        /* max retries for SF1 operations */
        SF_SEC_SIZE = 64 * 1024,        /* serial flash sector size */
        SF_SIZE = SF_SEC_SIZE * 8,      /* serial flash size */

        /* flash command opcodes */
        SF_PROG_PAGE = 2,       /* program page */
        SF_WR_DISABLE = 4,      /* disable writes */
        SF_RD_STATUS = 5,       /* read status register */
        SF_WR_ENABLE = 6,       /* enable writes */
        SF_RD_DATA_FAST = 0xb,  /* read flash */
        SF_ERASE_SECTOR = 0xd8, /* erase sector */

        FW_FLASH_BOOT_ADDR = 0x70000,   /* start address of FW in flash */
        FW_VERS_ADDR = 0x7fffc,    /* flash address holding FW version */
        FW_MIN_SIZE = 8            /* at least version and csum */
};
 788 
 789 
 790 
 791 
 792 
 793 
 794 
 795 
 796 
 797 
 798 
 799 
/*
 * sf1_read - read data from the serial flash
 * @adapter: the adapter
 * @byte_cnt: number of bytes to read (1-4)
 * @cont: whether another flash operation will be chained
 * @valp: where to store the read data
 *
 * Reads up to 4 bytes of data from the serial flash.  The location of
 * the read needs to be specified prior to calling this by issuing the
 * appropriate commands to the serial flash.
 */
static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
                    u32 *valp)
{
        int ret;

        if (!byte_cnt || byte_cnt > 4)
                return -EINVAL;
        if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
                return -EBUSY;
        t3_write_reg(adapter, A_SF_OP, V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
        ret = t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
        if (!ret)
                *valp = t3_read_reg(adapter, A_SF_DATA);
        return ret;
}
 815 
 816 
 817 
 818 
 819 
 820 
 821 
 822 
 823 
 824 
 825 
 826 
/*
 * sf1_write - write data to the serial flash
 * @adapter: the adapter
 * @byte_cnt: number of bytes to write (1-4)
 * @cont: whether another flash operation will be chained
 * @val: value to write
 *
 * Writes up to 4 bytes of data to the serial flash.  The location of
 * the write needs to be specified prior to calling this by issuing the
 * appropriate commands to the serial flash.
 */
static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
                     u32 val)
{
        if (!byte_cnt || byte_cnt > 4)
                return -EINVAL;
        if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
                return -EBUSY;
        t3_write_reg(adapter, A_SF_DATA, val);
        t3_write_reg(adapter, A_SF_OP,
                     V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
        return t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
}
 839 
 840 
 841 
 842 
 843 
 844 
 845 
 846 
 847 
 848 static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
 849 {
 850         int ret;
 851         u32 status;
 852 
 853         while (1) {
 854                 if ((ret = sf1_write(adapter, 1, 1, SF_RD_STATUS)) != 0 ||
 855                     (ret = sf1_read(adapter, 1, 0, &status)) != 0)
 856                         return ret;
 857                 if (!(status & 1))
 858                         return 0;
 859                 if (--attempts == 0)
 860                         return -EAGAIN;
 861                 if (delay)
 862                         msleep(delay);
 863         }
 864 }
 865 
 866 
 867 
 868 
 869 
 870 
 871 
 872 
 873 
 874 
 875 
 876 
 877 
 878 
/*
 * t3_read_flash - read words from serial flash
 * @adapter: the adapter
 * @addr: the start address for the read (must be 4-byte aligned)
 * @nwords: how many 32-bit words to read
 * @data: where to store the read data
 * @byte_oriented: whether to store data as bytes or as words
 *
 * Read the specified number of 32-bit words from the serial flash.
 * If @byte_oriented is set the read data is stored as a byte array
 * (i.e., big-endian), otherwise as 32-bit words in the platform's
 * byte order.
 */
static int t3_read_flash(struct adapter *adapter, unsigned int addr,
                         unsigned int nwords, u32 *data, int byte_oriented)
{
        int ret;

        if (addr + nwords * sizeof(u32) > SF_SIZE || (addr & 3))
                return -EINVAL;

        /* command byte (SF_RD_DATA_FAST) followed by big-endian address */
        addr = swab32(addr) | SF_RD_DATA_FAST;

        if ((ret = sf1_write(adapter, 4, 1, addr)) != 0 ||
            (ret = sf1_read(adapter, 1, 1, data)) != 0)
                return ret;

        for (; nwords; nwords--, data++) {
                ret = sf1_read(adapter, 4, nwords > 1, data);
                if (ret)
                        return ret;
                if (byte_oriented)
                        *data = htonl(*data);
        }
        return 0;
}
 902 
 903 
 904 
 905 
 906 
 907 
 908 
 909 
 910 
 911 
 912 
/*
 * t3_write_flash - write up to a page of data to the serial flash
 * @adapter: the adapter
 * @addr: the start address to write
 * @n: length of data to write
 * @data: the data to write
 *
 * Writes up to a page of data (256 bytes) to the serial flash starting
 * at the given address, then reads the page back to verify the write.
 */
static int t3_write_flash(struct adapter *adapter, unsigned int addr,
                          unsigned int n, const u8 *data)
{
        int ret;
        u32 buf[64];
        unsigned int i, c, left, val, offset = addr & 0xff;

        /* the write must not span a 256-byte flash page boundary */
        if (addr + n > SF_SIZE || offset + n > 256)
                return -EINVAL;

        val = swab32(addr) | SF_PROG_PAGE;

        if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
            (ret = sf1_write(adapter, 4, 1, val)) != 0)
                return ret;

        /* stream the payload 4 bytes at a time, big-endian within a word */
        for (left = n; left; left -= c) {
                c = min(left, 4U);
                for (val = 0, i = 0; i < c; ++i)
                        val = (val << 8) + *data++;

                ret = sf1_write(adapter, c, c != left, val);
                if (ret)
                        return ret;
        }
        if ((ret = flash_wait_op(adapter, 5, 1)) != 0)
                return ret;

        /* Read the page to verify the write succeeded */
        ret = t3_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
        if (ret)
                return ret;

        /* data was advanced by n above, so data - n is the original start */
        if (memcmp(data - n, (u8 *) buf + offset, n))
                return -EIO;
        return 0;
}
 950 
 951 
 952 
 953 
 954 
 955 
 956 
 957 
/*
 * t3_get_tp_version - read the protocol SRAM version
 * @adapter: the adapter
 * @vers: where to place the version
 *
 * Reads the protocol SRAM version via the TP embedded-operation registers.
 */
int t3_get_tp_version(struct adapter *adapter, u32 *vers)
{
        int ret;

        /* Get version loaded in SRAM */
        t3_write_reg(adapter, A_TP_EMBED_OP_FIELD0, 0);
        ret = t3_wait_op_done(adapter, A_TP_EMBED_OP_FIELD0,
                              1, 1, 5, 1);
        if (ret)
                return ret;

        *vers = t3_read_reg(adapter, A_TP_EMBED_OP_FIELD1);

        return 0;
}
 973 
 974 
 975 
 976 
 977 
 978 
 979 
 980 int t3_check_tpsram_version(struct adapter *adapter)
 981 {
 982         int ret;
 983         u32 vers;
 984         unsigned int major, minor;
 985 
 986         if (adapter->params.rev == T3_REV_A)
 987                 return 0;
 988 
 989 
 990         ret = t3_get_tp_version(adapter, &vers);
 991         if (ret)
 992                 return ret;
 993 
 994         major = G_TP_VERSION_MAJOR(vers);
 995         minor = G_TP_VERSION_MINOR(vers);
 996 
 997         if (major == TP_VERSION_MAJOR && minor == TP_VERSION_MINOR)
 998                 return 0;
 999         else {
1000                 CH_ERR(adapter, "found wrong TP version (%u.%u), "
1001                        "driver compiled for version %d.%d\n", major, minor,
1002                        TP_VERSION_MAJOR, TP_VERSION_MINOR);
1003         }
1004         return -EINVAL;
1005 }
1006 
1007 
1008 
1009 
1010 
1011 
1012 
1013 
1014 
1015 
1016 
1017 int t3_check_tpsram(struct adapter *adapter, const u8 *tp_sram,
1018                     unsigned int size)
1019 {
1020         u32 csum;
1021         unsigned int i;
1022         const __be32 *p = (const __be32 *)tp_sram;
1023 
1024         
1025         for (csum = 0, i = 0; i < size / sizeof(csum); i++)
1026                 csum += ntohl(p[i]);
1027         if (csum != 0xffffffff) {
1028                 CH_ERR(adapter, "corrupted protocol SRAM image, checksum %u\n",
1029                        csum);
1030                 return -EINVAL;
1031         }
1032 
1033         return 0;
1034 }
1035 
/*
 * Discriminator stored in the firmware version word's type field;
 * t3_check_fw_version() expects FW_VERSION_T3.
 */
enum fw_version_type {
        FW_VERSION_N3,
        FW_VERSION_T3
};
1040 
1041 
1042 
1043 
1044 
1045 
1046 
1047 
/*
 * t3_get_fw_version - read the firmware version
 * @adapter: the adapter
 * @vers: filled in with the firmware version
 *
 * Reads the 32-bit firmware version word from serial flash at
 * FW_VERS_ADDR.  Returns the result of t3_read_flash().
 */
int t3_get_fw_version(struct adapter *adapter, u32 *vers)
{
        return t3_read_flash(adapter, FW_VERS_ADDR, 1, vers, 0);
}
1052 
1053 
1054 
1055 
1056 
1057 
1058 
1059 
1060 int t3_check_fw_version(struct adapter *adapter)
1061 {
1062         int ret;
1063         u32 vers;
1064         unsigned int type, major, minor;
1065 
1066         ret = t3_get_fw_version(adapter, &vers);
1067         if (ret)
1068                 return ret;
1069 
1070         type = G_FW_VERSION_TYPE(vers);
1071         major = G_FW_VERSION_MAJOR(vers);
1072         minor = G_FW_VERSION_MINOR(vers);
1073 
1074         if (type == FW_VERSION_T3 && major == FW_VERSION_MAJOR &&
1075             minor == FW_VERSION_MINOR)
1076                 return 0;
1077         else if (major != FW_VERSION_MAJOR || minor < FW_VERSION_MINOR)
1078                 CH_WARN(adapter, "found old FW minor version(%u.%u), "
1079                         "driver compiled for version %u.%u\n", major, minor,
1080                         FW_VERSION_MAJOR, FW_VERSION_MINOR);
1081         else {
1082                 CH_WARN(adapter, "found newer FW version(%u.%u), "
1083                         "driver compiled for version %u.%u\n", major, minor,
1084                         FW_VERSION_MAJOR, FW_VERSION_MINOR);
1085                 return 0;
1086         }
1087         return -EINVAL;
1088 }
1089 
1090 
1091 
1092 
1093 
1094 
1095 
1096 
1097 
1098 static int t3_flash_erase_sectors(struct adapter *adapter, int start, int end)
1099 {
1100         while (start <= end) {
1101                 int ret;
1102 
1103                 if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
1104                     (ret = sf1_write(adapter, 4, 0,
1105                                      SF_ERASE_SECTOR | (start << 8))) != 0 ||
1106                     (ret = flash_wait_op(adapter, 5, 500)) != 0)
1107                         return ret;
1108                 start++;
1109         }
1110         return 0;
1111 }
1112 
1113 
1114 
1115 
1116 
1117 
1118 
1119 
1120 
1121 
1122 
1123 
/*
 * t3_load_fw - download firmware to the adapter's serial flash
 * @adapter: the adapter
 * @fw_data: the firmware image to write
 * @size: image size in bytes
 *
 * Validates the image (word-aligned, at least FW_MIN_SIZE, fits in the
 * flash region between FW_FLASH_BOOT_ADDR and FW_VERS_ADDR + 8, and the
 * big-endian word sum equals 0xffffffff), erases the firmware sector,
 * then writes the image body in 256-byte chunks.  The trailing 8 bytes
 * are skipped in the main loop; the 4-byte word at FW_VERS_ADDR is
 * written last, after the body is in place.
 */
int t3_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size)
{
        u32 csum;
        unsigned int i;
        const __be32 *p = (const __be32 *)fw_data;
        int ret, addr, fw_sector = FW_FLASH_BOOT_ADDR >> 16;

        if ((size & 3) || size < FW_MIN_SIZE)
                return -EINVAL;
        if (size > FW_VERS_ADDR + 8 - FW_FLASH_BOOT_ADDR)
                return -EFBIG;

        /* a valid image sums to 0xffffffff over 32-bit big-endian words */
        for (csum = 0, i = 0; i < size / sizeof(csum); i++)
                csum += ntohl(p[i]);
        if (csum != 0xffffffff) {
                CH_ERR(adapter, "corrupted firmware image, checksum %u\n",
                       csum);
                return -EINVAL;
        }

        ret = t3_flash_erase_sectors(adapter, fw_sector, fw_sector);
        if (ret)
                goto out;

        size -= 8;              /* trim the 8-byte trailer for the body loop */
        for (addr = FW_FLASH_BOOT_ADDR; size;) {
                unsigned int chunk_size = min(size, 256U);

                ret = t3_write_flash(adapter, addr, chunk_size, fw_data);
                if (ret)
                        goto out;

                addr += chunk_size;
                fw_data += chunk_size;
                size -= chunk_size;
        }

        /* fw_data now points at the trailer; write the version word last */
        ret = t3_write_flash(adapter, FW_VERS_ADDR, 4, fw_data);
out:
        if (ret)
                CH_ERR(adapter, "firmware download failed, error %d\n", ret);
        return ret;
}
1167 
1168 #define CIM_CTL_BASE 0x2000
1169 
1170 
1171 
1172 
1173 
1174 
1175 
1176 
1177 
1178 
1179 
1180 int t3_cim_ctl_blk_read(struct adapter *adap, unsigned int addr,
1181                         unsigned int n, unsigned int *valp)
1182 {
1183         int ret = 0;
1184 
1185         if (t3_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
1186                 return -EBUSY;
1187 
1188         for ( ; !ret && n--; addr += 4) {
1189                 t3_write_reg(adap, A_CIM_HOST_ACC_CTRL, CIM_CTL_BASE + addr);
1190                 ret = t3_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
1191                                       0, 5, 2);
1192                 if (!ret)
1193                         *valp++ = t3_read_reg(adap, A_CIM_HOST_ACC_DATA);
1194         }
1195         return ret;
1196 }
1197 
/*
 * t3_gate_rx_traffic - temporarily block all MAC Rx traffic
 * @mac: the MAC to gate
 * @rx_cfg: returns the saved XGM Rx configuration
 * @rx_hash_high: returns the saved high half of the Rx hash table
 * @rx_hash_low: returns the saved low half of the Rx hash table
 *
 * Stops the MAC from accepting any frames by disabling the exact-match
 * filters, disabling broadcast/hash-multicast/promiscuous reception and
 * zeroing the hash tables.  The previous register values are returned
 * through the pointer arguments so t3_open_rx_traffic() can restore them.
 */
static void t3_gate_rx_traffic(struct cmac *mac, u32 *rx_cfg,
                               u32 *rx_hash_high, u32 *rx_hash_low)
{
        /* stop unicast traffic */
        t3_mac_disable_exact_filters(mac);

        /* stop broadcast, multicast and promiscuous-mode traffic */
        *rx_cfg = t3_read_reg(mac->adapter, A_XGM_RX_CFG);
        t3_set_reg_field(mac->adapter, A_XGM_RX_CFG,
                         F_ENHASHMCAST | F_DISBCAST | F_COPYALLFRAMES,
                         F_DISBCAST);

        *rx_hash_high = t3_read_reg(mac->adapter, A_XGM_RX_HASH_HIGH);
        t3_write_reg(mac->adapter, A_XGM_RX_HASH_HIGH, 0);

        *rx_hash_low = t3_read_reg(mac->adapter, A_XGM_RX_HASH_LOW);
        t3_write_reg(mac->adapter, A_XGM_RX_HASH_LOW, 0);

        /* allow time for in-flight frames to drain */
        msleep(1);
}
1219 
/*
 * t3_open_rx_traffic - restore MAC Rx traffic gated by t3_gate_rx_traffic()
 * @mac: the MAC to re-open
 * @rx_cfg: the XGM Rx configuration to restore
 * @rx_hash_high: the high half of the Rx hash table to restore
 * @rx_hash_low: the low half of the Rx hash table to restore
 *
 * Re-enables the exact-match filters and writes back the saved Rx
 * configuration and hash-table registers.
 */
static void t3_open_rx_traffic(struct cmac *mac, u32 rx_cfg,
                               u32 rx_hash_high, u32 rx_hash_low)
{
        t3_mac_enable_exact_filters(mac);
        t3_set_reg_field(mac->adapter, A_XGM_RX_CFG,
                         F_ENHASHMCAST | F_DISBCAST | F_COPYALLFRAMES,
                         rx_cfg);
        t3_write_reg(mac->adapter, A_XGM_RX_HASH_HIGH, rx_hash_high);
        t3_write_reg(mac->adapter, A_XGM_RX_HASH_LOW, rx_hash_low);
}
1230 
1231 
1232 
1233 
1234 
1235 
1236 
1237 
1238 
1239 
/*
 * t3_link_changed - handle interface link changes
 * @adapter: the adapter
 * @port_id: the port index whose link state may have changed
 *
 * Reads the current link state from the PHY, detects link faults that
 * latched while the link was down, reconciles flow control with the
 * requested settings, updates the cached link_config and the MAC, and
 * notifies the OS layer of any change.
 */
void t3_link_changed(struct adapter *adapter, int port_id)
{
        int link_ok, speed, duplex, fc;
        struct port_info *pi = adap2pinfo(adapter, port_id);
        struct cphy *phy = &pi->phy;
        struct cmac *mac = &pi->mac;
        struct link_config *lc = &pi->link_config;

        phy->ops->get_link_status(phy, &link_ok, &speed, &duplex, &fc);

        if (!lc->link_ok && link_ok) {
                u32 rx_cfg, rx_hash_high, rx_hash_low;
                u32 status;

                /*
                 * Link just came up: restart the MAC Rx path with traffic
                 * gated and sample the XGM status for a latched link fault.
                 */
                t3_xgm_intr_enable(adapter, port_id);
                t3_gate_rx_traffic(mac, &rx_cfg, &rx_hash_high, &rx_hash_low);
                t3_write_reg(adapter, A_XGM_RX_CTRL + mac->offset, 0);
                t3_mac_enable(mac, MAC_DIRECTION_RX);

                status = t3_read_reg(adapter, A_XGM_INT_STATUS + mac->offset);
                if (status & F_LINKFAULTCHANGE) {
                        mac->stats.link_faults++;
                        pi->link_fault = 1;
                }
                t3_open_rx_traffic(mac, rx_cfg, rx_hash_high, rx_hash_low);
        }

        /* honor negotiated pause only if autonegotiation was requested */
        if (lc->requested_fc & PAUSE_AUTONEG)
                fc &= lc->requested_fc;
        else
                fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);

        if (link_ok == lc->link_ok && speed == lc->speed &&
            duplex == lc->duplex && fc == lc->fc)
                return;                            /* nothing changed */

        /* on link transitions, reset the PCS and toggle the XAUI lanes */
        if (link_ok != lc->link_ok && adapter->params.rev > 0 &&
            uses_xaui(adapter)) {
                if (link_ok)
                        t3b_pcs_reset(mac);
                t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset,
                             link_ok ? F_TXACTENABLE | F_RXEN : 0);
        }
        lc->link_ok = link_ok;
        lc->speed = speed < 0 ? SPEED_INVALID : speed;
        lc->duplex = duplex < 0 ? DUPLEX_INVALID : duplex;

        if (link_ok && speed >= 0 && lc->autoneg == AUTONEG_ENABLE) {
                /* Set MAC speed, duplex, and flow control to match PHY. */
                t3_mac_set_speed_duplex_fc(mac, speed, duplex, fc);
                lc->fc = fc;
        }

        t3_os_link_changed(adapter, port_id, link_ok && !pi->link_fault,
                           speed, duplex, fc);
}
1296 
/*
 * t3_link_fault - handle a link-fault condition on a port
 * @adapter: the adapter
 * @port_id: the port index that reported the fault
 *
 * Restarts the MAC Rx path (with Rx traffic gated and, on rev > 0 XAUI
 * parts, the XAUI lanes disabled), then re-reads the XGM status to see
 * whether the fault is still latched.  If it is, the link is marked and
 * reported down; otherwise the cached link parameters are refreshed
 * from the PHY, the XAUI lanes are re-enabled if the link is up, and
 * the recovery is reported to the OS layer.
 */
void t3_link_fault(struct adapter *adapter, int port_id)
{
        struct port_info *pi = adap2pinfo(adapter, port_id);
        struct cmac *mac = &pi->mac;
        struct cphy *phy = &pi->phy;
        struct link_config *lc = &pi->link_config;
        int link_ok, speed, duplex, fc, link_fault;
        u32 rx_cfg, rx_hash_high, rx_hash_low;

        t3_gate_rx_traffic(mac, &rx_cfg, &rx_hash_high, &rx_hash_low);

        if (adapter->params.rev > 0 && uses_xaui(adapter))
                t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset, 0);

        t3_write_reg(adapter, A_XGM_RX_CTRL + mac->offset, 0);
        t3_mac_enable(mac, MAC_DIRECTION_RX);

        t3_open_rx_traffic(mac, rx_cfg, rx_hash_high, rx_hash_low);

        /* is the fault still latched after the Rx restart? */
        link_fault = t3_read_reg(adapter,
                                 A_XGM_INT_STATUS + mac->offset);
        link_fault &= F_LINKFAULTCHANGE;

        /* seed with the cached state, then refresh from the PHY */
        link_ok = lc->link_ok;
        speed = lc->speed;
        duplex = lc->duplex;
        fc = lc->fc;

        phy->ops->get_link_status(phy, &link_ok, &speed, &duplex, &fc);

        if (link_fault) {
                lc->link_ok = 0;
                lc->speed = SPEED_INVALID;
                lc->duplex = DUPLEX_INVALID;

                t3_os_link_fault(adapter, port_id, 0);

                /* count the fault only if the PHY still reports link up */
                if (link_ok)
                        mac->stats.link_faults++;
        } else {
                if (link_ok)
                        t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset,
                                     F_TXACTENABLE | F_RXEN);

                pi->link_fault = 0;
                lc->link_ok = (unsigned char)link_ok;
                lc->speed = speed < 0 ? SPEED_INVALID : speed;
                lc->duplex = duplex < 0 ? DUPLEX_INVALID : duplex;
                t3_os_link_fault(adapter, port_id, link_ok);
        }
}
1349 
1350 
1351 
1352 
1353 
1354 
1355 
1356 
1357 
1358 
1359 
1360 
1361 
1362 
/*
 * t3_link_start - apply link configuration to MAC/PHY
 * @phy: the PHY to set up
 * @mac: the MAC to set up
 * @lc: the requested link configuration
 *
 * Sets up a port's MAC and PHY according to the desired link settings:
 * - If the PHY supports autonegotiation, first build the pause
 *   advertisement from the requested flow control, then either force
 *   speed/duplex (autoneg disabled) or enable autonegotiation.
 * - If the PHY does not autonegotiate, program the MAC's flow control
 *   and just reset the PHY.
 * Returns 0.
 */
int t3_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc)
{
        unsigned int fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);

        lc->link_ok = 0;
        if (lc->supported & SUPPORTED_Autoneg) {
                /* translate requested pause into IEEE advertisement bits */
                lc->advertising &= ~(ADVERTISED_Asym_Pause | ADVERTISED_Pause);
                if (fc) {
                        lc->advertising |= ADVERTISED_Asym_Pause;
                        if (fc & PAUSE_RX)
                                lc->advertising |= ADVERTISED_Pause;
                }
                phy->ops->advertise(phy, lc->advertising);

                if (lc->autoneg == AUTONEG_DISABLE) {
                        lc->speed = lc->requested_speed;
                        lc->duplex = lc->requested_duplex;
                        lc->fc = (unsigned char)fc;
                        t3_mac_set_speed_duplex_fc(mac, lc->speed, lc->duplex,
                                                   fc);
                        /* Also disables autoneg */
                        phy->ops->set_speed_duplex(phy, lc->speed, lc->duplex);
                } else
                        phy->ops->autoneg_enable(phy);
        } else {
                t3_mac_set_speed_duplex_fc(mac, -1, -1, fc);
                lc->fc = (unsigned char)fc;
                phy->ops->reset(phy, 0);
        }
        return 0;
}
1394 
1395 
1396 
1397 
1398 
1399 
1400 
1401 
1402 
/*
 * t3_set_vlan_accel - control HW VLAN extraction
 * @adapter: the adapter
 * @ports: bitmap of adapter ports to operate on
 * @on: enable (non-zero) or disable (0) HW VLAN extraction
 *
 * Enables or disables hardware extraction of VLAN tags for the ports
 * selected in @ports by setting or clearing their VLANEXTRACTIONENABLE
 * bits in TP_OUT_CONFIG.
 */
void t3_set_vlan_accel(struct adapter *adapter, unsigned int ports, int on)
{
        t3_set_reg_field(adapter, A_TP_OUT_CONFIG,
                         ports << S_VLANEXTRACTIONENABLE,
                         on ? (ports << S_VLANEXTRACTIONENABLE) : 0);
}
1409 
/* One entry of a table-driven interrupt-cause description. */
struct intr_info {
        unsigned int mask;      /* bits to check in interrupt status */
        const char *msg;        /* message to print or NULL */
        short stat_idx;         /* stat counter to increment or -1 */
        unsigned short fatal;   /* whether the condition reported is fatal */
};
1416 
1417 
1418 
1419 
1420 
1421 
1422 
1423 
1424 
1425 
1426 
1427 
1428 
1429 
1430 
1431 
1432 static int t3_handle_intr_status(struct adapter *adapter, unsigned int reg,
1433                                  unsigned int mask,
1434                                  const struct intr_info *acts,
1435                                  unsigned long *stats)
1436 {
1437         int fatal = 0;
1438         unsigned int status = t3_read_reg(adapter, reg) & mask;
1439 
1440         for (; acts->mask; ++acts) {
1441                 if (!(status & acts->mask))
1442                         continue;
1443                 if (acts->fatal) {
1444                         fatal++;
1445                         CH_ALERT(adapter, "%s (0x%x)\n",
1446                                  acts->msg, status & acts->mask);
1447                         status &= ~acts->mask;
1448                 } else if (acts->msg)
1449                         CH_WARN(adapter, "%s (0x%x)\n",
1450                                 acts->msg, status & acts->mask);
1451                 if (acts->stat_idx >= 0)
1452                         stats[acts->stat_idx]++;
1453         }
1454         if (status)             
1455                 t3_write_reg(adapter, reg, status);
1456         return fatal;
1457 }
1458 
/*
 * Per-block interrupt-cause masks: the set of cause bits each handler
 * below considers for its hardware block.  PL_INTR_MASK collects the
 * top-level causes dispatched by the slow interrupt path.
 */
#define SGE_INTR_MASK (F_RSPQDISABLED | \
                       F_UC_REQ_FRAMINGERROR | F_R_REQ_FRAMINGERROR | \
                       F_CPPARITYERROR | F_OCPARITYERROR | F_RCPARITYERROR | \
                       F_IRPARITYERROR | V_ITPARITYERROR(M_ITPARITYERROR) | \
                       V_FLPARITYERROR(M_FLPARITYERROR) | F_LODRBPARITYERROR | \
                       F_HIDRBPARITYERROR | F_LORCQPARITYERROR | \
                       F_HIRCQPARITYERROR | F_LOPRIORITYDBFULL | \
                       F_HIPRIORITYDBFULL | F_LOPRIORITYDBEMPTY | \
                       F_HIPRIORITYDBEMPTY | F_HIPIODRBDROPERR | \
                       F_LOPIODRBDROPERR)
#define MC5_INTR_MASK (F_PARITYERR | F_ACTRGNFULL | F_UNKNOWNCMD | \
                       F_REQQPARERR | F_DISPQPARERR | F_DELACTEMPTY | \
                       F_NFASRCHFAIL)
#define MC7_INTR_MASK (F_AE | F_UE | F_CE | V_PE(M_PE))
#define XGM_INTR_MASK (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
                       V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR) | \
                       F_TXFIFO_UNDERRUN)
#define PCIX_INTR_MASK (F_MSTDETPARERR | F_SIGTARABT | F_RCVTARABT | \
                        F_RCVMSTABT | F_SIGSYSERR | F_DETPARERR | \
                        F_SPLCMPDIS | F_UNXSPLCMP | F_RCVSPLCMPERR | \
                        F_DETCORECCERR | F_DETUNCECCERR | F_PIOPARERR | \
                        V_WFPARERR(M_WFPARERR) | V_RFPARERR(M_RFPARERR) | \
                        V_CFPARERR(M_CFPARERR) )
#define PCIE_INTR_MASK (F_UNXSPLCPLERRR | F_UNXSPLCPLERRC | F_PCIE_PIOPARERR |\
                        F_PCIE_WFPARERR | F_PCIE_RFPARERR | F_PCIE_CFPARERR | \
                        /* MSI-X parity is deliberately not enabled here */ \
                        F_RETRYBUFPARERR | F_RETRYLUTPARERR | F_RXPARERR | \
                        F_TXPARERR | V_BISTERR(M_BISTERR))
#define ULPRX_INTR_MASK (F_PARERRDATA | F_PARERRPCMD | F_ARBPF1PERR | \
                         F_ARBPF0PERR | F_ARBFPERR | F_PCMDMUXPERR | \
                         F_DATASELFRAMEERR1 | F_DATASELFRAMEERR0)
#define ULPTX_INTR_MASK 0xfc
#define CPLSW_INTR_MASK (F_CIM_OP_MAP_PERR | F_TP_FRAMING_ERROR | \
                         F_SGE_FRAMING_ERROR | F_CIM_FRAMING_ERROR | \
                         F_ZERO_SWITCH_ERROR)
#define CIM_INTR_MASK (F_BLKWRPLINT | F_BLKRDPLINT | F_BLKWRCTLINT | \
                       F_BLKRDCTLINT | F_BLKWRFLASHINT | F_BLKRDFLASHINT | \
                       F_SGLWRFLASHINT | F_WRBLKFLASHINT | F_BLKWRBOOTINT | \
                       F_FLASHRANGEINT | F_SDRAMRANGEINT | F_RSVDSPACEINT | \
                       F_DRAMPARERR | F_ICACHEPARERR | F_DCACHEPARERR | \
                       F_OBQSGEPARERR | F_OBQULPHIPARERR | F_OBQULPLOPARERR | \
                       F_IBQSGELOPARERR | F_IBQSGEHIPARERR | F_IBQULPPARERR | \
                       F_IBQTPPARERR | F_ITAGPARERR | F_DTAGPARERR)
#define PMTX_INTR_MASK (F_ZERO_C_CMD_ERROR | ICSPI_FRM_ERR | OESPI_FRM_ERR | \
                        V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR) | \
                        V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR))
#define PMRX_INTR_MASK (F_ZERO_E_CMD_ERROR | IESPI_FRM_ERR | OCSPI_FRM_ERR | \
                        V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR) | \
                        V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR))
#define MPS_INTR_MASK (V_TX0TPPARERRENB(M_TX0TPPARERRENB) | \
                       V_TX1TPPARERRENB(M_TX1TPPARERRENB) | \
                       V_RXTPPARERRENB(M_RXTPPARERRENB) | \
                       V_MCAPARERRENB(M_MCAPARERRENB))
#define XGM_EXTRA_INTR_MASK (F_LINKFAULTCHANGE)
#define PL_INTR_MASK (F_T3DBG | F_XGMAC0_0 | F_XGMAC0_1 | F_MC5A | F_PM1_TX | \
                      F_PM1_RX | F_ULP2_TX | F_ULP2_RX | F_TP1 | F_CIM | \
                      F_MC7_CM | F_MC7_PMTX | F_MC7_PMRX | F_SGE3 | F_PCIM0 | \
                      F_MPS0 | F_CPL_SWITCH)
1517 
1518 
1519 
/*
 * PCI-X interrupt handler.  Every cause except correctable ECC errors
 * (counted in irq_stats) is treated as fatal.
 */
static void pci_intr_handler(struct adapter *adapter)
{
        static const struct intr_info pcix1_intr_info[] = {
                {F_MSTDETPARERR, "PCI master detected parity error", -1, 1},
                {F_SIGTARABT, "PCI signaled target abort", -1, 1},
                {F_RCVTARABT, "PCI received target abort", -1, 1},
                {F_RCVMSTABT, "PCI received master abort", -1, 1},
                {F_SIGSYSERR, "PCI signaled system error", -1, 1},
                {F_DETPARERR, "PCI detected parity error", -1, 1},
                {F_SPLCMPDIS, "PCI split completion discarded", -1, 1},
                {F_UNXSPLCMP, "PCI unexpected split completion error", -1, 1},
                {F_RCVSPLCMPERR, "PCI received split completion error", -1,
                 1},
                {F_DETCORECCERR, "PCI correctable ECC error",
                 STAT_PCI_CORR_ECC, 0},
                {F_DETUNCECCERR, "PCI uncorrectable ECC error", -1, 1},
                {F_PIOPARERR, "PCI PIO FIFO parity error", -1, 1},
                {V_WFPARERR(M_WFPARERR), "PCI write FIFO parity error", -1,
                 1},
                {V_RFPARERR(M_RFPARERR), "PCI read FIFO parity error", -1,
                 1},
                {V_CFPARERR(M_CFPARERR), "PCI command FIFO parity error", -1,
                 1},
                {V_MSIXPARERR(M_MSIXPARERR), "PCI MSI-X table/PBA parity "
                 "error", -1, 1},
                {0}
        };

        if (t3_handle_intr_status(adapter, A_PCIX_INT_CAUSE, PCIX_INTR_MASK,
                                  pcix1_intr_info, adapter->irq_stats))
                t3_fatal_err(adapter);
}
1552 
1553 
1554 
1555 
/*
 * PCI-E interrupt handler.  On a PEX error the detailed error code is
 * logged from A_PCIE_PEX_ERR before the cause table is processed; all
 * table entries are fatal.
 */
static void pcie_intr_handler(struct adapter *adapter)
{
        static const struct intr_info pcie_intr_info[] = {
                {F_PEXERR, "PCI PEX error", -1, 1},
                {F_UNXSPLCPLERRR,
                 "PCI unexpected split completion DMA read error", -1, 1},
                {F_UNXSPLCPLERRC,
                 "PCI unexpected split completion DMA command error", -1, 1},
                {F_PCIE_PIOPARERR, "PCI PIO FIFO parity error", -1, 1},
                {F_PCIE_WFPARERR, "PCI write FIFO parity error", -1, 1},
                {F_PCIE_RFPARERR, "PCI read FIFO parity error", -1, 1},
                {F_PCIE_CFPARERR, "PCI command FIFO parity error", -1, 1},
                {V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR),
                 "PCI MSI-X table/PBA parity error", -1, 1},
                {F_RETRYBUFPARERR, "PCI retry buffer parity error", -1, 1},
                {F_RETRYLUTPARERR, "PCI retry LUT parity error", -1, 1},
                {F_RXPARERR, "PCI Rx parity error", -1, 1},
                {F_TXPARERR, "PCI Tx parity error", -1, 1},
                {V_BISTERR(M_BISTERR), "PCI BIST error", -1, 1},
                {0}
        };

        if (t3_read_reg(adapter, A_PCIE_INT_CAUSE) & F_PEXERR)
                CH_ALERT(adapter, "PEX error code 0x%x\n",
                         t3_read_reg(adapter, A_PCIE_PEX_ERR));

        if (t3_handle_intr_status(adapter, A_PCIE_INT_CAUSE, PCIE_INTR_MASK,
                                  pcie_intr_info, adapter->irq_stats))
                t3_fatal_err(adapter);
}
1586 
1587 
1588 
1589 
/*
 * TP interrupt handler.  Selects the cause table by silicon revision:
 * T3C parts have a wider parity-error field and named FLM empty bits.
 */
static void tp_intr_handler(struct adapter *adapter)
{
        static const struct intr_info tp_intr_info[] = {
                {0xffffff, "TP parity error", -1, 1},
                {0x1000000, "TP out of Rx pages", -1, 1},
                {0x2000000, "TP out of Tx pages", -1, 1},
                {0}
        };

        static const struct intr_info tp_intr_info_t3c[] = {
                {0x1fffffff, "TP parity error", -1, 1},
                {F_FLMRXFLSTEMPTY, "TP out of Rx pages", -1, 1},
                {F_FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1},
                {0}
        };

        if (t3_handle_intr_status(adapter, A_TP_INT_CAUSE, 0xffffffff,
                                  adapter->params.rev < T3_REV_C ?
                                  tp_intr_info : tp_intr_info_t3c, NULL))
                t3_fatal_err(adapter);
}
1611 
1612 
1613 
1614 
/*
 * CIM interrupt handler.  All causes are fatal.
 */
static void cim_intr_handler(struct adapter *adapter)
{
        static const struct intr_info cim_intr_info[] = {
                {F_RSVDSPACEINT, "CIM reserved space write", -1, 1},
                {F_SDRAMRANGEINT, "CIM SDRAM address out of range", -1, 1},
                {F_FLASHRANGEINT, "CIM flash address out of range", -1, 1},
                {F_BLKWRBOOTINT, "CIM block write to boot space", -1, 1},
                {F_WRBLKFLASHINT, "CIM write to cached flash space", -1, 1},
                {F_SGLWRFLASHINT, "CIM single write to flash space", -1, 1},
                {F_BLKRDFLASHINT, "CIM block read from flash space", -1, 1},
                {F_BLKWRFLASHINT, "CIM block write to flash space", -1, 1},
                {F_BLKRDCTLINT, "CIM block read from CTL space", -1, 1},
                {F_BLKWRCTLINT, "CIM block write to CTL space", -1, 1},
                {F_BLKRDPLINT, "CIM block read from PL space", -1, 1},
                {F_BLKWRPLINT, "CIM block write to PL space", -1, 1},
                {F_DRAMPARERR, "CIM DRAM parity error", -1, 1},
                {F_ICACHEPARERR, "CIM icache parity error", -1, 1},
                {F_DCACHEPARERR, "CIM dcache parity error", -1, 1},
                {F_OBQSGEPARERR, "CIM OBQ SGE parity error", -1, 1},
                {F_OBQULPHIPARERR, "CIM OBQ ULPHI parity error", -1, 1},
                {F_OBQULPLOPARERR, "CIM OBQ ULPLO parity error", -1, 1},
                {F_IBQSGELOPARERR, "CIM IBQ SGELO parity error", -1, 1},
                {F_IBQSGEHIPARERR, "CIM IBQ SGEHI parity error", -1, 1},
                {F_IBQULPPARERR, "CIM IBQ ULP parity error", -1, 1},
                {F_IBQTPPARERR, "CIM IBQ TP parity error", -1, 1},
                {F_ITAGPARERR, "CIM itag parity error", -1, 1},
                {F_DTAGPARERR, "CIM dtag parity error", -1, 1},
                {0}
        };

        if (t3_handle_intr_status(adapter, A_CIM_HOST_INT_CAUSE, 0xffffffff,
                                  cim_intr_info, NULL))
                t3_fatal_err(adapter);
}
1649 
1650 
1651 
1652 
/*
 * ULP RX interrupt handler.  All causes are fatal.
 */
static void ulprx_intr_handler(struct adapter *adapter)
{
        static const struct intr_info ulprx_intr_info[] = {
                {F_PARERRDATA, "ULP RX data parity error", -1, 1},
                {F_PARERRPCMD, "ULP RX command parity error", -1, 1},
                {F_ARBPF1PERR, "ULP RX ArbPF1 parity error", -1, 1},
                {F_ARBPF0PERR, "ULP RX ArbPF0 parity error", -1, 1},
                {F_ARBFPERR, "ULP RX ArbF parity error", -1, 1},
                {F_PCMDMUXPERR, "ULP RX PCMDMUX parity error", -1, 1},
                {F_DATASELFRAMEERR1, "ULP RX frame error", -1, 1},
                {F_DATASELFRAMEERR0, "ULP RX frame error", -1, 1},
                {0}
        };

        if (t3_handle_intr_status(adapter, A_ULPRX_INT_CAUSE, 0xffffffff,
                                  ulprx_intr_info, NULL))
                t3_fatal_err(adapter);
}
1671 
1672 
1673 
1674 
/*
 * ULP TX interrupt handler.  PBL out-of-bounds conditions are counted
 * per channel in irq_stats; parity errors (bits 0xfc) are fatal.
 */
static void ulptx_intr_handler(struct adapter *adapter)
{
        static const struct intr_info ulptx_intr_info[] = {
                {F_PBL_BOUND_ERR_CH0, "ULP TX channel 0 PBL out of bounds",
                 STAT_ULP_CH0_PBL_OOB, 0},
                {F_PBL_BOUND_ERR_CH1, "ULP TX channel 1 PBL out of bounds",
                 STAT_ULP_CH1_PBL_OOB, 0},
                {0xfc, "ULP TX parity error", -1, 1},
                {0}
        };

        if (t3_handle_intr_status(adapter, A_ULPTX_INT_CAUSE, 0xffffffff,
                                  ulptx_intr_info, adapter->irq_stats))
                t3_fatal_err(adapter);
}
1690 
/* Aggregate PMTX framing-error bits for the ingress/egress SPI interfaces. */
#define ICSPI_FRM_ERR (F_ICSPI0_FIFO2X_RX_FRAMING_ERROR | \
        F_ICSPI1_FIFO2X_RX_FRAMING_ERROR | F_ICSPI0_RX_FRAMING_ERROR | \
        F_ICSPI1_RX_FRAMING_ERROR | F_ICSPI0_TX_FRAMING_ERROR | \
        F_ICSPI1_TX_FRAMING_ERROR)
#define OESPI_FRM_ERR (F_OESPI0_RX_FRAMING_ERROR | \
        F_OESPI1_RX_FRAMING_ERROR | F_OESPI0_TX_FRAMING_ERROR | \
        F_OESPI1_TX_FRAMING_ERROR | F_OESPI0_OFIFO2X_TX_FRAMING_ERROR | \
        F_OESPI1_OFIFO2X_TX_FRAMING_ERROR)

/*
 * PM TX interrupt handler.  All causes are fatal.
 */
static void pmtx_intr_handler(struct adapter *adapter)
{
        static const struct intr_info pmtx_intr_info[] = {
                {F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1},
                {ICSPI_FRM_ERR, "PMTX ispi framing error", -1, 1},
                {OESPI_FRM_ERR, "PMTX ospi framing error", -1, 1},
                {V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR),
                 "PMTX ispi parity error", -1, 1},
                {V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR),
                 "PMTX ospi parity error", -1, 1},
                {0}
        };

        if (t3_handle_intr_status(adapter, A_PM1_TX_INT_CAUSE, 0xffffffff,
                                  pmtx_intr_info, NULL))
                t3_fatal_err(adapter);
}
1720 
/* Aggregate PMRX framing-error bits for the ingress/egress SPI interfaces. */
#define IESPI_FRM_ERR (F_IESPI0_FIFO2X_RX_FRAMING_ERROR | \
        F_IESPI1_FIFO2X_RX_FRAMING_ERROR | F_IESPI0_RX_FRAMING_ERROR | \
        F_IESPI1_RX_FRAMING_ERROR | F_IESPI0_TX_FRAMING_ERROR | \
        F_IESPI1_TX_FRAMING_ERROR)
#define OCSPI_FRM_ERR (F_OCSPI0_RX_FRAMING_ERROR | \
        F_OCSPI1_RX_FRAMING_ERROR | F_OCSPI0_TX_FRAMING_ERROR | \
        F_OCSPI1_TX_FRAMING_ERROR | F_OCSPI0_OFIFO2X_TX_FRAMING_ERROR | \
        F_OCSPI1_OFIFO2X_TX_FRAMING_ERROR)

/*
 * PM RX interrupt handler.  All causes are fatal.
 */
static void pmrx_intr_handler(struct adapter *adapter)
{
        static const struct intr_info pmrx_intr_info[] = {
                {F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1},
                {IESPI_FRM_ERR, "PMRX ispi framing error", -1, 1},
                {OCSPI_FRM_ERR, "PMRX ospi framing error", -1, 1},
                {V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR),
                 "PMRX ispi parity error", -1, 1},
                {V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR),
                 "PMRX ospi parity error", -1, 1},
                {0}
        };

        if (t3_handle_intr_status(adapter, A_PM1_RX_INT_CAUSE, 0xffffffff,
                                  pmrx_intr_info, NULL))
                t3_fatal_err(adapter);
}
1750 
1751 
1752 
1753 
/*
 * CPL switch interrupt handler.  All causes are fatal.
 */
static void cplsw_intr_handler(struct adapter *adapter)
{
        static const struct intr_info cplsw_intr_info[] = {
                {F_CIM_OP_MAP_PERR, "CPL switch CIM parity error", -1, 1},
                {F_CIM_OVFL_ERROR, "CPL switch CIM overflow", -1, 1},
                {F_TP_FRAMING_ERROR, "CPL switch TP framing error", -1, 1},
                {F_SGE_FRAMING_ERROR, "CPL switch SGE framing error", -1, 1},
                {F_CIM_FRAMING_ERROR, "CPL switch CIM framing error", -1, 1},
                {F_ZERO_SWITCH_ERROR, "CPL switch no-switch error", -1, 1},
                {0}
        };

        if (t3_handle_intr_status(adapter, A_CPL_INTR_CAUSE, 0xffffffff,
                                  cplsw_intr_info, NULL))
                t3_fatal_err(adapter);
}
1770 
1771 
1772 
1773 
/*
 * MPS interrupt handler.  Any parity error (bits 0x1ff) is fatal.
 */
static void mps_intr_handler(struct adapter *adapter)
{
        static const struct intr_info mps_intr_info[] = {
                {0x1ff, "MPS parity error", -1, 1},
                {0}
        };

        if (t3_handle_intr_status(adapter, A_MPS_INT_CAUSE, 0xffffffff,
                                  mps_intr_info, NULL))
                t3_fatal_err(adapter);
}
1785 
/* MC7 causes that escalate to t3_fatal_err(). */
#define MC7_INTR_FATAL (F_UE | V_PE(M_PE) | F_AE)

/*
 * MC7 (memory controller) interrupt handler.  Logs and counts
 * correctable, uncorrectable, parity and address errors, escalates
 * fatal causes, and finally clears the cause register.
 */
static void mc7_intr_handler(struct mc7 *mc7)
{
        struct adapter *adapter = mc7->adapter;
        u32 cause = t3_read_reg(adapter, mc7->offset + A_MC7_INT_CAUSE);

        if (cause & F_CE) {
                mc7->stats.corr_err++;
                CH_WARN(adapter, "%s MC7 correctable error at addr 0x%x, "
                        "data 0x%x 0x%x 0x%x\n", mc7->name,
                        t3_read_reg(adapter, mc7->offset + A_MC7_CE_ADDR),
                        t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA0),
                        t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA1),
                        t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA2));
        }

        if (cause & F_UE) {
                mc7->stats.uncorr_err++;
                CH_ALERT(adapter, "%s MC7 uncorrectable error at addr 0x%x, "
                         "data 0x%x 0x%x 0x%x\n", mc7->name,
                         t3_read_reg(adapter, mc7->offset + A_MC7_UE_ADDR),
                         t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA0),
                         t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA1),
                         t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA2));
        }

        if (G_PE(cause)) {
                mc7->stats.parity_err++;
                CH_ALERT(adapter, "%s MC7 parity error 0x%x\n",
                         mc7->name, G_PE(cause));
        }

        if (cause & F_AE) {
                u32 addr = 0;

                /* error address register is only read on rev > 0; rev 0
                 * parts report address 0 */
                if (adapter->params.rev > 0)
                        addr = t3_read_reg(adapter,
                                           mc7->offset + A_MC7_ERR_ADDR);
                mc7->stats.addr_err++;
                CH_ALERT(adapter, "%s MC7 address error: 0x%x\n",
                         mc7->name, addr);
        }

        if (cause & MC7_INTR_FATAL)
                t3_fatal_err(adapter);

        /* clear all cause bits, fatal ones included */
        t3_write_reg(adapter, mc7->offset + A_MC7_INT_CAUSE, cause);
}
1838 
1839 #define XGM_INTR_FATAL (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
1840                         V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR))
1841 
1842 
1843 
/*
 * XGMAC interrupt handler for port @idx: updates the MAC error statistics,
 * reacts to link faults, escalates fatal FIFO parity errors, and acks the
 * cause register.  Returns nonzero iff any cause bit was set.
 */
static int mac_intr_handler(struct adapter *adap, unsigned int idx)
{
        struct cmac *mac = &adap2pinfo(adap, idx)->mac;

        /*
         * RX FIFO overflow is masked out of the cause up front, so the
         * F_RXFIFO_OVERFLOW branch below can never fire here.
         * NOTE(review): presumably that condition is accounted for by a
         * polling path instead of per-interrupt; confirm before touching
         * the dead branch.
         */
        u32 cause = t3_read_reg(adap, A_XGM_INT_CAUSE + mac->offset) &
                    ~F_RXFIFO_OVERFLOW;

        if (cause & V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR)) {
                mac->stats.tx_fifo_parity_err++;
                CH_ALERT(adap, "port%d: MAC TX FIFO parity error\n", idx);
        }
        if (cause & V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR)) {
                mac->stats.rx_fifo_parity_err++;
                CH_ALERT(adap, "port%d: MAC RX FIFO parity error\n", idx);
        }
        if (cause & F_TXFIFO_UNDERRUN)
                mac->stats.tx_fifo_urun++;
        if (cause & F_RXFIFO_OVERFLOW)
                mac->stats.rx_fifo_ovfl++;
        if (cause & V_SERDES_LOS(M_SERDES_LOS))
                mac->stats.serdes_signal_loss++;
        if (cause & F_XAUIPCSCTCERR)
                mac->stats.xaui_pcs_ctc_err++;
        if (cause & F_XAUIPCSALIGNCHANGE)
                mac->stats.xaui_pcs_align_change++;
        if (cause & F_XGM_INT) {
                /* link fault: mask further XGM_INT interrupts, then hand
                 * the event to the OS-dependent layer */
                t3_set_reg_field(adap,
                                 A_XGM_INT_ENABLE + mac->offset,
                                 F_XGM_INT, 0);
                mac->stats.link_faults++;

                t3_os_link_fault_handler(adap, idx);
        }

        if (cause & XGM_INTR_FATAL)
                t3_fatal_err(adap);

        /* ack the interrupt */
        t3_write_reg(adap, A_XGM_INT_CAUSE + mac->offset, cause);
        return cause != 0;
}
1889 
1890 
1891 
1892 
/*
 * T3 PHY interrupt handler.  For every port whose PHY supports interrupts,
 * checks the port's assigned GPIO bit in the T3DBG cause register and, if
 * set, dispatches to the PHY's own interrupt handler and propagates
 * link-change, FIFO-error and module-change events.  Acks the GPIO cause
 * register before returning.  Always returns 0.
 */
int t3_phy_intr_handler(struct adapter *adapter)
{
        u32 i, cause = t3_read_reg(adapter, A_T3DBG_INT_CAUSE);

        for_each_port(adapter, i) {
                struct port_info *p = adap2pinfo(adapter, i);

                if (!(p->phy.caps & SUPPORTED_IRQ))
                        continue;

                if (cause & (1 << adapter_info(adapter)->gpio_intr[i])) {
                        int phy_cause = p->phy.ops->intr_handler(&p->phy);

                        if (phy_cause & cphy_cause_link_change)
                                t3_link_changed(adapter, i);
                        if (phy_cause & cphy_cause_fifo_error)
                                p->phy.fifo_errors++;
                        if (phy_cause & cphy_cause_module_change)
                                t3_os_phymod_changed(adapter, i);
                }
        }

        /* ack the GPIO interrupt causes */
        t3_write_reg(adapter, A_T3DBG_INT_CAUSE, cause);
        return 0;
}
1918 
1919 
1920 
1921 
/*
 * T3 slow-path interrupt handler.  Reads the top-level PL cause register,
 * masks it with the causes that were enabled, and dispatches each pending
 * cause to the corresponding module handler.  Returns 0 if no enabled
 * cause was pending, 1 otherwise.
 */
int t3_slow_intr_handler(struct adapter *adapter)
{
        u32 cause = t3_read_reg(adapter, A_PL_INT_CAUSE0);

        cause &= adapter->slow_intr_mask;
        if (!cause)
                return 0;
        if (cause & F_PCIM0) {
                /* bus errors: PCIe and PCI-X parts have distinct handlers */
                if (is_pcie(adapter))
                        pcie_intr_handler(adapter);
                else
                        pci_intr_handler(adapter);
        }
        if (cause & F_SGE3)
                t3_sge_err_intr_handler(adapter);
        if (cause & F_MC7_PMRX)
                mc7_intr_handler(&adapter->pmrx);
        if (cause & F_MC7_PMTX)
                mc7_intr_handler(&adapter->pmtx);
        if (cause & F_MC7_CM)
                mc7_intr_handler(&adapter->cm);
        if (cause & F_CIM)
                cim_intr_handler(adapter);
        if (cause & F_TP1)
                tp_intr_handler(adapter);
        if (cause & F_ULP2_RX)
                ulprx_intr_handler(adapter);
        if (cause & F_ULP2_TX)
                ulptx_intr_handler(adapter);
        if (cause & F_PM1_RX)
                pmrx_intr_handler(adapter);
        if (cause & F_PM1_TX)
                pmtx_intr_handler(adapter);
        if (cause & F_CPL_SWITCH)
                cplsw_intr_handler(adapter);
        if (cause & F_MPS0)
                mps_intr_handler(adapter);
        if (cause & F_MC5A)
                t3_mc5_intr_handler(&adapter->mc5);
        if (cause & F_XGMAC0_0)
                mac_intr_handler(adapter, 0);
        if (cause & F_XGMAC0_1)
                mac_intr_handler(adapter, 1);
        if (cause & F_T3DBG)
                t3_os_ext_intr_handler(adapter);

        /* Clear the interrupts just processed for which we are the master. */
        t3_write_reg(adapter, A_PL_INT_CAUSE0, cause);
        t3_read_reg(adapter, A_PL_INT_CAUSE0);  /* flush the posted write */
        return 1;
}
1973 
1974 static unsigned int calc_gpio_intr(struct adapter *adap)
1975 {
1976         unsigned int i, gpi_intr = 0;
1977 
1978         for_each_port(adap, i)
1979                 if ((adap2pinfo(adap, i)->phy.caps & SUPPORTED_IRQ) &&
1980                     adapter_info(adap)->gpio_intr[i])
1981                         gpi_intr |= 1 << adapter_info(adap)->gpio_intr[i];
1982         return gpi_intr;
1983 }
1984 
1985 
1986 
1987 
1988 
1989 
1990 
1991 
1992 
/*
 * Enable interrupts throughout the chip: program each module's interrupt
 * enable register, then unmask everything at the top-level PL register.
 * Also caches the slow-path mask consumed by t3_slow_intr_handler().
 */
void t3_intr_enable(struct adapter *adapter)
{
        static const struct addr_val_pair intr_en_avp[] = {
                {A_SG_INT_ENABLE, SGE_INTR_MASK},
                {A_MC7_INT_ENABLE, MC7_INTR_MASK},
                {A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
                 MC7_INTR_MASK},
                {A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
                 MC7_INTR_MASK},
                {A_MC5_DB_INT_ENABLE, MC5_INTR_MASK},
                {A_ULPRX_INT_ENABLE, ULPRX_INTR_MASK},
                {A_PM1_TX_INT_ENABLE, PMTX_INTR_MASK},
                {A_PM1_RX_INT_ENABLE, PMRX_INTR_MASK},
                {A_CIM_HOST_INT_ENABLE, CIM_INTR_MASK},
                {A_MPS_INT_ENABLE, MPS_INTR_MASK},
        };

        adapter->slow_intr_mask = PL_INTR_MASK;

        t3_write_regs(adapter, intr_en_avp, ARRAY_SIZE(intr_en_avp), 0);
        /* the TP interrupt mask differs between chip revisions */
        t3_write_reg(adapter, A_TP_INT_ENABLE,
                     adapter->params.rev >= T3_REV_C ? 0x2bfffff : 0x3bfffff);

        if (adapter->params.rev > 0) {
                /* rev > 0 parts report additional CPL/ULPTX error causes */
                t3_write_reg(adapter, A_CPL_INTR_ENABLE,
                             CPLSW_INTR_MASK | F_CIM_OVFL_ERROR);
                t3_write_reg(adapter, A_ULPTX_INT_ENABLE,
                             ULPTX_INTR_MASK | F_PBL_BOUND_ERR_CH0 |
                             F_PBL_BOUND_ERR_CH1);
        } else {
                t3_write_reg(adapter, A_CPL_INTR_ENABLE, CPLSW_INTR_MASK);
                t3_write_reg(adapter, A_ULPTX_INT_ENABLE, ULPTX_INTR_MASK);
        }

        t3_write_reg(adapter, A_T3DBG_INT_ENABLE, calc_gpio_intr(adapter));

        if (is_pcie(adapter))
                t3_write_reg(adapter, A_PCIE_INT_ENABLE, PCIE_INTR_MASK);
        else
                t3_write_reg(adapter, A_PCIX_INT_ENABLE, PCIX_INTR_MASK);
        t3_write_reg(adapter, A_PL_INT_ENABLE0, adapter->slow_intr_mask);
        t3_read_reg(adapter, A_PL_INT_ENABLE0); /* flush the posted write */
}
2036 
2037 
2038 
2039 
2040 
2041 
2042 
2043 
/*
 * Disable all top-level (PL) interrupts and clear the cached slow-path
 * interrupt mask.  Per-module interrupt enables are left untouched.
 */
void t3_intr_disable(struct adapter *adapter)
{
        t3_write_reg(adapter, A_PL_INT_ENABLE0, 0);
        t3_read_reg(adapter, A_PL_INT_ENABLE0); /* flush the posted write */
        adapter->slow_intr_mask = 0;
}
2050 
2051 
2052 
2053 
2054 
2055 
2056 
/*
 * Clear all pending interrupt state: per-port MAC/PHY causes first, then
 * every per-module cause register, PCIe PEX errors on PCIe parts, and
 * finally the top-level PL cause register.
 */
void t3_intr_clear(struct adapter *adapter)
{
        static const unsigned int cause_reg_addr[] = {
                A_SG_INT_CAUSE,
                A_SG_RSPQ_FL_STATUS,
                A_PCIX_INT_CAUSE,
                A_MC7_INT_CAUSE,
                A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
                A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
                A_CIM_HOST_INT_CAUSE,
                A_TP_INT_CAUSE,
                A_MC5_DB_INT_CAUSE,
                A_ULPRX_INT_CAUSE,
                A_ULPTX_INT_CAUSE,
                A_CPL_INTR_CAUSE,
                A_PM1_TX_INT_CAUSE,
                A_PM1_RX_INT_CAUSE,
                A_MPS_INT_CAUSE,
                A_T3DBG_INT_CAUSE,
        };
        unsigned int i;

        /* Clear PHY and MAC interrupts for each port. */
        for_each_port(adapter, i)
            t3_port_intr_clear(adapter, i);

        for (i = 0; i < ARRAY_SIZE(cause_reg_addr); ++i)
                t3_write_reg(adapter, cause_reg_addr[i], 0xffffffff);

        if (is_pcie(adapter))
                t3_write_reg(adapter, A_PCIE_PEX_ERR, 0xffffffff);
        t3_write_reg(adapter, A_PL_INT_CAUSE0, 0xffffffff);
        t3_read_reg(adapter, A_PL_INT_CAUSE0);  /* flush the posted write */
}
2091 
2092 void t3_xgm_intr_enable(struct adapter *adapter, int idx)
2093 {
2094         struct port_info *pi = adap2pinfo(adapter, idx);
2095 
2096         t3_write_reg(adapter, A_XGM_XGM_INT_ENABLE + pi->mac.offset,
2097                      XGM_EXTRA_INTR_MASK);
2098 }
2099 
2100 void t3_xgm_intr_disable(struct adapter *adapter, int idx)
2101 {
2102         struct port_info *pi = adap2pinfo(adapter, idx);
2103 
2104         t3_write_reg(adapter, A_XGM_XGM_INT_DISABLE + pi->mac.offset,
2105                      0x7ff);
2106 }
2107 
2108 
2109 
2110 
2111 
2112 
2113 
2114 
2115 
2116 void t3_port_intr_enable(struct adapter *adapter, int idx)
2117 {
2118         struct cphy *phy = &adap2pinfo(adapter, idx)->phy;
2119 
2120         t3_write_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx), XGM_INTR_MASK);
2121         t3_read_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx)); 
2122         phy->ops->intr_enable(phy);
2123 }
2124 
2125 
2126 
2127 
2128 
2129 
2130 
2131 
2132 
2133 void t3_port_intr_disable(struct adapter *adapter, int idx)
2134 {
2135         struct cphy *phy = &adap2pinfo(adapter, idx)->phy;
2136 
2137         t3_write_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx), 0);
2138         t3_read_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx)); 
2139         phy->ops->intr_disable(phy);
2140 }
2141 
2142 
2143 
2144 
2145 
2146 
2147 
2148 
2149 
2150 static void t3_port_intr_clear(struct adapter *adapter, int idx)
2151 {
2152         struct cphy *phy = &adap2pinfo(adapter, idx)->phy;
2153 
2154         t3_write_reg(adapter, XGM_REG(A_XGM_INT_CAUSE, idx), 0xffffffff);
2155         t3_read_reg(adapter, XGM_REG(A_XGM_INT_CAUSE, idx)); 
2156         phy->ops->intr_clear(phy);
2157 }
2158 
2159 #define SG_CONTEXT_CMD_ATTEMPTS 100
2160 
2161 
2162 
2163 
2164 
2165 
2166 
2167 
2168 
2169 
/*
 * Commit the staged SG_CONTEXT_DATA registers to the SGE context @id of
 * the given @type and wait for the command to finish.  Returns 0 on
 * success or a negative error on command timeout.
 */
static int t3_sge_write_context(struct adapter *adapter, unsigned int id,
                                unsigned int type)
{
        if (type == F_RESPONSEQ) {
                /*
                 * Response queues use a partial mask for data word 2
                 * (0x17ffffff): some bits there must not be overwritten.
                 * NOTE(review): presumably HW-maintained interrupt/state
                 * bits — confirm against the SGE response-queue context
                 * layout.
                 */
                t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff);
                t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff);
                t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0x17ffffff);
                t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff);
        } else {
                t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff);
                t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff);
                t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0xffffffff);
                t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff);
        }
        t3_write_reg(adapter, A_SG_CONTEXT_CMD,
                     V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id));
        return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
                               0, SG_CONTEXT_CMD_ATTEMPTS, 1);
}
2195 
2196 
2197 
2198 
2199 
2200 
2201 
2202 
2203 
2204 
2205 
2206 
/*
 * Zero the SGE context @id of the given @type by staging all-zero data
 * words with a full write mask and issuing a context write command.
 * Returns 0 on success, or a negative error on command timeout.
 */
static int clear_sge_ctxt(struct adapter *adap, unsigned int id,
                          unsigned int type)
{
        t3_write_reg(adap, A_SG_CONTEXT_DATA0, 0);
        t3_write_reg(adap, A_SG_CONTEXT_DATA1, 0);
        t3_write_reg(adap, A_SG_CONTEXT_DATA2, 0);
        t3_write_reg(adap, A_SG_CONTEXT_DATA3, 0);
        t3_write_reg(adap, A_SG_CONTEXT_MASK0, 0xffffffff);
        t3_write_reg(adap, A_SG_CONTEXT_MASK1, 0xffffffff);
        t3_write_reg(adap, A_SG_CONTEXT_MASK2, 0xffffffff);
        t3_write_reg(adap, A_SG_CONTEXT_MASK3, 0xffffffff);
        t3_write_reg(adap, A_SG_CONTEXT_CMD,
                     V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id));
        return t3_wait_op_done(adap, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
                               0, SG_CONTEXT_CMD_ATTEMPTS, 1);
}
2223 
2224 
2225 
2226 
2227 
2228 
2229 
2230 
2231 
2232 
2233 
2234 
2235 
2236 
2237 
2238 
2239 
2240 
/*
 * t3_sge_init_ecntxt - initialize an SGE egress context
 * @adapter: the adapter
 * @id: context id
 * @gts_enable: whether to enable GTS
 * @type: egress context type (offload contexts start with 0 credits,
 *        others with FW_WR_NUM)
 * @respq: associated response queue
 * @base_addr: DMA base address of the ring; must be 4KB aligned
 * @size: ring size
 * @token: uP token for the context
 * @gen: initial generation bit
 * @cidx: initial consumer index
 *
 * Returns 0 on success, -EINVAL for a misaligned base address, -EBUSY if
 * a previous context command is still in flight, or the result of the
 * context write otherwise.
 */
int t3_sge_init_ecntxt(struct adapter *adapter, unsigned int id, int gts_enable,
                       enum sge_context_type type, int respq, u64 base_addr,
                       unsigned int size, unsigned int token, int gen,
                       unsigned int cidx)
{
        unsigned int credits = type == SGE_CNTXT_OFLD ? 0 : FW_WR_NUM;

        if (base_addr & 0xfff)  /* base address must be 4KB aligned */
                return -EINVAL;
        if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
                return -EBUSY;

        base_addr >>= 12;       /* hardware stores the base in 4KB units */
        t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_EC_INDEX(cidx) |
                     V_EC_CREDITS(credits) | V_EC_GTS(gts_enable));
        t3_write_reg(adapter, A_SG_CONTEXT_DATA1, V_EC_SIZE(size) |
                     V_EC_BASE_LO(base_addr & 0xffff));
        base_addr >>= 16;
        t3_write_reg(adapter, A_SG_CONTEXT_DATA2, base_addr);
        base_addr >>= 32;
        t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
                     V_EC_BASE_HI(base_addr & 0xf) | V_EC_RESPQ(respq) |
                     V_EC_TYPE(type) | V_EC_GEN(gen) | V_EC_UP_TOKEN(token) |
                     F_EC_VALID);
        return t3_sge_write_context(adapter, id, F_EGRESS);
}
2267 
2268 
2269 
2270 
2271 
2272 
2273 
2274 
2275 
2276 
2277 
2278 
2279 
2280 
2281 
2282 
2283 
/*
 * t3_sge_init_flcntxt - initialize an SGE free-list context
 * @adapter: the adapter
 * @id: context id
 * @gts_enable: whether to enable GTS
 * @base_addr: DMA base address of the free list; must be 4KB aligned
 * @size: number of free-list entries
 * @bsize: buffer size of each entry
 * @cong_thres: congestion threshold
 * @gen: initial generation bit
 * @cidx: initial consumer index
 *
 * Returns 0 on success, -EINVAL for a misaligned base address, -EBUSY if
 * a previous context command is still in flight, or the result of the
 * context write otherwise.
 */
int t3_sge_init_flcntxt(struct adapter *adapter, unsigned int id,
                        int gts_enable, u64 base_addr, unsigned int size,
                        unsigned int bsize, unsigned int cong_thres, int gen,
                        unsigned int cidx)
{
        if (base_addr & 0xfff)  /* base address must be 4KB aligned */
                return -EINVAL;
        if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
                return -EBUSY;

        base_addr >>= 12;       /* hardware stores the base in 4KB units */
        t3_write_reg(adapter, A_SG_CONTEXT_DATA0, base_addr);
        base_addr >>= 32;
        t3_write_reg(adapter, A_SG_CONTEXT_DATA1,
                     V_FL_BASE_HI((u32) base_addr) |
                     V_FL_INDEX_LO(cidx & M_FL_INDEX_LO));
        t3_write_reg(adapter, A_SG_CONTEXT_DATA2, V_FL_SIZE(size) |
                     V_FL_GEN(gen) | V_FL_INDEX_HI(cidx >> 12) |
                     V_FL_ENTRY_SIZE_LO(bsize & M_FL_ENTRY_SIZE_LO));
        t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
                     V_FL_ENTRY_SIZE_HI(bsize >> (32 - S_FL_ENTRY_SIZE_LO)) |
                     V_FL_CONG_THRES(cong_thres) | V_FL_GTS(gts_enable));
        return t3_sge_write_context(adapter, id, F_FREELIST);
}
2308 
2309 
2310 
2311 
2312 
2313 
2314 
2315 
2316 
2317 
2318 
2319 
2320 
2321 
2322 
2323 
/*
 * t3_sge_init_rspcntxt - initialize an SGE response-queue context
 * @adapter: the adapter
 * @id: context id
 * @irq_vec_idx: MSI-X vector to use, or negative to leave interrupts off
 * @base_addr: DMA base address of the queue; must be 4KB aligned
 * @size: number of queue entries
 * @fl_thres: free-list threshold
 * @gen: initial generation bit
 * @cidx: initial consumer index
 *
 * Returns 0 on success, -EINVAL for a misaligned base address, -EBUSY if
 * a previous context command is still in flight, or the result of the
 * context write otherwise.
 */
int t3_sge_init_rspcntxt(struct adapter *adapter, unsigned int id,
                         int irq_vec_idx, u64 base_addr, unsigned int size,
                         unsigned int fl_thres, int gen, unsigned int cidx)
{
        unsigned int intr = 0;

        if (base_addr & 0xfff)  /* base address must be 4KB aligned */
                return -EINVAL;
        if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
                return -EBUSY;

        base_addr >>= 12;       /* hardware stores the base in 4KB units */
        t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size) |
                     V_CQ_INDEX(cidx));
        t3_write_reg(adapter, A_SG_CONTEXT_DATA1, base_addr);
        base_addr >>= 32;
        /* a non-negative vector index enables interrupt generation */
        if (irq_vec_idx >= 0)
                intr = V_RQ_MSI_VEC(irq_vec_idx) | F_RQ_INTR_EN;
        t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
                     V_CQ_BASE_HI((u32) base_addr) | intr | V_RQ_GEN(gen));
        t3_write_reg(adapter, A_SG_CONTEXT_DATA3, fl_thres);
        return t3_sge_write_context(adapter, id, F_RESPONSEQ);
}
2347 
2348 
2349 
2350 
2351 
2352 
2353 
2354 
2355 
2356 
2357 
2358 
2359 
2360 
2361 
2362 
/*
 * t3_sge_init_cqcntxt - initialize an SGE completion-queue context
 * @adapter: the adapter
 * @id: context id
 * @base_addr: DMA base address of the queue; must be 4KB aligned
 * @size: number of queue entries
 * @rspq: response queue for async notifications
 * @ovfl_mode: overflow handling mode (also seeds the CQ error bit)
 * @credits: initial credit count
 * @credit_thres: credit threshold
 *
 * Returns 0 on success, -EINVAL for a misaligned base address, -EBUSY if
 * a previous context command is still in flight, or the result of the
 * context write otherwise.
 */
int t3_sge_init_cqcntxt(struct adapter *adapter, unsigned int id, u64 base_addr,
                        unsigned int size, int rspq, int ovfl_mode,
                        unsigned int credits, unsigned int credit_thres)
{
        if (base_addr & 0xfff)  /* base address must be 4KB aligned */
                return -EINVAL;
        if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
                return -EBUSY;

        base_addr >>= 12;       /* hardware stores the base in 4KB units */
        t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size));
        t3_write_reg(adapter, A_SG_CONTEXT_DATA1, base_addr);
        base_addr >>= 32;
        t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
                     V_CQ_BASE_HI((u32) base_addr) | V_CQ_RSPQ(rspq) |
                     V_CQ_GEN(1) | V_CQ_OVERFLOW_MODE(ovfl_mode) |
                     V_CQ_ERR(ovfl_mode));
        t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_CQ_CREDITS(credits) |
                     V_CQ_CREDIT_THRES(credit_thres));
        return t3_sge_write_context(adapter, id, F_CQ);
}
2384 
2385 
2386 
2387 
2388 
2389 
2390 
2391 
2392 
2393 
/*
 * Enable (@enable != 0) or disable egress context @id by rewriting only
 * its EC_VALID bit; all other context fields are masked off.  Returns 0
 * on success, -EBUSY if a context command is in flight, or a negative
 * error on command timeout.
 */
int t3_sge_enable_ecntxt(struct adapter *adapter, unsigned int id, int enable)
{
        if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
                return -EBUSY;

        t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
        t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
        t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
        t3_write_reg(adapter, A_SG_CONTEXT_MASK3, F_EC_VALID);
        t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_EC_VALID(enable));
        t3_write_reg(adapter, A_SG_CONTEXT_CMD,
                     V_CONTEXT_CMD_OPCODE(1) | F_EGRESS | V_CONTEXT(id));
        return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
                               0, SG_CONTEXT_CMD_ATTEMPTS, 1);
}
2409 
2410 
2411 
2412 
2413 
2414 
2415 
2416 
2417 
/*
 * Disable free-list context @id by zeroing its FL_SIZE field (the only
 * field covered by the mask).  Returns 0 on success, -EBUSY if a context
 * command is in flight, or a negative error on command timeout.
 */
int t3_sge_disable_fl(struct adapter *adapter, unsigned int id)
{
        if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
                return -EBUSY;

        t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
        t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
        t3_write_reg(adapter, A_SG_CONTEXT_MASK2, V_FL_SIZE(M_FL_SIZE));
        t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
        t3_write_reg(adapter, A_SG_CONTEXT_DATA2, 0);
        t3_write_reg(adapter, A_SG_CONTEXT_CMD,
                     V_CONTEXT_CMD_OPCODE(1) | F_FREELIST | V_CONTEXT(id));
        return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
                               0, SG_CONTEXT_CMD_ATTEMPTS, 1);
}
2433 
2434 
2435 
2436 
2437 
2438 
2439 
2440 
2441 
/*
 * Disable response-queue context @id by zeroing its CQ_SIZE field (the
 * only field covered by the mask).  Returns 0 on success, -EBUSY if a
 * context command is in flight, or a negative error on command timeout.
 */
int t3_sge_disable_rspcntxt(struct adapter *adapter, unsigned int id)
{
        if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
                return -EBUSY;

        t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
        t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
        t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
        t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
        t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
        t3_write_reg(adapter, A_SG_CONTEXT_CMD,
                     V_CONTEXT_CMD_OPCODE(1) | F_RESPONSEQ | V_CONTEXT(id));
        return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
                               0, SG_CONTEXT_CMD_ATTEMPTS, 1);
}
2457 
2458 
2459 
2460 
2461 
2462 
2463 
2464 
2465 
/*
 * Disable completion-queue context @id by zeroing its CQ_SIZE field (the
 * only field covered by the mask).  Returns 0 on success, -EBUSY if a
 * context command is in flight, or a negative error on command timeout.
 */
int t3_sge_disable_cqcntxt(struct adapter *adapter, unsigned int id)
{
        if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
                return -EBUSY;

        t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
        t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
        t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
        t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
        t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
        t3_write_reg(adapter, A_SG_CONTEXT_CMD,
                     V_CONTEXT_CMD_OPCODE(1) | F_CQ | V_CONTEXT(id));
        return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
                               0, SG_CONTEXT_CMD_ATTEMPTS, 1);
}
2481 
2482 
2483 
2484 
2485 
2486 
2487 
2488 
2489 
2490 
2491 
/*
 * Perform CQ context operation @op on context @id, passing @credits in
 * the upper half of data word 0.  For ops 2..6 the current CQ index is
 * returned — on rev 0 parts this requires an explicit context read-back
 * because the op itself does not report it.  Returns 0 for other ops,
 * -EBUSY if a context command is in flight, or -EIO on timeout.
 */
int t3_sge_cqcntxt_op(struct adapter *adapter, unsigned int id, unsigned int op,
                      unsigned int credits)
{
        u32 val;

        if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
                return -EBUSY;

        t3_write_reg(adapter, A_SG_CONTEXT_DATA0, credits << 16);
        t3_write_reg(adapter, A_SG_CONTEXT_CMD, V_CONTEXT_CMD_OPCODE(op) |
                     V_CONTEXT(id) | F_CQ);
        if (t3_wait_op_done_val(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
                                0, SG_CONTEXT_CMD_ATTEMPTS, 1, &val))
                return -EIO;

        if (op >= 2 && op < 7) {
                if (adapter->params.rev > 0)
                        return G_CQ_INDEX(val);

                /* rev 0: issue a context read to fetch the CQ index */
                t3_write_reg(adapter, A_SG_CONTEXT_CMD,
                             V_CONTEXT_CMD_OPCODE(0) | F_CQ | V_CONTEXT(id));
                if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD,
                                    F_CONTEXT_CMD_BUSY, 0,
                                    SG_CONTEXT_CMD_ATTEMPTS, 1))
                        return -EIO;
                return G_CQ_INDEX(t3_read_reg(adapter, A_SG_CONTEXT_DATA0));
        }
        return 0;
}
2521 
2522 
2523 
2524 
2525 
2526 
2527 
2528 
2529 
2530 
2531 
2532 
2533 
/*
 * t3_config_rss - configure the RSS lookup and map tables
 * @adapter: the adapter
 * @rss_config: value written to A_TP_RSS_CONFIG
 * @cpus: 0xff-terminated CPU (lookup) table, or NULL to skip
 * @rspq: 0xffff-terminated response-queue map, or NULL to skip
 *
 * Each table is written in full (RSS_TABLE_SIZE entries); the source
 * arrays wrap back to the start whenever their terminator is reached.
 */
void t3_config_rss(struct adapter *adapter, unsigned int rss_config,
                   const u8 * cpus, const u16 *rspq)
{
        int i, j, cpu_idx = 0, q_idx = 0;

        if (cpus)
                for (i = 0; i < RSS_TABLE_SIZE; ++i) {
                        u32 val = i << 16;      /* table index in bits 16+ */

                        /* pack two 6-bit CPU entries per table word */
                        for (j = 0; j < 2; ++j) {
                                val |= (cpus[cpu_idx++] & 0x3f) << (8 * j);
                                if (cpus[cpu_idx] == 0xff)
                                        cpu_idx = 0;
                        }
                        t3_write_reg(adapter, A_TP_RSS_LKP_TABLE, val);
                }

        if (rspq)
                for (i = 0; i < RSS_TABLE_SIZE; ++i) {
                        t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
                                     (i << 16) | rspq[q_idx++]);
                        if (rspq[q_idx] == 0xffff)
                                q_idx = 0;
                }

        t3_write_reg(adapter, A_TP_RSS_CONFIG, rss_config);
}
2561 
2562 
2563 
2564 
2565 
2566 
2567 
2568 
2569 void t3_tp_set_offload_mode(struct adapter *adap, int enable)
2570 {
2571         if (is_offload(adap) || !enable)
2572                 t3_set_reg_field(adap, A_TP_IN_CONFIG, F_NICMODE,
2573                                  V_NICMODE(!enable));
2574 }
2575 
2576 
2577 
2578 
2579 
2580 
2581 
2582 
2583 
2584 
/*
 * Number of whole pages of size @pg_size that fit in @mem_size bytes,
 * rounded down to a multiple of 24 (the payload-memory manager allocates
 * page structures in groups of 24).
 */
static inline unsigned int pm_num_pages(unsigned int mem_size,
                                        unsigned int pg_size)
{
        return (mem_size / pg_size) / 24 * 24;
}
2592 
/*
 * Carve a region out of a running memory cursor: program the region's
 * base address into register A_<reg>, then advance @start by @size.
 * Wrapped in do { } while (0) so both statements stay together if the
 * macro is ever used in an unbraced conditional, and the arguments are
 * parenthesized against operator-precedence surprises.  @start must be
 * an lvalue and is evaluated more than once.
 */
#define mem_region(adap, start, size, reg) do { \
        t3_write_reg((adap), A_ ## reg, (start)); \
        (start) += (size); \
} while (0)
2596 
2597 
2598 
2599 
2600 
2601 
2602 
2603 
2604 
/*
 * Partition the CM (context memory) among TCBs, SGE contexts, the TP
 * timer queues, payload page structures and free lists, leaving the
 * remainder to the CIM.  May adjust params.mc5.nservers so the required
 * tids fit in the memory that is left.
 */
static void partition_mem(struct adapter *adap, const struct tp_params *p)
{
        unsigned int m, pstructs, tids = t3_mc5_size(&adap->mc5);
        unsigned int timers = 0, timers_shift = 22;

        /* rev > 0 parts scale the timer-queue region to the tid count */
        if (adap->params.rev > 0) {
                if (tids <= 16 * 1024) {
                        timers = 1;
                        timers_shift = 16;
                } else if (tids <= 64 * 1024) {
                        timers = 2;
                        timers_shift = 18;
                } else if (tids <= 256 * 1024) {
                        timers = 3;
                        timers_shift = 20;
                }
        }

        t3_write_reg(adap, A_TP_PMM_SIZE,
                     p->chan_rx_size | (p->chan_tx_size >> 16));

        t3_write_reg(adap, A_TP_PMM_TX_BASE, 0);
        t3_write_reg(adap, A_TP_PMM_TX_PAGE_SIZE, p->tx_pg_size);
        t3_write_reg(adap, A_TP_PMM_TX_MAX_PAGE, p->tx_num_pgs);
        t3_set_reg_field(adap, A_TP_PARA_REG3, V_TXDATAACKIDX(M_TXDATAACKIDX),
                         V_TXDATAACKIDX(fls(p->tx_pg_size) - 12));

        t3_write_reg(adap, A_TP_PMM_RX_BASE, 0);
        t3_write_reg(adap, A_TP_PMM_RX_PAGE_SIZE, p->rx_pg_size);
        t3_write_reg(adap, A_TP_PMM_RX_MAX_PAGE, p->rx_num_pgs);

        pstructs = p->rx_num_pgs + p->tx_num_pgs;
        /* add slack, then round down to a multiple of 24 pstructs */
        pstructs += 48;
        pstructs -= pstructs % 24;
        t3_write_reg(adap, A_TP_CMM_MM_MAX_PSTRUCT, pstructs);

        /* lay out CM: TCBs first, then each region in turn */
        m = tids * TCB_SIZE;
        mem_region(adap, m, (64 << 10) * 64, SG_EGR_CNTX_BADDR);
        mem_region(adap, m, (64 << 10) * 64, SG_CQ_CONTEXT_BADDR);
        t3_write_reg(adap, A_TP_CMM_TIMER_BASE, V_CMTIMERMAXNUM(timers) | m);
        m += ((p->ntimer_qs - 1) << timers_shift) + (1 << 22);
        mem_region(adap, m, pstructs * 64, TP_CMM_MM_BASE);
        mem_region(adap, m, 64 * (pstructs / 24), TP_CMM_MM_PS_FLST_BASE);
        mem_region(adap, m, 64 * (p->rx_num_pgs / 24), TP_CMM_MM_RX_FLST_BASE);
        mem_region(adap, m, 64 * (p->tx_num_pgs / 24), TP_CMM_MM_TX_FLST_BASE);

        /* the CIM region must start on a 4KB boundary */
        m = (m + 4095) & ~0xfff;
        t3_write_reg(adap, A_CIM_SDRAM_BASE_ADDR, m);
        t3_write_reg(adap, A_CIM_SDRAM_ADDR_SIZE, p->cm_size - m);

        /*
         * Grow nservers if the remaining CM supports fewer tids than MC5
         * currently expects.  NOTE(review): 3072 bytes/tid and the 3MB +
         * 32-tid reserve come from the vendor sizing — confirm against
         * the T3 memory-map documentation before changing.
         */
        tids = (p->cm_size - m - (3 << 20)) / 3072 - 32;
        m = t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
            adap->params.mc5.nfilters - adap->params.mc5.nroutes;
        if (tids < m)
                adap->params.mc5.nservers += m - tids;
}
2662 
/* Write TP indirect register @addr: stage the address, then the data. */
static inline void tp_wr_indirect(struct adapter *adap, unsigned int addr,
                                  u32 val)
{
        t3_write_reg(adap, A_TP_PIO_ADDR, addr);
        t3_write_reg(adap, A_TP_PIO_DATA, val);
}
2669 
/*
 * One-time TP (transport processor) configuration: checksum offloads,
 * TCP options, delayed ACKs, congestion/pacing behavior, and several
 * revision-dependent tweaks.  Values follow the vendor init sequence.
 */
static void tp_config(struct adapter *adap, const struct tp_params *p)
{
        t3_write_reg(adap, A_TP_GLOBAL_CONFIG, F_TXPACINGENABLE | F_PATHMTU |
                     F_IPCHECKSUMOFFLOAD | F_UDPCHECKSUMOFFLOAD |
                     F_TCPCHECKSUMOFFLOAD | V_IPTTL(64));
        t3_write_reg(adap, A_TP_TCP_OPTIONS, V_MTUDEFAULT(576) |
                     F_MTUENABLE | V_WINDOWSCALEMODE(1) |
                     V_TIMESTAMPSMODE(1) | V_SACKMODE(1) | V_SACKRX(1));
        t3_write_reg(adap, A_TP_DACK_CONFIG, V_AUTOSTATE3(1) |
                     V_AUTOSTATE2(1) | V_AUTOSTATE1(0) |
                     V_BYTETHRESHOLD(26880) | V_MSSTHRESHOLD(2) |
                     F_AUTOCAREFUL | F_AUTOENABLE | V_DACK_MODE(1));
        t3_set_reg_field(adap, A_TP_IN_CONFIG, F_RXFBARBPRIO | F_TXFBARBPRIO,
                         F_IPV6ENABLE | F_NICMODE);
        t3_write_reg(adap, A_TP_TX_RESOURCE_LIMIT, 0x18141814);
        t3_write_reg(adap, A_TP_PARA_REG4, 0x5050105);
        t3_set_reg_field(adap, A_TP_PARA_REG6, 0,
                         adap->params.rev > 0 ? F_ENABLEESND :
                         F_T3A_ENABLEESND);

        t3_set_reg_field(adap, A_TP_PC_CONFIG,
                         F_ENABLEEPCMDAFULL,
                         F_ENABLEOCSPIFULL |F_TXDEFERENABLE | F_HEARBEATDACK |
                         F_TXCONGESTIONMODE | F_RXCONGESTIONMODE);
        t3_set_reg_field(adap, A_TP_PC_CONFIG2, F_CHDRAFULL,
                         F_ENABLEIPV6RSS | F_ENABLENONOFDTNLSYN |
                         F_ENABLEARPMISS | F_DISBLEDAPARBIT0);
        /*
         * Two back-to-back writes; the second value (1000) is what sticks.
         * NOTE(review): matches the vendor init sequence, but the purpose
         * of the first write is not evident from this file — confirm.
         */
        t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1080);
        t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1000);

        if (adap->params.rev > 0) {
                tp_wr_indirect(adap, A_TP_EGRESS_CONFIG, F_REWRITEFORCETOSIZE);
                t3_set_reg_field(adap, A_TP_PARA_REG3, F_TXPACEAUTO,
                                 F_TXPACEAUTO);
                t3_set_reg_field(adap, A_TP_PC_CONFIG, F_LOCKTID, F_LOCKTID);
                t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEAUTOSTRICT);
        } else
                t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEFIXED);

        if (adap->params.rev == T3_REV_C)
                t3_set_reg_field(adap, A_TP_PC_CONFIG,
                                 V_TABLELATENCYDELTA(M_TABLELATENCYDELTA),
                                 V_TABLELATENCYDELTA(4));

        t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT1, 0);
        t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0, 0);
        t3_write_reg(adap, A_TP_MOD_CHANNEL_WEIGHT, 0);
        t3_write_reg(adap, A_TP_MOD_RATE_LIMIT, 0xf2200000);
}
2719 
2720 
#define TP_TMR_RES 50		/* target TP timer resolution, in usecs */


#define TP_DACK_TIMER 50	/* delayed-ACK timer, in ms */
#define TP_RTO_MIN    250	/* minimum retransmit timeout, in ms */


/**
 *	tp_set_timers - set TP timing parameters
 *	@adap: the adapter to set
 *	@core_clk: the core clock frequency in Hz
 *
 *	Set TP's timing parameters, i.e., the various timer resolutions and
 *	the TCP timer values.  Each resolution is programmed as a power-of-2
 *	divider of the core clock (hence the fls() calls below).
 */
static void tp_set_timers(struct adapter *adap, unsigned int core_clk)
{
	unsigned int tre = fls(core_clk / (1000000 / TP_TMR_RES)) - 1;
	unsigned int dack_re = fls(core_clk / 5000) - 1;	/* 200us */
	unsigned int tstamp_re = fls(core_clk / 1000);		/* 1ms, at least */
	unsigned int tps = core_clk >> tre;			/* timer ticks/sec */

	t3_write_reg(adap, A_TP_TIMER_RESOLUTION, V_TIMERRESOLUTION(tre) |
		     V_DELAYEDACKRESOLUTION(dack_re) |
		     V_TIMESTAMPRESOLUTION(tstamp_re));
	t3_write_reg(adap, A_TP_DACK_TIMER,
		     (core_clk >> dack_re) / (1000 / TP_DACK_TIMER));
	/* TCP retransmit backoff multipliers 0..15, four per register */
	t3_write_reg(adap, A_TP_TCP_BACKOFF_REG0, 0x3020100);
	t3_write_reg(adap, A_TP_TCP_BACKOFF_REG1, 0x7060504);
	t3_write_reg(adap, A_TP_TCP_BACKOFF_REG2, 0xb0a0908);
	t3_write_reg(adap, A_TP_TCP_BACKOFF_REG3, 0xf0e0d0c);
	t3_write_reg(adap, A_TP_SHIFT_CNT, V_SYNSHIFTMAX(6) |
		     V_RXTSHIFTMAXR1(4) | V_RXTSHIFTMAXR2(15) |
		     V_PERSHIFTBACKOFFMAX(8) | V_PERSHIFTMAX(8) |
		     V_KEEPALIVEMAX(9));

/* Convert a seconds count to timer ticks for the registers below. */
#define SECONDS * tps

	t3_write_reg(adap, A_TP_MSL, adap->params.rev > 0 ? 0 : 2 SECONDS);
	t3_write_reg(adap, A_TP_RXT_MIN, tps / (1000 / TP_RTO_MIN));
	t3_write_reg(adap, A_TP_RXT_MAX, 64 SECONDS);
	t3_write_reg(adap, A_TP_PERS_MIN, 5 SECONDS);
	t3_write_reg(adap, A_TP_PERS_MAX, 64 SECONDS);
	t3_write_reg(adap, A_TP_KEEP_IDLE, 7200 SECONDS);
	t3_write_reg(adap, A_TP_KEEP_INTVL, 75 SECONDS);
	t3_write_reg(adap, A_TP_INIT_SRTT, 3 SECONDS);
	t3_write_reg(adap, A_TP_FINWAIT2_TIMER, 600 SECONDS);

#undef SECONDS
}
2770 
2771 
2772 
2773 
2774 
2775 
2776 
2777 
2778 
2779 static int t3_tp_set_coalescing_size(struct adapter *adap,
2780                                      unsigned int size, int psh)
2781 {
2782         u32 val;
2783 
2784         if (size > MAX_RX_COALESCING_LEN)
2785                 return -EINVAL;
2786 
2787         val = t3_read_reg(adap, A_TP_PARA_REG3);
2788         val &= ~(F_RXCOALESCEENABLE | F_RXCOALESCEPSHEN);
2789 
2790         if (size) {
2791                 val |= F_RXCOALESCEENABLE;
2792                 if (psh)
2793                         val |= F_RXCOALESCEPSHEN;
2794                 size = min(MAX_RX_COALESCING_LEN, size);
2795                 t3_write_reg(adap, A_TP_PARA_REG2, V_RXCOALESCESIZE(size) |
2796                              V_MAXRXDATA(MAX_RX_COALESCING_LEN));
2797         }
2798         t3_write_reg(adap, A_TP_PARA_REG3, val);
2799         return 0;
2800 }
2801 
2802 
2803 
2804 
2805 
2806 
2807 
2808 
2809 
/**
 *	t3_tp_set_max_rxsize - set the max receive size
 *	@adap: the adapter
 *	@size: the max receive size, in bytes
 *
 *	Program the same maximum PM receive transfer length for both
 *	channels (XFERLEN0 and XFERLEN1).
 */
static void t3_tp_set_max_rxsize(struct adapter *adap, unsigned int size)
{
	t3_write_reg(adap, A_TP_PARA_REG7,
		     V_PMMAXXFERLEN0(size) | V_PMMAXXFERLEN1(size));
}
2815 
/*
 * Populate the 16-entry path-MTU table with the standard well-known MTU
 * values, from the 88-byte minimum up to 9600-byte jumbo frames.
 */
static void init_mtus(unsigned short mtus[])
{
	static const unsigned short default_mtus[16] = {
		88, 88, 256, 512, 576, 1024, 1280, 1492,
		1500, 2002, 2048, 4096, 4352, 8192, 9000, 9600
	};
	unsigned int idx;

	for (idx = 0; idx < 16; idx++)
		mtus[idx] = default_mtus[idx];
}
2840 
2841 
2842 
2843 
/*
 * Fill the default 32-entry congestion-control parameter tables:
 * @a holds the additive-increase (alpha) values, @b the multiplicative-
 * decrease shift (beta) values, both indexed by congestion window bucket.
 */
static void init_cong_ctrl(unsigned short *a, unsigned short *b)
{
	static const unsigned short alpha_tab[32] = {
		1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 3, 4, 5, 6, 7, 8,
		9, 10, 14, 17, 21, 25, 30, 35, 45, 60, 80, 100,
		200, 300, 400, 500
	};
	static const unsigned short beta_tab[32] = {
		0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 3,
		3, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 6, 6, 7, 7
	};
	unsigned int idx;

	for (idx = 0; idx < 32; idx++) {
		a[idx] = alpha_tab[idx];
		b[idx] = beta_tab[idx];
	}
}
2880 
2881 
#define CC_MIN_INCR 2U		/* minimum congestion-window increment */

/**
 *	t3_load_mtus - write the MTU and congestion control HW tables
 *	@adap: the adapter
 *	@mtus: the unrestricted values for the MTU table
 *	@alpha: the values for the congestion control alpha parameter
 *	@beta: the values for the congestion control beta parameter
 *	@mtu_cap: the maximum permitted effective MTU
 *
 *	Write the HW MTU table with the supplied MTUs, capping each at
 *	&mtu_cap.  For each (MTU, window) pair also compute and write the
 *	congestion-control increment derived from alpha and the average
 *	packet count for that window, floored at CC_MIN_INCR.
 */
void t3_load_mtus(struct adapter *adap, unsigned short mtus[NMTUS],
		  unsigned short alpha[NCCTRL_WIN],
		  unsigned short beta[NCCTRL_WIN], unsigned short mtu_cap)
{
	/* average packets per congestion window, per window index */
	static const unsigned int avg_pkts[NCCTRL_WIN] = {
		2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
		896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
		28672, 40960, 57344, 81920, 114688, 163840, 229376
	};

	unsigned int i, w;

	for (i = 0; i < NMTUS; ++i) {
		unsigned int mtu = min(mtus[i], mtu_cap);
		unsigned int log2 = fls(mtu);

		if (!(mtu & ((1 << log2) >> 2)))	/* round */
			log2--;
		t3_write_reg(adap, A_TP_MTU_TABLE,
			     (i << 24) | (log2 << 16) | mtu);

		for (w = 0; w < NCCTRL_WIN; ++w) {
			unsigned int inc;

			/* alpha-scaled per-packet increment (40 = hdr bytes),
			 * never below CC_MIN_INCR */
			inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
				  CC_MIN_INCR);

			t3_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
				     (w << 16) | (beta[w] << 13) | inc);
		}
	}
}
2928 
2929 
2930 
2931 
2932 
2933 
2934 
2935 
/**
 *	t3_tp_get_mib_stats - read TP's MIB counters
 *	@adap: the adapter
 *	@tps: holds the returned counter values
 *
 *	Returns the values of TP's MIB counters, read out word by word
 *	through the indirect MIB index/data register pair.
 */
void t3_tp_get_mib_stats(struct adapter *adap, struct tp_mib_stats *tps)
{
	t3_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_RDATA, (u32 *) tps,
			 sizeof(*tps) / sizeof(u32), 0);
}
2941 
/*
 * Program the [LLIMIT, ULIMIT] bounds of a ULP RX memory region and advance
 * @start past the region.  NOTE(review): multi-statement macro without a
 * do { } while (0) wrapper, and it modifies @start — callers must invoke it
 * as a full braced statement only.
 */
#define ulp_region(adap, name, start, len) \
	t3_write_reg((adap), A_ULPRX_ ## name ## _LLIMIT, (start)); \
	t3_write_reg((adap), A_ULPRX_ ## name ## _ULIMIT, \
		     (start) + (len) - 1); \
	start += len

/* Same as ulp_region() for a ULP TX region, but does not advance @start. */
#define ulptx_region(adap, name, start, len) \
	t3_write_reg((adap), A_ULPTX_ ## name ## _LLIMIT, (start)); \
	t3_write_reg((adap), A_ULPTX_ ## name ## _ULIMIT, \
		     (start) + (len) - 1)
2952 
/*
 * Lay out the ULP RX/TX memory regions inside a channel's RX memory.  @m is
 * the running start offset, advanced by each ulp_region() invocation; the
 * TPT and PBL ULP TX regions deliberately share the address ranges of the
 * following ULP RX regions (ulptx_region() does not advance @m).
 */
static void ulp_config(struct adapter *adap, const struct tp_params *p)
{
	unsigned int m = p->chan_rx_size;

	ulp_region(adap, ISCSI, m, p->chan_rx_size / 8);
	ulp_region(adap, TDDP, m, p->chan_rx_size / 8);
	ulptx_region(adap, TPT, m, p->chan_rx_size / 4);
	ulp_region(adap, STAG, m, p->chan_rx_size / 4);
	ulp_region(adap, RQ, m, p->chan_rx_size / 4);
	ulptx_region(adap, PBL, m, p->chan_rx_size / 4);
	ulp_region(adap, PBL, m, p->chan_rx_size / 4);
	t3_write_reg(adap, A_ULPRX_TDDP_TAGMASK, 0xffffffff);
}
2966 
2967 
2968 
2969 
2970 
2971 
2972 
2973 
2974 int t3_set_proto_sram(struct adapter *adap, const u8 *data)
2975 {
2976         int i;
2977         const __be32 *buf = (const __be32 *)data;
2978 
2979         for (i = 0; i < PROTO_SRAM_LINES; i++) {
2980                 t3_write_reg(adap, A_TP_EMBED_OP_FIELD5, be32_to_cpu(*buf++));
2981                 t3_write_reg(adap, A_TP_EMBED_OP_FIELD4, be32_to_cpu(*buf++));
2982                 t3_write_reg(adap, A_TP_EMBED_OP_FIELD3, be32_to_cpu(*buf++));
2983                 t3_write_reg(adap, A_TP_EMBED_OP_FIELD2, be32_to_cpu(*buf++));
2984                 t3_write_reg(adap, A_TP_EMBED_OP_FIELD1, be32_to_cpu(*buf++));
2985 
2986                 t3_write_reg(adap, A_TP_EMBED_OP_FIELD0, i << 1 | 1 << 31);
2987                 if (t3_wait_op_done(adap, A_TP_EMBED_OP_FIELD0, 1, 1, 5, 1))
2988                         return -EIO;
2989         }
2990         t3_write_reg(adap, A_TP_EMBED_OP_FIELD0, 0);
2991 
2992         return 0;
2993 }
2994 
/**
 *	t3_config_trace_filter - configure one of the tracing filters
 *	@adapter: the adapter
 *	@tp: the desired trace filter parameters
 *	@filter_index: which filter to configure (0 selects the Tx filter,
 *		non-zero the Rx filter)
 *	@invert: if set non-matching packets are traced instead of matching ones
 *	@enable: whether to enable or disable the filter
 *
 *	Configures one of the tracing filters available in HW.  The key and
 *	mask words pack sport/sip/dport/dip/proto/vlan/intf into four 32-bit
 *	values each, written as interleaved key/mask pairs.
 */
void t3_config_trace_filter(struct adapter *adapter,
			    const struct trace_params *tp, int filter_index,
			    int invert, int enable)
{
	u32 addr, key[4], mask[4];

	key[0] = tp->sport | (tp->sip << 16);
	key[1] = (tp->sip >> 16) | (tp->dport << 16);
	key[2] = tp->dip;
	key[3] = tp->proto | (tp->vlan << 8) | (tp->intf << 20);

	mask[0] = tp->sport_mask | (tp->sip_mask << 16);
	mask[1] = (tp->sip_mask >> 16) | (tp->dport_mask << 16);
	mask[2] = tp->dip_mask;
	mask[3] = tp->proto_mask | (tp->vlan_mask << 8) | (tp->intf_mask << 20);

	/* control bits live in the top of the last key word */
	if (invert)
		key[3] |= (1 << 29);
	if (enable)
		key[3] |= (1 << 28);

	addr = filter_index ? A_TP_RX_TRC_KEY0 : A_TP_TX_TRC_KEY0;
	tp_wr_indirect(adapter, addr++, key[0]);
	tp_wr_indirect(adapter, addr++, mask[0]);
	tp_wr_indirect(adapter, addr++, key[1]);
	tp_wr_indirect(adapter, addr++, mask[1]);
	tp_wr_indirect(adapter, addr++, key[2]);
	tp_wr_indirect(adapter, addr++, mask[2]);
	tp_wr_indirect(adapter, addr++, key[3]);
	tp_wr_indirect(adapter, addr, mask[3]);
	t3_read_reg(adapter, A_TP_PIO_DATA);	/* flush posted writes */
}
3027 
3028 
3029 
3030 
3031 
3032 
3033 
3034 
3035 
/**
 *	t3_config_sched - configure a HW traffic scheduler
 *	@adap: the adapter
 *	@kbps: target rate in Kbps
 *	@sched: the scheduler index
 *
 *	Configure a Tx HW scheduler for the target rate by searching for the
 *	(clocks-per-tick, bytes-per-tick) pair that best approximates it.
 *	A @kbps of 0 writes zero cpt/bpt parameters (presumably disabling
 *	rate limiting — confirm against HW docs).  Returns 0 on success or
 *	-EINVAL if no usable parameter pair exists for the rate.
 */
int t3_config_sched(struct adapter *adap, unsigned int kbps, int sched)
{
	unsigned int v, tps, cpt, bpt, delta, mindelta = ~0;
	unsigned int clk = adap->params.vpd.cclk * 1000;	/* core clock in Hz */
	unsigned int selected_cpt = 0, selected_bpt = 0;

	if (kbps > 0) {
		kbps *= 125;	/* -> bytes/sec */
		for (cpt = 1; cpt <= 255; cpt++) {
			tps = clk / cpt;	/* scheduler ticks per second */
			bpt = (kbps + tps / 2) / tps;	/* bytes/tick, rounded */
			if (bpt > 0 && bpt <= 255) {
				v = bpt * tps;
				delta = v >= kbps ? v - kbps : kbps - v;
				if (delta <= mindelta) {
					mindelta = delta;
					selected_cpt = cpt;
					selected_bpt = bpt;
				}
			} else if (selected_cpt)
				/* bpt grows with cpt, so once it overflows
				 * 255 no further cpt can work */
				break;
		}
		if (!selected_cpt)
			return -EINVAL;
	}
	/* each TM PIO word holds the parameters for two schedulers */
	t3_write_reg(adap, A_TP_TM_PIO_ADDR,
		     A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2);
	v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
	if (sched & 1)
		v = (v & 0xffff) | (selected_cpt << 16) | (selected_bpt << 24);
	else
		v = (v & 0xffff0000) | selected_cpt | (selected_bpt << 8);
	t3_write_reg(adap, A_TP_TM_PIO_DATA, v);
	return 0;
}
3071 
/*
 * Initialize the TP (protocol engine): apply the static configuration, and
 * for offload-capable adapters also program the TCP timers and kick off the
 * free-list initialization, polling F_FLSTINITENABLE until it clears.
 * Finally release TP from reset.  Returns 0 on success, non-zero if the
 * free-list initialization timed out.
 */
static int tp_init(struct adapter *adap, const struct tp_params *p)
{
	int busy = 0;

	tp_config(adap, p);
	t3_set_vlan_accel(adap, 3, 0);

	if (is_offload(adap)) {
		tp_set_timers(adap, adap->params.vpd.cclk * 1000);
		t3_write_reg(adap, A_TP_RESET, F_FLSTINITENABLE);
		busy = t3_wait_op_done(adap, A_TP_RESET, F_FLSTINITENABLE,
				       0, 1000, 5);
		if (busy)
			CH_ERR(adap, "TP initialization timed out\n");
	}

	/* only take TP out of reset if init did not time out */
	if (!busy)
		t3_write_reg(adap, A_TP_RESET, F_TPRESET);
	return busy;
}
3092 
3093 
3094 
3095 
3096 
/**
 *	chan_init_hw - channel-dependent HW initialization
 *	@adap: the adapter
 *	@chan_map: bitmap of Tx channels being used
 *
 *	Perform the bits of HW initialization that depend on which Tx
 *	channels are in use: arbitration mode, MPS port enables, PM Tx
 *	partitioning, and (for dual-channel operation) the Tx modulation
 *	queue mapping.
 */
static void chan_init_hw(struct adapter *adap, unsigned int chan_map)
{
	int i;

	if (chan_map != 3) {				/* one channel */
		t3_set_reg_field(adap, A_ULPRX_CTL, F_ROUND_ROBIN, 0);
		t3_set_reg_field(adap, A_ULPTX_CONFIG, F_CFG_RR_ARB, 0);
		t3_write_reg(adap, A_MPS_CFG, F_TPRXPORTEN | F_ENFORCEPKT |
			     (chan_map == 1 ? F_TPTXPORT0EN | F_PORT0ACTIVE :
					      F_TPTXPORT1EN | F_PORT1ACTIVE));
		t3_write_reg(adap, A_PM1_TX_CFG,
			     chan_map == 1 ? 0xffffffff : 0);
	} else {					/* two channels */
		t3_set_reg_field(adap, A_ULPRX_CTL, 0, F_ROUND_ROBIN);
		t3_set_reg_field(adap, A_ULPTX_CONFIG, 0, F_CFG_RR_ARB);
		t3_write_reg(adap, A_ULPTX_DMA_WEIGHT,
			     V_D1_WEIGHT(16) | V_D0_WEIGHT(16));
		t3_write_reg(adap, A_MPS_CFG, F_TPTXPORT0EN | F_TPTXPORT1EN |
			     F_TPRXPORTEN | F_PORT0ACTIVE | F_PORT1ACTIVE |
			     F_ENFORCEPKT);
		t3_write_reg(adap, A_PM1_TX_CFG, 0x80008000);
		t3_set_reg_field(adap, A_TP_PC_CONFIG, 0, F_TXTOSQUEUEMAPMODE);
		t3_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
			     V_TX_MOD_QUEUE_REQ_MAP(0xaa));
		/* program all 16 Tx modulation queue table entries */
		for (i = 0; i < 16; i++)
			t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE,
				     (i << 16) | 0x1010);
	}
}
3126 
/*
 * Perform MAC impedance calibration.  XAUI ports run the automatic
 * calibration, retried up to 5 times until neither CALFAULT nor CALBUSY is
 * set; RGMII ports are programmed with fixed pull-up/pull-down impedance
 * values followed by an impedance-set update.  Returns 0 on success, -1 if
 * XAUI calibration keeps failing.
 */
static int calibrate_xgm(struct adapter *adapter)
{
	if (uses_xaui(adapter)) {
		unsigned int v, i;

		for (i = 0; i < 5; ++i) {
			t3_write_reg(adapter, A_XGM_XAUI_IMP, 0);
			t3_read_reg(adapter, A_XGM_XAUI_IMP);	/* flush */
			msleep(1);
			v = t3_read_reg(adapter, A_XGM_XAUI_IMP);
			if (!(v & (F_XGM_CALFAULT | F_CALBUSY))) {
				/* feed the measured value back as the
				 * XAUI impedance setting */
				t3_write_reg(adapter, A_XGM_XAUI_IMP,
					     V_XAUIIMP(G_CALIMP(v) >> 2));
				return 0;
			}
		}
		CH_ERR(adapter, "MAC calibration failed\n");
		return -1;
	} else {
		t3_write_reg(adapter, A_XGM_RGMII_IMP,
			     V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
				 F_XGM_IMPSETUPDATE);
	}
	return 0;
}
3153 
/*
 * T3B-and-later variant of RGMII impedance calibration: program the fixed
 * impedance values, then pulse CALRESET, IMPSETUPDATE and CALUPDATE in
 * sequence.  XAUI ports need no calibration on these revisions.
 */
static void calibrate_xgm_t3b(struct adapter *adapter)
{
	if (!uses_xaui(adapter)) {
		t3_write_reg(adapter, A_XGM_RGMII_IMP, F_CALRESET |
			     F_CALUPDATE | V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALRESET, 0);
		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0,
				 F_XGM_IMPSETUPDATE);
		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
				 0);
		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALUPDATE, 0);
		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0, F_CALUPDATE);
	}
}
3168 
/*
 * MC7 (external memory controller) timing parameters written to A_MC7_PARM.
 * Field names follow DRAM timing terminology (activate/precharge/refresh
 * delays etc.); RefCyc is indexed by the memory density read from the MC7
 * configuration register.  Units are presumably controller clock cycles —
 * TODO(review): confirm against the T3 hardware documentation.
 */
struct mc7_timing_params {
	unsigned char ActToPreDly;
	unsigned char ActToRdWrDly;
	unsigned char PreCyc;
	unsigned char RefCyc[5];	/* indexed by G_DEN(cfg) density */
	unsigned char BkCyc;
	unsigned char WrToRdDly;
	unsigned char RdToWrDly;
};
3178 
3179 
3180 
3181 
3182 
3183 
/*
 * Write a value to an MC7 register and check that the write completed.
 * These writes normally complete within a cycle or two, so the single
 * re-read check suffices; the first read only flushes the posted write.
 * Returns 0 on success or -EIO if the BUSY bit is still set.
 */
static int wrreg_wait(struct adapter *adapter, unsigned int addr, u32 val)
{
	t3_write_reg(adapter, addr, val);
	t3_read_reg(adapter, addr);	/* flush */
	if (!(t3_read_reg(adapter, addr) & F_BUSY))
		return 0;
	CH_ERR(adapter, "write to MC7 register 0x%x timed out\n", addr);
	return -EIO;
}
3193 
/*
 * Initialize an MC7 memory controller and the attached memory.
 * @mc7_clock is the memory clock in kHz and @mem_type indexes the mode and
 * timing tables below.  The sequence is order-sensitive: interface enable,
 * optional calibration, timing parameters, clock enable, DRAM mode-register
 * programming, periodic-refresh setup, then a BIST pass over the whole
 * array before declaring the controller ready.  Returns 0 on success, -1 on
 * any failure.
 */
static int mc7_init(struct mc7 *mc7, unsigned int mc7_clock, int mem_type)
{
	/* DRAM mode-register values, indexed by mem_type */
	static const unsigned int mc7_mode[] = {
		0x632, 0x642, 0x652, 0x432, 0x442
	};
	/* timing parameter sets, indexed by mem_type */
	static const struct mc7_timing_params mc7_timings[] = {
		{12, 3, 4, {20, 28, 34, 52, 0}, 15, 6, 4},
		{12, 4, 5, {20, 28, 34, 52, 0}, 16, 7, 4},
		{12, 5, 6, {20, 28, 34, 52, 0}, 17, 8, 4},
		{9, 3, 4, {15, 21, 26, 39, 0}, 12, 6, 4},
		{9, 4, 5, {15, 21, 26, 39, 0}, 13, 7, 4}
	};

	u32 val;
	unsigned int width, density, slow, attempts;
	struct adapter *adapter = mc7->adapter;
	const struct mc7_timing_params *p = &mc7_timings[mem_type];

	if (!mc7->size)		/* nothing attached to this controller */
		return 0;

	val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
	slow = val & F_SLOW;
	width = G_WIDTH(val);
	density = G_DEN(val);

	t3_write_reg(adapter, mc7->offset + A_MC7_CFG, val | F_IFEN);
	val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);	/* flush */
	msleep(1);

	if (!slow) {
		/* single-shot I/O calibration */
		t3_write_reg(adapter, mc7->offset + A_MC7_CAL, F_SGL_CAL_EN);
		t3_read_reg(adapter, mc7->offset + A_MC7_CAL);	/* flush */
		msleep(1);
		if (t3_read_reg(adapter, mc7->offset + A_MC7_CAL) &
		    (F_BUSY | F_SGL_CAL_EN | F_CAL_FAULT)) {
			CH_ERR(adapter, "%s MC7 calibration timed out\n",
			       mc7->name);
			goto out_fail;
		}
	}

	t3_write_reg(adapter, mc7->offset + A_MC7_PARM,
		     V_ACTTOPREDLY(p->ActToPreDly) |
		     V_ACTTORDWRDLY(p->ActToRdWrDly) | V_PRECYC(p->PreCyc) |
		     V_REFCYC(p->RefCyc[density]) | V_BKCYC(p->BkCyc) |
		     V_WRTORDDLY(p->WrToRdDly) | V_RDTOWRDLY(p->RdToWrDly));

	t3_write_reg(adapter, mc7->offset + A_MC7_CFG,
		     val | F_CLKEN | F_TERM150);
	t3_read_reg(adapter, mc7->offset + A_MC7_CFG);	/* flush */

	if (!slow)
		t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLENB,
				 F_DLLENB);
	udelay(1);

	/* DRAM mode-register initialization sequence */
	val = slow ? 3 : 6;
	if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE2, 0) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE3, 0) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
		goto out_fail;

	if (!slow) {
		t3_write_reg(adapter, mc7->offset + A_MC7_MODE, 0x100);
		t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLRST, 0);
		udelay(5);
	}

	if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_MODE,
		       mc7_mode[mem_type]) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val | 0x380) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
		goto out_fail;

	/* clock value is in KHz */
	mc7_clock = mc7_clock * 7812 + mc7_clock / 2;	/* ns */
	mc7_clock /= 1000000;	/* KHz->MHz, ns->us */

	t3_write_reg(adapter, mc7->offset + A_MC7_REF,
		     F_PERREFEN | V_PREREFDIV(mc7_clock));
	t3_read_reg(adapter, mc7->offset + A_MC7_REF);	/* flush */

	/* BIST: zero-fill and verify the entire memory array */
	t3_write_reg(adapter, mc7->offset + A_MC7_ECC, F_ECCGENEN | F_ECCCHKEN);
	t3_write_reg(adapter, mc7->offset + A_MC7_BIST_DATA, 0);
	t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_BEG, 0);
	t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_END,
		     (mc7->size << width) - 1);
	t3_write_reg(adapter, mc7->offset + A_MC7_BIST_OP, V_OP(1));
	t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP);	/* flush */

	attempts = 50;
	do {
		msleep(250);
		val = t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP);
	} while ((val & F_BUSY) && --attempts);
	if (val & F_BUSY) {
		CH_ERR(adapter, "%s MC7 BIST timed out\n", mc7->name);
		goto out_fail;
	}

	/* Enable normal memory accesses. */
	t3_set_reg_field(adapter, mc7->offset + A_MC7_CFG, 0, F_RDY);
	return 0;

out_fail:
	return -1;
}
3306 
/*
 * Configure PCIe link parameters: pick ACK latency and replay-timer limits
 * from tables indexed by [log2(link width)][max payload size], adjusted for
 * the fast-training sequence counts, then enable link-down reset handling.
 */
static void config_pcie(struct adapter *adap)
{
	/* ACK latency, indexed by [log2(link width)][payload size code] */
	static const u16 ack_lat[4][6] = {
		{237, 416, 559, 1071, 2095, 4143},
		{128, 217, 289, 545, 1057, 2081},
		{73, 118, 154, 282, 538, 1050},
		{67, 107, 86, 150, 278, 534}
	};
	/* replay timer limit, same indexing as ack_lat */
	static const u16 rpl_tmr[4][6] = {
		{711, 1248, 1677, 3213, 6285, 12429},
		{384, 651, 867, 1635, 3171, 6243},
		{219, 354, 462, 846, 1614, 3150},
		{201, 321, 258, 450, 834, 1602}
	};

	u16 val, devid;
	unsigned int log2_width, pldsize;
	unsigned int fst_trn_rx, fst_trn_tx, acklat, rpllmt;

	pcie_capability_read_word(adap->pdev, PCI_EXP_DEVCTL, &val);
	pldsize = (val & PCI_EXP_DEVCTL_PAYLOAD) >> 5;

	/* NOTE(review): device ID 0x37 gets minimal read-request/payload
	 * sizes forced — presumably a board-specific quirk; confirm which
	 * part this targets. */
	pci_read_config_word(adap->pdev, 0x2, &devid);
	if (devid == 0x37) {
		pcie_capability_write_word(adap->pdev, PCI_EXP_DEVCTL,
					   val & ~PCI_EXP_DEVCTL_READRQ &
					   ~PCI_EXP_DEVCTL_PAYLOAD);
		pldsize = 0;
	}

	pcie_capability_read_word(adap->pdev, PCI_EXP_LNKCTL, &val);

	fst_trn_tx = G_NUMFSTTRNSEQ(t3_read_reg(adap, A_PCIE_PEX_CTRL0));
	fst_trn_rx = adap->params.rev == 0 ? fst_trn_tx :
	    G_NUMFSTTRNSEQRX(t3_read_reg(adap, A_PCIE_MODE));
	log2_width = fls(adap->params.pci.width) - 1;
	acklat = ack_lat[log2_width][pldsize];
	if (val & PCI_EXP_LNKCTL_ASPM_L0S)	/* check LOsEnable */
		acklat += fst_trn_tx * 4;
	rpllmt = rpl_tmr[log2_width][pldsize] + fst_trn_rx * 4;

	if (adap->params.rev == 0)
		t3_set_reg_field(adap, A_PCIE_PEX_CTRL1,
				 V_T3A_ACKLAT(M_T3A_ACKLAT),
				 V_T3A_ACKLAT(acklat));
	else
		t3_set_reg_field(adap, A_PCIE_PEX_CTRL1, V_ACKLAT(M_ACKLAT),
				 V_ACKLAT(acklat));

	t3_set_reg_field(adap, A_PCIE_PEX_CTRL0, V_REPLAYLMT(M_REPLAYLMT),
			 V_REPLAYLMT(rpllmt));

	t3_write_reg(adap, A_PCIE_PEX_ERR, 0xffffffff);
	t3_set_reg_field(adap, A_PCIE_CFG, 0,
			 F_ENABLELINKDWNDRST | F_ENABLELINKDOWNRST |
			 F_PCIE_DMASTOPEN | F_PCIE_CLIDECEN);
}
3364 
3365 
3366 
3367 
3368 
3369 
3370 
3371 
3372 
/**
 *	t3_init_hw - initialize and configure T3 HW modules
 *	@adapter: the adapter
 *	@fw_params: initial parameters to pass to firmware (optional)
 *
 *	Initialize and configure the T3 HW modules: MAC calibration, memory
 *	controllers, TCAM, TP, ULP, SGE and PCI(e)/channel configuration,
 *	then boot the uP from flash and wait for it to signal completion.
 *	Returns 0 on success, -EIO on any initialization failure or timeout.
 */
int t3_init_hw(struct adapter *adapter, u32 fw_params)
{
	int err = -EIO, attempts, i;
	const struct vpd_params *vpd = &adapter->params.vpd;

	if (adapter->params.rev > 0)
		calibrate_xgm_t3b(adapter);
	else if (calibrate_xgm(adapter))
		goto out_err;

	if (vpd->mclk) {
		partition_mem(adapter, &adapter->params.tp);

		if (mc7_init(&adapter->pmrx, vpd->mclk, vpd->mem_timing) ||
		    mc7_init(&adapter->pmtx, vpd->mclk, vpd->mem_timing) ||
		    mc7_init(&adapter->cm, vpd->mclk, vpd->mem_timing) ||
		    t3_mc5_init(&adapter->mc5, adapter->params.mc5.nservers,
				adapter->params.mc5.nfilters,
				adapter->params.mc5.nroutes))
			goto out_err;

		/* clear the CQ bit of the first 32 SGE contexts */
		for (i = 0; i < 32; i++)
			if (clear_sge_ctxt(adapter, i, F_CQ))
				goto out_err;
	}

	if (tp_init(adapter, &adapter->params.tp))
		goto out_err;

	t3_tp_set_coalescing_size(adapter,
				  min(adapter->params.sge.max_pkt_size,
				      MAX_RX_COALESCING_LEN), 1);
	t3_tp_set_max_rxsize(adapter,
			     min(adapter->params.sge.max_pkt_size, 16384U));
	ulp_config(adapter, &adapter->params.tp);

	if (is_pcie(adapter))
		config_pcie(adapter);
	else
		t3_set_reg_field(adapter, A_PCIX_CFG, 0,
				 F_DMASTOPEN | F_CLIDECEN);

	if (adapter->params.rev == T3_REV_C)
		t3_set_reg_field(adapter, A_ULPTX_CONFIG, 0,
				 F_CFG_CQE_SOP_MASK);

	t3_write_reg(adapter, A_PM1_RX_CFG, 0xffffffff);
	t3_write_reg(adapter, A_PM1_RX_MODE, 0);
	t3_write_reg(adapter, A_PM1_TX_MODE, 0);
	chan_init_hw(adapter, adapter->params.chan_map);
	t3_sge_init(adapter, &adapter->params.sge);
	t3_set_reg_field(adapter, A_PL_RST, 0, F_FATALPERREN);

	t3_write_reg(adapter, A_T3DBG_GPIO_ACT_LOW, calc_gpio_intr(adapter));

	/* boot the uP from flash; it clears HOST_ACC_DATA when done */
	t3_write_reg(adapter, A_CIM_HOST_ACC_DATA, vpd->uclk | fw_params);
	t3_write_reg(adapter, A_CIM_BOOT_CFG,
		     V_BOOTADDR(FW_FLASH_BOOT_ADDR >> 2));
	t3_read_reg(adapter, A_CIM_BOOT_CFG);	/* flush */

	attempts = 100;
	do {			/* wait for uP to initialize */
		msleep(20);
	} while (t3_read_reg(adapter, A_CIM_HOST_ACC_DATA) && --attempts);
	if (!attempts) {
		CH_ERR(adapter, "uP initialization timed out\n");
		goto out_err;
	}

	err = 0;
out_err:
	return err;
}
3446 
3447 
3448 
3449 
3450 
3451 
3452 
3453 
3454 
/*
 * Determine the PCI bus variant (PCI/PCI-X/PCIe), link width, and for
 * parallel PCI the bus speed, and record them in @p.  For PCIe the width
 * is the negotiated lane count from the Link Status register (bits 9:4).
 */
static void get_pci_mode(struct adapter *adapter, struct pci_params *p)
{
	static unsigned short speed_map[] = { 33, 66, 100, 133 };
	u32 pci_mode;

	if (pci_is_pcie(adapter->pdev)) {
		u16 val;

		p->variant = PCI_VARIANT_PCIE;
		pcie_capability_read_word(adapter->pdev, PCI_EXP_LNKSTA, &val);
		p->width = (val >> 4) & 0x3f;	/* negotiated link width */
		return;
	}

	pci_mode = t3_read_reg(adapter, A_PCIX_MODE);
	p->speed = speed_map[G_PCLKRANGE(pci_mode)];
	p->width = (pci_mode & F_64BIT) ? 64 : 32;
	pci_mode = G_PCIXINITPAT(pci_mode);
	if (pci_mode == 0)
		p->variant = PCI_VARIANT_PCI;
	else if (pci_mode < 4)
		p->variant = PCI_VARIANT_PCIX_MODE1_PARITY;
	else if (pci_mode < 8)
		p->variant = PCI_VARIANT_PCIX_MODE1_ECC;
	else
		p->variant = PCI_VARIANT_PCIX_266_MODE2;
}
3482 
3483 
3484 
3485 
3486 
3487 
3488 
3489 
3490 
3491 
3492 static void init_link_config(struct link_config *lc, unsigned int caps)
3493 {
3494         lc->supported = caps;
3495         lc->requested_speed = lc->speed = SPEED_INVALID;
3496         lc->requested_duplex = lc->duplex = DUPLEX_INVALID;
3497         lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
3498         if (lc->supported & SUPPORTED_Autoneg) {
3499                 lc->advertising = lc->supported;
3500                 lc->autoneg = AUTONEG_ENABLE;
3501                 lc->requested_fc |= PAUSE_AUTONEG;
3502         } else {
3503                 lc->advertising = 0;
3504                 lc->autoneg = AUTONEG_DISABLE;
3505         }
3506 }
3507 
3508 
3509 
3510 
3511 
3512 
3513 
3514 
3515 static unsigned int mc7_calc_size(u32 cfg)
3516 {
3517         unsigned int width = G_WIDTH(cfg);
3518         unsigned int banks = !!(cfg & F_BKS) + 1;
3519         unsigned int org = !!(cfg & F_ORG) + 1;
3520         unsigned int density = G_DEN(cfg);
3521         unsigned int MBs = ((256 << density) * banks) / (org << width);
3522 
3523         return MBs << 20;
3524 }
3525 
/*
 * Initialize an MC7 controller descriptor: record its name and register
 * offset, and derive the attached memory size and interface width from the
 * hardware configuration register.  A density field of M_DEN means no
 * memory is attached (size 0).
 */
static void mc7_prep(struct adapter *adapter, struct mc7 *mc7,
		     unsigned int base_addr, const char *name)
{
	u32 cfg;

	mc7->adapter = adapter;
	mc7->name = name;
	mc7->offset = base_addr - MC7_PMRX_BASE_ADDR;
	cfg = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
	mc7->size = G_DEN(cfg) == M_DEN ? 0 : mc7_calc_size(cfg);
	mc7->width = G_WIDTH(cfg);
}
3538 
/*
 * Initialize a MAC descriptor for the given port index.  Device ID 0x37
 * with no second XAUI configuration is a single-MAC part, so it always
 * uses MAC 0.  Rev-0 XAUI adapters additionally need their SerDes
 * programmed and RGMII disabled here.
 */
static void mac_prep(struct cmac *mac, struct adapter *adapter, int index)
{
	u16 devid;

	mac->adapter = adapter;
	pci_read_config_word(adapter->pdev, 0x2, &devid);

	if (devid == 0x37 && !adapter->params.vpd.xauicfg[1])
		index = 0;
	mac->offset = (XGMAC0_1_BASE_ADDR - XGMAC0_0_BASE_ADDR) * index;
	mac->nucast = 1;	/* one unicast address in use */

	if (adapter->params.rev == 0 && uses_xaui(adapter)) {
		t3_write_reg(adapter, A_XGM_SERDES_CTRL + mac->offset,
			     is_10G(adapter) ? 0x2901c04 : 0x2301c04);
		t3_set_reg_field(adapter, A_XGM_PORT_CFG + mac->offset,
				 F_ENRGMII, 0);
	}
}
3558 
/*
 * Early hardware initialization performed once per adapter: set up the
 * MDIO interface, clock dividers and GPIOs, and bring the XGMAC port
 * logic out of reset.  Must run before any MAC/PHY register access.
 */
static void early_hw_init(struct adapter *adapter,
                          const struct adapter_info *ai)
{
        u32 val = V_PORTSPEED(is_10G(adapter) ? 3 : 2);

        mi1_init(adapter, ai);
        /* I2C clock divider derived from the core clock read from VPD */
        t3_write_reg(adapter, A_I2C_CFG,
                     V_I2C_CLKDIV(adapter->params.vpd.cclk / 80 - 1));
        t3_write_reg(adapter, A_T3DBG_GPIO_EN,
                     ai->gpio_out | F_GPIO0_OEN | F_GPIO0_OUT_VAL);
        t3_write_reg(adapter, A_MC5_DB_SERVER_INDEX, 0);
        t3_write_reg(adapter, A_SG_OCO_BASE, V_BASE1(0xfff));

        if (adapter->params.rev == 0 || !uses_xaui(adapter))
                val |= F_ENRGMII;

        /* Write port config; the read-backs flush each posted write */
        t3_write_reg(adapter, A_XGM_PORT_CFG, val);
        t3_read_reg(adapter, A_XGM_PORT_CFG);

        /* then release the MAC clock-divider reset on both MAC instances */
        val |= F_CLKDIVRESET_;
        t3_write_reg(adapter, A_XGM_PORT_CFG, val);
        t3_read_reg(adapter, A_XGM_PORT_CFG);
        t3_write_reg(adapter, XGM_REG(A_XGM_PORT_CFG, 1), val);
        t3_read_reg(adapter, A_XGM_PORT_CFG);
}
3585 
3586 
3587 
3588 
3589 
3590 
3591 int t3_reset_adapter(struct adapter *adapter)
3592 {
3593         int i, save_and_restore_pcie =
3594             adapter->params.rev < T3_REV_B2 && is_pcie(adapter);
3595         uint16_t devid = 0;
3596 
3597         if (save_and_restore_pcie)
3598                 pci_save_state(adapter->pdev);
3599         t3_write_reg(adapter, A_PL_RST, F_CRSTWRM | F_CRSTWRMMODE);
3600 
3601         
3602 
3603 
3604 
3605         for (i = 0; i < 10; i++) {
3606                 msleep(50);
3607                 pci_read_config_word(adapter->pdev, 0x00, &devid);
3608                 if (devid == 0x1425)
3609                         break;
3610         }
3611 
3612         if (devid != 0x1425)
3613                 return -1;
3614 
3615         if (save_and_restore_pcie)
3616                 pci_restore_state(adapter->pdev);
3617         return 0;
3618 }
3619 
3620 static int init_parity(struct adapter *adap)
3621 {
3622         int i, err, addr;
3623 
3624         if (t3_read_reg(adap, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
3625                 return -EBUSY;
3626 
3627         for (err = i = 0; !err && i < 16; i++)
3628                 err = clear_sge_ctxt(adap, i, F_EGRESS);
3629         for (i = 0xfff0; !err && i <= 0xffff; i++)
3630                 err = clear_sge_ctxt(adap, i, F_EGRESS);
3631         for (i = 0; !err && i < SGE_QSETS; i++)
3632                 err = clear_sge_ctxt(adap, i, F_RESPONSEQ);
3633         if (err)
3634                 return err;
3635 
3636         t3_write_reg(adap, A_CIM_IBQ_DBG_DATA, 0);
3637         for (i = 0; i < 4; i++)
3638                 for (addr = 0; addr <= M_IBQDBGADDR; addr++) {
3639                         t3_write_reg(adap, A_CIM_IBQ_DBG_CFG, F_IBQDBGEN |
3640                                      F_IBQDBGWR | V_IBQDBGQID(i) |
3641                                      V_IBQDBGADDR(addr));
3642                         err = t3_wait_op_done(adap, A_CIM_IBQ_DBG_CFG,
3643                                               F_IBQDBGBUSY, 0, 2, 1);
3644                         if (err)
3645                                 return err;
3646                 }
3647         return 0;
3648 }
3649 
3650 
3651 
3652 
3653 
3654 
/*
 * t3_prep_adapter - one-time preparation of the adapter
 * @adapter: the adapter being prepared
 * @ai: board-specific constants for this adapter type
 * @reset: whether to perform a chip warm reset first
 *
 * Reads the chip revision and VPD, optionally resets the chip, sizes the
 * MC7 memory controllers and derives the TP memory partitioning from
 * them, decides whether the board supports offload, performs early
 * register initialization, and prepares the MAC and PHY of every port.
 * PHYs are left powered down.
 *
 * Returns 0 on success or a negative value on failure.
 */
int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai,
                    int reset)
{
        int ret;
        unsigned int i, j = -1;     /* j: VPD port slot, pre-incremented below */

        get_pci_mode(adapter, &adapter->params.pci);

        adapter->params.info = ai;
        adapter->params.nports = ai->nports0 + ai->nports1;
        adapter->params.chan_map = (!!ai->nports0) | (!!ai->nports1 << 1);
        adapter->params.rev = t3_read_reg(adapter, A_PL_REV);

        /*
         * Default link-poll and statistics-update periods; the link-poll
         * period may be shortened further below for PHYs lacking
         * interrupt support.
         */
        adapter->params.linkpoll_period = 10;
        adapter->params.stats_update_period = is_10G(adapter) ?
            MAC_STATS_ACCUM_SECS : (MAC_STATS_ACCUM_SECS * 10);
        adapter->params.pci.vpd_cap_addr =
            pci_find_capability(adapter->pdev, PCI_CAP_ID_VPD);
        ret = get_vpd_params(adapter, &adapter->params.vpd);
        if (ret < 0)
                return ret;

        if (reset && t3_reset_adapter(adapter))
                return -1;

        t3_sge_prep(adapter, &adapter->params.sge);

        /* a zero memory clock in the VPD means no external memory fitted */
        if (adapter->params.vpd.mclk) {
                struct tp_params *p = &adapter->params.tp;

                mc7_prep(adapter, &adapter->pmrx, MC7_PMRX_BASE_ADDR, "PMRX");
                mc7_prep(adapter, &adapter->pmtx, MC7_PMTX_BASE_ADDR, "PMTX");
                mc7_prep(adapter, &adapter->cm, MC7_CM_BASE_ADDR, "CM");

                p->nchan = adapter->params.chan_map == 3 ? 2 : 1;
                p->pmrx_size = t3_mc7_size(&adapter->pmrx);
                p->pmtx_size = t3_mc7_size(&adapter->pmtx);
                p->cm_size = t3_mc7_size(&adapter->cm);
                p->chan_rx_size = p->pmrx_size / 2;     /* Rx memory split in two */
                p->chan_tx_size = p->pmtx_size / p->nchan;
                p->rx_pg_size = 64 * 1024;
                p->tx_pg_size = is_10G(adapter) ? 64 * 1024 : 16 * 1024;
                p->rx_num_pgs = pm_num_pages(p->chan_rx_size, p->rx_pg_size);
                p->tx_num_pgs = pm_num_pages(p->chan_tx_size, p->tx_pg_size);
                p->ntimer_qs = p->cm_size >= (128 << 20) ||
                    adapter->params.rev > 0 ? 12 : 6;
        }

        /* offload requires all three MC7 memories to be present */
        adapter->params.offload = t3_mc7_size(&adapter->pmrx) &&
                                  t3_mc7_size(&adapter->pmtx) &&
                                  t3_mc7_size(&adapter->cm);

        if (is_offload(adapter)) {
                adapter->params.mc5.nservers = DEFAULT_NSERVERS;
                /* rev-0 parts get no TCAM filters */
                adapter->params.mc5.nfilters = adapter->params.rev > 0 ?
                    DEFAULT_NFILTERS : 0;
                adapter->params.mc5.nroutes = 0;
                t3_mc5_prep(adapter, &adapter->mc5, MC5_MODE_144_BIT);

                init_mtus(adapter->params.mtus);
                init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
        }

        early_hw_init(adapter, ai);
        ret = init_parity(adapter);
        if (ret)
                return ret;

        for_each_port(adapter, i) {
                u8 hw_addr[6];
                const struct port_type_info *pti;
                struct port_info *p = adap2pinfo(adapter, i);

                /* advance j to the next populated VPD port slot */
                while (!adapter->params.vpd.port_type[++j])
                        ;

                pti = &port_types[adapter->params.vpd.port_type[j]];
                if (!pti->phy_prep) {
                        CH_ALERT(adapter, "Invalid port type index %d\n",
                                 adapter->params.vpd.port_type[j]);
                        return -EINVAL;
                }

                p->phy.mdio.dev = adapter->port[i];
                ret = pti->phy_prep(&p->phy, adapter, ai->phy_base_addr + j,
                                    ai->mdio_ops);
                if (ret)
                        return ret;
                mac_prep(&p->mac, adapter, j);

                /*
                 * The VPD holds the card's base Ethernet address; each
                 * port's address is the base plus the port index in the
                 * last octet.
                 */
                memcpy(hw_addr, adapter->params.vpd.eth_base, 5);
                hw_addr[5] = adapter->params.vpd.eth_base[5] + i;

                memcpy(adapter->port[i]->dev_addr, hw_addr,
                       ETH_ALEN);
                init_link_config(&p->link_config, p->phy.caps);
                p->phy.ops->power_down(&p->phy, 1);

                /*
                 * PHYs that cannot signal link changes via interrupt fall
                 * back to a polling period of at most 10 ticks.
                 */
                if (!(p->phy.caps & SUPPORTED_IRQ) &&
                    adapter->params.linkpoll_period > 10)
                        adapter->params.linkpoll_period = 10;
        }

        return 0;
}
3777 
/*
 * Drive GPIO0 high, turning on the adapter's "ready" LED.
 */
void t3_led_ready(struct adapter *adapter)
{
        t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
                         F_GPIO0_OUT_VAL);
}
3783 
3784 int t3_replay_prep_adapter(struct adapter *adapter)
3785 {
3786         const struct adapter_info *ai = adapter->params.info;
3787         unsigned int i, j = -1;
3788         int ret;
3789 
3790         early_hw_init(adapter, ai);
3791         ret = init_parity(adapter);
3792         if (ret)
3793                 return ret;
3794 
3795         for_each_port(adapter, i) {
3796                 const struct port_type_info *pti;
3797                 struct port_info *p = adap2pinfo(adapter, i);
3798 
3799                 while (!adapter->params.vpd.port_type[++j])
3800                         ;
3801 
3802                 pti = &port_types[adapter->params.vpd.port_type[j]];
3803                 ret = pti->phy_prep(&p->phy, adapter, p->phy.mdio.prtad, NULL);
3804                 if (ret)
3805                         return ret;
3806                 p->phy.ops->power_down(&p->phy, 1);
3807         }
3808 
3809         return 0;
3810 }
3811