root/drivers/infiniband/hw/qib/qib_iba7322.c

/* [<][>][^][v][top][bottom][index][help] */

DEFINITIONS

This source file includes the following definitions.
  1. qib_read_ureg32
  2. qib_read_ureg
  3. qib_write_ureg
  4. qib_read_kreg32
  5. qib_read_kreg64
  6. qib_write_kreg
  7. qib_read_kreg_port
  8. qib_write_kreg_port
  9. qib_write_kreg_ctxt
  10. read_7322_creg
  11. read_7322_creg32
  12. write_7322_creg_port
  13. read_7322_creg_port
  14. read_7322_creg32_port
  15. qib_disarm_7322_senderrbufs
  16. err_decode
  17. flush_fifo
  18. qib_7322_sdma_sendctrl
  19. qib_7322_sdma_hw_clean_up
  20. qib_sdma_7322_setlengen
  21. qib_sdma_update_7322_tail
  22. qib_7322_sdma_hw_start_up
  23. sdma_7322_p_errors
  24. handle_7322_errors
  25. qib_error_tasklet
  26. reenable_chase
  27. disable_chase
  28. handle_serdes_issues
  29. handle_7322_p_errors
  30. qib_7322_set_intr_state
  31. qib_7322_clear_freeze
  32. qib_7322_handle_hwerrors
  33. qib_7322_init_hwerrors
  34. qib_set_7322_armlaunch
  35. qib_set_ib_7322_lstate
  36. set_vls
  37. qib_7322_bringup_serdes
  38. qib_7322_mini_quiet_serdes
  39. qib_setup_7322_setextled
  40. qib_7322_notify_dca
  41. qib_update_rhdrq_dca
  42. qib_update_sdma_dca
  43. qib_setup_dca
  44. qib_irq_notifier_notify
  45. qib_irq_notifier_release
  46. qib_7322_free_irq
  47. qib_setup_7322_cleanup
  48. sdma_7322_intr
  49. qib_wantpiobuf_7322_intr
  50. unknown_7322_ibits
  51. unknown_7322_gpio_intr
  52. unlikely_7322_intr
  53. adjust_rcv_timeout
  54. qib_7322intr
  55. qib_7322pintr
  56. qib_7322bufavail
  57. sdma_intr
  58. sdma_idle_intr
  59. sdma_progress_intr
  60. sdma_cleanup_intr
  61. reset_dca_notifier
  62. setup_dca_notifier
  63. qib_setup_7322_interrupt
  64. qib_7322_boardname
  65. qib_do_7322_reset
  66. qib_7322_put_tid
  67. qib_7322_clear_tids
  68. qib_7322_tidtemplate
  69. qib_7322_get_base_info
  70. qib_7322_get_msgheader
  71. qib_7322_config_ctxts
  72. qib_7322_get_ib_cfg
  73. qib_7322_set_ib_cfg
  74. qib_7322_set_loopback
  75. get_vl_weights
  76. set_vl_weights
  77. qib_7322_get_ib_table
  78. qib_7322_set_ib_table
  79. qib_update_7322_usrhead
  80. qib_7322_hdrqempty
  81. rcvctrl_7322_mod
  82. sendctrl_7322_mod
  83. qib_portcntr_7322
  84. init_7322_cntrnames
  85. qib_read_7322cntrs
  86. qib_read_7322portcntrs
  87. qib_get_7322_faststats
  88. qib_7322_intr_fallback
  89. qib_7322_mini_pcs_reset
  90. autoneg_7322_sendpkt
  91. qib_autoneg_7322_send
  92. set_7322_ibspeed_fast
  93. try_7322_autoneg
  94. autoneg_7322_work
  95. try_7322_ipg
  96. ipg_7322_work
  97. qib_7322_iblink_state
  98. qib_7322_phys_portstate
  99. qib_7322_ib_updown
  100. gpio_7322_mod
  101. qib_7322_eeprom_wen
  102. get_7322_chip_params
  103. qib_7322_set_baseaddrs
  104. sendctrl_hook
  105. qsfp_7322_event
  106. qib_init_7322_qsfp
  107. set_no_qsfp_atten
  108. setup_txselect
  109. qib_late_7322_initreg
  110. write_7322_init_portregs
  111. write_7322_initregs
  112. qib_init_7322_variables
  113. qib_7322_getsendbuf
  114. qib_set_cntr_7322_sample
  115. qib_sdma_set_7322_desc_cnt
  116. dump_sdma_7322_state
  117. qib_7322_sdma_init_early
  118. init_sdma_7322_regs
  119. qib_sdma_7322_gethead
  120. qib_sdma_7322_busy
  121. qib_7322_setpbc_control
  122. qib_7322_initvl15_bufs
  123. qib_7322_init_ctxt
  124. qib_7322_txchk_change
  125. writescratch
  126. qib_7322_tempsense_rd
  127. qib_init_iba7322_funcs
  128. set_txdds
  129. get_atten_table
  130. find_best_ent
  131. init_txdds_table
  132. ahb_mod
  133. ibsd_wr_allchans
  134. serdes_7322_los_enable
  135. serdes_7322_init
  136. serdes_7322_init_old
  137. serdes_7322_init_new
  138. set_man_code
  139. set_man_mode_h1
  140. clock_man
  141. write_tx_serdes_param
  142. adj_tx_serdes
  143. force_h1
  144. qib_r_grab
  145. qib_r_wait_for_rdy
  146. qib_r_shift
  147. qib_r_update
  148. setup_7322_link_recovery
  149. check_7322_rxe_status

   1 /*
   2  * Copyright (c) 2012 - 2017 Intel Corporation.  All rights reserved.
   3  * Copyright (c) 2008 - 2012 QLogic Corporation. All rights reserved.
   4  *
   5  * This software is available to you under a choice of one of two
   6  * licenses.  You may choose to be licensed under the terms of the GNU
   7  * General Public License (GPL) Version 2, available from the file
   8  * COPYING in the main directory of this source tree, or the
   9  * OpenIB.org BSD license below:
  10  *
  11  *     Redistribution and use in source and binary forms, with or
  12  *     without modification, are permitted provided that the following
  13  *     conditions are met:
  14  *
  15  *      - Redistributions of source code must retain the above
  16  *        copyright notice, this list of conditions and the following
  17  *        disclaimer.
  18  *
  19  *      - Redistributions in binary form must reproduce the above
  20  *        copyright notice, this list of conditions and the following
  21  *        disclaimer in the documentation and/or other materials
  22  *        provided with the distribution.
  23  *
  24  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  25  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  26  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  27  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  28  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  29  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  30  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  31  * SOFTWARE.
  32  */
  33 
  34 /*
  35  * This file contains all of the code that is specific to the
  36  * InfiniPath 7322 chip
  37  */
  38 
  39 #include <linux/interrupt.h>
  40 #include <linux/pci.h>
  41 #include <linux/delay.h>
  42 #include <linux/io.h>
  43 #include <linux/jiffies.h>
  44 #include <linux/module.h>
  45 #include <rdma/ib_verbs.h>
  46 #include <rdma/ib_smi.h>
  47 #ifdef CONFIG_INFINIBAND_QIB_DCA
  48 #include <linux/dca.h>
  49 #endif
  50 
  51 #include "qib.h"
  52 #include "qib_7322_regs.h"
  53 #include "qib_qsfp.h"
  54 
  55 #include "qib_mad.h"
  56 #include "qib_verbs.h"
  57 
  58 #undef pr_fmt
  59 #define pr_fmt(fmt) QIB_DRV_NAME " " fmt
  60 
  61 static void qib_setup_7322_setextled(struct qib_pportdata *, u32);
  62 static void qib_7322_handle_hwerrors(struct qib_devdata *, char *, size_t);
  63 static void sendctrl_7322_mod(struct qib_pportdata *ppd, u32 op);
  64 static irqreturn_t qib_7322intr(int irq, void *data);
  65 static irqreturn_t qib_7322bufavail(int irq, void *data);
  66 static irqreturn_t sdma_intr(int irq, void *data);
  67 static irqreturn_t sdma_idle_intr(int irq, void *data);
  68 static irqreturn_t sdma_progress_intr(int irq, void *data);
  69 static irqreturn_t sdma_cleanup_intr(int irq, void *data);
  70 static void qib_7322_txchk_change(struct qib_devdata *, u32, u32, u32,
  71                                   struct qib_ctxtdata *rcd);
  72 static u8 qib_7322_phys_portstate(u64);
  73 static u32 qib_7322_iblink_state(u64);
  74 static void qib_set_ib_7322_lstate(struct qib_pportdata *ppd, u16 linkcmd,
  75                                    u16 linitcmd);
  76 static void force_h1(struct qib_pportdata *);
  77 static void adj_tx_serdes(struct qib_pportdata *);
  78 static u32 qib_7322_setpbc_control(struct qib_pportdata *, u32, u8, u8);
  79 static void qib_7322_mini_pcs_reset(struct qib_pportdata *);
  80 
  81 static u32 ahb_mod(struct qib_devdata *, int, int, int, u32, u32);
  82 static void ibsd_wr_allchans(struct qib_pportdata *, int, unsigned, unsigned);
  83 static void serdes_7322_los_enable(struct qib_pportdata *, int);
  84 static int serdes_7322_init_old(struct qib_pportdata *);
  85 static int serdes_7322_init_new(struct qib_pportdata *);
  86 static void dump_sdma_7322_state(struct qib_pportdata *);
  87 
  88 #define BMASK(msb, lsb) (((1 << ((msb) + 1 - (lsb))) - 1) << (lsb))
  89 
  90 /* LE2 serdes values for different cases */
  91 #define LE2_DEFAULT 5
  92 #define LE2_5m 4
  93 #define LE2_QME 0
  94 
  95 /* Below is special-purpose, so only really works for the IB SerDes blocks. */
  96 #define IBSD(hw_pidx) (hw_pidx + 2)
  97 
  98 /* these are variables for documentation and experimentation purposes */
  99 static const unsigned rcv_int_timeout = 375;
 100 static const unsigned rcv_int_count = 16;
 101 static const unsigned sdma_idle_cnt = 64;
 102 
 103 /* Time to stop altering Rx Equalization parameters, after link up. */
 104 #define RXEQ_DISABLE_MSECS 2500
 105 
 106 /*
 107  * Number of VLs we are configured to use (to allow for more
 108  * credits per vl, etc.)
 109  */
 110 ushort qib_num_cfg_vls = 2;
 111 module_param_named(num_vls, qib_num_cfg_vls, ushort, S_IRUGO);
 112 MODULE_PARM_DESC(num_vls, "Set number of Virtual Lanes to use (1-8)");
 113 
 114 static ushort qib_chase = 1;
 115 module_param_named(chase, qib_chase, ushort, S_IRUGO);
 116 MODULE_PARM_DESC(chase, "Enable state chase handling");
 117 
 118 static ushort qib_long_atten = 10; /* 10 dB ~= 5m length */
 119 module_param_named(long_attenuation, qib_long_atten, ushort, S_IRUGO);
 120 MODULE_PARM_DESC(long_attenuation,
 121                  "attenuation cutoff (dB) for long copper cable setup");
 122 
 123 static ushort qib_singleport;
 124 module_param_named(singleport, qib_singleport, ushort, S_IRUGO);
 125 MODULE_PARM_DESC(singleport, "Use only IB port 1; more per-port buffer space");
 126 
 127 static ushort qib_krcvq01_no_msi;
 128 module_param_named(krcvq01_no_msi, qib_krcvq01_no_msi, ushort, S_IRUGO);
 129 MODULE_PARM_DESC(krcvq01_no_msi, "No MSI for kctx < 2");
 130 
 131 /*
 132  * Receive header queue sizes
 133  */
 134 static unsigned qib_rcvhdrcnt;
 135 module_param_named(rcvhdrcnt, qib_rcvhdrcnt, uint, S_IRUGO);
 136 MODULE_PARM_DESC(rcvhdrcnt, "receive header count");
 137 
 138 static unsigned qib_rcvhdrsize;
 139 module_param_named(rcvhdrsize, qib_rcvhdrsize, uint, S_IRUGO);
 140 MODULE_PARM_DESC(rcvhdrsize, "receive header size in 32-bit words");
 141 
 142 static unsigned qib_rcvhdrentsize;
 143 module_param_named(rcvhdrentsize, qib_rcvhdrentsize, uint, S_IRUGO);
 144 MODULE_PARM_DESC(rcvhdrentsize, "receive header entry size in 32-bit words");
 145 
 146 #define MAX_ATTEN_LEN 64 /* plenty for any real system */
 147 /* for read back, default index is ~5m copper cable */
 148 static char txselect_list[MAX_ATTEN_LEN] = "10";
 149 static struct kparam_string kp_txselect = {
 150         .string = txselect_list,
 151         .maxlen = MAX_ATTEN_LEN
 152 };
 153 static int  setup_txselect(const char *, const struct kernel_param *);
 154 module_param_call(txselect, setup_txselect, param_get_string,
 155                   &kp_txselect, S_IWUSR | S_IRUGO);
 156 MODULE_PARM_DESC(txselect,
 157                  "Tx serdes indices (for no QSFP or invalid QSFP data)");
 158 
 159 #define BOARD_QME7342 5
 160 #define BOARD_QMH7342 6
 161 #define BOARD_QMH7360 9
 162 #define IS_QMH(dd) (SYM_FIELD((dd)->revision, Revision, BoardID) == \
 163                     BOARD_QMH7342)
 164 #define IS_QME(dd) (SYM_FIELD((dd)->revision, Revision, BoardID) == \
 165                     BOARD_QME7342)
 166 
 167 #define KREG_IDX(regname)     (QIB_7322_##regname##_OFFS / sizeof(u64))
 168 
 169 #define KREG_IBPORT_IDX(regname) ((QIB_7322_##regname##_0_OFFS / sizeof(u64)))
 170 
 171 #define MASK_ACROSS(lsb, msb) \
 172         (((1ULL << ((msb) + 1 - (lsb))) - 1) << (lsb))
 173 
 174 #define SYM_RMASK(regname, fldname) ((u64)              \
 175         QIB_7322_##regname##_##fldname##_RMASK)
 176 
 177 #define SYM_MASK(regname, fldname) ((u64)               \
 178         QIB_7322_##regname##_##fldname##_RMASK <<       \
 179          QIB_7322_##regname##_##fldname##_LSB)
 180 
 181 #define SYM_FIELD(value, regname, fldname) ((u64)       \
 182         (((value) >> SYM_LSB(regname, fldname)) &       \
 183          SYM_RMASK(regname, fldname)))
 184 
 185 /* useful for things like LaFifoEmpty_0...7, TxCreditOK_0...7, etc. */
 186 #define SYM_FIELD_ACROSS(value, regname, fldname, nbits) \
 187         (((value) >> SYM_LSB(regname, fldname)) & MASK_ACROSS(0, nbits))
 188 
 189 #define HWE_MASK(fldname) SYM_MASK(HwErrMask, fldname##Mask)
 190 #define ERR_MASK(fldname) SYM_MASK(ErrMask, fldname##Mask)
 191 #define ERR_MASK_N(fldname) SYM_MASK(ErrMask_0, fldname##Mask)
 192 #define INT_MASK(fldname) SYM_MASK(IntMask, fldname##IntMask)
 193 #define INT_MASK_P(fldname, port) SYM_MASK(IntMask, fldname##IntMask##_##port)
 194 /* Below because most, but not all, fields of IntMask have that full suffix */
 195 #define INT_MASK_PM(fldname, port) SYM_MASK(IntMask, fldname##Mask##_##port)
 196 
 197 
 198 #define SYM_LSB(regname, fldname) (QIB_7322_##regname##_##fldname##_LSB)
 199 
 200 /*
 201  * the size bits give us 2^N, in KB units.  0 marks as invalid,
 202  * and 7 is reserved.  We currently use only 2KB and 4KB
 203  */
 204 #define IBA7322_TID_SZ_SHIFT QIB_7322_RcvTIDArray0_RT_BufSize_LSB
 205 #define IBA7322_TID_SZ_2K (1UL<<IBA7322_TID_SZ_SHIFT) /* 2KB */
 206 #define IBA7322_TID_SZ_4K (2UL<<IBA7322_TID_SZ_SHIFT) /* 4KB */
 207 #define IBA7322_TID_PA_SHIFT 11U /* TID addr in chip stored w/o low bits */
 208 
 209 #define SendIBSLIDAssignMask \
 210         QIB_7322_SendIBSLIDAssign_0_SendIBSLIDAssign_15_0_RMASK
 211 #define SendIBSLMCMask \
 212         QIB_7322_SendIBSLIDMask_0_SendIBSLIDMask_15_0_RMASK
 213 
 214 #define ExtLED_IB1_YEL SYM_MASK(EXTCtrl, LEDPort0YellowOn)
 215 #define ExtLED_IB1_GRN SYM_MASK(EXTCtrl, LEDPort0GreenOn)
 216 #define ExtLED_IB2_YEL SYM_MASK(EXTCtrl, LEDPort1YellowOn)
 217 #define ExtLED_IB2_GRN SYM_MASK(EXTCtrl, LEDPort1GreenOn)
 218 #define ExtLED_IB1_MASK (ExtLED_IB1_YEL | ExtLED_IB1_GRN)
 219 #define ExtLED_IB2_MASK (ExtLED_IB2_YEL | ExtLED_IB2_GRN)
 220 
 221 #define _QIB_GPIO_SDA_NUM 1
 222 #define _QIB_GPIO_SCL_NUM 0
 223 #define QIB_EEPROM_WEN_NUM 14
 224 #define QIB_TWSI_EEPROM_DEV 0xA2 /* All Production 7322 cards. */
 225 
 226 /* HW counter clock is at 4nsec */
 227 #define QIB_7322_PSXMITWAIT_CHECK_RATE 4000
 228 
 229 /* full speed IB port 1 only */
 230 #define PORT_SPD_CAP (QIB_IB_SDR | QIB_IB_DDR | QIB_IB_QDR)
 231 #define PORT_SPD_CAP_SHIFT 3
 232 
 233 /* full speed featuremask, both ports */
 234 #define DUAL_PORT_CAP (PORT_SPD_CAP | (PORT_SPD_CAP << PORT_SPD_CAP_SHIFT))
 235 
 236 /*
 237  * This file contains almost all the chip-specific register information and
 238  * access functions for the FAKED QLogic InfiniPath 7322 PCI-Express chip.
 239  */
 240 
 241 /* Use defines to tie machine-generated names to lower-case names */
 242 #define kr_contextcnt KREG_IDX(ContextCnt)
 243 #define kr_control KREG_IDX(Control)
 244 #define kr_counterregbase KREG_IDX(CntrRegBase)
 245 #define kr_errclear KREG_IDX(ErrClear)
 246 #define kr_errmask KREG_IDX(ErrMask)
 247 #define kr_errstatus KREG_IDX(ErrStatus)
 248 #define kr_extctrl KREG_IDX(EXTCtrl)
 249 #define kr_extstatus KREG_IDX(EXTStatus)
 250 #define kr_gpio_clear KREG_IDX(GPIOClear)
 251 #define kr_gpio_mask KREG_IDX(GPIOMask)
 252 #define kr_gpio_out KREG_IDX(GPIOOut)
 253 #define kr_gpio_status KREG_IDX(GPIOStatus)
 254 #define kr_hwdiagctrl KREG_IDX(HwDiagCtrl)
 255 #define kr_debugportval KREG_IDX(DebugPortValueReg)
 256 #define kr_fmask KREG_IDX(feature_mask)
 257 #define kr_act_fmask KREG_IDX(active_feature_mask)
 258 #define kr_hwerrclear KREG_IDX(HwErrClear)
 259 #define kr_hwerrmask KREG_IDX(HwErrMask)
 260 #define kr_hwerrstatus KREG_IDX(HwErrStatus)
 261 #define kr_intclear KREG_IDX(IntClear)
 262 #define kr_intmask KREG_IDX(IntMask)
 263 #define kr_intredirect KREG_IDX(IntRedirect0)
 264 #define kr_intstatus KREG_IDX(IntStatus)
 265 #define kr_pagealign KREG_IDX(PageAlign)
 266 #define kr_rcvavailtimeout KREG_IDX(RcvAvailTimeOut0)
 267 #define kr_rcvctrl KREG_IDX(RcvCtrl) /* Common, but chip also has per-port */
 268 #define kr_rcvegrbase KREG_IDX(RcvEgrBase)
 269 #define kr_rcvegrcnt KREG_IDX(RcvEgrCnt)
 270 #define kr_rcvhdrcnt KREG_IDX(RcvHdrCnt)
 271 #define kr_rcvhdrentsize KREG_IDX(RcvHdrEntSize)
 272 #define kr_rcvhdrsize KREG_IDX(RcvHdrSize)
 273 #define kr_rcvtidbase KREG_IDX(RcvTIDBase)
 274 #define kr_rcvtidcnt KREG_IDX(RcvTIDCnt)
 275 #define kr_revision KREG_IDX(Revision)
 276 #define kr_scratch KREG_IDX(Scratch)
 277 #define kr_sendbuffererror KREG_IDX(SendBufErr0) /* and base for 1 and 2 */
 278 #define kr_sendcheckmask KREG_IDX(SendCheckMask0) /* and 1, 2 */
 279 #define kr_sendctrl KREG_IDX(SendCtrl)
 280 #define kr_sendgrhcheckmask KREG_IDX(SendGRHCheckMask0) /* and 1, 2 */
 281 #define kr_sendibpktmask KREG_IDX(SendIBPacketMask0) /* and 1, 2 */
 282 #define kr_sendpioavailaddr KREG_IDX(SendBufAvailAddr)
 283 #define kr_sendpiobufbase KREG_IDX(SendBufBase)
 284 #define kr_sendpiobufcnt KREG_IDX(SendBufCnt)
 285 #define kr_sendpiosize KREG_IDX(SendBufSize)
 286 #define kr_sendregbase KREG_IDX(SendRegBase)
 287 #define kr_sendbufavail0 KREG_IDX(SendBufAvail0)
 288 #define kr_userregbase KREG_IDX(UserRegBase)
 289 #define kr_intgranted KREG_IDX(Int_Granted)
 290 #define kr_vecclr_wo_int KREG_IDX(vec_clr_without_int)
 291 #define kr_intblocked KREG_IDX(IntBlocked)
 292 #define kr_r_access KREG_IDX(SPC_JTAG_ACCESS_REG)
 293 
 294 /*
 295  * per-port kernel registers.  Access only with qib_read_kreg_port()
 296  * or qib_write_kreg_port()
 297  */
 298 #define krp_errclear KREG_IBPORT_IDX(ErrClear)
 299 #define krp_errmask KREG_IBPORT_IDX(ErrMask)
 300 #define krp_errstatus KREG_IBPORT_IDX(ErrStatus)
 301 #define krp_highprio_0 KREG_IBPORT_IDX(HighPriority0)
 302 #define krp_highprio_limit KREG_IBPORT_IDX(HighPriorityLimit)
 303 #define krp_hrtbt_guid KREG_IBPORT_IDX(HRTBT_GUID)
 304 #define krp_ib_pcsconfig KREG_IBPORT_IDX(IBPCSConfig)
 305 #define krp_ibcctrl_a KREG_IBPORT_IDX(IBCCtrlA)
 306 #define krp_ibcctrl_b KREG_IBPORT_IDX(IBCCtrlB)
 307 #define krp_ibcctrl_c KREG_IBPORT_IDX(IBCCtrlC)
 308 #define krp_ibcstatus_a KREG_IBPORT_IDX(IBCStatusA)
 309 #define krp_ibcstatus_b KREG_IBPORT_IDX(IBCStatusB)
 310 #define krp_txestatus KREG_IBPORT_IDX(TXEStatus)
 311 #define krp_lowprio_0 KREG_IBPORT_IDX(LowPriority0)
 312 #define krp_ncmodectrl KREG_IBPORT_IDX(IBNCModeCtrl)
 313 #define krp_partitionkey KREG_IBPORT_IDX(RcvPartitionKey)
 314 #define krp_psinterval KREG_IBPORT_IDX(PSInterval)
 315 #define krp_psstart KREG_IBPORT_IDX(PSStart)
 316 #define krp_psstat KREG_IBPORT_IDX(PSStat)
 317 #define krp_rcvbthqp KREG_IBPORT_IDX(RcvBTHQP)
 318 #define krp_rcvctrl KREG_IBPORT_IDX(RcvCtrl)
 319 #define krp_rcvpktledcnt KREG_IBPORT_IDX(RcvPktLEDCnt)
 320 #define krp_rcvqpmaptable KREG_IBPORT_IDX(RcvQPMapTableA)
 321 #define krp_rxcreditvl0 KREG_IBPORT_IDX(RxCreditVL0)
 322 #define krp_rxcreditvl15 (KREG_IBPORT_IDX(RxCreditVL0)+15)
 323 #define krp_sendcheckcontrol KREG_IBPORT_IDX(SendCheckControl)
 324 #define krp_sendctrl KREG_IBPORT_IDX(SendCtrl)
 325 #define krp_senddmabase KREG_IBPORT_IDX(SendDmaBase)
 326 #define krp_senddmabufmask0 KREG_IBPORT_IDX(SendDmaBufMask0)
 327 #define krp_senddmabufmask1 (KREG_IBPORT_IDX(SendDmaBufMask0) + 1)
 328 #define krp_senddmabufmask2 (KREG_IBPORT_IDX(SendDmaBufMask0) + 2)
 329 #define krp_senddmabuf_use0 KREG_IBPORT_IDX(SendDmaBufUsed0)
 330 #define krp_senddmabuf_use1 (KREG_IBPORT_IDX(SendDmaBufUsed0) + 1)
 331 #define krp_senddmabuf_use2 (KREG_IBPORT_IDX(SendDmaBufUsed0) + 2)
 332 #define krp_senddmadesccnt KREG_IBPORT_IDX(SendDmaDescCnt)
 333 #define krp_senddmahead KREG_IBPORT_IDX(SendDmaHead)
 334 #define krp_senddmaheadaddr KREG_IBPORT_IDX(SendDmaHeadAddr)
 335 #define krp_senddmaidlecnt KREG_IBPORT_IDX(SendDmaIdleCnt)
 336 #define krp_senddmalengen KREG_IBPORT_IDX(SendDmaLenGen)
 337 #define krp_senddmaprioritythld KREG_IBPORT_IDX(SendDmaPriorityThld)
 338 #define krp_senddmareloadcnt KREG_IBPORT_IDX(SendDmaReloadCnt)
 339 #define krp_senddmastatus KREG_IBPORT_IDX(SendDmaStatus)
 340 #define krp_senddmatail KREG_IBPORT_IDX(SendDmaTail)
 341 #define krp_sendhdrsymptom KREG_IBPORT_IDX(SendHdrErrSymptom)
 342 #define krp_sendslid KREG_IBPORT_IDX(SendIBSLIDAssign)
 343 #define krp_sendslidmask KREG_IBPORT_IDX(SendIBSLIDMask)
 344 #define krp_ibsdtestiftx KREG_IBPORT_IDX(IB_SDTEST_IF_TX)
 345 #define krp_adapt_dis_timer KREG_IBPORT_IDX(ADAPT_DISABLE_TIMER_THRESHOLD)
 346 #define krp_tx_deemph_override KREG_IBPORT_IDX(IBSD_TX_DEEMPHASIS_OVERRIDE)
 347 #define krp_serdesctrl KREG_IBPORT_IDX(IBSerdesCtrl)
 348 
 349 /*
 350  * Per-context kernel registers.  Access only with qib_read_kreg_ctxt()
 351  * or qib_write_kreg_ctxt()
 352  */
 353 #define krc_rcvhdraddr KREG_IDX(RcvHdrAddr0)
 354 #define krc_rcvhdrtailaddr KREG_IDX(RcvHdrTailAddr0)
 355 
 356 /*
 357  * TID Flow table, per context.  Reduces
 358  * number of hdrq updates to one per flow (or on errors).
 359  * context 0 and 1 share same memory, but have distinct
 360  * addresses.  Since for now, we never use expected sends
 361  * on kernel contexts, we don't worry about that (we initialize
 362  * those entries for ctxt 0/1 on driver load twice, for example).
 363  */
 364 #define NUM_TIDFLOWS_CTXT 0x20 /* 0x20 per context; have to hardcode */
 365 #define ur_rcvflowtable (KREG_IDX(RcvTIDFlowTable0) - KREG_IDX(RcvHdrTail0))
 366 
 367 /* these are the error bits in the tid flows, and are W1C */
 368 #define TIDFLOW_ERRBITS  ( \
 369         (SYM_MASK(RcvTIDFlowTable0, GenMismatch) << \
 370         SYM_LSB(RcvTIDFlowTable0, GenMismatch)) | \
 371         (SYM_MASK(RcvTIDFlowTable0, SeqMismatch) << \
 372         SYM_LSB(RcvTIDFlowTable0, SeqMismatch)))
 373 
 374 /* Most (not all) Counters are per-IBport.
 375  * Requires LBIntCnt is at offset 0 in the group
 376  */
 377 #define CREG_IDX(regname) \
 378 ((QIB_7322_##regname##_0_OFFS - QIB_7322_LBIntCnt_OFFS) / sizeof(u64))
 379 
 380 #define crp_badformat CREG_IDX(RxVersionErrCnt)
 381 #define crp_err_rlen CREG_IDX(RxLenErrCnt)
 382 #define crp_erricrc CREG_IDX(RxICRCErrCnt)
 383 #define crp_errlink CREG_IDX(RxLinkMalformCnt)
 384 #define crp_errlpcrc CREG_IDX(RxLPCRCErrCnt)
 385 #define crp_errpkey CREG_IDX(RxPKeyMismatchCnt)
 386 #define crp_errvcrc CREG_IDX(RxVCRCErrCnt)
 387 #define crp_excessbufferovfl CREG_IDX(ExcessBufferOvflCnt)
 388 #define crp_iblinkdown CREG_IDX(IBLinkDownedCnt)
 389 #define crp_iblinkerrrecov CREG_IDX(IBLinkErrRecoveryCnt)
 390 #define crp_ibstatuschange CREG_IDX(IBStatusChangeCnt)
 391 #define crp_ibsymbolerr CREG_IDX(IBSymbolErrCnt)
 392 #define crp_invalidrlen CREG_IDX(RxMaxMinLenErrCnt)
 393 #define crp_locallinkintegrityerr CREG_IDX(LocalLinkIntegrityErrCnt)
 394 #define crp_pktrcv CREG_IDX(RxDataPktCnt)
 395 #define crp_pktrcvflowctrl CREG_IDX(RxFlowPktCnt)
 396 #define crp_pktsend CREG_IDX(TxDataPktCnt)
 397 #define crp_pktsendflow CREG_IDX(TxFlowPktCnt)
 398 #define crp_psrcvdatacount CREG_IDX(PSRcvDataCount)
 399 #define crp_psrcvpktscount CREG_IDX(PSRcvPktsCount)
 400 #define crp_psxmitdatacount CREG_IDX(PSXmitDataCount)
 401 #define crp_psxmitpktscount CREG_IDX(PSXmitPktsCount)
 402 #define crp_psxmitwaitcount CREG_IDX(PSXmitWaitCount)
 403 #define crp_rcvebp CREG_IDX(RxEBPCnt)
 404 #define crp_rcvflowctrlviol CREG_IDX(RxFlowCtrlViolCnt)
 405 #define crp_rcvovfl CREG_IDX(RxBufOvflCnt)
 406 #define crp_rxdlidfltr CREG_IDX(RxDlidFltrCnt)
 407 #define crp_rxdroppkt CREG_IDX(RxDroppedPktCnt)
 408 #define crp_rxotherlocalphyerr CREG_IDX(RxOtherLocalPhyErrCnt)
 409 #define crp_rxqpinvalidctxt CREG_IDX(RxQPInvalidContextCnt)
 410 #define crp_rxvlerr CREG_IDX(RxVlErrCnt)
 411 #define crp_sendstall CREG_IDX(TxFlowStallCnt)
 412 #define crp_txdroppedpkt CREG_IDX(TxDroppedPktCnt)
 413 #define crp_txhdrerr CREG_IDX(TxHeadersErrCnt)
 414 #define crp_txlenerr CREG_IDX(TxLenErrCnt)
 415 #define crp_txminmaxlenerr CREG_IDX(TxMaxMinLenErrCnt)
 416 #define crp_txsdmadesc CREG_IDX(TxSDmaDescCnt)
 417 #define crp_txunderrun CREG_IDX(TxUnderrunCnt)
 418 #define crp_txunsupvl CREG_IDX(TxUnsupVLErrCnt)
 419 #define crp_vl15droppedpkt CREG_IDX(RxVL15DroppedPktCnt)
 420 #define crp_wordrcv CREG_IDX(RxDwordCnt)
 421 #define crp_wordsend CREG_IDX(TxDwordCnt)
 422 #define crp_tx_creditstalls CREG_IDX(TxCreditUpToDateTimeOut)
 423 
 424 /* these are the (few) counters that are not port-specific */
 425 #define CREG_DEVIDX(regname) ((QIB_7322_##regname##_OFFS - \
 426                         QIB_7322_LBIntCnt_OFFS) / sizeof(u64))
 427 #define cr_base_egrovfl CREG_DEVIDX(RxP0HdrEgrOvflCnt)
 428 #define cr_lbint CREG_DEVIDX(LBIntCnt)
 429 #define cr_lbstall CREG_DEVIDX(LBFlowStallCnt)
 430 #define cr_pcieretrydiag CREG_DEVIDX(PcieRetryBufDiagQwordCnt)
 431 #define cr_rxtidflowdrop CREG_DEVIDX(RxTidFlowDropCnt)
 432 #define cr_tidfull CREG_DEVIDX(RxTIDFullErrCnt)
 433 #define cr_tidinvalid CREG_DEVIDX(RxTIDValidErrCnt)
 434 
 435 /* no chip register for # of IB ports supported, so define */
 436 #define NUM_IB_PORTS 2
 437 
 438 /* 1 VL15 buffer per hardware IB port, no register for this, so define */
 439 #define NUM_VL15_BUFS NUM_IB_PORTS
 440 
 441 /*
 442  * context 0 and 1 are special, and there is no chip register that
 443  * defines this value, so we have to define it here.
 444  * These are all allocated to either 0 or 1 for single port
 445  * hardware configuration, otherwise each gets half
 446  */
 447 #define KCTXT0_EGRCNT 2048
 448 
 449 /* values for vl and port fields in PBC, 7322-specific */
 450 #define PBC_PORT_SEL_LSB 26
 451 #define PBC_PORT_SEL_RMASK 1
 452 #define PBC_VL_NUM_LSB 27
 453 #define PBC_VL_NUM_RMASK 7
 454 #define PBC_7322_VL15_SEND (1ULL << 63) /* pbc; VL15, no credit check */
 455 #define PBC_7322_VL15_SEND_CTRL (1ULL << 31) /* control version of same */
 456 
      /*
       * Per-rate delay factors, indexed by the IB static-rate enum.
       * Values scale roughly as 40 / rate-in-Gbps (QDR 4X == 1); rates
       * without an explicit initializer remain 0.
       * NOTE(review): presumably used as an inter-packet delay
       * multiplier for static rate control — confirm at the use site.
       */
  457 static u8 ib_rate_to_delay[IB_RATE_120_GBPS + 1] = {
  458         [IB_RATE_2_5_GBPS] = 16,
  459         [IB_RATE_5_GBPS] = 8,
  460         [IB_RATE_10_GBPS] = 4,
  461         [IB_RATE_20_GBPS] = 2,
  462         [IB_RATE_30_GBPS] = 2,
  463         [IB_RATE_40_GBPS] = 1
  464 };
 465 
      /*
       * Printable names for the SDMA software state machine, indexed by
       * enum qib_sdma_state (s00 hardware-down through s99 running).
       */
  466 static const char * const qib_sdma_state_names[] = {
  467         [qib_sdma_state_s00_hw_down]          = "s00_HwDown",
  468         [qib_sdma_state_s10_hw_start_up_wait] = "s10_HwStartUpWait",
  469         [qib_sdma_state_s20_idle]             = "s20_Idle",
  470         [qib_sdma_state_s30_sw_clean_up_wait] = "s30_SwCleanUpWait",
  471         [qib_sdma_state_s40_hw_clean_up_wait] = "s40_HwCleanUpWait",
  472         [qib_sdma_state_s50_hw_halt_wait]     = "s50_HwHaltWait",
  473         [qib_sdma_state_s99_running]          = "s99_Running",
  474 };
 475 
 476 #define IBA7322_LINKSPEED_SHIFT SYM_LSB(IBCStatusA_0, LinkSpeedActive)
 477 #define IBA7322_LINKWIDTH_SHIFT SYM_LSB(IBCStatusA_0, LinkWidthActive)
 478 
 479 /* link training states, from IBC */
 480 #define IB_7322_LT_STATE_DISABLED        0x00
 481 #define IB_7322_LT_STATE_LINKUP          0x01
 482 #define IB_7322_LT_STATE_POLLACTIVE      0x02
 483 #define IB_7322_LT_STATE_POLLQUIET       0x03
 484 #define IB_7322_LT_STATE_SLEEPDELAY      0x04
 485 #define IB_7322_LT_STATE_SLEEPQUIET      0x05
 486 #define IB_7322_LT_STATE_CFGDEBOUNCE     0x08
 487 #define IB_7322_LT_STATE_CFGRCVFCFG      0x09
 488 #define IB_7322_LT_STATE_CFGWAITRMT      0x0a
 489 #define IB_7322_LT_STATE_CFGIDLE         0x0b
 490 #define IB_7322_LT_STATE_RECOVERRETRAIN  0x0c
 491 #define IB_7322_LT_STATE_TXREVLANES      0x0d
 492 #define IB_7322_LT_STATE_RECOVERWAITRMT  0x0e
 493 #define IB_7322_LT_STATE_RECOVERIDLE     0x0f
 494 #define IB_7322_LT_STATE_CFGENH          0x10
 495 #define IB_7322_LT_STATE_CFGTEST         0x11
 496 #define IB_7322_LT_STATE_CFGWAITRMTTEST  0x12
 497 #define IB_7322_LT_STATE_CFGWAITENH      0x13
 498 
 499 /* link state machine states from IBC */
 500 #define IB_7322_L_STATE_DOWN             0x0
 501 #define IB_7322_L_STATE_INIT             0x1
 502 #define IB_7322_L_STATE_ARM              0x2
 503 #define IB_7322_L_STATE_ACTIVE           0x3
 504 #define IB_7322_L_STATE_ACT_DEFER        0x4
 505 
      /*
       * Translate the 5-bit IBC link-training state (IB_7322_LT_STATE_*)
       * into the generic IB_PHYSPORTSTATE_* value reported to the stack.
       * The table covers all 0x20 encodings; indices without an explicit
       * initializer map to 0.  0x14-0x17 have no named LT_STATE macro
       * and are treated as additional config/training states.
       */
  506 static const u8 qib_7322_physportstate[0x20] = {
  507         [IB_7322_LT_STATE_DISABLED] = IB_PHYSPORTSTATE_DISABLED,
  508         [IB_7322_LT_STATE_LINKUP] = IB_PHYSPORTSTATE_LINKUP,
  509         [IB_7322_LT_STATE_POLLACTIVE] = IB_PHYSPORTSTATE_POLL,
  510         [IB_7322_LT_STATE_POLLQUIET] = IB_PHYSPORTSTATE_POLL,
  511         [IB_7322_LT_STATE_SLEEPDELAY] = IB_PHYSPORTSTATE_SLEEP,
  512         [IB_7322_LT_STATE_SLEEPQUIET] = IB_PHYSPORTSTATE_SLEEP,
  513         [IB_7322_LT_STATE_CFGDEBOUNCE] = IB_PHYSPORTSTATE_CFG_TRAIN,
  514         [IB_7322_LT_STATE_CFGRCVFCFG] =
  515                 IB_PHYSPORTSTATE_CFG_TRAIN,
  516         [IB_7322_LT_STATE_CFGWAITRMT] =
  517                 IB_PHYSPORTSTATE_CFG_TRAIN,
  518         [IB_7322_LT_STATE_CFGIDLE] = IB_PHYSPORTSTATE_CFG_IDLE,
  519         [IB_7322_LT_STATE_RECOVERRETRAIN] =
  520                 IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
  521         [IB_7322_LT_STATE_RECOVERWAITRMT] =
  522                 IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
  523         [IB_7322_LT_STATE_RECOVERIDLE] =
  524                 IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
  525         [IB_7322_LT_STATE_CFGENH] = IB_PHYSPORTSTATE_CFG_ENH,
  526         [IB_7322_LT_STATE_CFGTEST] = IB_PHYSPORTSTATE_CFG_TRAIN,
  527         [IB_7322_LT_STATE_CFGWAITRMTTEST] =
  528                 IB_PHYSPORTSTATE_CFG_TRAIN,
  529         [IB_7322_LT_STATE_CFGWAITENH] =
  530                 IB_PHYSPORTSTATE_CFG_WAIT_ENH,
  531         [0x14] = IB_PHYSPORTSTATE_CFG_TRAIN,
  532         [0x15] = IB_PHYSPORTSTATE_CFG_TRAIN,
  533         [0x16] = IB_PHYSPORTSTATE_CFG_TRAIN,
  534         [0x17] = IB_PHYSPORTSTATE_CFG_TRAIN
  535 };
 536 
  537 #ifdef CONFIG_INFINIBAND_QIB_DCA
      /*
       * State attached to an interrupt's IRQ-affinity notifier so DCA
       * steering can follow CPU-affinity changes (see the
       * qib_irq_notifier_* handlers in this file).
       */
  538 struct qib_irq_notify {
  539         int rcv;        /* nonzero: receive interrupt; else SDMA — TODO confirm */
  540         void *arg;      /* context handed back to the notifier callbacks */
  541         struct irq_affinity_notify notify;      /* kernel affinity-notify hook */
  542 };
  543 #endif
 544 
      /* Device-wide (not per-port) 7322 chip-specific state. */
  545 struct qib_chip_specific {
  546         u64 __iomem *cregbase;  /* mapped base of counter registers */
  547         u64 *cntrs;             /* buffer for device counter values */
  548         spinlock_t rcvmod_lock; /* protect rcvctrl shadow changes */
  549         spinlock_t gpio_lock; /* RMW of shadows/regs for ExtCtrl and GPIO */
  550         u64 main_int_mask;      /* clear bits which have dedicated handlers */
  551         u64 int_enable_mask;  /* for per port interrupts in single port mode */
  552         u64 errormask;          /* shadow of kr_errmask — TODO confirm */
  553         u64 hwerrmask;          /* shadow of kr_hwerrmask — TODO confirm */
  554         u64 gpio_out; /* shadow of kr_gpio_out, for rmw ops */
  555         u64 gpio_mask; /* shadow the gpio mask register */
  556         u64 extctrl; /* shadow the gpio output enable, etc... */
  557         u32 ncntrs;             /* number of device counters */
  558         u32 nportcntrs;         /* number of per-port counters */
  559         u32 cntrnamelen;        /* length of device counter-name string */
  560         u32 portcntrnamelen;    /* length of port counter-name string */
  561         u32 numctxts;           /* number of receive contexts */
  562         u32 rcvegrcnt;          /* eager receive buffer count */
  563         u32 updthresh; /* current AvailUpdThld */
  564         u32 updthresh_dflt; /* default AvailUpdThld */
  565         u32 r1;                 /* rev-1 silicon flag (see *_R1 adapt values) — TODO confirm */
  566         u32 num_msix_entries;   /* MSI-X vectors actually allocated */
  567         u32 sdmabufcnt;         /* send buffers reserved for SDMA */
  568         u32 lastbuf_for_pio;    /* highest buffer index usable for PIO */
  569         u32 stay_in_freeze;     /* nonzero: don't unfreeze after hwerror */
  570         u32 recovery_ports_initted;     /* ports set up for link recovery */
  571 #ifdef CONFIG_INFINIBAND_QIB_DCA
  572         u32 dca_ctrl;           /* shadow of the DCA control register — TODO confirm */
  573         int rhdr_cpu[18];       /* per-context CPU targets for rcvhdr DCA */
  574         int sdma_cpu[2];        /* per-port CPU targets for SDMA DCA */
  575         u64 dca_rcvhdr_ctrl[5]; /* B, C, D, E, F */
  576 #endif
  577         struct qib_msix_entry *msix_entries;    /* MSI-X bookkeeping array */
  578         unsigned long *sendchkenable;   /* bitmap for send-check enables — TODO confirm vs kr_sendcheckmask */
  579         unsigned long *sendgrhchk;      /* bitmap for GRH send checks */
  580         unsigned long *sendibchk;       /* bitmap for IB packet send checks */
  581         u32 rcvavail_timeout[18];       /* per-context RcvAvailTimeOut shadow */
  582         char emsgbuf[128]; /* for device error interrupt msg buffer */
  583 };
 584 
  585 /* One Tx serdes emphasis setting, in "human readable" form. */
  586 struct txdds_ent {
  587         u8 amp;         /* amplitude */
  588         u8 pre;         /* pre-cursor emphasis */
  589         u8 main;        /* main cursor */
  590         u8 post;        /* post-cursor emphasis */
  591 };
 592 
      /*
       * Per-QSFP-vendor Tx serdes settings: an entry is selected by
       * matching the cable's vendor OUI and part number, and supplies
       * one txdds_ent for each IB link speed.
       */
  593 struct vendor_txdds_ent {
  594         u8 oui[QSFP_VOUI_LEN];  /* vendor OUI to match */
  595         u8 *partnum;            /* vendor part-number string to match */
  596         struct txdds_ent sdr;   /* settings used at SDR */
  597         struct txdds_ent ddr;   /* settings used at DDR */
  598         struct txdds_ent qdr;   /* settings used at QDR */
  599 };
 600 
 601 static void write_tx_serdes_param(struct qib_pportdata *, struct txdds_ent *);
 602 
 603 #define TXDDS_TABLE_SZ 16 /* number of entries per speed in onchip table */
 604 #define TXDDS_EXTRA_SZ 18 /* number of extra tx settings entries */
 605 #define TXDDS_MFG_SZ 2    /* number of mfg tx settings entries */
 606 #define SERDES_CHANS 4 /* yes, it's obvious, but one less magic number */
 607 
 608 #define H1_FORCE_VAL 8
 609 #define H1_FORCE_QME 1 /*  may be overridden via setup_txselect() */
 610 #define H1_FORCE_QMH 7 /*  may be overridden via setup_txselect() */
 611 
 612 /* The static and dynamic registers are paired, and the pairs indexed by spd */
 613 #define krp_static_adapt_dis(spd) (KREG_IBPORT_IDX(ADAPT_DISABLE_STATIC_SDR) \
 614         + ((spd) * 2))
 615 
 616 #define QDR_DFE_DISABLE_DELAY 4000 /* msec after LINKUP */
 617 #define QDR_STATIC_ADAPT_DOWN 0xf0f0f0f0ULL /* link down, H1-H4 QDR adapts */
 618 #define QDR_STATIC_ADAPT_DOWN_R1 0ULL /* r1 link down, H1-H4 QDR adapts */
 619 #define QDR_STATIC_ADAPT_INIT 0xffffffffffULL /* up, disable H0,H1-8, LE */
 620 #define QDR_STATIC_ADAPT_INIT_R1 0xf0ffffffffULL /* r1 up, disable H0,H1-8 */
 621 
/* Chip-specific state kept per IB port (7322 has two ports). */
struct qib_chippport_specific {
        u64 __iomem *kpregbase; /* per-port kernel register mapping */
        u64 __iomem *cpregbase; /* per-port counter register mapping */
        u64 *portcntrs;         /* buffer for port counter values */
        struct qib_pportdata *ppd; /* back-pointer to owning pport */
        wait_queue_head_t autoneg_wait; /* waiters on autoneg completion */
        struct delayed_work autoneg_work; /* deferred autoneg processing */
        struct delayed_work ipg_work;     /* deferred IPG adjustment */
        struct timer_list chase_timer;    /* times out link-state chasing */
        /*
         * these 5 fields are used to establish deltas for IB symbol
         * errors and linkrecovery errors.  They can be reported on
         * some chips during link negotiation prior to INIT, and with
         * DDR when faking DDR negotiations with non-IBTA switches.
         * The chip counters are adjusted at driver unload if there is
         * a non-zero delta.
         */
        u64 ibdeltainprog;
        u64 ibsymdelta;
        u64 ibsymsnap;
        u64 iblnkerrdelta;
        u64 iblnkerrsnap;
        u64 iblnkdownsnap;
        u64 iblnkdowndelta;
        u64 ibmalfdelta;
        u64 ibmalfsnap;
        u64 ibcctrl_a; /* krp_ibcctrl_a shadow */
        u64 ibcctrl_b; /* krp_ibcctrl_b shadow */
        unsigned long qdr_dfe_time; /* presumably jiffies deadline for QDR
                                     * DFE handling (see QDR_DFE_DISABLE_DELAY)
                                     * — TODO confirm against users */
        unsigned long chase_end;    /* when to stop chasing link state */
        u32 autoneg_tries;          /* attempts so far; cf. AUTONEG_TRIES */
        u32 recovery_init;          /* nonzero once link recovery set up */
        u32 qdr_dfe_on;             /* QDR DFE currently enabled flag */
        u32 qdr_reforce;
        /*
         * Per-bay per-channel rcv QMH H1 values and Tx values for QDR.
         * entry zero is unused, to simplify indexing
         */
        u8 h1_val;
        u8 no_eep;  /* txselect table index to use if no qsfp info */
        u8 ipg_tries;
        u8 ibmalfusesnap;
        struct qib_qsfp_data qsfp_data;
        char epmsgbuf[192]; /* for port error interrupt msg buffer */
        char sdmamsgbuf[192]; /* for per-port sdma error messages */
};
 668 
/*
 * Interrupt vector table.  Entry 0 (lsb == -1) is the catch-all
 * handler; the remaining entries each map one IntStatus bit to a
 * dedicated handler.  "dca" marks vectors used with the DCA
 * notifier setup (CONFIG_INFINIBAND_QIB_DCA).
 */
static struct {
        const char *name;      /* suffix appended to the IRQ name */
        irq_handler_t handler; /* handler for this vector */
        int lsb;               /* IntStatus bit, or -1 for catch-all */
        int port; /* 0 if not port-specific, else port # */
        int dca;               /* nonzero if DCA notifier applies */
} irq_table[] = {
        { "", qib_7322intr, -1, 0, 0 },
        { " (buf avail)", qib_7322bufavail,
                SYM_LSB(IntStatus, SendBufAvail), 0, 0},
        { " (sdma 0)", sdma_intr,
                SYM_LSB(IntStatus, SDmaInt_0), 1, 1 },
        { " (sdma 1)", sdma_intr,
                SYM_LSB(IntStatus, SDmaInt_1), 2, 1 },
        { " (sdmaI 0)", sdma_idle_intr,
                SYM_LSB(IntStatus, SDmaIdleInt_0), 1, 1},
        { " (sdmaI 1)", sdma_idle_intr,
                SYM_LSB(IntStatus, SDmaIdleInt_1), 2, 1},
        { " (sdmaP 0)", sdma_progress_intr,
                SYM_LSB(IntStatus, SDmaProgressInt_0), 1, 1 },
        { " (sdmaP 1)", sdma_progress_intr,
                SYM_LSB(IntStatus, SDmaProgressInt_1), 2, 1 },
        { " (sdmaC 0)", sdma_cleanup_intr,
                SYM_LSB(IntStatus, SDmaCleanupDone_0), 1, 0 },
        { " (sdmaC 1)", sdma_cleanup_intr,
                SYM_LSB(IntStatus, SDmaCleanupDone_1), 2 , 0},
};
 696 
 697 #ifdef CONFIG_INFINIBAND_QIB_DCA
 698 
/*
 * Map from rcv header queue number (0..17) to its DCA control
 * register (DCACtrlB..F, four queues per register), the index into
 * the cspec->dca_rcvhdr_ctrl[] shadow array, and the lsb/mask of that
 * queue's DCAOPH field within the register.
 */
static const struct dca_reg_map {
        int     shadow_inx; /* index into cspec->dca_rcvhdr_ctrl[] */
        int     lsb;        /* bit position of this queue's OPH field */
        u64     mask;       /* AND-mask clearing the field */
        u16     regno;      /* kernel register index (DCACtrlB..F) */
} dca_rcvhdr_reg_map[] = {
        { 0, SYM_LSB(DCACtrlB, RcvHdrq0DCAOPH),
           ~SYM_MASK(DCACtrlB, RcvHdrq0DCAOPH) , KREG_IDX(DCACtrlB) },
        { 0, SYM_LSB(DCACtrlB, RcvHdrq1DCAOPH),
           ~SYM_MASK(DCACtrlB, RcvHdrq1DCAOPH) , KREG_IDX(DCACtrlB) },
        { 0, SYM_LSB(DCACtrlB, RcvHdrq2DCAOPH),
           ~SYM_MASK(DCACtrlB, RcvHdrq2DCAOPH) , KREG_IDX(DCACtrlB) },
        { 0, SYM_LSB(DCACtrlB, RcvHdrq3DCAOPH),
           ~SYM_MASK(DCACtrlB, RcvHdrq3DCAOPH) , KREG_IDX(DCACtrlB) },
        { 1, SYM_LSB(DCACtrlC, RcvHdrq4DCAOPH),
           ~SYM_MASK(DCACtrlC, RcvHdrq4DCAOPH) , KREG_IDX(DCACtrlC) },
        { 1, SYM_LSB(DCACtrlC, RcvHdrq5DCAOPH),
           ~SYM_MASK(DCACtrlC, RcvHdrq5DCAOPH) , KREG_IDX(DCACtrlC) },
        { 1, SYM_LSB(DCACtrlC, RcvHdrq6DCAOPH),
           ~SYM_MASK(DCACtrlC, RcvHdrq6DCAOPH) , KREG_IDX(DCACtrlC) },
        { 1, SYM_LSB(DCACtrlC, RcvHdrq7DCAOPH),
           ~SYM_MASK(DCACtrlC, RcvHdrq7DCAOPH) , KREG_IDX(DCACtrlC) },
        { 2, SYM_LSB(DCACtrlD, RcvHdrq8DCAOPH),
           ~SYM_MASK(DCACtrlD, RcvHdrq8DCAOPH) , KREG_IDX(DCACtrlD) },
        { 2, SYM_LSB(DCACtrlD, RcvHdrq9DCAOPH),
           ~SYM_MASK(DCACtrlD, RcvHdrq9DCAOPH) , KREG_IDX(DCACtrlD) },
        { 2, SYM_LSB(DCACtrlD, RcvHdrq10DCAOPH),
           ~SYM_MASK(DCACtrlD, RcvHdrq10DCAOPH) , KREG_IDX(DCACtrlD) },
        { 2, SYM_LSB(DCACtrlD, RcvHdrq11DCAOPH),
           ~SYM_MASK(DCACtrlD, RcvHdrq11DCAOPH) , KREG_IDX(DCACtrlD) },
        { 3, SYM_LSB(DCACtrlE, RcvHdrq12DCAOPH),
           ~SYM_MASK(DCACtrlE, RcvHdrq12DCAOPH) , KREG_IDX(DCACtrlE) },
        { 3, SYM_LSB(DCACtrlE, RcvHdrq13DCAOPH),
           ~SYM_MASK(DCACtrlE, RcvHdrq13DCAOPH) , KREG_IDX(DCACtrlE) },
        { 3, SYM_LSB(DCACtrlE, RcvHdrq14DCAOPH),
           ~SYM_MASK(DCACtrlE, RcvHdrq14DCAOPH) , KREG_IDX(DCACtrlE) },
        { 3, SYM_LSB(DCACtrlE, RcvHdrq15DCAOPH),
           ~SYM_MASK(DCACtrlE, RcvHdrq15DCAOPH) , KREG_IDX(DCACtrlE) },
        { 4, SYM_LSB(DCACtrlF, RcvHdrq16DCAOPH),
           ~SYM_MASK(DCACtrlF, RcvHdrq16DCAOPH) , KREG_IDX(DCACtrlF) },
        { 4, SYM_LSB(DCACtrlF, RcvHdrq17DCAOPH),
           ~SYM_MASK(DCACtrlF, RcvHdrq17DCAOPH) , KREG_IDX(DCACtrlF) },
};
 742 #endif
 743 
 744 /* ibcctrl bits */
 745 #define QLOGIC_IB_IBCC_LINKINITCMD_DISABLE 1
 746 /* cycle through TS1/TS2 till OK */
 747 #define QLOGIC_IB_IBCC_LINKINITCMD_POLL 2
 748 /* wait for TS1, then go on */
 749 #define QLOGIC_IB_IBCC_LINKINITCMD_SLEEP 3
 750 #define QLOGIC_IB_IBCC_LINKINITCMD_SHIFT 16
 751 
 752 #define QLOGIC_IB_IBCC_LINKCMD_DOWN 1           /* move to 0x11 */
 753 #define QLOGIC_IB_IBCC_LINKCMD_ARMED 2          /* move to 0x21 */
 754 #define QLOGIC_IB_IBCC_LINKCMD_ACTIVE 3 /* move to 0x31 */
 755 
 756 #define BLOB_7322_IBCHG 0x101
 757 
 758 static inline void qib_write_kreg(const struct qib_devdata *dd,
 759                                   const u32 regno, u64 value);
 760 static inline u32 qib_read_kreg32(const struct qib_devdata *, const u32);
 761 static void write_7322_initregs(struct qib_devdata *);
 762 static void write_7322_init_portregs(struct qib_pportdata *);
 763 static void setup_7322_link_recovery(struct qib_pportdata *, u32);
 764 static void check_7322_rxe_status(struct qib_pportdata *);
 765 static u32 __iomem *qib_7322_getsendbuf(struct qib_pportdata *, u64, u32 *);
 766 #ifdef CONFIG_INFINIBAND_QIB_DCA
 767 static void qib_setup_dca(struct qib_devdata *dd);
 768 static void setup_dca_notifier(struct qib_devdata *dd, int msixnum);
 769 static void reset_dca_notifier(struct qib_devdata *dd, int msixnum);
 770 #endif
 771 
/**
 * qib_read_ureg32 - read 32-bit virtualized per-context register
 * @dd: device
 * @regno: register number
 * @ctxt: context number
 *
 * Return the contents of a register that is virtualized to be per context.
 * Returns 0 if the chip is not mapped or not present (note: that is not
 * distinguishable from a valid register value of 0 at runtime).
 */
static inline u32 qib_read_ureg32(const struct qib_devdata *dd,
                                  enum qib_ureg regno, int ctxt)
{
        if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
                return 0;
        /* user regs may be mapped separately (userbase) or live inside
         * the kernel register BAR at offset uregbase */
        return readl(regno + (u64 __iomem *)(
                (dd->ureg_align * ctxt) + (dd->userbase ?
                 (char __iomem *)dd->userbase :
                 (char __iomem *)dd->kregbase + dd->uregbase)));
}
 792 
/**
 * qib_read_ureg - read virtualized per-context register
 * @dd: device
 * @regno: register number
 * @ctxt: context number
 *
 * Return the contents of a register that is virtualized to be per context.
 * Returns 0 if the chip is not mapped or not present (note: that is not
 * distinguishable from a valid register value of 0 at runtime).
 */
static inline u64 qib_read_ureg(const struct qib_devdata *dd,
                                enum qib_ureg regno, int ctxt)
{

        if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
                return 0;
        /* same base selection as qib_read_ureg32(), 64-bit access */
        return readq(regno + (u64 __iomem *)(
                (dd->ureg_align * ctxt) + (dd->userbase ?
                 (char __iomem *)dd->userbase :
                 (char __iomem *)dd->kregbase + dd->uregbase)));
}
 814 
 815 /**
 816  * qib_write_ureg - write virtualized per-context register
 817  * @dd: device
 818  * @regno: register number
 819  * @value: value
 820  * @ctxt: context
 821  *
 822  * Write the contents of a register that is virtualized to be per context.
 823  */
 824 static inline void qib_write_ureg(const struct qib_devdata *dd,
 825                                   enum qib_ureg regno, u64 value, int ctxt)
 826 {
 827         u64 __iomem *ubase;
 828 
 829         if (dd->userbase)
 830                 ubase = (u64 __iomem *)
 831                         ((char __iomem *) dd->userbase +
 832                          dd->ureg_align * ctxt);
 833         else
 834                 ubase = (u64 __iomem *)
 835                         (dd->uregbase +
 836                          (char __iomem *) dd->kregbase +
 837                          dd->ureg_align * ctxt);
 838 
 839         if (dd->kregbase && (dd->flags & QIB_PRESENT))
 840                 writeq(value, &ubase[regno]);
 841 }
 842 
/* Read a 32-bit device-global kernel register.  Returns all-ones when
 * the chip is not mapped/present (indistinguishable from a valid 0xffffffff
 * register value).
 */
static inline u32 qib_read_kreg32(const struct qib_devdata *dd,
                                  const u32 regno)
{
        if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
                return -1;
        return readl((u32 __iomem *) &dd->kregbase[regno]);
}
 850 
/* Read a 64-bit device-global kernel register.  Returns all-ones when
 * the chip is not mapped/present (indistinguishable from a valid all-ones
 * register value).
 */
static inline u64 qib_read_kreg64(const struct qib_devdata *dd,
                                  const u32 regno)
{
        if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
                return -1;
        return readq(&dd->kregbase[regno]);
}
 858 
/* Write a 64-bit device-global kernel register; silently dropped when
 * the chip is not mapped/present.
 */
static inline void qib_write_kreg(const struct qib_devdata *dd,
                                  const u32 regno, u64 value)
{
        if (dd->kregbase && (dd->flags & QIB_PRESENT))
                writeq(value, &dd->kregbase[regno]);
}
 865 
 866 /*
 867  * not many sanity checks for the port-specific kernel register routines,
 868  * since they are only used when it's known to be safe.
 869 */
 870 static inline u64 qib_read_kreg_port(const struct qib_pportdata *ppd,
 871                                      const u16 regno)
 872 {
 873         if (!ppd->cpspec->kpregbase || !(ppd->dd->flags & QIB_PRESENT))
 874                 return 0ULL;
 875         return readq(&ppd->cpspec->kpregbase[regno]);
 876 }
 877 
/* Write a 64-bit per-port kernel register; silently dropped when the
 * port state (cpspec), device, register mapping, or chip presence is
 * not established.
 */
static inline void qib_write_kreg_port(const struct qib_pportdata *ppd,
                                       const u16 regno, u64 value)
{
        if (ppd->cpspec && ppd->dd && ppd->cpspec->kpregbase &&
            (ppd->dd->flags & QIB_PRESENT))
                writeq(value, &ppd->cpspec->kpregbase[regno]);
}
 885 
/**
 * qib_write_kreg_ctxt - write a device's per-ctxt 64-bit kernel register
 * @dd: the qlogic_ib device
 * @regno: the register number to write
 * @ctxt: the context containing the register
 * @value: the value to write
 *
 * Per-context registers are laid out at consecutive kernel register
 * indices, so the ctxt number is simply added to the base regno.
 */
static inline void qib_write_kreg_ctxt(const struct qib_devdata *dd,
                                       const u16 regno, unsigned ctxt,
                                       u64 value)
{
        qib_write_kreg(dd, regno + ctxt, value);
}
 899 
 900 static inline u64 read_7322_creg(const struct qib_devdata *dd, u16 regno)
 901 {
 902         if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT))
 903                 return 0;
 904         return readq(&dd->cspec->cregbase[regno]);
 905 
 906 
 907 }
 908 
 909 static inline u32 read_7322_creg32(const struct qib_devdata *dd, u16 regno)
 910 {
 911         if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT))
 912                 return 0;
 913         return readl(&dd->cspec->cregbase[regno]);
 914 
 915 
 916 }
 917 
/* Write a 64-bit per-port counter register; silently dropped when the
 * port counter mapping or chip presence is not established.
 * NOTE(review): unlike qib_write_kreg_port(), ppd->dd is not checked
 * before dereference here — presumably callers guarantee it; confirm.
 */
static inline void write_7322_creg_port(const struct qib_pportdata *ppd,
                                        u16 regno, u64 value)
{
        if (ppd->cpspec && ppd->cpspec->cpregbase &&
            (ppd->dd->flags & QIB_PRESENT))
                writeq(value, &ppd->cpspec->cpregbase[regno]);
}
 925 
/* Read a 64-bit per-port counter register.  Returns 0 when the port
 * state, counter mapping, or chip presence is not established
 * (indistinguishable from a valid counter value of 0).
 */
static inline u64 read_7322_creg_port(const struct qib_pportdata *ppd,
                                      u16 regno)
{
        if (!ppd->cpspec || !ppd->cpspec->cpregbase ||
            !(ppd->dd->flags & QIB_PRESENT))
                return 0;
        return readq(&ppd->cpspec->cpregbase[regno]);
}
 934 
 935 static inline u32 read_7322_creg32_port(const struct qib_pportdata *ppd,
 936                                         u16 regno)
 937 {
 938         if (!ppd->cpspec || !ppd->cpspec->cpregbase ||
 939             !(ppd->dd->flags & QIB_PRESENT))
 940                 return 0;
 941         return readl(&ppd->cpspec->cpregbase[regno]);
 942 }
 943 
 944 /* bits in Control register */
 945 #define QLOGIC_IB_C_RESET SYM_MASK(Control, SyncReset)
 946 #define QLOGIC_IB_C_SDMAFETCHPRIOEN SYM_MASK(Control, SDmaDescFetchPriorityEn)
 947 
 948 /* bits in general interrupt regs */
 949 #define QIB_I_RCVURG_LSB SYM_LSB(IntMask, RcvUrg0IntMask)
 950 #define QIB_I_RCVURG_RMASK MASK_ACROSS(0, 17)
 951 #define QIB_I_RCVURG_MASK (QIB_I_RCVURG_RMASK << QIB_I_RCVURG_LSB)
 952 #define QIB_I_RCVAVAIL_LSB SYM_LSB(IntMask, RcvAvail0IntMask)
 953 #define QIB_I_RCVAVAIL_RMASK MASK_ACROSS(0, 17)
 954 #define QIB_I_RCVAVAIL_MASK (QIB_I_RCVAVAIL_RMASK << QIB_I_RCVAVAIL_LSB)
 955 #define QIB_I_C_ERROR INT_MASK(Err)
 956 
 957 #define QIB_I_SPIOSENT (INT_MASK_P(SendDone, 0) | INT_MASK_P(SendDone, 1))
 958 #define QIB_I_SPIOBUFAVAIL INT_MASK(SendBufAvail)
 959 #define QIB_I_GPIO INT_MASK(AssertGPIO)
 960 #define QIB_I_P_SDMAINT(pidx) \
 961         (INT_MASK_P(SDma, pidx) | INT_MASK_P(SDmaIdle, pidx) | \
 962          INT_MASK_P(SDmaProgress, pidx) | \
 963          INT_MASK_PM(SDmaCleanupDone, pidx))
 964 
 965 /* Interrupt bits that are "per port" */
 966 #define QIB_I_P_BITSEXTANT(pidx) \
 967         (INT_MASK_P(Err, pidx) | INT_MASK_P(SendDone, pidx) | \
 968         INT_MASK_P(SDma, pidx) | INT_MASK_P(SDmaIdle, pidx) | \
 969         INT_MASK_P(SDmaProgress, pidx) | \
 970         INT_MASK_PM(SDmaCleanupDone, pidx))
 971 
 972 /* Interrupt bits that are common to a device */
 973 /* currently unused: QIB_I_SPIOSENT */
 974 #define QIB_I_C_BITSEXTANT \
 975         (QIB_I_RCVURG_MASK | QIB_I_RCVAVAIL_MASK | \
 976         QIB_I_SPIOSENT | \
 977         QIB_I_C_ERROR | QIB_I_SPIOBUFAVAIL | QIB_I_GPIO)
 978 
 979 #define QIB_I_BITSEXTANT (QIB_I_C_BITSEXTANT | \
 980         QIB_I_P_BITSEXTANT(0) | QIB_I_P_BITSEXTANT(1))
 981 
 982 /*
 983  * Error bits that are "per port".
 984  */
 985 #define QIB_E_P_IBSTATUSCHANGED ERR_MASK_N(IBStatusChanged)
 986 #define QIB_E_P_SHDR ERR_MASK_N(SHeadersErr)
 987 #define QIB_E_P_VL15_BUF_MISUSE ERR_MASK_N(VL15BufMisuseErr)
 988 #define QIB_E_P_SND_BUF_MISUSE ERR_MASK_N(SendBufMisuseErr)
 989 #define QIB_E_P_SUNSUPVL ERR_MASK_N(SendUnsupportedVLErr)
 990 #define QIB_E_P_SUNEXP_PKTNUM ERR_MASK_N(SendUnexpectedPktNumErr)
 991 #define QIB_E_P_SDROP_DATA ERR_MASK_N(SendDroppedDataPktErr)
 992 #define QIB_E_P_SDROP_SMP ERR_MASK_N(SendDroppedSmpPktErr)
 993 #define QIB_E_P_SPKTLEN ERR_MASK_N(SendPktLenErr)
 994 #define QIB_E_P_SUNDERRUN ERR_MASK_N(SendUnderRunErr)
 995 #define QIB_E_P_SMAXPKTLEN ERR_MASK_N(SendMaxPktLenErr)
 996 #define QIB_E_P_SMINPKTLEN ERR_MASK_N(SendMinPktLenErr)
 997 #define QIB_E_P_RIBLOSTLINK ERR_MASK_N(RcvIBLostLinkErr)
 998 #define QIB_E_P_RHDR ERR_MASK_N(RcvHdrErr)
 999 #define QIB_E_P_RHDRLEN ERR_MASK_N(RcvHdrLenErr)
1000 #define QIB_E_P_RBADTID ERR_MASK_N(RcvBadTidErr)
1001 #define QIB_E_P_RBADVERSION ERR_MASK_N(RcvBadVersionErr)
1002 #define QIB_E_P_RIBFLOW ERR_MASK_N(RcvIBFlowErr)
1003 #define QIB_E_P_REBP ERR_MASK_N(RcvEBPErr)
1004 #define QIB_E_P_RUNSUPVL ERR_MASK_N(RcvUnsupportedVLErr)
1005 #define QIB_E_P_RUNEXPCHAR ERR_MASK_N(RcvUnexpectedCharErr)
1006 #define QIB_E_P_RSHORTPKTLEN ERR_MASK_N(RcvShortPktLenErr)
1007 #define QIB_E_P_RLONGPKTLEN ERR_MASK_N(RcvLongPktLenErr)
1008 #define QIB_E_P_RMAXPKTLEN ERR_MASK_N(RcvMaxPktLenErr)
1009 #define QIB_E_P_RMINPKTLEN ERR_MASK_N(RcvMinPktLenErr)
1010 #define QIB_E_P_RICRC ERR_MASK_N(RcvICRCErr)
1011 #define QIB_E_P_RVCRC ERR_MASK_N(RcvVCRCErr)
1012 #define QIB_E_P_RFORMATERR ERR_MASK_N(RcvFormatErr)
1013 
1014 #define QIB_E_P_SDMA1STDESC ERR_MASK_N(SDma1stDescErr)
1015 #define QIB_E_P_SDMABASE ERR_MASK_N(SDmaBaseErr)
1016 #define QIB_E_P_SDMADESCADDRMISALIGN ERR_MASK_N(SDmaDescAddrMisalignErr)
1017 #define QIB_E_P_SDMADWEN ERR_MASK_N(SDmaDwEnErr)
1018 #define QIB_E_P_SDMAGENMISMATCH ERR_MASK_N(SDmaGenMismatchErr)
1019 #define QIB_E_P_SDMAHALT ERR_MASK_N(SDmaHaltErr)
1020 #define QIB_E_P_SDMAMISSINGDW ERR_MASK_N(SDmaMissingDwErr)
1021 #define QIB_E_P_SDMAOUTOFBOUND ERR_MASK_N(SDmaOutOfBoundErr)
1022 #define QIB_E_P_SDMARPYTAG ERR_MASK_N(SDmaRpyTagErr)
1023 #define QIB_E_P_SDMATAILOUTOFBOUND ERR_MASK_N(SDmaTailOutOfBoundErr)
1024 #define QIB_E_P_SDMAUNEXPDATA ERR_MASK_N(SDmaUnexpDataErr)
1025 
1026 /* Error bits that are common to a device */
1027 #define QIB_E_RESET ERR_MASK(ResetNegated)
1028 #define QIB_E_HARDWARE ERR_MASK(HardwareErr)
1029 #define QIB_E_INVALIDADDR ERR_MASK(InvalidAddrErr)
1030 
1031 
1032 /*
1033  * Per chip (rather than per-port) errors.  Most either do
1034  * nothing but trigger a print (because they self-recover, or
1035  * always occur in tandem with other errors that handle the
1036  * issue), or because they indicate errors with no recovery,
1037  * but we want to know that they happened.
1038  */
1039 #define QIB_E_SBUF_VL15_MISUSE ERR_MASK(SBufVL15MisUseErr)
1040 #define QIB_E_BADEEP ERR_MASK(InvalidEEPCmd)
1041 #define QIB_E_VLMISMATCH ERR_MASK(SendVLMismatchErr)
1042 #define QIB_E_ARMLAUNCH ERR_MASK(SendArmLaunchErr)
1043 #define QIB_E_SPCLTRIG ERR_MASK(SendSpecialTriggerErr)
1044 #define QIB_E_RRCVHDRFULL ERR_MASK(RcvHdrFullErr)
1045 #define QIB_E_RRCVEGRFULL ERR_MASK(RcvEgrFullErr)
1046 #define QIB_E_RCVCTXTSHARE ERR_MASK(RcvContextShareErr)
1047 
1048 /* SDMA chip errors (not per port)
1049  * QIB_E_SDMA_BUF_DUP needs no special handling, because we will also get
1050  * the SDMAHALT error immediately, so we just print the dup error via the
1051  * E_AUTO mechanism.  This is true of most of the per-port fatal errors
1052  * as well, but since this is port-independent, by definition, it's
1053  * handled a bit differently.  SDMA_VL15 and SDMA_WRONG_PORT are per
1054  * packet send errors, and so are handled in the same manner as other
1055  * per-packet errors.
1056  */
1057 #define QIB_E_SDMA_VL15 ERR_MASK(SDmaVL15Err)
1058 #define QIB_E_SDMA_WRONG_PORT ERR_MASK(SDmaWrongPortErr)
1059 #define QIB_E_SDMA_BUF_DUP ERR_MASK(SDmaBufMaskDuplicateErr)
1060 
1061 /*
1062  * Below functionally equivalent to legacy QLOGIC_IB_E_PKTERRS
1063  * it is used to print "common" packet errors.
1064  */
1065 #define QIB_E_P_PKTERRS (QIB_E_P_SPKTLEN |\
1066         QIB_E_P_SDROP_DATA | QIB_E_P_RVCRC |\
1067         QIB_E_P_RICRC | QIB_E_P_RSHORTPKTLEN |\
1068         QIB_E_P_VL15_BUF_MISUSE | QIB_E_P_SHDR | \
1069         QIB_E_P_REBP)
1070 
1071 /* Error Bits that Packet-related (Receive, per-port) */
1072 #define QIB_E_P_RPKTERRS (\
1073         QIB_E_P_RHDRLEN | QIB_E_P_RBADTID | \
1074         QIB_E_P_RBADVERSION | QIB_E_P_RHDR | \
1075         QIB_E_P_RLONGPKTLEN | QIB_E_P_RSHORTPKTLEN |\
1076         QIB_E_P_RMAXPKTLEN | QIB_E_P_RMINPKTLEN | \
1077         QIB_E_P_RFORMATERR | QIB_E_P_RUNSUPVL | \
1078         QIB_E_P_RUNEXPCHAR | QIB_E_P_RIBFLOW | QIB_E_P_REBP)
1079 
1080 /*
1081  * Error bits that are Send-related (per port)
1082  * (ARMLAUNCH excluded from E_SPKTERRS because it gets special handling).
1083  * All of these potentially need to have a buffer disarmed
1084  */
1085 #define QIB_E_P_SPKTERRS (\
1086         QIB_E_P_SUNEXP_PKTNUM |\
1087         QIB_E_P_SDROP_DATA | QIB_E_P_SDROP_SMP |\
1088         QIB_E_P_SMAXPKTLEN |\
1089         QIB_E_P_VL15_BUF_MISUSE | QIB_E_P_SHDR | \
1090         QIB_E_P_SMINPKTLEN | QIB_E_P_SPKTLEN | \
1091         QIB_E_P_SND_BUF_MISUSE | QIB_E_P_SUNSUPVL)
1092 
1093 #define QIB_E_SPKTERRS ( \
1094                 QIB_E_SBUF_VL15_MISUSE | QIB_E_VLMISMATCH | \
1095                 ERR_MASK_N(SendUnsupportedVLErr) |                      \
1096                 QIB_E_SPCLTRIG | QIB_E_SDMA_VL15 | QIB_E_SDMA_WRONG_PORT)
1097 
1098 #define QIB_E_P_SDMAERRS ( \
1099         QIB_E_P_SDMAHALT | \
1100         QIB_E_P_SDMADESCADDRMISALIGN | \
1101         QIB_E_P_SDMAUNEXPDATA | \
1102         QIB_E_P_SDMAMISSINGDW | \
1103         QIB_E_P_SDMADWEN | \
1104         QIB_E_P_SDMARPYTAG | \
1105         QIB_E_P_SDMA1STDESC | \
1106         QIB_E_P_SDMABASE | \
1107         QIB_E_P_SDMATAILOUTOFBOUND | \
1108         QIB_E_P_SDMAOUTOFBOUND | \
1109         QIB_E_P_SDMAGENMISMATCH)
1110 
1111 /*
1112  * This sets some bits more than once, but makes it more obvious which
1113  * bits are not handled under other categories, and the repeat definition
1114  * is not a problem.
1115  */
1116 #define QIB_E_P_BITSEXTANT ( \
1117         QIB_E_P_SPKTERRS | QIB_E_P_PKTERRS | QIB_E_P_RPKTERRS | \
1118         QIB_E_P_RIBLOSTLINK | QIB_E_P_IBSTATUSCHANGED | \
1119         QIB_E_P_SND_BUF_MISUSE | QIB_E_P_SUNDERRUN | \
1120         QIB_E_P_SHDR | QIB_E_P_VL15_BUF_MISUSE | QIB_E_P_SDMAERRS \
1121         )
1122 
1123 /*
1124  * These are errors that can occur when the link
1125  * changes state while a packet is being sent or received.  This doesn't
1126  * cover things like EBP or VCRC that can be the result of a sending
1127  * having the link change state, so we receive a "known bad" packet.
1128  * All of these are "per port", so renamed:
1129  */
1130 #define QIB_E_P_LINK_PKTERRS (\
1131         QIB_E_P_SDROP_DATA | QIB_E_P_SDROP_SMP |\
1132         QIB_E_P_SMINPKTLEN | QIB_E_P_SPKTLEN |\
1133         QIB_E_P_RSHORTPKTLEN | QIB_E_P_RMINPKTLEN |\
1134         QIB_E_P_RUNEXPCHAR)
1135 
1136 /*
1137  * This sets some bits more than once, but makes it more obvious which
1138  * bits are not handled under other categories (such as QIB_E_SPKTERRS),
1139  * and the repeat definition is not a problem.
1140  */
1141 #define QIB_E_C_BITSEXTANT (\
1142         QIB_E_HARDWARE | QIB_E_INVALIDADDR | QIB_E_BADEEP |\
1143         QIB_E_ARMLAUNCH | QIB_E_VLMISMATCH | QIB_E_RRCVHDRFULL |\
1144         QIB_E_RRCVEGRFULL | QIB_E_RESET | QIB_E_SBUF_VL15_MISUSE)
1145 
1146 /* Likewise Neuter E_SPKT_ERRS_IGNORE */
1147 #define E_SPKT_ERRS_IGNORE 0
1148 
1149 #define QIB_EXTS_MEMBIST_DISABLED \
1150         SYM_MASK(EXTStatus, MemBISTDisabled)
1151 #define QIB_EXTS_MEMBIST_ENDTEST \
1152         SYM_MASK(EXTStatus, MemBISTEndTest)
1153 
1154 #define QIB_E_SPIOARMLAUNCH \
1155         ERR_MASK(SendArmLaunchErr)
1156 
1157 #define IBA7322_IBCC_LINKINITCMD_MASK SYM_RMASK(IBCCtrlA_0, LinkInitCmd)
1158 #define IBA7322_IBCC_LINKCMD_SHIFT SYM_LSB(IBCCtrlA_0, LinkCmd)
1159 
1160 /*
1161  * IBTA_1_2 is set when multiple speeds are enabled (normal),
1162  * and also if forced QDR (only QDR enabled).  It's enabled for the
1163  * forced QDR case so that scrambling will be enabled by the TS3
1164  * exchange, when supported by both sides of the link.
1165  */
1166 #define IBA7322_IBC_IBTA_1_2_MASK SYM_MASK(IBCCtrlB_0, IB_ENHANCED_MODE)
1167 #define IBA7322_IBC_MAX_SPEED_MASK SYM_MASK(IBCCtrlB_0, SD_SPEED)
1168 #define IBA7322_IBC_SPEED_QDR SYM_MASK(IBCCtrlB_0, SD_SPEED_QDR)
1169 #define IBA7322_IBC_SPEED_DDR SYM_MASK(IBCCtrlB_0, SD_SPEED_DDR)
1170 #define IBA7322_IBC_SPEED_SDR SYM_MASK(IBCCtrlB_0, SD_SPEED_SDR)
1171 #define IBA7322_IBC_SPEED_MASK (SYM_MASK(IBCCtrlB_0, SD_SPEED_SDR) | \
1172         SYM_MASK(IBCCtrlB_0, SD_SPEED_DDR) | SYM_MASK(IBCCtrlB_0, SD_SPEED_QDR))
1173 #define IBA7322_IBC_SPEED_LSB SYM_LSB(IBCCtrlB_0, SD_SPEED_SDR)
1174 
1175 #define IBA7322_LEDBLINK_OFF_SHIFT SYM_LSB(RcvPktLEDCnt_0, OFFperiod)
1176 #define IBA7322_LEDBLINK_ON_SHIFT SYM_LSB(RcvPktLEDCnt_0, ONperiod)
1177 
1178 #define IBA7322_IBC_WIDTH_AUTONEG SYM_MASK(IBCCtrlB_0, IB_NUM_CHANNELS)
1179 #define IBA7322_IBC_WIDTH_4X_ONLY (1<<SYM_LSB(IBCCtrlB_0, IB_NUM_CHANNELS))
1180 #define IBA7322_IBC_WIDTH_1X_ONLY (0<<SYM_LSB(IBCCtrlB_0, IB_NUM_CHANNELS))
1181 
1182 #define IBA7322_IBC_RXPOL_MASK SYM_MASK(IBCCtrlB_0, IB_POLARITY_REV_SUPP)
1183 #define IBA7322_IBC_RXPOL_LSB SYM_LSB(IBCCtrlB_0, IB_POLARITY_REV_SUPP)
1184 #define IBA7322_IBC_HRTBT_MASK (SYM_MASK(IBCCtrlB_0, HRTBT_AUTO) | \
1185         SYM_MASK(IBCCtrlB_0, HRTBT_ENB))
1186 #define IBA7322_IBC_HRTBT_RMASK (IBA7322_IBC_HRTBT_MASK >> \
1187         SYM_LSB(IBCCtrlB_0, HRTBT_ENB))
1188 #define IBA7322_IBC_HRTBT_LSB SYM_LSB(IBCCtrlB_0, HRTBT_ENB)
1189 
1190 #define IBA7322_REDIRECT_VEC_PER_REG 12
1191 
1192 #define IBA7322_SENDCHK_PKEY SYM_MASK(SendCheckControl_0, PKey_En)
1193 #define IBA7322_SENDCHK_BTHQP SYM_MASK(SendCheckControl_0, BTHQP_En)
1194 #define IBA7322_SENDCHK_SLID SYM_MASK(SendCheckControl_0, SLID_En)
1195 #define IBA7322_SENDCHK_RAW_IPV6 SYM_MASK(SendCheckControl_0, RawIPV6_En)
1196 #define IBA7322_SENDCHK_MINSZ SYM_MASK(SendCheckControl_0, PacketTooSmall_En)
1197 
1198 #define AUTONEG_TRIES 3 /* sequential retries to negotiate DDR */
1199 
1200 #define HWE_AUTO(fldname) { .mask = SYM_MASK(HwErrMask, fldname##Mask), \
1201         .msg = #fldname , .sz = sizeof(#fldname) }
1202 #define HWE_AUTO_P(fldname, port) { .mask = SYM_MASK(HwErrMask, \
1203         fldname##Mask##_##port), .msg = #fldname , .sz = sizeof(#fldname) }
/* Hardware error message table; scanned linearly and terminated by
 * the zero-mask sentinel entry.
 */
static const struct qib_hwerror_msgs qib_7322_hwerror_msgs[] = {
        HWE_AUTO_P(IBSerdesPClkNotDetect, 1),
        HWE_AUTO_P(IBSerdesPClkNotDetect, 0),
        HWE_AUTO(PCIESerdesPClkNotDetect),
        HWE_AUTO(PowerOnBISTFailed),
        HWE_AUTO(TempsenseTholdReached),
        HWE_AUTO(MemoryErr),
        HWE_AUTO(PCIeBusParityErr),
        HWE_AUTO(PcieCplTimeout),
        HWE_AUTO(PciePoisonedTLP),
        HWE_AUTO_P(SDmaMemReadErr, 1),
        HWE_AUTO_P(SDmaMemReadErr, 0),
        HWE_AUTO_P(IBCBusFromSPCParityErr, 1),
        HWE_AUTO_P(IBCBusToSPCParityErr, 1),
        HWE_AUTO_P(IBCBusFromSPCParityErr, 0),
        HWE_AUTO(statusValidNoEop),
        HWE_AUTO(LATriggered),
        { .mask = 0, .sz = 0 }
};
1223 
1224 #define E_AUTO(fldname) { .mask = SYM_MASK(ErrMask, fldname##Mask), \
1225         .msg = #fldname, .sz = sizeof(#fldname) }
1226 #define E_P_AUTO(fldname) { .mask = SYM_MASK(ErrMask_0, fldname##Mask), \
1227         .msg = #fldname, .sz = sizeof(#fldname) }
/* Device-wide (non-port) error message table; terminated by the
 * zero-mask sentinel entry.
 */
static const struct qib_hwerror_msgs qib_7322error_msgs[] = {
        E_AUTO(RcvEgrFullErr),
        E_AUTO(RcvHdrFullErr),
        E_AUTO(ResetNegated),
        E_AUTO(HardwareErr),
        E_AUTO(InvalidAddrErr),
        E_AUTO(SDmaVL15Err),
        E_AUTO(SBufVL15MisUseErr),
        E_AUTO(InvalidEEPCmd),
        E_AUTO(RcvContextShareErr),
        E_AUTO(SendVLMismatchErr),
        E_AUTO(SendArmLaunchErr),
        E_AUTO(SendSpecialTriggerErr),
        E_AUTO(SDmaWrongPortErr),
        E_AUTO(SDmaBufMaskDuplicateErr),
        { .mask = 0, .sz = 0 }
};
1245 
/* Per-port error message table; terminated by the zero-mask sentinel
 * entry.
 */
static const struct  qib_hwerror_msgs qib_7322p_error_msgs[] = {
        E_P_AUTO(IBStatusChanged),
        E_P_AUTO(SHeadersErr),
        E_P_AUTO(VL15BufMisuseErr),
        /*
         * SDmaHaltErr is not really an error, make it clearer;
         */
        {.mask = SYM_MASK(ErrMask_0, SDmaHaltErrMask), .msg = "SDmaHalted",
                .sz = 11},
        E_P_AUTO(SDmaDescAddrMisalignErr),
        E_P_AUTO(SDmaUnexpDataErr),
        E_P_AUTO(SDmaMissingDwErr),
        E_P_AUTO(SDmaDwEnErr),
        E_P_AUTO(SDmaRpyTagErr),
        E_P_AUTO(SDma1stDescErr),
        E_P_AUTO(SDmaBaseErr),
        E_P_AUTO(SDmaTailOutOfBoundErr),
        E_P_AUTO(SDmaOutOfBoundErr),
        E_P_AUTO(SDmaGenMismatchErr),
        E_P_AUTO(SendBufMisuseErr),
        E_P_AUTO(SendUnsupportedVLErr),
        E_P_AUTO(SendUnexpectedPktNumErr),
        E_P_AUTO(SendDroppedDataPktErr),
        E_P_AUTO(SendDroppedSmpPktErr),
        E_P_AUTO(SendPktLenErr),
        E_P_AUTO(SendUnderRunErr),
        E_P_AUTO(SendMaxPktLenErr),
        E_P_AUTO(SendMinPktLenErr),
        E_P_AUTO(RcvIBLostLinkErr),
        E_P_AUTO(RcvHdrErr),
        E_P_AUTO(RcvHdrLenErr),
        E_P_AUTO(RcvBadTidErr),
        E_P_AUTO(RcvBadVersionErr),
        E_P_AUTO(RcvIBFlowErr),
        E_P_AUTO(RcvEBPErr),
        E_P_AUTO(RcvUnsupportedVLErr),
        E_P_AUTO(RcvUnexpectedCharErr),
        E_P_AUTO(RcvShortPktLenErr),
        E_P_AUTO(RcvLongPktLenErr),
        E_P_AUTO(RcvMaxPktLenErr),
        E_P_AUTO(RcvMinPktLenErr),
        E_P_AUTO(RcvICRCErr),
        E_P_AUTO(RcvVCRCErr),
        E_P_AUTO(RcvFormatErr),
        { .mask = 0, .sz = 0 }
};
1292 
1293 /*
1294  * Below generates "auto-message" for interrupts not specific to any port or
1295  * context
1296  */
1297 #define INTR_AUTO(fldname) { .mask = SYM_MASK(IntMask, fldname##Mask), \
1298         .msg = #fldname, .sz = sizeof(#fldname) }
1299 /* Below generates "auto-message" for interrupts specific to a port */
1300 #define INTR_AUTO_P(fldname) { .mask = MASK_ACROSS(\
1301         SYM_LSB(IntMask, fldname##Mask##_0), \
1302         SYM_LSB(IntMask, fldname##Mask##_1)), \
1303         .msg = #fldname "_P", .sz = sizeof(#fldname "_P") }
1304 /* For some reason, the SerDesTrimDone bits are reversed */
1305 #define INTR_AUTO_PI(fldname) { .mask = MASK_ACROSS(\
1306         SYM_LSB(IntMask, fldname##Mask##_1), \
1307         SYM_LSB(IntMask, fldname##Mask##_0)), \
1308         .msg = #fldname "_P", .sz = sizeof(#fldname "_P") }
1309 /*
1310  * Below generates "auto-message" for interrupts specific to a context,
1311  * with ctxt-number appended
1312  */
1313 #define INTR_AUTO_C(fldname) { .mask = MASK_ACROSS(\
1314         SYM_LSB(IntMask, fldname##0IntMask), \
1315         SYM_LSB(IntMask, fldname##17IntMask)), \
1316         .msg = #fldname "_C", .sz = sizeof(#fldname "_C") }
1317 
1318 #define TXSYMPTOM_AUTO_P(fldname) \
1319         { .mask = SYM_MASK(SendHdrErrSymptom_0, fldname), \
1320         .msg = #fldname, .sz = sizeof(#fldname) }
/*
 * Send-header-check symptom table, decoded by err_decode() from the
 * SendHdrErrSymptom register when a send-header error is raised.
 */
static const struct  qib_hwerror_msgs hdrchk_msgs[] = {
        TXSYMPTOM_AUTO_P(NonKeyPacket),
        TXSYMPTOM_AUTO_P(GRHFail),
        TXSYMPTOM_AUTO_P(PkeyFail),
        TXSYMPTOM_AUTO_P(QPFail),
        TXSYMPTOM_AUTO_P(SLIDFail),
        TXSYMPTOM_AUTO_P(RawIPV6),
        TXSYMPTOM_AUTO_P(PacketTooSmall),
        { .mask = 0, .sz = 0 } /* terminator */
};

#define IBA7322_HDRHEAD_PKTINT_SHIFT 32 /* interrupt cnt in upper 32 bits */
1333 
1334 /*
1335  * Called when we might have an error that is specific to a particular
1336  * PIO buffer, and may need to cancel that buffer, so it can be re-used,
1337  * because we don't need to force the update of pioavail
1338  */
1339 static void qib_disarm_7322_senderrbufs(struct qib_pportdata *ppd)
1340 {
1341         struct qib_devdata *dd = ppd->dd;
1342         u32 i;
1343         int any;
1344         u32 piobcnt = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS;
1345         u32 regcnt = (piobcnt + BITS_PER_LONG - 1) / BITS_PER_LONG;
1346         unsigned long sbuf[4];
1347 
1348         /*
1349          * It's possible that sendbuffererror could have bits set; might
1350          * have already done this as a result of hardware error handling.
1351          */
1352         any = 0;
1353         for (i = 0; i < regcnt; ++i) {
1354                 sbuf[i] = qib_read_kreg64(dd, kr_sendbuffererror + i);
1355                 if (sbuf[i]) {
1356                         any = 1;
1357                         qib_write_kreg(dd, kr_sendbuffererror + i, sbuf[i]);
1358                 }
1359         }
1360 
1361         if (any)
1362                 qib_disarm_piobufs_set(dd, sbuf, piobcnt);
1363 }
1364 
/* No txe_recover yet, if ever */

/* No decode__errors yet */
/*
 * Decode the error bits in @errs into a comma-separated, human-readable
 * string in @msg (at most @len bytes; kept NUL-terminated whenever room
 * remains), using the mask/message table @msp (terminated by a zero mask).
 * For multi-bit masks (per-port/per-context entries) the bit's offset
 * within the mask is appended as "_<idx>".  Any bits not matched by the
 * table are appended in hex as "MORE:<bits>".
 */
static void err_decode(char *msg, size_t len, u64 errs,
                       const struct qib_hwerror_msgs *msp)
{
        u64 these, lmask;
        int took, multi, n = 0;

        while (errs && msp && msp->mask) {
                /* nonzero iff this table entry's mask has more than one bit */
                multi = (msp->mask & (msp->mask - 1));
                while (errs & msp->mask) {
                        these = (errs & msp->mask);
                        /* isolate the lowest set bit of "these" */
                        lmask = (these & (these - 1)) ^ these;
                        if (len) {
                                if (n++) {
                                        /* separate the strings */
                                        *msg++ = ',';
                                        len--;
                                }
                                /* msp->sz counts the nul */
                                took = min_t(size_t, msp->sz - (size_t)1, len);
                                memcpy(msg,  msp->msg, took);
                                len -= took;
                                msg += took;
                                if (len)
                                        *msg = '\0';
                        }
                        /* consume this bit so the inner loop terminates */
                        errs &= ~lmask;
                        if (len && multi) {
                                /* More than one bit this mask */
                                int idx = -1;

                                /*
                                 * Count shifts until the bit falls below the
                                 * mask: idx becomes the bit's offset within
                                 * the (contiguous) mask, i.e. port/ctxt number.
                                 */
                                while (lmask & msp->mask) {
                                        ++idx;
                                        lmask >>= 1;
                                }
                                took = scnprintf(msg, len, "_%d", idx);
                                len -= took;
                                msg += took;
                        }
                }
                ++msp;
        }
        /* If some bits are left, show in hex. */
        if (len && errs)
                snprintf(msg, len, "%sMORE:%llX", n ? "," : "",
                        (unsigned long long) errs);
}
1414 
/*
 * Flush the TXE launch FIFO by launching a dummy VL15 UD packet from a
 * PIO buffer.  The packet is never put on the wire because the caller has
 * set TxeBypassIbc.  Only called if r1 set (presumably a rev-1 silicon
 * workaround — confirm against errata).
 */
static void flush_fifo(struct qib_pportdata *ppd)
{
        struct qib_devdata *dd = ppd->dd;
        u32 __iomem *piobuf;
        u32 bufn;
        u32 *hdr;
        u64 pbc;
        const unsigned hdrwords = 7;
        /* minimal permissive-LID UD SEND-only header, all-zero payload fields */
        static struct ib_header ibhdr = {
                .lrh[0] = cpu_to_be16(0xF000 | QIB_LRH_BTH),
                .lrh[1] = IB_LID_PERMISSIVE,
                .lrh[2] = cpu_to_be16(hdrwords + SIZE_OF_CRC),
                .lrh[3] = IB_LID_PERMISSIVE,
                .u.oth.bth[0] = cpu_to_be32(
                        (IB_OPCODE_UD_SEND_ONLY << 24) | QIB_DEFAULT_P_KEY),
                .u.oth.bth[1] = cpu_to_be32(0),
                .u.oth.bth[2] = cpu_to_be32(0),
                .u.oth.u.ud.deth[0] = cpu_to_be32(0),
                .u.oth.u.ud.deth[1] = cpu_to_be32(0),
        };

        /*
         * Send a dummy VL15 packet to flush the launch FIFO.
         * This will not actually be sent since the TxeBypassIbc bit is set.
         */
        pbc = PBC_7322_VL15_SEND |
                (((u64)ppd->hw_pidx) << (PBC_PORT_SEL_LSB + 32)) |
                (hdrwords + SIZE_OF_CRC);
        piobuf = qib_7322_getsendbuf(ppd, pbc, &bufn);
        if (!piobuf)
                return;
        writeq(pbc, piobuf);
        hdr = (u32 *) &ibhdr;
        if (dd->flags & QIB_PIO_FLUSH_WC) {
                /* write-combining buffers: flush around the copy, and write
                 * the last word separately so the chip sees a complete packet */
                qib_flush_wc();
                qib_pio_copy(piobuf + 2, hdr, hdrwords - 1);
                qib_flush_wc();
                __raw_writel(hdr[hdrwords - 1], piobuf + hdrwords + 1);
                qib_flush_wc();
        } else
                qib_pio_copy(piobuf + 2, hdr, hdrwords);
        qib_sendbuf_done(dd, bufn);
}
1459 
1460 /*
1461  * This is called with interrupts disabled and sdma_lock held.
1462  */
1463 static void qib_7322_sdma_sendctrl(struct qib_pportdata *ppd, unsigned op)
1464 {
1465         struct qib_devdata *dd = ppd->dd;
1466         u64 set_sendctrl = 0;
1467         u64 clr_sendctrl = 0;
1468 
1469         if (op & QIB_SDMA_SENDCTRL_OP_ENABLE)
1470                 set_sendctrl |= SYM_MASK(SendCtrl_0, SDmaEnable);
1471         else
1472                 clr_sendctrl |= SYM_MASK(SendCtrl_0, SDmaEnable);
1473 
1474         if (op & QIB_SDMA_SENDCTRL_OP_INTENABLE)
1475                 set_sendctrl |= SYM_MASK(SendCtrl_0, SDmaIntEnable);
1476         else
1477                 clr_sendctrl |= SYM_MASK(SendCtrl_0, SDmaIntEnable);
1478 
1479         if (op & QIB_SDMA_SENDCTRL_OP_HALT)
1480                 set_sendctrl |= SYM_MASK(SendCtrl_0, SDmaHalt);
1481         else
1482                 clr_sendctrl |= SYM_MASK(SendCtrl_0, SDmaHalt);
1483 
1484         if (op & QIB_SDMA_SENDCTRL_OP_DRAIN)
1485                 set_sendctrl |= SYM_MASK(SendCtrl_0, TxeBypassIbc) |
1486                                 SYM_MASK(SendCtrl_0, TxeAbortIbc) |
1487                                 SYM_MASK(SendCtrl_0, TxeDrainRmFifo);
1488         else
1489                 clr_sendctrl |= SYM_MASK(SendCtrl_0, TxeBypassIbc) |
1490                                 SYM_MASK(SendCtrl_0, TxeAbortIbc) |
1491                                 SYM_MASK(SendCtrl_0, TxeDrainRmFifo);
1492 
1493         spin_lock(&dd->sendctrl_lock);
1494 
1495         /* If we are draining everything, block sends first */
1496         if (op & QIB_SDMA_SENDCTRL_OP_DRAIN) {
1497                 ppd->p_sendctrl &= ~SYM_MASK(SendCtrl_0, SendEnable);
1498                 qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
1499                 qib_write_kreg(dd, kr_scratch, 0);
1500         }
1501 
1502         ppd->p_sendctrl |= set_sendctrl;
1503         ppd->p_sendctrl &= ~clr_sendctrl;
1504 
1505         if (op & QIB_SDMA_SENDCTRL_OP_CLEANUP)
1506                 qib_write_kreg_port(ppd, krp_sendctrl,
1507                                     ppd->p_sendctrl |
1508                                     SYM_MASK(SendCtrl_0, SDmaCleanup));
1509         else
1510                 qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
1511         qib_write_kreg(dd, kr_scratch, 0);
1512 
1513         if (op & QIB_SDMA_SENDCTRL_OP_DRAIN) {
1514                 ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, SendEnable);
1515                 qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
1516                 qib_write_kreg(dd, kr_scratch, 0);
1517         }
1518 
1519         spin_unlock(&dd->sendctrl_lock);
1520 
1521         if ((op & QIB_SDMA_SENDCTRL_OP_DRAIN) && ppd->dd->cspec->r1)
1522                 flush_fifo(ppd);
1523 }
1524 
/* Advance the SDMA state machine: hardware cleanup has completed */
static void qib_7322_sdma_hw_clean_up(struct qib_pportdata *ppd)
{
        __qib_sdma_process_event(ppd, qib_sdma_event_e50_hw_cleaned);
}
1529 
static void qib_sdma_7322_setlengen(struct qib_pportdata *ppd)
{
        /*
         * Set SendDmaLenGen and clear and set
         * the MSB of the generation count to enable generation checking
         * and load the internal generation counter.
         * The two writes (MSB clear, then MSB set) are both required.
         */
        qib_write_kreg_port(ppd, krp_senddmalengen, ppd->sdma_descq_cnt);
        qib_write_kreg_port(ppd, krp_senddmalengen,
                            ppd->sdma_descq_cnt |
                            (1ULL << QIB_7322_SendDmaLenGen_0_Generation_MSB));
}
1542 
1543 /*
1544  * Must be called with sdma_lock held, or before init finished.
1545  */
1546 static void qib_sdma_update_7322_tail(struct qib_pportdata *ppd, u16 tail)
1547 {
1548         /* Commit writes to memory and advance the tail on the chip */
1549         wmb();
1550         ppd->sdma_descq_tail = tail;
1551         qib_write_kreg_port(ppd, krp_senddmatail, tail);
1552 }
1553 
1554 /*
1555  * This is called with interrupts disabled and sdma_lock held.
1556  */
1557 static void qib_7322_sdma_hw_start_up(struct qib_pportdata *ppd)
1558 {
1559         /*
1560          * Drain all FIFOs.
1561          * The hardware doesn't require this but we do it so that verbs
1562          * and user applications don't wait for link active to send stale
1563          * data.
1564          */
1565         sendctrl_7322_mod(ppd, QIB_SENDCTRL_FLUSH);
1566 
1567         qib_sdma_7322_setlengen(ppd);
1568         qib_sdma_update_7322_tail(ppd, 0); /* Set SendDmaTail */
1569         ppd->sdma_head_dma[0] = 0;
1570         qib_7322_sdma_sendctrl(ppd,
1571                 ppd->sdma_state.current_op | QIB_SDMA_SENDCTRL_OP_CLEANUP);
1572 }
1573 
/* Per-port error bits that stop/halt the SDMA engine */
#define DISABLES_SDMA ( \
        QIB_E_P_SDMAHALT | \
        QIB_E_P_SDMADESCADDRMISALIGN | \
        QIB_E_P_SDMAMISSINGDW | \
        QIB_E_P_SDMADWEN | \
        QIB_E_P_SDMARPYTAG | \
        QIB_E_P_SDMA1STDESC | \
        QIB_E_P_SDMABASE | \
        QIB_E_P_SDMATAILOUTOFBOUND | \
        QIB_E_P_SDMAOUTOFBOUND | \
        QIB_E_P_SDMAGENMISMATCH)
1585 
/*
 * Handle per-port SDMA errors: decode them for logging, then feed the
 * appropriate event into the SDMA state machine based on its current state.
 */
static void sdma_7322_p_errors(struct qib_pportdata *ppd, u64 errs)
{
        unsigned long flags;
        struct qib_devdata *dd = ppd->dd;

        errs &= QIB_E_P_SDMAERRS;
        err_decode(ppd->cpspec->sdmamsgbuf, sizeof(ppd->cpspec->sdmamsgbuf),
                   errs, qib_7322p_error_msgs);

        if (errs & QIB_E_P_SDMAUNEXPDATA)
                qib_dev_err(dd, "IB%u:%u SDmaUnexpData\n", dd->unit,
                            ppd->port);

        spin_lock_irqsave(&ppd->sdma_lock, flags);

        if (errs != QIB_E_P_SDMAHALT) {
                /* SDMA errors have QIB_E_P_SDMAHALT and another bit set */
                qib_dev_porterr(dd, ppd->port,
                        "SDMA %s 0x%016llx %s\n",
                        qib_sdma_state_names[ppd->sdma_state.current_state],
                        errs, ppd->cpspec->sdmamsgbuf);
                dump_sdma_7322_state(ppd);
        }

        /* map the (state, halt) combination to a state-machine event */
        switch (ppd->sdma_state.current_state) {
        case qib_sdma_state_s00_hw_down:
                break;

        case qib_sdma_state_s10_hw_start_up_wait:
                if (errs & QIB_E_P_SDMAHALT)
                        __qib_sdma_process_event(ppd,
                                qib_sdma_event_e20_hw_started);
                break;

        case qib_sdma_state_s20_idle:
                break;

        case qib_sdma_state_s30_sw_clean_up_wait:
                break;

        case qib_sdma_state_s40_hw_clean_up_wait:
                if (errs & QIB_E_P_SDMAHALT)
                        __qib_sdma_process_event(ppd,
                                qib_sdma_event_e50_hw_cleaned);
                break;

        case qib_sdma_state_s50_hw_halt_wait:
                if (errs & QIB_E_P_SDMAHALT)
                        __qib_sdma_process_event(ppd,
                                qib_sdma_event_e60_hw_halted);
                break;

        case qib_sdma_state_s99_running:
                __qib_sdma_process_event(ppd, qib_sdma_event_e7322_err_halted);
                __qib_sdma_process_event(ppd, qib_sdma_event_e60_hw_halted);
                break;
        }

        spin_unlock_irqrestore(&ppd->sdma_lock, flags);
}
1646 
1647 /*
1648  * handle per-device errors (not per-port errors)
1649  */
1650 static noinline void handle_7322_errors(struct qib_devdata *dd)
1651 {
1652         char *msg;
1653         u64 iserr = 0;
1654         u64 errs;
1655         u64 mask;
1656 
1657         qib_stats.sps_errints++;
1658         errs = qib_read_kreg64(dd, kr_errstatus);
1659         if (!errs) {
1660                 qib_devinfo(dd->pcidev,
1661                         "device error interrupt, but no error bits set!\n");
1662                 goto done;
1663         }
1664 
1665         /* don't report errors that are masked */
1666         errs &= dd->cspec->errormask;
1667         msg = dd->cspec->emsgbuf;
1668 
1669         /* do these first, they are most important */
1670         if (errs & QIB_E_HARDWARE) {
1671                 *msg = '\0';
1672                 qib_7322_handle_hwerrors(dd, msg, sizeof(dd->cspec->emsgbuf));
1673         }
1674 
1675         if (errs & QIB_E_SPKTERRS) {
1676                 qib_disarm_7322_senderrbufs(dd->pport);
1677                 qib_stats.sps_txerrs++;
1678         } else if (errs & QIB_E_INVALIDADDR)
1679                 qib_stats.sps_txerrs++;
1680         else if (errs & QIB_E_ARMLAUNCH) {
1681                 qib_stats.sps_txerrs++;
1682                 qib_disarm_7322_senderrbufs(dd->pport);
1683         }
1684         qib_write_kreg(dd, kr_errclear, errs);
1685 
1686         /*
1687          * The ones we mask off are handled specially below
1688          * or above.  Also mask SDMADISABLED by default as it
1689          * is too chatty.
1690          */
1691         mask = QIB_E_HARDWARE;
1692         *msg = '\0';
1693 
1694         err_decode(msg, sizeof(dd->cspec->emsgbuf), errs & ~mask,
1695                    qib_7322error_msgs);
1696 
1697         /*
1698          * Getting reset is a tragedy for all ports. Mark the device
1699          * _and_ the ports as "offline" in way meaningful to each.
1700          */
1701         if (errs & QIB_E_RESET) {
1702                 int pidx;
1703 
1704                 qib_dev_err(dd,
1705                         "Got reset, requires re-init (unload and reload driver)\n");
1706                 dd->flags &= ~QIB_INITTED;  /* needs re-init */
1707                 /* mark as having had error */
1708                 *dd->devstatusp |= QIB_STATUS_HWERROR;
1709                 for (pidx = 0; pidx < dd->num_pports; ++pidx)
1710                         if (dd->pport[pidx].link_speed_supported)
1711                                 *dd->pport[pidx].statusp &= ~QIB_STATUS_IB_CONF;
1712         }
1713 
1714         if (*msg && iserr)
1715                 qib_dev_err(dd, "%s error\n", msg);
1716 
1717         /*
1718          * If there were hdrq or egrfull errors, wake up any processes
1719          * waiting in poll.  We used to try to check which contexts had
1720          * the overflow, but given the cost of that and the chip reads
1721          * to support it, it's better to just wake everybody up if we
1722          * get an overflow; waiters can poll again if it's not them.
1723          */
1724         if (errs & (ERR_MASK(RcvEgrFullErr) | ERR_MASK(RcvHdrFullErr))) {
1725                 qib_handle_urcv(dd, ~0U);
1726                 if (errs & ERR_MASK(RcvEgrFullErr))
1727                         qib_stats.sps_buffull++;
1728                 else
1729                         qib_stats.sps_hdrfull++;
1730         }
1731 
1732 done:
1733         return;
1734 }
1735 
/*
 * Tasklet bottom half for device error interrupts: run the error handler,
 * then restore the error interrupt mask (cleared by the top half).
 */
static void qib_error_tasklet(unsigned long data)
{
        struct qib_devdata *dd = (struct qib_devdata *)data;

        handle_7322_errors(dd);
        qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
}
1743 
/*
 * Timer callback: the anti-chase disable period has expired, so take the
 * link back down to POLL so it can renegotiate normally.
 */
static void reenable_chase(struct timer_list *t)
{
        struct qib_chippport_specific *cp = from_timer(cp, t, chase_timer);
        struct qib_pportdata *ppd = cp->ppd;

        /* zero expires marks the timer as inactive for our bookkeeping */
        ppd->cpspec->chase_timer.expires = 0;
        qib_set_ib_7322_lstate(ppd, QLOGIC_IB_IBCC_LINKCMD_DOWN,
                QLOGIC_IB_IBCC_LINKINITCMD_POLL);
}
1753 
/*
 * Break a link-training "chase" by disabling the link and arming a timer
 * (reenable_chase) to bring it back up later.  Skipped entirely when the
 * qib_chase module knob is off.  @tnow and @ibclt are unused here
 * (presumably kept for signature symmetry with callers — confirm).
 */
static void disable_chase(struct qib_pportdata *ppd, unsigned long tnow,
                u8 ibclt)
{
        ppd->cpspec->chase_end = 0;

        if (!qib_chase)
                return;

        qib_set_ib_7322_lstate(ppd, QLOGIC_IB_IBCC_LINKCMD_DOWN,
                QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
        ppd->cpspec->chase_timer.expires = jiffies + QIB_CHASE_DIS_TIME;
        add_timer(&ppd->cpspec->chase_timer);
}
1767 
/*
 * Examine the IBC status after a link state change and apply SerDes
 * workarounds: break training "chases", force H1 / re-tune for QDR,
 * adjust TX SerDes on QMH/QME boards, and re-enable LOS and QDR
 * adaptation when the link goes down.
 */
static void handle_serdes_issues(struct qib_pportdata *ppd, u64 ibcst)
{
        u8 ibclt;
        unsigned long tnow;

        ibclt = (u8)SYM_FIELD(ibcst, IBCStatusA_0, LinkTrainingState);

        /*
         * Detect and handle the state chase issue, where we can
         * get stuck if we are unlucky on timing on both sides of
         * the link.   If we are, we disable, set a timer, and
         * then re-enable.
         */
        switch (ibclt) {
        case IB_7322_LT_STATE_CFGRCVFCFG:
        case IB_7322_LT_STATE_CFGWAITRMT:
        case IB_7322_LT_STATE_TXREVLANES:
        case IB_7322_LT_STATE_CFGENH:
                tnow = jiffies;
                if (ppd->cpspec->chase_end &&
                     time_after(tnow, ppd->cpspec->chase_end))
                        disable_chase(ppd, tnow, ibclt);
                else if (!ppd->cpspec->chase_end)
                        ppd->cpspec->chase_end = tnow + QIB_CHASE_TIME;
                break;
        default:
                /* any other training state cancels chase detection */
                ppd->cpspec->chase_end = 0;
                break;
        }

        if (((ibclt >= IB_7322_LT_STATE_CFGTEST &&
              ibclt <= IB_7322_LT_STATE_CFGWAITENH) ||
             ibclt == IB_7322_LT_STATE_LINKUP) &&
            (ibcst & SYM_MASK(IBCStatusA_0, LinkSpeedQDR))) {
                force_h1(ppd);
                ppd->cpspec->qdr_reforce = 1;
                if (!ppd->dd->cspec->r1)
                        serdes_7322_los_enable(ppd, 0);
        } else if (ppd->cpspec->qdr_reforce &&
                (ibcst & SYM_MASK(IBCStatusA_0, LinkSpeedQDR)) &&
                 (ibclt == IB_7322_LT_STATE_CFGENH ||
                ibclt == IB_7322_LT_STATE_CFGIDLE ||
                ibclt == IB_7322_LT_STATE_LINKUP))
                force_h1(ppd);

        if ((IS_QMH(ppd->dd) || IS_QME(ppd->dd)) &&
            ppd->link_speed_enabled == QIB_IB_QDR &&
            (ibclt == IB_7322_LT_STATE_CFGTEST ||
             ibclt == IB_7322_LT_STATE_CFGENH ||
             (ibclt >= IB_7322_LT_STATE_POLLACTIVE &&
              ibclt <= IB_7322_LT_STATE_SLEEPQUIET)))
                adj_tx_serdes(ppd);

        if (ibclt != IB_7322_LT_STATE_LINKUP) {
                u8 ltstate = qib_7322_phys_portstate(ibcst);
                u8 pibclt = (u8)SYM_FIELD(ppd->lastibcstat, IBCStatusA_0,
                                          LinkTrainingState);
                if (!ppd->dd->cspec->r1 &&
                    pibclt == IB_7322_LT_STATE_LINKUP &&
                    ltstate != IB_PHYSPORTSTATE_LINK_ERR_RECOVER &&
                    ltstate != IB_PHYSPORTSTATE_RECOVERY_RETRAIN &&
                    ltstate != IB_PHYSPORTSTATE_RECOVERY_WAITRMT &&
                    ltstate != IB_PHYSPORTSTATE_RECOVERY_IDLE)
                        /* If the link went down (but not into recovery),
                         * turn LOS back on */
                        serdes_7322_los_enable(ppd, 1);
                if (!ppd->cpspec->qdr_dfe_on &&
                    ibclt <= IB_7322_LT_STATE_SLEEPQUIET) {
                        ppd->cpspec->qdr_dfe_on = 1;
                        ppd->cpspec->qdr_dfe_time = 0;
                        /* On link down, reenable QDR adaptation */
                        qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
                                            ppd->dd->cspec->r1 ?
                                            QDR_STATIC_ADAPT_DOWN_R1 :
                                            QDR_STATIC_ADAPT_DOWN);
                        pr_info(
                                "IB%u:%u re-enabled QDR adaptation ibclt %x\n",
                                ppd->dd->unit, ppd->port, ibclt);
                }
        }
}
1849 
1850 static int qib_7322_set_ib_cfg(struct qib_pportdata *, int, u32);
1851 
1852 /*
1853  * This is per-pport error handling.
1854  * will likely get it's own MSIx interrupt (one for each port,
1855  * although just a single handler).
1856  */
1857 static noinline void handle_7322_p_errors(struct qib_pportdata *ppd)
1858 {
1859         char *msg;
1860         u64 ignore_this_time = 0, iserr = 0, errs, fmask;
1861         struct qib_devdata *dd = ppd->dd;
1862 
1863         /* do this as soon as possible */
1864         fmask = qib_read_kreg64(dd, kr_act_fmask);
1865         if (!fmask)
1866                 check_7322_rxe_status(ppd);
1867 
1868         errs = qib_read_kreg_port(ppd, krp_errstatus);
1869         if (!errs)
1870                 qib_devinfo(dd->pcidev,
1871                          "Port%d error interrupt, but no error bits set!\n",
1872                          ppd->port);
1873         if (!fmask)
1874                 errs &= ~QIB_E_P_IBSTATUSCHANGED;
1875         if (!errs)
1876                 goto done;
1877 
1878         msg = ppd->cpspec->epmsgbuf;
1879         *msg = '\0';
1880 
1881         if (errs & ~QIB_E_P_BITSEXTANT) {
1882                 err_decode(msg, sizeof(ppd->cpspec->epmsgbuf),
1883                            errs & ~QIB_E_P_BITSEXTANT, qib_7322p_error_msgs);
1884                 if (!*msg)
1885                         snprintf(msg, sizeof(ppd->cpspec->epmsgbuf),
1886                                  "no others");
1887                 qib_dev_porterr(dd, ppd->port,
1888                         "error interrupt with unknown errors 0x%016Lx set (and %s)\n",
1889                         (errs & ~QIB_E_P_BITSEXTANT), msg);
1890                 *msg = '\0';
1891         }
1892 
1893         if (errs & QIB_E_P_SHDR) {
1894                 u64 symptom;
1895 
1896                 /* determine cause, then write to clear */
1897                 symptom = qib_read_kreg_port(ppd, krp_sendhdrsymptom);
1898                 qib_write_kreg_port(ppd, krp_sendhdrsymptom, 0);
1899                 err_decode(msg, sizeof(ppd->cpspec->epmsgbuf), symptom,
1900                            hdrchk_msgs);
1901                 *msg = '\0';
1902                 /* senderrbuf cleared in SPKTERRS below */
1903         }
1904 
1905         if (errs & QIB_E_P_SPKTERRS) {
1906                 if ((errs & QIB_E_P_LINK_PKTERRS) &&
1907                     !(ppd->lflags & QIBL_LINKACTIVE)) {
1908                         /*
1909                          * This can happen when trying to bring the link
1910                          * up, but the IB link changes state at the "wrong"
1911                          * time. The IB logic then complains that the packet
1912                          * isn't valid.  We don't want to confuse people, so
1913                          * we just don't print them, except at debug
1914                          */
1915                         err_decode(msg, sizeof(ppd->cpspec->epmsgbuf),
1916                                    (errs & QIB_E_P_LINK_PKTERRS),
1917                                    qib_7322p_error_msgs);
1918                         *msg = '\0';
1919                         ignore_this_time = errs & QIB_E_P_LINK_PKTERRS;
1920                 }
1921                 qib_disarm_7322_senderrbufs(ppd);
1922         } else if ((errs & QIB_E_P_LINK_PKTERRS) &&
1923                    !(ppd->lflags & QIBL_LINKACTIVE)) {
1924                 /*
1925                  * This can happen when SMA is trying to bring the link
1926                  * up, but the IB link changes state at the "wrong" time.
1927                  * The IB logic then complains that the packet isn't
1928                  * valid.  We don't want to confuse people, so we just
1929                  * don't print them, except at debug
1930                  */
1931                 err_decode(msg, sizeof(ppd->cpspec->epmsgbuf), errs,
1932                            qib_7322p_error_msgs);
1933                 ignore_this_time = errs & QIB_E_P_LINK_PKTERRS;
1934                 *msg = '\0';
1935         }
1936 
1937         qib_write_kreg_port(ppd, krp_errclear, errs);
1938 
1939         errs &= ~ignore_this_time;
1940         if (!errs)
1941                 goto done;
1942 
1943         if (errs & QIB_E_P_RPKTERRS)
1944                 qib_stats.sps_rcverrs++;
1945         if (errs & QIB_E_P_SPKTERRS)
1946                 qib_stats.sps_txerrs++;
1947 
1948         iserr = errs & ~(QIB_E_P_RPKTERRS | QIB_E_P_PKTERRS);
1949 
1950         if (errs & QIB_E_P_SDMAERRS)
1951                 sdma_7322_p_errors(ppd, errs);
1952 
1953         if (errs & QIB_E_P_IBSTATUSCHANGED) {
1954                 u64 ibcs;
1955                 u8 ltstate;
1956 
1957                 ibcs = qib_read_kreg_port(ppd, krp_ibcstatus_a);
1958                 ltstate = qib_7322_phys_portstate(ibcs);
1959 
1960                 if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))
1961                         handle_serdes_issues(ppd, ibcs);
1962                 if (!(ppd->cpspec->ibcctrl_a &
1963                       SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn))) {
1964                         /*
1965                          * We got our interrupt, so init code should be
1966                          * happy and not try alternatives. Now squelch
1967                          * other "chatter" from link-negotiation (pre Init)
1968                          */
1969                         ppd->cpspec->ibcctrl_a |=
1970                                 SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn);
1971                         qib_write_kreg_port(ppd, krp_ibcctrl_a,
1972                                             ppd->cpspec->ibcctrl_a);
1973                 }
1974 
1975                 /* Update our picture of width and speed from chip */
1976                 ppd->link_width_active =
1977                         (ibcs & SYM_MASK(IBCStatusA_0, LinkWidthActive)) ?
1978                             IB_WIDTH_4X : IB_WIDTH_1X;
1979                 ppd->link_speed_active = (ibcs & SYM_MASK(IBCStatusA_0,
1980                         LinkSpeedQDR)) ? QIB_IB_QDR : (ibcs &
1981                           SYM_MASK(IBCStatusA_0, LinkSpeedActive)) ?
1982                                    QIB_IB_DDR : QIB_IB_SDR;
1983 
1984                 if ((ppd->lflags & QIBL_IB_LINK_DISABLED) && ltstate !=
1985                     IB_PHYSPORTSTATE_DISABLED)
1986                         qib_set_ib_7322_lstate(ppd, 0,
1987                                QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
1988                 else
1989                         /*
1990                          * Since going into a recovery state causes the link
1991                          * state to go down and since recovery is transitory,
1992                          * it is better if we "miss" ever seeing the link
1993                          * training state go into recovery (i.e., ignore this
1994                          * transition for link state special handling purposes)
1995                          * without updating lastibcstat.
1996                          */
1997                         if (ltstate != IB_PHYSPORTSTATE_LINK_ERR_RECOVER &&
1998                             ltstate != IB_PHYSPORTSTATE_RECOVERY_RETRAIN &&
1999                             ltstate != IB_PHYSPORTSTATE_RECOVERY_WAITRMT &&
2000                             ltstate != IB_PHYSPORTSTATE_RECOVERY_IDLE)
2001                                 qib_handle_e_ibstatuschanged(ppd, ibcs);
2002         }
2003         if (*msg && iserr)
2004                 qib_dev_porterr(dd, ppd->port, "%s error\n", msg);
2005 
2006         if (ppd->state_wanted & ppd->lflags)
2007                 wake_up_interruptible(&ppd->state_wait);
2008 done:
2009         return;
2010 }
2011 
2012 /* enable/disable chip from delivering interrupts */
2013 static void qib_7322_set_intr_state(struct qib_devdata *dd, u32 enable)
2014 {
2015         if (enable) {
2016                 if (dd->flags & QIB_BADINTR)
2017                         return;
2018                 qib_write_kreg(dd, kr_intmask, dd->cspec->int_enable_mask);
2019                 /* cause any pending enabled interrupts to be re-delivered */
2020                 qib_write_kreg(dd, kr_intclear, 0ULL);
2021                 if (dd->cspec->num_msix_entries) {
2022                         /* and same for MSIx */
2023                         u64 val = qib_read_kreg64(dd, kr_intgranted);
2024 
2025                         if (val)
2026                                 qib_write_kreg(dd, kr_intgranted, val);
2027                 }
2028         } else
2029                 qib_write_kreg(dd, kr_intmask, 0ULL);
2030 }
2031 
2032 /*
2033  * Try to cleanup as much as possible for anything that might have gone
2034  * wrong while in freeze mode, such as pio buffers being written by user
2035  * processes (causing armlaunch), send errors due to going into freeze mode,
2036  * etc., and try to avoid causing extra interrupts while doing so.
2037  * Forcibly update the in-memory pioavail register copies after cleanup
2038  * because the chip won't do it while in freeze mode (the register values
2039  * themselves are kept correct).
2040  * Make sure that we don't lose any important interrupts by using the chip
2041  * feature that says that writing 0 to a bit in *clear that is set in
2042  * *status will cause an interrupt to be generated again (if allowed by
2043  * the *mask value).
2044  * This is in chip-specific code because of all of the register accesses,
2045  * even though the details are similar on most chips.
2046  */
2047 static void qib_7322_clear_freeze(struct qib_devdata *dd)
2048 {
2049         int pidx;
2050 
2051         /* disable error interrupts, to avoid confusion */
2052         qib_write_kreg(dd, kr_errmask, 0ULL);
2053 
2054         for (pidx = 0; pidx < dd->num_pports; ++pidx)
2055                 if (dd->pport[pidx].link_speed_supported)
2056                         qib_write_kreg_port(dd->pport + pidx, krp_errmask,
2057                                             0ULL);
2058 
2059         /* also disable interrupts; errormask is sometimes overwritten */
2060         qib_7322_set_intr_state(dd, 0);
2061 
2062         /* clear the freeze, and be sure chip saw it */
2063         qib_write_kreg(dd, kr_control, dd->control);
2064         qib_read_kreg32(dd, kr_scratch);
2065 
2066         /*
2067          * Force new interrupt if any hwerr, error or interrupt bits are
2068          * still set, and clear "safe" send packet errors related to freeze
2069          * and cancelling sends.  Re-enable error interrupts before possible
2070          * force of re-interrupt on pending interrupts.
2071          */
2072         qib_write_kreg(dd, kr_hwerrclear, 0ULL);
2073         qib_write_kreg(dd, kr_errclear, E_SPKT_ERRS_IGNORE);
2074         qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
2075         /* We need to purge per-port errs and reset mask, too */
2076         for (pidx = 0; pidx < dd->num_pports; ++pidx) {
2077                 if (!dd->pport[pidx].link_speed_supported)
2078                         continue;
2079                 qib_write_kreg_port(dd->pport + pidx, krp_errclear, ~0Ull);
2080                 qib_write_kreg_port(dd->pport + pidx, krp_errmask, ~0Ull);
2081         }
2082         qib_7322_set_intr_state(dd, 1);
2083 }
2084 
/* no error handling to speak of */
/**
 * qib_7322_handle_hwerrors - display hardware errors.
 * @dd: the qlogic_ib device
 * @msg: the output buffer
 * @msgl: the size of the output buffer
 *
 * Use same msg buffer as regular errors to avoid excessive stack
 * use.  Most hardware errors are catastrophic, but for right now,
 * we'll print them and continue.  We reuse the same message buffer as
 * qib_handle_errors() to avoid excessive stack usage.
 */
static void qib_7322_handle_hwerrors(struct qib_devdata *dd, char *msg,
				     size_t msgl)
{
	u64 hwerrs;
	u32 ctrl;
	int isfatal = 0;

	hwerrs = qib_read_kreg64(dd, kr_hwerrstatus);
	if (!hwerrs)
		goto bail;
	if (hwerrs == ~0ULL) {
		/* all-ones read: register access itself failed; nothing to do */
		qib_dev_err(dd,
			"Read of hardware error status failed (all bits set); ignoring\n");
		goto bail;
	}
	qib_stats.sps_hwerrs++;

	/*
	 * Always clear the error status register, except BIST fail,
	 * which is deliberately left set so it is re-reported on
	 * every driver load (see qib_7322_init_hwerrors()).
	 */
	qib_write_kreg(dd, kr_hwerrclear, hwerrs &
		       ~HWE_MASK(PowerOnBISTFailed));

	/* only act on (and report) errors we are not masking off */
	hwerrs &= dd->cspec->hwerrmask;

	/* no EEPROM logging, yet */

	if (hwerrs)
		qib_devinfo(dd->pcidev,
			"Hardware error: hwerr=0x%llx (cleared)\n",
			(unsigned long long) hwerrs);

	ctrl = qib_read_kreg32(dd, kr_control);
	if ((ctrl & SYM_MASK(Control, FreezeMode)) && !dd->diag_client) {
		/*
		 * No recovery yet...
		 */
		if ((hwerrs & ~HWE_MASK(LATriggered)) ||
		    dd->cspec->stay_in_freeze) {
			/*
			 * If any set that we aren't ignoring only make the
			 * complaint once, in case it's stuck or recurring,
			 * and we get here multiple times
			 * Force link down, so switch knows, and
			 * LEDs are turned off.
			 */
			if (dd->flags & QIB_INITTED)
				isfatal = 1;
		} else
			/* only LATriggered (or nothing) set; safe to unfreeze */
			qib_7322_clear_freeze(dd);
	}

	if (hwerrs & HWE_MASK(PowerOnBISTFailed)) {
		isfatal = 1;
		/* NOTE(review): strlcpy is deprecated upstream; strscpy preferred */
		strlcpy(msg,
			"[Memory BIST test failed, InfiniPath hardware unusable]",
			msgl);
		/* ignore from now on, so disable until driver reloaded */
		dd->cspec->hwerrmask &= ~HWE_MASK(PowerOnBISTFailed);
		qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
	}

	err_decode(msg, msgl, hwerrs, qib_7322_hwerror_msgs);

	/* Ignore esoteric PLL failures et al. */

	qib_dev_err(dd, "%s hardware error\n", msg);

	/* on an SDMA memory read error, dump SDMA state for the affected port */
	if (hwerrs &
		   (SYM_MASK(HwErrMask, SDmaMemReadErrMask_0) |
		    SYM_MASK(HwErrMask, SDmaMemReadErrMask_1))) {
		int pidx = 0;
		int err;
		unsigned long flags;
		struct qib_pportdata *ppd = dd->pport;

		for (; pidx < dd->num_pports; ++pidx, ppd++) {
			err = 0;
			if (pidx == 0 && (hwerrs &
				SYM_MASK(HwErrMask, SDmaMemReadErrMask_0)))
				err++;
			if (pidx == 1 && (hwerrs &
				SYM_MASK(HwErrMask, SDmaMemReadErrMask_1)))
				err++;
			if (err) {
				spin_lock_irqsave(&ppd->sdma_lock, flags);
				dump_sdma_7322_state(ppd);
				spin_unlock_irqrestore(&ppd->sdma_lock, flags);
			}
		}
	}

	if (isfatal && !dd->diag_client) {
		qib_dev_err(dd,
			"Fatal Hardware Error, no longer usable, SN %.16s\n",
			dd->serial);
		/*
		 * for /sys status file and user programs to print; if no
		 * trailing brace is copied, we'll know it was truncated.
		 */
		if (dd->freezemsg)
			snprintf(dd->freezemsg, dd->freezelen,
				 "{%s}", msg);
		qib_disable_after_error(dd);
	}
bail:;
}
2202 
2203 /**
2204  * qib_7322_init_hwerrors - enable hardware errors
2205  * @dd: the qlogic_ib device
2206  *
2207  * now that we have finished initializing everything that might reasonably
2208  * cause a hardware error, and cleared those errors bits as they occur,
2209  * we can enable hardware errors in the mask (potentially enabling
2210  * freeze mode), and enable hardware errors as errors (along with
2211  * everything else) in errormask
2212  */
2213 static void qib_7322_init_hwerrors(struct qib_devdata *dd)
2214 {
2215         int pidx;
2216         u64 extsval;
2217 
2218         extsval = qib_read_kreg64(dd, kr_extstatus);
2219         if (!(extsval & (QIB_EXTS_MEMBIST_DISABLED |
2220                          QIB_EXTS_MEMBIST_ENDTEST)))
2221                 qib_dev_err(dd, "MemBIST did not complete!\n");
2222 
2223         /* never clear BIST failure, so reported on each driver load */
2224         qib_write_kreg(dd, kr_hwerrclear, ~HWE_MASK(PowerOnBISTFailed));
2225         qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
2226 
2227         /* clear all */
2228         qib_write_kreg(dd, kr_errclear, ~0ULL);
2229         /* enable errors that are masked, at least this first time. */
2230         qib_write_kreg(dd, kr_errmask, ~0ULL);
2231         dd->cspec->errormask = qib_read_kreg64(dd, kr_errmask);
2232         for (pidx = 0; pidx < dd->num_pports; ++pidx)
2233                 if (dd->pport[pidx].link_speed_supported)
2234                         qib_write_kreg_port(dd->pport + pidx, krp_errmask,
2235                                             ~0ULL);
2236 }
2237 
2238 /*
2239  * Disable and enable the armlaunch error.  Used for PIO bandwidth testing
2240  * on chips that are count-based, rather than trigger-based.  There is no
2241  * reference counting, but that's also fine, given the intended use.
2242  * Only chip-specific because it's all register accesses
2243  */
2244 static void qib_set_7322_armlaunch(struct qib_devdata *dd, u32 enable)
2245 {
2246         if (enable) {
2247                 qib_write_kreg(dd, kr_errclear, QIB_E_SPIOARMLAUNCH);
2248                 dd->cspec->errormask |= QIB_E_SPIOARMLAUNCH;
2249         } else
2250                 dd->cspec->errormask &= ~QIB_E_SPIOARMLAUNCH;
2251         qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
2252 }
2253 
/*
 * Request an IB link state change for the port by writing LinkCmd and
 * LinkInitCmd into the port's IBCCtrlA register (zero for either is NOP).
 *
 * @ppd: physical port to act on
 * @linkcmd: QLOGIC_IB_IBCC_LINKCMD_* value
 * @linitcmd: QLOGIC_IB_IBCC_LINKINITCMD_* value
 *
 * Formerly took parameter <which> in pre-shifted,
 * pre-merged form with LinkCmd and LinkInitCmd
 * together, and assuming the zero was NOP.
 */
static void qib_set_ib_7322_lstate(struct qib_pportdata *ppd, u16 linkcmd,
				   u16 linitcmd)
{
	u64 mod_wd;
	struct qib_devdata *dd = ppd->dd;
	unsigned long flags;

	if (linitcmd == QLOGIC_IB_IBCC_LINKINITCMD_DISABLE) {
		/*
		 * If we are told to disable, note that so link-recovery
		 * code does not attempt to bring us back up.
		 * Also reset everything that we can, so we start
		 * completely clean when re-enabled (before we
		 * actually issue the disable to the IBC)
		 */
		qib_7322_mini_pcs_reset(ppd);
		spin_lock_irqsave(&ppd->lflags_lock, flags);
		ppd->lflags |= QIBL_IB_LINK_DISABLED;
		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
	} else if (linitcmd || linkcmd == QLOGIC_IB_IBCC_LINKCMD_DOWN) {
		/*
		 * Any other linkinitcmd will lead to LINKDOWN and then
		 * to INIT (if all is well), so clear flag to let
		 * link-recovery code attempt to bring us back up.
		 */
		spin_lock_irqsave(&ppd->lflags_lock, flags);
		ppd->lflags &= ~QIBL_IB_LINK_DISABLED;
		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
		/*
		 * Clear status change interrupt reduction so the
		 * new state is seen.
		 */
		ppd->cpspec->ibcctrl_a &=
			~SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn);
	}

	/* merge the two commands into the positions the chip expects */
	mod_wd = (linkcmd << IBA7322_IBCC_LINKCMD_SHIFT) |
		(linitcmd << QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);

	qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a |
			    mod_wd);
	/* write to chip to prevent back-to-back writes of ibc reg */
	qib_write_kreg(dd, kr_scratch, 0);

}
2304 
2305 /*
2306  * The total RCV buffer memory is 64KB, used for both ports, and is
2307  * in units of 64 bytes (same as IB flow control credit unit).
2308  * The consumedVL unit in the same registers are in 32 byte units!
2309  * So, a VL15 packet needs 4.50 IB credits, and 9 rx buffer chunks,
2310  * and we can therefore allocate just 9 IB credits for 2 VL15 packets
2311  * in krp_rxcreditvl15, rather than 10.
2312  */
2313 #define RCV_BUF_UNITSZ 64
2314 #define NUM_RCV_BUF_UNITS(dd) ((64 * 1024) / (RCV_BUF_UNITSZ * dd->num_pports))
2315 
/*
 * Distribute the receive buffer credits among the VLs for this port:
 * VL15 gets enough for two max-size packets, the operational data VLs
 * split the remainder (VL0 taking any rounding excess), then the IBC
 * is told to recalculate credits and NumVLane is updated in IBCCtrlA.
 * See the comment above RCV_BUF_UNITSZ for the credit accounting.
 */
static void set_vls(struct qib_pportdata *ppd)
{
	int i, numvls, totcred, cred_vl, vl0extra;
	struct qib_devdata *dd = ppd->dd;
	u64 val;

	numvls = qib_num_vls(ppd->vls_operational);

	/*
	 * Set up per-VL credits. Below is kluge based on these assumptions:
	 * 1) port is disabled at the time early_init is called.
	 * 2) give VL15 17 credits, for two max-plausible packets.
	 * 3) Give VL0-N the rest, with any rounding excess used for VL0
	 */
	/* 2 VL15 packets @ 288 bytes each (including IB headers) */
	totcred = NUM_RCV_BUF_UNITS(dd);
	cred_vl = (2 * 288 + RCV_BUF_UNITSZ - 1) / RCV_BUF_UNITSZ;
	totcred -= cred_vl;
	qib_write_kreg_port(ppd, krp_rxcreditvl15, (u64) cred_vl);
	cred_vl = totcred / numvls;
	vl0extra = totcred - cred_vl * numvls;
	qib_write_kreg_port(ppd, krp_rxcreditvl0, cred_vl + vl0extra);
	for (i = 1; i < numvls; i++)
		qib_write_kreg_port(ppd, krp_rxcreditvl0 + i, cred_vl);
	for (; i < 8; i++) /* no buffer space for other VLs */
		qib_write_kreg_port(ppd, krp_rxcreditvl0 + i, 0);

	/* Notify IBC that credits need to be recalculated */
	val = qib_read_kreg_port(ppd, krp_ibsdtestiftx);
	val |= SYM_MASK(IB_SDTEST_IF_TX_0, CREDIT_CHANGE);
	qib_write_kreg_port(ppd, krp_ibsdtestiftx, val);
	qib_write_kreg(dd, kr_scratch, 0ULL);
	val &= ~SYM_MASK(IB_SDTEST_IF_TX_0, CREDIT_CHANGE);
	qib_write_kreg_port(ppd, krp_ibsdtestiftx, val);

	/*
	 * Read the credit registers back; results are discarded.
	 * NOTE(review): presumably these reads flush the preceding
	 * writes before NumVLane is changed below -- confirm vs HW docs.
	 */
	for (i = 0; i < numvls; i++)
		val = qib_read_kreg_port(ppd, krp_rxcreditvl0 + i);
	val = qib_read_kreg_port(ppd, krp_rxcreditvl15);

	/* Change the number of operational VLs */
	ppd->cpspec->ibcctrl_a = (ppd->cpspec->ibcctrl_a &
				~SYM_MASK(IBCCtrlA_0, NumVLane)) |
		((u64)(numvls - 1) << SYM_LSB(IBCCtrlA_0, NumVLane));
	qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
	qib_write_kreg(dd, kr_scratch, 0ULL);
}
2362 
2363 /*
2364  * The code that deals with actual SerDes is in serdes_7322_init().
2365  * Compared to the code for iba7220, it is minimal.
2366  */
2367 static int serdes_7322_init(struct qib_pportdata *ppd);
2368 
/**
 * qib_7322_bringup_serdes - bring up the serdes
 * @ppd: physical port on the qlogic_ib device
 *
 * Programs IBCCtrlA/B/C for the port, initializes the SerDes, sets the
 * port GUID and per-VL credits, and leaves the link DISABLED; a later
 * link command actually enables it.
 *
 * Return: always 0 for now (see comment at the end).
 */
static int qib_7322_bringup_serdes(struct qib_pportdata *ppd)
{
	struct qib_devdata *dd = ppd->dd;
	u64 val, guid, ibc;
	unsigned long flags;
	int ret = 0;

	/*
	 * SerDes model not in Pd, but still need to
	 * set up much of IBCCtrl and IBCDDRCtrl; move elsewhere
	 * eventually.
	 */
	/* Put IBC in reset, sends disabled (should be in reset already) */
	ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, IBLinkEn);
	qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
	qib_write_kreg(dd, kr_scratch, 0ULL);

	/* ensure previous Tx parameters are not still forced */
	qib_write_kreg_port(ppd, krp_tx_deemph_override,
		SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
		reset_tx_deemphasis_override));

	if (qib_compat_ddr_negotiate) {
		/* snapshot error counters so later adjustments can persist */
		ppd->cpspec->ibdeltainprog = 1;
		ppd->cpspec->ibsymsnap = read_7322_creg32_port(ppd,
						crp_ibsymbolerr);
		ppd->cpspec->iblnkerrsnap = read_7322_creg32_port(ppd,
						crp_iblinkerrrecov);
	}

	/* flowcontrolwatermark is in units of KBytes */
	ibc = 0x5ULL << SYM_LSB(IBCCtrlA_0, FlowCtrlWaterMark);
	/*
	 * Flow control is sent this often, even if no changes in
	 * buffer space occur.  Units are 128ns for this chip.
	 * Set to 3usec.
	 */
	ibc |= 24ULL << SYM_LSB(IBCCtrlA_0, FlowCtrlPeriod);
	/* max error tolerance */
	ibc |= 0xfULL << SYM_LSB(IBCCtrlA_0, PhyerrThreshold);
	/* IB credit flow control. */
	ibc |= 0xfULL << SYM_LSB(IBCCtrlA_0, OverrunThreshold);
	/*
	 * set initial max size pkt IBC will send, including ICRC; it's the
	 * PIO buffer size in dwords, less 1; also see qib_set_mtu()
	 */
	ibc |= ((u64)(ppd->ibmaxlen >> 2) + 1) <<
		SYM_LSB(IBCCtrlA_0, MaxPktLen);
	ppd->cpspec->ibcctrl_a = ibc; /* without linkcmd or linkinitcmd! */

	/*
	 * Reset the PCS interface to the serdes (and also ibc, which is still
	 * in reset from above).  Writes new value of ibcctrl_a as last step.
	 */
	qib_7322_mini_pcs_reset(ppd);

	if (!ppd->cpspec->ibcctrl_b) {
		unsigned lse = ppd->link_speed_enabled;

		/*
		 * Not on re-init after reset, establish shadow
		 * and force initial config.
		 */
		ppd->cpspec->ibcctrl_b = qib_read_kreg_port(ppd,
							     krp_ibcctrl_b);
		ppd->cpspec->ibcctrl_b &= ~(IBA7322_IBC_SPEED_QDR |
				IBA7322_IBC_SPEED_DDR |
				IBA7322_IBC_SPEED_SDR |
				IBA7322_IBC_WIDTH_AUTONEG |
				SYM_MASK(IBCCtrlB_0, IB_LANE_REV_SUPPORTED));
		if (lse & (lse - 1)) /* Multiple speeds enabled */
			ppd->cpspec->ibcctrl_b |=
				(lse << IBA7322_IBC_SPEED_LSB) |
				IBA7322_IBC_IBTA_1_2_MASK |
				IBA7322_IBC_MAX_SPEED_MASK;
		else
			ppd->cpspec->ibcctrl_b |= (lse == QIB_IB_QDR) ?
				IBA7322_IBC_SPEED_QDR |
				 IBA7322_IBC_IBTA_1_2_MASK :
				(lse == QIB_IB_DDR) ?
					IBA7322_IBC_SPEED_DDR :
					IBA7322_IBC_SPEED_SDR;
		if ((ppd->link_width_enabled & (IB_WIDTH_1X | IB_WIDTH_4X)) ==
		    (IB_WIDTH_1X | IB_WIDTH_4X))
			ppd->cpspec->ibcctrl_b |= IBA7322_IBC_WIDTH_AUTONEG;
		else
			ppd->cpspec->ibcctrl_b |=
				ppd->link_width_enabled == IB_WIDTH_4X ?
				IBA7322_IBC_WIDTH_4X_ONLY :
				IBA7322_IBC_WIDTH_1X_ONLY;

		/* always enable these on driver reload, not sticky */
		ppd->cpspec->ibcctrl_b |= (IBA7322_IBC_RXPOL_MASK |
			IBA7322_IBC_HRTBT_MASK);
	}
	qib_write_kreg_port(ppd, krp_ibcctrl_b, ppd->cpspec->ibcctrl_b);

	/* setup so we have more time at CFGTEST to change H1 */
	val = qib_read_kreg_port(ppd, krp_ibcctrl_c);
	val &= ~SYM_MASK(IBCCtrlC_0, IB_FRONT_PORCH);
	val |= 0xfULL << SYM_LSB(IBCCtrlC_0, IB_FRONT_PORCH);
	qib_write_kreg_port(ppd, krp_ibcctrl_c, val);

	serdes_7322_init(ppd);

	/* derive a per-port GUID from the device GUID if none was set */
	guid = be64_to_cpu(ppd->guid);
	if (!guid) {
		if (dd->base_guid)
			guid = be64_to_cpu(dd->base_guid) + ppd->port - 1;
		ppd->guid = cpu_to_be64(guid);
	}

	qib_write_kreg_port(ppd, krp_hrtbt_guid, guid);
	/* write to chip to prevent back-to-back writes of ibc reg */
	qib_write_kreg(dd, kr_scratch, 0);

	/* Enable port */
	ppd->cpspec->ibcctrl_a |= SYM_MASK(IBCCtrlA_0, IBLinkEn);
	set_vls(ppd);

	/* initially come up DISABLED, without sending anything. */
	val = ppd->cpspec->ibcctrl_a | (QLOGIC_IB_IBCC_LINKINITCMD_DISABLE <<
					QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);
	qib_write_kreg_port(ppd, krp_ibcctrl_a, val);
	qib_write_kreg(dd, kr_scratch, 0ULL);
	/* clear the linkinit cmds */
	ppd->cpspec->ibcctrl_a = val & ~SYM_MASK(IBCCtrlA_0, LinkInitCmd);

	/* be paranoid against later code motion, etc. */
	spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
	ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvIBPortEnable);
	qib_write_kreg_port(ppd, krp_rcvctrl, ppd->p_rcvctrl);
	spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);

	/* Also enable IBSTATUSCHG interrupt.  */
	val = qib_read_kreg_port(ppd, krp_errmask);
	qib_write_kreg_port(ppd, krp_errmask,
		val | ERR_MASK_N(IBStatusChanged));

	/* Always zero until we start messing with SerDes for real */
	return ret;
}
2515 
/**
 * qib_7322_mini_quiet_serdes - set serdes to txidle
 * @ppd: physical port on the qlogic_ib device
 *
 * Called when driver is being unloaded.  Takes the link down (DISABLED),
 * cancels autoneg/IPG work and the chase timer, disables the IBC via a
 * PCS reset, and folds the driver's counter adjustments back into the
 * chip counters so they persist across a driver reload.
 */
static void qib_7322_mini_quiet_serdes(struct qib_pportdata *ppd)
{
	u64 val;
	unsigned long flags;

	qib_set_ib_7322_lstate(ppd, 0, QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);

	spin_lock_irqsave(&ppd->lflags_lock, flags);
	ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
	spin_unlock_irqrestore(&ppd->lflags_lock, flags);
	wake_up(&ppd->cpspec->autoneg_wait);
	cancel_delayed_work_sync(&ppd->cpspec->autoneg_work);
	if (ppd->dd->cspec->r1)
		cancel_delayed_work_sync(&ppd->cpspec->ipg_work);

	ppd->cpspec->chase_end = 0;
	if (ppd->cpspec->chase_timer.function) /* if initted */
		del_timer_sync(&ppd->cpspec->chase_timer);

	/*
	 * Despite the name, actually disables IBC as well. Do it when
	 * we are as sure as possible that no more packets can be
	 * received, following the down and the PCS reset.
	 * The actual disabling happens in qib_7322_mini_pcs_reset(),
	 * along with the PCS being reset.
	 */
	ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, IBLinkEn);
	qib_7322_mini_pcs_reset(ppd);

	/*
	 * Update the adjusted counters so the adjustment persists
	 * across driver reload.
	 */
	if (ppd->cpspec->ibsymdelta || ppd->cpspec->iblnkerrdelta ||
	    ppd->cpspec->ibdeltainprog || ppd->cpspec->iblnkdowndelta) {
		struct qib_devdata *dd = ppd->dd;
		u64 diagc;

		/* enable counter writes */
		diagc = qib_read_kreg64(dd, kr_hwdiagctrl);
		qib_write_kreg(dd, kr_hwdiagctrl,
			       diagc | SYM_MASK(HwDiagCtrl, CounterWrEnable));

		if (ppd->cpspec->ibsymdelta || ppd->cpspec->ibdeltainprog) {
			val = read_7322_creg32_port(ppd, crp_ibsymbolerr);
			/*
			 * NOTE(review): "val -= val - snap" is equivalent to
			 * "val = snap" (roll back to the snapshot while an
			 * adjustment is in progress); kept as-is -- confirm
			 * intent before simplifying.
			 */
			if (ppd->cpspec->ibdeltainprog)
				val -= val - ppd->cpspec->ibsymsnap;
			val -= ppd->cpspec->ibsymdelta;
			write_7322_creg_port(ppd, crp_ibsymbolerr, val);
		}
		if (ppd->cpspec->iblnkerrdelta || ppd->cpspec->ibdeltainprog) {
			val = read_7322_creg32_port(ppd, crp_iblinkerrrecov);
			if (ppd->cpspec->ibdeltainprog)
				val -= val - ppd->cpspec->iblnkerrsnap;
			val -= ppd->cpspec->iblnkerrdelta;
			write_7322_creg_port(ppd, crp_iblinkerrrecov, val);
		}
		if (ppd->cpspec->iblnkdowndelta) {
			val = read_7322_creg32_port(ppd, crp_iblinkdown);
			val += ppd->cpspec->iblnkdowndelta;
			write_7322_creg_port(ppd, crp_iblinkdown, val);
		}
		/*
		 * No need to save ibmalfdelta since IB perfcounters
		 * are cleared on driver reload.
		 */

		/* and disable counter writes */
		qib_write_kreg(dd, kr_hwdiagctrl, diagc);
	}
}
2592 
2593 /**
2594  * qib_setup_7322_setextled - set the state of the two external LEDs
2595  * @ppd: physical port on the qlogic_ib device
2596  * @on: whether the link is up or not
2597  *
2598  * The exact combo of LEDs if on is true is determined by looking
2599  * at the ibcstatus.
2600  *
2601  * These LEDs indicate the physical and logical state of IB link.
2602  * For this chip (at least with recommended board pinouts), LED1
2603  * is Yellow (logical state) and LED2 is Green (physical state),
2604  *
2605  * Note:  We try to match the Mellanox HCA LED behavior as best
2606  * we can.  Green indicates physical link state is OK (something is
2607  * plugged in, and we can train).
2608  * Amber indicates the link is logically up (ACTIVE).
2609  * Mellanox further blinks the amber LED to indicate data packet
2610  * activity, but we have no hardware support for that, so it would
2611  * require waking up every 10-20 msecs and checking the counters
2612  * on the chip, and then turning the LED off if appropriate.  That's
2613  * visible overhead, so not something we will do.
2614  */
2615 static void qib_setup_7322_setextled(struct qib_pportdata *ppd, u32 on)
2616 {
2617         struct qib_devdata *dd = ppd->dd;
2618         u64 extctl, ledblink = 0, val;
2619         unsigned long flags;
2620         int yel, grn;
2621 
2622         /*
2623          * The diags use the LED to indicate diag info, so we leave
2624          * the external LED alone when the diags are running.
2625          */
2626         if (dd->diag_client)
2627                 return;
2628 
2629         /* Allow override of LED display for, e.g. Locating system in rack */
2630         if (ppd->led_override) {
2631                 grn = (ppd->led_override & QIB_LED_PHYS);
2632                 yel = (ppd->led_override & QIB_LED_LOG);
2633         } else if (on) {
2634                 val = qib_read_kreg_port(ppd, krp_ibcstatus_a);
2635                 grn = qib_7322_phys_portstate(val) ==
2636                         IB_PHYSPORTSTATE_LINKUP;
2637                 yel = qib_7322_iblink_state(val) == IB_PORT_ACTIVE;
2638         } else {
2639                 grn = 0;
2640                 yel = 0;
2641         }
2642 
2643         spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
2644         extctl = dd->cspec->extctrl & (ppd->port == 1 ?
2645                 ~ExtLED_IB1_MASK : ~ExtLED_IB2_MASK);
2646         if (grn) {
2647                 extctl |= ppd->port == 1 ? ExtLED_IB1_GRN : ExtLED_IB2_GRN;
2648                 /*
2649                  * Counts are in chip clock (4ns) periods.
2650                  * This is 1/16 sec (66.6ms) on,
2651                  * 3/16 sec (187.5 ms) off, with packets rcvd.
2652                  */
2653                 ledblink = ((66600 * 1000UL / 4) << IBA7322_LEDBLINK_ON_SHIFT) |
2654                         ((187500 * 1000UL / 4) << IBA7322_LEDBLINK_OFF_SHIFT);
2655         }
2656         if (yel)
2657                 extctl |= ppd->port == 1 ? ExtLED_IB1_YEL : ExtLED_IB2_YEL;
2658         dd->cspec->extctrl = extctl;
2659         qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl);
2660         spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
2661 
2662         if (ledblink) /* blink the LED on packet receive */
2663                 qib_write_kreg_port(ppd, krp_rcvpktledcnt, ledblink);
2664 }
2665 
2666 #ifdef CONFIG_INFINIBAND_QIB_DCA
2667 
2668 static int qib_7322_notify_dca(struct qib_devdata *dd, unsigned long event)
2669 {
2670         switch (event) {
2671         case DCA_PROVIDER_ADD:
2672                 if (dd->flags & QIB_DCA_ENABLED)
2673                         break;
2674                 if (!dca_add_requester(&dd->pcidev->dev)) {
2675                         qib_devinfo(dd->pcidev, "DCA enabled\n");
2676                         dd->flags |= QIB_DCA_ENABLED;
2677                         qib_setup_dca(dd);
2678                 }
2679                 break;
2680         case DCA_PROVIDER_REMOVE:
2681                 if (dd->flags & QIB_DCA_ENABLED) {
2682                         dca_remove_requester(&dd->pcidev->dev);
2683                         dd->flags &= ~QIB_DCA_ENABLED;
2684                         dd->cspec->dca_ctrl = 0;
2685                         qib_write_kreg(dd, KREG_IDX(DCACtrlA),
2686                                 dd->cspec->dca_ctrl);
2687                 }
2688                 break;
2689         }
2690         return 0;
2691 }
2692 
/*
 * Retarget DCA for a context's rcvhdr queue at @cpu, if it changed.
 * Updates the shadow copy, writes the per-context DCA register, and
 * enables RcvHdrq DCA in DCACtrlA.  No-op unless DCA is enabled.
 */
static void qib_update_rhdrq_dca(struct qib_ctxtdata *rcd, int cpu)
{
	struct qib_devdata *dd = rcd->dd;
	struct qib_chip_specific *cspec = dd->cspec;

	if (!(dd->flags & QIB_DCA_ENABLED))
		return;
	if (cspec->rhdr_cpu[rcd->ctxt] != cpu) {
		const struct dca_reg_map *rmp;

		cspec->rhdr_cpu[rcd->ctxt] = cpu;
		rmp = &dca_rcvhdr_reg_map[rcd->ctxt];
		/* replace this context's tag field with the new cpu's DCA tag */
		cspec->dca_rcvhdr_ctrl[rmp->shadow_inx] &= rmp->mask;
		cspec->dca_rcvhdr_ctrl[rmp->shadow_inx] |=
			(u64) dca3_get_tag(&dd->pcidev->dev, cpu) << rmp->lsb;
		qib_devinfo(dd->pcidev,
			"Ctxt %d cpu %d dca %llx\n", rcd->ctxt, cpu,
			(long long) cspec->dca_rcvhdr_ctrl[rmp->shadow_inx]);
		qib_write_kreg(dd, rmp->regno,
			       cspec->dca_rcvhdr_ctrl[rmp->shadow_inx]);
		cspec->dca_ctrl |= SYM_MASK(DCACtrlA, RcvHdrqDCAEnable);
		qib_write_kreg(dd, KREG_IDX(DCACtrlA), cspec->dca_ctrl);
	}
}
2717 
/*
 * Retarget DCA for a port's SDMA head at @cpu, if it changed.
 * Both ports' SendDma tags live in the DCACtrlF register (shadow
 * slot [4]); the port index selects which field and enable bit.
 * No-op unless DCA is enabled.
 */
static void qib_update_sdma_dca(struct qib_pportdata *ppd, int cpu)
{
	struct qib_devdata *dd = ppd->dd;
	struct qib_chip_specific *cspec = dd->cspec;
	unsigned pidx = ppd->port - 1;

	if (!(dd->flags & QIB_DCA_ENABLED))
		return;
	if (cspec->sdma_cpu[pidx] != cpu) {
		cspec->sdma_cpu[pidx] = cpu;
		/* clear then set this port's tag field in the shared shadow */
		cspec->dca_rcvhdr_ctrl[4] &= ~(ppd->hw_pidx ?
			SYM_MASK(DCACtrlF, SendDma1DCAOPH) :
			SYM_MASK(DCACtrlF, SendDma0DCAOPH));
		cspec->dca_rcvhdr_ctrl[4] |=
			(u64) dca3_get_tag(&dd->pcidev->dev, cpu) <<
				(ppd->hw_pidx ?
					SYM_LSB(DCACtrlF, SendDma1DCAOPH) :
					SYM_LSB(DCACtrlF, SendDma0DCAOPH));
		qib_devinfo(dd->pcidev,
			"sdma %d cpu %d dca %llx\n", ppd->hw_pidx, cpu,
			(long long) cspec->dca_rcvhdr_ctrl[4]);
		qib_write_kreg(dd, KREG_IDX(DCACtrlF),
			       cspec->dca_rcvhdr_ctrl[4]);
		cspec->dca_ctrl |= ppd->hw_pidx ?
			SYM_MASK(DCACtrlA, SendDMAHead1DCAEnable) :
			SYM_MASK(DCACtrlA, SendDMAHead0DCAEnable);
		qib_write_kreg(dd, KREG_IDX(DCACtrlA), cspec->dca_ctrl);
	}
}
2747 
2748 static void qib_setup_dca(struct qib_devdata *dd)
2749 {
2750         struct qib_chip_specific *cspec = dd->cspec;
2751         int i;
2752 
2753         for (i = 0; i < ARRAY_SIZE(cspec->rhdr_cpu); i++)
2754                 cspec->rhdr_cpu[i] = -1;
2755         for (i = 0; i < ARRAY_SIZE(cspec->sdma_cpu); i++)
2756                 cspec->sdma_cpu[i] = -1;
2757         cspec->dca_rcvhdr_ctrl[0] =
2758                 (1ULL << SYM_LSB(DCACtrlB, RcvHdrq0DCAXfrCnt)) |
2759                 (1ULL << SYM_LSB(DCACtrlB, RcvHdrq1DCAXfrCnt)) |
2760                 (1ULL << SYM_LSB(DCACtrlB, RcvHdrq2DCAXfrCnt)) |
2761                 (1ULL << SYM_LSB(DCACtrlB, RcvHdrq3DCAXfrCnt));
2762         cspec->dca_rcvhdr_ctrl[1] =
2763                 (1ULL << SYM_LSB(DCACtrlC, RcvHdrq4DCAXfrCnt)) |
2764                 (1ULL << SYM_LSB(DCACtrlC, RcvHdrq5DCAXfrCnt)) |
2765                 (1ULL << SYM_LSB(DCACtrlC, RcvHdrq6DCAXfrCnt)) |
2766                 (1ULL << SYM_LSB(DCACtrlC, RcvHdrq7DCAXfrCnt));
2767         cspec->dca_rcvhdr_ctrl[2] =
2768                 (1ULL << SYM_LSB(DCACtrlD, RcvHdrq8DCAXfrCnt)) |
2769                 (1ULL << SYM_LSB(DCACtrlD, RcvHdrq9DCAXfrCnt)) |
2770                 (1ULL << SYM_LSB(DCACtrlD, RcvHdrq10DCAXfrCnt)) |
2771                 (1ULL << SYM_LSB(DCACtrlD, RcvHdrq11DCAXfrCnt));
2772         cspec->dca_rcvhdr_ctrl[3] =
2773                 (1ULL << SYM_LSB(DCACtrlE, RcvHdrq12DCAXfrCnt)) |
2774                 (1ULL << SYM_LSB(DCACtrlE, RcvHdrq13DCAXfrCnt)) |
2775                 (1ULL << SYM_LSB(DCACtrlE, RcvHdrq14DCAXfrCnt)) |
2776                 (1ULL << SYM_LSB(DCACtrlE, RcvHdrq15DCAXfrCnt));
2777         cspec->dca_rcvhdr_ctrl[4] =
2778                 (1ULL << SYM_LSB(DCACtrlF, RcvHdrq16DCAXfrCnt)) |
2779                 (1ULL << SYM_LSB(DCACtrlF, RcvHdrq17DCAXfrCnt));
2780         for (i = 0; i < ARRAY_SIZE(cspec->sdma_cpu); i++)
2781                 qib_write_kreg(dd, KREG_IDX(DCACtrlB) + i,
2782                                cspec->dca_rcvhdr_ctrl[i]);
2783         for (i = 0; i < cspec->num_msix_entries; i++)
2784                 setup_dca_notifier(dd, i);
2785 }
2786 
2787 static void qib_irq_notifier_notify(struct irq_affinity_notify *notify,
2788                              const cpumask_t *mask)
2789 {
2790         struct qib_irq_notify *n =
2791                 container_of(notify, struct qib_irq_notify, notify);
2792         int cpu = cpumask_first(mask);
2793 
2794         if (n->rcv) {
2795                 struct qib_ctxtdata *rcd = (struct qib_ctxtdata *)n->arg;
2796 
2797                 qib_update_rhdrq_dca(rcd, cpu);
2798         } else {
2799                 struct qib_pportdata *ppd = (struct qib_pportdata *)n->arg;
2800 
2801                 qib_update_sdma_dca(ppd, cpu);
2802         }
2803 }
2804 
2805 static void qib_irq_notifier_release(struct kref *ref)
2806 {
2807         struct qib_irq_notify *n =
2808                 container_of(ref, struct qib_irq_notify, notify.kref);
2809         struct qib_devdata *dd;
2810 
2811         if (n->rcv) {
2812                 struct qib_ctxtdata *rcd = (struct qib_ctxtdata *)n->arg;
2813 
2814                 dd = rcd->dd;
2815         } else {
2816                 struct qib_pportdata *ppd = (struct qib_pportdata *)n->arg;
2817 
2818                 dd = ppd->dd;
2819         }
2820         qib_devinfo(dd->pcidev,
2821                 "release on HCA notify 0x%p n 0x%p\n", ref, n);
2822         kfree(n);
2823 }
2824 #endif
2825 
/*
 * Tear down all chip interrupts: mask everything, release each MSIx
 * vector that was actually requested (DCA notifier first, then affinity
 * hint, cpumask, and the irq itself), release the INTx irq if MSIx was
 * never used, free the vectors, and ack any interrupt still granted.
 */
static void qib_7322_free_irq(struct qib_devdata *dd)
{
        u64 intgranted;
        int i;

        /* mask all sources handled by the main handler */
        dd->cspec->main_int_mask = ~0ULL;

        for (i = 0; i < dd->cspec->num_msix_entries; i++) {
                /* only free IRQs that were allocated */
                if (dd->cspec->msix_entries[i].arg) {
#ifdef CONFIG_INFINIBAND_QIB_DCA
                        reset_dca_notifier(dd, i);
#endif
                        irq_set_affinity_hint(pci_irq_vector(dd->pcidev, i),
                                              NULL);
                        free_cpumask_var(dd->cspec->msix_entries[i].mask);
                        pci_free_irq(dd->pcidev, i,
                                     dd->cspec->msix_entries[i].arg);
                }
        }

        /* If num_msix_entries was 0, disable the INTx IRQ */
        if (!dd->cspec->num_msix_entries)
                pci_free_irq(dd->pcidev, 0, dd);
        else
                dd->cspec->num_msix_entries = 0;

        pci_free_irq_vectors(dd->pcidev);

        /* make sure no MSIx interrupts are left pending */
        intgranted = qib_read_kreg64(dd, kr_intgranted);
        if (intgranted)
                qib_write_kreg(dd, kr_intgranted, intgranted);
}
2860 
/*
 * Chip-specific unload cleanup: detach from DCA, free all interrupts,
 * release per-device allocations, then per-port counters, and mask the
 * QSFP presence GPIO interrupts for every port that has QSFP.
 */
static void qib_setup_7322_cleanup(struct qib_devdata *dd)
{
        int i;

#ifdef CONFIG_INFINIBAND_QIB_DCA
        if (dd->flags & QIB_DCA_ENABLED) {
                dca_remove_requester(&dd->pcidev->dev);
                dd->flags &= ~QIB_DCA_ENABLED;
                dd->cspec->dca_ctrl = 0;
                qib_write_kreg(dd, KREG_IDX(DCACtrlA), dd->cspec->dca_ctrl);
        }
#endif

        qib_7322_free_irq(dd);
        kfree(dd->cspec->cntrs);
        kfree(dd->cspec->sendchkenable);
        kfree(dd->cspec->sendgrhchk);
        kfree(dd->cspec->sendibchk);
        kfree(dd->cspec->msix_entries);
        for (i = 0; i < dd->num_pports; i++) {
                unsigned long flags;
                /* both ports' MOD_PRS_N bits; second port's is shifted */
                u32 mask = QSFP_GPIO_MOD_PRS_N |
                        (QSFP_GPIO_MOD_PRS_N << QSFP_GPIO_PORT2_SHIFT);

                kfree(dd->pport[i].cpspec->portcntrs);
                if (dd->flags & QIB_HAS_QSFP) {
                        /* gpio_mask shadow is shared; update under lock */
                        spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
                        dd->cspec->gpio_mask &= ~mask;
                        qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
                        spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
                }
        }
}
2894 
2895 /* handle SDMA interrupts */
2896 static void sdma_7322_intr(struct qib_devdata *dd, u64 istat)
2897 {
2898         struct qib_pportdata *ppd0 = &dd->pport[0];
2899         struct qib_pportdata *ppd1 = &dd->pport[1];
2900         u64 intr0 = istat & (INT_MASK_P(SDma, 0) |
2901                 INT_MASK_P(SDmaIdle, 0) | INT_MASK_P(SDmaProgress, 0));
2902         u64 intr1 = istat & (INT_MASK_P(SDma, 1) |
2903                 INT_MASK_P(SDmaIdle, 1) | INT_MASK_P(SDmaProgress, 1));
2904 
2905         if (intr0)
2906                 qib_sdma_intr(ppd0);
2907         if (intr1)
2908                 qib_sdma_intr(ppd1);
2909 
2910         if (istat & INT_MASK_PM(SDmaCleanupDone, 0))
2911                 qib_sdma_process_event(ppd0, qib_sdma_event_e20_hw_started);
2912         if (istat & INT_MASK_PM(SDmaCleanupDone, 1))
2913                 qib_sdma_process_event(ppd1, qib_sdma_event_e20_hw_started);
2914 }
2915 
2916 /*
2917  * Set or clear the Send buffer available interrupt enable bit.
2918  */
2919 static void qib_wantpiobuf_7322_intr(struct qib_devdata *dd, u32 needint)
2920 {
2921         unsigned long flags;
2922 
2923         spin_lock_irqsave(&dd->sendctrl_lock, flags);
2924         if (needint)
2925                 dd->sendctrl |= SYM_MASK(SendCtrl, SendIntBufAvail);
2926         else
2927                 dd->sendctrl &= ~SYM_MASK(SendCtrl, SendIntBufAvail);
2928         qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
2929         qib_write_kreg(dd, kr_scratch, 0ULL);
2930         spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
2931 }
2932 
2933 /*
2934  * Somehow got an interrupt with reserved bits set in interrupt status.
2935  * Print a message so we know it happened, then clear them.
2936  * keep mainline interrupt handler cache-friendly
2937  */
2938 static noinline void unknown_7322_ibits(struct qib_devdata *dd, u64 istat)
2939 {
2940         u64 kills;
2941         char msg[128];
2942 
2943         kills = istat & ~QIB_I_BITSEXTANT;
2944         qib_dev_err(dd,
2945                 "Clearing reserved interrupt(s) 0x%016llx: %s\n",
2946                 (unsigned long long) kills, msg);
2947         qib_write_kreg(dd, kr_intmask, (dd->cspec->int_enable_mask & ~kills));
2948 }
2949 
/* keep mainline interrupt handler cache-friendly */
static noinline void unknown_7322_gpio_intr(struct qib_devdata *dd)
{
        u32 gpiostatus;
        int handled = 0;
        int pidx;

        /*
         * Boards for this chip currently don't use GPIO interrupts,
         * so clear by writing GPIOstatus to GPIOclear, and complain
         * to developer.  To avoid endless repeats, clear
         * the bits in the mask, since there is some kind of
         * programming error or chip problem.
         */
        gpiostatus = qib_read_kreg32(dd, kr_gpio_status);
        /*
         * In theory, writing GPIOstatus to GPIOclear could
         * have a bad side-effect on some diagnostic that wanted
         * to poll for a status-change, but the various shadows
         * make that problematic at best. Diags will just suppress
         * all GPIO interrupts during such tests.
         */
        qib_write_kreg(dd, kr_gpio_clear, gpiostatus);
        /*
         * Check for QSFP MOD_PRS changes
         * only works for single port if IB1 != pidx1
         */
        for (pidx = 0; pidx < dd->num_pports && (dd->flags & QIB_HAS_QSFP);
             ++pidx) {
                struct qib_pportdata *ppd;
                struct qib_qsfp_data *qd;
                u32 mask;

                if (!dd->pport[pidx].link_speed_supported)
                        continue;
                mask = QSFP_GPIO_MOD_PRS_N;
                ppd = dd->pport + pidx;
                /* second port's presence bit sits at a shifted position */
                mask <<= (QSFP_GPIO_PORT2_SHIFT * ppd->hw_pidx);
                if (gpiostatus & dd->cspec->gpio_mask & mask) {
                        u64 pins;

                        qd = &ppd->cpspec->qsfp_data;
                        gpiostatus &= ~mask;
                        pins = qib_read_kreg64(dd, kr_extstatus);
                        pins >>= SYM_LSB(EXTStatus, GPIOIn);
                        /* MOD_PRS_N is active-low: pin low == module present */
                        if (!(pins & mask)) {
                                ++handled;
                                qd->t_insert = jiffies;
                                queue_work(ib_wq, &qd->work);
                        }
                }
        }

        /* any remaining, unexpected bits: mask them off so they stop firing */
        if (gpiostatus && !handled) {
                const u32 mask = qib_read_kreg32(dd, kr_gpio_mask);
                u32 gpio_irq = mask & gpiostatus;

                /*
                 * Clear any troublemakers, and update chip from shadow
                 */
                dd->cspec->gpio_mask &= ~gpio_irq;
                qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
        }
}
3014 
/*
 * Handle errors and unusual events first, separate function
 * to improve cache hits for fast path interrupt handling.
 */
static noinline void unlikely_7322_intr(struct qib_devdata *dd, u64 istat)
{
        /* reserved bits set: log and mask them off */
        if (istat & ~QIB_I_BITSEXTANT)
                unknown_7322_ibits(dd, istat);
        if (istat & QIB_I_GPIO)
                unknown_7322_gpio_intr(dd);
        if (istat & QIB_I_C_ERROR) {
                /* mask all errors; the tasklet re-enables after processing */
                qib_write_kreg(dd, kr_errmask, 0ULL);
                tasklet_schedule(&dd->error_tasklet);
        }
        /* per-port errors, only when the port's ctxt 0 exists */
        if (istat & INT_MASK_P(Err, 0) && dd->rcd[0])
                handle_7322_p_errors(dd->rcd[0]->ppd);
        if (istat & INT_MASK_P(Err, 1) && dd->rcd[1])
                handle_7322_p_errors(dd->rcd[1]->ppd);
}
3034 
3035 /*
3036  * Dynamically adjust the rcv int timeout for a context based on incoming
3037  * packet rate.
3038  */
3039 static void adjust_rcv_timeout(struct qib_ctxtdata *rcd, int npkts)
3040 {
3041         struct qib_devdata *dd = rcd->dd;
3042         u32 timeout = dd->cspec->rcvavail_timeout[rcd->ctxt];
3043 
3044         /*
3045          * Dynamically adjust idle timeout on chip
3046          * based on number of packets processed.
3047          */
3048         if (npkts < rcv_int_count && timeout > 2)
3049                 timeout >>= 1;
3050         else if (npkts >= rcv_int_count && timeout < rcv_int_timeout)
3051                 timeout = min(timeout << 1, rcv_int_timeout);
3052         else
3053                 return;
3054 
3055         dd->cspec->rcvavail_timeout[rcd->ctxt] = timeout;
3056         qib_write_kreg(dd, kr_rcvavailtimeout + rcd->ctxt, timeout);
3057 }
3058 
/*
 * This is the main interrupt handler.
 * It will normally only be used for low frequency interrupts but may
 * have to handle all interrupts if INTx is enabled or fewer than normal
 * MSIx interrupts were allocated.
 * This routine should ignore the interrupt bits for any of the
 * dedicated MSIx handlers.
 */
static irqreturn_t qib_7322intr(int irq, void *data)
{
        struct qib_devdata *dd = data;
        irqreturn_t ret;
        u64 istat;
        u64 ctxtrbits;
        u64 rmask;
        unsigned i;
        u32 npkts;

        if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT) {
                /*
                 * This return value is not great, but we do not want the
                 * interrupt core code to remove our interrupt handler
                 * because we don't appear to be handling an interrupt
                 * during a chip reset.
                 */
                ret = IRQ_HANDLED;
                goto bail;
        }

        istat = qib_read_kreg64(dd, kr_intstatus);

        /* all-ones read usually means the device fell off the bus */
        if (unlikely(istat == ~0ULL)) {
                qib_bad_intrstatus(dd);
                qib_dev_err(dd, "Interrupt status all f's, skipping\n");
                /* don't know if it was our interrupt or not */
                ret = IRQ_NONE;
                goto bail;
        }

        /* drop bits owned by dedicated MSIx handlers */
        istat &= dd->cspec->main_int_mask;
        if (unlikely(!istat)) {
                /* already handled, or shared and not us */
                ret = IRQ_NONE;
                goto bail;
        }

        this_cpu_inc(*dd->int_counter);

        /* handle "errors" of various kinds first, device ahead of port */
        if (unlikely(istat & (~QIB_I_BITSEXTANT | QIB_I_GPIO |
                              QIB_I_C_ERROR | INT_MASK_P(Err, 0) |
                              INT_MASK_P(Err, 1))))
                unlikely_7322_intr(dd, istat);

        /*
         * Clear the interrupt bits we found set, relatively early, so we
         * "know" know the chip will have seen this by the time we process
         * the queue, and will re-interrupt if necessary.  The processor
         * itself won't take the interrupt again until we return.
         */
        qib_write_kreg(dd, kr_intclear, istat);

        /*
         * Handle kernel receive queues before checking for pio buffers
         * available since receives can overflow; piobuf waiters can afford
         * a few extra cycles, since they were waiting anyway.
         */
        ctxtrbits = istat & (QIB_I_RCVAVAIL_MASK | QIB_I_RCVURG_MASK);
        if (ctxtrbits) {
                /* per-context pair of bits: RcvAvail and RcvUrg */
                rmask = (1ULL << QIB_I_RCVAVAIL_LSB) |
                        (1ULL << QIB_I_RCVURG_LSB);
                for (i = 0; i < dd->first_user_ctxt; i++) {
                        if (ctxtrbits & rmask) {
                                ctxtrbits &= ~rmask;
                                if (dd->rcd[i])
                                        qib_kreceive(dd->rcd[i], NULL, &npkts);
                        }
                        rmask <<= 1;
                }
                /* remaining bits belong to user contexts */
                if (ctxtrbits) {
                        ctxtrbits = (ctxtrbits >> QIB_I_RCVAVAIL_LSB) |
                                (ctxtrbits >> QIB_I_RCVURG_LSB);
                        qib_handle_urcv(dd, ctxtrbits);
                }
        }

        if (istat & (QIB_I_P_SDMAINT(0) | QIB_I_P_SDMAINT(1)))
                sdma_7322_intr(dd, istat);

        if ((istat & QIB_I_SPIOBUFAVAIL) && (dd->flags & QIB_INITTED))
                qib_ib_piobufavail(dd);

        ret = IRQ_HANDLED;
bail:
        return ret;
}
3155 
3156 /*
3157  * Dedicated receive packet available interrupt handler.
3158  */
3159 static irqreturn_t qib_7322pintr(int irq, void *data)
3160 {
3161         struct qib_ctxtdata *rcd = data;
3162         struct qib_devdata *dd = rcd->dd;
3163         u32 npkts;
3164 
3165         if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
3166                 /*
3167                  * This return value is not great, but we do not want the
3168                  * interrupt core code to remove our interrupt handler
3169                  * because we don't appear to be handling an interrupt
3170                  * during a chip reset.
3171                  */
3172                 return IRQ_HANDLED;
3173 
3174         this_cpu_inc(*dd->int_counter);
3175 
3176         /* Clear the interrupt bit we expect to be set. */
3177         qib_write_kreg(dd, kr_intclear, ((1ULL << QIB_I_RCVAVAIL_LSB) |
3178                        (1ULL << QIB_I_RCVURG_LSB)) << rcd->ctxt);
3179 
3180         qib_kreceive(rcd, NULL, &npkts);
3181 
3182         return IRQ_HANDLED;
3183 }
3184 
3185 /*
3186  * Dedicated Send buffer available interrupt handler.
3187  */
3188 static irqreturn_t qib_7322bufavail(int irq, void *data)
3189 {
3190         struct qib_devdata *dd = data;
3191 
3192         if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
3193                 /*
3194                  * This return value is not great, but we do not want the
3195                  * interrupt core code to remove our interrupt handler
3196                  * because we don't appear to be handling an interrupt
3197                  * during a chip reset.
3198                  */
3199                 return IRQ_HANDLED;
3200 
3201         this_cpu_inc(*dd->int_counter);
3202 
3203         /* Clear the interrupt bit we expect to be set. */
3204         qib_write_kreg(dd, kr_intclear, QIB_I_SPIOBUFAVAIL);
3205 
3206         /* qib_ib_piobufavail() will clear the want PIO interrupt if needed */
3207         if (dd->flags & QIB_INITTED)
3208                 qib_ib_piobufavail(dd);
3209         else
3210                 qib_wantpiobuf_7322_intr(dd, 0);
3211 
3212         return IRQ_HANDLED;
3213 }
3214 
3215 /*
3216  * Dedicated Send DMA interrupt handler.
3217  */
3218 static irqreturn_t sdma_intr(int irq, void *data)
3219 {
3220         struct qib_pportdata *ppd = data;
3221         struct qib_devdata *dd = ppd->dd;
3222 
3223         if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
3224                 /*
3225                  * This return value is not great, but we do not want the
3226                  * interrupt core code to remove our interrupt handler
3227                  * because we don't appear to be handling an interrupt
3228                  * during a chip reset.
3229                  */
3230                 return IRQ_HANDLED;
3231 
3232         this_cpu_inc(*dd->int_counter);
3233 
3234         /* Clear the interrupt bit we expect to be set. */
3235         qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
3236                        INT_MASK_P(SDma, 1) : INT_MASK_P(SDma, 0));
3237         qib_sdma_intr(ppd);
3238 
3239         return IRQ_HANDLED;
3240 }
3241 
3242 /*
3243  * Dedicated Send DMA idle interrupt handler.
3244  */
3245 static irqreturn_t sdma_idle_intr(int irq, void *data)
3246 {
3247         struct qib_pportdata *ppd = data;
3248         struct qib_devdata *dd = ppd->dd;
3249 
3250         if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
3251                 /*
3252                  * This return value is not great, but we do not want the
3253                  * interrupt core code to remove our interrupt handler
3254                  * because we don't appear to be handling an interrupt
3255                  * during a chip reset.
3256                  */
3257                 return IRQ_HANDLED;
3258 
3259         this_cpu_inc(*dd->int_counter);
3260 
3261         /* Clear the interrupt bit we expect to be set. */
3262         qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
3263                        INT_MASK_P(SDmaIdle, 1) : INT_MASK_P(SDmaIdle, 0));
3264         qib_sdma_intr(ppd);
3265 
3266         return IRQ_HANDLED;
3267 }
3268 
3269 /*
3270  * Dedicated Send DMA progress interrupt handler.
3271  */
3272 static irqreturn_t sdma_progress_intr(int irq, void *data)
3273 {
3274         struct qib_pportdata *ppd = data;
3275         struct qib_devdata *dd = ppd->dd;
3276 
3277         if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
3278                 /*
3279                  * This return value is not great, but we do not want the
3280                  * interrupt core code to remove our interrupt handler
3281                  * because we don't appear to be handling an interrupt
3282                  * during a chip reset.
3283                  */
3284                 return IRQ_HANDLED;
3285 
3286         this_cpu_inc(*dd->int_counter);
3287 
3288         /* Clear the interrupt bit we expect to be set. */
3289         qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
3290                        INT_MASK_P(SDmaProgress, 1) :
3291                        INT_MASK_P(SDmaProgress, 0));
3292         qib_sdma_intr(ppd);
3293 
3294         return IRQ_HANDLED;
3295 }
3296 
3297 /*
3298  * Dedicated Send DMA cleanup interrupt handler.
3299  */
3300 static irqreturn_t sdma_cleanup_intr(int irq, void *data)
3301 {
3302         struct qib_pportdata *ppd = data;
3303         struct qib_devdata *dd = ppd->dd;
3304 
3305         if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
3306                 /*
3307                  * This return value is not great, but we do not want the
3308                  * interrupt core code to remove our interrupt handler
3309                  * because we don't appear to be handling an interrupt
3310                  * during a chip reset.
3311                  */
3312                 return IRQ_HANDLED;
3313 
3314         this_cpu_inc(*dd->int_counter);
3315 
3316         /* Clear the interrupt bit we expect to be set. */
3317         qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
3318                        INT_MASK_PM(SDmaCleanupDone, 1) :
3319                        INT_MASK_PM(SDmaCleanupDone, 0));
3320         qib_sdma_process_event(ppd, qib_sdma_event_e20_hw_started);
3321 
3322         return IRQ_HANDLED;
3323 }
3324 
3325 #ifdef CONFIG_INFINIBAND_QIB_DCA
3326 
3327 static void reset_dca_notifier(struct qib_devdata *dd, int msixnum)
3328 {
3329         if (!dd->cspec->msix_entries[msixnum].dca)
3330                 return;
3331 
3332         qib_devinfo(dd->pcidev, "Disabling notifier on HCA %d irq %d\n",
3333                     dd->unit, pci_irq_vector(dd->pcidev, msixnum));
3334         irq_set_affinity_notifier(pci_irq_vector(dd->pcidev, msixnum), NULL);
3335         dd->cspec->msix_entries[msixnum].notifier = NULL;
3336 }
3337 
3338 static void setup_dca_notifier(struct qib_devdata *dd, int msixnum)
3339 {
3340         struct qib_msix_entry *m = &dd->cspec->msix_entries[msixnum];
3341         struct qib_irq_notify *n;
3342 
3343         if (!m->dca)
3344                 return;
3345         n = kzalloc(sizeof(*n), GFP_KERNEL);
3346         if (n) {
3347                 int ret;
3348 
3349                 m->notifier = n;
3350                 n->notify.irq = pci_irq_vector(dd->pcidev, msixnum);
3351                 n->notify.notify = qib_irq_notifier_notify;
3352                 n->notify.release = qib_irq_notifier_release;
3353                 n->arg = m->arg;
3354                 n->rcv = m->rcv;
3355                 qib_devinfo(dd->pcidev,
3356                         "set notifier irq %d rcv %d notify %p\n",
3357                         n->notify.irq, n->rcv, &n->notify);
3358                 ret = irq_set_affinity_notifier(
3359                                 n->notify.irq,
3360                                 &n->notify);
3361                 if (ret) {
3362                         m->notifier = NULL;
3363                         kfree(n);
3364                 }
3365         }
3366 }
3367 
3368 #endif
3369 
3370 /*
3371  * Set up our chip-specific interrupt handler.
3372  * The interrupt type has already been setup, so
3373  * we just need to do the registration and error checking.
3374  * If we are using MSIx interrupts, we may fall back to
3375  * INTx later, if the interrupt handler doesn't get called
3376  * within 1/2 second (see verify_interrupt()).
3377  */
3378 static void qib_setup_7322_interrupt(struct qib_devdata *dd, int clearpend)
3379 {
3380         int ret, i, msixnum;
3381         u64 redirect[6];
3382         u64 mask;
3383         const struct cpumask *local_mask;
3384         int firstcpu, secondcpu = 0, currrcvcpu = 0;
3385 
3386         if (!dd->num_pports)
3387                 return;
3388 
3389         if (clearpend) {
3390                 /*
3391                  * if not switching interrupt types, be sure interrupts are
3392                  * disabled, and then clear anything pending at this point,
3393                  * because we are starting clean.
3394                  */
3395                 qib_7322_set_intr_state(dd, 0);
3396 
3397                 /* clear the reset error, init error/hwerror mask */
3398                 qib_7322_init_hwerrors(dd);
3399 
3400                 /* clear any interrupt bits that might be set */
3401                 qib_write_kreg(dd, kr_intclear, ~0ULL);
3402 
3403                 /* make sure no pending MSIx intr, and clear diag reg */
3404                 qib_write_kreg(dd, kr_intgranted, ~0ULL);
3405                 qib_write_kreg(dd, kr_vecclr_wo_int, ~0ULL);
3406         }
3407 
3408         if (!dd->cspec->num_msix_entries) {
3409                 /* Try to get INTx interrupt */
3410 try_intx:
3411                 ret = pci_request_irq(dd->pcidev, 0, qib_7322intr, NULL, dd,
3412                                       QIB_DRV_NAME);
3413                 if (ret) {
3414                         qib_dev_err(
3415                                 dd,
3416                                 "Couldn't setup INTx interrupt (irq=%d): %d\n",
3417                                 pci_irq_vector(dd->pcidev, 0), ret);
3418                         return;
3419                 }
3420                 dd->cspec->main_int_mask = ~0ULL;
3421                 return;
3422         }
3423 
3424         /* Try to get MSIx interrupts */
3425         memset(redirect, 0, sizeof(redirect));
3426         mask = ~0ULL;
3427         msixnum = 0;
3428         local_mask = cpumask_of_pcibus(dd->pcidev->bus);
3429         firstcpu = cpumask_first(local_mask);
3430         if (firstcpu >= nr_cpu_ids ||
3431                         cpumask_weight(local_mask) == num_online_cpus()) {
3432                 local_mask = topology_core_cpumask(0);
3433                 firstcpu = cpumask_first(local_mask);
3434         }
3435         if (firstcpu < nr_cpu_ids) {
3436                 secondcpu = cpumask_next(firstcpu, local_mask);
3437                 if (secondcpu >= nr_cpu_ids)
3438                         secondcpu = firstcpu;
3439                 currrcvcpu = secondcpu;
3440         }
3441         for (i = 0; msixnum < dd->cspec->num_msix_entries; i++) {
3442                 irq_handler_t handler;
3443                 void *arg;
3444                 int lsb, reg, sh;
3445 #ifdef CONFIG_INFINIBAND_QIB_DCA
3446                 int dca = 0;
3447 #endif
3448                 if (i < ARRAY_SIZE(irq_table)) {
3449                         if (irq_table[i].port) {
3450                                 /* skip if for a non-configured port */
3451                                 if (irq_table[i].port > dd->num_pports)
3452                                         continue;
3453                                 arg = dd->pport + irq_table[i].port - 1;
3454                         } else
3455                                 arg = dd;
3456 #ifdef CONFIG_INFINIBAND_QIB_DCA
3457                         dca = irq_table[i].dca;
3458 #endif
3459                         lsb = irq_table[i].lsb;
3460                         handler = irq_table[i].handler;
3461                         ret = pci_request_irq(dd->pcidev, msixnum, handler,
3462                                               NULL, arg, QIB_DRV_NAME "%d%s",
3463                                               dd->unit,
3464                                               irq_table[i].name);
3465                 } else {
3466                         unsigned ctxt;
3467 
3468                         ctxt = i - ARRAY_SIZE(irq_table);
3469                         /* per krcvq context receive interrupt */
3470                         arg = dd->rcd[ctxt];
3471                         if (!arg)
3472                                 continue;
3473                         if (qib_krcvq01_no_msi && ctxt < 2)
3474                                 continue;
3475 #ifdef CONFIG_INFINIBAND_QIB_DCA
3476                         dca = 1;
3477 #endif
3478                         lsb = QIB_I_RCVAVAIL_LSB + ctxt;
3479                         handler = qib_7322pintr;
3480                         ret = pci_request_irq(dd->pcidev, msixnum, handler,
3481                                               NULL, arg,
3482                                               QIB_DRV_NAME "%d (kctx)",
3483                                               dd->unit);
3484                 }
3485 
3486                 if (ret) {
3487                         /*
3488                          * Shouldn't happen since the enable said we could
3489                          * have as many as we are trying to setup here.
3490                          */
3491                         qib_dev_err(dd,
3492                                     "Couldn't setup MSIx interrupt (vec=%d, irq=%d): %d\n",
3493                                     msixnum,
3494                                     pci_irq_vector(dd->pcidev, msixnum),
3495                                     ret);
3496                         qib_7322_free_irq(dd);
3497                         pci_alloc_irq_vectors(dd->pcidev, 1, 1,
3498                                               PCI_IRQ_LEGACY);
3499                         goto try_intx;
3500                 }
3501                 dd->cspec->msix_entries[msixnum].arg = arg;
3502 #ifdef CONFIG_INFINIBAND_QIB_DCA
3503                 dd->cspec->msix_entries[msixnum].dca = dca;
3504                 dd->cspec->msix_entries[msixnum].rcv =
3505                         handler == qib_7322pintr;
3506 #endif
3507                 if (lsb >= 0) {
3508                         reg = lsb / IBA7322_REDIRECT_VEC_PER_REG;
3509                         sh = (lsb % IBA7322_REDIRECT_VEC_PER_REG) *
3510                                 SYM_LSB(IntRedirect0, vec1);
3511                         mask &= ~(1ULL << lsb);
3512                         redirect[reg] |= ((u64) msixnum) << sh;
3513                 }
3514                 qib_read_kreg64(dd, 2 * msixnum + 1 +
3515                                 (QIB_7322_MsixTable_OFFS / sizeof(u64)));
3516                 if (firstcpu < nr_cpu_ids &&
3517                         zalloc_cpumask_var(
3518                                 &dd->cspec->msix_entries[msixnum].mask,
3519                                 GFP_KERNEL)) {
3520                         if (handler == qib_7322pintr) {
3521                                 cpumask_set_cpu(currrcvcpu,
3522                                         dd->cspec->msix_entries[msixnum].mask);
3523                                 currrcvcpu = cpumask_next(currrcvcpu,
3524                                         local_mask);
3525                                 if (currrcvcpu >= nr_cpu_ids)
3526                                         currrcvcpu = secondcpu;
3527                         } else {
3528                                 cpumask_set_cpu(firstcpu,
3529                                         dd->cspec->msix_entries[msixnum].mask);
3530                         }
3531                         irq_set_affinity_hint(
3532                                 pci_irq_vector(dd->pcidev, msixnum),
3533                                 dd->cspec->msix_entries[msixnum].mask);
3534                 }
3535                 msixnum++;
3536         }
3537         /* Initialize the vector mapping */
3538         for (i = 0; i < ARRAY_SIZE(redirect); i++)
3539                 qib_write_kreg(dd, kr_intredirect + i, redirect[i]);
3540         dd->cspec->main_int_mask = mask;
3541         tasklet_init(&dd->error_tasklet, qib_error_tasklet,
3542                 (unsigned long)dd);
3543 }
3544 
3545 /**
3546  * qib_7322_boardname - fill in the board name and note features
3547  * @dd: the qlogic_ib device
3548  *
3549  * info will be based on the board revision register
3550  */
3551 static unsigned qib_7322_boardname(struct qib_devdata *dd)
3552 {
3553         /* Will need enumeration of board-types here */
3554         u32 boardid;
3555         unsigned int features = DUAL_PORT_CAP;
3556 
3557         boardid = SYM_FIELD(dd->revision, Revision, BoardID);
3558 
3559         switch (boardid) {
3560         case 0:
3561                 dd->boardname = "InfiniPath_QLE7342_Emulation";
3562                 break;
3563         case 1:
3564                 dd->boardname = "InfiniPath_QLE7340";
3565                 dd->flags |= QIB_HAS_QSFP;
3566                 features = PORT_SPD_CAP;
3567                 break;
3568         case 2:
3569                 dd->boardname = "InfiniPath_QLE7342";
3570                 dd->flags |= QIB_HAS_QSFP;
3571                 break;
3572         case 3:
3573                 dd->boardname = "InfiniPath_QMI7342";
3574                 break;
3575         case 4:
3576                 dd->boardname = "InfiniPath_Unsupported7342";
3577                 qib_dev_err(dd, "Unsupported version of QMH7342\n");
3578                 features = 0;
3579                 break;
3580         case BOARD_QMH7342:
3581                 dd->boardname = "InfiniPath_QMH7342";
3582                 features = 0x24;
3583                 break;
3584         case BOARD_QME7342:
3585                 dd->boardname = "InfiniPath_QME7342";
3586                 break;
3587         case 8:
3588                 dd->boardname = "InfiniPath_QME7362";
3589                 dd->flags |= QIB_HAS_QSFP;
3590                 break;
3591         case BOARD_QMH7360:
3592                 dd->boardname = "Intel IB QDR 1P FLR-QSFP Adptr";
3593                 dd->flags |= QIB_HAS_QSFP;
3594                 break;
3595         case 15:
3596                 dd->boardname = "InfiniPath_QLE7342_TEST";
3597                 dd->flags |= QIB_HAS_QSFP;
3598                 break;
3599         default:
3600                 dd->boardname = "InfiniPath_QLE73xy_UNKNOWN";
3601                 qib_dev_err(dd, "Unknown 7322 board type %u\n", boardid);
3602                 break;
3603         }
3604         dd->board_atten = 1; /* index into txdds_Xdr */
3605 
3606         snprintf(dd->boardversion, sizeof(dd->boardversion),
3607                  "ChipABI %u.%u, %s, InfiniPath%u %u.%u, SW Compat %u\n",
3608                  QIB_CHIP_VERS_MAJ, QIB_CHIP_VERS_MIN, dd->boardname,
3609                  (unsigned int)SYM_FIELD(dd->revision, Revision_R, Arch),
3610                  dd->majrev, dd->minrev,
3611                  (unsigned int)SYM_FIELD(dd->revision, Revision_R, SW));
3612 
3613         if (qib_singleport && (features >> PORT_SPD_CAP_SHIFT) & PORT_SPD_CAP) {
3614                 qib_devinfo(dd->pcidev,
3615                             "IB%u: Forced to single port mode by module parameter\n",
3616                             dd->unit);
3617                 features &= PORT_SPD_CAP;
3618         }
3619 
3620         return features;
3621 }
3622 
/*
 * This routine sleeps, so it can only be called from user context, not
 * from interrupt context.
 *
 * Returns 1 on success, 0 if the chip failed to come back after reset.
 * Side effects: clears QIB_INITTED/QIB_PRESENT during the reset, frees and
 * re-requests all interrupts, and rewrites chip/port registers.
 */
static int qib_do_7322_reset(struct qib_devdata *dd)
{
	u64 val;
	u64 *msix_vecsave = NULL;
	int i, msix_entries, ret = 1;
	u16 cmdval;
	u8 int_line, clinesz;
	unsigned long flags;

	/* Use dev_err so it shows up in logs, etc. */
	qib_dev_err(dd, "Resetting InfiniPath unit %u\n", dd->unit);

	/* save PCI command word, INTx line and cacheline size for re-enable */
	qib_pcie_getcmd(dd, &cmdval, &int_line, &clinesz);

	msix_entries = dd->cspec->num_msix_entries;

	/* no interrupts till re-initted */
	qib_7322_set_intr_state(dd, 0);

	qib_7322_free_irq(dd);

	if (msix_entries) {
		/* can be up to 512 bytes, too big for stack */
		msix_vecsave = kmalloc_array(2 * dd->cspec->num_msix_entries,
					     sizeof(u64),
					     GFP_KERNEL);
	}

	/*
	 * Core PCI (as of 2.6.18) doesn't save or rewrite the full vector
	 * info that is set up by the BIOS, so we have to save and restore
	 * it ourselves.   There is some risk something could change it,
	 * after we save it, but since we have disabled the MSIx, it
	 * shouldn't be touched...
	 */
	for (i = 0; i < msix_entries; i++) {
		u64 vecaddr, vecdata;

		/* MSI-X table entry i: address at slot 2*i, data at 2*i+1 */
		vecaddr = qib_read_kreg64(dd, 2 * i +
				  (QIB_7322_MsixTable_OFFS / sizeof(u64)));
		vecdata = qib_read_kreg64(dd, 1 + 2 * i +
				  (QIB_7322_MsixTable_OFFS / sizeof(u64)));
		if (msix_vecsave) {
			msix_vecsave[2 * i] = vecaddr;
			/* save it without the masked bit set */
			msix_vecsave[1 + 2 * i] = vecdata & ~0x100000000ULL;
		}
	}

	/* reset the per-port link error bookkeeping deltas */
	dd->pport->cpspec->ibdeltainprog = 0;
	dd->pport->cpspec->ibsymdelta = 0;
	dd->pport->cpspec->iblnkerrdelta = 0;
	dd->pport->cpspec->ibmalfdelta = 0;
	/* so we check interrupts work again */
	dd->z_int_counter = qib_int_counter(dd);

	/*
	 * Keep chip from being accessed until we are ready.  Use
	 * writeq() directly, to allow the write even though QIB_PRESENT
	 * isn't set.
	 */
	dd->flags &= ~(QIB_INITTED | QIB_PRESENT | QIB_BADINTR);
	dd->flags |= QIB_DOING_RESET;
	val = dd->control | QLOGIC_IB_C_RESET;
	writeq(val, &dd->kregbase[kr_control]);

	for (i = 1; i <= 5; i++) {
		/*
		 * Allow MBIST, etc. to complete; longer on each retry.
		 * We sometimes get machine checks from bus timeout if no
		 * response, so for now, make it *really* long.
		 */
		msleep(1000 + (1 + i) * 3000);

		qib_pcie_reenable(dd, cmdval, int_line, clinesz);

		/*
		 * Use readq directly, so we don't need to mark it as PRESENT
		 * until we get a successful indication that all is well.
		 */
		val = readq(&dd->kregbase[kr_revision]);
		if (val == dd->revision)
			break;
		if (i == 5) {
			qib_dev_err(dd,
				"Failed to initialize after reset, unusable\n");
			ret = 0;
			goto  bail;
		}
	}

	dd->flags |= QIB_PRESENT; /* it's back */

	if (msix_entries) {
		/* restore the MSIx vector address and data if saved above */
		for (i = 0; i < msix_entries; i++) {
			if (!msix_vecsave || !msix_vecsave[2 * i])
				continue;
			qib_write_kreg(dd, 2 * i +
				(QIB_7322_MsixTable_OFFS / sizeof(u64)),
				msix_vecsave[2 * i]);
			qib_write_kreg(dd, 1 + 2 * i +
				(QIB_7322_MsixTable_OFFS / sizeof(u64)),
				msix_vecsave[1 + 2 * i]);
		}
	}

	/* initialize the remaining registers.  */
	for (i = 0; i < dd->num_pports; ++i)
		write_7322_init_portregs(&dd->pport[i]);
	write_7322_initregs(dd);

	if (qib_pcie_params(dd, dd->lbus_width, &msix_entries))
		qib_dev_err(dd,
			"Reset failed to setup PCIe or interrupts; continuing anyway\n");

	dd->cspec->num_msix_entries = msix_entries;
	qib_setup_7322_interrupt(dd, 1);

	for (i = 0; i < dd->num_pports; ++i) {
		struct qib_pportdata *ppd = &dd->pport[i];

		/* force a link-state notification after the reset */
		spin_lock_irqsave(&ppd->lflags_lock, flags);
		ppd->lflags |= QIBL_IB_FORCE_NOTIFY;
		ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
	}

bail:
	dd->flags &= ~QIB_DOING_RESET; /* OK or not, no longer resetting */
	kfree(msix_vecsave);
	return ret;
}
3760 
3761 /**
3762  * qib_7322_put_tid - write a TID to the chip
3763  * @dd: the qlogic_ib device
3764  * @tidptr: pointer to the expected TID (in chip) to update
3765  * @tidtype: 0 for eager, 1 for expected
3766  * @pa: physical address of in memory buffer; tidinvalid if freeing
3767  */
3768 static void qib_7322_put_tid(struct qib_devdata *dd, u64 __iomem *tidptr,
3769                              u32 type, unsigned long pa)
3770 {
3771         if (!(dd->flags & QIB_PRESENT))
3772                 return;
3773         if (pa != dd->tidinvalid) {
3774                 u64 chippa = pa >> IBA7322_TID_PA_SHIFT;
3775 
3776                 /* paranoia checks */
3777                 if (pa != (chippa << IBA7322_TID_PA_SHIFT)) {
3778                         qib_dev_err(dd, "Physaddr %lx not 2KB aligned!\n",
3779                                     pa);
3780                         return;
3781                 }
3782                 if (chippa >= (1UL << IBA7322_TID_SZ_SHIFT)) {
3783                         qib_dev_err(dd,
3784                                 "Physical page address 0x%lx larger than supported\n",
3785                                 pa);
3786                         return;
3787                 }
3788 
3789                 if (type == RCVHQ_RCV_TYPE_EAGER)
3790                         chippa |= dd->tidtemplate;
3791                 else /* for now, always full 4KB page */
3792                         chippa |= IBA7322_TID_SZ_4K;
3793                 pa = chippa;
3794         }
3795         writeq(pa, tidptr);
3796 }
3797 
3798 /**
3799  * qib_7322_clear_tids - clear all TID entries for a ctxt, expected and eager
3800  * @dd: the qlogic_ib device
3801  * @ctxt: the ctxt
3802  *
3803  * clear all TID entries for a ctxt, expected and eager.
3804  * Used from qib_close().
3805  */
3806 static void qib_7322_clear_tids(struct qib_devdata *dd,
3807                                 struct qib_ctxtdata *rcd)
3808 {
3809         u64 __iomem *tidbase;
3810         unsigned long tidinv;
3811         u32 ctxt;
3812         int i;
3813 
3814         if (!dd->kregbase || !rcd)
3815                 return;
3816 
3817         ctxt = rcd->ctxt;
3818 
3819         tidinv = dd->tidinvalid;
3820         tidbase = (u64 __iomem *)
3821                 ((char __iomem *) dd->kregbase +
3822                  dd->rcvtidbase +
3823                  ctxt * dd->rcvtidcnt * sizeof(*tidbase));
3824 
3825         for (i = 0; i < dd->rcvtidcnt; i++)
3826                 qib_7322_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EXPECTED,
3827                                  tidinv);
3828 
3829         tidbase = (u64 __iomem *)
3830                 ((char __iomem *) dd->kregbase +
3831                  dd->rcvegrbase +
3832                  rcd->rcvegr_tid_base * sizeof(*tidbase));
3833 
3834         for (i = 0; i < rcd->rcvegrcnt; i++)
3835                 qib_7322_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EAGER,
3836                                  tidinv);
3837 }
3838 
3839 /**
3840  * qib_7322_tidtemplate - setup constants for TID updates
3841  * @dd: the qlogic_ib device
3842  *
3843  * We setup stuff that we use a lot, to avoid calculating each time
3844  */
3845 static void qib_7322_tidtemplate(struct qib_devdata *dd)
3846 {
3847         /*
3848          * For now, we always allocate 4KB buffers (at init) so we can
3849          * receive max size packets.  We may want a module parameter to
3850          * specify 2KB or 4KB and/or make it per port instead of per device
3851          * for those who want to reduce memory footprint.  Note that the
3852          * rcvhdrentsize size must be large enough to hold the largest
3853          * IB header (currently 96 bytes) that we expect to handle (plus of
3854          * course the 2 dwords of RHF).
3855          */
3856         if (dd->rcvegrbufsize == 2048)
3857                 dd->tidtemplate = IBA7322_TID_SZ_2K;
3858         else if (dd->rcvegrbufsize == 4096)
3859                 dd->tidtemplate = IBA7322_TID_SZ_4K;
3860         dd->tidinvalid = 0;
3861 }
3862 
3863 /**
3864  * qib_init_7322_get_base_info - set chip-specific flags for user code
3865  * @rcd: the qlogic_ib ctxt
3866  * @kbase: qib_base_info pointer
3867  *
3868  * We set the PCIE flag because the lower bandwidth on PCIe vs
3869  * HyperTransport can affect some user packet algorithims.
3870  */
3871 
3872 static int qib_7322_get_base_info(struct qib_ctxtdata *rcd,
3873                                   struct qib_base_info *kinfo)
3874 {
3875         kinfo->spi_runtime_flags |= QIB_RUNTIME_CTXT_MSB_IN_QP |
3876                 QIB_RUNTIME_PCIE | QIB_RUNTIME_NODMA_RTAIL |
3877                 QIB_RUNTIME_HDRSUPP | QIB_RUNTIME_SDMA;
3878         if (rcd->dd->cspec->r1)
3879                 kinfo->spi_runtime_flags |= QIB_RUNTIME_RCHK;
3880         if (rcd->dd->flags & QIB_USE_SPCL_TRIG)
3881                 kinfo->spi_runtime_flags |= QIB_RUNTIME_SPECIAL_TRIGGER;
3882 
3883         return 0;
3884 }
3885 
3886 static struct qib_message_header *
3887 qib_7322_get_msgheader(struct qib_devdata *dd, __le32 *rhf_addr)
3888 {
3889         u32 offset = qib_hdrget_offset(rhf_addr);
3890 
3891         return (struct qib_message_header *)
3892                 (rhf_addr - dd->rhf_offset + offset);
3893 }
3894 
/*
 * Configure number of contexts.
 *
 * Decides how many receive contexts to enable (6, 10, or all the chip
 * supports), based on the qib_cfgctxts/qib_n_krcv_queues module parameters
 * and the number of online CPUs, then programs RcvCtrl accordingly.
 */
static void qib_7322_config_ctxts(struct qib_devdata *dd)
{
	unsigned long flags;
	u32 nchipctxts;

	/* total receive contexts the chip supports */
	nchipctxts = qib_read_kreg32(dd, kr_contextcnt);
	dd->cspec->numctxts = nchipctxts;
	if (qib_n_krcv_queues > 1 && dd->num_pports) {
		/* kernel contexts start after one context per IB port */
		dd->first_user_ctxt = NUM_IB_PORTS +
			(qib_n_krcv_queues - 1) * dd->num_pports;
		if (dd->first_user_ctxt > nchipctxts)
			dd->first_user_ctxt = nchipctxts;
		dd->n_krcv_queues = dd->first_user_ctxt / dd->num_pports;
	} else {
		dd->first_user_ctxt = NUM_IB_PORTS;
		dd->n_krcv_queues = 1;
	}

	if (!qib_cfgctxts) {
		/* no override: size the context count by online CPUs */
		int nctxts = dd->first_user_ctxt + num_online_cpus();

		if (nctxts <= 6)
			dd->ctxtcnt = 6;
		else if (nctxts <= 10)
			dd->ctxtcnt = 10;
		else if (nctxts <= nchipctxts)
			dd->ctxtcnt = nchipctxts;
	} else if (qib_cfgctxts < dd->num_pports)
		dd->ctxtcnt = dd->num_pports;
	else if (qib_cfgctxts <= nchipctxts)
		dd->ctxtcnt = qib_cfgctxts;
	if (!dd->ctxtcnt) /* none of the above, set to max */
		dd->ctxtcnt = nchipctxts;

	/*
	 * Chip can be configured for 6, 10, or 18 ctxts, and choice
	 * affects number of eager TIDs per ctxt (1K, 2K, 4K).
	 * Lock to be paranoid about later motion, etc.
	 */
	spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
	if (dd->ctxtcnt > 10)
		dd->rcvctrl |= 2ULL << SYM_LSB(RcvCtrl, ContextCfg);
	else if (dd->ctxtcnt > 6)
		dd->rcvctrl |= 1ULL << SYM_LSB(RcvCtrl, ContextCfg);
	/* else configure for default 6 receive ctxts */

	/* The XRC opcode is 5. */
	dd->rcvctrl |= 5ULL << SYM_LSB(RcvCtrl, XrcTypeCode);

	/*
	 * RcvCtrl *must* be written here so that the
	 * chip understands how to change rcvegrcnt below.
	 */
	qib_write_kreg(dd, kr_rcvctrl, dd->rcvctrl);
	spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);

	/* kr_rcvegrcnt changes based on the number of contexts enabled */
	dd->cspec->rcvegrcnt = qib_read_kreg32(dd, kr_rcvegrcnt);
	if (qib_rcvhdrcnt)
		dd->rcvhdrcnt = max(dd->cspec->rcvegrcnt, qib_rcvhdrcnt);
	else
		dd->rcvhdrcnt = 2 * max(dd->cspec->rcvegrcnt,
				    dd->num_pports > 1 ? 1024U : 2048U);
}
3962 
/*
 * qib_7322_get_ib_cfg - query one IB port configuration value
 *
 * Cases that "goto done" return a cached/derived value directly; cases
 * that fall out of the switch set @lsb/@maskr and extract the field from
 * the cached IBCCtrlB shadow at the bottom.  Returns -EINVAL for an
 * unknown @which.
 */
static int qib_7322_get_ib_cfg(struct qib_pportdata *ppd, int which)
{

	int lsb, ret = 0;
	u64 maskr; /* right-justified mask */

	switch (which) {

	case QIB_IB_CFG_LWID_ENB: /* Get allowed Link-width */
		ret = ppd->link_width_enabled;
		goto done;

	case QIB_IB_CFG_LWID: /* Get currently active Link-width */
		ret = ppd->link_width_active;
		goto done;

	case QIB_IB_CFG_SPD_ENB: /* Get allowed Link speeds */
		ret = ppd->link_speed_enabled;
		goto done;

	case QIB_IB_CFG_SPD: /* Get current Link spd */
		ret = ppd->link_speed_active;
		goto done;

	case QIB_IB_CFG_RXPOL_ENB: /* Get Auto-RX-polarity enable */
		lsb = SYM_LSB(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
		maskr = SYM_RMASK(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
		break;

	case QIB_IB_CFG_LREV_ENB: /* Get Auto-Lane-reversal enable */
		lsb = SYM_LSB(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
		maskr = SYM_RMASK(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
		break;

	case QIB_IB_CFG_LINKLATENCY:
		/* read directly from the chip, not the shadow */
		ret = qib_read_kreg_port(ppd, krp_ibcstatus_b) &
			SYM_MASK(IBCStatusB_0, LinkRoundTripLatency);
		goto done;

	case QIB_IB_CFG_OP_VLS:
		ret = ppd->vls_operational;
		goto done;

	case QIB_IB_CFG_VL_HIGH_CAP:
		ret = 16;
		goto done;

	case QIB_IB_CFG_VL_LOW_CAP:
		ret = 16;
		goto done;

	case QIB_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
		ret = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
				OverrunThreshold);
		goto done;

	case QIB_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
		ret = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
				PhyerrThreshold);
		goto done;

	case QIB_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
		/* will only take effect when the link state changes */
		ret = (ppd->cpspec->ibcctrl_a &
		       SYM_MASK(IBCCtrlA_0, LinkDownDefaultState)) ?
			IB_LINKINITCMD_SLEEP : IB_LINKINITCMD_POLL;
		goto done;

	case QIB_IB_CFG_HRTBT: /* Get Heartbeat off/enable/auto */
		lsb = IBA7322_IBC_HRTBT_LSB;
		maskr = IBA7322_IBC_HRTBT_RMASK; /* OR of AUTO and ENB */
		break;

	case QIB_IB_CFG_PMA_TICKS:
		/*
		 * 0x00 = 10x link transfer rate or 4 nsec. for 2.5Gbs
		 * Since the clock is always 250MHz, the value is 3, 1 or 0.
		 */
		if (ppd->link_speed_active == QIB_IB_QDR)
			ret = 3;
		else if (ppd->link_speed_active == QIB_IB_DDR)
			ret = 1;
		else
			ret = 0;
		goto done;

	default:
		ret = -EINVAL;
		goto done;
	}
	/* common extraction path for the fall-through cases above */
	ret = (int)((ppd->cpspec->ibcctrl_b >> lsb) & maskr);
done:
	return ret;
}
4057 
4058 /*
4059  * Below again cribbed liberally from older version. Do not lean
4060  * heavily on it.
4061  */
4062 #define IBA7322_IBC_DLIDLMC_SHIFT QIB_7322_IBCCtrlB_0_IB_DLID_LSB
4063 #define IBA7322_IBC_DLIDLMC_MASK (QIB_7322_IBCCtrlB_0_IB_DLID_RMASK \
4064         | (QIB_7322_IBCCtrlB_0_IB_DLID_MASK_RMASK << 16))
4065 
4066 static int qib_7322_set_ib_cfg(struct qib_pportdata *ppd, int which, u32 val)
4067 {
4068         struct qib_devdata *dd = ppd->dd;
4069         u64 maskr; /* right-justified mask */
4070         int lsb, ret = 0;
4071         u16 lcmd, licmd;
4072         unsigned long flags;
4073 
4074         switch (which) {
4075         case QIB_IB_CFG_LIDLMC:
4076                 /*
4077                  * Set LID and LMC. Combined to avoid possible hazard
4078                  * caller puts LMC in 16MSbits, DLID in 16LSbits of val
4079                  */
4080                 lsb = IBA7322_IBC_DLIDLMC_SHIFT;
4081                 maskr = IBA7322_IBC_DLIDLMC_MASK;
4082                 /*
4083                  * For header-checking, the SLID in the packet will
4084                  * be masked with SendIBSLMCMask, and compared
4085                  * with SendIBSLIDAssignMask. Make sure we do not
4086                  * set any bits not covered by the mask, or we get
4087                  * false-positives.
4088                  */
4089                 qib_write_kreg_port(ppd, krp_sendslid,
4090                                     val & (val >> 16) & SendIBSLIDAssignMask);
4091                 qib_write_kreg_port(ppd, krp_sendslidmask,
4092                                     (val >> 16) & SendIBSLMCMask);
4093                 break;
4094 
4095         case QIB_IB_CFG_LWID_ENB: /* set allowed Link-width */
4096                 ppd->link_width_enabled = val;
4097                 /* convert IB value to chip register value */
4098                 if (val == IB_WIDTH_1X)
4099                         val = 0;
4100                 else if (val == IB_WIDTH_4X)
4101                         val = 1;
4102                 else
4103                         val = 3;
4104                 maskr = SYM_RMASK(IBCCtrlB_0, IB_NUM_CHANNELS);
4105                 lsb = SYM_LSB(IBCCtrlB_0, IB_NUM_CHANNELS);
4106                 break;
4107 
4108         case QIB_IB_CFG_SPD_ENB: /* set allowed Link speeds */
4109                 /*
4110                  * As with width, only write the actual register if the
4111                  * link is currently down, otherwise takes effect on next
4112                  * link change.  Since setting is being explicitly requested
4113                  * (via MAD or sysfs), clear autoneg failure status if speed
4114                  * autoneg is enabled.
4115                  */
4116                 ppd->link_speed_enabled = val;
4117                 val <<= IBA7322_IBC_SPEED_LSB;
4118                 maskr = IBA7322_IBC_SPEED_MASK | IBA7322_IBC_IBTA_1_2_MASK |
4119                         IBA7322_IBC_MAX_SPEED_MASK;
4120                 if (val & (val - 1)) {
4121                         /* Muliple speeds enabled */
4122                         val |= IBA7322_IBC_IBTA_1_2_MASK |
4123                                 IBA7322_IBC_MAX_SPEED_MASK;
4124                         spin_lock_irqsave(&ppd->lflags_lock, flags);
4125                         ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
4126                         spin_unlock_irqrestore(&ppd->lflags_lock, flags);
4127                 } else if (val & IBA7322_IBC_SPEED_QDR)
4128                         val |= IBA7322_IBC_IBTA_1_2_MASK;
4129                 /* IBTA 1.2 mode + min/max + speed bits are contiguous */
4130                 lsb = SYM_LSB(IBCCtrlB_0, IB_ENHANCED_MODE);
4131                 break;
4132 
4133         case QIB_IB_CFG_RXPOL_ENB: /* set Auto-RX-polarity enable */
4134                 lsb = SYM_LSB(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
4135                 maskr = SYM_RMASK(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
4136                 break;
4137 
4138         case QIB_IB_CFG_LREV_ENB: /* set Auto-Lane-reversal enable */
4139                 lsb = SYM_LSB(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
4140                 maskr = SYM_RMASK(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
4141                 break;
4142 
4143         case QIB_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
4144                 maskr = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
4145                                   OverrunThreshold);
4146                 if (maskr != val) {
4147                         ppd->cpspec->ibcctrl_a &=
4148                                 ~SYM_MASK(IBCCtrlA_0, OverrunThreshold);
4149                         ppd->cpspec->ibcctrl_a |= (u64) val <<
4150                                 SYM_LSB(IBCCtrlA_0, OverrunThreshold);
4151                         qib_write_kreg_port(ppd, krp_ibcctrl_a,
4152                                             ppd->cpspec->ibcctrl_a);
4153                         qib_write_kreg(dd, kr_scratch, 0ULL);
4154                 }
4155                 goto bail;
4156 
4157         case QIB_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
4158                 maskr = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
4159                                   PhyerrThreshold);
4160                 if (maskr != val) {
4161                         ppd->cpspec->ibcctrl_a &=
4162                                 ~SYM_MASK(IBCCtrlA_0, PhyerrThreshold);
4163                         ppd->cpspec->ibcctrl_a |= (u64) val <<
4164                                 SYM_LSB(IBCCtrlA_0, PhyerrThreshold);
4165                         qib_write_kreg_port(ppd, krp_ibcctrl_a,
4166                                             ppd->cpspec->ibcctrl_a);
4167                         qib_write_kreg(dd, kr_scratch, 0ULL);
4168                 }
4169                 goto bail;
4170 
4171         case QIB_IB_CFG_PKEYS: /* update pkeys */
4172                 maskr = (u64) ppd->pkeys[0] | ((u64) ppd->pkeys[1] << 16) |
4173                         ((u64) ppd->pkeys[2] << 32) |
4174                         ((u64) ppd->pkeys[3] << 48);
4175                 qib_write_kreg_port(ppd, krp_partitionkey, maskr);
4176                 goto bail;
4177 
4178         case QIB_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
4179                 /* will only take effect when the link state changes */
4180                 if (val == IB_LINKINITCMD_POLL)
4181                         ppd->cpspec->ibcctrl_a &=
4182                                 ~SYM_MASK(IBCCtrlA_0, LinkDownDefaultState);
4183                 else /* SLEEP */
4184                         ppd->cpspec->ibcctrl_a |=
4185                                 SYM_MASK(IBCCtrlA_0, LinkDownDefaultState);
4186                 qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
4187                 qib_write_kreg(dd, kr_scratch, 0ULL);
4188                 goto bail;
4189 
4190         case QIB_IB_CFG_MTU: /* update the MTU in IBC */
4191                 /*
4192                  * Update our housekeeping variables, and set IBC max
4193                  * size, same as init code; max IBC is max we allow in
4194                  * buffer, less the qword pbc, plus 1 for ICRC, in dwords
4195                  * Set even if it's unchanged, print debug message only
4196                  * on changes.
4197                  */
4198                 val = (ppd->ibmaxlen >> 2) + 1;
4199                 ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, MaxPktLen);
4200                 ppd->cpspec->ibcctrl_a |= (u64)val <<
4201                         SYM_LSB(IBCCtrlA_0, MaxPktLen);
4202                 qib_write_kreg_port(ppd, krp_ibcctrl_a,
4203                                     ppd->cpspec->ibcctrl_a);
4204                 qib_write_kreg(dd, kr_scratch, 0ULL);
4205                 goto bail;
4206 
4207         case QIB_IB_CFG_LSTATE: /* set the IB link state */
4208                 switch (val & 0xffff0000) {
4209                 case IB_LINKCMD_DOWN:
4210                         lcmd = QLOGIC_IB_IBCC_LINKCMD_DOWN;
4211                         ppd->cpspec->ibmalfusesnap = 1;
4212                         ppd->cpspec->ibmalfsnap = read_7322_creg32_port(ppd,
4213                                 crp_errlink);
4214                         if (!ppd->cpspec->ibdeltainprog &&
4215                             qib_compat_ddr_negotiate) {
4216                                 ppd->cpspec->ibdeltainprog = 1;
4217                                 ppd->cpspec->ibsymsnap =
4218                                         read_7322_creg32_port(ppd,
4219                                                               crp_ibsymbolerr);
4220                                 ppd->cpspec->iblnkerrsnap =
4221                                         read_7322_creg32_port(ppd,
4222                                                       crp_iblinkerrrecov);
4223                         }
4224                         break;
4225 
4226                 case IB_LINKCMD_ARMED:
4227                         lcmd = QLOGIC_IB_IBCC_LINKCMD_ARMED;
4228                         if (ppd->cpspec->ibmalfusesnap) {
4229                                 ppd->cpspec->ibmalfusesnap = 0;
4230                                 ppd->cpspec->ibmalfdelta +=
4231                                         read_7322_creg32_port(ppd,
4232                                                               crp_errlink) -
4233                                         ppd->cpspec->ibmalfsnap;
4234                         }
4235                         break;
4236 
4237                 case IB_LINKCMD_ACTIVE:
4238                         lcmd = QLOGIC_IB_IBCC_LINKCMD_ACTIVE;
4239                         break;
4240 
4241                 default:
4242                         ret = -EINVAL;
4243                         qib_dev_err(dd, "bad linkcmd req 0x%x\n", val >> 16);
4244                         goto bail;
4245                 }
4246                 switch (val & 0xffff) {
4247                 case IB_LINKINITCMD_NOP:
4248                         licmd = 0;
4249                         break;
4250 
4251                 case IB_LINKINITCMD_POLL:
4252                         licmd = QLOGIC_IB_IBCC_LINKINITCMD_POLL;
4253                         break;
4254 
4255                 case IB_LINKINITCMD_SLEEP:
4256                         licmd = QLOGIC_IB_IBCC_LINKINITCMD_SLEEP;
4257                         break;
4258 
4259                 case IB_LINKINITCMD_DISABLE:
4260                         licmd = QLOGIC_IB_IBCC_LINKINITCMD_DISABLE;
4261                         ppd->cpspec->chase_end = 0;
4262                         /*
4263                          * stop state chase counter and timer, if running.
4264                          * wait forpending timer, but don't clear .data (ppd)!
4265                          */
4266                         if (ppd->cpspec->chase_timer.expires) {
4267                                 del_timer_sync(&ppd->cpspec->chase_timer);
4268                                 ppd->cpspec->chase_timer.expires = 0;
4269                         }
4270                         break;
4271 
4272                 default:
4273                         ret = -EINVAL;
4274                         qib_dev_err(dd, "bad linkinitcmd req 0x%x\n",
4275                                     val & 0xffff);
4276                         goto bail;
4277                 }
4278                 qib_set_ib_7322_lstate(ppd, lcmd, licmd);
4279                 goto bail;
4280 
4281         case QIB_IB_CFG_OP_VLS:
4282                 if (ppd->vls_operational != val) {
4283                         ppd->vls_operational = val;
4284                         set_vls(ppd);
4285                 }
4286                 goto bail;
4287 
4288         case QIB_IB_CFG_VL_HIGH_LIMIT:
4289                 qib_write_kreg_port(ppd, krp_highprio_limit, val);
4290                 goto bail;
4291 
4292         case QIB_IB_CFG_HRTBT: /* set Heartbeat off/enable/auto */
4293                 if (val > 3) {
4294                         ret = -EINVAL;
4295                         goto bail;
4296                 }
4297                 lsb = IBA7322_IBC_HRTBT_LSB;
4298                 maskr = IBA7322_IBC_HRTBT_RMASK; /* OR of AUTO and ENB */
4299                 break;
4300 
4301         case QIB_IB_CFG_PORT:
4302                 /* val is the port number of the switch we are connected to. */
4303                 if (ppd->dd->cspec->r1) {
4304                         cancel_delayed_work(&ppd->cpspec->ipg_work);
4305                         ppd->cpspec->ipg_tries = 0;
4306                 }
4307                 goto bail;
4308 
4309         default:
4310                 ret = -EINVAL;
4311                 goto bail;
4312         }
4313         ppd->cpspec->ibcctrl_b &= ~(maskr << lsb);
4314         ppd->cpspec->ibcctrl_b |= (((u64) val & maskr) << lsb);
4315         qib_write_kreg_port(ppd, krp_ibcctrl_b, ppd->cpspec->ibcctrl_b);
4316         qib_write_kreg(dd, kr_scratch, 0);
4317 bail:
4318         return ret;
4319 }
4320 
4321 static int qib_7322_set_loopback(struct qib_pportdata *ppd, const char *what)
4322 {
4323         int ret = 0;
4324         u64 val, ctrlb;
4325 
4326         /* only IBC loopback, may add serdes and xgxs loopbacks later */
4327         if (!strncmp(what, "ibc", 3)) {
4328                 ppd->cpspec->ibcctrl_a |= SYM_MASK(IBCCtrlA_0,
4329                                                        Loopback);
4330                 val = 0; /* disable heart beat, so link will come up */
4331                 qib_devinfo(ppd->dd->pcidev, "Enabling IB%u:%u IBC loopback\n",
4332                          ppd->dd->unit, ppd->port);
4333         } else if (!strncmp(what, "off", 3)) {
4334                 ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0,
4335                                                         Loopback);
4336                 /* enable heart beat again */
4337                 val = IBA7322_IBC_HRTBT_RMASK << IBA7322_IBC_HRTBT_LSB;
4338                 qib_devinfo(ppd->dd->pcidev,
4339                         "Disabling IB%u:%u IBC loopback (normal)\n",
4340                         ppd->dd->unit, ppd->port);
4341         } else
4342                 ret = -EINVAL;
4343         if (!ret) {
4344                 qib_write_kreg_port(ppd, krp_ibcctrl_a,
4345                                     ppd->cpspec->ibcctrl_a);
4346                 ctrlb = ppd->cpspec->ibcctrl_b & ~(IBA7322_IBC_HRTBT_MASK
4347                                              << IBA7322_IBC_HRTBT_LSB);
4348                 ppd->cpspec->ibcctrl_b = ctrlb | val;
4349                 qib_write_kreg_port(ppd, krp_ibcctrl_b,
4350                                     ppd->cpspec->ibcctrl_b);
4351                 qib_write_kreg(ppd->dd, kr_scratch, 0);
4352         }
4353         return ret;
4354 }
4355 
4356 static void get_vl_weights(struct qib_pportdata *ppd, unsigned regno,
4357                            struct ib_vl_weight_elem *vl)
4358 {
4359         unsigned i;
4360 
4361         for (i = 0; i < 16; i++, regno++, vl++) {
4362                 u32 val = qib_read_kreg_port(ppd, regno);
4363 
4364                 vl->vl = (val >> SYM_LSB(LowPriority0_0, VirtualLane)) &
4365                         SYM_RMASK(LowPriority0_0, VirtualLane);
4366                 vl->weight = (val >> SYM_LSB(LowPriority0_0, Weight)) &
4367                         SYM_RMASK(LowPriority0_0, Weight);
4368         }
4369 }
4370 
4371 static void set_vl_weights(struct qib_pportdata *ppd, unsigned regno,
4372                            struct ib_vl_weight_elem *vl)
4373 {
4374         unsigned i;
4375 
4376         for (i = 0; i < 16; i++, regno++, vl++) {
4377                 u64 val;
4378 
4379                 val = ((vl->vl & SYM_RMASK(LowPriority0_0, VirtualLane)) <<
4380                         SYM_LSB(LowPriority0_0, VirtualLane)) |
4381                       ((vl->weight & SYM_RMASK(LowPriority0_0, Weight)) <<
4382                         SYM_LSB(LowPriority0_0, Weight));
4383                 qib_write_kreg_port(ppd, regno, val);
4384         }
4385         if (!(ppd->p_sendctrl & SYM_MASK(SendCtrl_0, IBVLArbiterEn))) {
4386                 struct qib_devdata *dd = ppd->dd;
4387                 unsigned long flags;
4388 
4389                 spin_lock_irqsave(&dd->sendctrl_lock, flags);
4390                 ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, IBVLArbiterEn);
4391                 qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
4392                 qib_write_kreg(dd, kr_scratch, 0);
4393                 spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
4394         }
4395 }
4396 
4397 static int qib_7322_get_ib_table(struct qib_pportdata *ppd, int which, void *t)
4398 {
4399         switch (which) {
4400         case QIB_IB_TBL_VL_HIGH_ARB:
4401                 get_vl_weights(ppd, krp_highprio_0, t);
4402                 break;
4403 
4404         case QIB_IB_TBL_VL_LOW_ARB:
4405                 get_vl_weights(ppd, krp_lowprio_0, t);
4406                 break;
4407 
4408         default:
4409                 return -EINVAL;
4410         }
4411         return 0;
4412 }
4413 
4414 static int qib_7322_set_ib_table(struct qib_pportdata *ppd, int which, void *t)
4415 {
4416         switch (which) {
4417         case QIB_IB_TBL_VL_HIGH_ARB:
4418                 set_vl_weights(ppd, krp_highprio_0, t);
4419                 break;
4420 
4421         case QIB_IB_TBL_VL_LOW_ARB:
4422                 set_vl_weights(ppd, krp_lowprio_0, t);
4423                 break;
4424 
4425         default:
4426                 return -EINVAL;
4427         }
4428         return 0;
4429 }
4430 
/*
 * qib_update_7322_usrhead - update rcvhdrq head (and optionally eager head)
 * @rcd: receive context
 * @hd: new rcvhdrhead value; bits at/above IBA7322_HDRHEAD_PKTINT_SHIFT
 *      also request interrupt-timeout adjustment
 * @updegr: nonzero to also update the eager buffer index head
 * @egrhd: new eager index head (used only when @updegr is set)
 * @npkts: packet count passed to adjust_rcv_timeout()
 */
static void qib_update_7322_usrhead(struct qib_ctxtdata *rcd, u64 hd,
                                    u32 updegr, u32 egrhd, u32 npkts)
{
        /*
         * Need to write timeout register before updating rcvhdrhead to ensure
         * that the timer is enabled on reception of a packet.
         */
        if (hd >> IBA7322_HDRHEAD_PKTINT_SHIFT)
                adjust_rcv_timeout(rcd, npkts);
        if (updegr)
                qib_write_ureg(rcd->dd, ur_rcvegrindexhead, egrhd, rcd->ctxt);
        qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
        /*
         * NOTE(review): rcvhdrhead is written twice back-to-back.  This
         * looks like a deliberate posted-write/ordering workaround rather
         * than a typo, but confirm against chip errata before touching it.
         */
        qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
}
4445 
4446 static u32 qib_7322_hdrqempty(struct qib_ctxtdata *rcd)
4447 {
4448         u32 head, tail;
4449 
4450         head = qib_read_ureg32(rcd->dd, ur_rcvhdrhead, rcd->ctxt);
4451         if (rcd->rcvhdrtail_kvaddr)
4452                 tail = qib_get_rcvhdrtail(rcd);
4453         else
4454                 tail = qib_read_ureg32(rcd->dd, ur_rcvhdrtail, rcd->ctxt);
4455         return head == tail;
4456 }
4457 
/* rcvctrl ops that require a write of the device-common kr_rcvctrl */
#define RCVCTRL_COMMON_MODS (QIB_RCVCTRL_CTXT_ENB | \
        QIB_RCVCTRL_CTXT_DIS | \
        QIB_RCVCTRL_TIDFLOW_ENB | \
        QIB_RCVCTRL_TIDFLOW_DIS | \
        QIB_RCVCTRL_TAILUPD_ENB | \
        QIB_RCVCTRL_TAILUPD_DIS | \
        QIB_RCVCTRL_INTRAVAIL_ENB | \
        QIB_RCVCTRL_INTRAVAIL_DIS | \
        QIB_RCVCTRL_BP_ENB | \
        QIB_RCVCTRL_BP_DIS)

/* rcvctrl ops that require a write of the per-port krp_rcvctrl */
#define RCVCTRL_PORT_MODS (QIB_RCVCTRL_CTXT_ENB | \
        QIB_RCVCTRL_CTXT_DIS | \
        QIB_RCVCTRL_PKEY_DIS | \
        QIB_RCVCTRL_PKEY_ENB)
4473 
4474 /*
4475  * Modify the RCVCTRL register in chip-specific way. This
4476  * is a function because bit positions and (future) register
4477  * location is chip-specifc, but the needed operations are
4478  * generic. <op> is a bit-mask because we often want to
4479  * do multiple modifications.
4480  */
static void rcvctrl_7322_mod(struct qib_pportdata *ppd, unsigned int op,
                             int ctxt)
{
        struct qib_devdata *dd = ppd->dd;
        struct qib_ctxtdata *rcd;
        u64 mask, val;
        unsigned long flags;

        spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);

        /* Device-common bits, tracked in the dd->rcvctrl shadow. */
        if (op & QIB_RCVCTRL_TIDFLOW_ENB)
                dd->rcvctrl |= SYM_MASK(RcvCtrl, TidFlowEnable);
        if (op & QIB_RCVCTRL_TIDFLOW_DIS)
                dd->rcvctrl &= ~SYM_MASK(RcvCtrl, TidFlowEnable);
        if (op & QIB_RCVCTRL_TAILUPD_ENB)
                dd->rcvctrl |= SYM_MASK(RcvCtrl, TailUpd);
        if (op & QIB_RCVCTRL_TAILUPD_DIS)
                dd->rcvctrl &= ~SYM_MASK(RcvCtrl, TailUpd);
        /* Per-port pkey check: note ENB clears the *disable* bit. */
        if (op & QIB_RCVCTRL_PKEY_ENB)
                ppd->p_rcvctrl &= ~SYM_MASK(RcvCtrl_0, RcvPartitionKeyDisable);
        if (op & QIB_RCVCTRL_PKEY_DIS)
                ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvPartitionKeyDisable);
        /* ctxt < 0 means "all contexts": build the per-context bit mask. */
        if (ctxt < 0) {
                mask = (1ULL << dd->ctxtcnt) - 1;
                rcd = NULL;
        } else {
                mask = (1ULL << ctxt);
                rcd = dd->rcd[ctxt];
        }
        if ((op & QIB_RCVCTRL_CTXT_ENB) && rcd) {
                ppd->p_rcvctrl |=
                        (mask << SYM_LSB(RcvCtrl_0, ContextEnableKernel));
                /* DMA'd tail updates are the default unless disabled. */
                if (!(dd->flags & QIB_NODMA_RTAIL)) {
                        op |= QIB_RCVCTRL_TAILUPD_ENB; /* need reg write */
                        dd->rcvctrl |= SYM_MASK(RcvCtrl, TailUpd);
                }
                /* Write these registers before the context is enabled. */
                qib_write_kreg_ctxt(dd, krc_rcvhdrtailaddr, ctxt,
                                    rcd->rcvhdrqtailaddr_phys);
                qib_write_kreg_ctxt(dd, krc_rcvhdraddr, ctxt,
                                    rcd->rcvhdrq_phys);
                rcd->seq_cnt = 1;
        }
        if (op & QIB_RCVCTRL_CTXT_DIS)
                ppd->p_rcvctrl &=
                        ~(mask << SYM_LSB(RcvCtrl_0, ContextEnableKernel));
        if (op & QIB_RCVCTRL_BP_ENB)
                dd->rcvctrl |= mask << SYM_LSB(RcvCtrl, dontDropRHQFull);
        if (op & QIB_RCVCTRL_BP_DIS)
                dd->rcvctrl &= ~(mask << SYM_LSB(RcvCtrl, dontDropRHQFull));
        if (op & QIB_RCVCTRL_INTRAVAIL_ENB)
                dd->rcvctrl |= (mask << SYM_LSB(RcvCtrl, IntrAvail));
        if (op & QIB_RCVCTRL_INTRAVAIL_DIS)
                dd->rcvctrl &= ~(mask << SYM_LSB(RcvCtrl, IntrAvail));
        /*
         * Decide which registers to write depending on the ops enabled.
         * Special case is "flush" (no bits set at all)
         * which needs to write both.
         */
        if (op == 0 || (op & RCVCTRL_COMMON_MODS))
                qib_write_kreg(dd, kr_rcvctrl, dd->rcvctrl);
        if (op == 0 || (op & RCVCTRL_PORT_MODS))
                qib_write_kreg_port(ppd, krp_rcvctrl, ppd->p_rcvctrl);
        /*
         * NOTE(review): this indexes dd->rcd[ctxt] directly; CTXT_ENB
         * appears to be used only with ctxt >= 0 -- confirm with callers.
         */
        if ((op & QIB_RCVCTRL_CTXT_ENB) && dd->rcd[ctxt]) {
                /*
                 * Init the context registers also; if we were
                 * disabled, tail and head should both be zero
                 * already from the enable, but since we don't
                 * know, we have to do it explicitly.
                 */
                val = qib_read_ureg32(dd, ur_rcvegrindextail, ctxt);
                qib_write_ureg(dd, ur_rcvegrindexhead, val, ctxt);

                /* be sure enabling write seen; hd/tl should be 0 */
                (void) qib_read_kreg32(dd, kr_scratch);
                val = qib_read_ureg32(dd, ur_rcvhdrtail, ctxt);
                dd->rcd[ctxt]->head = val;
                /* If kctxt, interrupt on next receive. */
                if (ctxt < dd->first_user_ctxt)
                        val |= dd->rhdrhead_intr_off;
                qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt);
        } else if ((op & QIB_RCVCTRL_INTRAVAIL_ENB) &&
                dd->rcd[ctxt] && dd->rhdrhead_intr_off) {
                /* arm rcv interrupt */
                val = dd->rcd[ctxt]->head | dd->rhdrhead_intr_off;
                qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt);
        }
        if (op & QIB_RCVCTRL_CTXT_DIS) {
                unsigned f;

                /* Now that the context is disabled, clear these registers. */
                if (ctxt >= 0) {
                        qib_write_kreg_ctxt(dd, krc_rcvhdrtailaddr, ctxt, 0);
                        qib_write_kreg_ctxt(dd, krc_rcvhdraddr, ctxt, 0);
                        /* reset every TID flow entry for this context */
                        for (f = 0; f < NUM_TIDFLOWS_CTXT; f++)
                                qib_write_ureg(dd, ur_rcvflowtable + f,
                                               TIDFLOW_ERRBITS, ctxt);
                } else {
                        unsigned i;

                        /* all contexts: clear addresses and TID flows */
                        for (i = 0; i < dd->cfgctxts; i++) {
                                qib_write_kreg_ctxt(dd, krc_rcvhdrtailaddr,
                                                    i, 0);
                                qib_write_kreg_ctxt(dd, krc_rcvhdraddr, i, 0);
                                for (f = 0; f < NUM_TIDFLOWS_CTXT; f++)
                                        qib_write_ureg(dd, ur_rcvflowtable + f,
                                                       TIDFLOW_ERRBITS, i);
                        }
                }
        }
        spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
}
4593 
4594 /*
4595  * Modify the SENDCTRL register in chip-specific way. This
4596  * is a function where there are multiple such registers with
4597  * slightly different layouts.
4598  * The chip doesn't allow back-to-back sendctrl writes, so write
4599  * the scratch register after writing sendctrl.
4600  *
4601  * Which register is written depends on the operation.
4602  * Most operate on the common register, while
4603  * SEND_ENB and SEND_DIS operate on the per-port ones.
4604  * SEND_ENB is included in common because it can change SPCL_TRIG
4605  */
4606 #define SENDCTRL_COMMON_MODS (\
4607         QIB_SENDCTRL_CLEAR | \
4608         QIB_SENDCTRL_AVAIL_DIS | \
4609         QIB_SENDCTRL_AVAIL_ENB | \
4610         QIB_SENDCTRL_AVAIL_BLIP | \
4611         QIB_SENDCTRL_DISARM | \
4612         QIB_SENDCTRL_DISARM_ALL | \
4613         QIB_SENDCTRL_SEND_ENB)
4614 
4615 #define SENDCTRL_PORT_MODS (\
4616         QIB_SENDCTRL_CLEAR | \
4617         QIB_SENDCTRL_SEND_ENB | \
4618         QIB_SENDCTRL_SEND_DIS | \
4619         QIB_SENDCTRL_FLUSH)
4620 
static void sendctrl_7322_mod(struct qib_pportdata *ppd, u32 op)
{
        struct qib_devdata *dd = ppd->dd;
        u64 tmp_dd_sendctrl;
        unsigned long flags;

        spin_lock_irqsave(&dd->sendctrl_lock, flags);

        /* First the dd ones that are "sticky", saved in shadow */
        if (op & QIB_SENDCTRL_CLEAR)
                dd->sendctrl = 0;
        if (op & QIB_SENDCTRL_AVAIL_DIS)
                dd->sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd);
        else if (op & QIB_SENDCTRL_AVAIL_ENB) {
                dd->sendctrl |= SYM_MASK(SendCtrl, SendBufAvailUpd);
                if (dd->flags & QIB_USE_SPCL_TRIG)
                        dd->sendctrl |= SYM_MASK(SendCtrl, SpecialTriggerEn);
        }

        /* Then the ppd ones that are "sticky", saved in shadow */
        if (op & QIB_SENDCTRL_SEND_DIS)
                ppd->p_sendctrl &= ~SYM_MASK(SendCtrl_0, SendEnable);
        else if (op & QIB_SENDCTRL_SEND_ENB)
                ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, SendEnable);

        if (op & QIB_SENDCTRL_DISARM_ALL) {
                u32 i, last;

                tmp_dd_sendctrl = dd->sendctrl;
                last = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS;
                /*
                 * Disarm any buffers that are not yet launched,
                 * disabling updates until done.
                 */
                tmp_dd_sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd);
                /* one buffer per write; scratch write flushes each out */
                for (i = 0; i < last; i++) {
                        qib_write_kreg(dd, kr_sendctrl,
                                       tmp_dd_sendctrl |
                                       SYM_MASK(SendCtrl, Disarm) | i);
                        qib_write_kreg(dd, kr_scratch, 0);
                }
        }

        if (op & QIB_SENDCTRL_FLUSH) {
                u64 tmp_ppd_sendctrl = ppd->p_sendctrl;

                /*
                 * Now drain all the fifos.  The Abort bit should never be
                 * needed, so for now, at least, we don't use it.
                 */
                tmp_ppd_sendctrl |=
                        SYM_MASK(SendCtrl_0, TxeDrainRmFifo) |
                        SYM_MASK(SendCtrl_0, TxeDrainLaFifo) |
                        SYM_MASK(SendCtrl_0, TxeBypassIbc);
                qib_write_kreg_port(ppd, krp_sendctrl, tmp_ppd_sendctrl);
                qib_write_kreg(dd, kr_scratch, 0);
        }

        /* Non-sticky bits go in a temp copy, never into the shadow. */
        tmp_dd_sendctrl = dd->sendctrl;

        if (op & QIB_SENDCTRL_DISARM)
                tmp_dd_sendctrl |= SYM_MASK(SendCtrl, Disarm) |
                        ((op & QIB_7322_SendCtrl_DisarmSendBuf_RMASK) <<
                         SYM_LSB(SendCtrl, DisarmSendBuf));
        if ((op & QIB_SENDCTRL_AVAIL_BLIP) &&
            (dd->sendctrl & SYM_MASK(SendCtrl, SendBufAvailUpd)))
                tmp_dd_sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd);

        if (op == 0 || (op & SENDCTRL_COMMON_MODS)) {
                qib_write_kreg(dd, kr_sendctrl, tmp_dd_sendctrl);
                qib_write_kreg(dd, kr_scratch, 0);
        }

        if (op == 0 || (op & SENDCTRL_PORT_MODS)) {
                qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
                qib_write_kreg(dd, kr_scratch, 0);
        }

        /* BLIP: restore SendBufAvailUpd from the shadow after the dip. */
        if (op & QIB_SENDCTRL_AVAIL_BLIP) {
                qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
                qib_write_kreg(dd, kr_scratch, 0);
        }

        spin_unlock_irqrestore(&dd->sendctrl_lock, flags);

        if (op & QIB_SENDCTRL_FLUSH) {
                u32 v;
                /*
                 * ensure writes have hit chip, then do a few
                 * more reads, to allow DMA of pioavail registers
                 * to occur, so in-memory copy is in sync with
                 * the chip.  Not always safe to sleep.
                 */
                v = qib_read_kreg32(dd, kr_scratch);
                qib_write_kreg(dd, kr_scratch, v);
                v = qib_read_kreg32(dd, kr_scratch);
                qib_write_kreg(dd, kr_scratch, v);
                qib_read_kreg32(dd, kr_scratch);
        }
}
4721 
/* Flag bits OR'd into entries of the counter translation tables below. */
#define _PORT_VIRT_FLAG 0x8000U /* "virtual", need adjustments */
#define _PORT_64BIT_FLAG 0x10000U /* not "virtual", but 64bit */
#define _PORT_CNTR_IDXMASK 0x7fffU /* mask off flags above */
4725 
4726 /**
4727  * qib_portcntr_7322 - read a per-port chip counter
4728  * @ppd: the qlogic_ib pport
4729  * @creg: the counter to read (not a chip offset)
4730  */
4731 static u64 qib_portcntr_7322(struct qib_pportdata *ppd, u32 reg)
4732 {
4733         struct qib_devdata *dd = ppd->dd;
4734         u64 ret = 0ULL;
4735         u16 creg;
4736         /* 0xffff for unimplemented or synthesized counters */
4737         static const u32 xlator[] = {
4738                 [QIBPORTCNTR_PKTSEND] = crp_pktsend | _PORT_64BIT_FLAG,
4739                 [QIBPORTCNTR_WORDSEND] = crp_wordsend | _PORT_64BIT_FLAG,
4740                 [QIBPORTCNTR_PSXMITDATA] = crp_psxmitdatacount,
4741                 [QIBPORTCNTR_PSXMITPKTS] = crp_psxmitpktscount,
4742                 [QIBPORTCNTR_PSXMITWAIT] = crp_psxmitwaitcount,
4743                 [QIBPORTCNTR_SENDSTALL] = crp_sendstall,
4744                 [QIBPORTCNTR_PKTRCV] = crp_pktrcv | _PORT_64BIT_FLAG,
4745                 [QIBPORTCNTR_PSRCVDATA] = crp_psrcvdatacount,
4746                 [QIBPORTCNTR_PSRCVPKTS] = crp_psrcvpktscount,
4747                 [QIBPORTCNTR_RCVEBP] = crp_rcvebp,
4748                 [QIBPORTCNTR_RCVOVFL] = crp_rcvovfl,
4749                 [QIBPORTCNTR_WORDRCV] = crp_wordrcv | _PORT_64BIT_FLAG,
4750                 [QIBPORTCNTR_RXDROPPKT] = 0xffff, /* not needed  for 7322 */
4751                 [QIBPORTCNTR_RXLOCALPHYERR] = crp_rxotherlocalphyerr,
4752                 [QIBPORTCNTR_RXVLERR] = crp_rxvlerr,
4753                 [QIBPORTCNTR_ERRICRC] = crp_erricrc,
4754                 [QIBPORTCNTR_ERRVCRC] = crp_errvcrc,
4755                 [QIBPORTCNTR_ERRLPCRC] = crp_errlpcrc,
4756                 [QIBPORTCNTR_BADFORMAT] = crp_badformat,
4757                 [QIBPORTCNTR_ERR_RLEN] = crp_err_rlen,
4758                 [QIBPORTCNTR_IBSYMBOLERR] = crp_ibsymbolerr,
4759                 [QIBPORTCNTR_INVALIDRLEN] = crp_invalidrlen,
4760                 [QIBPORTCNTR_UNSUPVL] = crp_txunsupvl,
4761                 [QIBPORTCNTR_EXCESSBUFOVFL] = crp_excessbufferovfl,
4762                 [QIBPORTCNTR_ERRLINK] = crp_errlink,
4763                 [QIBPORTCNTR_IBLINKDOWN] = crp_iblinkdown,
4764                 [QIBPORTCNTR_IBLINKERRRECOV] = crp_iblinkerrrecov,
4765                 [QIBPORTCNTR_LLI] = crp_locallinkintegrityerr,
4766                 [QIBPORTCNTR_VL15PKTDROP] = crp_vl15droppedpkt,
4767                 [QIBPORTCNTR_ERRPKEY] = crp_errpkey,
4768                 /*
4769                  * the next 3 aren't really counters, but were implemented
4770                  * as counters in older chips, so still get accessed as
4771                  * though they were counters from this code.
4772                  */
4773                 [QIBPORTCNTR_PSINTERVAL] = krp_psinterval,
4774                 [QIBPORTCNTR_PSSTART] = krp_psstart,
4775                 [QIBPORTCNTR_PSSTAT] = krp_psstat,
4776                 /* pseudo-counter, summed for all ports */
4777                 [QIBPORTCNTR_KHDROVFL] = 0xffff,
4778         };
4779 
4780         if (reg >= ARRAY_SIZE(xlator)) {
4781                 qib_devinfo(ppd->dd->pcidev,
4782                          "Unimplemented portcounter %u\n", reg);
4783                 goto done;
4784         }
4785         creg = xlator[reg] & _PORT_CNTR_IDXMASK;
4786 
4787         /* handle non-counters and special cases first */
4788         if (reg == QIBPORTCNTR_KHDROVFL) {
4789                 int i;
4790 
4791                 /* sum over all kernel contexts (skip if mini_init) */
4792                 for (i = 0; dd->rcd && i < dd->first_user_ctxt; i++) {
4793                         struct qib_ctxtdata *rcd = dd->rcd[i];
4794 
4795                         if (!rcd || rcd->ppd != ppd)
4796                                 continue;
4797                         ret += read_7322_creg32(dd, cr_base_egrovfl + i);
4798                 }
4799                 goto done;
4800         } else if (reg == QIBPORTCNTR_RXDROPPKT) {
4801                 /*
4802                  * Used as part of the synthesis of port_rcv_errors
4803                  * in the verbs code for IBTA counters.  Not needed for 7322,
4804                  * because all the errors are already counted by other cntrs.
4805                  */
4806                 goto done;
4807         } else if (reg == QIBPORTCNTR_PSINTERVAL ||
4808                    reg == QIBPORTCNTR_PSSTART || reg == QIBPORTCNTR_PSSTAT) {
4809                 /* were counters in older chips, now per-port kernel regs */
4810                 ret = qib_read_kreg_port(ppd, creg);
4811                 goto done;
4812         }
4813 
4814         /*
4815          * Only fast increment counters are 64 bits; use 32 bit reads to
4816          * avoid two independent reads when on Opteron.
4817          */
4818         if (xlator[reg] & _PORT_64BIT_FLAG)
4819                 ret = read_7322_creg_port(ppd, creg);
4820         else
4821                 ret = read_7322_creg32_port(ppd, creg);
4822         if (creg == crp_ibsymbolerr) {
4823                 if (ppd->cpspec->ibdeltainprog)
4824                         ret -= ret - ppd->cpspec->ibsymsnap;
4825                 ret -= ppd->cpspec->ibsymdelta;
4826         } else if (creg == crp_iblinkerrrecov) {
4827                 if (ppd->cpspec->ibdeltainprog)
4828                         ret -= ret - ppd->cpspec->iblnkerrsnap;
4829                 ret -= ppd->cpspec->iblnkerrdelta;
4830         } else if (creg == crp_errlink)
4831                 ret -= ppd->cpspec->ibmalfdelta;
4832         else if (creg == crp_iblinkdown)
4833                 ret += ppd->cpspec->iblnkdowndelta;
4834 done:
4835         return ret;
4836 }
4837 
4838 /*
4839  * Device counter names (not port-specific), one line per stat,
4840  * single string.  Used by utilities like ipathstats to print the stats
4841  * in a way which works for different versions of drivers, without changing
4842  * the utility.  Names need to be 12 chars or less (w/o newline), for proper
4843  * display by utility.
4844  * Non-error counters are first.
4845  * Start of "error" conters is indicated by a leading "E " on the first
4846  * "error" counter, and doesn't count in label length.
4847  * The EgrOvfl list needs to be last so we truncate them at the configured
4848  * context count for the device.
4849  * cntr7322indices contains the corresponding register indices.
4850  */
static const char cntr7322names[] =
        "Interrupts\n"
        "HostBusStall\n"
        "E RxTIDFull\n"
        "RxTIDInvalid\n"
        "RxTIDFloDrop\n" /* 7322 only */
        "Ctxt0EgrOvfl\n"
        "Ctxt1EgrOvfl\n"
        "Ctxt2EgrOvfl\n"
        "Ctxt3EgrOvfl\n"
        "Ctxt4EgrOvfl\n"
        "Ctxt5EgrOvfl\n"
        "Ctxt6EgrOvfl\n"
        "Ctxt7EgrOvfl\n"
        "Ctxt8EgrOvfl\n"
        "Ctxt9EgrOvfl\n"
        "Ctx10EgrOvfl\n"
        "Ctx11EgrOvfl\n"
        "Ctx12EgrOvfl\n"
        "Ctx13EgrOvfl\n"
        "Ctx14EgrOvfl\n"
        "Ctx15EgrOvfl\n"
        "Ctx16EgrOvfl\n"
        "Ctx17EgrOvfl\n"
        ;

/*
 * Register index for each line of cntr7322names, in the same order;
 * keep the two tables in exact 1:1 correspondence when editing.
 */
static const u32 cntr7322indices[] = {
        cr_lbint | _PORT_64BIT_FLAG,
        cr_lbstall | _PORT_64BIT_FLAG,
        cr_tidfull,
        cr_tidinvalid,
        cr_rxtidflowdrop,
        cr_base_egrovfl + 0,
        cr_base_egrovfl + 1,
        cr_base_egrovfl + 2,
        cr_base_egrovfl + 3,
        cr_base_egrovfl + 4,
        cr_base_egrovfl + 5,
        cr_base_egrovfl + 6,
        cr_base_egrovfl + 7,
        cr_base_egrovfl + 8,
        cr_base_egrovfl + 9,
        cr_base_egrovfl + 10,
        cr_base_egrovfl + 11,
        cr_base_egrovfl + 12,
        cr_base_egrovfl + 13,
        cr_base_egrovfl + 14,
        cr_base_egrovfl + 15,
        cr_base_egrovfl + 16,
        cr_base_egrovfl + 17,
};
4902 
4903 /*
4904  * same as cntr7322names and cntr7322indices, but for port-specific counters.
4905  * portcntr7322indices is somewhat complicated by some registers needing
4906  * adjustments of various kinds, and those are ORed with _PORT_VIRT_FLAG
4907  */
4908 static const char portcntr7322names[] =
4909         "TxPkt\n"
4910         "TxFlowPkt\n"
4911         "TxWords\n"
4912         "RxPkt\n"
4913         "RxFlowPkt\n"
4914         "RxWords\n"
4915         "TxFlowStall\n"
4916         "TxDmaDesc\n"  /* 7220 and 7322-only */
4917         "E RxDlidFltr\n"  /* 7220 and 7322-only */
4918         "IBStatusChng\n"
4919         "IBLinkDown\n"
4920         "IBLnkRecov\n"
4921         "IBRxLinkErr\n"
4922         "IBSymbolErr\n"
4923         "RxLLIErr\n"
4924         "RxBadFormat\n"
4925         "RxBadLen\n"
4926         "RxBufOvrfl\n"
4927         "RxEBP\n"
4928         "RxFlowCtlErr\n"
4929         "RxICRCerr\n"
4930         "RxLPCRCerr\n"
4931         "RxVCRCerr\n"
4932         "RxInvalLen\n"
4933         "RxInvalPKey\n"
4934         "RxPktDropped\n"
4935         "TxBadLength\n"
4936         "TxDropped\n"
4937         "TxInvalLen\n"
4938         "TxUnderrun\n"
4939         "TxUnsupVL\n"
4940         "RxLclPhyErr\n" /* 7220 and 7322-only from here down */
4941         "RxVL15Drop\n"
4942         "RxVlErr\n"
4943         "XcessBufOvfl\n"
4944         "RxQPBadCtxt\n" /* 7322-only from here down */
4945         "TXBadHeader\n"
4946         ;
4947 
/*
 * Per-port counter sources, one entry per line of portcntr7322names.
 * _PORT_VIRT_FLAG entries are fetched through qib_portcntr_7322()
 * (software-adjusted "virtual" counters); _PORT_64BIT_FLAG entries are
 * 64-bit chip counter registers; all others are plain 32-bit chip
 * counters.  See qib_read_7322portcntrs() for the dispatch.
 */
static const u32 portcntr7322indices[] = {
	QIBPORTCNTR_PKTSEND | _PORT_VIRT_FLAG,
	crp_pktsendflow,
	QIBPORTCNTR_WORDSEND | _PORT_VIRT_FLAG,
	QIBPORTCNTR_PKTRCV | _PORT_VIRT_FLAG,
	crp_pktrcvflowctrl,
	QIBPORTCNTR_WORDRCV | _PORT_VIRT_FLAG,
	QIBPORTCNTR_SENDSTALL | _PORT_VIRT_FLAG,
	crp_txsdmadesc | _PORT_64BIT_FLAG,
	crp_rxdlidfltr,
	crp_ibstatuschange,
	QIBPORTCNTR_IBLINKDOWN | _PORT_VIRT_FLAG,
	QIBPORTCNTR_IBLINKERRRECOV | _PORT_VIRT_FLAG,
	QIBPORTCNTR_ERRLINK | _PORT_VIRT_FLAG,
	QIBPORTCNTR_IBSYMBOLERR | _PORT_VIRT_FLAG,
	QIBPORTCNTR_LLI | _PORT_VIRT_FLAG,
	QIBPORTCNTR_BADFORMAT | _PORT_VIRT_FLAG,
	QIBPORTCNTR_ERR_RLEN | _PORT_VIRT_FLAG,
	QIBPORTCNTR_RCVOVFL | _PORT_VIRT_FLAG,
	QIBPORTCNTR_RCVEBP | _PORT_VIRT_FLAG,
	crp_rcvflowctrlviol,
	QIBPORTCNTR_ERRICRC | _PORT_VIRT_FLAG,
	QIBPORTCNTR_ERRLPCRC | _PORT_VIRT_FLAG,
	QIBPORTCNTR_ERRVCRC | _PORT_VIRT_FLAG,
	QIBPORTCNTR_INVALIDRLEN | _PORT_VIRT_FLAG,
	QIBPORTCNTR_ERRPKEY | _PORT_VIRT_FLAG,
	QIBPORTCNTR_RXDROPPKT | _PORT_VIRT_FLAG,
	crp_txminmaxlenerr,
	crp_txdroppedpkt,
	crp_txlenerr,
	crp_txunderrun,
	crp_txunsupvl,
	QIBPORTCNTR_RXLOCALPHYERR | _PORT_VIRT_FLAG,
	QIBPORTCNTR_VL15PKTDROP | _PORT_VIRT_FLAG,
	QIBPORTCNTR_RXVLERR | _PORT_VIRT_FLAG,
	QIBPORTCNTR_EXCESSBUFOVFL | _PORT_VIRT_FLAG,
	crp_rxqpinvalidctxt,
	crp_txhdrerr,
};
4987 
/*
 * do all the setup to make the counter reads efficient later:
 * compute counter counts and name-string lengths once, and allocate
 * the scratch arrays the read routines fill in.
 */
static void init_7322_cntrnames(struct qib_devdata *dd)
{
	int i, j = 0;
	char *s;

	/*
	 * Count device counter names, but stop once we have included one
	 * per-context EgrOvfl entry for each *configured* context: j stays
	 * 0 until "Ctxt0EgrOvfl" is seen, then counts contexts from 1, and
	 * the loop ends when j exceeds cfgctxts (or the list runs out).
	 */
	for (i = 0, s = (char *)cntr7322names; s && j <= dd->cfgctxts;
	     i++) {
		/* we always have at least one counter before the egrovfl */
		if (!j && !strncmp("Ctxt0EgrOvfl", s + 1, 12))
			j = 1;
		s = strchr(s + 1, '\n');
		if (s && j)
			j++;
	}
	dd->cspec->ncntrs = i;
	if (!s)
		/* full list; size is without terminating null */
		dd->cspec->cntrnamelen = sizeof(cntr7322names) - 1;
	else
		/* trimmed list: length up to and including the last '\n' kept */
		dd->cspec->cntrnamelen = 1 + s - cntr7322names;
	/* NULL result is tolerated; qib_read_7322cntrs() checks before use */
	dd->cspec->cntrs = kmalloc_array(dd->cspec->ncntrs, sizeof(u64),
					 GFP_KERNEL);

	/* port counter list is fixed; just count the '\n'-terminated names */
	for (i = 0, s = (char *)portcntr7322names; s; i++)
		s = strchr(s + 1, '\n');
	dd->cspec->nportcntrs = i - 1;
	dd->cspec->portcntrnamelen = sizeof(portcntr7322names) - 1;
	for (i = 0; i < dd->num_pports; ++i) {
		/* NULL tolerated; qib_read_7322portcntrs() checks before use */
		dd->pport[i].cpspec->portcntrs =
			kmalloc_array(dd->cspec->nportcntrs, sizeof(u64),
				      GFP_KERNEL);
	}
}
5022 
5023 static u32 qib_read_7322cntrs(struct qib_devdata *dd, loff_t pos, char **namep,
5024                               u64 **cntrp)
5025 {
5026         u32 ret;
5027 
5028         if (namep) {
5029                 ret = dd->cspec->cntrnamelen;
5030                 if (pos >= ret)
5031                         ret = 0; /* final read after getting everything */
5032                 else
5033                         *namep = (char *) cntr7322names;
5034         } else {
5035                 u64 *cntr = dd->cspec->cntrs;
5036                 int i;
5037 
5038                 ret = dd->cspec->ncntrs * sizeof(u64);
5039                 if (!cntr || pos >= ret) {
5040                         /* everything read, or couldn't get memory */
5041                         ret = 0;
5042                         goto done;
5043                 }
5044                 *cntrp = cntr;
5045                 for (i = 0; i < dd->cspec->ncntrs; i++)
5046                         if (cntr7322indices[i] & _PORT_64BIT_FLAG)
5047                                 *cntr++ = read_7322_creg(dd,
5048                                                          cntr7322indices[i] &
5049                                                          _PORT_CNTR_IDXMASK);
5050                         else
5051                                 *cntr++ = read_7322_creg32(dd,
5052                                                            cntr7322indices[i]);
5053         }
5054 done:
5055         return ret;
5056 }
5057 
5058 static u32 qib_read_7322portcntrs(struct qib_devdata *dd, loff_t pos, u32 port,
5059                                   char **namep, u64 **cntrp)
5060 {
5061         u32 ret;
5062 
5063         if (namep) {
5064                 ret = dd->cspec->portcntrnamelen;
5065                 if (pos >= ret)
5066                         ret = 0; /* final read after getting everything */
5067                 else
5068                         *namep = (char *)portcntr7322names;
5069         } else {
5070                 struct qib_pportdata *ppd = &dd->pport[port];
5071                 u64 *cntr = ppd->cpspec->portcntrs;
5072                 int i;
5073 
5074                 ret = dd->cspec->nportcntrs * sizeof(u64);
5075                 if (!cntr || pos >= ret) {
5076                         /* everything read, or couldn't get memory */
5077                         ret = 0;
5078                         goto done;
5079                 }
5080                 *cntrp = cntr;
5081                 for (i = 0; i < dd->cspec->nportcntrs; i++) {
5082                         if (portcntr7322indices[i] & _PORT_VIRT_FLAG)
5083                                 *cntr++ = qib_portcntr_7322(ppd,
5084                                         portcntr7322indices[i] &
5085                                         _PORT_CNTR_IDXMASK);
5086                         else if (portcntr7322indices[i] & _PORT_64BIT_FLAG)
5087                                 *cntr++ = read_7322_creg_port(ppd,
5088                                            portcntr7322indices[i] &
5089                                             _PORT_CNTR_IDXMASK);
5090                         else
5091                                 *cntr++ = read_7322_creg32_port(ppd,
5092                                            portcntr7322indices[i]);
5093                 }
5094         }
5095 done:
5096         return ret;
5097 }
5098 
/**
 * qib_get_7322_faststats - get word counters from chip before they overflow
 * @t: timer_list pointer, from which the qlogic_ib device qib_devdata
 *     is recovered
 *
 * VESTIGIAL IBA7322 has no "small fast counters", so the only
 * real purpose of this function is to maintain the notion of
 * "active time", which in turn is only logged into the eeprom,
 * which we don't have, yet, for 7322-based boards.
 *
 * called from add_timer
 */
static void qib_get_7322_faststats(struct timer_list *t)
{
	struct qib_devdata *dd = from_timer(dd, t, stats_timer);
	struct qib_pportdata *ppd;
	unsigned long flags;
	u64 traffic_wds;
	int pidx;

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;

		/*
		 * If port isn't enabled or not operational ports, or
		 * diags is running (can cause memory diags to fail)
		 * skip this port this time.
		 */
		if (!ppd->link_speed_supported || !(dd->flags & QIB_INITTED)
		    || dd->diag_client)
			continue;

		/*
		 * Maintain an activity timer, based on traffic
		 * exceeding a threshold, so we need to check the word-counts
		 * even if they are 64-bit.
		 */
		traffic_wds = qib_portcntr_7322(ppd, QIBPORTCNTR_WORDRCV) +
			qib_portcntr_7322(ppd, QIBPORTCNTR_WORDSEND);
		/* fold the delta since the last sample into the device total */
		spin_lock_irqsave(&ppd->dd->eep_st_lock, flags);
		traffic_wds -= ppd->dd->traffic_wds;
		ppd->dd->traffic_wds += traffic_wds;
		spin_unlock_irqrestore(&ppd->dd->eep_st_lock, flags);
		/*
		 * Turn QDR static DFE adaptation back off once the link has
		 * been up at QDR past qdr_dfe_time; r1 silicon takes a
		 * different init value.
		 */
		if (ppd->cpspec->qdr_dfe_on && (ppd->link_speed_active &
						QIB_IB_QDR) &&
		    (ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED |
				    QIBL_LINKACTIVE)) &&
		    ppd->cpspec->qdr_dfe_time &&
		    time_is_before_jiffies(ppd->cpspec->qdr_dfe_time)) {
			ppd->cpspec->qdr_dfe_on = 0;

			qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
					    ppd->dd->cspec->r1 ?
					    QDR_STATIC_ADAPT_INIT_R1 :
					    QDR_STATIC_ADAPT_INIT);
			force_h1(ppd);
		}
	}
	/* re-arm for the next sampling interval */
	mod_timer(&dd->stats_timer, jiffies + HZ * ACTIVITY_TIMER);
}
5158 
5159 /*
5160  * If we were using MSIx, try to fallback to INTx.
5161  */
5162 static int qib_7322_intr_fallback(struct qib_devdata *dd)
5163 {
5164         if (!dd->cspec->num_msix_entries)
5165                 return 0; /* already using INTx */
5166 
5167         qib_devinfo(dd->pcidev,
5168                 "MSIx interrupt not detected, trying INTx interrupts\n");
5169         qib_7322_free_irq(dd);
5170         if (pci_alloc_irq_vectors(dd->pcidev, 1, 1, PCI_IRQ_LEGACY) < 0)
5171                 qib_dev_err(dd, "Failed to enable INTx\n");
5172         qib_setup_7322_interrupt(dd, 0);
5173         return 1;
5174 }
5175 
5176 /*
5177  * Reset the XGXS (between serdes and IBC).  Slightly less intrusive
5178  * than resetting the IBC or external link state, and useful in some
5179  * cases to cause some retraining.  To do this right, we reset IBC
5180  * as well, then return to previous state (which may be still in reset)
5181  * NOTE: some callers of this "know" this writes the current value
5182  * of cpspec->ibcctrl_a as part of it's operation, so if that changes,
5183  * check all callers.
5184  */
static void qib_7322_mini_pcs_reset(struct qib_pportdata *ppd)
{
	u64 val;
	struct qib_devdata *dd = ppd->dd;
	const u64 reset_bits = SYM_MASK(IBPCSConfig_0, xcv_rreset) |
		SYM_MASK(IBPCSConfig_0, xcv_treset) |
		SYM_MASK(IBPCSConfig_0, tx_rx_reset);

	val = qib_read_kreg_port(ppd, krp_ib_pcsconfig);
	/*
	 * Mask the statusValidNoEop hw error across the reset; it is
	 * explicitly cleared again below, so it is evidently expected
	 * to fire spuriously during the sequence.
	 */
	qib_write_kreg(dd, kr_hwerrmask,
		       dd->cspec->hwerrmask & ~HWE_MASK(statusValidNoEop));
	/* take the IB link down (clear IBLinkEn) while PCS is in reset */
	qib_write_kreg_port(ppd, krp_ibcctrl_a,
			    ppd->cpspec->ibcctrl_a &
			    ~SYM_MASK(IBCCtrlA_0, IBLinkEn));

	/* assert the reset bits, flush via a scratch read, then deassert */
	qib_write_kreg_port(ppd, krp_ib_pcsconfig, val | reset_bits);
	qib_read_kreg32(dd, kr_scratch);
	qib_write_kreg_port(ppd, krp_ib_pcsconfig, val & ~reset_bits);
	/* restore ibcctrl_a; callers rely on this write (see header comment) */
	qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
	qib_write_kreg(dd, kr_scratch, 0ULL);
	/* clear any error the reset raised, then restore the error mask */
	qib_write_kreg(dd, kr_hwerrclear,
		       SYM_MASK(HwErrClear, statusValidNoEopClear));
	qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
}
5209 
5210 /*
5211  * This code for non-IBTA-compliant IB speed negotiation is only known to
5212  * work for the SDR to DDR transition, and only between an HCA and a switch
5213  * with recent firmware.  It is based on observed heuristics, rather than
5214  * actual knowledge of the non-compliant speed negotiation.
5215  * It has a number of hard-coded fields, since the hope is to rewrite this
 * when a spec is available on how the negotiation is intended to work.
5217  */
static void autoneg_7322_sendpkt(struct qib_pportdata *ppd, u32 *hdr,
				 u32 dcnt, u32 *data)
{
	int i;
	u64 pbc;
	u32 __iomem *piobuf;
	u32 pnum, control, len;
	struct qib_devdata *dd = ppd->dd;

	i = 0;
	len = 7 + dcnt + 1; /* 7 dword header, dword data, icrc */
	control = qib_7322_setpbc_control(ppd, len, 0, 15);
	pbc = ((u64) control << 32) | len;
	/* briefly poll for a free PIO buffer; silently give up after ~16 tries */
	while (!(piobuf = qib_7322_getsendbuf(ppd, pbc, &pnum))) {
		if (i++ > 15)
			return;
		udelay(2);
	}
	/* disable header check on this packet, since it can't be valid */
	dd->f_txchk_change(dd, pnum, 1, TXCHK_CHG_TYPE_DIS1, NULL);
	/* PBC goes in the first two dwords; flush before copying the rest */
	writeq(pbc, piobuf);
	qib_flush_wc();
	qib_pio_copy(piobuf + 2, hdr, 7);
	qib_pio_copy(piobuf + 9, data, dcnt);
	if (dd->flags & QIB_USE_SPCL_TRIG) {
		/* chips needing a special trigger write to launch the send */
		u32 spcl_off = (pnum >= dd->piobcnt2k) ? 2047 : 1023;

		qib_flush_wc();
		__raw_writel(0xaebecede, piobuf + spcl_off);
	}
	qib_flush_wc();
	qib_sendbuf_done(dd, pnum);
	/* and re-enable hdr check */
	dd->f_txchk_change(dd, pnum, 1, TXCHK_CHG_TYPE_ENAB1, NULL);
}
5253 
5254 /*
5255  * _start packet gets sent twice at start, _done gets sent twice at end
5256  */
5257 static void qib_autoneg_7322_send(struct qib_pportdata *ppd, int which)
5258 {
5259         struct qib_devdata *dd = ppd->dd;
5260         static u32 swapped;
5261         u32 dw, i, hcnt, dcnt, *data;
5262         static u32 hdr[7] = { 0xf002ffff, 0x48ffff, 0x6400abba };
5263         static u32 madpayload_start[0x40] = {
5264                 0x1810103, 0x1, 0x0, 0x0, 0x2c90000, 0x2c9, 0x0, 0x0,
5265                 0xffffffff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
5266                 0x1, 0x1388, 0x15e, 0x1, /* rest 0's */
5267                 };
5268         static u32 madpayload_done[0x40] = {
5269                 0x1810103, 0x1, 0x0, 0x0, 0x2c90000, 0x2c9, 0x0, 0x0,
5270                 0xffffffff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
5271                 0x40000001, 0x1388, 0x15e, /* rest 0's */
5272                 };
5273 
5274         dcnt = ARRAY_SIZE(madpayload_start);
5275         hcnt = ARRAY_SIZE(hdr);
5276         if (!swapped) {
5277                 /* for maintainability, do it at runtime */
5278                 for (i = 0; i < hcnt; i++) {
5279                         dw = (__force u32) cpu_to_be32(hdr[i]);
5280                         hdr[i] = dw;
5281                 }
5282                 for (i = 0; i < dcnt; i++) {
5283                         dw = (__force u32) cpu_to_be32(madpayload_start[i]);
5284                         madpayload_start[i] = dw;
5285                         dw = (__force u32) cpu_to_be32(madpayload_done[i]);
5286                         madpayload_done[i] = dw;
5287                 }
5288                 swapped = 1;
5289         }
5290 
5291         data = which ? madpayload_done : madpayload_start;
5292 
5293         autoneg_7322_sendpkt(ppd, hdr, dcnt, data);
5294         qib_read_kreg64(dd, kr_scratch);
5295         udelay(2);
5296         autoneg_7322_sendpkt(ppd, hdr, dcnt, data);
5297         qib_read_kreg64(dd, kr_scratch);
5298         udelay(2);
5299 }
5300 
5301 /*
5302  * Do the absolute minimum to cause an IB speed change, and make it
5303  * ready, but don't actually trigger the change.   The caller will
5304  * do that when ready (if link is in Polling training state, it will
5305  * happen immediately, otherwise when link next goes down)
5306  *
 * This routine should only be used as part of the DDR autonegotiation
5308  * code for devices that are not compliant with IB 1.2 (or code that
5309  * fixes things up for same).
5310  *
5311  * When link has gone down, and autoneg enabled, or autoneg has
5312  * failed and we give up until next time we set both speeds, and
 * then we want IBTA enabled as well as "use max enabled speed".
5314  */
5315 static void set_7322_ibspeed_fast(struct qib_pportdata *ppd, u32 speed)
5316 {
5317         u64 newctrlb;
5318 
5319         newctrlb = ppd->cpspec->ibcctrl_b & ~(IBA7322_IBC_SPEED_MASK |
5320                                     IBA7322_IBC_IBTA_1_2_MASK |
5321                                     IBA7322_IBC_MAX_SPEED_MASK);
5322 
5323         if (speed & (speed - 1)) /* multiple speeds */
5324                 newctrlb |= (speed << IBA7322_IBC_SPEED_LSB) |
5325                                     IBA7322_IBC_IBTA_1_2_MASK |
5326                                     IBA7322_IBC_MAX_SPEED_MASK;
5327         else
5328                 newctrlb |= speed == QIB_IB_QDR ?
5329                         IBA7322_IBC_SPEED_QDR | IBA7322_IBC_IBTA_1_2_MASK :
5330                         ((speed == QIB_IB_DDR ?
5331                           IBA7322_IBC_SPEED_DDR : IBA7322_IBC_SPEED_SDR));
5332 
5333         if (newctrlb == ppd->cpspec->ibcctrl_b)
5334                 return;
5335 
5336         ppd->cpspec->ibcctrl_b = newctrlb;
5337         qib_write_kreg_port(ppd, krp_ibcctrl_b, ppd->cpspec->ibcctrl_b);
5338         qib_write_kreg(ppd->dd, kr_scratch, 0);
5339 }
5340 
5341 /*
5342  * This routine is only used when we are not talking to another
5343  * IB 1.2-compliant device that we think can do DDR.
5344  * (This includes all existing switch chips as of Oct 2007.)
5345  * 1.2-compliant devices go directly to DDR prior to reaching INIT
5346  */
static void try_7322_autoneg(struct qib_pportdata *ppd)
{
	unsigned long flags;

	/* mark the heuristic DDR negotiation as in progress */
	spin_lock_irqsave(&ppd->lflags_lock, flags);
	ppd->lflags |= QIBL_IB_AUTONEG_INPROG;
	spin_unlock_irqrestore(&ppd->lflags_lock, flags);
	/* send the "start" packets, force DDR, and reset the PCS */
	qib_autoneg_7322_send(ppd, 0);
	set_7322_ibspeed_fast(ppd, QIB_IB_DDR);
	qib_7322_mini_pcs_reset(ppd);
	/* 2 msec is minimum length of a poll cycle */
	queue_delayed_work(ib_wq, &ppd->cpspec->autoneg_work,
			   msecs_to_jiffies(2));
}
5361 
5362 /*
5363  * Handle the empirically determined mechanism for auto-negotiation
5364  * of DDR speed with switches.
5365  */
static void autoneg_7322_work(struct work_struct *work)
{
	struct qib_pportdata *ppd;
	u32 i;
	unsigned long flags;

	ppd = container_of(work, struct qib_chippport_specific,
			    autoneg_work.work)->ppd;

	/*
	 * Busy wait for this first part, it should be at most a
	 * few hundred usec, since we scheduled ourselves for 2msec.
	 */
	for (i = 0; i < 25; i++) {
		if (SYM_FIELD(ppd->lastibcstat, IBCStatusA_0, LinkState)
		     == IB_7322_LT_STATE_POLLQUIET) {
			qib_set_linkstate(ppd, QIB_IB_LINKDOWN_DISABLE);
			break;
		}
		udelay(100);
	}

	if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))
		goto done; /* we got there early or told to stop */

	/*
	 * Empirically-determined wait/reset sequence: each wait completes
	 * early only if autoneg finishes (INPROG cleared elsewhere);
	 * otherwise the PCS is reset and we try the next, longer wait.
	 */
	/* we expect this to timeout */
	if (wait_event_timeout(ppd->cpspec->autoneg_wait,
			       !(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
			       msecs_to_jiffies(90)))
		goto done;
	qib_7322_mini_pcs_reset(ppd);

	/* we expect this to timeout */
	if (wait_event_timeout(ppd->cpspec->autoneg_wait,
			       !(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
			       msecs_to_jiffies(1700)))
		goto done;
	qib_7322_mini_pcs_reset(ppd);

	/* fall back to SDR for the final wait */
	set_7322_ibspeed_fast(ppd, QIB_IB_SDR);

	/*
	 * Wait up to 250 msec for link to train and get to INIT at DDR;
	 * this should terminate early.
	 */
	wait_event_timeout(ppd->cpspec->autoneg_wait,
		!(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
		msecs_to_jiffies(250));
done:
	if (ppd->lflags & QIBL_IB_AUTONEG_INPROG) {
		/* autoneg never completed; clear INPROG, and after the
		 * last allowed attempt record failure for this link-up */
		spin_lock_irqsave(&ppd->lflags_lock, flags);
		ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
		if (ppd->cpspec->autoneg_tries == AUTONEG_TRIES) {
			ppd->lflags |= QIBL_IB_AUTONEG_FAILED;
			ppd->cpspec->autoneg_tries = 0;
		}
		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
		/* restore the normally enabled speed set */
		set_7322_ibspeed_fast(ppd, ppd->link_speed_enabled);
	}
}
5426 
5427 /*
5428  * This routine is used to request IPG set in the QLogic switch.
5429  * Only called if r1.
5430  */
static void try_7322_ipg(struct qib_pportdata *ppd)
{
	struct qib_ibport *ibp = &ppd->ibport_data;
	struct ib_mad_send_buf *send_buf;
	struct ib_mad_agent *agent;
	struct ib_smp *smp;
	unsigned delay;
	int ret;

	agent = ibp->rvp.send_agent;
	if (!agent)
		goto retry;	/* no MAD agent yet; just reschedule */

	send_buf = ib_create_send_mad(agent, 0, 0, 0, IB_MGMT_MAD_HDR,
				      IB_MGMT_MAD_DATA, GFP_ATOMIC,
				      IB_MGMT_BASE_VERSION);
	if (IS_ERR(send_buf))
		goto retry;

	/* create (and cache in ibp->smi_ah) an AH on first use */
	if (!ibp->smi_ah) {
		struct ib_ah *ah;

		ah = qib_create_qp0_ah(ibp, be16_to_cpu(IB_LID_PERMISSIVE));
		if (IS_ERR(ah))
			ret = PTR_ERR(ah);
		else {
			send_buf->ah = ah;
			ibp->smi_ah = ibah_to_rvtah(ah);
			ret = 0;
		}
	} else {
		send_buf->ah = &ibp->smi_ah->ibah;
		ret = 0;
	}

	/* build the vendor SMP requesting the IPG setting */
	smp = send_buf->mad;
	smp->base_version = IB_MGMT_BASE_VERSION;
	smp->mgmt_class = IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE;
	smp->class_version = 1;
	smp->method = IB_MGMT_METHOD_SEND;
	smp->hop_cnt = 1;
	smp->attr_id = QIB_VENDOR_IPG;
	smp->attr_mod = 0;

	if (!ret)
		ret = ib_post_send_mad(send_buf, NULL);
	if (ret)
		/* AH creation or post failed; we own the buffer, free it */
		ib_free_send_mad(send_buf);
retry:
	/* exponential backoff between attempts; retried from ipg_7322_work */
	delay = 2 << ppd->cpspec->ipg_tries;
	queue_delayed_work(ib_wq, &ppd->cpspec->ipg_work,
			   msecs_to_jiffies(delay));
}
5484 
5485 /*
5486  * Timeout handler for setting IPG.
5487  * Only called if r1.
5488  */
5489 static void ipg_7322_work(struct work_struct *work)
5490 {
5491         struct qib_pportdata *ppd;
5492 
5493         ppd = container_of(work, struct qib_chippport_specific,
5494                            ipg_work.work)->ppd;
5495         if ((ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED | QIBL_LINKACTIVE))
5496             && ++ppd->cpspec->ipg_tries <= 10)
5497                 try_7322_ipg(ppd);
5498 }
5499 
5500 static u32 qib_7322_iblink_state(u64 ibcs)
5501 {
5502         u32 state = (u32)SYM_FIELD(ibcs, IBCStatusA_0, LinkState);
5503 
5504         switch (state) {
5505         case IB_7322_L_STATE_INIT:
5506                 state = IB_PORT_INIT;
5507                 break;
5508         case IB_7322_L_STATE_ARM:
5509                 state = IB_PORT_ARMED;
5510                 break;
5511         case IB_7322_L_STATE_ACTIVE:
5512                 /* fall through */
5513         case IB_7322_L_STATE_ACT_DEFER:
5514                 state = IB_PORT_ACTIVE;
5515                 break;
5516         default: /* fall through */
5517         case IB_7322_L_STATE_DOWN:
5518                 state = IB_PORT_DOWN;
5519                 break;
5520         }
5521         return state;
5522 }
5523 
5524 /* returns the IBTA port state, rather than the IBC link training state */
5525 static u8 qib_7322_phys_portstate(u64 ibcs)
5526 {
5527         u8 state = (u8)SYM_FIELD(ibcs, IBCStatusA_0, LinkTrainingState);
5528         return qib_7322_physportstate[state];
5529 }
5530 
5531 static int qib_7322_ib_updown(struct qib_pportdata *ppd, int ibup, u64 ibcs)
5532 {
5533         int ret = 0, symadj = 0;
5534         unsigned long flags;
5535         int mult;
5536 
5537         spin_lock_irqsave(&ppd->lflags_lock, flags);
5538         ppd->lflags &= ~QIBL_IB_FORCE_NOTIFY;
5539         spin_unlock_irqrestore(&ppd->lflags_lock, flags);
5540 
5541         /* Update our picture of width and speed from chip */
5542         if (ibcs & SYM_MASK(IBCStatusA_0, LinkSpeedQDR)) {
5543                 ppd->link_speed_active = QIB_IB_QDR;
5544                 mult = 4;
5545         } else if (ibcs & SYM_MASK(IBCStatusA_0, LinkSpeedActive)) {
5546                 ppd->link_speed_active = QIB_IB_DDR;
5547                 mult = 2;
5548         } else {
5549                 ppd->link_speed_active = QIB_IB_SDR;
5550                 mult = 1;
5551         }
5552         if (ibcs & SYM_MASK(IBCStatusA_0, LinkWidthActive)) {
5553                 ppd->link_width_active = IB_WIDTH_4X;
5554                 mult *= 4;
5555         } else
5556                 ppd->link_width_active = IB_WIDTH_1X;
5557         ppd->delay_mult = ib_rate_to_delay[mult_to_ib_rate(mult)];
5558 
5559         if (!ibup) {
5560                 u64 clr;
5561 
5562                 /* Link went down. */
5563                 /* do IPG MAD again after linkdown, even if last time failed */
5564                 ppd->cpspec->ipg_tries = 0;
5565                 clr = qib_read_kreg_port(ppd, krp_ibcstatus_b) &
5566                         (SYM_MASK(IBCStatusB_0, heartbeat_timed_out) |
5567                          SYM_MASK(IBCStatusB_0, heartbeat_crosstalk));
5568                 if (clr)
5569                         qib_write_kreg_port(ppd, krp_ibcstatus_b, clr);
5570                 if (!(ppd->lflags & (QIBL_IB_AUTONEG_FAILED |
5571                                      QIBL_IB_AUTONEG_INPROG)))
5572                         set_7322_ibspeed_fast(ppd, ppd->link_speed_enabled);
5573                 if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
5574                         struct qib_qsfp_data *qd =
5575                                 &ppd->cpspec->qsfp_data;
5576                         /* unlock the Tx settings, speed may change */
5577                         qib_write_kreg_port(ppd, krp_tx_deemph_override,
5578                                 SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
5579                                 reset_tx_deemphasis_override));
5580                         qib_cancel_sends(ppd);
5581                         /* on link down, ensure sane pcs state */
5582                         qib_7322_mini_pcs_reset(ppd);
5583                         /* schedule the qsfp refresh which should turn the link
5584                            off */
5585                         if (ppd->dd->flags & QIB_HAS_QSFP) {
5586                                 qd->t_insert = jiffies;
5587                                 queue_work(ib_wq, &qd->work);
5588                         }
5589                         spin_lock_irqsave(&ppd->sdma_lock, flags);
5590                         if (__qib_sdma_running(ppd))
5591                                 __qib_sdma_process_event(ppd,
5592                                         qib_sdma_event_e70_go_idle);
5593                         spin_unlock_irqrestore(&ppd->sdma_lock, flags);
5594                 }
5595                 clr = read_7322_creg32_port(ppd, crp_iblinkdown);
5596                 if (clr == ppd->cpspec->iblnkdownsnap)
5597                         ppd->cpspec->iblnkdowndelta++;
5598         } else {
5599                 if (qib_compat_ddr_negotiate &&
5600                     !(ppd->lflags & (QIBL_IB_AUTONEG_FAILED |
5601                                      QIBL_IB_AUTONEG_INPROG)) &&
5602                     ppd->link_speed_active == QIB_IB_SDR &&
5603                     (ppd->link_speed_enabled & QIB_IB_DDR)
5604                     && ppd->cpspec->autoneg_tries < AUTONEG_TRIES) {
5605                         /* we are SDR, and auto-negotiation enabled */
5606                         ++ppd->cpspec->autoneg_tries;
5607                         if (!ppd->cpspec->ibdeltainprog) {
5608                                 ppd->cpspec->ibdeltainprog = 1;
5609                                 ppd->cpspec->ibsymdelta +=
5610                                         read_7322_creg32_port(ppd,
5611                                                 crp_ibsymbolerr) -
5612                                                 ppd->cpspec->ibsymsnap;
5613                                 ppd->cpspec->iblnkerrdelta +=
5614                                         read_7322_creg32_port(ppd,
5615                                                 crp_iblinkerrrecov) -
5616                                                 ppd->cpspec->iblnkerrsnap;
5617                         }
5618                         try_7322_autoneg(ppd);
5619                         ret = 1; /* no other IB status change processing */
5620                 } else if ((ppd->lflags & QIBL_IB_AUTONEG_INPROG) &&
5621                            ppd->link_speed_active == QIB_IB_SDR) {
5622                         qib_autoneg_7322_send(ppd, 1);
5623                         set_7322_ibspeed_fast(ppd, QIB_IB_DDR);
5624                         qib_7322_mini_pcs_reset(ppd);
5625                         udelay(2);
5626                         ret = 1; /* no other IB status change processing */
5627                 } else if ((ppd->lflags & QIBL_IB_AUTONEG_INPROG) &&
5628                            (ppd->link_speed_active & QIB_IB_DDR)) {
5629                         spin_lock_irqsave(&ppd->lflags_lock, flags);
5630                         ppd->lflags &= ~(QIBL_IB_AUTONEG_INPROG |
5631                                          QIBL_IB_AUTONEG_FAILED);
5632                         spin_unlock_irqrestore(&ppd->lflags_lock, flags);
5633                         ppd->cpspec->autoneg_tries = 0;
5634                         /* re-enable SDR, for next link down */
5635                         set_7322_ibspeed_fast(ppd, ppd->link_speed_enabled);
5636                         wake_up(&ppd->cpspec->autoneg_wait);
5637                         symadj = 1;
5638                 } else if (ppd->lflags & QIBL_IB_AUTONEG_FAILED) {
5639                         /*
5640                          * Clear autoneg failure flag, and do setup
5641                          * so we'll try next time link goes down and
5642                          * back to INIT (possibly connected to a
5643                          * different device).
5644                          */
5645                         spin_lock_irqsave(&ppd->lflags_lock, flags);
5646                         ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
5647                         spin_unlock_irqrestore(&ppd->lflags_lock, flags);
5648                         ppd->cpspec->ibcctrl_b |= IBA7322_IBC_IBTA_1_2_MASK;
5649                         symadj = 1;
5650                 }
5651                 if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
5652                         symadj = 1;
5653                         if (ppd->dd->cspec->r1 && ppd->cpspec->ipg_tries <= 10)
5654                                 try_7322_ipg(ppd);
5655                         if (!ppd->cpspec->recovery_init)
5656                                 setup_7322_link_recovery(ppd, 0);
5657                         ppd->cpspec->qdr_dfe_time = jiffies +
5658                                 msecs_to_jiffies(QDR_DFE_DISABLE_DELAY);
5659                 }
5660                 ppd->cpspec->ibmalfusesnap = 0;
5661                 ppd->cpspec->ibmalfsnap = read_7322_creg32_port(ppd,
5662                         crp_errlink);
5663         }
5664         if (symadj) {
5665                 ppd->cpspec->iblnkdownsnap =
5666                         read_7322_creg32_port(ppd, crp_iblinkdown);
5667                 if (ppd->cpspec->ibdeltainprog) {
5668                         ppd->cpspec->ibdeltainprog = 0;
5669                         ppd->cpspec->ibsymdelta += read_7322_creg32_port(ppd,
5670                                 crp_ibsymbolerr) - ppd->cpspec->ibsymsnap;
5671                         ppd->cpspec->iblnkerrdelta += read_7322_creg32_port(ppd,
5672                                 crp_iblinkerrrecov) - ppd->cpspec->iblnkerrsnap;
5673                 }
5674         } else if (!ibup && qib_compat_ddr_negotiate &&
5675                    !ppd->cpspec->ibdeltainprog &&
5676                         !(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
5677                 ppd->cpspec->ibdeltainprog = 1;
5678                 ppd->cpspec->ibsymsnap = read_7322_creg32_port(ppd,
5679                         crp_ibsymbolerr);
5680                 ppd->cpspec->iblnkerrsnap = read_7322_creg32_port(ppd,
5681                         crp_iblinkerrrecov);
5682         }
5683 
5684         if (!ret)
5685                 qib_setup_7322_setextled(ppd, ibup);
5686         return ret;
5687 }
5688 
5689 /*
5690  * Does read/modify/write to appropriate registers to
5691  * set output and direction bits selected by mask.
5692  * these are in their canonical postions (e.g. lsb of
5693  * dir will end up in D48 of extctrl on existing chips).
5694  * returns contents of GP Inputs.
5695  */
5696 static int gpio_7322_mod(struct qib_devdata *dd, u32 out, u32 dir, u32 mask)
5697 {
5698         u64 read_val, new_out;
5699         unsigned long flags;
5700 
5701         if (mask) {
5702                 /* some bits being written, lock access to GPIO */
5703                 dir &= mask;
5704                 out &= mask;
5705                 spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
5706                 dd->cspec->extctrl &= ~((u64)mask << SYM_LSB(EXTCtrl, GPIOOe));
5707                 dd->cspec->extctrl |= ((u64) dir << SYM_LSB(EXTCtrl, GPIOOe));
5708                 new_out = (dd->cspec->gpio_out & ~mask) | out;
5709 
5710                 qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl);
5711                 qib_write_kreg(dd, kr_gpio_out, new_out);
5712                 dd->cspec->gpio_out = new_out;
5713                 spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
5714         }
5715         /*
5716          * It is unlikely that a read at this time would get valid
5717          * data on a pin whose direction line was set in the same
5718          * call to this function. We include the read here because
5719          * that allows us to potentially combine a change on one pin with
5720          * a read on another, and because the old code did something like
5721          * this.
5722          */
5723         read_val = qib_read_kreg64(dd, kr_extstatus);
5724         return SYM_FIELD(read_val, EXTStatus, GPIOIn);
5725 }
5726 
5727 /* Enable writes to config EEPROM, if possible. Returns previous state */
5728 static int qib_7322_eeprom_wen(struct qib_devdata *dd, int wen)
5729 {
5730         int prev_wen;
5731         u32 mask;
5732 
5733         mask = 1 << QIB_EEPROM_WEN_NUM;
5734         prev_wen = ~gpio_7322_mod(dd, 0, 0, 0) >> QIB_EEPROM_WEN_NUM;
5735         gpio_7322_mod(dd, wen ? 0 : mask, mask, mask);
5736 
5737         return prev_wen & 1;
5738 }
5739 
/*
 * Read fundamental info we need to use the chip.  These are
 * the registers that describe chip capabilities, and are
 * saved in shadow registers.
 */
static void get_7322_chip_params(struct qib_devdata *dd)
{
	u64 val;
	u32 piobufs;
	int mtu;

	dd->palign = qib_read_kreg32(dd, kr_pagealign);

	dd->uregbase = qib_read_kreg32(dd, kr_userregbase);

	dd->rcvtidcnt = qib_read_kreg32(dd, kr_rcvtidcnt);
	dd->rcvtidbase = qib_read_kreg32(dd, kr_rcvtidbase);
	dd->rcvegrbase = qib_read_kreg32(dd, kr_rcvegrbase);
	dd->piobufbase = qib_read_kreg64(dd, kr_sendpiobufbase);
	/* low 32 bits of sendpiobufbase are the 2K-buffer base offset */
	dd->pio2k_bufbase = dd->piobufbase & 0xffffffff;

	/* counts and sizes come packed as 32-bit halves of 64-bit registers */
	val = qib_read_kreg64(dd, kr_sendpiobufcnt);
	dd->piobcnt2k = val & ~0U;
	dd->piobcnt4k = val >> 32;
	val = qib_read_kreg64(dd, kr_sendpiosize);
	dd->piosize2k = val & ~0U;
	dd->piosize4k = val >> 32;

	/* fall back to the driver default if the module-param MTU is invalid */
	mtu = ib_mtu_enum_to_int(qib_ibmtu);
	if (mtu == -1)
		mtu = QIB_DEFAULT_MTU;
	dd->pport[0].ibmtu = (u32)mtu;
	dd->pport[1].ibmtu = (u32)mtu;

	/* these may be adjusted in init_chip_wc_pat() */
	dd->pio2kbase = (u32 __iomem *)
		((char __iomem *) dd->kregbase + dd->pio2k_bufbase);
	/* the 4K buffer base offset lives in the high half of piobufbase */
	dd->pio4kbase = (u32 __iomem *)
		((char __iomem *) dd->kregbase +
		 (dd->piobufbase >> 32));
	/*
	 * 4K buffers take 2 pages; we use roundup just to be
	 * paranoid; we calculate it once here, rather than on
	 * every buf allocate
	 */
	dd->align4k = ALIGN(dd->piosize4k, dd->palign);

	piobufs = dd->piobcnt4k + dd->piobcnt2k + NUM_VL15_BUFS;

	/* number of u64 pioavail shadow words, at 2 bits per buffer */
	dd->pioavregs = ALIGN(piobufs, sizeof(u64) * BITS_PER_BYTE / 2) /
		(sizeof(u64) * BITS_PER_BYTE / 2);
}
5792 
/*
 * The chip base addresses in cspec and cpspec have to be set
 * after possible init_chip_wc_pat(), rather than in
 * get_7322_chip_params(), so split out as separate function
 */
static void qib_7322_set_baseaddrs(struct qib_devdata *dd)
{
	u32 cregbase;

	/* device-wide counter registers, at an offset from kregbase */
	cregbase = qib_read_kreg32(dd, kr_counterregbase);

	dd->cspec->cregbase = (u64 __iomem *)(cregbase +
		(char __iomem *)dd->kregbase);

	dd->egrtidbase = (u64 __iomem *)
		((char __iomem *) dd->kregbase + dd->rcvegrbase);

	/* port registers are defined as relative to base of chip */
	dd->pport[0].cpspec->kpregbase =
		(u64 __iomem *)((char __iomem *)dd->kregbase);
	/* port 1's kregs are one palign stride past port 0's */
	dd->pport[1].cpspec->kpregbase =
		(u64 __iomem *)(dd->palign +
		(char __iomem *)dd->kregbase);
	/* per-port counter bases are read from each port's own kregs */
	dd->pport[0].cpspec->cpregbase =
		(u64 __iomem *)(qib_read_kreg_port(&dd->pport[0],
		kr_counterregbase) + (char __iomem *)dd->kregbase);
	dd->pport[1].cpspec->cpregbase =
		(u64 __iomem *)(qib_read_kreg_port(&dd->pport[1],
		kr_counterregbase) + (char __iomem *)dd->kregbase);
}
5823 
/*
 * This is a fairly special-purpose observer, so we only support
 * the port-specific parts of SendCtrl
 */

/* SendCtrl bits whose value is mirrored in the ppd->p_sendctrl shadow */
#define SENDCTRL_SHADOWED (SYM_MASK(SendCtrl_0, SendEnable) |		\
			   SYM_MASK(SendCtrl_0, SDmaEnable) |		\
			   SYM_MASK(SendCtrl_0, SDmaIntEnable) |	\
			   SYM_MASK(SendCtrl_0, SDmaSingleDescriptor) | \
			   SYM_MASK(SendCtrl_0, SDmaHalt) |		\
			   SYM_MASK(SendCtrl_0, IBVLArbiterEn) |	\
			   SYM_MASK(SendCtrl_0, ForceCreditUpToDate))

/*
 * Diag observer hook for per-port SendCtrl accesses: performs the
 * requested read and/or masked write while keeping the driver's
 * p_sendctrl shadow coherent, all under sendctrl_lock.
 *
 * @offs:    register offset being accessed; matched against each port's
 *           SendCtrl to find the owning ppd (NULL if none matches)
 * @data:    in/out data for the access
 * @mask:    bits to write; zero bits are read instead
 * @only_32: nonzero for a 32-bit access
 *
 * Returns the access width in bytes (4 or 8).
 */
static int sendctrl_hook(struct qib_devdata *dd,
			 const struct diag_observer *op, u32 offs,
			 u64 *data, u64 mask, int only_32)
{
	unsigned long flags;
	unsigned idx;
	unsigned pidx;
	struct qib_pportdata *ppd = NULL;
	u64 local_data, all_bits;

	/*
	 * The fixed correspondence between Physical ports and pports is
	 * severed. We need to hunt for the ppd that corresponds
	 * to the offset we got. And we have to do that without admitting
	 * we know the stride, apparently.
	 */
	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		u64 __iomem *psptr;
		u32 psoffs;

		ppd = dd->pport + pidx;
		if (!ppd->cpspec->kpregbase)
			continue;

		psptr = ppd->cpspec->kpregbase + krp_sendctrl;
		psoffs = (u32) (psptr - dd->kregbase) * sizeof(*psptr);
		if (psoffs == offs)
			break;
	}

	/* If pport is not being managed by driver, just avoid shadows. */
	if (pidx >= dd->num_pports)
		ppd = NULL;

	/* In any case, "idx" is flat index in kreg space */
	idx = offs / sizeof(u64);

	/* all_bits covers only the accessed width, so a full-width
	 * 32-bit write is still recognized as "all bits masked" */
	all_bits = ~0ULL;
	if (only_32)
		all_bits >>= 32;

	spin_lock_irqsave(&dd->sendctrl_lock, flags);
	if (!ppd || (mask & all_bits) != all_bits) {
		/*
		 * At least some mask bits are zero, so we need
		 * to read. The judgement call is whether from
		 * reg or shadow. First-cut: read reg, and complain
		 * if any bits which should be shadowed are different
		 * from their shadowed value.
		 */
		if (only_32)
			local_data = (u64)qib_read_kreg32(dd, idx);
		else
			local_data = qib_read_kreg64(dd, idx);
		*data = (local_data & ~mask) | (*data & mask);
	}
	if (mask) {
		/*
		 * At least some mask bits are one, so we need
		 * to write, but only shadow some bits.
		 */
		u64 sval, tval; /* Shadowed, transient */

		/*
		 * New shadow val is bits we don't want to touch,
		 * ORed with bits we do, that are intended for shadow.
		 */
		if (ppd) {
			sval = ppd->p_sendctrl & ~mask;
			sval |= *data & SENDCTRL_SHADOWED & mask;
			ppd->p_sendctrl = sval;
		} else
			sval = *data & SENDCTRL_SHADOWED & mask;
		tval = sval | (*data & ~SENDCTRL_SHADOWED & mask);
		qib_write_kreg(dd, idx, tval);
		/* scratch write flushes the SendCtrl update to the chip */
		qib_write_kreg(dd, kr_scratch, 0Ull);
	}
	spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
	return only_32 ? 4 : 8;
}
5917 
/* Diag observer for port 0 SendCtrl; bottom == top, so the range
 * covers just that single register. */
static const struct diag_observer sendctrl_0_observer = {
	sendctrl_hook, KREG_IDX(SendCtrl_0) * sizeof(u64),
	KREG_IDX(SendCtrl_0) * sizeof(u64)
};

/* Diag observer for port 1 SendCtrl (same single-register shape). */
static const struct diag_observer sendctrl_1_observer = {
	sendctrl_hook, KREG_IDX(SendCtrl_1) * sizeof(u64),
	KREG_IDX(SendCtrl_1) * sizeof(u64)
};

/* SDMA descriptor fetch priority; low 4 bits are written to
 * krp_senddmaprioritythld in qib_late_7322_initreg(). */
static ushort sdma_fetch_prio = 8;
module_param_named(sdma_fetch_prio, sdma_fetch_prio, ushort, S_IRUGO);
MODULE_PARM_DESC(sdma_fetch_prio, "SDMA descriptor fetch priority");

/* Besides logging QSFP events, we set appropriate TxDDS values */
static void init_txdds_table(struct qib_pportdata *ppd, int override);
5934 
/*
 * Work handler for QSFP module presence-change events.
 * On removal: force the physical link to DISABLED and clear QIBL_LINKV.
 * On insertion: wait out the module's power-up lag, refresh the cached
 * EEPROM data, reprogram the SerDes LE2 field and the TxDDS table, and
 * re-enable the physical link if the module was physically re-inserted.
 */
static void qsfp_7322_event(struct work_struct *work)
{
	struct qib_qsfp_data *qd;
	struct qib_pportdata *ppd;
	unsigned long pwrup;
	unsigned long flags;
	int ret;
	u32 le2;

	qd = container_of(work, struct qib_qsfp_data, work);
	ppd = qd->ppd;
	/* earliest time (jiffies) the module may be safely talked to */
	pwrup = qd->t_insert +
		msecs_to_jiffies(QSFP_PWR_LAG_MSEC - QSFP_MODPRS_LAG_MSEC);

	/* Delay for 20 msecs to allow ModPrs resistor to setup */
	mdelay(QSFP_MODPRS_LAG_MSEC);

	if (!qib_qsfp_mod_present(ppd)) {
		ppd->cpspec->qsfp_data.modpresent = 0;
		/* Set the physical link to disabled */
		qib_set_ib_7322_lstate(ppd, 0,
				       QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
		spin_lock_irqsave(&ppd->lflags_lock, flags);
		ppd->lflags &= ~QIBL_LINKV;
		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
	} else {
		/*
		 * Some QSFP's not only do not respond until the full power-up
		 * time, but may behave badly if we try. So hold off responding
		 * to insertion.
		 */
		while (1) {
			if (time_is_before_jiffies(pwrup))
				break;
			msleep(20);
		}

		ret = qib_refresh_qsfp_cache(ppd, &qd->cache);

		/*
		 * Need to change LE2 back to defaults if we couldn't
		 * read the cable type (to handle cable swaps), so do this
		 * even on failure to read cable information.  We don't
		 * get here for QME, so IS_QME check not needed here.
		 */
		if (!ret && !ppd->dd->cspec->r1) {
			if (QSFP_IS_ACTIVE_FAR(qd->cache.tech))
				le2 = LE2_QME;
			else if (qd->cache.atten[1] >= qib_long_atten &&
				 QSFP_IS_CU(qd->cache.tech))
				le2 = LE2_5m;
			else
				le2 = LE2_DEFAULT;
		} else
			le2 = LE2_DEFAULT;
		/* write LE2 into bits 9:7 of SerDes register 13, all chans */
		ibsd_wr_allchans(ppd, 13, (le2 << 7), BMASK(9, 7));
		/*
		 * We always change parameteters, since we can choose
		 * values for cables without eeproms, and the cable may have
		 * changed from a cable with full or partial eeprom content
		 * to one with partial or no content.
		 */
		init_txdds_table(ppd, 0);
		/* The physical link is being re-enabled only when the
		 * previous state was DISABLED and the VALID bit is not
		 * set. This should only happen when  the cable has been
		 * physically pulled. */
		if (!ppd->cpspec->qsfp_data.modpresent &&
		    (ppd->lflags & (QIBL_LINKV | QIBL_IB_LINK_DISABLED))) {
			ppd->cpspec->qsfp_data.modpresent = 1;
			qib_set_ib_7322_lstate(ppd, 0,
				QLOGIC_IB_IBCC_LINKINITCMD_SLEEP);
			spin_lock_irqsave(&ppd->lflags_lock, flags);
			ppd->lflags |= QIBL_LINKV;
			spin_unlock_irqrestore(&ppd->lflags_lock, flags);
		}
	}
}
6013 
/*
 * There is little we can do but complain to the user if QSFP
 * initialization fails.
 */
static void qib_init_7322_qsfp(struct qib_pportdata *ppd)
{
	unsigned long flags;
	struct qib_qsfp_data *qd = &ppd->cpspec->qsfp_data;
	struct qib_devdata *dd = ppd->dd;
	u64 mod_prs_bit = QSFP_GPIO_MOD_PRS_N;

	/* port 2's ModPrsN pin sits QSFP_GPIO_PORT2_SHIFT bits higher */
	mod_prs_bit <<= (QSFP_GPIO_PORT2_SHIFT * ppd->hw_pidx);
	qd->ppd = ppd;
	/* register qsfp_7322_event as the presence-change handler */
	qib_qsfp_init(qd, qsfp_7322_event);
	/* invert the pin and add it to the GPIO mask, under the GPIO lock
	 * (presumably enabling its change interrupt -- see gpio_mask use) */
	spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
	dd->cspec->extctrl |= (mod_prs_bit << SYM_LSB(EXTCtrl, GPIOInvert));
	dd->cspec->gpio_mask |= mod_prs_bit;
	qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl);
	qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
	spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
}
6035 
/*
 * called at device initialization time, and also if the txselect
 * module parameter is changed.  This is used for cables that don't
 * have valid QSFP EEPROMs (not present, or attenuation is zero).
 * We initialize to the default, then if there is a specific
 * unit,port match, we use that (and set it immediately, for the
 * current speed, if the link is at INIT or better).
 * String format is "default# unit#,port#=# ... u,p=#", separators must
 * be a SPACE character.  A newline terminates.  The u,p=# tuples may
 * optionally have "u,p=#,#", where the final # is the H1 value
 * The last specific match is used (actually, all are used, but last
 * one is the one that winds up set); if none at all, fall back on default.
 */
static void set_no_qsfp_atten(struct qib_devdata *dd, int change)
{
	char *nxt, *str;
	u32 pidx, unit, port, deflt, h1;
	unsigned long val;
	int any = 0, seth1;
	int txdds_size;

	str = txselect_list;

	/* default number is validated in setup_txselect() */
	deflt = simple_strtoul(str, &nxt, 0);
	for (pidx = 0; pidx < dd->num_pports; ++pidx)
		dd->pport[pidx].cpspec->no_eep = deflt;

	/* mezz boards accept additional manufacturer-table indices */
	txdds_size = TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ;
	if (IS_QME(dd) || IS_QMH(dd))
		txdds_size += TXDDS_MFG_SZ;

	/* walk the "unit,port=val[,h1]" tuples, space-separated */
	while (*nxt && nxt[1]) {
		str = ++nxt;
		unit = simple_strtoul(str, &nxt, 0);
		if (nxt == str || !*nxt || *nxt != ',') {
			while (*nxt && *nxt++ != ' ') /* skip to next, if any */
				;
			continue;
		}
		str = ++nxt;
		port = simple_strtoul(str, &nxt, 0);
		if (nxt == str || *nxt != '=') {
			while (*nxt && *nxt++ != ' ') /* skip to next, if any */
				;
			continue;
		}
		str = ++nxt;
		val = simple_strtoul(str, &nxt, 0);
		if (nxt == str) {
			while (*nxt && *nxt++ != ' ') /* skip to next, if any */
				;
			continue;
		}
		/* silently ignore out-of-range table indices */
		if (val >= txdds_size)
			continue;
		seth1 = 0;
		h1 = 0; /* gcc thinks it might be used uninitted */
		if (*nxt == ',' && nxt[1]) {
			str = ++nxt;
			h1 = (u32)simple_strtoul(str, &nxt, 0);
			if (nxt == str)
				while (*nxt && *nxt++ != ' ') /* skip */
					;
			else
				seth1 = 1;
		}
		/* apply the tuple to matching port(s) of this unit only */
		for (pidx = 0; dd->unit == unit && pidx < dd->num_pports;
		     ++pidx) {
			struct qib_pportdata *ppd = &dd->pport[pidx];

			if (ppd->port != port || !ppd->link_speed_supported)
				continue;
			ppd->cpspec->no_eep = val;
			if (seth1)
				ppd->cpspec->h1_val = h1;
			/* now change the IBC and serdes, overriding generic */
			init_txdds_table(ppd, 1);
			/* Re-enable the physical state machine on mezz boards
			 * now that the correct settings have been set.
			 * QSFP boards are handles by the QSFP event handler */
			if (IS_QMH(dd) || IS_QME(dd))
				qib_set_ib_7322_lstate(ppd, 0,
					    QLOGIC_IB_IBCC_LINKINITCMD_SLEEP);
			any++;
		}
		if (*nxt == '\n')
			break; /* done */
	}
	if (change && !any) {
		/* no specific setting, use the default.
		 * Change the IBC and serdes, but since it's
		 * general, don't override specific settings.
		 */
		for (pidx = 0; pidx < dd->num_pports; ++pidx)
			if (dd->pport[pidx].link_speed_supported)
				init_txdds_table(&dd->pport[pidx], 0);
	}
}
6135 
6136 /* handle the txselect parameter changing */
6137 static int setup_txselect(const char *str, const struct kernel_param *kp)
6138 {
6139         struct qib_devdata *dd;
6140         unsigned long index, val;
6141         char *n;
6142 
6143         if (strlen(str) >= ARRAY_SIZE(txselect_list)) {
6144                 pr_info("txselect_values string too long\n");
6145                 return -ENOSPC;
6146         }
6147         val = simple_strtoul(str, &n, 0);
6148         if (n == str || val >= (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ +
6149                                 TXDDS_MFG_SZ)) {
6150                 pr_info("txselect_values must start with a number < %d\n",
6151                         TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ + TXDDS_MFG_SZ);
6152                 return -EINVAL;
6153         }
6154         strncpy(txselect_list, str, ARRAY_SIZE(txselect_list) - 1);
6155 
6156         xa_for_each(&qib_dev_table, index, dd)
6157                 if (dd->deviceid == PCI_DEVICE_ID_QLOGIC_IB_7322)
6158                         set_no_qsfp_atten(dd, 1);
6159         return 0;
6160 }
6161 
/*
 * Write the final few registers that depend on some of the
 * init setup.  Done late in init, just before bringing up
 * the serdes.
 *
 * Returns 0 on success, -EINVAL if the SendPIOAvailAddr readback
 * does not match what was written.
 */
static int qib_late_7322_initreg(struct qib_devdata *dd)
{
	int ret = 0, n;
	u64 val;

	qib_write_kreg(dd, kr_rcvhdrentsize, dd->rcvhdrentsize);
	qib_write_kreg(dd, kr_rcvhdrsize, dd->rcvhdrsize);
	qib_write_kreg(dd, kr_rcvhdrcnt, dd->rcvhdrcnt);
	qib_write_kreg(dd, kr_sendpioavailaddr, dd->pioavailregs_phys);
	/* read back to verify the DMA address took; mismatch is fatal */
	val = qib_read_kreg64(dd, kr_sendpioavailaddr);
	if (val != dd->pioavailregs_phys) {
		qib_dev_err(dd,
			"Catastrophic software error, SendPIOAvailAddr written as %lx, read back as %llx\n",
			(unsigned long) dd->pioavailregs_phys,
			(unsigned long long) val);
		ret = -EINVAL;
	}

	/* configure send-side checking over all PIO buffers */
	n = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS;
	qib_7322_txchk_change(dd, 0, n, TXCHK_CHG_TYPE_KERN, NULL);
	/* driver sends get pkey, lid, etc. checking also, to catch bugs */
	qib_7322_txchk_change(dd, 0, n, TXCHK_CHG_TYPE_ENAB1, NULL);

	qib_register_observer(dd, &sendctrl_0_observer);
	qib_register_observer(dd, &sendctrl_1_observer);

	/* disable fetch-priority while updating the per-port threshold */
	dd->control &= ~QLOGIC_IB_C_SDMAFETCHPRIOEN;
	qib_write_kreg(dd, kr_control, dd->control);
	/*
	 * Set SendDmaFetchPriority and init Tx params, including
	 * QSFP handler on boards that have QSFP.
	 * First set our default attenuation entry for cables that
	 * don't have valid attenuation.
	 */
	set_no_qsfp_atten(dd, 0);
	for (n = 0; n < dd->num_pports; ++n) {
		struct qib_pportdata *ppd = dd->pport + n;

		qib_write_kreg_port(ppd, krp_senddmaprioritythld,
				    sdma_fetch_prio & 0xf);
		/* Initialize qsfp if present on board. */
		if (dd->flags & QIB_HAS_QSFP)
			qib_init_7322_qsfp(ppd);
	}
	/* re-enable fetch-priority now that thresholds are programmed */
	dd->control |= QLOGIC_IB_C_SDMAFETCHPRIOEN;
	qib_write_kreg(dd, kr_control, dd->control);

	return ret;
}
6216 
/* per IB port errors.  */
/* NOTE(review): these build 64-bit masks from MASK_ACROSS(lo, hi) bit
 * ranges; PIBP presumably stands for "per IB port".  The specific bit
 * ranges come from the chip register layout -- verify against the
 * 7322 register definitions before changing. */
#define SENDCTRL_PIBP (MASK_ACROSS(0, 1) | MASK_ACROSS(3, 3) | \
	MASK_ACROSS(8, 15))
#define RCVCTRL_PIBP (MASK_ACROSS(0, 17) | MASK_ACROSS(39, 41))
#define ERRS_PIBP (MASK_ACROSS(57, 58) | MASK_ACROSS(54, 54) | \
	MASK_ACROSS(36, 49) | MASK_ACROSS(29, 34) | MASK_ACROSS(14, 17) | \
	MASK_ACROSS(0, 11))
6224 
/*
 * Write the initialization per-port registers that need to be done at
 * driver load and after reset completes (i.e., that aren't done as part
 * of other init procedures called from qib_init.c).
 * Some of these should be redundant on reset, but play safe.
 */
static void write_7322_init_portregs(struct qib_pportdata *ppd)
{
	u64 val;
	int i;

	if (!ppd->link_speed_supported) {
		/* no buffer credits for this port */
		for (i = 1; i < 8; i++)
			qib_write_kreg_port(ppd, krp_rxcreditvl0 + i, 0);
		qib_write_kreg_port(ppd, krp_ibcctrl_b, 0);
		qib_write_kreg(ppd->dd, kr_scratch, 0);
		return;
	}

	/*
	 * Set the number of supported virtual lanes in IBC,
	 * for flow control packet handling on unsupported VLs
	 */
	val = qib_read_kreg_port(ppd, krp_ibsdtestiftx);
	val &= ~SYM_MASK(IB_SDTEST_IF_TX_0, VL_CAP);
	/* VL_CAP field encodes vls_supported - 1 */
	val |= (u64)(ppd->vls_supported - 1) <<
		SYM_LSB(IB_SDTEST_IF_TX_0, VL_CAP);
	qib_write_kreg_port(ppd, krp_ibsdtestiftx, val);

	qib_write_kreg_port(ppd, krp_rcvbthqp, QIB_KD_QP);

	/* enable tx header checking */
	qib_write_kreg_port(ppd, krp_sendcheckcontrol, IBA7322_SENDCHK_PKEY |
			    IBA7322_SENDCHK_BTHQP | IBA7322_SENDCHK_SLID |
			    IBA7322_SENDCHK_RAW_IPV6 | IBA7322_SENDCHK_MINSZ);

	/* advertise local scramble capability */
	qib_write_kreg_port(ppd, krp_ncmodectrl,
		SYM_MASK(IBNCModeCtrl_0, ScrambleCapLocal));

	/*
	 * Unconditionally clear the bufmask bits.  If SDMA is
	 * enabled, we'll set them appropriately later.
	 */
	qib_write_kreg_port(ppd, krp_senddmabufmask0, 0);
	qib_write_kreg_port(ppd, krp_senddmabufmask1, 0);
	qib_write_kreg_port(ppd, krp_senddmabufmask2, 0);
	/* rev1 silicon needs ForceCreditUpToDate set in SendCtrl */
	if (ppd->dd->cspec->r1)
		ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, ForceCreditUpToDate);
}
6275 
/*
 * Write the initialization per-device registers that need to be done at
 * driver load and after reset completes (i.e., that aren't done as part
 * of other init procedures called from qib_init.c).  Also write per-port
 * registers that are affected by overall device config, such as QP mapping
 * Some of these should be redundant on reset, but play safe.
 */
static void write_7322_initregs(struct qib_devdata *dd)
{
	struct qib_pportdata *ppd;
	int i, pidx;
	u64 val;

	/* Set Multicast QPs received by port 2 to map to context one. */
	qib_write_kreg(dd, KREG_IDX(RcvQPMulticastContext_1), 1);

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		unsigned n, regno;
		unsigned long flags;

		/* QP mapping needs at least two kernel receive queues */
		if (dd->n_krcv_queues < 2 ||
			!dd->pport[pidx].link_speed_supported)
			continue;

		ppd = &dd->pport[pidx];

		/* be paranoid against later code motion, etc. */
		spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
		ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvQPMapEnable);
		spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);

		/* Initialize QP to context mapping */
		regno = krp_rcvqpmaptable;
		val = 0;
		/* n = number of kernel contexts available per port */
		if (dd->num_pports > 1)
			n = dd->first_user_ctxt / dd->num_pports;
		else
			n = dd->first_user_ctxt - 1;
		/* pack 32 5-bit context numbers, six per 64-bit register */
		for (i = 0; i < 32; ) {
			unsigned ctxt;

			if (dd->num_pports > 1)
				ctxt = (i % n) * dd->num_pports + pidx;
			else if (i % n)
				ctxt = (i % n) + 1;
			else
				ctxt = ppd->hw_pidx;
			val |= ctxt << (5 * (i % 6));
			i++;
			if (i % 6 == 0) {
				qib_write_kreg_port(ppd, regno, val);
				val = 0;
				regno++;
			}
		}
		/* flush the final, partially-filled map register */
		qib_write_kreg_port(ppd, regno, val);
	}

	/*
	 * Setup up interrupt mitigation for kernel contexts, but
	 * not user contexts (user contexts use interrupts when
	 * stalled waiting for any packet, so want those interrupts
	 * right away).
	 */
	for (i = 0; i < dd->first_user_ctxt; i++) {
		dd->cspec->rcvavail_timeout[i] = rcv_int_timeout;
		qib_write_kreg(dd, kr_rcvavailtimeout + i, rcv_int_timeout);
	}

	/*
	 * Initialize  as (disabled) rcvflow tables.  Application code
	 * will setup each flow as it uses the flow.
	 * Doesn't clear any of the error bits that might be set.
	 */
	val = TIDFLOW_ERRBITS; /* these are W1C */
	for (i = 0; i < dd->cfgctxts; i++) {
		int flow;

		for (flow = 0; flow < NUM_TIDFLOWS_CTXT; flow++)
			qib_write_ureg(dd, ur_rcvflowtable+flow, val, i);
	}

	/*
	 * dual cards init to dual port recovery, single port cards to
	 * the one port.  Dual port cards may later adjust to 1 port,
	 * and then back to dual port if both ports are connected
	 * */
	if (dd->num_pports)
		setup_7322_link_recovery(dd->pport, dd->num_pports > 1);
}
6366 
/*
 * qib_init_7322_variables - set chip-specific variables in the devdata
 * @dd: the qlogic_ib device
 *
 * One-time per-device setup, called before the chip is brought up:
 * carves the per-port and chip-specific structs out of the memory
 * allocated just past the devdata, validates the revision register,
 * allocates the send-buffer check bitmaps, establishes per-port
 * link/VL defaults, maps the VL15 buffers, and computes the PIO
 * buffer accounting (kernel vs. user vs. SDMA).
 *
 * Return: 0 on success; -ENODEV if the revision register reads as all
 * ones (chip unreachable); -ENOMEM on allocation/ioremap failure.
 */
static int qib_init_7322_variables(struct qib_devdata *dd)
{
	struct qib_pportdata *ppd;
	unsigned features, pidx, sbufcnt;
	int ret, mtu;
	u32 sbufs, updthresh;
	resource_size_t vl15off;

	/* pport structs are contiguous, allocated after devdata */
	ppd = (struct qib_pportdata *)(dd + 1);
	dd->pport = ppd;
	ppd[0].dd = dd;
	ppd[1].dd = dd;

	dd->cspec = (struct qib_chip_specific *)(ppd + 2);

	ppd[0].cpspec = (struct qib_chippport_specific *)(dd->cspec + 1);
	ppd[1].cpspec = &ppd[0].cpspec[1];
	ppd[0].cpspec->ppd = &ppd[0]; /* for autoneg_7322_work() */
	ppd[1].cpspec->ppd = &ppd[1]; /* for autoneg_7322_work() */

	spin_lock_init(&dd->cspec->rcvmod_lock);
	spin_lock_init(&dd->cspec->gpio_lock);

	/* we haven't yet set QIB_PRESENT, so use read directly */
	dd->revision = readq(&dd->kregbase[kr_revision]);

	/* an all-ones low word means the chip did not answer the read */
	if ((dd->revision & 0xffffffffU) == 0xffffffffU) {
		qib_dev_err(dd,
			"Revision register read failure, giving up initialization\n");
		ret = -ENODEV;
		goto bail;
	}
	dd->flags |= QIB_PRESENT;  /* now register routines work */

	dd->majrev = (u8) SYM_FIELD(dd->revision, Revision_R, ChipRevMajor);
	dd->minrev = (u8) SYM_FIELD(dd->revision, Revision_R, ChipRevMinor);
	dd->cspec->r1 = dd->minrev == 1;

	get_7322_chip_params(dd);
	features = qib_7322_boardname(dd);

	/* now that piobcnt2k and 4k set, we can allocate these */
	/* one bit per send buffer (2k + 4k + VL15), rounded up to longs */
	sbufcnt = dd->piobcnt2k + dd->piobcnt4k +
		NUM_VL15_BUFS + BITS_PER_LONG - 1;
	sbufcnt /= BITS_PER_LONG;
	dd->cspec->sendchkenable =
		kmalloc_array(sbufcnt, sizeof(*dd->cspec->sendchkenable),
			      GFP_KERNEL);
	dd->cspec->sendgrhchk =
		kmalloc_array(sbufcnt, sizeof(*dd->cspec->sendgrhchk),
			      GFP_KERNEL);
	dd->cspec->sendibchk =
		kmalloc_array(sbufcnt, sizeof(*dd->cspec->sendibchk),
			      GFP_KERNEL);
	if (!dd->cspec->sendchkenable || !dd->cspec->sendgrhchk ||
		!dd->cspec->sendibchk) {
		ret = -ENOMEM;
		goto bail;
	}

	ppd = dd->pport;

	/*
	 * GPIO bits for TWSI data and clock,
	 * used for serial EEPROM.
	 */
	dd->gpio_sda_num = _QIB_GPIO_SDA_NUM;
	dd->gpio_scl_num = _QIB_GPIO_SCL_NUM;
	dd->twsi_eeprom_dev = QIB_TWSI_EEPROM_DEV;

	dd->flags |= QIB_HAS_INTX | QIB_HAS_LINK_LATENCY |
		QIB_NODMA_RTAIL | QIB_HAS_VLSUPP | QIB_HAS_HDRSUPP |
		QIB_HAS_THRESH_UPDATE |
		(sdma_idle_cnt ? QIB_HAS_SDMA_TIMEOUT : 0);
	dd->flags |= qib_special_trigger ?
		QIB_USE_SPCL_TRIG : QIB_HAS_SEND_DMA;

	/*
	 * Setup initial values.  These may change when PAT is enabled, but
	 * we need these to do initial chip register accesses.
	 */
	qib_7322_set_baseaddrs(dd);

	mtu = ib_mtu_enum_to_int(qib_ibmtu);
	if (mtu == -1)
		mtu = QIB_DEFAULT_MTU;

	dd->cspec->int_enable_mask = QIB_I_BITSEXTANT;
	/* all hwerrors become interrupts, unless special purposed */
	dd->cspec->hwerrmask = ~0ULL;
	/*  link_recovery setup causes these errors, so ignore them,
	 *  other than clearing them when they occur */
	dd->cspec->hwerrmask &=
		~(SYM_MASK(HwErrMask, IBSerdesPClkNotDetectMask_0) |
		  SYM_MASK(HwErrMask, IBSerdesPClkNotDetectMask_1) |
		  HWE_MASK(LATriggered));

	/* per-port setup; ports with no supported speed get disabled */
	for (pidx = 0; pidx < NUM_IB_PORTS; ++pidx) {
		struct qib_chippport_specific *cp = ppd->cpspec;

		/* features packs one PORT_SPD_CAP field per port */
		ppd->link_speed_supported = features & PORT_SPD_CAP;
		features >>=  PORT_SPD_CAP_SHIFT;
		if (!ppd->link_speed_supported) {
			/* single port mode (7340, or configured) */
			dd->skip_kctxt_mask |= 1 << pidx;
			if (pidx == 0) {
				/* Make sure port is disabled. */
				qib_write_kreg_port(ppd, krp_rcvctrl, 0);
				qib_write_kreg_port(ppd, krp_ibcctrl_a, 0);
				/* struct copy: port 1 takes over slot 0 */
				ppd[0] = ppd[1];
				dd->cspec->hwerrmask &= ~(SYM_MASK(HwErrMask,
						  IBSerdesPClkNotDetectMask_0)
						  | SYM_MASK(HwErrMask,
						  SDmaMemReadErrMask_0));
				dd->cspec->int_enable_mask &= ~(
				     SYM_MASK(IntMask, SDmaCleanupDoneMask_0) |
				     SYM_MASK(IntMask, SDmaIdleIntMask_0) |
				     SYM_MASK(IntMask, SDmaProgressIntMask_0) |
				     SYM_MASK(IntMask, SDmaIntMask_0) |
				     SYM_MASK(IntMask, ErrIntMask_0) |
				     SYM_MASK(IntMask, SendDoneIntMask_0));
			} else {
				/* Make sure port is disabled. */
				qib_write_kreg_port(ppd, krp_rcvctrl, 0);
				qib_write_kreg_port(ppd, krp_ibcctrl_a, 0);
				dd->cspec->hwerrmask &= ~(SYM_MASK(HwErrMask,
						  IBSerdesPClkNotDetectMask_1)
						  | SYM_MASK(HwErrMask,
						  SDmaMemReadErrMask_1));
				dd->cspec->int_enable_mask &= ~(
				     SYM_MASK(IntMask, SDmaCleanupDoneMask_1) |
				     SYM_MASK(IntMask, SDmaIdleIntMask_1) |
				     SYM_MASK(IntMask, SDmaProgressIntMask_1) |
				     SYM_MASK(IntMask, SDmaIntMask_1) |
				     SYM_MASK(IntMask, ErrIntMask_1) |
				     SYM_MASK(IntMask, SendDoneIntMask_1));
			}
			continue;
		}

		dd->num_pports++;
		ret = qib_init_pportdata(ppd, dd, pidx, dd->num_pports);
		if (ret) {
			dd->num_pports--;
			goto bail;
		}

		ppd->link_width_supported = IB_WIDTH_1X | IB_WIDTH_4X;
		ppd->link_width_enabled = IB_WIDTH_4X;
		ppd->link_speed_enabled = ppd->link_speed_supported;
		/*
		 * Set the initial values to reasonable default, will be set
		 * for real when link is up.
		 */
		ppd->link_width_active = IB_WIDTH_4X;
		ppd->link_speed_active = QIB_IB_SDR;
		ppd->delay_mult = ib_rate_to_delay[IB_RATE_10_GBPS];
		switch (qib_num_cfg_vls) {
		case 1:
			ppd->vls_supported = IB_VL_VL0;
			break;
		case 2:
			ppd->vls_supported = IB_VL_VL0_1;
			break;
		default:
			qib_devinfo(dd->pcidev,
				    "Invalid num_vls %u, using 4 VLs\n",
				    qib_num_cfg_vls);
			qib_num_cfg_vls = 4;
			/* fall through */
		case 4:
			ppd->vls_supported = IB_VL_VL0_3;
			break;
		case 8:
			/* 8 VLs only allowed with MTU <= 2048 */
			if (mtu <= 2048)
				ppd->vls_supported = IB_VL_VL0_7;
			else {
				qib_devinfo(dd->pcidev,
					    "Invalid num_vls %u for MTU %d , using 4 VLs\n",
					    qib_num_cfg_vls, mtu);
				ppd->vls_supported = IB_VL_VL0_3;
				qib_num_cfg_vls = 4;
			}
			break;
		}
		ppd->vls_operational = ppd->vls_supported;

		init_waitqueue_head(&cp->autoneg_wait);
		INIT_DELAYED_WORK(&cp->autoneg_work,
				  autoneg_7322_work);
		if (ppd->dd->cspec->r1)
			INIT_DELAYED_WORK(&cp->ipg_work, ipg_7322_work);

		/*
		 * For Mez and similar cards, no qsfp info, so do
		 * the "cable info" setup here.  Can be overridden
		 * in adapter-specific routines.
		 */
		if (!(dd->flags & QIB_HAS_QSFP)) {
			if (!IS_QMH(dd) && !IS_QME(dd))
				qib_devinfo(dd->pcidev,
					"IB%u:%u: Unknown mezzanine card type\n",
					dd->unit, ppd->port);
			cp->h1_val = IS_QMH(dd) ? H1_FORCE_QMH : H1_FORCE_QME;
			/*
			 * Choose center value as default tx serdes setting
			 * until changed through module parameter.
			 */
			ppd->cpspec->no_eep = IS_QMH(dd) ?
				TXDDS_TABLE_SZ + 2 : TXDDS_TABLE_SZ + 4;
		} else
			cp->h1_val = H1_FORCE_VAL;

		/* Avoid writes to chip for mini_init */
		if (!qib_mini_init)
			write_7322_init_portregs(ppd);

		timer_setup(&cp->chase_timer, reenable_chase, 0);

		ppd++;
	}

	dd->rcvhdrentsize = qib_rcvhdrentsize ?
		qib_rcvhdrentsize : QIB_RCVHDR_ENTSIZE;
	dd->rcvhdrsize = qib_rcvhdrsize ?
		qib_rcvhdrsize : QIB_DFLT_RCVHDRSIZE;
	dd->rhf_offset = dd->rcvhdrentsize - sizeof(u64) / sizeof(u32);

	/* we always allocate at least 2048 bytes for eager buffers */
	dd->rcvegrbufsize = max(mtu, 2048);
	dd->rcvegrbufsize_shift = ilog2(dd->rcvegrbufsize);

	qib_7322_tidtemplate(dd);

	/*
	 * We can request a receive interrupt for 1 or
	 * more packets from current offset.
	 */
	dd->rhdrhead_intr_off =
		(u64) rcv_int_count << IBA7322_HDRHEAD_PKTINT_SHIFT;

	/* setup the stats timer; the add_timer is done at end of init */
	timer_setup(&dd->stats_timer, qib_get_7322_faststats, 0);

	dd->ureg_align = 0x10000;  /* 64KB alignment */

	dd->piosize2kmax_dwords = dd->piosize2k >> 2;

	qib_7322_config_ctxts(dd);
	qib_set_ctxtcnt(dd);

	/*
	 * We do not set WC on the VL15 buffers to avoid
	 * a rare problem with unaligned writes from
	 * interrupt-flushed store buffers, so we need
	 * to map those separately here.  We can't solve
	 * this for the rarely used mtrr case.
	 */
	ret = init_chip_wc_pat(dd, 0);
	if (ret)
		goto bail;

	/* vl15 buffers start just after the 4k buffers */
	vl15off = dd->physaddr + (dd->piobufbase >> 32) +
		  dd->piobcnt4k * dd->align4k;
	dd->piovl15base = ioremap_nocache(vl15off,
					  NUM_VL15_BUFS * dd->align4k);
	if (!dd->piovl15base) {
		ret = -ENOMEM;
		goto bail;
	}

	qib_7322_set_baseaddrs(dd); /* set chip access pointers now */

	ret = 0;
	if (qib_mini_init)
		goto bail;
	if (!dd->num_pports) {
		qib_dev_err(dd, "No ports enabled, giving up initialization\n");
		goto bail; /* no error, so can still figure out why err */
	}

	write_7322_initregs(dd);
	ret = qib_create_ctxts(dd);
	init_7322_cntrnames(dd);

	updthresh = 8U; /* update threshold */

	/* use all of 4KB buffers for the kernel SDMA, zero if !SDMA.
	 * reserve the update threshold amount for other kernel use, such
	 * as sending SMI, MAD, and ACKs, or 3, whichever is greater,
	 * unless we aren't enabling SDMA, in which case we want to use
	 * all the 4k bufs for the kernel.
	 * if this was less than the update threshold, we could wait
	 * a long time for an update.  Coded this way because we
	 * sometimes change the update threshold for various reasons,
	 * and we want this to remain robust.
	 */
	if (dd->flags & QIB_HAS_SEND_DMA) {
		dd->cspec->sdmabufcnt = dd->piobcnt4k;
		sbufs = updthresh > 3 ? updthresh : 3;
	} else {
		dd->cspec->sdmabufcnt = 0;
		sbufs = dd->piobcnt4k;
	}
	dd->cspec->lastbuf_for_pio = dd->piobcnt2k + dd->piobcnt4k -
		dd->cspec->sdmabufcnt;
	dd->lastctxt_piobuf = dd->cspec->lastbuf_for_pio - sbufs;
	dd->cspec->lastbuf_for_pio--; /* range is <= , not < */
	dd->last_pio = dd->cspec->lastbuf_for_pio;
	dd->pbufsctxt = (dd->cfgctxts > dd->first_user_ctxt) ?
		dd->lastctxt_piobuf / (dd->cfgctxts - dd->first_user_ctxt) : 0;

	/*
	 * If we have 16 user contexts, we will have 7 sbufs
	 * per context, so reduce the update threshold to match.  We
	 * want to update before we actually run out, at low pbufs/ctxt
	 * so give ourselves some margin.
	 */
	if (dd->pbufsctxt >= 2 && dd->pbufsctxt - 2 < updthresh)
		updthresh = dd->pbufsctxt - 2;
	dd->cspec->updthresh_dflt = updthresh;
	dd->cspec->updthresh = updthresh;

	/* before full enable, no interrupts, no locking needed */
	dd->sendctrl |= ((updthresh & SYM_RMASK(SendCtrl, AvailUpdThld))
			     << SYM_LSB(SendCtrl, AvailUpdThld)) |
			SYM_MASK(SendCtrl, SendBufAvailPad64Byte);

	dd->psxmitwait_supported = 1;
	dd->psxmitwait_check_rate = QIB_7322_PSXMITWAIT_CHECK_RATE;
bail:
	if (!dd->ctxtcnt)
		dd->ctxtcnt = 1; /* for other initialization code */

	return ret;
}
6705 
6706 static u32 __iomem *qib_7322_getsendbuf(struct qib_pportdata *ppd, u64 pbc,
6707                                         u32 *pbufnum)
6708 {
6709         u32 first, last, plen = pbc & QIB_PBC_LENGTH_MASK;
6710         struct qib_devdata *dd = ppd->dd;
6711 
6712         /* last is same for 2k and 4k, because we use 4k if all 2k busy */
6713         if (pbc & PBC_7322_VL15_SEND) {
6714                 first = dd->piobcnt2k + dd->piobcnt4k + ppd->hw_pidx;
6715                 last = first;
6716         } else {
6717                 if ((plen + 1) > dd->piosize2kmax_dwords)
6718                         first = dd->piobcnt2k;
6719                 else
6720                         first = 0;
6721                 last = dd->cspec->lastbuf_for_pio;
6722         }
6723         return qib_getsendbuf_range(dd, pbufnum, first, last);
6724 }
6725 
/*
 * qib_set_cntr_7322_sample - program the port-sampling counter window
 * @ppd: port data
 * @intv: sampling interval, written to krp_psinterval
 * @start: sampling start value, written to krp_psstart
 */
static void qib_set_cntr_7322_sample(struct qib_pportdata *ppd, u32 intv,
				     u32 start)
{
	qib_write_kreg_port(ppd, krp_psinterval, intv);
	qib_write_kreg_port(ppd, krp_psstart, start);
}
6732 
/*
 * qib_sdma_set_7322_desc_cnt - write the SDMA descriptor count register
 * @ppd: port data
 * @cnt: descriptor count to program
 *
 * Must be called with sdma_lock held, or before init finished.
 */
static void qib_sdma_set_7322_desc_cnt(struct qib_pportdata *ppd, unsigned cnt)
{
	qib_write_kreg_port(ppd, krp_senddmadesccnt, cnt);
}
6740 
6741 /*
6742  * sdma_lock should be acquired before calling this routine
6743  */
6744 static void dump_sdma_7322_state(struct qib_pportdata *ppd)
6745 {
6746         u64 reg, reg1, reg2;
6747 
6748         reg = qib_read_kreg_port(ppd, krp_senddmastatus);
6749         qib_dev_porterr(ppd->dd, ppd->port,
6750                 "SDMA senddmastatus: 0x%016llx\n", reg);
6751 
6752         reg = qib_read_kreg_port(ppd, krp_sendctrl);
6753         qib_dev_porterr(ppd->dd, ppd->port,
6754                 "SDMA sendctrl: 0x%016llx\n", reg);
6755 
6756         reg = qib_read_kreg_port(ppd, krp_senddmabase);
6757         qib_dev_porterr(ppd->dd, ppd->port,
6758                 "SDMA senddmabase: 0x%016llx\n", reg);
6759 
6760         reg = qib_read_kreg_port(ppd, krp_senddmabufmask0);
6761         reg1 = qib_read_kreg_port(ppd, krp_senddmabufmask1);
6762         reg2 = qib_read_kreg_port(ppd, krp_senddmabufmask2);
6763         qib_dev_porterr(ppd->dd, ppd->port,
6764                 "SDMA senddmabufmask 0:%llx  1:%llx  2:%llx\n",
6765                  reg, reg1, reg2);
6766 
6767         /* get bufuse bits, clear them, and print them again if non-zero */
6768         reg = qib_read_kreg_port(ppd, krp_senddmabuf_use0);
6769         qib_write_kreg_port(ppd, krp_senddmabuf_use0, reg);
6770         reg1 = qib_read_kreg_port(ppd, krp_senddmabuf_use1);
6771         qib_write_kreg_port(ppd, krp_senddmabuf_use0, reg1);
6772         reg2 = qib_read_kreg_port(ppd, krp_senddmabuf_use2);
6773         qib_write_kreg_port(ppd, krp_senddmabuf_use0, reg2);
6774         /* 0 and 1 should always be zero, so print as short form */
6775         qib_dev_porterr(ppd->dd, ppd->port,
6776                  "SDMA current senddmabuf_use 0:%llx  1:%llx  2:%llx\n",
6777                  reg, reg1, reg2);
6778         reg = qib_read_kreg_port(ppd, krp_senddmabuf_use0);
6779         reg1 = qib_read_kreg_port(ppd, krp_senddmabuf_use1);
6780         reg2 = qib_read_kreg_port(ppd, krp_senddmabuf_use2);
6781         /* 0 and 1 should always be zero, so print as short form */
6782         qib_dev_porterr(ppd->dd, ppd->port,
6783                  "SDMA cleared senddmabuf_use 0:%llx  1:%llx  2:%llx\n",
6784                  reg, reg1, reg2);
6785 
6786         reg = qib_read_kreg_port(ppd, krp_senddmatail);
6787         qib_dev_porterr(ppd->dd, ppd->port,
6788                 "SDMA senddmatail: 0x%016llx\n", reg);
6789 
6790         reg = qib_read_kreg_port(ppd, krp_senddmahead);
6791         qib_dev_porterr(ppd->dd, ppd->port,
6792                 "SDMA senddmahead: 0x%016llx\n", reg);
6793 
6794         reg = qib_read_kreg_port(ppd, krp_senddmaheadaddr);
6795         qib_dev_porterr(ppd->dd, ppd->port,
6796                 "SDMA senddmaheadaddr: 0x%016llx\n", reg);
6797 
6798         reg = qib_read_kreg_port(ppd, krp_senddmalengen);
6799         qib_dev_porterr(ppd->dd, ppd->port,
6800                 "SDMA senddmalengen: 0x%016llx\n", reg);
6801 
6802         reg = qib_read_kreg_port(ppd, krp_senddmadesccnt);
6803         qib_dev_porterr(ppd->dd, ppd->port,
6804                 "SDMA senddmadesccnt: 0x%016llx\n", reg);
6805 
6806         reg = qib_read_kreg_port(ppd, krp_senddmaidlecnt);
6807         qib_dev_porterr(ppd->dd, ppd->port,
6808                 "SDMA senddmaidlecnt: 0x%016llx\n", reg);
6809 
6810         reg = qib_read_kreg_port(ppd, krp_senddmaprioritythld);
6811         qib_dev_porterr(ppd->dd, ppd->port,
6812                 "SDMA senddmapriorityhld: 0x%016llx\n", reg);
6813 
6814         reg = qib_read_kreg_port(ppd, krp_senddmareloadcnt);
6815         qib_dev_porterr(ppd->dd, ppd->port,
6816                 "SDMA senddmareloadcnt: 0x%016llx\n", reg);
6817 
6818         dump_sdma_state(ppd);
6819 }
6820 
/*
 * Per-state control-bit settings for the 7322 SDMA engine, indexed by
 * the qib_sdma_state values.  Installed as
 * ppd->sdma_state.set_state_action by qib_7322_sdma_init_early().
 */
static struct sdma_set_state_action sdma_7322_action_table[] = {
	[qib_sdma_state_s00_hw_down] = {
		.go_s99_running_tofalse = 1,
		.op_enable = 0,
		.op_intenable = 0,
		.op_halt = 0,
		.op_drain = 0,
	},
	[qib_sdma_state_s10_hw_start_up_wait] = {
		.op_enable = 0,
		.op_intenable = 1,
		.op_halt = 1,
		.op_drain = 0,
	},
	[qib_sdma_state_s20_idle] = {
		.op_enable = 1,
		.op_intenable = 1,
		.op_halt = 1,
		.op_drain = 0,
	},
	[qib_sdma_state_s30_sw_clean_up_wait] = {
		.op_enable = 0,
		.op_intenable = 1,
		.op_halt = 1,
		.op_drain = 0,
	},
	[qib_sdma_state_s40_hw_clean_up_wait] = {
		.op_enable = 1,
		.op_intenable = 1,
		.op_halt = 1,
		.op_drain = 0,
	},
	[qib_sdma_state_s50_hw_halt_wait] = {
		.op_enable = 1,
		.op_intenable = 1,
		.op_halt = 1,
		.op_drain = 1,
	},
	[qib_sdma_state_s99_running] = {
		.op_enable = 1,
		.op_intenable = 1,
		.op_halt = 0,
		.op_drain = 0,
		.go_s99_running_totrue = 1,
	},
};
6867 
/*
 * qib_7322_sdma_init_early - hook up the 7322 SDMA state action table
 * @ppd: port data
 *
 * Points the generic SDMA state machine at this chip's per-state
 * control-bit settings (sdma_7322_action_table).
 */
static void qib_7322_sdma_init_early(struct qib_pportdata *ppd)
{
	ppd->sdma_state.set_state_action = sdma_7322_action_table;
}
6872 
6873 static int init_sdma_7322_regs(struct qib_pportdata *ppd)
6874 {
6875         struct qib_devdata *dd = ppd->dd;
6876         unsigned lastbuf, erstbuf;
6877         u64 senddmabufmask[3] = { 0 };
6878         int n, ret = 0;
6879 
6880         qib_write_kreg_port(ppd, krp_senddmabase, ppd->sdma_descq_phys);
6881         qib_sdma_7322_setlengen(ppd);
6882         qib_sdma_update_7322_tail(ppd, 0); /* Set SendDmaTail */
6883         qib_write_kreg_port(ppd, krp_senddmareloadcnt, sdma_idle_cnt);
6884         qib_write_kreg_port(ppd, krp_senddmadesccnt, 0);
6885         qib_write_kreg_port(ppd, krp_senddmaheadaddr, ppd->sdma_head_phys);
6886 
6887         if (dd->num_pports)
6888                 n = dd->cspec->sdmabufcnt / dd->num_pports; /* no remainder */
6889         else
6890                 n = dd->cspec->sdmabufcnt; /* failsafe for init */
6891         erstbuf = (dd->piobcnt2k + dd->piobcnt4k) -
6892                 ((dd->num_pports == 1 || ppd->port == 2) ? n :
6893                 dd->cspec->sdmabufcnt);
6894         lastbuf = erstbuf + n;
6895 
6896         ppd->sdma_state.first_sendbuf = erstbuf;
6897         ppd->sdma_state.last_sendbuf = lastbuf;
6898         for (; erstbuf < lastbuf; ++erstbuf) {
6899                 unsigned word = erstbuf / BITS_PER_LONG;
6900                 unsigned bit = erstbuf & (BITS_PER_LONG - 1);
6901 
6902                 senddmabufmask[word] |= 1ULL << bit;
6903         }
6904         qib_write_kreg_port(ppd, krp_senddmabufmask0, senddmabufmask[0]);
6905         qib_write_kreg_port(ppd, krp_senddmabufmask1, senddmabufmask[1]);
6906         qib_write_kreg_port(ppd, krp_senddmabufmask2, senddmabufmask[2]);
6907         return ret;
6908 }
6909 
6910 /* sdma_lock must be held */
6911 static u16 qib_sdma_7322_gethead(struct qib_pportdata *ppd)
6912 {
6913         struct qib_devdata *dd = ppd->dd;
6914         int sane;
6915         int use_dmahead;
6916         u16 swhead;
6917         u16 swtail;
6918         u16 cnt;
6919         u16 hwhead;
6920 
6921         use_dmahead = __qib_sdma_running(ppd) &&
6922                 (dd->flags & QIB_HAS_SDMA_TIMEOUT);
6923 retry:
6924         hwhead = use_dmahead ?
6925                 (u16) le64_to_cpu(*ppd->sdma_head_dma) :
6926                 (u16) qib_read_kreg_port(ppd, krp_senddmahead);
6927 
6928         swhead = ppd->sdma_descq_head;
6929         swtail = ppd->sdma_descq_tail;
6930         cnt = ppd->sdma_descq_cnt;
6931 
6932         if (swhead < swtail)
6933                 /* not wrapped */
6934                 sane = (hwhead >= swhead) & (hwhead <= swtail);
6935         else if (swhead > swtail)
6936                 /* wrapped around */
6937                 sane = ((hwhead >= swhead) && (hwhead < cnt)) ||
6938                         (hwhead <= swtail);
6939         else
6940                 /* empty */
6941                 sane = (hwhead == swhead);
6942 
6943         if (unlikely(!sane)) {
6944                 if (use_dmahead) {
6945                         /* try one more time, directly from the register */
6946                         use_dmahead = 0;
6947                         goto retry;
6948                 }
6949                 /* proceed as if no progress */
6950                 hwhead = swhead;
6951         }
6952 
6953         return hwhead;
6954 }
6955 
6956 static int qib_sdma_7322_busy(struct qib_pportdata *ppd)
6957 {
6958         u64 hwstatus = qib_read_kreg_port(ppd, krp_senddmastatus);
6959 
6960         return (hwstatus & SYM_MASK(SendDmaStatus_0, ScoreBoardDrainInProg)) ||
6961                (hwstatus & SYM_MASK(SendDmaStatus_0, HaltInProg)) ||
6962                !(hwstatus & SYM_MASK(SendDmaStatus_0, InternalSDmaHalt)) ||
6963                !(hwstatus & SYM_MASK(SendDmaStatus_0, ScbEmpty));
6964 }
6965 
6966 /*
6967  * Compute the amount of delay before sending the next packet if the
6968  * port's send rate differs from the static rate set for the QP.
6969  * The delay affects the next packet and the amount of the delay is
6970  * based on the length of the this packet.
6971  */
6972 static u32 qib_7322_setpbc_control(struct qib_pportdata *ppd, u32 plen,
6973                                    u8 srate, u8 vl)
6974 {
6975         u8 snd_mult = ppd->delay_mult;
6976         u8 rcv_mult = ib_rate_to_delay[srate];
6977         u32 ret;
6978 
6979         ret = rcv_mult > snd_mult ? ((plen + 1) >> 1) * snd_mult : 0;
6980 
6981         /* Indicate VL15, else set the VL in the control word */
6982         if (vl == 15)
6983                 ret |= PBC_7322_VL15_SEND_CTRL;
6984         else
6985                 ret |= vl << PBC_VL_NUM_LSB;
6986         ret |= ((u32)(ppd->hw_pidx)) << PBC_PORT_SEL_LSB;
6987 
6988         return ret;
6989 }
6990 
/*
 * Enable the per-port VL15 send buffers for use.
 * They follow the rest of the buffers, without a config parameter.
 * This was in initregs, but that is done before the shadow
 * is set up, and this has to be done after the shadow is
 * set up.
 */
static void qib_7322_initvl15_bufs(struct qib_devdata *dd)
{
	unsigned vl15bufs;

	/* VL15 buffers sit immediately after the 2k and 4k PIO pools */
	vl15bufs = dd->piobcnt2k + dd->piobcnt4k;
	qib_chg_pioavailkernel(dd, vl15bufs, NUM_VL15_BUFS,
			       TXCHK_CHG_TYPE_KERN, NULL);
}
7006 
7007 static void qib_7322_init_ctxt(struct qib_ctxtdata *rcd)
7008 {
7009         if (rcd->ctxt < NUM_IB_PORTS) {
7010                 if (rcd->dd->num_pports > 1) {
7011                         rcd->rcvegrcnt = KCTXT0_EGRCNT / 2;
7012                         rcd->rcvegr_tid_base = rcd->ctxt ? rcd->rcvegrcnt : 0;
7013                 } else {
7014                         rcd->rcvegrcnt = KCTXT0_EGRCNT;
7015                         rcd->rcvegr_tid_base = 0;
7016                 }
7017         } else {
7018                 rcd->rcvegrcnt = rcd->dd->cspec->rcvegrcnt;
7019                 rcd->rcvegr_tid_base = KCTXT0_EGRCNT +
7020                         (rcd->ctxt - NUM_IB_PORTS) * rcd->rcvegrcnt;
7021         }
7022 }
7023 
7024 #define QTXSLEEPS 5000
/*
 * qib_7322_txchk_change - change the send-buffer checking mode for a range
 * @dd: the qlogic_ib device
 * @start: first PIO send buffer number in the range
 * @len: number of buffers in the range
 * @which: TXCHK_CHG_TYPE_* mode to apply (diag disable/enable, kernel, user)
 * @rcd: receive context; non-NULL means we must first wait for all
 *	 buffers in the range to be free (not busy) before flipping
 *	 the checking type
 *
 * Updates the sendchkenable / sendgrhchk / sendibchk shadow bitmaps for
 * the range and pushes the affected mask registers to the chip.  For
 * kernel/user transitions it may also raise or lower the PIO-avail
 * update threshold in SendCtrl.
 */
static void qib_7322_txchk_change(struct qib_devdata *dd, u32 start,
                                  u32 len, u32 which, struct qib_ctxtdata *rcd)
{
        int i;
        const int last = start + len - 1;
        const int lastr = last / BITS_PER_LONG;
        u32 sleeps = 0;
        int wait = rcd != NULL;
        unsigned long flags;

        /*
         * Poll the DMA'd pioavail shadow until no buffer in the range is
         * marked busy, or we give up after QTXSLEEPS iterations.
         */
        while (wait) {
                unsigned long shadow = 0;
                int cstart, previ = -1;

                /*
                 * when flipping from kernel to user, we can't change
                 * the checking type if the buffer is allocated to the
                 * driver.   It's OK the other direction, because it's
                 * from close, and we have just disarm'ed all the
                 * buffers.  All the kernel to kernel changes are also
                 * OK.
                 */
                for (cstart = start; cstart <= last; cstart++) {
                        /* each buffer has 2 bits in the avail array */
                        i = ((2 * cstart) + QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT)
                                / BITS_PER_LONG;
                        if (i != previ) {
                                /* only re-read the shadow word when we cross into it */
                                shadow = (unsigned long)
                                        le64_to_cpu(dd->pioavailregs_dma[i]);
                                previ = i;
                        }
                        if (test_bit(((2 * cstart) +
                                      QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT)
                                     % BITS_PER_LONG, &shadow))
                                break;
                }

                /* loop ran to completion: no buffer in range is busy */
                if (cstart > last)
                        break;

                /* give up waiting after QTXSLEEPS tries */
                if (sleeps == QTXSLEEPS)
                        break;
                /* make sure we see an updated copy next time around */
                sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
                sleeps++;
                msleep(20);
        }

        switch (which) {
        case TXCHK_CHG_TYPE_DIS1:
                /*
                 * disable checking on a range; used by diags; just
                 * one buffer, but still written generically
                 */
                for (i = start; i <= last; i++)
                        clear_bit(i, dd->cspec->sendchkenable);
                break;

        case TXCHK_CHG_TYPE_ENAB1:
                /*
                 * (re)enable checking on a range; used by diags; just
                 * one buffer, but still written generically; read
                 * scratch to be sure buffer actually triggered, not
                 * just flushed from processor.
                 */
                qib_read_kreg32(dd, kr_scratch);
                for (i = start; i <= last; i++)
                        set_bit(i, dd->cspec->sendchkenable);
                break;

        case TXCHK_CHG_TYPE_KERN:
                /* usable by kernel */
                for (i = start; i <= last; i++) {
                        set_bit(i, dd->cspec->sendibchk);
                        clear_bit(i, dd->cspec->sendgrhchk);
                }
                spin_lock_irqsave(&dd->uctxt_lock, flags);
                /* see if we need to raise avail update threshold */
                for (i = dd->first_user_ctxt;
                     dd->cspec->updthresh != dd->cspec->updthresh_dflt
                     && i < dd->cfgctxts; i++)
                        if (dd->rcd[i] && dd->rcd[i]->subctxt_cnt &&
                           ((dd->rcd[i]->piocnt / dd->rcd[i]->subctxt_cnt) - 1)
                           < dd->cspec->updthresh_dflt)
                                break;
                spin_unlock_irqrestore(&dd->uctxt_lock, flags);
                /* i == cfgctxts: no user context still needs a lower threshold */
                if (i == dd->cfgctxts) {
                        spin_lock_irqsave(&dd->sendctrl_lock, flags);
                        dd->cspec->updthresh = dd->cspec->updthresh_dflt;
                        dd->sendctrl &= ~SYM_MASK(SendCtrl, AvailUpdThld);
                        dd->sendctrl |= (dd->cspec->updthresh &
                                         SYM_RMASK(SendCtrl, AvailUpdThld)) <<
                                           SYM_LSB(SendCtrl, AvailUpdThld);
                        spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
                        sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
                }
                break;

        case TXCHK_CHG_TYPE_USER:
                /* for user process */
                for (i = start; i <= last; i++) {
                        clear_bit(i, dd->cspec->sendibchk);
                        set_bit(i, dd->cspec->sendgrhchk);
                }
                spin_lock_irqsave(&dd->sendctrl_lock, flags);
                /* lower the update threshold if this context needs it */
                if (rcd && rcd->subctxt_cnt && ((rcd->piocnt
                        / rcd->subctxt_cnt) - 1) < dd->cspec->updthresh) {
                        dd->cspec->updthresh = (rcd->piocnt /
                                                rcd->subctxt_cnt) - 1;
                        dd->sendctrl &= ~SYM_MASK(SendCtrl, AvailUpdThld);
                        dd->sendctrl |= (dd->cspec->updthresh &
                                        SYM_RMASK(SendCtrl, AvailUpdThld))
                                        << SYM_LSB(SendCtrl, AvailUpdThld);
                        spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
                        sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
                } else
                        spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
                break;

        default:
                break;
        }

        /*
         * Push the shadow bitmaps to the chip.  The DIS1/ENAB1 modes
         * (presumably which >= 2 — confirm against TXCHK_CHG_TYPE_*
         * definitions) touched sendchkenable; KERN/USER touched the
         * grh/ibpkt masks.
         */
        for (i = start / BITS_PER_LONG; which >= 2 && i <= lastr; ++i)
                qib_write_kreg(dd, kr_sendcheckmask + i,
                               dd->cspec->sendchkenable[i]);

        for (i = start / BITS_PER_LONG; which < 2 && i <= lastr; ++i) {
                qib_write_kreg(dd, kr_sendgrhcheckmask + i,
                               dd->cspec->sendgrhchk[i]);
                qib_write_kreg(dd, kr_sendibpktmask + i,
                               dd->cspec->sendibchk[i]);
        }

        /*
         * Be sure whatever we did was seen by the chip and acted upon,
         * before we return.  Mostly important for which >= 2.
         */
        qib_read_kreg32(dd, kr_scratch);
}
7164 
7165 
/*
 * Write an arbitrary value to the hardware scratch register;
 * useful for trigger analyzers, etc.
 */
static void writescratch(struct qib_devdata *dd, u32 val)
{
        qib_write_kreg(dd, kr_scratch, val);
}
7171 
/*
 * Dummy temperature-sense read for now; always fails with -ENXIO.
 * Intended to use chip registers soon (regnum is currently ignored).
 */
static int qib_7322_tempsense_rd(struct qib_devdata *dd, int regnum)
{
        return -ENXIO;
}
7177 
/**
 * qib_init_iba7322_funcs - set up the chip-specific function pointers
 * @pdev: the pci_dev for qlogic_ib device
 * @ent: pci_device_id struct for this dev
 *
 * Also allocates, inits, and returns the devdata struct for this
 * device instance
 *
 * This is global, and is called directly at init to set up the
 * chip-specific function pointers for later use.
 *
 * Returns the devdata pointer on success, or an ERR_PTR on failure.
 */
struct qib_devdata *qib_init_iba7322_funcs(struct pci_dev *pdev,
                                           const struct pci_device_id *ent)
{
        struct qib_devdata *dd;
        int ret, i;
        u32 tabsize, actual_cnt = 0;

        /* devdata plus per-port and chip-specific data in one allocation */
        dd = qib_alloc_devdata(pdev,
                NUM_IB_PORTS * sizeof(struct qib_pportdata) +
                sizeof(struct qib_chip_specific) +
                NUM_IB_PORTS * sizeof(struct qib_chippport_specific));
        if (IS_ERR(dd))
                goto bail;

        /* wire up all 7322-specific entry points used by the core driver */
        dd->f_bringup_serdes    = qib_7322_bringup_serdes;
        dd->f_cleanup           = qib_setup_7322_cleanup;
        dd->f_clear_tids        = qib_7322_clear_tids;
        dd->f_free_irq          = qib_7322_free_irq;
        dd->f_get_base_info     = qib_7322_get_base_info;
        dd->f_get_msgheader     = qib_7322_get_msgheader;
        dd->f_getsendbuf        = qib_7322_getsendbuf;
        dd->f_gpio_mod          = gpio_7322_mod;
        dd->f_eeprom_wen        = qib_7322_eeprom_wen;
        dd->f_hdrqempty         = qib_7322_hdrqempty;
        dd->f_ib_updown         = qib_7322_ib_updown;
        dd->f_init_ctxt         = qib_7322_init_ctxt;
        dd->f_initvl15_bufs     = qib_7322_initvl15_bufs;
        dd->f_intr_fallback     = qib_7322_intr_fallback;
        dd->f_late_initreg      = qib_late_7322_initreg;
        dd->f_setpbc_control    = qib_7322_setpbc_control;
        dd->f_portcntr          = qib_portcntr_7322;
        dd->f_put_tid           = qib_7322_put_tid;
        dd->f_quiet_serdes      = qib_7322_mini_quiet_serdes;
        dd->f_rcvctrl           = rcvctrl_7322_mod;
        dd->f_read_cntrs        = qib_read_7322cntrs;
        dd->f_read_portcntrs    = qib_read_7322portcntrs;
        dd->f_reset             = qib_do_7322_reset;
        dd->f_init_sdma_regs    = init_sdma_7322_regs;
        dd->f_sdma_busy         = qib_sdma_7322_busy;
        dd->f_sdma_gethead      = qib_sdma_7322_gethead;
        dd->f_sdma_sendctrl     = qib_7322_sdma_sendctrl;
        dd->f_sdma_set_desc_cnt = qib_sdma_set_7322_desc_cnt;
        dd->f_sdma_update_tail  = qib_sdma_update_7322_tail;
        dd->f_sendctrl          = sendctrl_7322_mod;
        dd->f_set_armlaunch     = qib_set_7322_armlaunch;
        dd->f_set_cntr_sample   = qib_set_cntr_7322_sample;
        dd->f_iblink_state      = qib_7322_iblink_state;
        dd->f_ibphys_portstate  = qib_7322_phys_portstate;
        dd->f_get_ib_cfg        = qib_7322_get_ib_cfg;
        dd->f_set_ib_cfg        = qib_7322_set_ib_cfg;
        dd->f_set_ib_loopback   = qib_7322_set_loopback;
        dd->f_get_ib_table      = qib_7322_get_ib_table;
        dd->f_set_ib_table      = qib_7322_set_ib_table;
        dd->f_set_intr_state    = qib_7322_set_intr_state;
        dd->f_setextled         = qib_setup_7322_setextled;
        dd->f_txchk_change      = qib_7322_txchk_change;
        dd->f_update_usrhead    = qib_update_7322_usrhead;
        dd->f_wantpiobuf_intr   = qib_wantpiobuf_7322_intr;
        dd->f_xgxs_reset        = qib_7322_mini_pcs_reset;
        dd->f_sdma_hw_clean_up  = qib_7322_sdma_hw_clean_up;
        dd->f_sdma_hw_start_up  = qib_7322_sdma_hw_start_up;
        dd->f_sdma_init_early   = qib_7322_sdma_init_early;
        dd->f_writescratch      = writescratch;
        dd->f_tempsense_rd      = qib_7322_tempsense_rd;
#ifdef CONFIG_INFINIBAND_QIB_DCA
        dd->f_notify_dca        = qib_7322_notify_dca;
#endif
        /*
         * Do remaining PCIe setup and save PCIe values in dd.
         * Any error printing is already done by the init code.
         * On return, we have the chip mapped, but chip registers
         * are not set up until start of qib_init_7322_variables.
         */
        ret = qib_pcie_ddinit(dd, pdev, ent);
        if (ret < 0)
                goto bail_free;

        /* initialize chip-specific variables */
        ret = qib_init_7322_variables(dd);
        if (ret)
                goto bail_cleanup;

        /* mini-init or no ports: skip interrupt/DCA setup entirely */
        if (qib_mini_init || !dd->num_pports)
                goto bail;

        /*
         * Determine number of vectors we want; depends on port count
         * and number of configured kernel receive queues actually used.
         * Should also depend on whether sdma is enabled or not, but
         * that's such a rare testing case it's not worth worrying about.
         */
        tabsize = dd->first_user_ctxt + ARRAY_SIZE(irq_table);
        for (i = 0; i < tabsize; i++)
                if ((i < ARRAY_SIZE(irq_table) &&
                     irq_table[i].port <= dd->num_pports) ||
                    (i >= ARRAY_SIZE(irq_table) &&
                     dd->rcd[i - ARRAY_SIZE(irq_table)]))
                        actual_cnt++;
        /* reduce by ctxt's < 2 */
        if (qib_krcvq01_no_msi)
                actual_cnt -= dd->num_pports;

        tabsize = actual_cnt;
        dd->cspec->msix_entries = kcalloc(tabsize,
                                          sizeof(struct qib_msix_entry),
                                          GFP_KERNEL);
        /* on allocation failure fall back to requesting zero MSI-X vectors */
        if (!dd->cspec->msix_entries)
                tabsize = 0;

        if (qib_pcie_params(dd, 8, &tabsize))
                qib_dev_err(dd,
                        "Failed to setup PCIe or interrupts; continuing anyway\n");
        /* may be less than we wanted, if not enough available */
        dd->cspec->num_msix_entries = tabsize;

        /* setup interrupt handler */
        qib_setup_7322_interrupt(dd, 1);

        /* clear diagctrl register, in case diags were running and crashed */
        qib_write_kreg(dd, kr_hwdiagctrl, 0);
#ifdef CONFIG_INFINIBAND_QIB_DCA
        if (!dca_add_requester(&pdev->dev)) {
                qib_devinfo(dd->pcidev, "DCA enabled\n");
                dd->flags |= QIB_DCA_ENABLED;
                qib_setup_dca(dd);
        }
#endif
        goto bail;

bail_cleanup:
        qib_pcie_ddcleanup(dd);
bail_free:
        qib_free_devdata(dd);
        dd = ERR_PTR(ret);
bail:
        return dd;
}
7326 
7327 /*
 * Set the table entry at the specified index from the table specified.
7329  * There are 3 * TXDDS_TABLE_SZ entries in all per port, with the first
7330  * TXDDS_TABLE_SZ for SDR, the next for DDR, and the last for QDR.
7331  * 'idx' below addresses the correct entry, while its 4 LSBs select the
7332  * corresponding entry (one of TXDDS_TABLE_SZ) from the selected table.
7333  */
7334 #define DDS_ENT_AMP_LSB 14
7335 #define DDS_ENT_MAIN_LSB 9
7336 #define DDS_ENT_POST_LSB 5
7337 #define DDS_ENT_PRE_XTRA_LSB 3
7338 #define DDS_ENT_PRE_LSB 0
7339 
7340 /*
7341  * Set one entry in the TxDDS table for spec'd port
7342  * ridx picks one of the entries, while tp points
7343  * to the appropriate table entry.
7344  */
7345 static void set_txdds(struct qib_pportdata *ppd, int ridx,
7346                       const struct txdds_ent *tp)
7347 {
7348         struct qib_devdata *dd = ppd->dd;
7349         u32 pack_ent;
7350         int regidx;
7351 
7352         /* Get correct offset in chip-space, and in source table */
7353         regidx = KREG_IBPORT_IDX(IBSD_DDS_MAP_TABLE) + ridx;
7354         /*
7355          * We do not use qib_write_kreg_port() because it was intended
7356          * only for registers in the lower "port specific" pages.
7357          * So do index calculation  by hand.
7358          */
7359         if (ppd->hw_pidx)
7360                 regidx += (dd->palign / sizeof(u64));
7361 
7362         pack_ent = tp->amp << DDS_ENT_AMP_LSB;
7363         pack_ent |= tp->main << DDS_ENT_MAIN_LSB;
7364         pack_ent |= tp->pre << DDS_ENT_PRE_LSB;
7365         pack_ent |= tp->post << DDS_ENT_POST_LSB;
7366         qib_write_kreg(dd, regidx, pack_ent);
7367         /* Prevent back-to-back writes by hitting scratch */
7368         qib_write_kreg(ppd->dd, kr_scratch, 0);
7369 }
7370 
/*
 * Table of known cables, matched by OUI and (optionally) part number.
 * Each entry supplies pre-characterized SDR, DDR, and QDR TxDDS
 * settings; a NULL partnum acts as a per-vendor default.
 */
static const struct vendor_txdds_ent vendor_txdds[] = {
        { /* Amphenol 1m 30awg NoEq */
                { 0x41, 0x50, 0x48 }, "584470002       ",
                { 10,  0,  0,  5 }, { 10,  0,  0,  9 }, {  7,  1,  0, 13 },
        },
        { /* Amphenol 3m 28awg NoEq */
                { 0x41, 0x50, 0x48 }, "584470004       ",
                {  0,  0,  0,  8 }, {  0,  0,  0, 11 }, {  0,  1,  7, 15 },
        },
        { /* Finisar 3m OM2 Optical */
                { 0x00, 0x90, 0x65 }, "FCBG410QB1C03-QL",
                {  0,  0,  0,  3 }, {  0,  0,  0,  4 }, {  0,  0,  0, 13 },
        },
        { /* Finisar 30m OM2 Optical */
                { 0x00, 0x90, 0x65 }, "FCBG410QB1C30-QL",
                {  0,  0,  0,  1 }, {  0,  0,  0,  5 }, {  0,  0,  0, 11 },
        },
        { /* Finisar Default OM2 Optical */
                { 0x00, 0x90, 0x65 }, NULL,
                {  0,  0,  0,  2 }, {  0,  0,  0,  5 }, {  0,  0,  0, 12 },
        },
        { /* Gore 1m 30awg NoEq */
                { 0x00, 0x21, 0x77 }, "QSN3300-1       ",
                {  0,  0,  0,  6 }, {  0,  0,  0,  9 }, {  0,  1,  0, 15 },
        },
        { /* Gore 2m 30awg NoEq */
                { 0x00, 0x21, 0x77 }, "QSN3300-2       ",
                {  0,  0,  0,  8 }, {  0,  0,  0, 10 }, {  0,  1,  7, 15 },
        },
        { /* Gore 1m 28awg NoEq */
                { 0x00, 0x21, 0x77 }, "QSN3800-1       ",
                {  0,  0,  0,  6 }, {  0,  0,  0,  8 }, {  0,  1,  0, 15 },
        },
        { /* Gore 3m 28awg NoEq */
                { 0x00, 0x21, 0x77 }, "QSN3800-3       ",
                {  0,  0,  0,  9 }, {  0,  0,  0, 13 }, {  0,  1,  7, 15 },
        },
        { /* Gore 5m 24awg Eq */
                { 0x00, 0x21, 0x77 }, "QSN7000-5       ",
                {  0,  0,  0,  7 }, {  0,  0,  0,  9 }, {  0,  1,  3, 15 },
        },
        { /* Gore 7m 24awg Eq */
                { 0x00, 0x21, 0x77 }, "QSN7000-7       ",
                {  0,  0,  0,  9 }, {  0,  0,  0, 11 }, {  0,  2,  6, 15 },
        },
        { /* Gore 5m 26awg Eq */
                { 0x00, 0x21, 0x77 }, "QSN7600-5       ",
                {  0,  0,  0,  8 }, {  0,  0,  0, 11 }, {  0,  1,  9, 13 },
        },
        { /* Gore 7m 26awg Eq */
                { 0x00, 0x21, 0x77 }, "QSN7600-7       ",
                {  0,  0,  0,  8 }, {  0,  0,  0, 11 }, {  10,  1,  8, 15 },
        },
        { /* Intersil 12m 24awg Active */
                { 0x00, 0x30, 0xB4 }, "QLX4000CQSFP1224",
                {  0,  0,  0,  2 }, {  0,  0,  0,  5 }, {  0,  3,  0,  9 },
        },
        { /* Intersil 10m 28awg Active */
                { 0x00, 0x30, 0xB4 }, "QLX4000CQSFP1028",
                {  0,  0,  0,  6 }, {  0,  0,  0,  4 }, {  0,  2,  0,  2 },
        },
        { /* Intersil 7m 30awg Active */
                { 0x00, 0x30, 0xB4 }, "QLX4000CQSFP0730",
                {  0,  0,  0,  6 }, {  0,  0,  0,  4 }, {  0,  1,  0,  3 },
        },
        { /* Intersil 5m 32awg Active */
                { 0x00, 0x30, 0xB4 }, "QLX4000CQSFP0532",
                {  0,  0,  0,  6 }, {  0,  0,  0,  6 }, {  0,  2,  0,  8 },
        },
        { /* Intersil Default Active */
                { 0x00, 0x30, 0xB4 }, NULL,
                {  0,  0,  0,  6 }, {  0,  0,  0,  5 }, {  0,  2,  0,  5 },
        },
        { /* Luxtera 20m Active Optical */
                { 0x00, 0x25, 0x63 }, NULL,
                {  0,  0,  0,  5 }, {  0,  0,  0,  8 }, {  0,  2,  0,  12 },
        },
        { /* Molex 1M Cu loopback */
                { 0x00, 0x09, 0x3A }, "74763-0025      ",
                {  2,  2,  6, 15 }, {  2,  2,  6, 15 }, {  2,  2,  6, 15 },
        },
        { /* Molex 2m 28awg NoEq */
                { 0x00, 0x09, 0x3A }, "74757-2201      ",
                {  0,  0,  0,  6 }, {  0,  0,  0,  9 }, {  0,  1,  1, 15 },
        },
};
7457 
/*
 * SDR TxDDS settings indexed by attenuation; entry 0 is the loopback
 * entry, entries 1..15 cover 2 dB through 16 dB.
 */
static const struct txdds_ent txdds_sdr[TXDDS_TABLE_SZ] = {
        /* amp, pre, main, post */
        {  2, 2, 15,  6 },      /* Loopback */
        {  0, 0,  0,  1 },      /*  2 dB */
        {  0, 0,  0,  2 },      /*  3 dB */
        {  0, 0,  0,  3 },      /*  4 dB */
        {  0, 0,  0,  4 },      /*  5 dB */
        {  0, 0,  0,  5 },      /*  6 dB */
        {  0, 0,  0,  6 },      /*  7 dB */
        {  0, 0,  0,  7 },      /*  8 dB */
        {  0, 0,  0,  8 },      /*  9 dB */
        {  0, 0,  0,  9 },      /* 10 dB */
        {  0, 0,  0, 10 },      /* 11 dB */
        {  0, 0,  0, 11 },      /* 12 dB */
        {  0, 0,  0, 12 },      /* 13 dB */
        {  0, 0,  0, 13 },      /* 14 dB */
        {  0, 0,  0, 14 },      /* 15 dB */
        {  0, 0,  0, 15 },      /* 16 dB */
};
7477 
/*
 * DDR TxDDS settings indexed by attenuation; same layout as txdds_sdr.
 */
static const struct txdds_ent txdds_ddr[TXDDS_TABLE_SZ] = {
        /* amp, pre, main, post */
        {  2, 2, 15,  6 },      /* Loopback */
        {  0, 0,  0,  8 },      /*  2 dB */
        {  0, 0,  0,  8 },      /*  3 dB */
        {  0, 0,  0,  9 },      /*  4 dB */
        {  0, 0,  0,  9 },      /*  5 dB */
        {  0, 0,  0, 10 },      /*  6 dB */
        {  0, 0,  0, 10 },      /*  7 dB */
        {  0, 0,  0, 11 },      /*  8 dB */
        {  0, 0,  0, 11 },      /*  9 dB */
        {  0, 0,  0, 12 },      /* 10 dB */
        {  0, 0,  0, 12 },      /* 11 dB */
        {  0, 0,  0, 13 },      /* 12 dB */
        {  0, 0,  0, 13 },      /* 13 dB */
        {  0, 0,  0, 14 },      /* 14 dB */
        {  0, 0,  0, 14 },      /* 15 dB */
        {  0, 0,  0, 15 },      /* 16 dB */
};
7497 
/*
 * QDR TxDDS settings indexed by attenuation; same layout as txdds_sdr.
 */
static const struct txdds_ent txdds_qdr[TXDDS_TABLE_SZ] = {
        /* amp, pre, main, post */
        {  2, 2, 15,  6 },      /* Loopback */
        {  0, 1,  0,  7 },      /*  2 dB (also QMH7342) */
        {  0, 1,  0,  9 },      /*  3 dB (also QMH7342) */
        {  0, 1,  0, 11 },      /*  4 dB */
        {  0, 1,  0, 13 },      /*  5 dB */
        {  0, 1,  0, 15 },      /*  6 dB */
        {  0, 1,  3, 15 },      /*  7 dB */
        {  0, 1,  7, 15 },      /*  8 dB */
        {  0, 1,  7, 15 },      /*  9 dB */
        {  0, 1,  8, 15 },      /* 10 dB */
        {  0, 1,  9, 15 },      /* 11 dB */
        {  0, 1, 10, 15 },      /* 12 dB */
        {  0, 2,  6, 15 },      /* 13 dB */
        {  0, 2,  7, 15 },      /* 14 dB */
        {  0, 2,  8, 15 },      /* 15 dB */
        {  0, 2,  9, 15 },      /* 16 dB */
};
7517 
7518 /*
7519  * extra entries for use with txselect, for indices >= TXDDS_TABLE_SZ.
7520  * These are mostly used for mez cards going through connectors
7521  * and backplane traces, but can be used to add other "unusual"
7522  * table values as well.
7523  */
/* SDR "extra" settings, selected via txselect indices >= TXDDS_TABLE_SZ */
static const struct txdds_ent txdds_extra_sdr[TXDDS_EXTRA_SZ] = {
        /* amp, pre, main, post */
        {  0, 0, 0,  1 },       /* QMH7342 backplane settings */
        {  0, 0, 0,  1 },       /* QMH7342 backplane settings */
        {  0, 0, 0,  2 },       /* QMH7342 backplane settings */
        {  0, 0, 0,  2 },       /* QMH7342 backplane settings */
        {  0, 0, 0,  3 },       /* QMH7342 backplane settings */
        {  0, 0, 0,  4 },       /* QMH7342 backplane settings */
        {  0, 1, 4, 15 },       /* QME7342 backplane settings 1.0 */
        {  0, 1, 3, 15 },       /* QME7342 backplane settings 1.0 */
        {  0, 1, 0, 12 },       /* QME7342 backplane settings 1.0 */
        {  0, 1, 0, 11 },       /* QME7342 backplane settings 1.0 */
        {  0, 1, 0,  9 },       /* QME7342 backplane settings 1.0 */
        {  0, 1, 0, 14 },       /* QME7342 backplane settings 1.0 */
        {  0, 1, 2, 15 },       /* QME7342 backplane settings 1.0 */
        {  0, 1, 0, 11 },       /* QME7342 backplane settings 1.1 */
        {  0, 1, 0,  7 },       /* QME7342 backplane settings 1.1 */
        {  0, 1, 0,  9 },       /* QME7342 backplane settings 1.1 */
        {  0, 1, 0,  6 },       /* QME7342 backplane settings 1.1 */
        {  0, 1, 0,  8 },       /* QME7342 backplane settings 1.1 */
};
7545 
/* DDR "extra" settings, parallel to txdds_extra_sdr */
static const struct txdds_ent txdds_extra_ddr[TXDDS_EXTRA_SZ] = {
        /* amp, pre, main, post */
        {  0, 0, 0,  7 },       /* QMH7342 backplane settings */
        {  0, 0, 0,  7 },       /* QMH7342 backplane settings */
        {  0, 0, 0,  8 },       /* QMH7342 backplane settings */
        {  0, 0, 0,  8 },       /* QMH7342 backplane settings */
        {  0, 0, 0,  9 },       /* QMH7342 backplane settings */
        {  0, 0, 0, 10 },       /* QMH7342 backplane settings */
        {  0, 1, 4, 15 },       /* QME7342 backplane settings 1.0 */
        {  0, 1, 3, 15 },       /* QME7342 backplane settings 1.0 */
        {  0, 1, 0, 12 },       /* QME7342 backplane settings 1.0 */
        {  0, 1, 0, 11 },       /* QME7342 backplane settings 1.0 */
        {  0, 1, 0,  9 },       /* QME7342 backplane settings 1.0 */
        {  0, 1, 0, 14 },       /* QME7342 backplane settings 1.0 */
        {  0, 1, 2, 15 },       /* QME7342 backplane settings 1.0 */
        {  0, 1, 0, 11 },       /* QME7342 backplane settings 1.1 */
        {  0, 1, 0,  7 },       /* QME7342 backplane settings 1.1 */
        {  0, 1, 0,  9 },       /* QME7342 backplane settings 1.1 */
        {  0, 1, 0,  6 },       /* QME7342 backplane settings 1.1 */
        {  0, 1, 0,  8 },       /* QME7342 backplane settings 1.1 */
};
7567 
/* QDR "extra" settings, parallel to txdds_extra_sdr */
static const struct txdds_ent txdds_extra_qdr[TXDDS_EXTRA_SZ] = {
        /* amp, pre, main, post */
        {  0, 1,  0,  4 },      /* QMH7342 backplane settings */
        {  0, 1,  0,  5 },      /* QMH7342 backplane settings */
        {  0, 1,  0,  6 },      /* QMH7342 backplane settings */
        {  0, 1,  0,  8 },      /* QMH7342 backplane settings */
        {  0, 1,  0, 10 },      /* QMH7342 backplane settings */
        {  0, 1,  0, 12 },      /* QMH7342 backplane settings */
        {  0, 1,  4, 15 },      /* QME7342 backplane settings 1.0 */
        {  0, 1,  3, 15 },      /* QME7342 backplane settings 1.0 */
        {  0, 1,  0, 12 },      /* QME7342 backplane settings 1.0 */
        {  0, 1,  0, 11 },      /* QME7342 backplane settings 1.0 */
        {  0, 1,  0,  9 },      /* QME7342 backplane settings 1.0 */
        {  0, 1,  0, 14 },      /* QME7342 backplane settings 1.0 */
        {  0, 1,  2, 15 },      /* QME7342 backplane settings 1.0 */
        {  0, 1,  0, 11 },      /* QME7342 backplane settings 1.1 */
        {  0, 1,  0,  7 },      /* QME7342 backplane settings 1.1 */
        {  0, 1,  0,  9 },      /* QME7342 backplane settings 1.1 */
        {  0, 1,  0,  6 },      /* QME7342 backplane settings 1.1 */
        {  0, 1,  0,  8 },      /* QME7342 backplane settings 1.1 */
};
7589 
/* Manufacturing-test settings for QME/QMH mez cards (highest txselect range) */
static const struct txdds_ent txdds_extra_mfg[TXDDS_MFG_SZ] = {
        /* amp, pre, main, post */
        { 0, 0, 0, 0 },         /* QME7342 mfg settings */
        { 0, 0, 0, 6 },         /* QME7342 P2 mfg settings */
};
7595 
7596 static const struct txdds_ent *get_atten_table(const struct txdds_ent *txdds,
7597                                                unsigned atten)
7598 {
7599         /*
7600          * The attenuation table starts at 2dB for entry 1,
7601          * with entry 0 being the loopback entry.
7602          */
7603         if (atten <= 2)
7604                 atten = 1;
7605         else if (atten > TXDDS_TABLE_SZ)
7606                 atten = TXDDS_TABLE_SZ - 1;
7607         else
7608                 atten--;
7609         return txdds + atten;
7610 }
7611 
7612 /*
7613  * if override is set, the module parameter txselect has a value
7614  * for this specific port, so use it, rather than our normal mechanism.
7615  */
/*
 * find_best_ent - pick the best SDR/DDR/QDR TxDDS entries for a port
 * @ppd: the port
 * @sdr_dds/@ddr_dds/@qdr_dds: out-pointers filled with the chosen entries
 * @override: non-zero when the txselect module parameter supplies the
 *	      value for this port; skips cable-based selection entirely
 *
 * Selection order: known-cable table, then active-cable board
 * attenuation, then cable-EEPROM attenuation, then the txselect
 * index into the main/extra/mfg tables, with a final fallback.
 */
static void find_best_ent(struct qib_pportdata *ppd,
                          const struct txdds_ent **sdr_dds,
                          const struct txdds_ent **ddr_dds,
                          const struct txdds_ent **qdr_dds, int override)
{
        struct qib_qsfp_cache *qd = &ppd->cpspec->qsfp_data.cache;
        int idx;

        /* Search table of known cables */
        for (idx = 0; !override && idx < ARRAY_SIZE(vendor_txdds); ++idx) {
                const struct vendor_txdds_ent *v = vendor_txdds + idx;

                /* NULL partnum in the table matches any part from that OUI */
                if (!memcmp(v->oui, qd->oui, QSFP_VOUI_LEN) &&
                    (!v->partnum ||
                     !memcmp(v->partnum, qd->partnum, QSFP_PN_LEN))) {
                        *sdr_dds = &v->sdr;
                        *ddr_dds = &v->ddr;
                        *qdr_dds = &v->qdr;
                        return;
                }
        }

        /* Active cables don't have attenuation so we only set SERDES
         * settings to account for the attenuation of the board traces. */
        if (!override && QSFP_IS_ACTIVE(qd->tech)) {
                *sdr_dds = txdds_sdr + ppd->dd->board_atten;
                *ddr_dds = txdds_ddr + ppd->dd->board_atten;
                *qdr_dds = txdds_qdr + ppd->dd->board_atten;
                return;
        }

        /* atten[0] covers SDR/DDR, atten[1] covers QDR */
        if (!override && QSFP_HAS_ATTEN(qd->tech) && (qd->atten[0] ||
                                                      qd->atten[1])) {
                *sdr_dds = get_atten_table(txdds_sdr, qd->atten[0]);
                *ddr_dds = get_atten_table(txdds_ddr, qd->atten[0]);
                *qdr_dds = get_atten_table(txdds_qdr, qd->atten[1]);
                return;
        } else if (ppd->cpspec->no_eep < TXDDS_TABLE_SZ) {
                /*
                 * If we have no (or incomplete) data from the cable
                 * EEPROM, or no QSFP, or override is set, use the
                 * module parameter value to index into the attenuation
                 * table.
                 */
                idx = ppd->cpspec->no_eep;
                *sdr_dds = &txdds_sdr[idx];
                *ddr_dds = &txdds_ddr[idx];
                *qdr_dds = &txdds_qdr[idx];
        } else if (ppd->cpspec->no_eep < (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ)) {
                /* similar to above, but index into the "extra" table. */
                idx = ppd->cpspec->no_eep - TXDDS_TABLE_SZ;
                *sdr_dds = &txdds_extra_sdr[idx];
                *ddr_dds = &txdds_extra_ddr[idx];
                *qdr_dds = &txdds_extra_qdr[idx];
        } else if ((IS_QME(ppd->dd) || IS_QMH(ppd->dd)) &&
                   ppd->cpspec->no_eep < (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ +
                                          TXDDS_MFG_SZ)) {
                /* mfg-test range: only valid on QME/QMH mez cards */
                idx = ppd->cpspec->no_eep - (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ);
                pr_info("IB%u:%u use idx %u into txdds_mfg\n",
                        ppd->dd->unit, ppd->port, idx);
                *sdr_dds = &txdds_extra_mfg[idx];
                *ddr_dds = &txdds_extra_mfg[idx];
                *qdr_dds = &txdds_extra_mfg[idx];
        } else {
                /* this shouldn't happen, it's range checked */
                *sdr_dds = txdds_sdr + qib_long_atten;
                *ddr_dds = txdds_ddr + qib_long_atten;
                *qdr_dds = txdds_qdr + qib_long_atten;
        }
}
7686 
7687 static void init_txdds_table(struct qib_pportdata *ppd, int override)
7688 {
7689         const struct txdds_ent *sdr_dds, *ddr_dds, *qdr_dds;
7690         struct txdds_ent *dds;
7691         int idx;
7692         int single_ent = 0;
7693 
7694         find_best_ent(ppd, &sdr_dds, &ddr_dds, &qdr_dds, override);
7695 
7696         /* for mez cards or override, use the selected value for all entries */
7697         if (!(ppd->dd->flags & QIB_HAS_QSFP) || override)
7698                 single_ent = 1;
7699 
7700         /* Fill in the first entry with the best entry found. */
7701         set_txdds(ppd, 0, sdr_dds);
7702         set_txdds(ppd, TXDDS_TABLE_SZ, ddr_dds);
7703         set_txdds(ppd, 2 * TXDDS_TABLE_SZ, qdr_dds);
7704         if (ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED |
7705                 QIBL_LINKACTIVE)) {
7706                 dds = (struct txdds_ent *)(ppd->link_speed_active ==
7707                                            QIB_IB_QDR ?  qdr_dds :
7708                                            (ppd->link_speed_active ==
7709                                             QIB_IB_DDR ? ddr_dds : sdr_dds));
7710                 write_tx_serdes_param(ppd, dds);
7711         }
7712 
7713         /* Fill in the remaining entries with the default table values. */
7714         for (idx = 1; idx < ARRAY_SIZE(txdds_sdr); ++idx) {
7715                 set_txdds(ppd, idx, single_ent ? sdr_dds : txdds_sdr + idx);
7716                 set_txdds(ppd, idx + TXDDS_TABLE_SZ,
7717                           single_ent ? ddr_dds : txdds_ddr + idx);
7718                 set_txdds(ppd, idx + 2 * TXDDS_TABLE_SZ,
7719                           single_ent ? qdr_dds : txdds_qdr + idx);
7720         }
7721 }
7722 
7723 #define KR_AHB_ACC KREG_IDX(ahb_access_ctrl)
7724 #define KR_AHB_TRANS KREG_IDX(ahb_transaction_reg)
7725 #define AHB_TRANS_RDY SYM_MASK(ahb_transaction_reg, ahb_rdy)
7726 #define AHB_ADDR_LSB SYM_LSB(ahb_transaction_reg, ahb_address)
7727 #define AHB_DATA_LSB SYM_LSB(ahb_transaction_reg, ahb_data)
7728 #define AHB_WR SYM_MASK(ahb_transaction_reg, write_not_read)
7729 #define AHB_TRANS_TRIES 10
7730 
7731 /*
7732  * The chan argument is 0=chan0, 1=chan1, 2=pll, 3=chan2, 4=chan4,
7733  * 5=subsystem which is why most calls have "chan + chan >> 1"
7734  * for the channel argument.
7735  */
7736 static u32 ahb_mod(struct qib_devdata *dd, int quad, int chan, int addr,
7737                     u32 data, u32 mask)
7738 {
7739         u32 rd_data, wr_data, sz_mask;
7740         u64 trans, acc, prev_acc;
7741         u32 ret = 0xBAD0BAD;
7742         int tries;
7743 
7744         prev_acc = qib_read_kreg64(dd, KR_AHB_ACC);
7745         /* From this point on, make sure we return access */
7746         acc = (quad << 1) | 1;
7747         qib_write_kreg(dd, KR_AHB_ACC, acc);
7748 
7749         for (tries = 1; tries < AHB_TRANS_TRIES; ++tries) {
7750                 trans = qib_read_kreg64(dd, KR_AHB_TRANS);
7751                 if (trans & AHB_TRANS_RDY)
7752                         break;
7753         }
7754         if (tries >= AHB_TRANS_TRIES) {
7755                 qib_dev_err(dd, "No ahb_rdy in %d tries\n", AHB_TRANS_TRIES);
7756                 goto bail;
7757         }
7758 
7759         /* If mask is not all 1s, we need to read, but different SerDes
7760          * entities have different sizes
7761          */
7762         sz_mask = (1UL << ((quad == 1) ? 32 : 16)) - 1;
7763         wr_data = data & mask & sz_mask;
7764         if ((~mask & sz_mask) != 0) {
7765                 trans = ((chan << 6) | addr) << (AHB_ADDR_LSB + 1);
7766                 qib_write_kreg(dd, KR_AHB_TRANS, trans);
7767 
7768                 for (tries = 1; tries < AHB_TRANS_TRIES; ++tries) {
7769                         trans = qib_read_kreg64(dd, KR_AHB_TRANS);
7770                         if (trans & AHB_TRANS_RDY)
7771                                 break;
7772                 }
7773                 if (tries >= AHB_TRANS_TRIES) {
7774                         qib_dev_err(dd, "No Rd ahb_rdy in %d tries\n",
7775                                     AHB_TRANS_TRIES);
7776                         goto bail;
7777                 }
7778                 /* Re-read in case host split reads and read data first */
7779                 trans = qib_read_kreg64(dd, KR_AHB_TRANS);
7780                 rd_data = (uint32_t)(trans >> AHB_DATA_LSB);
7781                 wr_data |= (rd_data & ~mask & sz_mask);
7782         }
7783 
7784         /* If mask is not zero, we need to write. */
7785         if (mask & sz_mask) {
7786                 trans = ((chan << 6) | addr) << (AHB_ADDR_LSB + 1);
7787                 trans |= ((uint64_t)wr_data << AHB_DATA_LSB);
7788                 trans |= AHB_WR;
7789                 qib_write_kreg(dd, KR_AHB_TRANS, trans);
7790 
7791                 for (tries = 1; tries < AHB_TRANS_TRIES; ++tries) {
7792                         trans = qib_read_kreg64(dd, KR_AHB_TRANS);
7793                         if (trans & AHB_TRANS_RDY)
7794                                 break;
7795                 }
7796                 if (tries >= AHB_TRANS_TRIES) {
7797                         qib_dev_err(dd, "No Wr ahb_rdy in %d tries\n",
7798                                     AHB_TRANS_TRIES);
7799                         goto bail;
7800                 }
7801         }
7802         ret = wr_data;
7803 bail:
7804         qib_write_kreg(dd, KR_AHB_ACC, prev_acc);
7805         return ret;
7806 }
7807 
7808 static void ibsd_wr_allchans(struct qib_pportdata *ppd, int addr, unsigned data,
7809                              unsigned mask)
7810 {
7811         struct qib_devdata *dd = ppd->dd;
7812         int chan;
7813 
7814         for (chan = 0; chan < SERDES_CHANS; ++chan) {
7815                 ahb_mod(dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)), addr,
7816                         data, mask);
7817                 ahb_mod(dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)), addr,
7818                         0, 0);
7819         }
7820 }
7821 
7822 static void serdes_7322_los_enable(struct qib_pportdata *ppd, int enable)
7823 {
7824         u64 data = qib_read_kreg_port(ppd, krp_serdesctrl);
7825         u8 state = SYM_FIELD(data, IBSerdesCtrl_0, RXLOSEN);
7826 
7827         if (enable && !state) {
7828                 pr_info("IB%u:%u Turning LOS on\n",
7829                         ppd->dd->unit, ppd->port);
7830                 data |= SYM_MASK(IBSerdesCtrl_0, RXLOSEN);
7831         } else if (!enable && state) {
7832                 pr_info("IB%u:%u Turning LOS off\n",
7833                         ppd->dd->unit, ppd->port);
7834                 data &= ~SYM_MASK(IBSerdesCtrl_0, RXLOSEN);
7835         }
7836         qib_write_kreg_port(ppd, krp_serdesctrl, data);
7837 }
7838 
7839 static int serdes_7322_init(struct qib_pportdata *ppd)
7840 {
7841         int ret = 0;
7842 
7843         if (ppd->dd->cspec->r1)
7844                 ret = serdes_7322_init_old(ppd);
7845         else
7846                 ret = serdes_7322_init_new(ppd);
7847         return ret;
7848 }
7849 
/*
 * Legacy (rev1 chip) SerDes bringup sequence.
 * Loads the Tx DDS tables, clears any tx overrides left by an earlier
 * driver load, then patches SerDes analog/adaptation defaults to
 * values better suited to IB operation.  Always returns 0.
 */
static int serdes_7322_init_old(struct qib_pportdata *ppd)
{
	u32 le_val;

	/*
	 * Initialize the Tx DDS tables.  Also done every QSFP event,
	 * for adapters with QSFP
	 */
	init_txdds_table(ppd, 0);

	/* ensure no tx overrides from earlier driver loads */
	qib_write_kreg_port(ppd, krp_tx_deemph_override,
		SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
		reset_tx_deemphasis_override));

	/* Patch some SerDes defaults to "Better for IB" */
	/* Timing Loop Bandwidth: cdr_timing[11:9] = 0 */
	ibsd_wr_allchans(ppd, 2, 0, BMASK(11, 9));

	/* Termination: rxtermctrl_r2d addr 11 bits [12:11] = 1 */
	ibsd_wr_allchans(ppd, 11, (1 << 11), BMASK(12, 11));
	/* Enable LE2: rxle2en_r2a addr 13 bit [6] = 1 */
	ibsd_wr_allchans(ppd, 13, (1 << 6), (1 << 6));

	/* May be overridden in qsfp_7322_event */
	le_val = IS_QME(ppd->dd) ? LE2_QME : LE2_DEFAULT;
	ibsd_wr_allchans(ppd, 13, (le_val << 7), BMASK(9, 7));

	/* enable LE1 adaptation for all but QME, which is disabled */
	le_val = IS_QME(ppd->dd) ? 0 : 1;
	ibsd_wr_allchans(ppd, 13, (le_val << 5), (1 << 5));

	/* Clear cmode-override, may be set from older driver */
	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 0 << 14, 1 << 14);

	/* Timing Recovery: rxtapsel addr 5 bits [9:8] = 0 */
	ibsd_wr_allchans(ppd, 5, (0 << 8), BMASK(9, 8));

	/* setup LoS params; these are subsystem, so chan == 5 */
	/* LoS filter threshold_count on, ch 0-3, set to 8 */
	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 5, 8 << 11, BMASK(14, 11));
	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 8 << 4, BMASK(7, 4));
	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 8, 8 << 11, BMASK(14, 11));
	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 8 << 4, BMASK(7, 4));

	/* LoS filter threshold_count off, ch 0-3, set to 4 */
	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 6, 4 << 0, BMASK(3, 0));
	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 4 << 8, BMASK(11, 8));
	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 4 << 0, BMASK(3, 0));
	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 4 << 8, BMASK(11, 8));

	/* LoS filter select enabled */
	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 1 << 15, 1 << 15);

	/* LoS target data:  SDR=4, DDR=2, QDR=1 */
	ibsd_wr_allchans(ppd, 14, (1 << 3), BMASK(5, 3)); /* QDR */
	ibsd_wr_allchans(ppd, 20, (2 << 10), BMASK(12, 10)); /* DDR */
	ibsd_wr_allchans(ppd, 20, (4 << 13), BMASK(15, 13)); /* SDR */

	serdes_7322_los_enable(ppd, 1);

	/* rxbistena; set 0 to avoid effects of it switch later */
	ibsd_wr_allchans(ppd, 9, 0 << 15, 1 << 15);

	/* Configure 4 DFE taps, and only they adapt */
	ibsd_wr_allchans(ppd, 16, 0 << 0, BMASK(1, 0));

	/* gain hi stop 32 (22) (6:1) lo stop 7 (10:7) target 22 (13) (15:11) */
	le_val = (ppd->dd->cspec->r1 || IS_QME(ppd->dd)) ? 0xb6c0 : 0x6bac;
	ibsd_wr_allchans(ppd, 21, le_val, 0xfffe);

	/*
	 * Set receive adaptation mode.  SDR and DDR adaptation are
	 * always on, and QDR is initially enabled; later disabled.
	 */
	qib_write_kreg_port(ppd, krp_static_adapt_dis(0), 0ULL);
	qib_write_kreg_port(ppd, krp_static_adapt_dis(1), 0ULL);
	qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
			    ppd->dd->cspec->r1 ?
			    QDR_STATIC_ADAPT_DOWN_R1 : QDR_STATIC_ADAPT_DOWN);
	ppd->cpspec->qdr_dfe_on = 1;

	/* FLoop LOS gate: PPM filter  enabled */
	ibsd_wr_allchans(ppd, 38, 0 << 10, 1 << 10);

	/* rx offset center enabled */
	ibsd_wr_allchans(ppd, 12, 1 << 4, 1 << 4);

	if (!ppd->dd->cspec->r1) {
		ibsd_wr_allchans(ppd, 12, 1 << 12, 1 << 12);
		ibsd_wr_allchans(ppd, 12, 2 << 8, 0x0f << 8);
	}

	/* Set the frequency loop bandwidth to 15 */
	ibsd_wr_allchans(ppd, 2, 15 << 5, BMASK(8, 5));

	return 0;
}
7948 
/*
 * SerDes bringup for non-rev1 chips, following the LSI-suggested
 * sequence: quiesce the adaptation loops, reset and calibrate the
 * receivers (polling up to 500ms for per-channel calibration done),
 * then re-enable the loops and load the Tx DDS tables.
 * Always returns 0; calibration failure is logged, not fatal.
 */
static int serdes_7322_init_new(struct qib_pportdata *ppd)
{
	unsigned long tend;
	u32 le_val, rxcaldone;
	int chan, chan_done = (1 << SERDES_CHANS) - 1;

	/* Clear cmode-override, may be set from older driver */
	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 0 << 14, 1 << 14);

	/* ensure no tx overrides from earlier driver loads */
	qib_write_kreg_port(ppd, krp_tx_deemph_override,
		SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
		reset_tx_deemphasis_override));

	/* START OF LSI SUGGESTED SERDES BRINGUP */
	/* Reset - Calibration Setup */
	/*       Stop DFE adaptation */
	ibsd_wr_allchans(ppd, 1, 0, BMASK(9, 1));
	/*       Disable LE1 */
	ibsd_wr_allchans(ppd, 13, 0, BMASK(5, 5));
	/*       Disable autoadapt for LE1 */
	ibsd_wr_allchans(ppd, 1, 0, BMASK(15, 15));
	/*       Disable LE2 */
	ibsd_wr_allchans(ppd, 13, 0, BMASK(6, 6));
	/*       Disable VGA */
	ibsd_wr_allchans(ppd, 5, 0, BMASK(0, 0));
	/*       Disable AFE Offset Cancel */
	ibsd_wr_allchans(ppd, 12, 0, BMASK(12, 12));
	/*       Disable Timing Loop */
	ibsd_wr_allchans(ppd, 2, 0, BMASK(3, 3));
	/*       Disable Frequency Loop */
	ibsd_wr_allchans(ppd, 2, 0, BMASK(4, 4));
	/*       Disable Baseline Wander Correction */
	ibsd_wr_allchans(ppd, 13, 0, BMASK(13, 13));
	/*       Disable RX Calibration */
	ibsd_wr_allchans(ppd, 4, 0, BMASK(10, 10));
	/*       Disable RX Offset Calibration */
	ibsd_wr_allchans(ppd, 12, 0, BMASK(4, 4));
	/*       Select BB CDR */
	ibsd_wr_allchans(ppd, 2, (1 << 15), BMASK(15, 15));
	/*       CDR Step Size */
	ibsd_wr_allchans(ppd, 5, 0, BMASK(9, 8));
	/*       Enable phase Calibration */
	ibsd_wr_allchans(ppd, 12, (1 << 5), BMASK(5, 5));
	/*       DFE Bandwidth [2:14-12] */
	ibsd_wr_allchans(ppd, 2, (4 << 12), BMASK(14, 12));
	/*       DFE Config (4 taps only) */
	ibsd_wr_allchans(ppd, 16, 0, BMASK(1, 0));
	/*       Gain Loop Bandwidth */
	if (!ppd->dd->cspec->r1) {
		ibsd_wr_allchans(ppd, 12, 1 << 12, BMASK(12, 12));
		ibsd_wr_allchans(ppd, 12, 2 << 8, BMASK(11, 8));
	} else {
		ibsd_wr_allchans(ppd, 19, (3 << 11), BMASK(13, 11));
	}
	/*       Baseline Wander Correction Gain [13:4-0] (leave as default) */
	/*       Baseline Wander Correction Gain [3:7-5] (leave as default) */
	/*       Data Rate Select [5:7-6] (leave as default) */
	/*       RX Parallel Word Width [3:10-8] (leave as default) */

	/* RX RESET */
	/*       Single- or Multi-channel reset */
	/*       RX Analog reset */
	/*       RX Digital reset */
	ibsd_wr_allchans(ppd, 0, 0, BMASK(15, 13));
	msleep(20);
	/*       RX Analog reset */
	ibsd_wr_allchans(ppd, 0, (1 << 14), BMASK(14, 14));
	msleep(20);
	/*       RX Digital reset */
	ibsd_wr_allchans(ppd, 0, (1 << 13), BMASK(13, 13));
	msleep(20);

	/* setup LoS params; these are subsystem, so chan == 5 */
	/* LoS filter threshold_count on, ch 0-3, set to 8 */
	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 5, 8 << 11, BMASK(14, 11));
	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 8 << 4, BMASK(7, 4));
	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 8, 8 << 11, BMASK(14, 11));
	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 8 << 4, BMASK(7, 4));

	/* LoS filter threshold_count off, ch 0-3, set to 4 */
	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 6, 4 << 0, BMASK(3, 0));
	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 4 << 8, BMASK(11, 8));
	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 4 << 0, BMASK(3, 0));
	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 4 << 8, BMASK(11, 8));

	/* LoS filter select enabled */
	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 1 << 15, 1 << 15);

	/* LoS target data:  SDR=4, DDR=2, QDR=1 */
	ibsd_wr_allchans(ppd, 14, (1 << 3), BMASK(5, 3)); /* QDR */
	ibsd_wr_allchans(ppd, 20, (2 << 10), BMASK(12, 10)); /* DDR */
	ibsd_wr_allchans(ppd, 20, (4 << 13), BMASK(15, 13)); /* SDR */

	/* Turn on LOS on initial SERDES init */
	serdes_7322_los_enable(ppd, 1);
	/* FLoop LOS gate: PPM filter  enabled */
	ibsd_wr_allchans(ppd, 38, 0 << 10, 1 << 10);

	/* RX LATCH CALIBRATION */
	/*       Enable Eyefinder Phase Calibration latch */
	ibsd_wr_allchans(ppd, 15, 1, BMASK(0, 0));
	/*       Enable RX Offset Calibration latch */
	ibsd_wr_allchans(ppd, 12, (1 << 4), BMASK(4, 4));
	msleep(20);
	/*       Start Calibration */
	ibsd_wr_allchans(ppd, 4, (1 << 10), BMASK(10, 10));
	/* Poll each channel's cal-done bit (reg 25 bit 9) for up to .5s */
	tend = jiffies + msecs_to_jiffies(500);
	while (chan_done && !time_is_before_jiffies(tend)) {
		msleep(20);
		for (chan = 0; chan < SERDES_CHANS; ++chan) {
			rxcaldone = ahb_mod(ppd->dd, IBSD(ppd->hw_pidx),
					    (chan + (chan >> 1)),
					    25, 0, 0);
			if ((~rxcaldone & (u32)BMASK(9, 9)) == 0 &&
			    (~chan_done & (1 << chan)) == 0)
				chan_done &= ~(1 << chan);
		}
	}
	if (chan_done) {
		pr_info("Serdes %d calibration not done after .5 sec: 0x%x\n",
			 IBSD(ppd->hw_pidx), chan_done);
	} else {
		/* all channels done; report any that failed (bit 10) */
		for (chan = 0; chan < SERDES_CHANS; ++chan) {
			rxcaldone = ahb_mod(ppd->dd, IBSD(ppd->hw_pidx),
					    (chan + (chan >> 1)),
					    25, 0, 0);
			if ((~rxcaldone & (u32)BMASK(10, 10)) == 0)
				pr_info("Serdes %d chan %d calibration failed\n",
					IBSD(ppd->hw_pidx), chan);
		}
	}

	/*       Turn off Calibration */
	ibsd_wr_allchans(ppd, 4, 0, BMASK(10, 10));
	msleep(20);

	/* BRING RX UP */
	/*       Set LE2 value (May be overridden in qsfp_7322_event) */
	le_val = IS_QME(ppd->dd) ? LE2_QME : LE2_DEFAULT;
	ibsd_wr_allchans(ppd, 13, (le_val << 7), BMASK(9, 7));
	/*       Set LE2 Loop bandwidth */
	ibsd_wr_allchans(ppd, 3, (7 << 5), BMASK(7, 5));
	/*       Enable LE2 */
	ibsd_wr_allchans(ppd, 13, (1 << 6), BMASK(6, 6));
	msleep(20);
	/*       Enable H0 only */
	ibsd_wr_allchans(ppd, 1, 1, BMASK(9, 1));
	/* gain hi stop 32 (22) (6:1) lo stop 7 (10:7) target 22 (13) (15:11) */
	le_val = (ppd->dd->cspec->r1 || IS_QME(ppd->dd)) ? 0xb6c0 : 0x6bac;
	ibsd_wr_allchans(ppd, 21, le_val, 0xfffe);
	/*       Enable VGA */
	ibsd_wr_allchans(ppd, 5, 0, BMASK(0, 0));
	msleep(20);
	/*       Set Frequency Loop Bandwidth */
	ibsd_wr_allchans(ppd, 2, (15 << 5), BMASK(8, 5));
	/*       Enable Frequency Loop */
	ibsd_wr_allchans(ppd, 2, (1 << 4), BMASK(4, 4));
	/*       Set Timing Loop Bandwidth */
	ibsd_wr_allchans(ppd, 2, 0, BMASK(11, 9));
	/*       Enable Timing Loop */
	ibsd_wr_allchans(ppd, 2, (1 << 3), BMASK(3, 3));
	msleep(50);
	/*       Enable DFE
	 *       Set receive adaptation mode.  SDR and DDR adaptation are
	 *       always on, and QDR is initially enabled; later disabled.
	 */
	qib_write_kreg_port(ppd, krp_static_adapt_dis(0), 0ULL);
	qib_write_kreg_port(ppd, krp_static_adapt_dis(1), 0ULL);
	qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
			    ppd->dd->cspec->r1 ?
			    QDR_STATIC_ADAPT_DOWN_R1 : QDR_STATIC_ADAPT_DOWN);
	ppd->cpspec->qdr_dfe_on = 1;
	/*       Disable LE1  */
	ibsd_wr_allchans(ppd, 13, (0 << 5), (1 << 5));
	/*       Disable auto adapt for LE1 */
	ibsd_wr_allchans(ppd, 1, (0 << 15), BMASK(15, 15));
	msleep(20);
	/*       Enable AFE Offset Cancel */
	ibsd_wr_allchans(ppd, 12, (1 << 12), BMASK(12, 12));
	/*       Enable Baseline Wander Correction */
	ibsd_wr_allchans(ppd, 12, (1 << 13), BMASK(13, 13));
	/* Termination: rxtermctrl_r2d addr 11 bits [12:11] = 1 */
	ibsd_wr_allchans(ppd, 11, (1 << 11), BMASK(12, 11));
	/* VGA output common mode */
	ibsd_wr_allchans(ppd, 12, (3 << 2), BMASK(3, 2));

	/*
	 * Initialize the Tx DDS tables.  Also done every QSFP event,
	 * for adapters with QSFP
	 */
	init_txdds_table(ppd, 0);

	return 0;
}
8144 
8145 /* start adjust QMH serdes parameters */
8146 
8147 static void set_man_code(struct qib_pportdata *ppd, int chan, int code)
8148 {
8149         ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
8150                 9, code << 9, 0x3f << 9);
8151 }
8152 
8153 static void set_man_mode_h1(struct qib_pportdata *ppd, int chan,
8154         int enable, u32 tapenable)
8155 {
8156         if (enable)
8157                 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
8158                         1, 3 << 10, 0x1f << 10);
8159         else
8160                 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
8161                         1, 0, 0x1f << 10);
8162 }
8163 
8164 /* Set clock to 1, 0, 1, 0 */
8165 static void clock_man(struct qib_pportdata *ppd, int chan)
8166 {
8167         ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
8168                 4, 0x4000, 0x4000);
8169         ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
8170                 4, 0, 0x4000);
8171         ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
8172                 4, 0x4000, 0x4000);
8173         ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
8174                 4, 0, 0x4000);
8175 }
8176 
8177 /*
8178  * write the current Tx serdes pre,post,main,amp settings into the serdes.
8179  * The caller must pass the settings appropriate for the current speed,
8180  * or not care if they are correct for the current speed.
8181  */
/*
 * write_tx_serdes_param - push Tx deemphasis settings into the serdes
 * @ppd: port data
 * @txdds: amp/main/post/pre values for the current (or desired) speed
 *
 * Read-modify-writes the deemphasis-override register: clears the four
 * existing fields, merges in the new values (each range-limited by its
 * field mask), and sets the override-select bit so the values apply.
 */
static void write_tx_serdes_param(struct qib_pportdata *ppd,
				  struct txdds_ent *txdds)
{
	u64 deemph;

	deemph = qib_read_kreg_port(ppd, krp_tx_deemph_override);
	/* field names for amp, main, post, pre, respectively */
	deemph &= ~(SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txampcntl_d2a) |
		    SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txc0_ena) |
		    SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txcp1_ena) |
		    SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txcn1_ena));

	deemph |= SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
			   tx_override_deemphasis_select);
	deemph |= (txdds->amp & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
		    txampcntl_d2a)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
				       txampcntl_d2a);
	deemph |= (txdds->main & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
		     txc0_ena)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
				   txc0_ena);
	deemph |= (txdds->post & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
		     txcp1_ena)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
				    txcp1_ena);
	deemph |= (txdds->pre & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
		     txcn1_ena)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
				    txcn1_ena);
	qib_write_kreg_port(ppd, krp_tx_deemph_override, deemph);
}
8210 
8211 /*
8212  * Set the parameters for mez cards on link bounce, so they are
8213  * always exactly what was requested.  Similar logic to init_txdds
8214  * but does just the serdes.
8215  */
8216 static void adj_tx_serdes(struct qib_pportdata *ppd)
8217 {
8218         const struct txdds_ent *sdr_dds, *ddr_dds, *qdr_dds;
8219         struct txdds_ent *dds;
8220 
8221         find_best_ent(ppd, &sdr_dds, &ddr_dds, &qdr_dds, 1);
8222         dds = (struct txdds_ent *)(ppd->link_speed_active == QIB_IB_QDR ?
8223                 qdr_dds : (ppd->link_speed_active == QIB_IB_DDR ?
8224                                 ddr_dds : sdr_dds));
8225         write_tx_serdes_param(ppd, dds);
8226 }
8227 
8228 /* set QDR forced value for H1, if needed */
static void force_h1(struct qib_pportdata *ppd)
{
	int chan;

	ppd->cpspec->qdr_reforce = 0;
	/* only rev1 chips need the forced H1 value */
	if (!ppd->dd->cspec->r1)
		return;

	/* per channel: enter manual H1 mode, load the stored code,
	 * clock it in, then leave manual mode */
	for (chan = 0; chan < SERDES_CHANS; chan++) {
		set_man_mode_h1(ppd, chan, 1, 0);
		set_man_code(ppd, chan, ppd->cpspec->h1_val);
		clock_man(ppd, chan);
		set_man_mode_h1(ppd, chan, 0, 0);
	}
}
8244 
8245 #define SJA_EN SYM_MASK(SPC_JTAG_ACCESS_REG, SPC_JTAG_ACCESS_EN)
8246 #define BISTEN_LSB SYM_LSB(SPC_JTAG_ACCESS_REG, bist_en)
8247 
8248 #define R_OPCODE_LSB 3
8249 #define R_OP_NOP 0
8250 #define R_OP_SHIFT 2
8251 #define R_OP_UPDATE 3
8252 #define R_TDI_LSB 2
8253 #define R_TDO_LSB 1
8254 #define R_RDY 1
8255 
8256 static int qib_r_grab(struct qib_devdata *dd)
8257 {
8258         u64 val = SJA_EN;
8259 
8260         qib_write_kreg(dd, kr_r_access, val);
8261         qib_read_kreg32(dd, kr_scratch);
8262         return 0;
8263 }
8264 
8265 /* qib_r_wait_for_rdy() not only waits for the ready bit, it
8266  * returns the current state of R_TDO
8267  */
8268 static int qib_r_wait_for_rdy(struct qib_devdata *dd)
8269 {
8270         u64 val;
8271         int timeout;
8272 
8273         for (timeout = 0; timeout < 100 ; ++timeout) {
8274                 val = qib_read_kreg32(dd, kr_r_access);
8275                 if (val & R_RDY)
8276                         return (val >> R_TDO_LSB) & 1;
8277         }
8278         return -1;
8279 }
8280 
/*
 * Shift @len bits through the scan chain selected by @bisten.
 * Bits from @inp (LSB-first within each byte) are driven out on TDI;
 * the TDO level observed before each shift is recorded into @outp.
 * Either buffer pointer may be NULL (shift zeros / discard output).
 * The interface is restored to the NOP opcode when done.
 *
 * Returns the number of bit positions processed on success, or the
 * negative result from qib_r_wait_for_rdy() if the interface never
 * became ready.
 */
static int qib_r_shift(struct qib_devdata *dd, int bisten,
		       int len, u8 *inp, u8 *outp)
{
	u64 valbase, val;
	int ret, pos;

	/* base command: JTAG access enabled, chain select, SHIFT opcode */
	valbase = SJA_EN | (bisten << BISTEN_LSB) |
		(R_OP_SHIFT << R_OPCODE_LSB);
	ret = qib_r_wait_for_rdy(dd);
	if (ret < 0)
		goto bail;
	for (pos = 0; pos < len; ++pos) {
		val = valbase;
		if (outp) {
			/* ret holds the TDO level from the previous poll */
			outp[pos >> 3] &= ~(1 << (pos & 7));
			outp[pos >> 3] |= (ret << (pos & 7));
		}
		if (inp) {
			int tdi = inp[pos >> 3] >> (pos & 7);

			val |= ((tdi & 1) << R_TDI_LSB);
		}
		qib_write_kreg(dd, kr_r_access, val);
		qib_read_kreg32(dd, kr_scratch);
		ret = qib_r_wait_for_rdy(dd);
		if (ret < 0)
			break;
	}
	/* Restore to NOP between operations. */
	val =  SJA_EN | (bisten << BISTEN_LSB);
	qib_write_kreg(dd, kr_r_access, val);
	qib_read_kreg32(dd, kr_scratch);
	ret = qib_r_wait_for_rdy(dd);

	if (ret >= 0)
		ret = pos;
bail:
	return ret;
}
8320 
8321 static int qib_r_update(struct qib_devdata *dd, int bisten)
8322 {
8323         u64 val;
8324         int ret;
8325 
8326         val = SJA_EN | (bisten << BISTEN_LSB) | (R_OP_UPDATE << R_OPCODE_LSB);
8327         ret = qib_r_wait_for_rdy(dd);
8328         if (ret >= 0) {
8329                 qib_write_kreg(dd, kr_r_access, val);
8330                 qib_read_kreg32(dd, kr_scratch);
8331         }
8332         return ret;
8333 }
8334 
8335 #define BISTEN_PORT_SEL 15
8336 #define LEN_PORT_SEL 625
8337 #define BISTEN_AT 17
8338 #define LEN_AT 156
8339 #define BISTEN_ETM 16
8340 #define LEN_ETM 632
8341 
8342 #define BIT2BYTE(x) (((x) +  BITS_PER_BYTE - 1) / BITS_PER_BYTE)
8343 
8344 /* these are common for all IB port use cases. */
8345 static u8 reset_at[BIT2BYTE(LEN_AT)] = {
8346         0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8347         0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00,
8348 };
8349 static u8 reset_atetm[BIT2BYTE(LEN_ETM)] = {
8350         0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8351         0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8352         0x00, 0x00, 0x00, 0x80, 0xe3, 0x81, 0x73, 0x3c, 0x70, 0x8e,
8353         0x07, 0xce, 0xf1, 0xc0, 0x39, 0x1e, 0x38, 0xc7, 0x03, 0xe7,
8354         0x78, 0xe0, 0x1c, 0x0f, 0x9c, 0x7f, 0x80, 0x73, 0x0f, 0x70,
8355         0xde, 0x01, 0xce, 0x39, 0xc0, 0xf9, 0x06, 0x38, 0xd7, 0x00,
8356         0xe7, 0x19, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8357         0x00, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
8358 };
8359 static u8 at[BIT2BYTE(LEN_AT)] = {
8360         0x00, 0x00, 0x18, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00,
8361         0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00,
8362 };
8363 
8364 /* used for IB1 or IB2, only one in use */
8365 static u8 atetm_1port[BIT2BYTE(LEN_ETM)] = {
8366         0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8367         0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8368         0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8369         0x00, 0x10, 0xf2, 0x80, 0x83, 0x1e, 0x38, 0x00, 0x00, 0x00,
8370         0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8371         0x00, 0x00, 0x50, 0xf4, 0x41, 0x00, 0x18, 0x78, 0xc8, 0x03,
8372         0x07, 0x7b, 0xa0, 0x3e, 0x00, 0x02, 0x00, 0x00, 0x18, 0x00,
8373         0x18, 0x00, 0x00, 0x00, 0x00, 0x4b, 0x00, 0x00, 0x00,
8374 };
8375 
8376 /* used when both IB1 and IB2 are in use */
8377 static u8 atetm_2port[BIT2BYTE(LEN_ETM)] = {
8378         0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8379         0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x79,
8380         0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8381         0x00, 0x00, 0xf8, 0x80, 0x83, 0x1e, 0x38, 0xe0, 0x03, 0x05,
8382         0x7b, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80,
8383         0xa2, 0x0f, 0x50, 0xf4, 0x41, 0x00, 0x18, 0x78, 0xd1, 0x07,
8384         0x02, 0x7c, 0x80, 0x3e, 0x00, 0x02, 0x00, 0x00, 0x3e, 0x00,
8385         0x02, 0x00, 0x00, 0x00, 0x00, 0x64, 0x00, 0x00, 0x00,
8386 };
8387 
8388 /* used when only IB1 is in use */
8389 static u8 portsel_port1[BIT2BYTE(LEN_PORT_SEL)] = {
8390         0x32, 0x65, 0xa4, 0x7b, 0x10, 0x98, 0xdc, 0xfe, 0x13, 0x13,
8391         0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x73, 0x0c, 0x0c, 0x0c,
8392         0x0c, 0x0c, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
8393         0x13, 0x78, 0x78, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
8394         0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x74, 0x32,
8395         0x32, 0x32, 0x32, 0x32, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14,
8396         0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14,
8397         0x14, 0x14, 0x9f, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
8398 };
8399 
/*
 * Port-select shift-register image used when only IB2 is in use.
 * Shifted into the chip via qib_r_shift(dd, BISTEN_PORT_SEL, ...)
 * from setup_7322_link_recovery().  Opaque hardware bit pattern;
 * do not edit by hand.
 */
static u8 portsel_port2[BIT2BYTE(LEN_PORT_SEL)] = {
	0x32, 0x65, 0xa4, 0x7b, 0x10, 0x98, 0xdc, 0xfe, 0x39, 0x39,
	0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x73, 0x32, 0x32, 0x32,
	0x32, 0x32, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39,
	0x39, 0x78, 0x78, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39,
	0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x74, 0x32,
	0x32, 0x32, 0x32, 0x32, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a,
	0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a,
	0x3a, 0x3a, 0x9f, 0x01, 0x00, 0x00, 0x00, 0x00, 0x01,
};
8411 
/*
 * Port-select shift-register image used when both IB1 and IB2 are in
 * use.  Shifted into the chip via qib_r_shift(dd, BISTEN_PORT_SEL, ...)
 * from setup_7322_link_recovery().  Opaque hardware bit pattern;
 * do not edit by hand.
 */
static u8 portsel_2port[BIT2BYTE(LEN_PORT_SEL)] = {
	0x32, 0xba, 0x54, 0x76, 0x10, 0x98, 0xdc, 0xfe, 0x13, 0x13,
	0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x73, 0x0c, 0x0c, 0x0c,
	0x0c, 0x0c, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
	0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
	0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x74, 0x32,
	0x32, 0x32, 0x32, 0x32, 0x14, 0x14, 0x14, 0x14, 0x14, 0x3a,
	0x3a, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14,
	0x14, 0x14, 0x9f, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
};
8423 
8424 /*
8425  * Do setup to properly handle IB link recovery; if port is zero, we
8426  * are initializing to cover both ports; otherwise we are initializing
8427  * to cover a single port card, or the port has reached INIT and we may
8428  * need to switch coverage types.
8429  */
8430 static void setup_7322_link_recovery(struct qib_pportdata *ppd, u32 both)
8431 {
8432         u8 *portsel, *etm;
8433         struct qib_devdata *dd = ppd->dd;
8434 
8435         if (!ppd->dd->cspec->r1)
8436                 return;
8437         if (!both) {
8438                 dd->cspec->recovery_ports_initted++;
8439                 ppd->cpspec->recovery_init = 1;
8440         }
8441         if (!both && dd->cspec->recovery_ports_initted == 1) {
8442                 portsel = ppd->port == 1 ? portsel_port1 : portsel_port2;
8443                 etm = atetm_1port;
8444         } else {
8445                 portsel = portsel_2port;
8446                 etm = atetm_2port;
8447         }
8448 
8449         if (qib_r_grab(dd) < 0 ||
8450                 qib_r_shift(dd, BISTEN_ETM, LEN_ETM, reset_atetm, NULL) < 0 ||
8451                 qib_r_update(dd, BISTEN_ETM) < 0 ||
8452                 qib_r_shift(dd, BISTEN_AT, LEN_AT, reset_at, NULL) < 0 ||
8453                 qib_r_update(dd, BISTEN_AT) < 0 ||
8454                 qib_r_shift(dd, BISTEN_PORT_SEL, LEN_PORT_SEL,
8455                             portsel, NULL) < 0 ||
8456                 qib_r_update(dd, BISTEN_PORT_SEL) < 0 ||
8457                 qib_r_shift(dd, BISTEN_AT, LEN_AT, at, NULL) < 0 ||
8458                 qib_r_update(dd, BISTEN_AT) < 0 ||
8459                 qib_r_shift(dd, BISTEN_ETM, LEN_ETM, etm, NULL) < 0 ||
8460                 qib_r_update(dd, BISTEN_ETM) < 0)
8461                 qib_dev_err(dd, "Failed IB link recovery setup\n");
8462 }
8463 
/*
 * Check whether the chip survived a single-port link-recovery event.
 * Asserts freeze mode, samples the active freeze mask, and either
 * declares the HCA unusable (mask reads all-zero) or clears the
 * SerDes PClk-not-detect error and takes the IBC back out of reset.
 * No-op for dual-port recovery setups.
 */
static void check_7322_rxe_status(struct qib_pportdata *ppd)
{
	struct qib_devdata *dd = ppd->dd;
	u64 fmask;

	if (dd->cspec->recovery_ports_initted != 1)
		return; /* rest doesn't apply to dualport */
	/* put the chip into freeze mode before sampling state */
	qib_write_kreg(dd, kr_control, dd->control |
		       SYM_MASK(Control, FreezeMode));
	/* scratch read orders/flushes the control write before the delay */
	(void)qib_read_kreg64(dd, kr_scratch);
	udelay(3); /* ibcreset asserted 400ns, be sure that's over */
	fmask = qib_read_kreg64(dd, kr_act_fmask);
	if (!fmask) {
		/*
		 * require a powercycle before we'll work again, and make
		 * sure we get no more interrupts, and don't turn off
		 * freeze.
		 */
		ppd->dd->cspec->stay_in_freeze = 1;
		qib_7322_set_intr_state(ppd->dd, 0);
		qib_write_kreg(dd, kr_fmask, 0ULL);
		qib_dev_err(dd, "HCA unusable until powercycled\n");
		return; /* eventually reset */
	}

	/* clear the port-1 SerDes PClk-not-detected hardware error */
	qib_write_kreg(ppd->dd, kr_hwerrclear,
	    SYM_MASK(HwErrClear, IBSerdesPClkNotDetectClear_1));

	/* don't do the full clear_freeze(), not needed for this */
	qib_write_kreg(dd, kr_control, dd->control);
	qib_read_kreg32(dd, kr_scratch);
	/* take IBC out of reset */
	if (ppd->link_speed_supported) {
		ppd->cpspec->ibcctrl_a &=
			~SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn);
		qib_write_kreg_port(ppd, krp_ibcctrl_a,
				    ppd->cpspec->ibcctrl_a);
		qib_read_kreg32(dd, kr_scratch);
		/* link was administratively disabled; re-assert DISABLE */
		if (ppd->lflags & QIBL_IB_LINK_DISABLED)
			qib_set_ib_7322_lstate(ppd, 0,
				QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
	}
}

/* [<][>][^][v][top][bottom][index][help] */