This source file includes the following definitions:
- qib_format_hwmsg
- qib_format_hwerrors
- signal_ib_event
- qib_handle_e_ibstatuschanged
- qib_clear_symerror_on_linkup
- qib_handle_urcv
- qib_bad_intrstatus

#include <linux/pci.h>
#include <linux/delay.h>

#include "qib.h"
#include "qib_common.h"

/**
 * qib_format_hwmsg - append a single hardware error message to a buffer
 * @msg: message buffer being assembled
 * @msgl: total size of the message buffer
 * @hwmsg: message text to append to @msg, wrapped in brackets
 */
static void qib_format_hwmsg(char *msg, size_t msgl, const char *hwmsg)
{
        strlcat(msg, "[", msgl);
        strlcat(msg, hwmsg, msgl);
        strlcat(msg, "]", msgl);
}

/**
 * qib_format_hwerrors - format hardware error messages for display
 * @hwerrs: hardware error bit vector
 * @hwerrmsgs: table of hardware error descriptions
 * @nhwerrmsgs: number of entries in @hwerrmsgs
 * @msg: message buffer
 * @msgl: message buffer length
 */
void qib_format_hwerrors(u64 hwerrs, const struct qib_hwerror_msgs *hwerrmsgs,
                         size_t nhwerrmsgs, char *msg, size_t msgl)
{
        int i;

        for (i = 0; i < nhwerrmsgs; i++)
                if (hwerrs & hwerrmsgs[i].mask)
                        qib_format_hwmsg(msg, msgl, hwerrmsgs[i].msg);
}
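
/*
 * Illustrative usage sketch, not code from this file: a chip-specific
 * error handler is expected to keep a table of qib_hwerror_msgs and pass
 * the raw error bits here for formatting.  The table contents, buffer
 * size and variable names below are made up for illustration only.
 *
 *      static const struct qib_hwerror_msgs example_msgs[] = {
 *              { .mask = 0x1ULL, .msg = "PCIe poisoned TLP" },
 *              { .mask = 0x2ULL, .msg = "memory parity error" },
 *      };
 *      char buf[128] = "";
 *
 *      qib_format_hwerrors(hwerrs, example_msgs, ARRAY_SIZE(example_msgs),
 *                          buf, sizeof(buf));
 *      qib_dev_err(dd, "Hardware error: %s\n", buf);
 */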

static void signal_ib_event(struct qib_pportdata *ppd, enum ib_event_type ev)
{
        struct ib_event event;
        struct qib_devdata *dd = ppd->dd;

        event.device = &dd->verbs_dev.rdi.ibdev;
        event.element.port_num = ppd->port;
        event.event = ev;
        ib_dispatch_event(&event);
}

/*
 * Handle a chip "IB status changed" interrupt for one port: decode the
 * link state and physical training state from the IBC status value,
 * update ppd->lflags and the shared status page, and generate the
 * corresponding IB events and user notifications.
 */
void qib_handle_e_ibstatuschanged(struct qib_pportdata *ppd, u64 ibcs)
{
        struct qib_devdata *dd = ppd->dd;
        unsigned long flags;
        u32 lstate;
        u8 ltstate;
        enum ib_event_type ev = 0;      /* 0 means no event to dispatch */

        lstate = dd->f_iblink_state(ibcs);
        ltstate = dd->f_ibphys_portstate(ibcs);

        /*
         * If the link transitions into INIT (or better) from one of the
         * down states, or from an up state back into a down state, give
         * the chip-specific code first crack at it; it may tell us to
         * skip the generic handling below.
         *
         * ppd->lflags can be 0 the first time through if the link was
         * already up when the driver initialized.
         */
        if (lstate >= IB_PORT_INIT &&
            (!ppd->lflags || (ppd->lflags & QIBL_LINKDOWN)) &&
            ltstate == IB_PHYSPORTSTATE_LINKUP) {
                /* link is coming up */
                if (dd->f_ib_updown(ppd, 1, ibcs))
                        goto skip_ibchange; /* chip code handled it */
        } else if (ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED |
                   QIBL_LINKACTIVE | QIBL_IB_FORCE_NOTIFY)) {
                if (ltstate != IB_PHYSPORTSTATE_LINKUP &&
                    ltstate <= IB_PHYSPORTSTATE_CFG_TRAIN &&
                    dd->f_ib_updown(ppd, 0, ibcs))
                        goto skip_ibchange; /* chip code handled it */
                qib_set_uevent_bits(ppd, _QIB_EVENT_LINKDOWN_BIT);
        }

        if (lstate != IB_PORT_DOWN) {
                /* link is INIT, ARMED, or ACTIVE */
                if (lstate != IB_PORT_ACTIVE) {
                        *ppd->statusp &= ~QIB_STATUS_IB_READY;
                        if (ppd->lflags & QIBL_LINKACTIVE)
                                ev = IB_EVENT_PORT_ERR;
                        spin_lock_irqsave(&ppd->lflags_lock, flags);
                        if (lstate == IB_PORT_ARMED) {
                                ppd->lflags |= QIBL_LINKARMED | QIBL_LINKV;
                                ppd->lflags &= ~(QIBL_LINKINIT |
                                        QIBL_LINKDOWN | QIBL_LINKACTIVE);
                        } else {
                                ppd->lflags |= QIBL_LINKINIT | QIBL_LINKV;
                                ppd->lflags &= ~(QIBL_LINKARMED |
                                        QIBL_LINKDOWN | QIBL_LINKACTIVE);
                        }
                        spin_unlock_irqrestore(&ppd->lflags_lock, flags);
                        /* start a 75 msec timer to clear symbol errors */
                        mod_timer(&ppd->symerr_clear_timer,
                                  jiffies + msecs_to_jiffies(75));
                } else if (ltstate == IB_PHYSPORTSTATE_LINKUP &&
                           !(ppd->lflags & QIBL_LINKACTIVE)) {
                        /* became ACTIVE: link is physically up and we
                         * were not marked ACTIVE before */
                        qib_hol_up(ppd);
                        *ppd->statusp |=
                                QIB_STATUS_IB_READY | QIB_STATUS_IB_CONF;
                        qib_clear_symerror_on_linkup(&ppd->symerr_clear_timer);
                        spin_lock_irqsave(&ppd->lflags_lock, flags);
                        ppd->lflags |= QIBL_LINKACTIVE | QIBL_LINKV;
                        ppd->lflags &= ~(QIBL_LINKINIT |
                                QIBL_LINKDOWN | QIBL_LINKARMED);
                        spin_unlock_irqrestore(&ppd->lflags_lock, flags);
                        if (dd->flags & QIB_HAS_SEND_DMA)
                                qib_sdma_process_event(ppd,
                                        qib_sdma_event_e30_go_running);
                        ev = IB_EVENT_PORT_ACTIVE;
                        dd->f_setextled(ppd, 1);
                }
        } else {
                /* link went DOWN */
                if (ppd->lflags & QIBL_LINKACTIVE)
                        ev = IB_EVENT_PORT_ERR;
                spin_lock_irqsave(&ppd->lflags_lock, flags);
                ppd->lflags |= QIBL_LINKDOWN | QIBL_LINKV;
                ppd->lflags &= ~(QIBL_LINKINIT |
                        QIBL_LINKACTIVE | QIBL_LINKARMED);
                spin_unlock_irqrestore(&ppd->lflags_lock, flags);
                *ppd->statusp &= ~QIB_STATUS_IB_READY;
        }

skip_ibchange:
        ppd->lastibcstat = ibcs;
        if (ev)
                signal_ib_event(ppd, ev);
}
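
/*
 * For reference, a summary of the lflags transitions performed above,
 * derived from the code in this file rather than separate documentation:
 *
 *      new lstate      sets                    clears
 *      ----------      ----                    ------
 *      DOWN            LINKDOWN, LINKV         LINKINIT, LINKARMED, LINKACTIVE
 *      INIT            LINKINIT, LINKV         LINKARMED, LINKDOWN, LINKACTIVE
 *      ARMED           LINKARMED, LINKV        LINKINIT, LINKDOWN, LINKACTIVE
 *      ACTIVE          LINKACTIVE, LINKV       LINKINIT, LINKDOWN, LINKARMED
 *
 * The ACTIVE row applies only when the physical state is LINKUP and the
 * port was not already flagged ACTIVE.
 */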

/*
 * Timer callback (also called directly when the link goes ACTIVE):
 * snapshot the hardware symbol error counter as the zero reference, so
 * symbol errors accumulated while the link was training are not
 * reported against the newly established link.  Skipped if the link has
 * already gone ACTIVE, since the baseline was captured then.
 */
void qib_clear_symerror_on_linkup(struct timer_list *t)
{
        struct qib_pportdata *ppd = from_timer(ppd, t, symerr_clear_timer);

        if (ppd->lflags & QIBL_LINKACTIVE)
                return;

        ppd->ibport_data.z_symbol_error_counter =
                ppd->dd->f_portcntr(ppd, QIBPORTCNTR_IBSYMBOLERR);
}
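
/*
 * Hypothetical illustration, not code from this driver: a counter
 * reporting path would typically subtract the baseline recorded above
 * from the live hardware count, e.g.
 *
 *      symbol_errs = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_IBSYMBOLERR) -
 *                    ppd->ibport_data.z_symbol_error_counter;
 */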

/*
 * Handle receive-available and urgent-packet interrupts for user
 * contexts.  @ctxtr is a bitmask of contexts with pending work: wake
 * any process sleeping on the context, and for plain receive waits
 * also turn the "interrupt when data arrives" request back off.
 */
void qib_handle_urcv(struct qib_devdata *dd, u64 ctxtr)
{
        struct qib_ctxtdata *rcd;
        unsigned long flags;
        int i;

        spin_lock_irqsave(&dd->uctxt_lock, flags);
        for (i = dd->first_user_ctxt; dd->rcd && i < dd->cfgctxts; i++) {
                if (!(ctxtr & (1ULL << i)))
                        continue;
                rcd = dd->rcd[i];
                if (!rcd || !rcd->cnt)
                        continue;

                if (test_and_clear_bit(QIB_CTXT_WAITING_RCV, &rcd->flag)) {
                        wake_up_interruptible(&rcd->wait);
                        dd->f_rcvctrl(rcd->ppd, QIB_RCVCTRL_INTRAVAIL_DIS,
                                      rcd->ctxt);
                } else if (test_and_clear_bit(QIB_CTXT_WAITING_URG,
                                              &rcd->flag)) {
                        rcd->urgent++;
                        wake_up_interruptible(&rcd->wait);
                }
        }
        spin_unlock_irqrestore(&dd->uctxt_lock, flags);
}
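
/*
 * Illustrative (hypothetical) caller sketch, not code from this driver:
 * a chip-specific interrupt handler that has read its "user receive
 * available" status would pass a bitmask in which bit N corresponds to
 * user context N, as tested in the loop above, e.g.
 *
 *      u64 ctxtrbits = ...;    // per-context bits read from the chip
 *
 *      if (ctxtrbits)
 *              qib_handle_urcv(dd, ctxtrbits);
 */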

void qib_bad_intrstatus(struct qib_devdata *dd)
{
        static int allbits;

        /*
         * The chip's interrupt status register read back an implausible
         * value (e.g. all 1s if the device has fallen off the bus).
         * Report it, and on repeated occurrences progressively shut
         * interrupt handling down rather than spin on a dead device.
         */
        qib_dev_err(dd,
                "Read of chip interrupt status failed, disabling interrupts\n");
        if (allbits++) {
                /* disable interrupt delivery; something is very wrong */
                if (allbits == 2)
                        dd->f_set_intr_state(dd, 0);
                if (allbits == 3) {
                        qib_dev_err(dd,
                                "2nd bad interrupt status, unregistering interrupts\n");
                        dd->flags |= QIB_BADINTR;
                        dd->flags &= ~QIB_INITTED;
                        dd->f_free_irq(dd);
                }
        }
}