This source file includes the following definitions:
- qib_disarm_piobufs
- qib_disarm_piobufs_ifneeded
- is_sdma_buf
- find_ctxt
- qib_disarm_piobufs_set
- update_send_bufs
- no_send_bufs
- qib_getsendbuf_range
- qib_sendbuf_done
- qib_chg_pioavailkernel
- qib_cancel_sends
- qib_force_pio_avail_update
- qib_hol_down
- qib_hol_init
- qib_hol_up
- qib_hol_event
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/moduleparam.h>

#include "qib.h"

static unsigned qib_hol_timeout_ms = 3000;
module_param_named(hol_timeout_ms, qib_hol_timeout_ms, uint, S_IRUGO);
MODULE_PARM_DESC(hol_timeout_ms,
		 "duration of user app suspension after link failure");

unsigned qib_sdma_fetch_arb = 1;
module_param_named(fetch_arb, qib_sdma_fetch_arb, uint, S_IRUGO);
MODULE_PARM_DESC(fetch_arb, "IBA7220: change SDMA descriptor arbitration");
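
/*
 * Usage note: both parameters are declared S_IRUGO (read-only at runtime),
 * so they are set at module load time and exposed read-only under
 * /sys/module/<module>/parameters/.  For example, assuming the driver
 * module is built as ib_qib (values here are only illustrative):
 *
 *	modprobe ib_qib hol_timeout_ms=5000 fetch_arb=0
 */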

/**
 * qib_disarm_piobufs - cancel a range of PIO send buffers
 * @dd: the qlogic_ib device
 * @first: the first PIO buffer to cancel
 * @cnt: the number of PIO buffers to cancel
 *
 * Clear any pending deferred-disarm bits for the range, then issue a
 * disarm for each buffer.
 */
void qib_disarm_piobufs(struct qib_devdata *dd, unsigned first, unsigned cnt)
{
	unsigned long flags;
	unsigned i;
	unsigned last;

	last = first + cnt;
	spin_lock_irqsave(&dd->pioavail_lock, flags);
	for (i = first; i < last; i++) {
		__clear_bit(i, dd->pio_need_disarm);
		dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(i));
	}
	spin_unlock_irqrestore(&dd->pioavail_lock, flags);
}

/*
 * Called on behalf of a user context (rcd) to disarm any of its send
 * buffers that were flagged for deferred disarm, clearing the
 * DISARM_BUFS event bits first.
 */
int qib_disarm_piobufs_ifneeded(struct qib_ctxtdata *rcd)
{
	struct qib_devdata *dd = rcd->dd;
	unsigned i;
	unsigned last;
	unsigned n = 0;

	last = rcd->pio_base + rcd->piocnt;
	/*
	 * Clear the event bits at the start, in case more interrupts set
	 * bits while we are disarming.
	 */
	if (rcd->user_event_mask) {
		/*
		 * subctxt_cnt is 0 if not shared, so do base
		 * separately, first, then remaining subctxt, if any
		 */
		clear_bit(_QIB_EVENT_DISARM_BUFS_BIT, &rcd->user_event_mask[0]);
		for (i = 1; i < rcd->subctxt_cnt; i++)
			clear_bit(_QIB_EVENT_DISARM_BUFS_BIT,
				  &rcd->user_event_mask[i]);
	}
	spin_lock_irq(&dd->pioavail_lock);
	for (i = rcd->pio_base; i < last; i++) {
		if (__test_and_clear_bit(i, dd->pio_need_disarm)) {
			n++;
			dd->f_sendctrl(rcd->ppd, QIB_SENDCTRL_DISARM_BUF(i));
		}
	}
	spin_unlock_irq(&dd->pioavail_lock);
	return 0;
}

static struct qib_pportdata *is_sdma_buf(struct qib_devdata *dd, unsigned i)
{
	struct qib_pportdata *ppd;
	unsigned pidx;

	for (pidx = 0; pidx < dd->num_pports; pidx++) {
		ppd = dd->pport + pidx;
		if (i >= ppd->sdma_state.first_sendbuf &&
		    i < ppd->sdma_state.last_sendbuf)
			return ppd;
	}
	return NULL;
}

/*
 * Return 1 if the send buffer is owned by a user context.
 * Sets _QIB_EVENT_DISARM_BUFS_BIT in that context's user_event_mask as a
 * side effect.
 */
static int find_ctxt(struct qib_devdata *dd, unsigned bufn)
{
	struct qib_ctxtdata *rcd;
	unsigned ctxt;
	int ret = 0;

	spin_lock(&dd->uctxt_lock);
	for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts; ctxt++) {
		rcd = dd->rcd[ctxt];
		if (!rcd || bufn < rcd->pio_base ||
		    bufn >= rcd->pio_base + rcd->piocnt)
			continue;
		if (rcd->user_event_mask) {
			int i;

			/*
			 * subctxt_cnt is 0 if not shared, so do base
			 * separately, first, then remaining subctxt, if any
			 */
			set_bit(_QIB_EVENT_DISARM_BUFS_BIT,
				&rcd->user_event_mask[0]);
			for (i = 1; i < rcd->subctxt_cnt; i++)
				set_bit(_QIB_EVENT_DISARM_BUFS_BIT,
					&rcd->user_event_mask[i]);
		}
		ret = 1;
		break;
	}
	spin_unlock(&dd->uctxt_lock);

	return ret;
}

/*
 * Disarm a set of send buffers.  If a buffer might be actively being
 * written to (kernel or user), mark it to be disarmed later, when it is
 * no longer being written; SDMA-owned buffers are handled by cancelling
 * sends on their port instead.
 */
void qib_disarm_piobufs_set(struct qib_devdata *dd, unsigned long *mask,
			    unsigned cnt)
{
	struct qib_pportdata *ppd, *pppd[QIB_MAX_IB_PORTS];
	unsigned i;
	unsigned long flags;

	for (i = 0; i < dd->num_pports; i++)
		pppd[i] = NULL;

	for (i = 0; i < cnt; i++) {
		if (!test_bit(i, mask))
			continue;
		/*
		 * If the buffer is owned by the SDMA hardware, defer to a
		 * per-port cancel_sends below.
		 */
		ppd = is_sdma_buf(dd, i);
		if (ppd) {
			pppd[ppd->port] = ppd;
			continue;
		}
		/*
		 * If the kernel is writing the buffer or the buffer is
		 * owned by a user process, we can't disarm it yet.
		 */
		spin_lock_irqsave(&dd->pioavail_lock, flags);
		if (test_bit(i, dd->pio_writing) ||
		    (!test_bit(i << 1, dd->pioavailkernel) &&
		     find_ctxt(dd, i))) {
			__set_bit(i, dd->pio_need_disarm);
		} else {
			dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(i));
		}
		spin_unlock_irqrestore(&dd->pioavail_lock, flags);
	}

	/* do cancel_sends once per port that had sdma piobufs in error */
	for (i = 0; i < dd->num_pports; i++)
		if (pppd[i])
			qib_cancel_sends(pppd[i]);
}

/**
 * update_send_bufs - update shadow copy of the PIO availability map
 * @dd: the qlogic_ib device
 *
 * Called whenever the local shadow indicates we have run out of send
 * buffers.
 */
static void update_send_bufs(struct qib_devdata *dd)
{
	unsigned long flags;
	unsigned i;
	const unsigned piobregs = dd->pioavregs;

	/*
	 * Merge the chip's DMA'd copy of the availability bits into the
	 * shadow, but only for kernel-owned buffers (pioavailkernel) and
	 * only where the generation (check) bits in the shadow and the
	 * DMA'd copy agree, and the shadow still shows the buffer busy.
	 * The lock keeps concurrent callers from making conflicting
	 * updates to the shadow.  This path only runs when all buffers
	 * appear to be in use, so only cpu overhead, not latency or
	 * bandwidth, is affected.
	 */
	if (!dd->pioavailregs_dma)
		return;
	spin_lock_irqsave(&dd->pioavail_lock, flags);
	for (i = 0; i < piobregs; i++) {
		u64 pchbusy, pchg, piov, pnew;

		piov = le64_to_cpu(dd->pioavailregs_dma[i]);
		pchg = dd->pioavailkernel[i] &
			~(dd->pioavailshadow[i] ^ piov);
		pchbusy = pchg << QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT;
		if (pchg && (pchbusy & dd->pioavailshadow[i])) {
			pnew = dd->pioavailshadow[i] & ~pchbusy;
			pnew |= piov & pchbusy;
			dd->pioavailshadow[i] = pnew;
		}
	}
	spin_unlock_irqrestore(&dd->pioavail_lock, flags);
}

/*
 * Bookkeeping when no PIO send buffers are available: request a shadow
 * update on the next allocation attempt and bump the stats counter.
 */
static noinline void no_send_bufs(struct qib_devdata *dd)
{
	dd->upd_pio_shadow = 1;

	/* not atomic, but if we lose a stat count in a while, that's OK */
	qib_stats.sps_nopiobufs++;
}

/*
 * Common code for send buffer allocation from the inclusive range
 * [first, last].
 *
 * Marks the chosen buffer busy and in-progress (pio_writing), returns
 * its number through *pbufnum, and returns a pointer to the buffer, or
 * NULL if none is available.
 */
u32 __iomem *qib_getsendbuf_range(struct qib_devdata *dd, u32 *pbufnum,
				  u32 first, u32 last)
{
	unsigned i, j, updated = 0;
	unsigned nbufs;
	unsigned long flags;
	unsigned long *shadow = dd->pioavailshadow;
	u32 __iomem *buf;

	if (!(dd->flags & QIB_PRESENT))
		return NULL;

	nbufs = last - first + 1;	/* number in range to check */
	if (dd->upd_pio_shadow) {
update_shadow:
		/*
		 * Minor optimization.  If we had no buffers on the last
		 * call, start out by doing the update; continue and do the
		 * scan even if no buffers were updated, to be paranoid.
		 */
		update_send_bufs(dd);
		updated++;
	}
	i = first;
	/*
	 * The scan and the busy/generation bit updates are done under the
	 * lock, so the pair of bit operations is atomic with respect to
	 * other allocators.
	 */
	spin_lock_irqsave(&dd->pioavail_lock, flags);
	if (dd->last_pio >= first && dd->last_pio <= last)
		i = dd->last_pio + 1;
	if (!first)
		/* adjust to the lowest kernel-owned buffer */
		nbufs = last - dd->min_kernel_pio + 1;
	for (j = 0; j < nbufs; j++, i++) {
		if (i > last)
			i = !first ? dd->min_kernel_pio : first;
		if (__test_and_set_bit((2 * i) + 1, shadow))
			continue;
		/* flip generation bit */
		__change_bit(2 * i, shadow);
		/* remember that the buffer can be written to now */
		__set_bit(i, dd->pio_writing);
		if (!first && first != last)
			dd->last_pio = i;
		break;
	}
	spin_unlock_irqrestore(&dd->pioavail_lock, flags);

	if (j == nbufs) {
		if (!updated)
			/*
			 * First time through; shadow exhausted, but there
			 * may be buffers available, so try an update and
			 * then rescan.
			 */
			goto update_shadow;
		no_send_bufs(dd);
		buf = NULL;
	} else {
		if (i < dd->piobcnt2k)
			buf = (u32 __iomem *)(dd->pio2kbase +
				i * dd->palign);
		else if (i < dd->piobcnt2k + dd->piobcnt4k || !dd->piovl15base)
			buf = (u32 __iomem *)(dd->pio4kbase +
				(i - dd->piobcnt2k) * dd->align4k);
		else
			buf = (u32 __iomem *)(dd->piovl15base +
				(i - (dd->piobcnt2k + dd->piobcnt4k)) *
				dd->align4k);
		if (pbufnum)
			*pbufnum = i;
		dd->upd_pio_shadow = 0;
	}

	return buf;
}

/*
 * Record that the caller is finished writing to buffer n so we don't
 * disarm it while it is being written, and disarm it now if a disarm was
 * deferred.
 */
void qib_sendbuf_done(struct qib_devdata *dd, unsigned n)
{
	unsigned long flags;

	spin_lock_irqsave(&dd->pioavail_lock, flags);
	__clear_bit(n, dd->pio_writing);
	if (__test_and_clear_bit(n, dd->pio_need_disarm))
		dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(n));
	spin_unlock_irqrestore(&dd->pioavail_lock, flags);
}

/**
 * qib_chg_pioavailkernel - change which send buffers are available for kernel
 * @dd: the qlogic_ib device
 * @start: the starting send buffer number
 * @len: the number of send buffers
 * @avail: true if the buffers become available for kernel use, false otherwise
 * @rcd: the context pointer, passed through to f_txchk_change
 */
void qib_chg_pioavailkernel(struct qib_devdata *dd, unsigned start,
			    unsigned len, u32 avail, struct qib_ctxtdata *rcd)
{
	unsigned long flags;
	unsigned end;
	unsigned ostart = start;

	/* There are two bits per send buffer (busy and generation) */
	start *= 2;
	end = start + len * 2;

	spin_lock_irqsave(&dd->pioavail_lock, flags);
	/* Set or clear the busy and kernel-ownership bits in the shadow. */
	while (start < end) {
		if (avail) {
			unsigned long dma;
			int i;

			/*
			 * Make sure the generation (check) bit in the
			 * shadow matches the DMA'd copy from the chip,
			 * since it may have changed many times while the
			 * buffer was allocated to a user.  The dma array
			 * is always little-endian, so convert to host
			 * order before testing the bit.
			 */
			i = start / BITS_PER_LONG;
			__clear_bit(QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT + start,
				    dd->pioavailshadow);
			dma = (unsigned long)
				le64_to_cpu(dd->pioavailregs_dma[i]);
			if (test_bit((QLOGIC_IB_SENDPIOAVAIL_CHECK_SHIFT +
				      start) % BITS_PER_LONG, &dma))
				__set_bit(QLOGIC_IB_SENDPIOAVAIL_CHECK_SHIFT +
					  start, dd->pioavailshadow);
			else
				__clear_bit(QLOGIC_IB_SENDPIOAVAIL_CHECK_SHIFT
					    + start, dd->pioavailshadow);
			__set_bit(start, dd->pioavailkernel);
			if ((start >> 1) < dd->min_kernel_pio)
				dd->min_kernel_pio = start >> 1;
		} else {
			__set_bit(start + QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT,
				  dd->pioavailshadow);
			__clear_bit(start, dd->pioavailkernel);
			if ((start >> 1) > dd->min_kernel_pio)
				dd->min_kernel_pio = start >> 1;
		}
		start += 2;
	}

	if (dd->min_kernel_pio > 0 && dd->last_pio < dd->min_kernel_pio - 1)
		dd->last_pio = dd->min_kernel_pio - 1;
	spin_unlock_irqrestore(&dd->pioavail_lock, flags);

	dd->f_txchk_change(dd, ostart, len, avail, rcd);
}

/*
 * Cancel all sends for a port: flag every user-context PIO buffer on the
 * port for deferred disarm (and set the DISARM_BUFS event so the user
 * process re-arms before reuse), then, if the chip has no send DMA,
 * disarm and flush all send buffers.  Used whenever the send side must
 * be made idle.
 */
void qib_cancel_sends(struct qib_pportdata *ppd)
{
	struct qib_devdata *dd = ppd->dd;
	struct qib_ctxtdata *rcd;
	unsigned long flags;
	unsigned ctxt;
	unsigned i;
	unsigned last;

	/*
	 * Flag every user context on this port.  The uctxt_lock keeps the
	 * rcd from changing underneath us while we read its buffer range;
	 * the pioavail_lock protects the need_disarm bitmap.  The two
	 * locks are taken sequentially, not nested.
	 */
	for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts; ctxt++) {
		spin_lock_irqsave(&dd->uctxt_lock, flags);
		rcd = dd->rcd[ctxt];
		if (rcd && rcd->ppd == ppd) {
			last = rcd->pio_base + rcd->piocnt;
			if (rcd->user_event_mask) {
				/*
				 * subctxt_cnt is 0 if not shared, so do base
				 * separately, first, then remaining subctxt,
				 * if any
				 */
				set_bit(_QIB_EVENT_DISARM_BUFS_BIT,
					&rcd->user_event_mask[0]);
				for (i = 1; i < rcd->subctxt_cnt; i++)
					set_bit(_QIB_EVENT_DISARM_BUFS_BIT,
						&rcd->user_event_mask[i]);
			}
			i = rcd->pio_base;
			spin_unlock_irqrestore(&dd->uctxt_lock, flags);
			spin_lock_irqsave(&dd->pioavail_lock, flags);
			for (; i < last; i++)
				__set_bit(i, dd->pio_need_disarm);
			spin_unlock_irqrestore(&dd->pioavail_lock, flags);
		} else
			spin_unlock_irqrestore(&dd->uctxt_lock, flags);
	}

	if (!(dd->flags & QIB_HAS_SEND_DMA))
		dd->f_sendctrl(ppd, QIB_SENDCTRL_DISARM_ALL |
				    QIB_SENDCTRL_FLUSH);
}

/*
 * Force an update of the in-memory copy of the pioavail registers by
 * briefly toggling the chip's availability-update mechanism
 * (QIB_SENDCTRL_AVAIL_BLIP), for use when the normal shadow-based path
 * cannot be relied on.
 */
void qib_force_pio_avail_update(struct qib_devdata *dd)
{
	dd->f_sendctrl(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
}

void qib_hol_down(struct qib_pportdata *ppd)
{
	/*
	 * Cancel any pending sends when the link goes down, unless IB
	 * autonegotiation is in progress on this port.
	 */
	if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))
		qib_cancel_sends(ppd);
}

/*
 * Link is at INIT: (re)start the head-of-line (HoL) timer so stuck
 * packets blocking forward progress can be detected.  The timer may
 * already be running, so use mod_timer(), not add_timer().
 */
void qib_hol_init(struct qib_pportdata *ppd)
{
	if (ppd->hol_state != QIB_HOL_INIT) {
		ppd->hol_state = QIB_HOL_INIT;
		mod_timer(&ppd->hol_timer,
			  jiffies + msecs_to_jiffies(qib_hol_timeout_ms));
	}
}

/*
 * Link is up: mark the HoL state machine UP, so qib_hol_event() stops
 * flushing sends and rearming the timer.
 */
void qib_hol_up(struct qib_pportdata *ppd)
{
	ppd->hol_state = QIB_HOL_UP;
}

/*
 * This is only called via the timer.
 */
void qib_hol_event(struct timer_list *t)
{
	struct qib_pportdata *ppd = from_timer(ppd, t, hol_timer);

	/* Skip if the device is not fully initialized. */
	if (!(ppd->dd->flags & QIB_INITTED))
		return;

	if (ppd->hol_state != QIB_HOL_UP) {
		/*
		 * Link is not up: try to flush sends in case a stuck
		 * packet is blocking forward progress, then rearm the
		 * timer.
		 */
		qib_hol_down(ppd);
		mod_timer(&ppd->hol_timer,
			  jiffies + msecs_to_jiffies(qib_hol_timeout_ms));
	}
}
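
/*
 * Minimal usage sketch for the allocator above (illustrative only, not a
 * driver entry point): claim a kernel PIO buffer with
 * qib_getsendbuf_range(), write it while its pio_writing bit protects it
 * from being disarmed, then release it with qib_sendbuf_done().  The
 * function and its name are hypothetical; the buffer range, payload, and
 * plain writeq()-based copy are assumptions for the example.  Real
 * callers (e.g. the verbs and diag send paths) format a PBC word and
 * packet headers ahead of the payload.
 */
static void __maybe_unused qib_pio_write_sketch(struct qib_devdata *dd,
						const u64 *data, u32 nqwords)
{
	u32 bufnum;
	u32 i;
	u64 __iomem *piobuf;

	/* Any kernel-owned buffer will do; NULL means none are available. */
	piobuf = (u64 __iomem *)qib_getsendbuf_range(dd, &bufnum, 0,
					dd->piobcnt2k + dd->piobcnt4k - 1);
	if (!piobuf)
		return;

	/* Copy the payload into chip MMIO space. */
	for (i = 0; i < nqwords; i++)
		writeq(data[i], piobuf + i);

	/* Done writing; allow a deferred disarm of this buffer if needed. */
	qib_sendbuf_done(dd, bufnum);
}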