This source file includes the following definitions (a short, hypothetical usage sketch follows the list).
- qib_user_sdma_rb_search
- qib_user_sdma_rb_insert
- qib_user_sdma_queue_create
- qib_user_sdma_init_frag
- qib_user_sdma_alloc_header
- qib_user_sdma_page_to_frags
- qib_user_sdma_coalesce
- qib_user_sdma_num_pages
- qib_user_sdma_free_pkt_frag
- qib_user_sdma_pin_pages
- qib_user_sdma_pin_pkt
- qib_user_sdma_init_payload
- qib_user_sdma_free_pkt_list
- qib_user_sdma_queue_pkts
- qib_user_sdma_set_complete_counter
- qib_user_sdma_queue_clean
- qib_user_sdma_queue_destroy
- qib_user_sdma_hwqueue_clean
- qib_user_sdma_queue_drain
- qib_sdma_make_desc0
- qib_sdma_make_first_desc0
- qib_sdma_make_last_desc0
- qib_sdma_make_desc1
- qib_user_sdma_send_frag
- qib_user_sdma_send_desc
- qib_user_sdma_push_pkts
- qib_user_sdma_writev
- qib_user_sdma_make_progress
- qib_user_sdma_complete_counter
- qib_user_sdma_inflight_counter
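
The list above mixes the public entry points (queue create/destroy/drain, writev, make_progress and the two counters) with internal helpers. As a rough orientation, a hypothetical caller, presumably the qib file-ops layer, could drive one queue through its lifecycle as sketched below; the rcd->ctxt and dd->unit fields and the subcontext value 0 are assumptions about the surrounding driver, not taken from this file.

#include <linux/uio.h>

#include "qib.h"
#include "qib_user_sdma.h"

/*
 * Hypothetical caller (not part of this file): create a per-context queue,
 * submit one vector of packets, let the engine finish, then tear the queue
 * down.  rcd->ctxt, dd->unit and the subcontext value are assumptions.
 */
static int qib_user_sdma_example(struct qib_ctxtdata *rcd,
				 const struct iovec *iov, unsigned long dim)
{
	struct qib_devdata *dd = rcd->dd;
	struct qib_user_sdma_queue *pq;
	int npkts;

	pq = qib_user_sdma_queue_create(&dd->pcidev->dev, dd->unit,
					rcd->ctxt, 0 /* assumed subctxt */);
	if (!pq)
		return -ENOMEM;

	/* Parse the iovecs into packets and hand them to the SDMA engine. */
	npkts = qib_user_sdma_writev(rcd, pq, iov, dim);

	/* Retire any descriptors the hardware has already finished with. */
	qib_user_sdma_make_progress(rcd->ppd, pq);

	/* Wait for the rest, then release the queue and its caches. */
	qib_user_sdma_queue_drain(rcd->ppd, pq);
	qib_user_sdma_queue_destroy(pq);

	return npkts;
}
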
32 #include <linux/mm.h>
33 #include <linux/types.h>
34 #include <linux/device.h>
35 #include <linux/dmapool.h>
36 #include <linux/slab.h>
37 #include <linux/list.h>
38 #include <linux/highmem.h>
39 #include <linux/io.h>
40 #include <linux/uio.h>
41 #include <linux/rbtree.h>
42 #include <linux/spinlock.h>
43 #include <linux/delay.h>
44
45 #include "qib.h"
46 #include "qib_user_sdma.h"
47
48
49 #define QIB_USER_SDMA_MIN_HEADER_LENGTH 64
50
51 #define QIB_USER_SDMA_EXP_HEADER_LENGTH 64
52
53 #define QIB_USER_SDMA_DRAIN_TIMEOUT 250
54
55
56
57
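/*
 * One rb-tree node per user process, keyed by pid.  refcount records how
 * many user SDMA queues that process currently has open; push_pkts uses it
 * to decide whether the SDMA engine is shared between queues.
 */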
58 static struct rb_root qib_user_sdma_rb_root = RB_ROOT;
59
60 struct qib_user_sdma_rb_node {
61 struct rb_node node;
62 int refcount;
63 pid_t pid;
64 };
65
66 struct qib_user_sdma_pkt {
67 struct list_head list;
68
69 u8 tiddma;
70 u8 largepkt;
71 u16 frag_size;
72 u16 index;
73 u16 naddr;
74 u16 addrlimit;
75 u16 tidsmidx;
76 u16 tidsmcount;
77 u16 payload_size;
78 u32 bytes_togo;
79 u32 counter;
80 struct qib_tid_session_member *tidsm;
81 struct qib_user_sdma_queue *pq;
82 u64 added;
83
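/*
 * Per-fragment bookkeeping.  Packets that need more than these four
 * entries are allocated as "largepkt" with a bigger, trailing array.
 */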
84 struct {
85 u16 offset;
86 u16 length;
87 u16 first_desc;
88 u16 last_desc;
89 u16 put_page;
90 u16 dma_mapped;
91 u16 dma_length;
92 u16 padding;
93 struct page *page;
94 void *kvaddr;
95 dma_addr_t addr;
96 } addr[4];
97 };
98
99 struct qib_user_sdma_queue {
100
101
102
103
104
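/*
 * Packets already handed to the hardware, protected by sent_lock below;
 * queue_clean() retires them as the engine consumes their descriptors.
 */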
105 struct list_head sent;
106
107
108
109
110
111 spinlock_t sent_lock ____cacheline_aligned_in_smp;
112
113
114 char header_cache_name[64];
115 struct dma_pool *header_cache;
116
117
118 char pkt_slab_name[64];
119 struct kmem_cache *pkt_slab;
120
121
122 u32 counter;
123 u32 sent_counter;
124
125 u32 num_pending;
126
127 u32 num_sending;
128
129 u64 added;
130
131
132 struct rb_root dma_pages_root;
133
134 struct qib_user_sdma_rb_node *sdma_rb_node;
135
136
137 struct mutex lock;
138 };
139
140 static struct qib_user_sdma_rb_node *
141 qib_user_sdma_rb_search(struct rb_root *root, pid_t pid)
142 {
143 struct qib_user_sdma_rb_node *sdma_rb_node;
144 struct rb_node *node = root->rb_node;
145
146 while (node) {
147 sdma_rb_node = rb_entry(node, struct qib_user_sdma_rb_node,
148 node);
149 if (pid < sdma_rb_node->pid)
150 node = node->rb_left;
151 else if (pid > sdma_rb_node->pid)
152 node = node->rb_right;
153 else
154 return sdma_rb_node;
155 }
156 return NULL;
157 }
158
159 static int
160 qib_user_sdma_rb_insert(struct rb_root *root, struct qib_user_sdma_rb_node *new)
161 {
162 struct rb_node **node = &(root->rb_node);
163 struct rb_node *parent = NULL;
164 struct qib_user_sdma_rb_node *got;
165
166 while (*node) {
167 got = rb_entry(*node, struct qib_user_sdma_rb_node, node);
168 parent = *node;
169 if (new->pid < got->pid)
170 node = &((*node)->rb_left);
171 else if (new->pid > got->pid)
172 node = &((*node)->rb_right);
173 else
174 return 0;
175 }
176
177 rb_link_node(&new->node, parent, node);
178 rb_insert_color(&new->node, root);
179 return 1;
180 }
181
182 struct qib_user_sdma_queue *
183 qib_user_sdma_queue_create(struct device *dev, int unit, int ctxt, int sctxt)
184 {
185 struct qib_user_sdma_queue *pq =
186 kmalloc(sizeof(struct qib_user_sdma_queue), GFP_KERNEL);
187 struct qib_user_sdma_rb_node *sdma_rb_node;
188
189 if (!pq)
190 goto done;
191
192 pq->counter = 0;
193 pq->sent_counter = 0;
194 pq->num_pending = 0;
195 pq->num_sending = 0;
196 pq->added = 0;
197 pq->sdma_rb_node = NULL;
198
199 INIT_LIST_HEAD(&pq->sent);
200 spin_lock_init(&pq->sent_lock);
201 mutex_init(&pq->lock);
202
203 snprintf(pq->pkt_slab_name, sizeof(pq->pkt_slab_name),
204 "qib-user-sdma-pkts-%u-%02u.%02u", unit, ctxt, sctxt);
205 pq->pkt_slab = kmem_cache_create(pq->pkt_slab_name,
206 sizeof(struct qib_user_sdma_pkt),
207 0, 0, NULL);
208
209 if (!pq->pkt_slab)
210 goto err_kfree;
211
212 snprintf(pq->header_cache_name, sizeof(pq->header_cache_name),
213 "qib-user-sdma-headers-%u-%02u.%02u", unit, ctxt, sctxt);
214 pq->header_cache = dma_pool_create(pq->header_cache_name,
215 dev,
216 QIB_USER_SDMA_EXP_HEADER_LENGTH,
217 4, 0);
218 if (!pq->header_cache)
219 goto err_slab;
220
221 pq->dma_pages_root = RB_ROOT;
222
223 sdma_rb_node = qib_user_sdma_rb_search(&qib_user_sdma_rb_root,
224 current->pid);
225 if (sdma_rb_node) {
226 sdma_rb_node->refcount++;
227 } else {
228 sdma_rb_node = kmalloc(sizeof(
229 struct qib_user_sdma_rb_node), GFP_KERNEL);
230 if (!sdma_rb_node)
231 goto err_rb;
232
233 sdma_rb_node->refcount = 1;
234 sdma_rb_node->pid = current->pid;
235
236 qib_user_sdma_rb_insert(&qib_user_sdma_rb_root, sdma_rb_node);
237 }
238 pq->sdma_rb_node = sdma_rb_node;
239
240 goto done;
241
242 err_rb:
243 dma_pool_destroy(pq->header_cache);
244 err_slab:
245 kmem_cache_destroy(pq->pkt_slab);
246 err_kfree:
247 kfree(pq);
248 pq = NULL;
249
250 done:
251 return pq;
252 }
253
254 static void qib_user_sdma_init_frag(struct qib_user_sdma_pkt *pkt,
255 int i, u16 offset, u16 len,
256 u16 first_desc, u16 last_desc,
257 u16 put_page, u16 dma_mapped,
258 struct page *page, void *kvaddr,
259 dma_addr_t dma_addr, u16 dma_length)
260 {
261 pkt->addr[i].offset = offset;
262 pkt->addr[i].length = len;
263 pkt->addr[i].first_desc = first_desc;
264 pkt->addr[i].last_desc = last_desc;
265 pkt->addr[i].put_page = put_page;
266 pkt->addr[i].dma_mapped = dma_mapped;
267 pkt->addr[i].page = page;
268 pkt->addr[i].kvaddr = kvaddr;
269 pkt->addr[i].addr = dma_addr;
270 pkt->addr[i].dma_length = dma_length;
271 }
272
273 static void *qib_user_sdma_alloc_header(struct qib_user_sdma_queue *pq,
274 size_t len, dma_addr_t *dma_addr)
275 {
276 void *hdr;
277
278 if (len == QIB_USER_SDMA_EXP_HEADER_LENGTH)
279 hdr = dma_pool_alloc(pq->header_cache, GFP_KERNEL,
280 dma_addr);
281 else
282 hdr = NULL;
283
284 if (!hdr) {
285 hdr = kmalloc(len, GFP_KERNEL);
286 if (!hdr)
287 return NULL;
288
289 *dma_addr = 0;
290 }
291
292 return hdr;
293 }
294
295 static int qib_user_sdma_page_to_frags(const struct qib_devdata *dd,
296 struct qib_user_sdma_queue *pq,
297 struct qib_user_sdma_pkt *pkt,
298 struct page *page, u16 put,
299 u16 offset, u16 len, void *kvaddr)
300 {
301 __le16 *pbc16;
302 void *pbcvaddr;
303 struct qib_message_header *hdr;
304 u16 newlen, pbclen, lastdesc, dma_mapped;
305 u32 vcto;
306 union qib_seqnum seqnum;
307 dma_addr_t pbcdaddr;
308 dma_addr_t dma_addr =
309 dma_map_page(&dd->pcidev->dev,
310 page, offset, len, DMA_TO_DEVICE);
311 int ret = 0;
312
313 if (dma_mapping_error(&dd->pcidev->dev, dma_addr)) {
314
315
316
317
318
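/*
 * The mapping failed and the page never made it into the packet, so drop
 * our reference here: pinned user pages are released with put_user_page(),
 * a locally allocated coalesce page is unmapped and freed.
 */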
319 if (put) {
320 put_user_page(page);
321 } else {
322
323 kunmap(page);
324 __free_page(page);
325 }
326 ret = -ENOMEM;
327 goto done;
328 }
329 offset = 0;
330 dma_mapped = 1;
331
332
333 next_fragment:
334
335
336
337
338
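/* For tid-dma, a fragment may not extend past the current TID session entry. */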
339 if (pkt->tiddma && len > pkt->tidsm[pkt->tidsmidx].length)
340 newlen = pkt->tidsm[pkt->tidsmidx].length;
341 else
342 newlen = len;
343
344
345
346
347
348
349
350
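/*
 * Decide whether this fragment ends a packet: it does when it fills
 * frag_size, uses up the current TID entry, or exhausts the payload.
 */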
351 lastdesc = 0;
352 if ((pkt->payload_size + newlen) >= pkt->frag_size) {
353 newlen = pkt->frag_size - pkt->payload_size;
354 lastdesc = 1;
355 } else if (pkt->tiddma) {
356 if (newlen == pkt->tidsm[pkt->tidsmidx].length)
357 lastdesc = 1;
358 } else {
359 if (newlen == pkt->bytes_togo)
360 lastdesc = 1;
361 }
362
363
364 qib_user_sdma_init_frag(pkt, pkt->naddr,
365 offset, newlen,
366 0, lastdesc,
367 put, dma_mapped,
368 page, kvaddr,
369 dma_addr, len);
370 pkt->bytes_togo -= newlen;
371 pkt->payload_size += newlen;
372 pkt->naddr++;
373 if (pkt->naddr == pkt->addrlimit) {
374 ret = -EFAULT;
375 goto done;
376 }
377
378
379 if (pkt->bytes_togo == 0) {
380
381
382 if (!pkt->addr[pkt->index].addr) {
383 pkt->addr[pkt->index].addr =
384 dma_map_single(&dd->pcidev->dev,
385 pkt->addr[pkt->index].kvaddr,
386 pkt->addr[pkt->index].dma_length,
387 DMA_TO_DEVICE);
388 if (dma_mapping_error(&dd->pcidev->dev,
389 pkt->addr[pkt->index].addr)) {
390 ret = -ENOMEM;
391 goto done;
392 }
393 pkt->addr[pkt->index].dma_mapped = 1;
394 }
395
396 goto done;
397 }
398
399
400 if (pkt->tiddma) {
401 pkt->tidsm[pkt->tidsmidx].length -= newlen;
402 if (pkt->tidsm[pkt->tidsmidx].length) {
403 pkt->tidsm[pkt->tidsmidx].offset += newlen;
404 } else {
405 pkt->tidsmidx++;
406 if (pkt->tidsmidx == pkt->tidsmcount) {
407 ret = -EFAULT;
408 goto done;
409 }
410 }
411 }
412
413
414
415
416
417
418 if (lastdesc == 0)
419 goto done;
420
421
422
423
424
425
426
427
428
429
430
431
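/*
 * The fragment ended a packet but caller data remains, so duplicate the
 * PBC and header: trim the current header to the bytes it actually covers,
 * then adjust the copy (PBC length, header flags and offsets, checksum,
 * sequence number) so it can start the follow-on packet.
 */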
432 pbclen = pkt->addr[pkt->index].length;
433 pbcvaddr = qib_user_sdma_alloc_header(pq, pbclen, &pbcdaddr);
434 if (!pbcvaddr) {
435 ret = -ENOMEM;
436 goto done;
437 }
438
439 pbc16 = (__le16 *)pkt->addr[pkt->index].kvaddr;
440 memcpy(pbcvaddr, pbc16, pbclen);
441
442
443 hdr = (struct qib_message_header *)&pbc16[4];
444
445
446 pbc16[0] = cpu_to_le16(le16_to_cpu(pbc16[0])-(pkt->bytes_togo>>2));
447
448
449 hdr->lrh[2] = cpu_to_be16(le16_to_cpu(pbc16[0]));
450
451 if (pkt->tiddma) {
452
453 hdr->iph.pkt_flags =
454 cpu_to_le16(le16_to_cpu(hdr->iph.pkt_flags)|0x2);
455
456 hdr->flags &= ~(0x04|0x20);
457 } else {
458
459 hdr->bth[0] = cpu_to_be32(be32_to_cpu(hdr->bth[0])&0xFFCFFFFF);
460
461 hdr->flags &= ~(0x04);
462 }
463
464
465 vcto = le32_to_cpu(hdr->iph.ver_ctxt_tid_offset);
466 hdr->iph.chksum = cpu_to_le16(QIB_LRH_BTH +
467 be16_to_cpu(hdr->lrh[2]) -
468 ((vcto>>16)&0xFFFF) - (vcto&0xFFFF) -
469 le16_to_cpu(hdr->iph.pkt_flags));
470
471
472
473 if (!pkt->addr[pkt->index].addr) {
474 pkt->addr[pkt->index].addr =
475 dma_map_single(&dd->pcidev->dev,
476 pkt->addr[pkt->index].kvaddr,
477 pkt->addr[pkt->index].dma_length,
478 DMA_TO_DEVICE);
479 if (dma_mapping_error(&dd->pcidev->dev,
480 pkt->addr[pkt->index].addr)) {
481 ret = -ENOMEM;
482 goto done;
483 }
484 pkt->addr[pkt->index].dma_mapped = 1;
485 }
486
487
488 pbc16 = (__le16 *)pbcvaddr;
489 hdr = (struct qib_message_header *)&pbc16[4];
490
491
492 pbc16[0] = cpu_to_le16(le16_to_cpu(pbc16[0])-(pkt->payload_size>>2));
493
494
495 hdr->lrh[2] = cpu_to_be16(le16_to_cpu(pbc16[0]));
496
497 if (pkt->tiddma) {
498
499 hdr->iph.ver_ctxt_tid_offset = cpu_to_le32(
500 (le32_to_cpu(hdr->iph.ver_ctxt_tid_offset)&0xFF000000) +
501 (pkt->tidsm[pkt->tidsmidx].tid<<QLOGIC_IB_I_TID_SHIFT) +
502 (pkt->tidsm[pkt->tidsmidx].offset>>2));
503 } else {
504
505 hdr->uwords[2] += pkt->payload_size;
506 }
507
508
509 vcto = le32_to_cpu(hdr->iph.ver_ctxt_tid_offset);
510 hdr->iph.chksum = cpu_to_le16(QIB_LRH_BTH +
511 be16_to_cpu(hdr->lrh[2]) -
512 ((vcto>>16)&0xFFFF) - (vcto&0xFFFF) -
513 le16_to_cpu(hdr->iph.pkt_flags));
514
515
516 seqnum.val = be32_to_cpu(hdr->bth[2]);
517 if (pkt->tiddma)
518 seqnum.seq++;
519 else
520 seqnum.pkt++;
521 hdr->bth[2] = cpu_to_be32(seqnum.val);
522
523
524 qib_user_sdma_init_frag(pkt, pkt->naddr,
525 0, pbclen,
526 1, 0,
527 0, 0,
528 NULL, pbcvaddr,
529 pbcdaddr, pbclen);
530 pkt->index = pkt->naddr;
531 pkt->payload_size = 0;
532 pkt->naddr++;
533 if (pkt->naddr == pkt->addrlimit) {
534 ret = -EFAULT;
535 goto done;
536 }
537
538
539 if (newlen != len) {
540 if (dma_mapped) {
541 put = 0;
542 dma_mapped = 0;
543 page = NULL;
544 kvaddr = NULL;
545 }
546 len -= newlen;
547 offset += newlen;
548
549 goto next_fragment;
550 }
551
552 done:
553 return ret;
554 }
555
556
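/*
 * Copy an entire small payload into one freshly allocated kernel page so
 * it can be described to page_to_frags() as a single fragment.
 */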
557 static int qib_user_sdma_coalesce(const struct qib_devdata *dd,
558 struct qib_user_sdma_queue *pq,
559 struct qib_user_sdma_pkt *pkt,
560 const struct iovec *iov,
561 unsigned long niov)
562 {
563 int ret = 0;
564 struct page *page = alloc_page(GFP_KERNEL);
565 void *mpage_save;
566 char *mpage;
567 int i;
568 int len = 0;
569
570 if (!page) {
571 ret = -ENOMEM;
572 goto done;
573 }
574
575 mpage = kmap(page);
576 mpage_save = mpage;
577 for (i = 0; i < niov; i++) {
578 int cfur;
579
580 cfur = copy_from_user(mpage,
581 iov[i].iov_base, iov[i].iov_len);
582 if (cfur) {
583 ret = -EFAULT;
584 goto free_unmap;
585 }
586
587 mpage += iov[i].iov_len;
588 len += iov[i].iov_len;
589 }
590
591 ret = qib_user_sdma_page_to_frags(dd, pq, pkt,
592 page, 0, 0, len, mpage_save);
593 goto done;
594
595 free_unmap:
596 kunmap(page);
597 __free_page(page);
598 done:
599 return ret;
600 }
601
602
603
604
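/* Number of pages spanned by a single iovec. */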
605 static int qib_user_sdma_num_pages(const struct iovec *iov)
606 {
607 const unsigned long addr = (unsigned long) iov->iov_base;
608 const unsigned long len = iov->iov_len;
609 const unsigned long spage = addr & PAGE_MASK;
610 const unsigned long epage = (addr + len - 1) & PAGE_MASK;
611
612 return 1 + ((epage - spage) >> PAGE_SHIFT);
613 }
614
615 static void qib_user_sdma_free_pkt_frag(struct device *dev,
616 struct qib_user_sdma_queue *pq,
617 struct qib_user_sdma_pkt *pkt,
618 int frag)
619 {
620 const int i = frag;
621
622 if (pkt->addr[i].page) {
623
624 if (pkt->addr[i].dma_mapped)
625 dma_unmap_page(dev,
626 pkt->addr[i].addr,
627 pkt->addr[i].dma_length,
628 DMA_TO_DEVICE);
629
630 if (pkt->addr[i].kvaddr)
631 kunmap(pkt->addr[i].page);
632
633 if (pkt->addr[i].put_page)
634 put_user_page(pkt->addr[i].page);
635 else
636 __free_page(pkt->addr[i].page);
637 } else if (pkt->addr[i].kvaddr) {
638
639 if (pkt->addr[i].dma_mapped) {
640
641 dma_unmap_single(dev,
642 pkt->addr[i].addr,
643 pkt->addr[i].dma_length,
644 DMA_TO_DEVICE);
645 kfree(pkt->addr[i].kvaddr);
646 } else if (pkt->addr[i].addr) {
647
648 dma_pool_free(pq->header_cache,
649 pkt->addr[i].kvaddr, pkt->addr[i].addr);
650 } else {
651
652 kfree(pkt->addr[i].kvaddr);
653 }
654 }
655 }
656
657
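/*
 * Pin the user pages backing one iovec (in batches of up to eight, the
 * size of the on-stack array) and turn each page into packet fragments.
 */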
658 static int qib_user_sdma_pin_pages(const struct qib_devdata *dd,
659 struct qib_user_sdma_queue *pq,
660 struct qib_user_sdma_pkt *pkt,
661 unsigned long addr, int tlen, int npages)
662 {
663 struct page *pages[8];
664 int i, j;
665 int ret = 0;
666
667 while (npages) {
668 if (npages > 8)
669 j = 8;
670 else
671 j = npages;
672
673 ret = get_user_pages_fast(addr, j, FOLL_LONGTERM, pages);
674 if (ret != j) {
675 i = 0;
676 j = ret;
677 ret = -ENOMEM;
678 goto free_pages;
679 }
680
681 for (i = 0; i < j; i++) {
682
683 unsigned long fofs = addr & ~PAGE_MASK;
684 int flen = ((fofs + tlen) > PAGE_SIZE) ?
685 (PAGE_SIZE - fofs) : tlen;
686
687 ret = qib_user_sdma_page_to_frags(dd, pq, pkt,
688 pages[i], 1, fofs, flen, NULL);
689 if (ret < 0) {
690
691
692
693 i++;
694 goto free_pages;
695 }
696
697 addr += flen;
698 tlen -= flen;
699 }
700
701 npages -= j;
702 }
703
704 goto done;
705
706
707 free_pages:
708 while (i < j)
709 put_user_page(pages[i++]);
710
711 done:
712 return ret;
713 }
714
715 static int qib_user_sdma_pin_pkt(const struct qib_devdata *dd,
716 struct qib_user_sdma_queue *pq,
717 struct qib_user_sdma_pkt *pkt,
718 const struct iovec *iov,
719 unsigned long niov)
720 {
721 int ret = 0;
722 unsigned long idx;
723
724 for (idx = 0; idx < niov; idx++) {
725 const int npages = qib_user_sdma_num_pages(iov + idx);
726 const unsigned long addr = (unsigned long) iov[idx].iov_base;
727
728 ret = qib_user_sdma_pin_pages(dd, pq, pkt, addr,
729 iov[idx].iov_len, npages);
730 if (ret < 0)
731 goto free_pkt;
732 }
733
734 goto done;
735
736 free_pkt:
737
738 for (idx = 1; idx < pkt->naddr; idx++)
739 qib_user_sdma_free_pkt_frag(&dd->pcidev->dev, pq, pkt, idx);
740
741
742
743
744 if (pkt->addr[0].dma_mapped) {
745 dma_unmap_single(&dd->pcidev->dev,
746 pkt->addr[0].addr,
747 pkt->addr[0].dma_length,
748 DMA_TO_DEVICE);
749 pkt->addr[0].addr = 0;
750 pkt->addr[0].dma_mapped = 0;
751 }
752
753 done:
754 return ret;
755 }
756
757 static int qib_user_sdma_init_payload(const struct qib_devdata *dd,
758 struct qib_user_sdma_queue *pq,
759 struct qib_user_sdma_pkt *pkt,
760 const struct iovec *iov,
761 unsigned long niov, int npages)
762 {
763 int ret = 0;
764
765 if (pkt->frag_size == pkt->bytes_togo &&
766 npages >= ARRAY_SIZE(pkt->addr))
767 ret = qib_user_sdma_coalesce(dd, pq, pkt, iov, niov);
768 else
769 ret = qib_user_sdma_pin_pkt(dd, pq, pkt, iov, niov);
770
771 return ret;
772 }
773
774
775 static void qib_user_sdma_free_pkt_list(struct device *dev,
776 struct qib_user_sdma_queue *pq,
777 struct list_head *list)
778 {
779 struct qib_user_sdma_pkt *pkt, *pkt_next;
780
781 list_for_each_entry_safe(pkt, pkt_next, list, list) {
782 int i;
783
784 for (i = 0; i < pkt->naddr; i++)
785 qib_user_sdma_free_pkt_frag(dev, pq, pkt, i);
786
787 if (pkt->largepkt)
788 kfree(pkt);
789 else
790 kmem_cache_free(pq->pkt_slab, pkt);
791 }
792 INIT_LIST_HEAD(list);
793 }
794
795
796
797
798
799
800
801
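/*
 * Parse the user's iovec array into a list of packets.  Each packet starts
 * with a PBC+header iovec, optionally followed by payload iovecs and, for
 * tid-dma, a trailing array of TID session members.  Returns the number of
 * iovecs consumed; on error everything already placed on the list is freed.
 */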
802 static int qib_user_sdma_queue_pkts(const struct qib_devdata *dd,
803 struct qib_pportdata *ppd,
804 struct qib_user_sdma_queue *pq,
805 const struct iovec *iov,
806 unsigned long niov,
807 struct list_head *list,
808 int *maxpkts, int *ndesc)
809 {
810 unsigned long idx = 0;
811 int ret = 0;
812 int npkts = 0;
813 __le32 *pbc;
814 dma_addr_t dma_addr;
815 struct qib_user_sdma_pkt *pkt = NULL;
816 size_t len;
817 size_t nw;
818 u32 counter = pq->counter;
819 u16 frag_size;
820
821 while (idx < niov && npkts < *maxpkts) {
822 const unsigned long addr = (unsigned long) iov[idx].iov_base;
823 const unsigned long idx_save = idx;
824 unsigned pktnw;
825 unsigned pktnwc;
826 int nfrags = 0;
827 int npages = 0;
828 int bytes_togo = 0;
829 int tiddma = 0;
830 int cfur;
831
832 len = iov[idx].iov_len;
833 nw = len >> 2;
834
835 if (len < QIB_USER_SDMA_MIN_HEADER_LENGTH ||
836 len > PAGE_SIZE || len & 3 || addr & 3) {
837 ret = -EINVAL;
838 goto free_list;
839 }
840
841 pbc = qib_user_sdma_alloc_header(pq, len, &dma_addr);
842 if (!pbc) {
843 ret = -ENOMEM;
844 goto free_list;
845 }
846
847 cfur = copy_from_user(pbc, iov[idx].iov_base, len);
848 if (cfur) {
849 ret = -EFAULT;
850 goto free_pbc;
851 }
852
853
854
855
856
857
858 pktnwc = nw - 1;
859
860
861
862
863
864
865
866
867
868
869 pktnw = le32_to_cpu(*pbc) & 0xFFFF;
870 if (pktnw < pktnwc) {
871 ret = -EINVAL;
872 goto free_pbc;
873 }
874
875 idx++;
876 while (pktnwc < pktnw && idx < niov) {
877 const size_t slen = iov[idx].iov_len;
878 const unsigned long faddr =
879 (unsigned long) iov[idx].iov_base;
880
881 if (slen & 3 || faddr & 3 || !slen) {
882 ret = -EINVAL;
883 goto free_pbc;
884 }
885
886 npages += qib_user_sdma_num_pages(&iov[idx]);
887
888 bytes_togo += slen;
889 pktnwc += slen >> 2;
890 idx++;
891 nfrags++;
892 }
893
894 if (pktnwc != pktnw) {
895 ret = -EINVAL;
896 goto free_pbc;
897 }
898
899 frag_size = ((le32_to_cpu(*pbc))>>16) & 0xFFFF;
900 if (((frag_size ? frag_size : bytes_togo) + len) >
901 ppd->ibmaxlen) {
902 ret = -EINVAL;
903 goto free_pbc;
904 }
905
906 if (frag_size) {
907 int tidsmsize, n;
908 size_t pktsize;
909
910 n = npages*((2*PAGE_SIZE/frag_size)+1);
911 pktsize = struct_size(pkt, addr, n);
912
913
914
915
916 tiddma = (((le32_to_cpu(pbc[7])>>
917 QLOGIC_IB_I_TID_SHIFT)&
918 QLOGIC_IB_I_TID_MASK) !=
919 QLOGIC_IB_I_TID_MASK);
920
921 if (tiddma)
922 tidsmsize = iov[idx].iov_len;
923 else
924 tidsmsize = 0;
925
926 pkt = kmalloc(pktsize+tidsmsize, GFP_KERNEL);
927 if (!pkt) {
928 ret = -ENOMEM;
929 goto free_pbc;
930 }
931 pkt->largepkt = 1;
932 pkt->frag_size = frag_size;
933 pkt->addrlimit = n + ARRAY_SIZE(pkt->addr);
934
935 if (tiddma) {
936 char *tidsm = (char *)pkt + pktsize;
937
938 cfur = copy_from_user(tidsm,
939 iov[idx].iov_base, tidsmsize);
940 if (cfur) {
941 ret = -EFAULT;
942 goto free_pkt;
943 }
944 pkt->tidsm =
945 (struct qib_tid_session_member *)tidsm;
946 pkt->tidsmcount = tidsmsize/
947 sizeof(struct qib_tid_session_member);
948 pkt->tidsmidx = 0;
949 idx++;
950 }
951
952
953
954
955
956
957 *pbc = cpu_to_le32(le32_to_cpu(*pbc) & 0x0000FFFF);
958 } else {
959 pkt = kmem_cache_alloc(pq->pkt_slab, GFP_KERNEL);
960 if (!pkt) {
961 ret = -ENOMEM;
962 goto free_pbc;
963 }
964 pkt->largepkt = 0;
965 pkt->frag_size = bytes_togo;
966 pkt->addrlimit = ARRAY_SIZE(pkt->addr);
967 }
968 pkt->bytes_togo = bytes_togo;
969 pkt->payload_size = 0;
970 pkt->counter = counter;
971 pkt->tiddma = tiddma;
972
973
974 qib_user_sdma_init_frag(pkt, 0,
975 0, len,
976 1, 0,
977 0, 0,
978 NULL, pbc,
979 dma_addr, len);
980 pkt->index = 0;
981 pkt->naddr = 1;
982
983 if (nfrags) {
984 ret = qib_user_sdma_init_payload(dd, pq, pkt,
985 iov + idx_save + 1,
986 nfrags, npages);
987 if (ret < 0)
988 goto free_pkt;
989 } else {
990
991
992 pkt->addr[0].last_desc = 1;
993
994 if (dma_addr == 0) {
995
996
997
998
999 dma_addr = dma_map_single(&dd->pcidev->dev,
1000 pbc, len, DMA_TO_DEVICE);
1001 if (dma_mapping_error(&dd->pcidev->dev,
1002 dma_addr)) {
1003 ret = -ENOMEM;
1004 goto free_pkt;
1005 }
1006 pkt->addr[0].addr = dma_addr;
1007 pkt->addr[0].dma_mapped = 1;
1008 }
1009 }
1010
1011 counter++;
1012 npkts++;
1013 pkt->pq = pq;
1014 pkt->index = 0;
1015 *ndesc += pkt->naddr;
1016
1017 list_add_tail(&pkt->list, list);
1018 }
1019
1020 *maxpkts = npkts;
1021 ret = idx;
1022 goto done;
1023
1024 free_pkt:
1025 if (pkt->largepkt)
1026 kfree(pkt);
1027 else
1028 kmem_cache_free(pq->pkt_slab, pkt);
1029 free_pbc:
1030 if (dma_addr)
1031 dma_pool_free(pq->header_cache, pbc, dma_addr);
1032 else
1033 kfree(pbc);
1034 free_list:
1035 qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, list);
1036 done:
1037 return ret;
1038 }
1039
1040 static void qib_user_sdma_set_complete_counter(struct qib_user_sdma_queue *pq,
1041 u32 c)
1042 {
1043 pq->sent_counter = c;
1044 }
1045
1046
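/*
 * Move packets whose descriptors the hardware has retired from the sent
 * list onto a private list and free them; returns the number cleaned.
 */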
1047 static int qib_user_sdma_queue_clean(struct qib_pportdata *ppd,
1048 struct qib_user_sdma_queue *pq)
1049 {
1050 struct qib_devdata *dd = ppd->dd;
1051 struct list_head free_list;
1052 struct qib_user_sdma_pkt *pkt;
1053 struct qib_user_sdma_pkt *pkt_prev;
1054 unsigned long flags;
1055 int ret = 0;
1056
1057 if (!pq->num_sending)
1058 return 0;
1059
1060 INIT_LIST_HEAD(&free_list);
1061
1062
1063
1064
1065
1066
1067 spin_lock_irqsave(&pq->sent_lock, flags);
1068 list_for_each_entry_safe(pkt, pkt_prev, &pq->sent, list) {
1069 s64 descd = ppd->sdma_descq_removed - pkt->added;
1070
1071 if (descd < 0)
1072 break;
1073
1074 list_move_tail(&pkt->list, &free_list);
1075
1076
1077 ret++;
1078 pq->num_sending--;
1079 }
1080 spin_unlock_irqrestore(&pq->sent_lock, flags);
1081
1082 if (!list_empty(&free_list)) {
1083 u32 counter;
1084
1085 pkt = list_entry(free_list.prev,
1086 struct qib_user_sdma_pkt, list);
1087 counter = pkt->counter;
1088
1089 qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &free_list);
1090 qib_user_sdma_set_complete_counter(pq, counter);
1091 }
1092
1093 return ret;
1094 }
1095
1096 void qib_user_sdma_queue_destroy(struct qib_user_sdma_queue *pq)
1097 {
1098 if (!pq)
1099 return;
1100
1101 pq->sdma_rb_node->refcount--;
1102 if (pq->sdma_rb_node->refcount == 0) {
1103 rb_erase(&pq->sdma_rb_node->node, &qib_user_sdma_rb_root);
1104 kfree(pq->sdma_rb_node);
1105 }
1106 dma_pool_destroy(pq->header_cache);
1107 kmem_cache_destroy(pq->pkt_slab);
1108 kfree(pq);
1109 }
1110
1111
1112 static int qib_user_sdma_hwqueue_clean(struct qib_pportdata *ppd)
1113 {
1114 int ret;
1115 unsigned long flags;
1116
1117 spin_lock_irqsave(&ppd->sdma_lock, flags);
1118 ret = qib_sdma_make_progress(ppd);
1119 spin_unlock_irqrestore(&ppd->sdma_lock, flags);
1120
1121 return ret;
1122 }
1123
1124
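/*
 * Wait (up to roughly 250 * 20 ms) for pending and sent packets to finish;
 * anything still outstanding after that is forcibly reclaimed.
 */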
1125 void qib_user_sdma_queue_drain(struct qib_pportdata *ppd,
1126 struct qib_user_sdma_queue *pq)
1127 {
1128 struct qib_devdata *dd = ppd->dd;
1129 unsigned long flags;
1130 int i;
1131
1132 if (!pq)
1133 return;
1134
1135 for (i = 0; i < QIB_USER_SDMA_DRAIN_TIMEOUT; i++) {
1136 mutex_lock(&pq->lock);
1137 if (!pq->num_pending && !pq->num_sending) {
1138 mutex_unlock(&pq->lock);
1139 break;
1140 }
1141 qib_user_sdma_hwqueue_clean(ppd);
1142 qib_user_sdma_queue_clean(ppd, pq);
1143 mutex_unlock(&pq->lock);
1144 msleep(20);
1145 }
1146
1147 if (pq->num_pending || pq->num_sending) {
1148 struct qib_user_sdma_pkt *pkt;
1149 struct qib_user_sdma_pkt *pkt_prev;
1150 struct list_head free_list;
1151
1152 mutex_lock(&pq->lock);
1153 spin_lock_irqsave(&ppd->sdma_lock, flags);
1154
1155
1156
1157 if (pq->num_pending) {
1158 list_for_each_entry_safe(pkt, pkt_prev,
1159 &ppd->sdma_userpending, list) {
1160 if (pkt->pq == pq) {
1161 list_move_tail(&pkt->list, &pq->sent);
1162 pq->num_pending--;
1163 pq->num_sending++;
1164 }
1165 }
1166 }
1167 spin_unlock_irqrestore(&ppd->sdma_lock, flags);
1168
1169 qib_dev_err(dd, "user sdma lists not empty: forcing!\n");
1170 INIT_LIST_HEAD(&free_list);
1171 list_splice_init(&pq->sent, &free_list);
1172 pq->num_sending = 0;
1173 qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &free_list);
1174 mutex_unlock(&pq->lock);
1175 }
1176 }
1177
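/*
 * Descriptor builders: desc0 packs the low address bits, generation, dword
 * length and dword offset plus the first/last/interrupt flag bits; desc1
 * holds the upper 32 address bits.
 */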
1178 static inline __le64 qib_sdma_make_desc0(u8 gen,
1179 u64 addr, u64 dwlen, u64 dwoffset)
1180 {
1181 return cpu_to_le64(
1182 ((addr & 0xfffffffcULL) << 32) |
1183
1184 ((gen & 3ULL) << 30) |
1185
1186 ((dwlen & 0x7ffULL) << 16) |
1187
1188 (dwoffset & 0x7ffULL));
1189 }
1190
1191 static inline __le64 qib_sdma_make_first_desc0(__le64 descq)
1192 {
1193 return descq | cpu_to_le64(1ULL << 12);
1194 }
1195
1196 static inline __le64 qib_sdma_make_last_desc0(__le64 descq)
1197 {
1198
1199 return descq | cpu_to_le64(1ULL << 11 | 1ULL << 13);
1200 }
1201
1202 static inline __le64 qib_sdma_make_desc1(u64 addr)
1203 {
1204
1205 return cpu_to_le64(addr >> 32);
1206 }
1207
1208 static void qib_user_sdma_send_frag(struct qib_pportdata *ppd,
1209 struct qib_user_sdma_pkt *pkt, int idx,
1210 unsigned ofs, u16 tail, u8 gen)
1211 {
1212 const u64 addr = (u64) pkt->addr[idx].addr +
1213 (u64) pkt->addr[idx].offset;
1214 const u64 dwlen = (u64) pkt->addr[idx].length / 4;
1215 __le64 *descqp;
1216 __le64 descq0;
1217
1218 descqp = &ppd->sdma_descq[tail].qw[0];
1219
1220 descq0 = qib_sdma_make_desc0(gen, addr, dwlen, ofs);
1221 if (pkt->addr[idx].first_desc)
1222 descq0 = qib_sdma_make_first_desc0(descq0);
1223 if (pkt->addr[idx].last_desc) {
1224 descq0 = qib_sdma_make_last_desc0(descq0);
1225 if (ppd->sdma_intrequest) {
1226 descq0 |= cpu_to_le64(1ULL << 15);
1227 ppd->sdma_intrequest = 0;
1228 }
1229 }
1230
1231 descqp[0] = descq0;
1232 descqp[1] = qib_sdma_make_desc1(addr);
1233 }
1234
1235 void qib_user_sdma_send_desc(struct qib_pportdata *ppd,
1236 struct list_head *pktlist)
1237 {
1238 struct qib_devdata *dd = ppd->dd;
1239 u16 nfree, nsent;
1240 u16 tail, tail_c;
1241 u8 gen, gen_c;
1242
1243 nfree = qib_sdma_descq_freecnt(ppd);
1244 if (!nfree)
1245 return;
1246
1247 retry:
1248 nsent = 0;
1249 tail_c = tail = ppd->sdma_descq_tail;
1250 gen_c = gen = ppd->sdma_generation;
1251 while (!list_empty(pktlist)) {
1252 struct qib_user_sdma_pkt *pkt =
1253 list_entry(pktlist->next, struct qib_user_sdma_pkt,
1254 list);
1255 int i, j, c = 0;
1256 unsigned ofs = 0;
1257 u16 dtail = tail;
1258
1259 for (i = pkt->index; i < pkt->naddr && nfree; i++) {
1260 qib_user_sdma_send_frag(ppd, pkt, i, ofs, tail, gen);
1261 ofs += pkt->addr[i].length >> 2;
1262
1263 if (++tail == ppd->sdma_descq_cnt) {
1264 tail = 0;
1265 ++gen;
1266 ppd->sdma_intrequest = 1;
1267 } else if (tail == (ppd->sdma_descq_cnt>>1)) {
1268 ppd->sdma_intrequest = 1;
1269 }
1270 nfree--;
1271 if (pkt->addr[i].last_desc == 0)
1272 continue;
1273
1274
1275
1276
1277
1278
1279
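/*
 * If the packet will not fit a 2 KB send buffer, mark every one of its
 * descriptors with bit 14, which presumably selects the large send buffer.
 */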
1280 if (ofs > dd->piosize2kmax_dwords) {
1281 for (j = pkt->index; j <= i; j++) {
1282 ppd->sdma_descq[dtail].qw[0] |=
1283 cpu_to_le64(1ULL << 14);
1284 if (++dtail == ppd->sdma_descq_cnt)
1285 dtail = 0;
1286 }
1287 }
1288 c += i + 1 - pkt->index;
1289 pkt->index = i + 1;
1290 tail_c = dtail = tail;
1291 gen_c = gen;
1292 ofs = 0;
1293 }
1294
1295 ppd->sdma_descq_added += c;
1296 nsent += c;
1297 if (pkt->index == pkt->naddr) {
1298 pkt->added = ppd->sdma_descq_added;
1299 pkt->pq->added = pkt->added;
1300 pkt->pq->num_pending--;
1301 spin_lock(&pkt->pq->sent_lock);
1302 pkt->pq->num_sending++;
1303 list_move_tail(&pkt->list, &pkt->pq->sent);
1304 spin_unlock(&pkt->pq->sent_lock);
1305 }
1306 if (!nfree || (nsent<<2) > ppd->sdma_descq_cnt)
1307 break;
1308 }
1309
1310
1311 if (ppd->sdma_descq_tail != tail_c) {
1312 ppd->sdma_generation = gen_c;
1313 dd->f_sdma_update_tail(ppd, tail_c);
1314 }
1315
1316 if (nfree && !list_empty(pktlist))
1317 goto retry;
1318 }
1319
1320
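/*
 * Hand freshly built packets to the hardware.  When several processes share
 * the engine (rb-node refcount > 1) the packets go onto the port's shared
 * sdma_userpending list; otherwise they are pushed directly until the list
 * drains.  Fails with -ECOMM if the link or the engine is not up.
 */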
1321 static int qib_user_sdma_push_pkts(struct qib_pportdata *ppd,
1322 struct qib_user_sdma_queue *pq,
1323 struct list_head *pktlist, int count)
1324 {
1325 unsigned long flags;
1326
1327 if (unlikely(!(ppd->lflags & QIBL_LINKACTIVE)))
1328 return -ECOMM;
1329
1330
1331 if (pq->sdma_rb_node->refcount > 1) {
1332 spin_lock_irqsave(&ppd->sdma_lock, flags);
1333 if (unlikely(!__qib_sdma_running(ppd))) {
1334 spin_unlock_irqrestore(&ppd->sdma_lock, flags);
1335 return -ECOMM;
1336 }
1337 pq->num_pending += count;
1338 list_splice_tail_init(pktlist, &ppd->sdma_userpending);
1339 qib_user_sdma_send_desc(ppd, &ppd->sdma_userpending);
1340 spin_unlock_irqrestore(&ppd->sdma_lock, flags);
1341 return 0;
1342 }
1343
1344
1345
1346
1347
1348
1349
1350
1351 pq->num_pending += count;
1352
1353
1354
1355
1356
1357
1358 do {
1359 spin_lock_irqsave(&ppd->sdma_lock, flags);
1360 if (unlikely(!__qib_sdma_running(ppd))) {
1361 spin_unlock_irqrestore(&ppd->sdma_lock, flags);
1362 return -ECOMM;
1363 }
1364 qib_user_sdma_send_desc(ppd, pktlist);
1365 if (!list_empty(pktlist))
1366 qib_sdma_make_progress(ppd);
1367 spin_unlock_irqrestore(&ppd->sdma_lock, flags);
1368 } while (!list_empty(pktlist));
1369
1370 return 0;
1371 }
1372
1373 int qib_user_sdma_writev(struct qib_ctxtdata *rcd,
1374 struct qib_user_sdma_queue *pq,
1375 const struct iovec *iov,
1376 unsigned long dim)
1377 {
1378 struct qib_devdata *dd = rcd->dd;
1379 struct qib_pportdata *ppd = rcd->ppd;
1380 int ret = 0;
1381 struct list_head list;
1382 int npkts = 0;
1383
1384 INIT_LIST_HEAD(&list);
1385
1386 mutex_lock(&pq->lock);
1387
1388
1389 if (!qib_sdma_running(ppd))
1390 goto done_unlock;
1391
1392
1393 if (pq->added > ppd->sdma_descq_removed)
1394 qib_user_sdma_hwqueue_clean(ppd);
1395
1396 if (pq->num_sending)
1397 qib_user_sdma_queue_clean(ppd, pq);
1398
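/* Build packets from the iovecs a batch at a time and push each batch. */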
1399 while (dim) {
1400 int mxp = 1;
1401 int ndesc = 0;
1402
1403 ret = qib_user_sdma_queue_pkts(dd, ppd, pq,
1404 iov, dim, &list, &mxp, &ndesc);
1405 if (ret < 0)
1406 goto done_unlock;
1407 else {
1408 dim -= ret;
1409 iov += ret;
1410 }
1411
1412
1413 if (!list_empty(&list)) {
1414
1415
1416
1417 if (qib_sdma_descq_freecnt(ppd) < ndesc) {
1418 qib_user_sdma_hwqueue_clean(ppd);
1419 if (pq->num_sending)
1420 qib_user_sdma_queue_clean(ppd, pq);
1421 }
1422
1423 ret = qib_user_sdma_push_pkts(ppd, pq, &list, mxp);
1424 if (ret < 0)
1425 goto done_unlock;
1426 else {
1427 npkts += mxp;
1428 pq->counter += mxp;
1429 }
1430 }
1431 }
1432
1433 done_unlock:
1434 if (!list_empty(&list))
1435 qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &list);
1436 mutex_unlock(&pq->lock);
1437
1438 return (ret < 0) ? ret : npkts;
1439 }
1440
1441 int qib_user_sdma_make_progress(struct qib_pportdata *ppd,
1442 struct qib_user_sdma_queue *pq)
1443 {
1444 int ret = 0;
1445
1446 mutex_lock(&pq->lock);
1447 qib_user_sdma_hwqueue_clean(ppd);
1448 ret = qib_user_sdma_queue_clean(ppd, pq);
1449 mutex_unlock(&pq->lock);
1450
1451 return ret;
1452 }
1453
1454 u32 qib_user_sdma_complete_counter(const struct qib_user_sdma_queue *pq)
1455 {
1456 return pq ? pq->sent_counter : 0;
1457 }
1458
1459 u32 qib_user_sdma_inflight_counter(struct qib_user_sdma_queue *pq)
1460 {
1461 return pq ? pq->counter : 0;
1462 }