This source file includes the following definitions:
- hfi1_user_exp_rcv_init
- hfi1_user_exp_rcv_free
- unpin_rcv_pages
- pin_rcv_pages
- hfi1_user_exp_rcv_setup
- hfi1_user_exp_rcv_clear
- hfi1_user_exp_rcv_invalid
- find_phys_blocks
- program_rcvarray
- set_rcvarray_entry
- unprogram_rcvarray
- clear_tid_node
- unlock_exp_tids
- tid_rb_invalidate
- tid_rb_insert
- cacheless_tid_rb_remove
- tid_rb_remove
#include <asm/page.h>
#include <linux/string.h>

#include "mmu_rb.h"
#include "user_exp_rcv.h"
#include "trace.h"

static void unlock_exp_tids(struct hfi1_ctxtdata *uctxt,
			    struct exp_tid_set *set,
			    struct hfi1_filedata *fd);
static u32 find_phys_blocks(struct tid_user_buf *tidbuf, unsigned int npages);
static int set_rcvarray_entry(struct hfi1_filedata *fd,
			      struct tid_user_buf *tbuf,
			      u32 rcventry, struct tid_group *grp,
			      u16 pageidx, unsigned int npages);
static int tid_rb_insert(void *arg, struct mmu_rb_node *node);
static void cacheless_tid_rb_remove(struct hfi1_filedata *fdata,
				    struct tid_rb_node *tnode);
static void tid_rb_remove(void *arg, struct mmu_rb_node *node);
static int tid_rb_invalidate(void *arg, struct mmu_rb_node *mnode);
static int program_rcvarray(struct hfi1_filedata *fd, struct tid_user_buf *tbuf,
			    struct tid_group *grp,
			    unsigned int start, u16 count,
			    u32 *tidlist, unsigned int *tididx,
			    unsigned int *pmapped);
static int unprogram_rcvarray(struct hfi1_filedata *fd, u32 tidinfo,
			      struct tid_group **grp);
static void clear_tid_node(struct hfi1_filedata *fd, struct tid_rb_node *node);

static struct mmu_rb_ops tid_rb_ops = {
	.insert = tid_rb_insert,
	.remove = tid_rb_remove,
	.invalidate = tid_rb_invalidate
};
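/*
 * Initialize the context and file-private state needed for expected
 * receive caching: the rcventry-to-node lookup table, the invalid TID
 * queue (when TID caching is enabled), and this context's TID
 * allocation limit.
 */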
int hfi1_user_exp_rcv_init(struct hfi1_filedata *fd,
			   struct hfi1_ctxtdata *uctxt)
{
	struct hfi1_devdata *dd = uctxt->dd;
	int ret = 0;

	fd->entry_to_rb = kcalloc(uctxt->expected_count,
				  sizeof(struct rb_node *),
				  GFP_KERNEL);
	if (!fd->entry_to_rb)
		return -ENOMEM;

	if (!HFI1_CAP_UGET_MASK(uctxt->flags, TID_UNMAP)) {
		fd->invalid_tid_idx = 0;
		fd->invalid_tids = kcalloc(uctxt->expected_count,
					   sizeof(*fd->invalid_tids),
					   GFP_KERNEL);
		if (!fd->invalid_tids) {
			kfree(fd->entry_to_rb);
			fd->entry_to_rb = NULL;
			return -ENOMEM;
		}

		/*
		 * Register MMU notifier callbacks. If the registration
		 * fails, continue without TID caching for this context.
		 */
		ret = hfi1_mmu_rb_register(fd, fd->mm, &tid_rb_ops,
					   dd->pport->hfi1_wq,
					   &fd->handler);
		if (ret) {
			dd_dev_info(dd,
				    "Failed MMU notifier registration %d\n",
				    ret);
			ret = 0;
		}
	}

	/*
	 * Divide the expected-receive entries evenly among the
	 * subcontexts, giving any remainder to the lower-numbered
	 * ones. Without a registered handler (no TID caching), the
	 * whole context shares the full set on a first-come basis.
	 */
	spin_lock(&fd->tid_lock);
	if (uctxt->subctxt_cnt && fd->handler) {
		u16 remainder;

		fd->tid_limit = uctxt->expected_count / uctxt->subctxt_cnt;
		remainder = uctxt->expected_count % uctxt->subctxt_cnt;
		if (remainder && fd->subctxt < remainder)
			fd->tid_limit++;
	} else {
		fd->tid_limit = uctxt->expected_count;
	}
	spin_unlock(&fd->tid_lock);

	return ret;
}

void hfi1_user_exp_rcv_free(struct hfi1_filedata *fd)
{
	struct hfi1_ctxtdata *uctxt = fd->uctxt;

	/*
	 * When a notifier handler is registered, unregistering it
	 * removes any cached nodes. Otherwise, walk both TID lists
	 * and release every entry still programmed for this fd.
	 */
	if (fd->handler) {
		hfi1_mmu_rb_unregister(fd->handler);
	} else {
		mutex_lock(&uctxt->exp_mutex);
		if (!EXP_TID_SET_EMPTY(uctxt->tid_full_list))
			unlock_exp_tids(uctxt, &uctxt->tid_full_list, fd);
		if (!EXP_TID_SET_EMPTY(uctxt->tid_used_list))
			unlock_exp_tids(uctxt, &uctxt->tid_used_list, fd);
		mutex_unlock(&uctxt->exp_mutex);
	}

	kfree(fd->invalid_tids);
	fd->invalid_tids = NULL;

	kfree(fd->entry_to_rb);
	fd->entry_to_rb = NULL;
}
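/*
 * Release a range of previously pinned receive buffer pages, undoing
 * the DMA mapping first when the range had been mapped for a node.
 */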
static void unpin_rcv_pages(struct hfi1_filedata *fd,
			    struct tid_user_buf *tidbuf,
			    struct tid_rb_node *node,
			    unsigned int idx,
			    unsigned int npages,
			    bool mapped)
{
	struct page **pages;
	struct hfi1_devdata *dd = fd->uctxt->dd;

	if (mapped) {
		pci_unmap_single(dd->pcidev, node->dma_addr,
				 node->mmu.len, PCI_DMA_FROMDEVICE);
		pages = &node->pages[idx];
	} else {
		pages = &tidbuf->pages[idx];
	}
	hfi1_release_user_pages(fd->mm, pages, npages, mapped);
	fd->tid_n_pinned -= npages;
}
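/*
 * Pin the user buffer pages described by @tidbuf. Returns the number
 * of pages pinned, or a negative errno on failure.
 */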
static int pin_rcv_pages(struct hfi1_filedata *fd, struct tid_user_buf *tidbuf)
{
	int pinned;
	unsigned int npages;
	unsigned long vaddr = tidbuf->vaddr;
	struct page **pages = NULL;
	struct hfi1_devdata *dd = fd->uctxt->dd;

	/* Get the number of pages the user buffer spans */
	npages = num_user_pages(vaddr, tidbuf->length);
	if (!npages)
		return -EINVAL;

	if (npages > fd->uctxt->expected_count) {
		dd_dev_err(dd, "Expected buffer too big\n");
		return -EINVAL;
	}

	/* Verify that access is OK for the user buffer */
	if (!access_ok((void __user *)vaddr,
		       npages * PAGE_SIZE)) {
		dd_dev_err(dd, "Fail vaddr %p, %u pages, !access_ok\n",
			   (void *)vaddr, npages);
		return -EFAULT;
	}

	pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	/*
	 * Check that pinning npages more pages would not push this
	 * process over its pinned-memory limit.
	 */
	if (!hfi1_can_pin_pages(dd, fd->mm, fd->tid_n_pinned, npages)) {
		kfree(pages);
		return -ENOMEM;
	}

	pinned = hfi1_acquire_user_pages(fd->mm, vaddr, npages, true, pages);
	if (pinned <= 0) {
		kfree(pages);
		return pinned;
	}
	tidbuf->pages = pages;
	tidbuf->npages = npages;
	fd->tid_n_pinned += pinned;
	return pinned;
}
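/*
 * RcvArray entry allocation for expected receives works as follows.
 *
 * The context keeps three lists of RcvArray entry groups:
 *   1. tid_group_list - completely free groups,
 *   2. tid_used_list  - partially used groups, and
 *   3. tid_full_list  - completely used groups.
 *
 * The page sets built from the user buffer are programmed first into
 * as many complete groups as possible (moving those groups from
 * tid_group_list to tid_full_list), and any remainder into partially
 * used groups from tid_used_list, pulling a fresh group from the free
 * list whenever the used list cannot satisfy the request. Groups are
 * moved between the lists as their usage changes.
 */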
int hfi1_user_exp_rcv_setup(struct hfi1_filedata *fd,
			    struct hfi1_tid_info *tinfo)
{
	int ret = 0, need_group = 0, pinned;
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	struct hfi1_devdata *dd = uctxt->dd;
	unsigned int ngroups, pageidx = 0, pageset_count,
		tididx = 0, mapped, mapped_pages = 0;
	u32 *tidlist = NULL;
	struct tid_user_buf *tidbuf;

	if (!PAGE_ALIGNED(tinfo->vaddr))
		return -EINVAL;

	tidbuf = kzalloc(sizeof(*tidbuf), GFP_KERNEL);
	if (!tidbuf)
		return -ENOMEM;

	tidbuf->vaddr = tinfo->vaddr;
	tidbuf->length = tinfo->length;
	tidbuf->psets = kcalloc(uctxt->expected_count, sizeof(*tidbuf->psets),
				GFP_KERNEL);
	if (!tidbuf->psets) {
		kfree(tidbuf);
		return -ENOMEM;
	}

	pinned = pin_rcv_pages(fd, tidbuf);
	if (pinned <= 0) {
		kfree(tidbuf->psets);
		kfree(tidbuf);
		return pinned;
	}

	/* Find sets of physically contiguous pages */
	tidbuf->n_psets = find_phys_blocks(tidbuf, pinned);

	/*
	 * Limit the number of page sets to what this subcontext is
	 * still allowed to program.
	 */
	spin_lock(&fd->tid_lock);
	if (fd->tid_used + tidbuf->n_psets > fd->tid_limit)
		pageset_count = fd->tid_limit - fd->tid_used;
	else
		pageset_count = tidbuf->n_psets;
	spin_unlock(&fd->tid_lock);

	if (!pageset_count)
		goto bail;

	ngroups = pageset_count / dd->rcv_entries.group_size;
	tidlist = kcalloc(pageset_count, sizeof(*tidlist), GFP_KERNEL);
	if (!tidlist) {
		ret = -ENOMEM;
		goto nomem;
	}

	tididx = 0;

	/*
	 * From this point on we operate on resources shared between
	 * the master and subcontexts, so the context-wide exp_mutex
	 * is needed.
	 */
	mutex_lock(&uctxt->exp_mutex);
	/* Start by programming complete groups from the free list. */
	while (ngroups && uctxt->tid_group_list.count) {
		struct tid_group *grp =
			tid_group_pop(&uctxt->tid_group_list);

		ret = program_rcvarray(fd, tidbuf, grp,
				       pageidx, dd->rcv_entries.group_size,
				       tidlist, &tididx, &mapped);
		/*
		 * If programming the whole group failed, return it to
		 * the free list and bail.
		 */
		if (ret <= 0) {
			tid_group_add_tail(grp, &uctxt->tid_group_list);
			hfi1_cdbg(TID,
				  "Failed to program RcvArray group %d", ret);
			goto unlock;
		}

		tid_group_add_tail(grp, &uctxt->tid_full_list);
		ngroups--;
		pageidx += ret;
		mapped_pages += mapped;
	}

	while (pageidx < pageset_count) {
		struct tid_group *grp, *ptr;

		/*
		 * If there are no partially used groups, pull a fresh
		 * group from the free list onto the used list.
		 */
		if (!uctxt->tid_used_list.count || need_group) {
			if (!uctxt->tid_group_list.count)
				goto unlock;

			grp = tid_group_pop(&uctxt->tid_group_list);
			tid_group_add_tail(grp, &uctxt->tid_used_list);
			need_group = 0;
		}

		/*
		 * Program the remaining page sets into partially used
		 * groups, taking from each group as many entries as it
		 * has free.
		 */
		list_for_each_entry_safe(grp, ptr, &uctxt->tid_used_list.list,
					 list) {
			unsigned use = min_t(unsigned, pageset_count - pageidx,
					     grp->size - grp->used);

			ret = program_rcvarray(fd, tidbuf, grp,
					       pageidx, use, tidlist,
					       &tididx, &mapped);
			if (ret < 0) {
				hfi1_cdbg(TID,
					  "Failed to program RcvArray entries %d",
					  ret);
				goto unlock;
			} else if (ret > 0) {
				if (grp->used == grp->size)
					tid_group_move(grp,
						       &uctxt->tid_used_list,
						       &uctxt->tid_full_list);
				pageidx += ret;
				mapped_pages += mapped;
				need_group = 0;

				if (pageidx >= pageset_count)
					break;
			} else if (WARN_ON(ret == 0)) {
				/*
				 * Should not happen: the group reported
				 * free entries but none could be
				 * programmed. Grab a fresh group on the
				 * next pass.
				 */
				need_group = 1;
			}
		}
	}
unlock:
	mutex_unlock(&uctxt->exp_mutex);
nomem:
	hfi1_cdbg(TID, "total mapped: tidpairs:%u pages:%u (%d)", tididx,
		  mapped_pages, ret);
	if (tididx) {
		spin_lock(&fd->tid_lock);
		fd->tid_used += tididx;
		spin_unlock(&fd->tid_lock);
		tinfo->tidcnt = tididx;
		tinfo->length = mapped_pages * PAGE_SIZE;

		if (copy_to_user(u64_to_user_ptr(tinfo->tidlist),
				 tidlist, sizeof(tidlist[0]) * tididx)) {
			unsigned int i;

			/*
			 * The TID list could not be returned to user
			 * space, so the entries just programmed are
			 * unusable. Unprogram them directly from the
			 * kernel copy of the list before failing;
			 * hfi1_user_exp_rcv_clear() cannot be used here
			 * because it re-reads the list with
			 * memdup_user() and we only have a kernel
			 * pointer.
			 */
			mutex_lock(&uctxt->exp_mutex);
			for (i = 0; i < tididx; i++)
				unprogram_rcvarray(fd, tidlist[i], NULL);
			mutex_unlock(&uctxt->exp_mutex);
			spin_lock(&fd->tid_lock);
			fd->tid_used -= tididx;
			spin_unlock(&fd->tid_lock);
			tinfo->tidcnt = 0;
			ret = -EFAULT;
			goto bail;
		}
	}

	/*
	 * If not all pinned pages were programmed into RcvArray
	 * entries, unpin the unused tail of the buffer.
	 */
	if (mapped_pages != pinned)
		unpin_rcv_pages(fd, tidbuf, NULL, mapped_pages,
				(pinned - mapped_pages), false);
bail:
	kfree(tidbuf->psets);
	kfree(tidlist);
	kfree(tidbuf->pages);
	kfree(tidbuf);
	return ret > 0 ? 0 : ret;
}

int hfi1_user_exp_rcv_clear(struct hfi1_filedata *fd,
			    struct hfi1_tid_info *tinfo)
{
	int ret = 0;
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	u32 *tidinfo;
	unsigned tididx;

	if (unlikely(tinfo->tidcnt > fd->tid_used))
		return -EINVAL;

	tidinfo = memdup_user(u64_to_user_ptr(tinfo->tidlist),
			      sizeof(tidinfo[0]) * tinfo->tidcnt);
	if (IS_ERR(tidinfo))
		return PTR_ERR(tidinfo);

	mutex_lock(&uctxt->exp_mutex);
	for (tididx = 0; tididx < tinfo->tidcnt; tididx++) {
		ret = unprogram_rcvarray(fd, tidinfo[tididx], NULL);
		if (ret) {
			hfi1_cdbg(TID, "Failed to unprogram rcv array %d",
				  ret);
			break;
		}
	}
	spin_lock(&fd->tid_lock);
	fd->tid_used -= tididx;
	spin_unlock(&fd->tid_lock);
	tinfo->tidcnt = tididx;
	mutex_unlock(&uctxt->exp_mutex);

	kfree(tidinfo);
	return ret;
}

int hfi1_user_exp_rcv_invalid(struct hfi1_filedata *fd,
			      struct hfi1_tid_info *tinfo)
{
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	unsigned long *ev = uctxt->dd->events +
		(uctxt_offset(uctxt) + fd->subctxt);
	u32 *array;
	int ret = 0;

	/*
	 * copy_to_user() can sleep, which would leave the invalid_lock
	 * held and block the MMU notifier for a long time. Copy the
	 * invalidated TIDs into a local buffer under the lock and do
	 * the user copy after dropping it.
	 */
	array = kcalloc(uctxt->expected_count, sizeof(*array), GFP_KERNEL);
	if (!array)
		return -EFAULT;

	spin_lock(&fd->invalid_lock);
	if (fd->invalid_tid_idx) {
		memcpy(array, fd->invalid_tids, sizeof(*array) *
		       fd->invalid_tid_idx);
		memset(fd->invalid_tids, 0, sizeof(*fd->invalid_tids) *
		       fd->invalid_tid_idx);
		tinfo->tidcnt = fd->invalid_tid_idx;
		fd->invalid_tid_idx = 0;
		/*
		 * The list is now empty: clear the event bit that told
		 * user space there were TIDs waiting to be read.
		 */
		clear_bit(_HFI1_EVENT_TID_MMU_NOTIFY_BIT, ev);
	} else {
		tinfo->tidcnt = 0;
	}
	spin_unlock(&fd->invalid_lock);

	if (tinfo->tidcnt) {
		if (copy_to_user(u64_to_user_ptr(tinfo->tidlist),
				 array, sizeof(*array) * tinfo->tidcnt))
			ret = -EFAULT;
	}
	kfree(array);

	return ret;
}

static u32 find_phys_blocks(struct tid_user_buf *tidbuf, unsigned int npages)
{
	unsigned pagecount, pageidx, setcount = 0, i;
	unsigned long pfn, this_pfn;
	struct page **pages = tidbuf->pages;
	struct tid_pageset *list = tidbuf->psets;

	if (!npages)
		return 0;

	/*
	 * Look for runs of physically contiguous pages in the user
	 * buffer. The loop runs one past the end, with a PFN of 0 as
	 * the sentinel, so the final run is flushed.
	 */
	pfn = page_to_pfn(pages[0]);
	for (pageidx = 0, pagecount = 1, i = 1; i <= npages; i++) {
		this_pfn = i < npages ? page_to_pfn(pages[i]) : 0;

		/*
		 * If this page is not physically contiguous with the
		 * previous one, close out the current run.
		 */
		if (this_pfn != ++pfn) {
			/*
			 * An RcvArray entry maps a power-of-two number
			 * of pages, up to MAX_EXPECTED_BUFFER bytes.
			 * Break the contiguous run into page sets the
			 * hardware can express.
			 */
			while (pagecount) {
				int maxpages = pagecount;
				u32 bufsize = pagecount * PAGE_SIZE;

				if (bufsize > MAX_EXPECTED_BUFFER)
					maxpages =
						MAX_EXPECTED_BUFFER >>
						PAGE_SHIFT;
				else if (!is_power_of_2(bufsize))
					maxpages =
						rounddown_pow_of_two(bufsize) >>
						PAGE_SHIFT;

				list[setcount].idx = pageidx;
				list[setcount].count = maxpages;
				pagecount -= maxpages;
				pageidx += maxpages;
				setcount++;
			}
			pageidx = i;
			pagecount = 1;
			pfn = this_pfn;
		} else {
			pagecount++;
		}
	}
	return setcount;
}
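/**
 * program_rcvarray() - program a group of RcvArray entries
 * @fd: file data for the current context
 * @tbuf: pinned user buffer with its page sets
 * @grp: RcvArray group to program
 * @start: index of the first page set to use
 * @count: number of page sets to program
 * @tidlist: output array of encoded TID values
 * @tididx: current offset into @tidlist, updated as entries are added
 * @pmapped: set to the number of pages mapped by this call
 *
 * Program up to @count page sets into free entries of @grp, writing
 * dummy values to any entries that are skipped.
 *
 * Return: the number of page sets programmed, or a negative errno.
 */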
static int program_rcvarray(struct hfi1_filedata *fd, struct tid_user_buf *tbuf,
			    struct tid_group *grp,
			    unsigned int start, u16 count,
			    u32 *tidlist, unsigned int *tididx,
			    unsigned int *pmapped)
{
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	struct hfi1_devdata *dd = uctxt->dd;
	u16 idx;
	u32 tidinfo = 0, rcventry, useidx = 0;
	int mapped = 0;

	/* Count should never be larger than the group size */
	if (count > grp->size)
		return -EINVAL;

	/*
	 * Find the first unused entry in the group. The RcvArray is
	 * written through a write-combining mapping, so entries that
	 * are passed over still get a dummy write.
	 */
	for (idx = 0; idx < grp->size; idx++) {
		if (!(grp->map & (1 << idx))) {
			useidx = idx;
			break;
		}
		rcv_array_wc_fill(dd, grp->base + idx);
	}

	idx = 0;
	while (idx < count) {
		u16 npages, pageidx, setidx = start + idx;
		int ret = 0;

		/*
		 * If this entry in the group is used, move to the next
		 * one; stop when we run off the end of the group.
		 */
		if (useidx >= grp->size) {
			break;
		} else if (grp->map & (1 << useidx)) {
			rcv_array_wc_fill(dd, grp->base + useidx);
			useidx++;
			continue;
		}

		rcventry = grp->base + useidx;
		npages = tbuf->psets[setidx].count;
		pageidx = tbuf->psets[setidx].idx;

		ret = set_rcvarray_entry(fd, tbuf,
					 rcventry, grp, pageidx,
					 npages);
		if (ret)
			return ret;
		mapped += npages;

		tidinfo = rcventry2tidinfo(rcventry - uctxt->expected_base) |
			EXP_TID_SET(LEN, npages);
		tidlist[(*tididx)++] = tidinfo;
		grp->used++;
		grp->map |= 1 << useidx++;
		idx++;
	}

	/* Fill the rest of the group with "blank" writes */
	for (; useidx < grp->size; useidx++)
		rcv_array_wc_fill(dd, grp->base + useidx);
	*pmapped = mapped;
	return idx;
}

static int set_rcvarray_entry(struct hfi1_filedata *fd,
			      struct tid_user_buf *tbuf,
			      u32 rcventry, struct tid_group *grp,
			      u16 pageidx, unsigned int npages)
{
	int ret;
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	struct tid_rb_node *node;
	struct hfi1_devdata *dd = uctxt->dd;
	dma_addr_t phys;
	struct page **pages = tbuf->pages + pageidx;

	/*
	 * Allocate the node first so a failure leaves nothing
	 * programmed in the hardware.
	 */
	node = kzalloc(sizeof(*node) + (sizeof(struct page *) * npages),
		       GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	phys = pci_map_single(dd->pcidev,
			      __va(page_to_phys(pages[0])),
			      npages * PAGE_SIZE, PCI_DMA_FROMDEVICE);
	if (dma_mapping_error(&dd->pcidev->dev, phys)) {
		dd_dev_err(dd, "Failed to DMA map Exp Rcv pages 0x%llx\n",
			   phys);
		kfree(node);
		return -EFAULT;
	}

	node->mmu.addr = tbuf->vaddr + (pageidx * PAGE_SIZE);
	node->mmu.len = npages * PAGE_SIZE;
	node->phys = page_to_phys(pages[0]);
	node->npages = npages;
	node->rcventry = rcventry;
	node->dma_addr = phys;
	node->grp = grp;
	node->freed = false;
	memcpy(node->pages, pages, sizeof(struct page *) * npages);

	if (!fd->handler)
		ret = tid_rb_insert(fd, &node->mmu);
	else
		ret = hfi1_mmu_rb_insert(fd->handler, &node->mmu);

	if (ret) {
		hfi1_cdbg(TID, "Failed to insert RB node %u 0x%lx, 0x%lx %d",
			  node->rcventry, node->mmu.addr, node->phys, ret);
		pci_unmap_single(dd->pcidev, phys, npages * PAGE_SIZE,
				 PCI_DMA_FROMDEVICE);
		kfree(node);
		return -EFAULT;
	}
	hfi1_put_tid(dd, rcventry, PT_EXPECTED, phys, ilog2(npages) + 1);
	trace_hfi1_exp_tid_reg(uctxt->ctxt, fd->subctxt, rcventry, npages,
			       node->mmu.addr, node->phys, phys);
	return 0;
}

static int unprogram_rcvarray(struct hfi1_filedata *fd, u32 tidinfo,
			      struct tid_group **grp)
{
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	struct hfi1_devdata *dd = uctxt->dd;
	struct tid_rb_node *node;
	u8 tidctrl = EXP_TID_GET(tidinfo, CTRL);
	u32 tididx = EXP_TID_GET(tidinfo, IDX) << 1, rcventry;

	if (tididx >= uctxt->expected_count) {
		dd_dev_err(dd, "Invalid RcvArray entry (%u) index for ctxt %u\n",
			   tididx, uctxt->ctxt);
		return -EINVAL;
	}

	/*
	 * tidctrl selects the entry within the pair and may only be 1
	 * or 2; 0 would index below the entry array and 3 is not a
	 * valid encoding.
	 */
	if (!tidctrl || tidctrl == 0x3)
		return -EINVAL;

	rcventry = tididx + (tidctrl - 1);

	node = fd->entry_to_rb[rcventry];
	if (!node || node->rcventry != (uctxt->expected_base + rcventry))
		return -EBADF;

	if (grp)
		*grp = node->grp;

	if (!fd->handler)
		cacheless_tid_rb_remove(fd, node);
	else
		hfi1_mmu_rb_remove(fd->handler, &node->mmu);

	return 0;
}

static void clear_tid_node(struct hfi1_filedata *fd, struct tid_rb_node *node)
{
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	struct hfi1_devdata *dd = uctxt->dd;

	trace_hfi1_exp_tid_unreg(uctxt->ctxt, fd->subctxt, node->rcventry,
				 node->npages, node->mmu.addr, node->phys,
				 node->dma_addr);

	/*
	 * Invalidate the entry with a flush so the chip has seen the
	 * write before the pages are unpinned.
	 */
	hfi1_put_tid(dd, node->rcventry, PT_INVALID_FLUSH, 0, 0);

	unpin_rcv_pages(fd, NULL, node, 0, node->npages, true);

	node->grp->used--;
	node->grp->map &= ~(1 << (node->rcventry - node->grp->base));

	if (node->grp->used == node->grp->size - 1)
		tid_group_move(node->grp, &uctxt->tid_full_list,
			       &uctxt->tid_used_list);
	else if (!node->grp->used)
		tid_group_move(node->grp, &uctxt->tid_used_list,
			       &uctxt->tid_group_list);
	kfree(node);
}
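/*
 * Release every TID node on @set that belongs to @fd. Used on context
 * teardown when no MMU notifier handler is registered.
 */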
static void unlock_exp_tids(struct hfi1_ctxtdata *uctxt,
			    struct exp_tid_set *set,
			    struct hfi1_filedata *fd)
{
	struct tid_group *grp, *ptr;
	int i;

	list_for_each_entry_safe(grp, ptr, &set->list, list) {
		list_del_init(&grp->list);

		for (i = 0; i < grp->size; i++) {
			if (grp->map & (1 << i)) {
				u16 rcventry = grp->base + i;
				struct tid_rb_node *node;

				node = fd->entry_to_rb[rcventry -
						       uctxt->expected_base];
				if (!node || node->rcventry != rcventry)
					continue;

				cacheless_tid_rb_remove(fd, node);
			}
		}
	}
}
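/*
 * MMU notifier callback for an address range invalidation. The node
 * is only marked as freed and queued on the context's invalid_tids
 * list; the pages stay pinned until user space, notified through the
 * TID event bit, reads back and frees the TID. Always returns 0 so
 * the node is left in place.
 */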
static int tid_rb_invalidate(void *arg, struct mmu_rb_node *mnode)
{
	struct hfi1_filedata *fdata = arg;
	struct hfi1_ctxtdata *uctxt = fdata->uctxt;
	struct tid_rb_node *node =
		container_of(mnode, struct tid_rb_node, mmu);

	if (node->freed)
		return 0;

	trace_hfi1_exp_tid_inval(uctxt->ctxt, fdata->subctxt, node->mmu.addr,
				 node->rcventry, node->npages, node->dma_addr);
	node->freed = true;

	spin_lock(&fdata->invalid_lock);
	if (fdata->invalid_tid_idx < uctxt->expected_count) {
		fdata->invalid_tids[fdata->invalid_tid_idx] =
			rcventry2tidinfo(node->rcventry - uctxt->expected_base);
		fdata->invalid_tids[fdata->invalid_tid_idx] |=
			EXP_TID_SET(LEN, node->npages);
		if (!fdata->invalid_tid_idx) {
			unsigned long *ev;

			/*
			 * First invalidation since user space last
			 * drained the list: set this process's event
			 * bit so it knows to read the invalidated TIDs
			 * back.
			 */
			ev = uctxt->dd->events +
				(uctxt_offset(uctxt) + fdata->subctxt);
			set_bit(_HFI1_EVENT_TID_MMU_NOTIFY_BIT, ev);
		}
		fdata->invalid_tid_idx++;
	}
	spin_unlock(&fdata->invalid_lock);
	return 0;
}

static int tid_rb_insert(void *arg, struct mmu_rb_node *node)
{
	struct hfi1_filedata *fdata = arg;
	struct tid_rb_node *tnode =
		container_of(node, struct tid_rb_node, mmu);
	u32 base = fdata->uctxt->expected_base;

	fdata->entry_to_rb[tnode->rcventry - base] = tnode;
	return 0;
}

static void cacheless_tid_rb_remove(struct hfi1_filedata *fdata,
				    struct tid_rb_node *tnode)
{
	u32 base = fdata->uctxt->expected_base;

	fdata->entry_to_rb[tnode->rcventry - base] = NULL;
	clear_tid_node(fdata, tnode);
}

static void tid_rb_remove(void *arg, struct mmu_rb_node *node)
{
	struct hfi1_filedata *fdata = arg;
	struct tid_rb_node *tnode =
		container_of(node, struct tid_rb_node, mmu);

	cacheless_tid_rb_remove(fdata, tnode);
}