/*
 * Copyright(c) 2015-2018 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in
 *   the documentation and/or other materials provided with the
 *   distribution.
 * - Neither the name of Intel Corporation nor the names of its
 *   contributors may be used to endorse or promote products derived
 *   from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */
#include <asm/page.h>
#include <linux/string.h>

#include "mmu_rb.h"
#include "user_exp_rcv.h"
#include "trace.h"

static void unlock_exp_tids(struct hfi1_ctxtdata *uctxt,
			    struct exp_tid_set *set,
			    struct hfi1_filedata *fd);
static u32 find_phys_blocks(struct tid_user_buf *tidbuf, unsigned int npages);
static int set_rcvarray_entry(struct hfi1_filedata *fd,
			      struct tid_user_buf *tbuf,
			      u32 rcventry, struct tid_group *grp,
			      u16 pageidx, unsigned int npages);
static int tid_rb_insert(void *arg, struct mmu_rb_node *node);
static void cacheless_tid_rb_remove(struct hfi1_filedata *fdata,
				    struct tid_rb_node *tnode);
static void tid_rb_remove(void *arg, struct mmu_rb_node *node);
static int tid_rb_invalidate(void *arg, struct mmu_rb_node *mnode);
static int program_rcvarray(struct hfi1_filedata *fd, struct tid_user_buf *tbuf,
			    struct tid_group *grp,
			    unsigned int start, u16 count,
			    u32 *tidlist, unsigned int *tididx,
			    unsigned int *pmapped);
static int unprogram_rcvarray(struct hfi1_filedata *fd, u32 tidinfo,
			      struct tid_group **grp);
static void clear_tid_node(struct hfi1_filedata *fd, struct tid_rb_node *node);

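/*
 * rb-tree callbacks wired into the MMU notifier handler when TID caching
 * is enabled; tid_rb_invalidate() is how address-space invalidations are
 * reported back to PSM.
 */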
static struct mmu_rb_ops tid_rb_ops = {
	.insert = tid_rb_insert,
	.remove = tid_rb_remove,
	.invalidate = tid_rb_invalidate
};

/*
 * Initialize context and file private data needed for Expected
 * receive caching. This needs to be done after the context has
 * been configured with the eager/expected RcvEntry counts.
 */
int hfi1_user_exp_rcv_init(struct hfi1_filedata *fd,
			   struct hfi1_ctxtdata *uctxt)
{
	struct hfi1_devdata *dd = uctxt->dd;
	int ret = 0;

	fd->entry_to_rb = kcalloc(uctxt->expected_count,
				  sizeof(struct rb_node *),
				  GFP_KERNEL);
	if (!fd->entry_to_rb)
		return -ENOMEM;

	if (!HFI1_CAP_UGET_MASK(uctxt->flags, TID_UNMAP)) {
		fd->invalid_tid_idx = 0;
		fd->invalid_tids = kcalloc(uctxt->expected_count,
					   sizeof(*fd->invalid_tids),
					   GFP_KERNEL);
		if (!fd->invalid_tids) {
			kfree(fd->entry_to_rb);
			fd->entry_to_rb = NULL;
			return -ENOMEM;
		}

		/*
		 * Register MMU notifier callbacks. If the registration
		 * fails, continue without TID caching for this context.
		 */
		ret = hfi1_mmu_rb_register(fd, fd->mm, &tid_rb_ops,
					   dd->pport->hfi1_wq,
					   &fd->handler);
		if (ret) {
			dd_dev_info(dd,
				    "Failed MMU notifier registration %d\n",
				    ret);
			ret = 0;
		}
	}

	/*
	 * PSM does not have a good way to separate, count, and
	 * effectively enforce a limit on RcvArray entries used by
	 * subctxts (when context sharing is used) when TID caching
	 * is enabled. To help with that, we calculate a per-process
	 * RcvArray entry share and enforce that.
	 * If TID caching is not in use, PSM deals with usage on its
	 * own. In that case, we allow any subctxt to take all of the
	 * entries.
	 *
	 * Make sure that we set the tid counts only after successful
	 * init.
	 */
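	/*
	 * For example, with expected_count == 2048 and subctxt_cnt == 3,
	 * the remainder is 2, so subctxts 0 and 1 get a limit of 683 and
	 * subctxt 2 gets 682; the shares always sum to expected_count.
	 */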
	spin_lock(&fd->tid_lock);
	if (uctxt->subctxt_cnt && fd->handler) {
		u16 remainder;

		fd->tid_limit = uctxt->expected_count / uctxt->subctxt_cnt;
		remainder = uctxt->expected_count % uctxt->subctxt_cnt;
		if (remainder && fd->subctxt < remainder)
			fd->tid_limit++;
	} else {
		fd->tid_limit = uctxt->expected_count;
	}
	spin_unlock(&fd->tid_lock);

	return ret;
}

void hfi1_user_exp_rcv_free(struct hfi1_filedata *fd)
{
	struct hfi1_ctxtdata *uctxt = fd->uctxt;

	/*
	 * The notifier would have been removed when the process's mm
	 * was freed.
	 */
	if (fd->handler) {
		hfi1_mmu_rb_unregister(fd->handler);
	} else {
		mutex_lock(&uctxt->exp_mutex);
		if (!EXP_TID_SET_EMPTY(uctxt->tid_full_list))
			unlock_exp_tids(uctxt, &uctxt->tid_full_list, fd);
		if (!EXP_TID_SET_EMPTY(uctxt->tid_used_list))
			unlock_exp_tids(uctxt, &uctxt->tid_used_list, fd);
		mutex_unlock(&uctxt->exp_mutex);
	}

	kfree(fd->invalid_tids);
	fd->invalid_tids = NULL;

	kfree(fd->entry_to_rb);
	fd->entry_to_rb = NULL;
}

/**
 * unpin_rcv_pages() - release pinned receive buffer pages
 * @fd: per-file driver data
 * @tidbuf: tid buffer with the unmapped page info (used when @mapped is
 *          false)
 * @node: TID rb node with the DMA-mapped page info (used when @mapped is
 *        true)
 * @idx: index of the first page to unpin
 * @npages: number of pages to unpin
 * @mapped: true if the pages have been DMA mapped, false otherwise
 *
 * If the pages have been DMA mapped (indicated by the @mapped parameter),
 * their info is taken from the struct tid_rb_node; if they haven't been
 * mapped, it is taken from the struct tid_user_buf.
 */
static void unpin_rcv_pages(struct hfi1_filedata *fd,
			    struct tid_user_buf *tidbuf,
			    struct tid_rb_node *node,
			    unsigned int idx,
			    unsigned int npages,
			    bool mapped)
{
	struct page **pages;
	struct hfi1_devdata *dd = fd->uctxt->dd;

	if (mapped) {
		pci_unmap_single(dd->pcidev, node->dma_addr,
				 node->mmu.len, PCI_DMA_FROMDEVICE);
		pages = &node->pages[idx];
	} else {
		pages = &tidbuf->pages[idx];
	}
	hfi1_release_user_pages(fd->mm, pages, npages, mapped);
	fd->tid_n_pinned -= npages;
}

/**
 * pin_rcv_pages() - pin the pages of the user receive buffer
 * @fd: per-file driver data
 * @tidbuf: tid buffer holding the user virtual address and length
 *
 * Return: the number of pages pinned (which may be fewer than requested),
 * or a negative errno on failure.
 */
static int pin_rcv_pages(struct hfi1_filedata *fd, struct tid_user_buf *tidbuf)
{
	int pinned;
	unsigned int npages;
	unsigned long vaddr = tidbuf->vaddr;
	struct page **pages = NULL;
	struct hfi1_devdata *dd = fd->uctxt->dd;

	/* Get the number of pages the user buffer spans */
	npages = num_user_pages(vaddr, tidbuf->length);
	if (!npages)
		return -EINVAL;

	if (npages > fd->uctxt->expected_count) {
		dd_dev_err(dd, "Expected buffer too big\n");
		return -EINVAL;
	}

	/* Verify that access is OK for the user buffer */
	if (!access_ok((void __user *)vaddr,
		       npages * PAGE_SIZE)) {
		dd_dev_err(dd, "Fail vaddr %p, %u pages, !access_ok\n",
			   (void *)vaddr, npages);
		return -EFAULT;
	}
	/* Allocate the array of struct page pointers needed for pinning */
	pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	/*
	 * Pin all the pages of the user buffer. If we can't pin all the
	 * pages, accept the amount pinned so far and program only that.
	 * User space knows how to deal with partially programmed buffers.
	 */
	if (!hfi1_can_pin_pages(dd, fd->mm, fd->tid_n_pinned, npages)) {
		kfree(pages);
		return -ENOMEM;
	}

	pinned = hfi1_acquire_user_pages(fd->mm, vaddr, npages, true, pages);
	if (pinned <= 0) {
		kfree(pages);
		return pinned;
	}
	tidbuf->pages = pages;
	tidbuf->npages = npages;
	fd->tid_n_pinned += pinned;
	return pinned;
}

/*
 * RcvArray entry allocation for Expected Receives is done by the
 * following algorithm:
 *
 * The context keeps 3 lists of groups of RcvArray entries:
 *   1. List of empty groups - tid_group_list
 *      This list is created during user context creation and
 *      contains elements which describe sets (of 8) of empty
 *      RcvArray entries.
 *   2. List of partially used groups - tid_used_list
 *      This list contains sets of RcvArray entries which are
 *      not completely used up. Another mapping request could
 *      use some or all of the remaining entries.
 *   3. List of full groups - tid_full_list
 *      This is the list where sets that are completely used
 *      up go.
 *
 * An attempt to optimize the usage of RcvArray entries is
 * made by finding all sets of physically contiguous pages in a
 * user's buffer.
 * These physically contiguous sets are further split into
 * sizes supported by the receive engine of the HFI. The
 * resulting sets of pages are stored in struct tid_pageset,
 * which describes the sets as:
 *    * .count - number of pages in this set
 *    * .idx - starting index into struct page ** array
 *             of this set
 *
 * From this point on, the algorithm deals with the page sets
 * described above. The number of pagesets is divided by the
 * RcvArray group size to produce the number of full groups
 * needed.
 *
 * Groups from the 3 lists are manipulated using the following
 * rules:
 *   1. For each set of 8 pagesets, a complete group from
 *      tid_group_list is taken, programmed, and moved to
 *      the tid_full_list list.
 *   2. For all remaining pagesets:
 *      2.1 If the tid_used_list is empty and the tid_group_list
 *          is empty, stop processing pagesets and return only
 *          what has been programmed up to this point.
 *      2.2 If the tid_used_list is empty and the tid_group_list
 *          is not empty, move a group from tid_group_list to
 *          tid_used_list.
 *      2.3 For each group in tid_used_list, program as much as
 *          can fit into the group. If the group becomes fully
 *          used, move it to tid_full_list.
 */
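/*
 * For example, with a group size of 8 (dd->rcv_entries.group_size), a
 * buffer that produces 20 pagesets consumes two complete groups from
 * tid_group_list under rule 1 (16 pagesets), and the remaining 4
 * pagesets are placed in partially used groups under rule 2.
 */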
int hfi1_user_exp_rcv_setup(struct hfi1_filedata *fd,
			    struct hfi1_tid_info *tinfo)
{
	int ret = 0, need_group = 0, pinned;
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	struct hfi1_devdata *dd = uctxt->dd;
	unsigned int ngroups, pageidx = 0, pageset_count,
		     tididx = 0, mapped, mapped_pages = 0;
	u32 *tidlist = NULL;
	struct tid_user_buf *tidbuf;

	if (!PAGE_ALIGNED(tinfo->vaddr))
		return -EINVAL;

	tidbuf = kzalloc(sizeof(*tidbuf), GFP_KERNEL);
	if (!tidbuf)
		return -ENOMEM;

	tidbuf->vaddr = tinfo->vaddr;
	tidbuf->length = tinfo->length;
	tidbuf->psets = kcalloc(uctxt->expected_count, sizeof(*tidbuf->psets),
				GFP_KERNEL);
	if (!tidbuf->psets) {
		kfree(tidbuf);
		return -ENOMEM;
	}

	pinned = pin_rcv_pages(fd, tidbuf);
	if (pinned <= 0) {
		kfree(tidbuf->psets);
		kfree(tidbuf);
		return pinned;
	}

	/* Find sets of physically contiguous pages */
	tidbuf->n_psets = find_phys_blocks(tidbuf, pinned);

	/*
	 * We don't need to access this under a lock since tid_used is per
	 * process and the same process cannot be in hfi1_user_exp_rcv_clear()
	 * and hfi1_user_exp_rcv_setup() at the same time.
	 */
	spin_lock(&fd->tid_lock);
	if (fd->tid_used + tidbuf->n_psets > fd->tid_limit)
		pageset_count = fd->tid_limit - fd->tid_used;
	else
		pageset_count = tidbuf->n_psets;
	spin_unlock(&fd->tid_lock);

	if (!pageset_count)
		goto bail;

	ngroups = pageset_count / dd->rcv_entries.group_size;
	tidlist = kcalloc(pageset_count, sizeof(*tidlist), GFP_KERNEL);
	if (!tidlist) {
		ret = -ENOMEM;
		goto nomem;
	}

373
374 tididx = 0;
375
376 /*
377 * From this point on, we are going to be using shared (between master
378 * and subcontexts) context resources. We need to take the lock.
379 */
380 mutex_lock(&uctxt->exp_mutex);
381 /*
382 * The first step is to program the RcvArray entries which are complete
383 * groups.
384 */
385 while (ngroups && uctxt->tid_group_list.count) {
386 struct tid_group *grp =
387 tid_group_pop(&uctxt->tid_group_list);
388
389 ret = program_rcvarray(fd, tidbuf, grp,
390 pageidx, dd->rcv_entries.group_size,
391 tidlist, &tididx, &mapped);
392 /*
393 * If there was a failure to program the RcvArray
394 * entries for the entire group, reset the grp fields
395 * and add the grp back to the free group list.
396 */
397 if (ret <= 0) {
398 tid_group_add_tail(grp, &uctxt->tid_group_list);
399 hfi1_cdbg(TID,
400 "Failed to program RcvArray group %d", ret);
401 goto unlock;
402 }
403
404 tid_group_add_tail(grp, &uctxt->tid_full_list);
405 ngroups--;
406 pageidx += ret;
407 mapped_pages += mapped;
408 }
409
410 while (pageidx < pageset_count) {
411 struct tid_group *grp, *ptr;
412 /*
413 * If we don't have any partially used tid groups, check
414 * if we have empty groups. If so, take one from there and
415 * put in the partially used list.
416 */
417 if (!uctxt->tid_used_list.count || need_group) {
418 if (!uctxt->tid_group_list.count)
419 goto unlock;
420
421 grp = tid_group_pop(&uctxt->tid_group_list);
422 tid_group_add_tail(grp, &uctxt->tid_used_list);
423 need_group = 0;
424 }
425 /*
426 * There is an optimization opportunity here - instead of
427 * fitting as many page sets as we can, check for a group
428 * later on in the list that could fit all of them.
429 */
430 list_for_each_entry_safe(grp, ptr, &uctxt->tid_used_list.list,
431 list) {
432 unsigned use = min_t(unsigned, pageset_count - pageidx,
433 grp->size - grp->used);
434
435 ret = program_rcvarray(fd, tidbuf, grp,
436 pageidx, use, tidlist,
437 &tididx, &mapped);
438 if (ret < 0) {
439 hfi1_cdbg(TID,
440 "Failed to program RcvArray entries %d",
441 ret);
442 goto unlock;
443 } else if (ret > 0) {
444 if (grp->used == grp->size)
445 tid_group_move(grp,
446 &uctxt->tid_used_list,
447 &uctxt->tid_full_list);
448 pageidx += ret;
449 mapped_pages += mapped;
450 need_group = 0;
451 /* Check if we are done so we break out early */
452 if (pageidx >= pageset_count)
453 break;
454 } else if (WARN_ON(ret == 0)) {
455 /*
456 * If ret is 0, we did not program any entries
457 * into this group, which can only happen if
458 * we've screwed up the accounting somewhere.
459 * Warn and try to continue.
460 */
461 need_group = 1;
462 }
463 }
464 }
unlock:
	mutex_unlock(&uctxt->exp_mutex);
nomem:
	hfi1_cdbg(TID, "total mapped: tidpairs:%u pages:%u (%d)", tididx,
		  mapped_pages, ret);
	if (tididx) {
		spin_lock(&fd->tid_lock);
		fd->tid_used += tididx;
		spin_unlock(&fd->tid_lock);
		tinfo->tidcnt = tididx;
		tinfo->length = mapped_pages * PAGE_SIZE;

		if (copy_to_user(u64_to_user_ptr(tinfo->tidlist),
				 tidlist, sizeof(tidlist[0]) * tididx)) {
			/*
			 * On failure to copy to the user level, we need to undo
			 * everything done so far so we don't leak resources.
			 */
			tinfo->tidlist = (unsigned long)tidlist;
			hfi1_user_exp_rcv_clear(fd, tinfo);
			tinfo->tidlist = 0;
			ret = -EFAULT;
			goto bail;
		}
	}

	/*
	 * If not everything was mapped (due to insufficient RcvArray entries,
	 * for example), unpin all unmapped pages so we can pin them next time.
	 */
	if (mapped_pages != pinned)
		unpin_rcv_pages(fd, tidbuf, NULL, mapped_pages,
				(pinned - mapped_pages), false);
bail:
	kfree(tidbuf->psets);
	kfree(tidlist);
	kfree(tidbuf->pages);
	kfree(tidbuf);
	return ret > 0 ? 0 : ret;
}

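/*
 * Unprogram the RcvArray entries described by the user-supplied tidlist.
 * On return, tinfo->tidcnt holds the number of entries actually cleared.
 */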
int hfi1_user_exp_rcv_clear(struct hfi1_filedata *fd,
			    struct hfi1_tid_info *tinfo)
{
	int ret = 0;
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	u32 *tidinfo;
	unsigned tididx;

	if (unlikely(tinfo->tidcnt > fd->tid_used))
		return -EINVAL;

	tidinfo = memdup_user(u64_to_user_ptr(tinfo->tidlist),
			      sizeof(tidinfo[0]) * tinfo->tidcnt);
	if (IS_ERR(tidinfo))
		return PTR_ERR(tidinfo);

	mutex_lock(&uctxt->exp_mutex);
	for (tididx = 0; tididx < tinfo->tidcnt; tididx++) {
		ret = unprogram_rcvarray(fd, tidinfo[tididx], NULL);
		if (ret) {
			hfi1_cdbg(TID, "Failed to unprogram rcv array %d",
				  ret);
			break;
		}
	}
	spin_lock(&fd->tid_lock);
	fd->tid_used -= tididx;
	spin_unlock(&fd->tid_lock);
	tinfo->tidcnt = tididx;
	mutex_unlock(&uctxt->exp_mutex);

	kfree(tidinfo);
	return ret;
}

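/*
 * Report the TIDs invalidated by the MMU notifier since the last call
 * back to user space, then clear the driver-side list and event bit.
 */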
int hfi1_user_exp_rcv_invalid(struct hfi1_filedata *fd,
			      struct hfi1_tid_info *tinfo)
{
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	unsigned long *ev = uctxt->dd->events +
		(uctxt_offset(uctxt) + fd->subctxt);
	u32 *array;
	int ret = 0;

	/*
	 * copy_to_user() can sleep, which will leave the invalid_lock
	 * locked and cause the MMU notifier to be blocked on the lock
	 * for a long time.
	 * Copy the data to a local buffer so we can release the lock.
	 */
	array = kcalloc(uctxt->expected_count, sizeof(*array), GFP_KERNEL);
	if (!array)
		return -ENOMEM;

	spin_lock(&fd->invalid_lock);
	if (fd->invalid_tid_idx) {
		memcpy(array, fd->invalid_tids, sizeof(*array) *
		       fd->invalid_tid_idx);
		memset(fd->invalid_tids, 0, sizeof(*fd->invalid_tids) *
		       fd->invalid_tid_idx);
		tinfo->tidcnt = fd->invalid_tid_idx;
		fd->invalid_tid_idx = 0;
		/*
		 * Reset the user flag while still holding the lock.
		 * Otherwise, PSM can miss events.
		 */
		clear_bit(_HFI1_EVENT_TID_MMU_NOTIFY_BIT, ev);
	} else {
		tinfo->tidcnt = 0;
	}
	spin_unlock(&fd->invalid_lock);

	if (tinfo->tidcnt) {
		if (copy_to_user(u64_to_user_ptr(tinfo->tidlist),
				 array, sizeof(*array) * tinfo->tidcnt))
			ret = -EFAULT;
	}
	kfree(array);

	return ret;
}


static u32 find_phys_blocks(struct tid_user_buf *tidbuf, unsigned int npages)
{
	unsigned pagecount, pageidx, setcount = 0, i;
	unsigned long pfn, this_pfn;
	struct page **pages = tidbuf->pages;
	struct tid_pageset *list = tidbuf->psets;

	if (!npages)
		return 0;

	/*
	 * Look for sets of physically contiguous pages in the user buffer.
	 * This will allow us to optimize Expected RcvArray entry usage by
	 * using the bigger supported sizes.
	 */
	pfn = page_to_pfn(pages[0]);
	for (pageidx = 0, pagecount = 1, i = 1; i <= npages; i++) {
		this_pfn = i < npages ? page_to_pfn(pages[i]) : 0;

		/*
		 * If the pfn's are not sequential, pages are not physically
		 * contiguous.
		 */
		if (this_pfn != ++pfn) {
			/*
			 * At this point we have to loop over the set of
			 * physically contiguous pages and break them down
			 * into sizes supported by the HW.
			 * There are two main constraints:
			 *   1. The max buffer size is MAX_EXPECTED_BUFFER.
			 *      If the total set size is bigger than that
			 *      program only a MAX_EXPECTED_BUFFER chunk.
			 *   2. The buffer size has to be a power of two. If
			 *      it is not, round down to the closest power of
			 *      2 and program that size.
			 */
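			/*
			 * Example: a run of 13 contiguous pages is not a
			 * power-of-two buffer, so it is emitted as three
			 * pagesets of 8, 4, and 1 pages, each a
			 * power-of-two size the HW can be programmed with.
			 */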
			while (pagecount) {
				int maxpages = pagecount;
				u32 bufsize = pagecount * PAGE_SIZE;

				if (bufsize > MAX_EXPECTED_BUFFER)
					maxpages =
						MAX_EXPECTED_BUFFER >>
						PAGE_SHIFT;
				else if (!is_power_of_2(bufsize))
					maxpages =
						rounddown_pow_of_two(bufsize) >>
						PAGE_SHIFT;

				list[setcount].idx = pageidx;
				list[setcount].count = maxpages;
				pagecount -= maxpages;
				pageidx += maxpages;
				setcount++;
			}
			pageidx = i;
			pagecount = 1;
			pfn = this_pfn;
		} else {
			pagecount++;
		}
	}
	return setcount;
}

/**
 * program_rcvarray() - program an RcvArray group with receive buffers
 * @fd: filedata pointer
 * @tbuf: pointer to struct tid_user_buf that has the user buffer starting
 *        virtual address, buffer length, page pointers, pagesets (array of
 *        struct tid_pageset holding information on physically contiguous
 *        chunks from the user buffer), and other fields.
 * @grp: RcvArray group
 * @start: starting index into sets array
 * @count: number of struct tid_pageset's to program
 * @tidlist: the array of u32 elements where the information about the
 *           programmed RcvArray entries is to be encoded.
 * @tididx: starting offset into tidlist
 * @pmapped: (output parameter) number of pages programmed into the RcvArray
 *           entries.
 *
 * This function will program up to 'count' number of RcvArray entries from the
 * group 'grp'. To make best use of write-combining writes, the function will
 * perform writes to the unused RcvArray entries which will be ignored by the
 * HW. Each RcvArray entry will be programmed with a physically contiguous
 * buffer chunk from the user's virtual buffer.
 *
 * Return:
 * -EINVAL if the requested count is larger than the size of the group,
 * -ENOMEM or -EFAULT on error from set_rcvarray_entry(), or
 * number of RcvArray entries programmed.
 */
static int program_rcvarray(struct hfi1_filedata *fd, struct tid_user_buf *tbuf,
			    struct tid_group *grp,
			    unsigned int start, u16 count,
			    u32 *tidlist, unsigned int *tididx,
			    unsigned int *pmapped)
{
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	struct hfi1_devdata *dd = uctxt->dd;
	u16 idx;
	u32 tidinfo = 0, rcventry, useidx = 0;
	int mapped = 0;

	/* Count should never be larger than the group size */
	if (count > grp->size)
		return -EINVAL;

	/* Find the first unused entry in the group */
	for (idx = 0; idx < grp->size; idx++) {
		if (!(grp->map & (1 << idx))) {
			useidx = idx;
			break;
		}
		rcv_array_wc_fill(dd, grp->base + idx);
	}

	idx = 0;
	while (idx < count) {
		u16 npages, pageidx, setidx = start + idx;
		int ret = 0;

		/*
		 * If this entry in the group is used, move to the next one.
		 * If we go past the end of the group, exit the loop.
		 */
		if (useidx >= grp->size) {
			break;
		} else if (grp->map & (1 << useidx)) {
			rcv_array_wc_fill(dd, grp->base + useidx);
			useidx++;
			continue;
		}

		rcventry = grp->base + useidx;
		npages = tbuf->psets[setidx].count;
		pageidx = tbuf->psets[setidx].idx;

		ret = set_rcvarray_entry(fd, tbuf,
					 rcventry, grp, pageidx,
					 npages);
		if (ret)
			return ret;
		mapped += npages;

		tidinfo = rcventry2tidinfo(rcventry - uctxt->expected_base) |
			EXP_TID_SET(LEN, npages);
		tidlist[(*tididx)++] = tidinfo;
		grp->used++;
		grp->map |= 1 << useidx++;
		idx++;
	}

	/* Fill the rest of the group with "blank" writes */
	for (; useidx < grp->size; useidx++)
		rcv_array_wc_fill(dd, grp->base + useidx);
	*pmapped = mapped;
	return idx;
}


static int set_rcvarray_entry(struct hfi1_filedata *fd,
			      struct tid_user_buf *tbuf,
			      u32 rcventry, struct tid_group *grp,
			      u16 pageidx, unsigned int npages)
{
	int ret;
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	struct tid_rb_node *node;
	struct hfi1_devdata *dd = uctxt->dd;
	dma_addr_t phys;
	struct page **pages = tbuf->pages + pageidx;

	/*
	 * Allocate the node first so we can handle a potential
	 * failure before we've programmed anything.
	 */
	node = kzalloc(sizeof(*node) + (sizeof(struct page *) * npages),
		       GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	phys = pci_map_single(dd->pcidev,
			      __va(page_to_phys(pages[0])),
			      npages * PAGE_SIZE, PCI_DMA_FROMDEVICE);
	if (dma_mapping_error(&dd->pcidev->dev, phys)) {
		dd_dev_err(dd, "Failed to DMA map Exp Rcv pages 0x%llx\n",
			   phys);
		kfree(node);
		return -EFAULT;
	}

	node->mmu.addr = tbuf->vaddr + (pageidx * PAGE_SIZE);
	node->mmu.len = npages * PAGE_SIZE;
	node->phys = page_to_phys(pages[0]);
	node->npages = npages;
	node->rcventry = rcventry;
	node->dma_addr = phys;
	node->grp = grp;
	node->freed = false;
	memcpy(node->pages, pages, sizeof(struct page *) * npages);

	if (!fd->handler)
		ret = tid_rb_insert(fd, &node->mmu);
	else
		ret = hfi1_mmu_rb_insert(fd->handler, &node->mmu);

	if (ret) {
		hfi1_cdbg(TID, "Failed to insert RB node %u 0x%lx, 0x%lx %d",
			  node->rcventry, node->mmu.addr, node->phys, ret);
		pci_unmap_single(dd->pcidev, phys, npages * PAGE_SIZE,
				 PCI_DMA_FROMDEVICE);
		kfree(node);
		return -EFAULT;
	}
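	/* All software state is in place; make the entry visible to the HW. */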
	hfi1_put_tid(dd, rcventry, PT_EXPECTED, phys, ilog2(npages) + 1);
	trace_hfi1_exp_tid_reg(uctxt->ctxt, fd->subctxt, rcventry, npages,
			       node->mmu.addr, node->phys, phys);
	return 0;
}

static int unprogram_rcvarray(struct hfi1_filedata *fd, u32 tidinfo,
			      struct tid_group **grp)
{
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	struct hfi1_devdata *dd = uctxt->dd;
	struct tid_rb_node *node;
	u8 tidctrl = EXP_TID_GET(tidinfo, CTRL);
	u32 tididx = EXP_TID_GET(tidinfo, IDX) << 1, rcventry;

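	/*
	 * IDX names a pair of RcvArray entries; CTRL selects which entry
	 * of the pair is meant (1 or 2). CTRL == 3 would address both
	 * entries of the pair and is rejected below.
	 */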
	if (tididx >= uctxt->expected_count) {
		dd_dev_err(dd, "Invalid RcvArray entry (%u) index for ctxt %u\n",
			   tididx, uctxt->ctxt);
		return -EINVAL;
	}

	if (tidctrl == 0x3)
		return -EINVAL;

	rcventry = tididx + (tidctrl - 1);

	node = fd->entry_to_rb[rcventry];
	if (!node || node->rcventry != (uctxt->expected_base + rcventry))
		return -EBADF;

	if (grp)
		*grp = node->grp;

	if (!fd->handler)
		cacheless_tid_rb_remove(fd, node);
	else
		hfi1_mmu_rb_remove(fd->handler, &node->mmu);

	return 0;
}

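/*
 * Tear down one programmed RcvArray entry: invalidate the HW entry and
 * flush the write before the pages are unpinned, release the pages, and
 * return the entry to its group, moving the group back toward
 * tid_used_list/tid_group_list as it empties.
 */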
static void clear_tid_node(struct hfi1_filedata *fd, struct tid_rb_node *node)
{
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	struct hfi1_devdata *dd = uctxt->dd;

	trace_hfi1_exp_tid_unreg(uctxt->ctxt, fd->subctxt, node->rcventry,
				 node->npages, node->mmu.addr, node->phys,
				 node->dma_addr);

	/*
	 * Make sure device has seen the write before we unpin the
	 * pages.
	 */
	hfi1_put_tid(dd, node->rcventry, PT_INVALID_FLUSH, 0, 0);

	unpin_rcv_pages(fd, NULL, node, 0, node->npages, true);

	node->grp->used--;
	node->grp->map &= ~(1 << (node->rcventry - node->grp->base));

	if (node->grp->used == node->grp->size - 1)
		tid_group_move(node->grp, &uctxt->tid_full_list,
			       &uctxt->tid_used_list);
	else if (!node->grp->used)
		tid_group_move(node->grp, &uctxt->tid_used_list,
			       &uctxt->tid_group_list);
	kfree(node);
}


/*
 * As a simple helper for hfi1_user_exp_rcv_free, this function deals with
 * clearing nodes in the non-cached case.
 */
static void unlock_exp_tids(struct hfi1_ctxtdata *uctxt,
			    struct exp_tid_set *set,
			    struct hfi1_filedata *fd)
{
	struct tid_group *grp, *ptr;
	int i;

	list_for_each_entry_safe(grp, ptr, &set->list, list) {
		list_del_init(&grp->list);

		for (i = 0; i < grp->size; i++) {
			if (grp->map & (1 << i)) {
				u16 rcventry = grp->base + i;
				struct tid_rb_node *node;

				node = fd->entry_to_rb[rcventry -
						       uctxt->expected_base];
				if (!node || node->rcventry != rcventry)
					continue;

				cacheless_tid_rb_remove(fd, node);
			}
		}
	}
}


/*
 * Always return 0 from this function. A non-zero return indicates that the
 * remove operation will be called and that memory should be unpinned.
 * However, the driver cannot unpin out from under PSM. Instead, retain the
 * memory (by returning 0) and inform PSM that the memory is going away. PSM
 * will call back later when it has removed the memory from its list.
 */
static int tid_rb_invalidate(void *arg, struct mmu_rb_node *mnode)
{
	struct hfi1_filedata *fdata = arg;
	struct hfi1_ctxtdata *uctxt = fdata->uctxt;
	struct tid_rb_node *node =
		container_of(mnode, struct tid_rb_node, mmu);

	if (node->freed)
		return 0;

	trace_hfi1_exp_tid_inval(uctxt->ctxt, fdata->subctxt, node->mmu.addr,
				 node->rcventry, node->npages, node->dma_addr);
	node->freed = true;

	spin_lock(&fdata->invalid_lock);
	if (fdata->invalid_tid_idx < uctxt->expected_count) {
		fdata->invalid_tids[fdata->invalid_tid_idx] =
			rcventry2tidinfo(node->rcventry - uctxt->expected_base);
		fdata->invalid_tids[fdata->invalid_tid_idx] |=
			EXP_TID_SET(LEN, node->npages);
		if (!fdata->invalid_tid_idx) {
			unsigned long *ev;

			/*
			 * hfi1_set_uevent_bits() sets a user event flag
			 * for all processes. Because calling into the
			 * driver to process TID cache invalidations is
			 * expensive and TID cache invalidations are
			 * handled on a per-process basis, we can
			 * optimize this to set the flag only for the
			 * process in question.
			 */
			ev = uctxt->dd->events +
				(uctxt_offset(uctxt) + fdata->subctxt);
			set_bit(_HFI1_EVENT_TID_MMU_NOTIFY_BIT, ev);
		}
		fdata->invalid_tid_idx++;
	}
	spin_unlock(&fdata->invalid_lock);
	return 0;
}

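/*
 * Cache the node in entry_to_rb[] so that it can be looked up by rcventry
 * when user space later unprograms the TID.
 */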
static int tid_rb_insert(void *arg, struct mmu_rb_node *node)
{
	struct hfi1_filedata *fdata = arg;
	struct tid_rb_node *tnode =
		container_of(node, struct tid_rb_node, mmu);
	u32 base = fdata->uctxt->expected_base;

	fdata->entry_to_rb[tnode->rcventry - base] = tnode;
	return 0;
}

static void cacheless_tid_rb_remove(struct hfi1_filedata *fdata,
				    struct tid_rb_node *tnode)
{
	u32 base = fdata->uctxt->expected_base;

	fdata->entry_to_rb[tnode->rcventry - base] = NULL;
	clear_tid_node(fdata, tnode);
}

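/*
 * MMU rb-tree remove callback: release the driver state for a node that
 * is being removed from the cache.
 */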
static void tid_rb_remove(void *arg, struct mmu_rb_node *node)
{
	struct hfi1_filedata *fdata = arg;
	struct tid_rb_node *tnode =
		container_of(node, struct tid_rb_node, mmu);

	cacheless_tid_rb_remove(fdata, tnode);
}