This source file includes the following definitions:
- rds_pages_in_vec
- rds_mr_tree_walk
- rds_destroy_mr
- __rds_put_mr_final
- rds_rdma_drop_keys
- rds_pin_pages
- __rds_rdma_map
- rds_get_mr
- rds_get_mr_for_dest
- rds_free_mr
- rds_rdma_unuse
- rds_rdma_free_op
- rds_atomic_free_op
- rds_rdma_pages
- rds_rdma_extra_size
- rds_cmsg_rdma_args
- rds_cmsg_rdma_dest
- rds_cmsg_rdma_map
- rds_cmsg_atomic

#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/dma-mapping.h>

#include "rds.h"
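
/*
 * Count the pages spanned by a user iovec from the page indices of its
 * start and end addresses.  Returns 0 if the vec is invalid: the byte
 * count wraps the address space or exceeds UINT_MAX (it must fit in the
 * 'length' member of struct scatterlist).
 */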
static unsigned int rds_pages_in_vec(struct rds_iovec *vec)
{
	if ((vec->addr + vec->bytes <= vec->addr) ||
	    (vec->bytes > (u64)UINT_MAX))
		return 0;

	return ((vec->addr + vec->bytes + PAGE_SIZE - 1) >> PAGE_SHIFT) -
		(vec->addr >> PAGE_SHIFT);
}

static struct rds_mr *rds_mr_tree_walk(struct rb_root *root, u64 key,
				       struct rds_mr *insert)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct rds_mr *mr;

	while (*p) {
		parent = *p;
		mr = rb_entry(parent, struct rds_mr, r_rb_node);

		if (key < mr->r_key)
			p = &(*p)->rb_left;
		else if (key > mr->r_key)
			p = &(*p)->rb_right;
		else
			return mr;
	}

	if (insert) {
		rb_link_node(&insert->r_rb_node, parent, p);
		rb_insert_color(&insert->r_rb_node, root);
		refcount_inc(&insert->r_refcount);
	}
	return NULL;
}
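
/*
 * Tear down the transport-specific side of an MR: pull it out of the
 * socket's rbtree if it is still linked, detach r_trans_private under
 * the lock, and hand it to the transport's free_mr().
 */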
static void rds_destroy_mr(struct rds_mr *mr)
{
	struct rds_sock *rs = mr->r_sock;
	void *trans_private = NULL;
	unsigned long flags;

	rdsdebug("RDS: destroy mr key is %x refcnt %u\n",
		 mr->r_key, refcount_read(&mr->r_refcount));

	if (test_and_set_bit(RDS_MR_DEAD, &mr->r_state))
		return;

	spin_lock_irqsave(&rs->rs_rdma_lock, flags);
	if (!RB_EMPTY_NODE(&mr->r_rb_node))
		rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys);
	trans_private = mr->r_trans_private;
	mr->r_trans_private = NULL;
	spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);

	if (trans_private)
		mr->r_trans->free_mr(trans_private, mr->r_invalidate);
}

void __rds_put_mr_final(struct rds_mr *mr)
{
	rds_destroy_mr(mr);
	kfree(mr);
}
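
/*
 * Drop every MR still attached to the socket, then ask the transport to
 * flush its MR pool.  MRs belonging to the socket's own transport have
 * r_invalidate cleared before teardown; the transport's flush_mrs()
 * below reclaims them in bulk.
 */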
void rds_rdma_drop_keys(struct rds_sock *rs)
{
	struct rds_mr *mr;
	struct rb_node *node;
	unsigned long flags;

	spin_lock_irqsave(&rs->rs_rdma_lock, flags);
	while ((node = rb_first(&rs->rs_rdma_keys))) {
		mr = rb_entry(node, struct rds_mr, r_rb_node);
		if (mr->r_trans == rs->rs_transport)
			mr->r_invalidate = 0;
		rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys);
		RB_CLEAR_NODE(&mr->r_rb_node);
		spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);
		rds_destroy_mr(mr);
		rds_mr_put(mr);
		spin_lock_irqsave(&rs->rs_rdma_lock, flags);
	}
	spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);

	if (rs->rs_transport && rs->rs_transport->flush_mrs)
		rs->rs_transport->flush_mrs();
}
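
/*
 * Pin nr_pages of user memory via get_user_pages_fast().  On a partial
 * pin, drop whatever was pinned and return -EFAULT so callers see
 * all-or-nothing semantics.
 */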
static int rds_pin_pages(unsigned long user_addr, unsigned int nr_pages,
			 struct page **pages, int write)
{
	int ret;

	ret = get_user_pages_fast(user_addr, nr_pages, write ? FOLL_WRITE : 0,
				  pages);

	if (ret >= 0 && ret < nr_pages) {
		while (ret--)
			put_page(pages[ret]);
		ret = -EFAULT;
	}

	return ret;
}

static int __rds_rdma_map(struct rds_sock *rs, struct rds_get_mr_args *args,
			  u64 *cookie_ret, struct rds_mr **mr_ret,
			  struct rds_conn_path *cp)
{
	struct rds_mr *mr = NULL, *found;
	unsigned int nr_pages;
	struct page **pages = NULL;
	struct scatterlist *sg;
	void *trans_private;
	unsigned long flags;
	rds_rdma_cookie_t cookie;
	unsigned int nents;
	long i;
	int ret;

	if (ipv6_addr_any(&rs->rs_bound_addr) || !rs->rs_transport) {
		ret = -ENOTCONN;
		goto out;
	}

	if (!rs->rs_transport->get_mr) {
		ret = -EOPNOTSUPP;
		goto out;
	}

	nr_pages = rds_pages_in_vec(&args->vec);
	if (nr_pages == 0) {
		ret = -EINVAL;
		goto out;
	}
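
	/*
	 * Restrict the size of the MR regardless of the underlying
	 * transport.  nr_pages is compared minus one to allow for an
	 * unaligned region that straddles one extra page.
	 */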
	if ((nr_pages - 1) > (RDS_MAX_MSG_SIZE >> PAGE_SHIFT)) {
		ret = -EMSGSIZE;
		goto out;
	}

	rdsdebug("RDS: get_mr addr %llx len %llu nr_pages %u\n",
		 args->vec.addr, args->vec.bytes, nr_pages);

	pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
	if (!pages) {
		ret = -ENOMEM;
		goto out;
	}

	mr = kzalloc(sizeof(struct rds_mr), GFP_KERNEL);
	if (!mr) {
		ret = -ENOMEM;
		goto out;
	}

	refcount_set(&mr->r_refcount, 1);
	RB_CLEAR_NODE(&mr->r_rb_node);
	mr->r_trans = rs->rs_transport;
	mr->r_sock = rs;

	if (args->flags & RDS_RDMA_USE_ONCE)
		mr->r_use_once = 1;
	if (args->flags & RDS_RDMA_INVALIDATE)
		mr->r_invalidate = 1;
	if (args->flags & RDS_RDMA_READWRITE)
		mr->r_write = 1;
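
	/*
	 * Pin the pages that make up the user buffer.  We always pin for
	 * write: nothing in the args tells us whether the mapping will be
	 * used read-only or read-write, so read-write must be assumed.
	 */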
	ret = rds_pin_pages(args->vec.addr, nr_pages, pages, 1);
	if (ret < 0)
		goto out;

	nents = ret;
	sg = kcalloc(nents, sizeof(*sg), GFP_KERNEL);
	if (!sg) {
		ret = -ENOMEM;
		goto out;
	}
	WARN_ON(!nents);
	sg_init_table(sg, nents);

	for (i = 0; i < nents; i++)
		sg_set_page(&sg[i], pages[i], PAGE_SIZE, 0);

	rdsdebug("RDS: trans_private nents is %u\n", nents);
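
	/*
	 * Obtain a transport-specific MR.  On success the scatterlist and
	 * its page references are owned by the MR; on failure we unpin the
	 * pages and free the list ourselves.
	 */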
	trans_private = rs->rs_transport->get_mr(sg, nents, rs,
						 &mr->r_key,
						 cp ? cp->cp_conn : NULL);

	if (IS_ERR(trans_private)) {
		for (i = 0; i < nents; i++)
			put_page(sg_page(&sg[i]));
		kfree(sg);
		ret = PTR_ERR(trans_private);
		goto out;
	}

	mr->r_trans_private = trans_private;

	rdsdebug("RDS: get_mr put_user key is %x cookie_addr %p\n",
		 mr->r_key, (void *)(unsigned long) args->cookie_addr);
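
	/*
	 * The user may pass an unaligned address, but we can only map whole
	 * pages, so keep the in-page offset and build a 64-bit cookie of
	 * <R_Key, offset> to hand around instead.
	 */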
	cookie = rds_rdma_make_cookie(mr->r_key, args->vec.addr & ~PAGE_MASK);
	if (cookie_ret)
		*cookie_ret = cookie;

	if (args->cookie_addr &&
	    put_user(cookie, (u64 __user *)(unsigned long)args->cookie_addr)) {
		ret = -EFAULT;
		goto out;
	}
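
	/* Inserting the new MR into the rbtree bumps its reference count. */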
	spin_lock_irqsave(&rs->rs_rdma_lock, flags);
	found = rds_mr_tree_walk(&rs->rs_rdma_keys, mr->r_key, mr);
	spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);

	BUG_ON(found && found != mr);

	rdsdebug("RDS: get_mr key is %x\n", mr->r_key);
	if (mr_ret) {
		refcount_inc(&mr->r_refcount);
		*mr_ret = mr;
	}

	ret = 0;
out:
	kfree(pages);
	if (mr)
		rds_mr_put(mr);
	return ret;
}

int rds_get_mr(struct rds_sock *rs, char __user *optval, int optlen)
{
	struct rds_get_mr_args args;

	if (optlen != sizeof(struct rds_get_mr_args))
		return -EINVAL;

	if (copy_from_user(&args, (struct rds_get_mr_args __user *)optval,
			   sizeof(struct rds_get_mr_args)))
		return -EFAULT;

	return __rds_rdma_map(rs, &args, NULL, NULL, NULL);
}

int rds_get_mr_for_dest(struct rds_sock *rs, char __user *optval, int optlen)
{
	struct rds_get_mr_for_dest_args args;
	struct rds_get_mr_args new_args;

	if (optlen != sizeof(struct rds_get_mr_for_dest_args))
		return -EINVAL;

	if (copy_from_user(&args, (struct rds_get_mr_for_dest_args __user *)optval,
			   sizeof(struct rds_get_mr_for_dest_args)))
		return -EFAULT;
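
	/*
	 * The vec, cookie_addr and flags fields mirror struct
	 * rds_get_mr_args, so repack them and reuse __rds_rdma_map(); the
	 * destination address itself is not consumed here.
	 */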
	new_args.vec = args.vec;
	new_args.cookie_addr = args.cookie_addr;
	new_args.flags = args.flags;

	return __rds_rdma_map(rs, &new_args, NULL, NULL, NULL);
}
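
/*
 * Free the MR indicated by the given R_Key.
 */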
int rds_free_mr(struct rds_sock *rs, char __user *optval, int optlen)
{
	struct rds_free_mr_args args;
	struct rds_mr *mr;
	unsigned long flags;

	if (optlen != sizeof(struct rds_free_mr_args))
		return -EINVAL;

	if (copy_from_user(&args, (struct rds_free_mr_args __user *)optval,
			   sizeof(struct rds_free_mr_args)))
		return -EFAULT;
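
	/* A zero cookie means flush all unused MRs. */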
	if (args.cookie == 0) {
		if (!rs->rs_transport || !rs->rs_transport->flush_mrs)
			return -EINVAL;
		rs->rs_transport->flush_mrs();
		return 0;
	}
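
	/*
	 * Look up the MR by its R_Key and remove it from the rbtree so
	 * nobody else can find it; this also avoids racing with
	 * rds_rdma_unuse().
	 */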
	spin_lock_irqsave(&rs->rs_rdma_lock, flags);
	mr = rds_mr_tree_walk(&rs->rs_rdma_keys, rds_rdma_cookie_key(args.cookie), NULL);
	if (mr) {
		rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys);
		RB_CLEAR_NODE(&mr->r_rb_node);
		if (args.flags & RDS_RDMA_INVALIDATE)
			mr->r_invalidate = 1;
	}
	spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);

	if (!mr)
		return -EINVAL;
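
	/*
	 * Call rds_destroy_mr() ourselves so we know it has run by the time
	 * we return; rds_mr_put() alone might defer it until the last
	 * reference elsewhere is dropped.
	 */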
	rds_destroy_mr(mr);
	rds_mr_put(mr);
	return 0;
}
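
/*
 * Called when an incoming extension header tells us this MR was used by
 * the peer; this is what implements the use_once semantics.
 */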
void rds_rdma_unuse(struct rds_sock *rs, u32 r_key, int force)
{
	struct rds_mr *mr;
	unsigned long flags;
	int zot_me = 0;

	spin_lock_irqsave(&rs->rs_rdma_lock, flags);
	mr = rds_mr_tree_walk(&rs->rs_rdma_keys, r_key, NULL);
	if (!mr) {
		pr_debug("rds: trying to unuse MR with unknown r_key %u!\n",
			 r_key);
		spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);
		return;
	}

	if (mr->r_use_once || force) {
		rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys);
		RB_CLEAR_NODE(&mr->r_rb_node);
		zot_me = 1;
	}
	spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);
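
	/*
	 * We may have to dma_sync this region.  It could be skipped for an
	 * RDMA READ, but at this point we cannot tell what the operation
	 * was.
	 */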
	if (mr->r_trans->sync_mr)
		mr->r_trans->sync_mr(mr->r_trans_private, DMA_FROM_DEVICE);
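
	/*
	 * If the MR was marked for invalidation, destroying it here makes
	 * the transport's free_mr() perform the (possibly async) flush.
	 */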
	if (zot_me) {
		rds_destroy_mr(mr);
		rds_mr_put(mr);
	}
}

void rds_rdma_free_op(struct rm_rdma_op *ro)
{
	unsigned int i;

	for (i = 0; i < ro->op_nents; i++) {
		struct page *page = sg_page(&ro->op_sg[i]);
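
		/*
		 * Mark the page dirty if it was possibly modified, which is
		 * the case for an RDMA READ copying remote memory into it.
		 */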
		if (!ro->op_write) {
			WARN_ON(!page->mapping && irqs_disabled());
			set_page_dirty(page);
		}
		put_page(page);
	}

	kfree(ro->op_notifier);
	ro->op_notifier = NULL;
	ro->op_active = 0;
}

void rds_atomic_free_op(struct rm_atomic_op *ao)
{
	struct page *page = sg_page(ao->op_sg);
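
	/*
	 * Always mark the page dirty: the atomic operation writes the
	 * fetched old value back into this local page.
	 */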
	set_page_dirty(page);
	put_page(page);

	kfree(ao->op_notifier);
	ao->op_notifier = NULL;
	ao->op_active = 0;
}
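
/*
 * Count the total number of pages needed to describe an incoming iovec
 * array; returns -EINVAL on an invalid vec or on overflow.
 */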
static int rds_rdma_pages(struct rds_iovec iov[], int nr_iovecs)
{
	int tot_pages = 0;
	unsigned int nr_pages;
	unsigned int i;

	for (i = 0; i < nr_iovecs; i++) {
		nr_pages = rds_pages_in_vec(&iov[i]);
		if (nr_pages == 0)
			return -EINVAL;

		tot_pages += nr_pages;
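
		/*
		 * rds_pages_in_vec() yields at most (UINT_MAX >> PAGE_SHIFT) + 1
		 * pages per entry, so tot_pages goes negative before it can
		 * wrap around.
		 */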
		if (tot_pages < 0)
			return -EINVAL;
	}

	return tot_pages;
}

int rds_rdma_extra_size(struct rds_rdma_args *args,
			struct rds_iov_vector *iov)
{
	struct rds_iovec *vec;
	struct rds_iovec __user *local_vec;
	int tot_pages = 0;
	unsigned int nr_pages;
	unsigned int i;

	local_vec = (struct rds_iovec __user *)(unsigned long) args->local_vec_addr;

	if (args->nr_local == 0)
		return -EINVAL;

	iov->iov = kcalloc(args->nr_local,
			   sizeof(struct rds_iovec),
			   GFP_KERNEL);
	if (!iov->iov)
		return -ENOMEM;

	vec = &iov->iov[0];

	if (copy_from_user(vec, local_vec, args->nr_local *
			   sizeof(struct rds_iovec)))
		return -EFAULT;
	iov->len = args->nr_local;
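
	/* Figure out how many pages the whole vector spans. */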
	for (i = 0; i < args->nr_local; i++, vec++) {
		nr_pages = rds_pages_in_vec(vec);
		if (nr_pages == 0)
			return -EINVAL;

		tot_pages += nr_pages;
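
		/*
		 * Same overflow guard as in rds_rdma_pages(): tot_pages goes
		 * negative before it can wrap.
		 */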
		if (tot_pages < 0)
			return -EINVAL;
	}

	return tot_pages * sizeof(struct scatterlist);
}
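
/*
 * The application asks for an RDMA transfer.  Extract all the arguments
 * from the control message and set up the rm_rdma_op.
 */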
int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
		       struct cmsghdr *cmsg,
		       struct rds_iov_vector *vec)
{
	struct rds_rdma_args *args;
	struct rm_rdma_op *op = &rm->rdma;
	int nr_pages;
	unsigned int nr_bytes;
	struct page **pages = NULL;
	struct rds_iovec *iovs;
	unsigned int i, j;
	int ret = 0;

	if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct rds_rdma_args))
	    || rm->rdma.op_active)
		return -EINVAL;

	args = CMSG_DATA(cmsg);

	if (ipv6_addr_any(&rs->rs_bound_addr)) {
		ret = -ENOTCONN;
		goto out_ret;
	}

	if (args->nr_local > UIO_MAXIOV) {
		ret = -EMSGSIZE;
		goto out_ret;
	}

	if (vec->len != args->nr_local) {
		ret = -EINVAL;
		goto out_ret;
	}

	iovs = vec->iov;

	nr_pages = rds_rdma_pages(iovs, args->nr_local);
	if (nr_pages < 0) {
		ret = -EINVAL;
		goto out_ret;
	}

	pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
	if (!pages) {
		ret = -ENOMEM;
		goto out_ret;
	}

	op->op_write = !!(args->flags & RDS_RDMA_READWRITE);
	op->op_fence = !!(args->flags & RDS_RDMA_FENCE);
	op->op_notify = !!(args->flags & RDS_RDMA_NOTIFY_ME);
	op->op_silent = !!(args->flags & RDS_RDMA_SILENT);
	op->op_active = 1;
	op->op_recverr = rs->rs_recverr;
	WARN_ON(!nr_pages);
	op->op_sg = rds_message_alloc_sgs(rm, nr_pages);
	if (IS_ERR(op->op_sg)) {
		ret = PTR_ERR(op->op_sg);
		goto out_pages;
	}

	if (op->op_notify || op->op_recverr) {
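		/*
		 * Allocate the (uninitialized) notifier up front with
		 * GFP_KERNEL; doing this in the completion handler would
		 * require GFP_ATOMIC and handling failed allocations there.
		 */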
		op->op_notifier = kmalloc(sizeof(struct rds_notifier), GFP_KERNEL);
		if (!op->op_notifier) {
			ret = -ENOMEM;
			goto out_pages;
		}
		op->op_notifier->n_user_token = args->user_token;
		op->op_notifier->n_status = RDS_RDMA_SUCCESS;
	}
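
	/*
	 * The cookie contains the R_Key of the remote memory region and
	 * optionally an offset into it.  Adding that offset to the remote
	 * address (really an offset into the MR) is how RDMA to unaligned
	 * memory is implemented.
	 */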
	op->op_rkey = rds_rdma_cookie_key(args->cookie);
	op->op_remote_addr = args->remote_vec.addr + rds_rdma_cookie_offset(args->cookie);

	nr_bytes = 0;

	rdsdebug("RDS: rdma prepare nr_local %llu rva %llx rkey %x\n",
		 (unsigned long long)args->nr_local,
		 (unsigned long long)args->remote_vec.addr,
		 op->op_rkey);

	for (i = 0; i < args->nr_local; i++) {
		struct rds_iovec *iov = &iovs[i];
		unsigned int nr = rds_pages_in_vec(iov);

		rs->rs_user_addr = iov->addr;
		rs->rs_user_bytes = iov->bytes;
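
		/*
		 * For a WRITE we pin the local pages for reading; for a READ
		 * the remote data lands in them, so pin for writing, hence
		 * !op->op_write.
		 */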
		ret = rds_pin_pages(iov->addr, nr, pages, !op->op_write);
		if (ret < 0)
			goto out_pages;
		else
			ret = 0;

		rdsdebug("RDS: nr_bytes %u nr %u iov->bytes %llu iov->addr %llx\n",
			 nr_bytes, nr, iov->bytes, iov->addr);

		nr_bytes += iov->bytes;

		for (j = 0; j < nr; j++) {
			unsigned int offset = iov->addr & ~PAGE_MASK;
			struct scatterlist *sg;

			sg = &op->op_sg[op->op_nents + j];
			sg_set_page(sg, pages[j],
				    min_t(unsigned int, iov->bytes, PAGE_SIZE - offset),
				    offset);

			rdsdebug("RDS: sg->offset %x sg->len %x iov->addr %llx iov->bytes %llu\n",
				 sg->offset, sg->length, iov->addr, iov->bytes);

			iov->addr += sg->length;
			iov->bytes -= sg->length;
		}

		op->op_nents += nr;
	}

	if (nr_bytes > args->remote_vec.bytes) {
		rdsdebug("RDS nr_bytes %u remote_bytes %u do not match\n",
			 nr_bytes,
			 (unsigned int) args->remote_vec.bytes);
		ret = -EINVAL;
		goto out_pages;
	}
	op->op_bytes = nr_bytes;

out_pages:
	kfree(pages);
out_ret:
	if (ret)
		rds_rdma_free_op(op);
	else
		rds_stats_inc(s_send_rdma);

	return ret;
}
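
/*
 * The application wants to attach a previously mapped MR (an RDMA
 * destination) to this message, to be passed along to the peer.
 */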
int rds_cmsg_rdma_dest(struct rds_sock *rs, struct rds_message *rm,
		       struct cmsghdr *cmsg)
{
	unsigned long flags;
	struct rds_mr *mr;
	u32 r_key;
	int err = 0;

	if (cmsg->cmsg_len < CMSG_LEN(sizeof(rds_rdma_cookie_t)) ||
	    rm->m_rdma_cookie != 0)
		return -EINVAL;

	memcpy(&rm->m_rdma_cookie, CMSG_DATA(cmsg), sizeof(rm->m_rdma_cookie));
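
	/*
	 * We are reusing a previously mapped MR, and the application has
	 * most likely written to the buffer since, so sync the region
	 * toward the device or the writes may not be visible to the HCA.
	 */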
	r_key = rds_rdma_cookie_key(rm->m_rdma_cookie);

	spin_lock_irqsave(&rs->rs_rdma_lock, flags);
	mr = rds_mr_tree_walk(&rs->rs_rdma_keys, r_key, NULL);
	if (!mr)
		err = -EINVAL;
	else
		refcount_inc(&mr->r_refcount);
	spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);

	if (mr) {
		mr->r_trans->sync_mr(mr->r_trans_private, DMA_TO_DEVICE);
		rm->rdma.op_rdma_mr = mr;
	}
	return err;
}
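
/*
 * The application passes us an address range to enable RDMA to/from.
 * We map the area and save the <R_Key, offset> pair in
 * rm->m_rdma_cookie, which is then carried to the peer in an extension
 * header.
 */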
int rds_cmsg_rdma_map(struct rds_sock *rs, struct rds_message *rm,
		      struct cmsghdr *cmsg)
{
	if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct rds_get_mr_args)) ||
	    rm->m_rdma_cookie != 0)
		return -EINVAL;

	return __rds_rdma_map(rs, CMSG_DATA(cmsg), &rm->m_rdma_cookie,
			      &rm->rdma.op_rdma_mr, rm->m_conn_path);
}
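
/*
 * Fill in the rds_message for an atomic request.
 */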
int rds_cmsg_atomic(struct rds_sock *rs, struct rds_message *rm,
		    struct cmsghdr *cmsg)
{
	struct page *page = NULL;
	struct rds_atomic_args *args;
	int ret = 0;

	if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct rds_atomic_args))
	    || rm->atomic.op_active)
		return -EINVAL;

	args = CMSG_DATA(cmsg);

	switch (cmsg->cmsg_type) {
	case RDS_CMSG_ATOMIC_FADD:
		rm->atomic.op_type = RDS_ATOMIC_TYPE_FADD;
		rm->atomic.op_m_fadd.add = args->fadd.add;
		rm->atomic.op_m_fadd.nocarry_mask = 0;
		break;
	case RDS_CMSG_MASKED_ATOMIC_FADD:
		rm->atomic.op_type = RDS_ATOMIC_TYPE_FADD;
		rm->atomic.op_m_fadd.add = args->m_fadd.add;
		rm->atomic.op_m_fadd.nocarry_mask = args->m_fadd.nocarry_mask;
		break;
	case RDS_CMSG_ATOMIC_CSWP:
		rm->atomic.op_type = RDS_ATOMIC_TYPE_CSWP;
		rm->atomic.op_m_cswp.compare = args->cswp.compare;
		rm->atomic.op_m_cswp.swap = args->cswp.swap;
		rm->atomic.op_m_cswp.compare_mask = ~0;
		rm->atomic.op_m_cswp.swap_mask = ~0;
		break;
	case RDS_CMSG_MASKED_ATOMIC_CSWP:
		rm->atomic.op_type = RDS_ATOMIC_TYPE_CSWP;
		rm->atomic.op_m_cswp.compare = args->m_cswp.compare;
		rm->atomic.op_m_cswp.swap = args->m_cswp.swap;
		rm->atomic.op_m_cswp.compare_mask = args->m_cswp.compare_mask;
		rm->atomic.op_m_cswp.swap_mask = args->m_cswp.swap_mask;
		break;
	default:
		BUG();
	}

	rm->atomic.op_notify = !!(args->flags & RDS_RDMA_NOTIFY_ME);
	rm->atomic.op_silent = !!(args->flags & RDS_RDMA_SILENT);
	rm->atomic.op_active = 1;
	rm->atomic.op_recverr = rs->rs_recverr;
	rm->atomic.op_sg = rds_message_alloc_sgs(rm, 1);
	if (IS_ERR(rm->atomic.op_sg)) {
		ret = PTR_ERR(rm->atomic.op_sg);
		goto err;
	}
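
	/* The local atomic target must be 8-byte aligned. */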
	if (args->local_addr & 0x7) {
		ret = -EFAULT;
		goto err;
	}

	ret = rds_pin_pages(args->local_addr, 1, &page, 1);
	if (ret != 1)
		goto err;
	ret = 0;

	sg_set_page(rm->atomic.op_sg, page, 8, offset_in_page(args->local_addr));

	if (rm->atomic.op_notify || rm->atomic.op_recverr) {
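		/*
		 * As above, allocate the notifier with GFP_KERNEL now rather
		 * than with GFP_ATOMIC in the completion path.
		 */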
		rm->atomic.op_notifier = kmalloc(sizeof(*rm->atomic.op_notifier), GFP_KERNEL);
		if (!rm->atomic.op_notifier) {
			ret = -ENOMEM;
			goto err;
		}

		rm->atomic.op_notifier->n_user_token = args->user_token;
		rm->atomic.op_notifier->n_status = RDS_RDMA_SUCCESS;
	}

	rm->atomic.op_rkey = rds_rdma_cookie_key(args->cookie);
	rm->atomic.op_remote_addr = args->remote_addr + rds_rdma_cookie_offset(args->cookie);

	return ret;
err:
	if (page)
		put_page(page);
	rm->atomic.op_active = 0;
	kfree(rm->atomic.op_notifier);

	return ret;
}