This source file includes the following definitions:
- rvt_get_swqe_ah
- rvt_get_swqe_ah_attr
- rvt_get_swqe_remote_qpn
- rvt_get_swqe_remote_qkey
- rvt_get_swqe_pkey_index
- ibsrq_to_rvtsrq
- ibqp_to_rvtqp
- rvt_get_swqe_ptr
- rvt_get_rwqe_ptr
- rvt_is_user_qp
- rvt_get_qp
- rvt_put_qp
- rvt_put_swqe
- rvt_qp_wqe_reserve
- rvt_qp_wqe_unreserve
- rvt_cmp_msn
- rvt_div_round_up_mtu
- rvt_div_mtu
- rvt_timeout_to_jiffies
- rvt_lookup_qpn
- rvt_mod_retry_timer_ext
- rvt_mod_retry_timer
- rvt_put_qp_swqe
- rvt_qp_swqe_incr
- rvt_recv_cq
- rvt_send_cq
- rvt_qp_complete_swqe
- rvt_add_retry_timer
- ib_cq_tail
- ib_cq_head
- rvt_free_rq
- rvt_to_iport
- rvt_rc_credit_avail
#ifndef DEF_RDMAVT_INCQP_H
#define DEF_RDMAVT_INCQP_H

#include <rdma/rdma_vt.h>
#include <rdma/ib_pack.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdmavt_cq.h>
#include <rdma/rvt-abi.h>
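
/*
 * Atomic bit positions used with the QP r_aflags field.
 */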
#define RVT_R_WRID_VALID 0
#define RVT_R_REWIND_SGE 1
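
/*
 * Bit definitions for r_flags.
 */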
#define RVT_R_REUSE_SGE 0x01
#define RVT_R_RDMAR_SEQ 0x02
#define RVT_R_RSP_NAK 0x04
#define RVT_R_RSP_SEND 0x08
#define RVT_R_COMM_EST 0x10
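
/*
 * Bit definitions for s_flags.
 *
 * RVT_S_SIGNAL_REQ_WR - set if QP send WRs contain completion signaled
 * RVT_S_BUSY - send tasklet is processing the QP
 * RVT_S_TIMER - the RC retry timer is active
 * RVT_S_RESP_PENDING - a response is pending on the responder side
 * RVT_S_ACK_PENDING - an ACK is waiting to be sent after RDMA read/atomics
 * RVT_S_WAIT_FENCE - waiting for all prior RDMA read or atomic SWQEs
 *                    before processing the next SWQE
 * RVT_S_WAIT_RDMAR - waiting for a RDMA read or atomic SWQE to complete
 *                    before processing the next SWQE
 * RVT_S_WAIT_RNR - waiting for RNR timeout
 * RVT_S_WAIT_SSN_CREDIT - waiting for RC credits to process next SWQE
 * RVT_S_WAIT_DMA - waiting for send DMA queue to drain before generating
 *                  next send completion entry not via send DMA
 * RVT_S_WAIT_PIO - waiting for a send buffer to be available
 * RVT_S_WAIT_TX - waiting for a struct verbs_txreq to be available
 * RVT_S_WAIT_DMA_DESC - waiting for DMA descriptors to be available
 * RVT_S_WAIT_KMEM - waiting for kernel memory to be available
 * RVT_S_WAIT_PSN - waiting for a packet to exit the send DMA queue
 * RVT_S_WAIT_ACK - waiting for an ACK packet before sending more requests
 * RVT_S_SEND_ONE - send one packet, request ACK, then wait for ACK
 * RVT_S_UNLIMITED_CREDIT - ignore SSN credit flow control
 * RVT_S_ECN - a BECN was queued to the send engine
 * RVT_S_MAX_BIT_MASK - the max bit that can be used by rdmavt
 */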
#define RVT_S_SIGNAL_REQ_WR 0x0001
#define RVT_S_BUSY 0x0002
#define RVT_S_TIMER 0x0004
#define RVT_S_RESP_PENDING 0x0008
#define RVT_S_ACK_PENDING 0x0010
#define RVT_S_WAIT_FENCE 0x0020
#define RVT_S_WAIT_RDMAR 0x0040
#define RVT_S_WAIT_RNR 0x0080
#define RVT_S_WAIT_SSN_CREDIT 0x0100
#define RVT_S_WAIT_DMA 0x0200
#define RVT_S_WAIT_PIO 0x0400
#define RVT_S_WAIT_TX 0x0800
#define RVT_S_WAIT_DMA_DESC 0x1000
#define RVT_S_WAIT_KMEM 0x2000
#define RVT_S_WAIT_PSN 0x4000
#define RVT_S_WAIT_ACK 0x8000
#define RVT_S_SEND_ONE 0x10000
#define RVT_S_UNLIMITED_CREDIT 0x20000
#define RVT_S_ECN 0x40000
#define RVT_S_MAX_BIT_MASK 0x800000
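
/*
 * Wait flags that would prevent any packet type from being sent.
 */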
#define RVT_S_ANY_WAIT_IO \
	(RVT_S_WAIT_PIO | RVT_S_WAIT_TX | \
	 RVT_S_WAIT_DMA_DESC | RVT_S_WAIT_KMEM)
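
/*
 * Wait flags that would prevent send work requests from making progress.
 */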
#define RVT_S_ANY_WAIT_SEND (RVT_S_WAIT_FENCE | RVT_S_WAIT_RDMAR | \
	RVT_S_WAIT_RNR | RVT_S_WAIT_SSN_CREDIT | RVT_S_WAIT_DMA | \
	RVT_S_WAIT_PSN | RVT_S_WAIT_ACK)

#define RVT_S_ANY_WAIT (RVT_S_ANY_WAIT_IO | RVT_S_ANY_WAIT_SEND)

/* Number of bits to pay attention to in the opcode for checking qp type */
#define RVT_OPCODE_QP_MASK 0xE0

/* Flags for checking QP state (see ib_rvt_state_ops[]) */
#define RVT_POST_SEND_OK 0x01
#define RVT_POST_RECV_OK 0x02
#define RVT_PROCESS_RECV_OK 0x04
#define RVT_PROCESS_SEND_OK 0x08
#define RVT_PROCESS_NEXT_SEND_OK 0x10
#define RVT_FLUSH_SEND 0x20
#define RVT_FLUSH_RECV 0x40
#define RVT_PROCESS_OR_FLUSH_SEND \
	(RVT_PROCESS_SEND_OK | RVT_FLUSH_SEND)
#define RVT_SEND_OR_FLUSH_OR_RECV_OK \
	(RVT_PROCESS_SEND_OK | RVT_FLUSH_SEND | RVT_PROCESS_RECV_OK)

/*
 * Internal send flags
 */
#define RVT_SEND_RESERVE_USED IB_SEND_RESERVED_START
#define RVT_SEND_COMPLETION_ONLY (IB_SEND_RESERVED_START << 1)
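
/**
 * struct rvt_ud_wr - IB UD work plus AH cache
 * @wr: valid IB work request
 * @attr: pointer to an allocated AH attribute
 *
 * Special case the UD WR so we can keep track of the AH attributes.
 *
 * NOTE: This data structure is strictly ordered wr then attr. I.e the attr
 * MUST come after wr.  The ib_ud_wr is sized and copied in rvt_post_one_wr.
 * The copy assumes that wr is first.
 */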
struct rvt_ud_wr {
	struct ib_ud_wr wr;
	struct rdma_ah_attr *attr;
};
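
/*
 * Send work request queue entry.
 * The size of the sg_list is determined when the QP is created and stored
 * in qp->s_max_sge.
 */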
struct rvt_swqe {
	union {
		struct ib_send_wr wr;   /* don't use wr.sg_list */
		struct rvt_ud_wr ud_wr;
		struct ib_reg_wr reg_wr;
		struct ib_rdma_wr rdma_wr;
		struct ib_atomic_wr atomic_wr;
	};
	u32 psn;                /* first packet sequence number */
	u32 lpsn;               /* last packet sequence number */
	u32 ssn;                /* send sequence number */
	u32 length;             /* total length of data in sg_list */
	void *priv;             /* driver dependent field */
	struct rvt_sge sg_list[];
};
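
/**
 * struct rvt_krwq - kernel struct receive work request
 * @p_lock: lock to protect producer of the kernel buffer
 * @head: index of next entry to fill
 * @c_lock: lock to protect consumer of the kernel buffer
 * @tail: index of next entry to pull
 * @count: approximate count of receive entries posted
 * @curr_wq: pointer to the current receive work queue entries
 * @wq: the receive work queue entries
 *
 * This structure holds the head and tail pointers and the receive
 * work queue entries for QPs in kernel mode.
 */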
struct rvt_krwq {
	spinlock_t p_lock;	/* protect producer */
	u32 head;               /* new work requests posted to the head */

	/* protect consumer */
	spinlock_t c_lock ____cacheline_aligned_in_smp;
	u32 tail;               /* receives pull requests from here */
	u32 count;		/* approx count of receive entries posted */
	struct rvt_rwqe *curr_wq; /* current work queue */
	struct rvt_rwqe wq[];
};

/**
 * rvt_get_swqe_ah - Return the pointer to the struct rvt_ah
 * @swqe: valid Send WQE
 */
static inline struct rvt_ah *rvt_get_swqe_ah(struct rvt_swqe *swqe)
{
	return ibah_to_rvtah(swqe->ud_wr.wr.ah);
}

/**
 * rvt_get_swqe_ah_attr - Return the cached ah attribute information
 * @swqe: valid Send WQE
 */
static inline struct rdma_ah_attr *rvt_get_swqe_ah_attr(struct rvt_swqe *swqe)
{
	return swqe->ud_wr.attr;
}

/**
 * rvt_get_swqe_remote_qpn - Access the remote QPN value
 * @swqe: valid Send WQE
 */
static inline u32 rvt_get_swqe_remote_qpn(struct rvt_swqe *swqe)
{
	return swqe->ud_wr.wr.remote_qpn;
}

/**
 * rvt_get_swqe_remote_qkey - Access the remote qkey value
 * @swqe: valid Send WQE
 */
static inline u32 rvt_get_swqe_remote_qkey(struct rvt_swqe *swqe)
{
	return swqe->ud_wr.wr.remote_qkey;
}

/**
 * rvt_get_swqe_pkey_index - Access the pkey index
 * @swqe: valid Send WQE
 */
static inline u16 rvt_get_swqe_pkey_index(struct rvt_swqe *swqe)
{
	return swqe->ud_wr.wr.pkey_index;
}

struct rvt_rq {
	struct rvt_rwq *wq;
	struct rvt_krwq *kwq;
	u32 size;               /* size of RWQE array */
	u8 max_sge;
	/* protect changes in this struct */
	spinlock_t lock ____cacheline_aligned_in_smp;
};
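
/*
 * This structure holds the information that the send tasklet needs
 * to send a RDMA read response or atomic operation.
 */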
struct rvt_ack_entry {
	struct rvt_sge rdma_sge;
	u64 atomic_data;
	u32 psn;
	u32 lpsn;
	u8 opcode;
	u8 sent;
	void *priv;
};

#define RC_QP_SCALING_INTERVAL 5

#define RVT_OPERATION_PRIV 0x00000001
#define RVT_OPERATION_ATOMIC 0x00000002
#define RVT_OPERATION_ATOMIC_SGE 0x00000004
#define RVT_OPERATION_LOCAL 0x00000008
#define RVT_OPERATION_USE_RESERVE 0x00000010
#define RVT_OPERATION_IGN_RNR_CNT 0x00000020

#define RVT_OPERATION_MAX (IB_WR_RESERVED10 + 1)
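
/**
 * struct rvt_operation_params - op table entry
 * @length: the length to copy into the swqe entry
 * @qpt_support: a bit mask indicating QP type support
 * @flags: RVT_OPERATION flags (see above)
 *
 * This supports table driven post send so that
 * drivers can support differing sets of operations.
 */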
struct rvt_operation_params {
	size_t length;
	u32 qpt_support;
	u32 flags;
};
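
/*
 * Common variables are protected by both r_rq.lock and s_lock in that order
 * which only happens in modify_qp() or changing the QP 'state'.
 */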
struct rvt_qp {
	struct ib_qp ibqp;
	void *priv; /* Driver private data */
	/* read mostly fields above and below */
	struct rdma_ah_attr remote_ah_attr;
	struct rdma_ah_attr alt_ah_attr;
	struct rvt_qp __rcu *next;           /* link list for QPN hash table */
	struct rvt_swqe *s_wq;  /* send work queue */
	struct rvt_mmap_info *ip;

	unsigned long timeout_jiffies;  /* computed from timeout */

	int srate_mbps;		/* s_srate (below) converted to Mbit/s */
	pid_t pid;		/* pid for user mode QPs */
	u32 remote_qpn;
	u32 qkey;               /* QKEY for this QP (for UD or RD) */
	u32 s_size;             /* send work queue size */

	u16 pmtu;		/* decoded from path_mtu */
	u8 log_pmtu;		/* shift for pmtu */
	u8 state;               /* QP state */
	u8 allowed_ops;		/* high order bits of allowed opcodes */
	u8 qp_access_flags;
	u8 alt_timeout;         /* Alternate path timeout for this QP */
	u8 timeout;             /* Timeout for this QP */
	u8 s_srate;
	u8 s_mig_state;
	u8 port_num;
	u8 s_pkey_index;        /* PKEY index to use */
	u8 s_alt_pkey_index;    /* Alternate path PKEY index to use */
	u8 r_max_rd_atomic;     /* max number of RDMA read/atomic to receive */
	u8 s_max_rd_atomic;     /* max number of RDMA read/atomic to send */
	u8 s_retry_cnt;         /* number of times to retry */
	u8 s_rnr_retry_cnt;
	u8 r_min_rnr_timer;     /* retry timeout value for RNR NAKs */
	u8 s_max_sge;           /* size of s_wq->sg_list */
	u8 s_draining;

	/* start of read/write fields */
	atomic_t refcount ____cacheline_aligned_in_smp;
	wait_queue_head_t wait;

	struct rvt_ack_entry *s_ack_queue;
	struct rvt_sge_state s_rdma_read_sge;

	spinlock_t r_lock ____cacheline_aligned_in_smp; /* used for APM */
	u32 r_psn;              /* expected rcv packet sequence number */
	unsigned long r_aflags;
	u64 r_wr_id;            /* ID for current receive WQE */
	u32 r_ack_psn;          /* PSN for next ACK or atomic ACK */
	u32 r_len;              /* total length of r_sge */
	u32 r_rcv_len;          /* receive data len processed */
	u32 r_msn;              /* message sequence number */

	u8 r_state;             /* opcode of last packet received */
	u8 r_flags;
	u8 r_head_ack_queue;    /* index into s_ack_queue[] */
	u8 r_adefered;          /* deferred ack count */

	struct list_head rspwait;       /* link for waiting to respond */

	struct rvt_sge_state r_sge;     /* current receive data */
	struct rvt_rq r_rq;             /* receive work queue */

	/* post send line */
	spinlock_t s_hlock ____cacheline_aligned_in_smp;
	u32 s_head;             /* new entries added here */
	u32 s_next_psn;         /* PSN for next request */
	u32 s_avail;            /* number of entries avail */
	u32 s_ssn;              /* SSN of tail entry */
	atomic_t s_reserved_used; /* reserved entries in use */

	spinlock_t s_lock ____cacheline_aligned_in_smp;
	u32 s_flags;
	struct rvt_sge_state *s_cur_sge;
	struct rvt_swqe *s_wqe;
	struct rvt_sge_state s_sge;     /* current send request data */
	struct rvt_mregion *s_rdma_mr;
	u32 s_len;              /* total length of s_sge */
	u32 s_rdma_read_len;    /* total length of s_rdma_read_sge */
	u32 s_last_psn;         /* last response PSN processed */
	u32 s_sending_psn;      /* lowest PSN that is being sent */
	u32 s_sending_hpsn;     /* highest PSN that is being sent */
	u32 s_psn;              /* current packet sequence number */
	u32 s_ack_rdma_psn;     /* PSN for sending RDMA read responses */
	u32 s_ack_psn;          /* PSN for acking sends and RDMA writes */
	u32 s_tail;             /* next entry to process */
	u32 s_cur;              /* current work queue entry */
	u32 s_acked;            /* last un-ACK'ed entry */
	u32 s_last;             /* last completed entry */
	u32 s_lsn;              /* limit sequence number (credit) */
	u32 s_ahgpsn;           /* set to the psn in the copy of the header */
	u16 s_cur_size;         /* size of send packet in bytes */
	u16 s_rdma_ack_cnt;
	u8 s_hdrwords;          /* size of s_hdr in 32 bit words */
	s8 s_ahgidx;
	u8 s_state;             /* opcode of last packet sent */
	u8 s_ack_state;         /* opcode of packet to ACK */
	u8 s_nak_state;         /* non-zero if NAK is pending */
	u8 r_nak_state;         /* non-zero if NAK is pending */
	u8 s_retry;             /* requester retry counter */
	u8 s_rnr_retry;         /* requester RNR retry counter */
	u8 s_num_rd_atomic;     /* number of RDMA read/atomic pending */
	u8 s_tail_ack_queue;    /* index into s_ack_queue[] */
	u8 s_acked_ack_queue;   /* index into s_ack_queue[] */

	struct rvt_sge_state s_ack_rdma_sge;
	struct timer_list s_timer;
	struct hrtimer s_rnr_timer;

	atomic_t local_ops_pending; /* number of fast_reg/local_inv reqs */

	/*
	 * This sge list MUST be last. Do not add anything below here.
	 */
	struct rvt_sge r_sg_list[] /* verified SGEs */
		____cacheline_aligned_in_smp;
};

struct rvt_srq {
	struct ib_srq ibsrq;
	struct rvt_rq rq;
	struct rvt_mmap_info *ip;
	/* send signal when number of RWQEs < limit */
	u32 limit;
};

static inline struct rvt_srq *ibsrq_to_rvtsrq(struct ib_srq *ibsrq)
{
	return container_of(ibsrq, struct rvt_srq, ibsrq);
}

static inline struct rvt_qp *ibqp_to_rvtqp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct rvt_qp, ibqp);
}

#define RVT_QPN_MAX BIT(24)
#define RVT_QPNMAP_ENTRIES (RVT_QPN_MAX / PAGE_SIZE / BITS_PER_BYTE)
#define RVT_BITS_PER_PAGE (PAGE_SIZE * BITS_PER_BYTE)
#define RVT_BITS_PER_PAGE_MASK (RVT_BITS_PER_PAGE - 1)
#define RVT_QPN_MASK IB_QPN_MASK
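
/*
 * QPN-map pages start out as NULL, they get allocated upon
 * first use and are never deallocated. This way,
 * large bitmaps are not allocated unless large numbers of QPs are used.
 */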
struct rvt_qpn_map {
	void *page;
};

struct rvt_qpn_table {
	spinlock_t lock; /* protect changes to the qp table */
	unsigned flags;         /* flags for QP0/1 allocated for each port */
	u32 last;               /* last QP number allocated */
	u32 nmaps;              /* size of the map table */
	u16 limit;
	u8  incr;
	/* bit map of free QP numbers other than 0/1 */
	struct rvt_qpn_map map[RVT_QPNMAP_ENTRIES];
};

struct rvt_qp_ibdev {
	u32 qp_table_size;
	u32 qp_table_bits;
	struct rvt_qp __rcu **qp_table;
	spinlock_t qpt_lock; /* qptable lock */
	struct rvt_qpn_table qpn_table;
};
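
/*
 * There is one struct rvt_mcast for each multicast GID.
 * All attached QPs are then stored as a list of
 * struct rvt_mcast_qp.
 */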
struct rvt_mcast_qp {
	struct list_head list;
	struct rvt_qp *qp;
};

struct rvt_mcast_addr {
	union ib_gid mgid;
	u16 lid;
};

struct rvt_mcast {
	struct rb_node rb_node;
	struct rvt_mcast_addr mcast_addr;
	struct list_head qp_list;
	wait_queue_head_t wait;
	atomic_t refcount;
	int n_attached;
};
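
/*
 * Since struct rvt_swqe is not a fixed size, we can't simply index into
 * struct rvt_qp.s_wq.  This function does the array index computation.
 */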
static inline struct rvt_swqe *rvt_get_swqe_ptr(struct rvt_qp *qp,
						unsigned n)
{
	return (struct rvt_swqe *)((char *)qp->s_wq +
				   (sizeof(struct rvt_swqe) +
				    qp->s_max_sge *
				    sizeof(struct rvt_sge)) * n);
}
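
/*
 * Since struct rvt_rwqe is not a fixed size, we can't simply index into
 * struct rvt_rwq.wq.  This function does the array index computation.
 */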
static inline struct rvt_rwqe *rvt_get_rwqe_ptr(struct rvt_rq *rq, unsigned n)
{
	return (struct rvt_rwqe *)
		((char *)rq->kwq->curr_wq +
		 (sizeof(struct rvt_rwqe) +
		  rq->max_sge * sizeof(struct ib_sge)) * n);
}

/**
 * rvt_is_user_qp - return if this is user mode QP
 * @qp: the target QP
 */
static inline bool rvt_is_user_qp(struct rvt_qp *qp)
{
	return !!qp->pid;
}

/**
 * rvt_get_qp - get a QP reference
 * @qp: the QP to hold
 */
static inline void rvt_get_qp(struct rvt_qp *qp)
{
	atomic_inc(&qp->refcount);
}

/**
 * rvt_put_qp - release a QP reference
 * @qp: the QP to release
 */
static inline void rvt_put_qp(struct rvt_qp *qp)
{
	if (qp && atomic_dec_and_test(&qp->refcount))
		wake_up(&qp->wait);
}
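
/**
 * rvt_put_swqe - drop the mr refs held by swqe
 * @wqe: the send wqe
 *
 * This drops the mr refs held by @wqe's sge list.
 */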
static inline void rvt_put_swqe(struct rvt_swqe *wqe)
{
	int i;

	for (i = 0; i < wqe->wr.num_sge; i++) {
		struct rvt_sge *sge = &wqe->sg_list[i];

		rvt_put_mr(sge->mr);
	}
}
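
/**
 * rvt_qp_wqe_reserve - reserve operation
 * @qp: the rvt qp
 * @wqe: the send wqe
 *
 * This routine is used in post send to record
 * a wqe relative reserved operation use.
 */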
static inline void rvt_qp_wqe_reserve(
	struct rvt_qp *qp,
	struct rvt_swqe *wqe)
{
	atomic_inc(&qp->s_reserved_used);
}
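
/**
 * rvt_qp_wqe_unreserve - clean reserved operation
 * @qp: the rvt qp
 * @flags: send wqe flags
 *
 * This decrements the reserve use count.
 *
 * This call MUST precede the change to
 * s_last to insure that post send sees a stable
 * s_avail.
 *
 * An smp_mb__after_atomic() is used to insure
 * the compiler does not juggle the order of the s_last
 * ring index and the decrementing of s_reserved_used.
 */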
static inline void rvt_qp_wqe_unreserve(struct rvt_qp *qp, int flags)
{
	if (unlikely(flags & RVT_SEND_RESERVE_USED)) {
		atomic_dec(&qp->s_reserved_used);
		/* insure no compiler re-order up to s_last change */
		smp_mb__after_atomic();
	}
}

extern const enum ib_wc_opcode ib_rvt_wc_opcode[];
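
/*
 * Compare the lower 24 bits of the msn values.
 * Returns an integer <, ==, or > than zero.
 */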
static inline int rvt_cmp_msn(u32 a, u32 b)
{
	return (((int)a) - ((int)b)) << 8;
}

/**
 * rvt_compute_aeth - compute the AETH (syndrome + MSN)
 * @qp: the queue pair to compute the AETH for
 *
 * Returns the AETH.
 */
__be32 rvt_compute_aeth(struct rvt_qp *qp);

/**
 * rvt_get_credit - handle credit update in an AETH
 * @qp: the qp
 * @aeth: the Acknowledge Extended Transport Header
 *
 * The QP s_lock should be held.
 */
void rvt_get_credit(struct rvt_qp *qp, u32 aeth);

/**
 * rvt_restart_sge - rewind the sge state for a wqe
 * @ss: the sge state pointer
 * @wqe: the wqe to rewind
 * @len: the data length from the start of the wqe in bytes
 *
 * Returns the remaining data length.
 */
u32 rvt_restart_sge(struct rvt_sge_state *ss, struct rvt_swqe *wqe, u32 len);

/**
 * rvt_div_round_up_mtu - round up divide
 * @qp: the qp pair
 * @len: the length
 *
 * Perform a shift based mtu round up divide.
 */
static inline u32 rvt_div_round_up_mtu(struct rvt_qp *qp, u32 len)
{
	return (len + qp->pmtu - 1) >> qp->log_pmtu;
}

/**
 * rvt_div_mtu - divide by mtu
 * @qp: the qp pair
 * @len: the length
 *
 * Perform a shift based mtu divide.
 */
static inline u32 rvt_div_mtu(struct rvt_qp *qp, u32 len)
{
	return len >> qp->log_pmtu;
}
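
/**
 * rvt_timeout_to_jiffies - Convert a ULP timeout input into jiffies
 * @timeout: timeout input (0 - 31)
 *
 * Return a timeout value in jiffies.
 */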
static inline unsigned long rvt_timeout_to_jiffies(u8 timeout)
{
	if (timeout > 31)
		timeout = 31;

	return usecs_to_jiffies(1U << timeout) * 4096UL / 1000UL;
}
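
/**
 * rvt_lookup_qpn - return the QP with the given QPN
 * @rdi: the rvt device info
 * @rvp: the ibport
 * @qpn: the QP number to look up
 *
 * The caller must hold the rcu_read_lock(), and keep the lock until
 * the returned qp is no longer in use.
 */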
static inline struct rvt_qp *rvt_lookup_qpn(struct rvt_dev_info *rdi,
					    struct rvt_ibport *rvp,
					    u32 qpn) __must_hold(RCU)
{
	struct rvt_qp *qp = NULL;

	if (unlikely(qpn <= 1)) {
		qp = rcu_dereference(rvp->qp[qpn]);
	} else {
		u32 n = hash_32(qpn, rdi->qp_dev->qp_table_bits);

		for (qp = rcu_dereference(rdi->qp_dev->qp_table[n]); qp;
			qp = rcu_dereference(qp->next))
			if (qp->ibqp.qp_num == qpn)
				break;
	}
	return qp;
}
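
/**
 * rvt_mod_retry_timer_ext - mod a retry timer
 * @qp: the QP
 * @shift: timeout shift to wait for multiple packets
 *
 * Modify a potentially already running retry timer.
 */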
static inline void rvt_mod_retry_timer_ext(struct rvt_qp *qp, u8 shift)
{
	struct ib_qp *ibqp = &qp->ibqp;
	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);

	lockdep_assert_held(&qp->s_lock);
	qp->s_flags |= RVT_S_TIMER;
	/* 4.096 usec. * (1 << qp->timeout) */
	mod_timer(&qp->s_timer, jiffies + rdi->busy_jiffies +
		  (qp->timeout_jiffies << shift));
}

static inline void rvt_mod_retry_timer(struct rvt_qp *qp)
{
	return rvt_mod_retry_timer_ext(qp, 0);
}

/**
 * rvt_put_qp_swqe - drop refs held by swqe
 * @qp: the send qp
 * @wqe: the send wqe
 *
 * This drops any references held by the swqe.
 */
static inline void rvt_put_qp_swqe(struct rvt_qp *qp, struct rvt_swqe *wqe)
{
	rvt_put_swqe(wqe);
	if (qp->allowed_ops == IB_OPCODE_UD)
		rdma_destroy_ah_attr(wqe->ud_wr.attr);
}

/**
 * rvt_qp_swqe_incr - increment ring index
 * @qp: the qp
 * @val: the starting value
 *
 * Return: the new value wrapping as appropriate
 */
static inline u32
rvt_qp_swqe_incr(struct rvt_qp *qp, u32 val)
{
	if (++val >= qp->s_size)
		val = 0;
	return val;
}

int rvt_error_qp(struct rvt_qp *qp, enum ib_wc_status err);
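
/**
 * rvt_recv_cq - add a new entry to completion queue
 *		 by receive queue
 * @qp: receive queue
 * @wc: work completion entry to add
 * @solicited: true if @wc is solicited
 *
 * This is a wrapper for rvt_cq_enter called by the
 * receive queue. If rvt_cq_enter() fails, the QP is
 * moved to error state.
 */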
static inline void rvt_recv_cq(struct rvt_qp *qp, struct ib_wc *wc,
			       bool solicited)
{
	struct rvt_cq *cq = ibcq_to_rvtcq(qp->ibqp.recv_cq);

	if (unlikely(!rvt_cq_enter(cq, wc, solicited)))
		rvt_error_qp(qp, IB_WC_LOC_QP_OP_ERR);
}
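
/**
 * rvt_send_cq - add a new entry to completion queue
 *		 by send queue
 * @qp: send queue
 * @wc: work completion entry to add
 * @solicited: true if @wc is solicited
 *
 * This is a wrapper for rvt_cq_enter called by the
 * send queue. If rvt_cq_enter() fails, the QP is
 * moved to error state.
 */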
static inline void rvt_send_cq(struct rvt_qp *qp, struct ib_wc *wc,
			       bool solicited)
{
	struct rvt_cq *cq = ibcq_to_rvtcq(qp->ibqp.send_cq);

	if (unlikely(!rvt_cq_enter(cq, wc, solicited)))
		rvt_error_qp(qp, IB_WC_LOC_QP_OP_ERR);
}
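
/**
 * rvt_qp_complete_swqe - insert send completion
 * @qp: the qp
 * @wqe: the send wqe
 * @opcode: wc operation (driver dependent)
 * @status: completion status
 *
 * Update the s_last information, and then insert a send
 * completion into the completion queue if the qp indicates
 * it should be done.
 *
 * See IBTA 10.7.3.1 for info on completion control.
 *
 * Return: new last
 */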
static inline u32
rvt_qp_complete_swqe(struct rvt_qp *qp,
		     struct rvt_swqe *wqe,
		     enum ib_wc_opcode opcode,
		     enum ib_wc_status status)
{
	bool need_completion;
	u64 wr_id;
	u32 byte_len, last;
	int flags = wqe->wr.send_flags;

	rvt_qp_wqe_unreserve(qp, flags);
	rvt_put_qp_swqe(qp, wqe);

	need_completion =
		!(flags & RVT_SEND_RESERVE_USED) &&
		(!(qp->s_flags & RVT_S_SIGNAL_REQ_WR) ||
		(flags & IB_SEND_SIGNALED) ||
		status != IB_WC_SUCCESS);
	if (need_completion) {
		wr_id = wqe->wr.wr_id;
		byte_len = wqe->length;
		/* above fields required before writing s_last */
	}
	last = rvt_qp_swqe_incr(qp, qp->s_last);
	/* see rvt_qp_is_avail() */
	smp_store_release(&qp->s_last, last);
	if (need_completion) {
		struct ib_wc w = {
			.wr_id = wr_id,
			.status = status,
			.opcode = opcode,
			.qp = &qp->ibqp,
			.byte_len = byte_len,
		};
		rvt_send_cq(qp, &w, status != IB_WC_SUCCESS);
	}
	return last;
}

extern const int ib_rvt_state_ops[];

struct rvt_dev_info;
int rvt_get_rwqe(struct rvt_qp *qp, bool wr_id_only);
void rvt_comm_est(struct rvt_qp *qp);
void rvt_rc_error(struct rvt_qp *qp, enum ib_wc_status err);
unsigned long rvt_rnr_tbl_to_usec(u32 index);
enum hrtimer_restart rvt_rc_rnr_retry(struct hrtimer *t);
void rvt_add_rnr_timer(struct rvt_qp *qp, u32 aeth);
void rvt_del_timers_sync(struct rvt_qp *qp);
void rvt_stop_rc_timers(struct rvt_qp *qp);
void rvt_add_retry_timer_ext(struct rvt_qp *qp, u8 shift);
static inline void rvt_add_retry_timer(struct rvt_qp *qp)
{
	rvt_add_retry_timer_ext(qp, 0);
}

void rvt_copy_sge(struct rvt_qp *qp, struct rvt_sge_state *ss,
		  void *data, u32 length,
		  bool release, bool copy_last);
void rvt_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
		       enum ib_wc_status status);
void rvt_ruc_loopback(struct rvt_qp *qp);
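
/**
 * struct rvt_qp_iter - the iterator for QPs
 * @qp: the current QP
 *
 * This structure is used to facilitate walking
 * the rvt_qp structures.
 */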
struct rvt_qp_iter {
	struct rvt_qp *qp;
	/* the device being iterated */
	struct rvt_dev_info *rdi;
	/* callback invoked for each QP visited */
	void (*cb)(struct rvt_qp *qp, u64 v);
	/* value passed through to the callback */
	u64 v;
	/* number of special QPs (QP0/QP1) */
	int specials;
	/* current position in the QP table */
	int n;
};

/**
 * ib_cq_tail - Return tail index of cq buffer
 * @send_cq: The cq for send
 *
 * This is called in qp_iter_print to get the tail
 * of the cq buffer.
 */
static inline u32 ib_cq_tail(struct ib_cq *send_cq)
{
	struct rvt_cq *cq = ibcq_to_rvtcq(send_cq);

	return ibcq_to_rvtcq(send_cq)->ip ?
	       RDMA_READ_UAPI_ATOMIC(cq->queue->tail) :
	       ibcq_to_rvtcq(send_cq)->kqueue->tail;
}

/**
 * ib_cq_head - Return head index of cq buffer
 * @send_cq: The cq for send
 *
 * This is called in qp_iter_print to get the head
 * of the cq buffer.
 */
static inline u32 ib_cq_head(struct ib_cq *send_cq)
{
	struct rvt_cq *cq = ibcq_to_rvtcq(send_cq);

	return ibcq_to_rvtcq(send_cq)->ip ?
	       RDMA_READ_UAPI_ATOMIC(cq->queue->head) :
	       ibcq_to_rvtcq(send_cq)->kqueue->head;
}
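
/**
 * rvt_free_rq - free memory allocated for rvt_rq struct
 * @rq: request queue data structure
 *
 * This function should only be called if the rvt_mmap_info()
 * has not succeeded.
 */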
static inline void rvt_free_rq(struct rvt_rq *rq)
{
	kvfree(rq->kwq);
	rq->kwq = NULL;
	vfree(rq->wq);
	rq->wq = NULL;
}

/**
 * rvt_to_iport - Get the ibport pointer
 * @qp: the qp pointer
 *
 * This function returns the ibport pointer from the qp pointer.
 */
static inline struct rvt_ibport *rvt_to_iport(struct rvt_qp *qp)
{
	struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);

	return rdi->ports[qp->port_num - 1];
}
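
/**
 * rvt_rc_credit_avail - determine if credits are available
 * @qp: the rc qp
 * @wqe: the send wqe
 *
 * This function returns false when SSN credit flow control blocks
 * @wqe; it also sets RVT_S_WAIT_SSN_CREDIT and counts the wait in
 * the port statistics. Otherwise it returns true.
 */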
static inline bool rvt_rc_credit_avail(struct rvt_qp *qp, struct rvt_swqe *wqe)
{
	lockdep_assert_held(&qp->s_lock);
	if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT) &&
	    rvt_cmp_msn(wqe->ssn, qp->s_lsn + 1) > 0) {
		struct rvt_ibport *rvp = rvt_to_iport(qp);

		qp->s_flags |= RVT_S_WAIT_SSN_CREDIT;
		rvp->n_rc_crwaits++;
		return false;
	}
	return true;
}

struct rvt_qp_iter *rvt_qp_iter_init(struct rvt_dev_info *rdi,
				     u64 v,
				     void (*cb)(struct rvt_qp *qp, u64 v));
int rvt_qp_iter_next(struct rvt_qp_iter *iter);
void rvt_qp_iter(struct rvt_dev_info *rdi,
		 u64 v,
		 void (*cb)(struct rvt_qp *qp, u64 v));
void rvt_qp_mr_clean(struct rvt_qp *qp, u32 lkey);

#endif          /* DEF_RDMAVT_INCQP_H */