/linux-4.4.14/drivers/infiniband/hw/qib/ |
H A D | qib_rc.c |
    44  u32 psn, u32 pmtu) restart_sge()
    48  len = ((psn - wqe->psn) & QIB_PSN_MASK) * pmtu; restart_sge()
    149  qp->s_ack_rdma_psn = e->psn;
    162  bth2 = e->psn & QIB_PSN_MASK;
    323  wqe->psn = qp->s_next_psn; qib_make_rc_req()
    343  wqe->lpsn = wqe->psn; qib_make_rc_req()
    383  wqe->lpsn = wqe->psn; qib_make_rc_req()
    458  wqe->lpsn = wqe->psn; qib_make_rc_req()
    600  len = ((qp->s_psn - wqe->psn) & QIB_PSN_MASK) * pmtu; qib_make_rc_req()
    618  delta = (((int) bth2 - (int) wqe->psn) << 8) >> 8; qib_make_rc_req()
    779  * @psn: the packet sequence number to restart at
    785  static void reset_psn(struct qib_qp *qp, u32 psn) reset_psn() argument
    797  if (qib_cmp24(psn, wqe->psn) <= 0) { reset_psn()
    812  diff = qib_cmp24(psn, wqe->psn); reset_psn()
    855  qp->s_psn = psn; reset_psn()
    870  static void qib_restart_rc(struct qib_qp *qp, u32 psn, int wait) qib_restart_rc() argument
    892  ibp->n_rc_resends += (qp->s_psn - psn) & QIB_PSN_MASK; qib_restart_rc()
    899  reset_psn(qp, psn); qib_restart_rc()
    944  * This would be psn+1 except when RDMA reads are present.
    946  static void reset_sending_psn(struct qib_qp *qp, u32 psn) reset_sending_psn() argument
    954  if (qib_cmp24(psn, wqe->lpsn) <= 0) { reset_sending_psn()
    958  qp->s_sending_psn = psn + 1; reset_sending_psn()
    978  u32 psn; qib_rc_send_complete() local
    997  psn = be32_to_cpu(ohdr->bth[2]); qib_rc_send_complete()
    998  reset_sending_psn(qp, psn); qib_rc_send_complete()
    1004  if ((psn & IB_BTH_REQ_ACK) && qp->s_acked != qp->s_tail && qib_rc_send_complete()
    1046  static inline void update_last_psn(struct qib_qp *qp, u32 psn) update_last_psn() argument
    1048  qp->s_last_psn = psn; update_last_psn()
    1106  qp->s_psn = wqe->psn; do_rc_completion()
    1121  * @psn: the packet sequence number of the ACK
    1129  static int do_rc_ack(struct qib_qp *qp, u32 aeth, u32 psn, int opcode, do_rc_ack() argument
    1151  ack_psn = psn; do_rc_ack()
    1245  if (qib_cmp24(qp->s_psn, psn) <= 0) do_rc_ack()
    1246  reset_psn(qp, psn + 1); do_rc_ack()
    1247  } else if (qib_cmp24(qp->s_psn, psn) <= 0) { do_rc_ack()
    1249  qp->s_psn = psn + 1; do_rc_ack()
    1258  update_last_psn(qp, psn); do_rc_ack()
    1276  update_last_psn(qp, psn - 1); do_rc_ack()
    1278  ibp->n_rc_resends += (qp->s_psn - psn) & QIB_PSN_MASK; do_rc_ack()
    1280  reset_psn(qp, psn); do_rc_ack()
    1295  update_last_psn(qp, psn - 1); do_rc_ack()
    1306  qib_restart_rc(qp, psn, 0); do_rc_ack()
    1352  static void rdma_seq_err(struct qib_qp *qp, struct qib_ibport *ibp, u32 psn, rdma_seq_err() argument
    1365  while (qib_cmp24(psn, wqe->lpsn) > 0) { rdma_seq_err()
    1391  * @psn: the packet sequence number for this packet
    1404  u32 psn, u32 hdrsize, u32 pmtu, qib_rc_rcv_resp()
    1421  if ((qib_cmp24(psn, qp->s_sending_psn) >= 0) && qib_rc_rcv_resp()
    1444  if (qib_cmp24(psn, qp->s_next_psn) >= 0) qib_rc_rcv_resp()
    1448  diff = qib_cmp24(psn, qp->s_last_psn); qib_rc_rcv_resp()
    1464  if (qib_cmp24(psn, qp->s_last_psn + 1) != 0) qib_rc_rcv_resp()
    1486  if (!do_rc_ack(qp, aeth, psn, opcode, val, rcd) || qib_rc_rcv_resp()
    1499  wqe, psn, pmtu); qib_rc_rcv_resp()
    1504  if (unlikely(qib_cmp24(psn, qp->s_last_psn + 1))) qib_rc_rcv_resp()
    1533  update_last_psn(qp, psn); qib_rc_rcv_resp()
    1540  if (!do_rc_ack(qp, aeth, psn, opcode, 0, rcd)) qib_rc_rcv_resp()
    1558  wqe, psn, pmtu); qib_rc_rcv_resp()
    1563  if (unlikely(qib_cmp24(psn, qp->s_last_psn + 1))) qib_rc_rcv_resp()
    1583  (void) do_rc_ack(qp, aeth, psn, qib_rc_rcv_resp()
    1593  rdma_seq_err(qp, ibp, psn, rcd); qib_rc_rcv_resp()
    1615  * @psn: the packet sequence number for this packet
    1628  u32 psn, qib_rc_rcv_error()
    1701  if (qib_cmp24(psn, e->psn) >= 0) { qib_rc_rcv_error()
    1703  qib_cmp24(psn, e->lpsn) <= 0) qib_rc_rcv_error()
    1729  offset = ((psn - e->psn) & QIB_PSN_MASK) * OP()
    1752  e->psn = psn; OP()
    1777  if (!(psn & IB_BTH_REQ_ACK) || old_req)
    1797  qp->r_ack_psn = qp->s_ack_queue[i].psn - 1;
    1871  u32 psn; qib_rc_rcv() local
    1893  psn = be32_to_cpu(ohdr->bth[2]); qib_rc_rcv()
    1904  qib_rc_rcv_resp(ibp, ohdr, data, tlen, qp, opcode, psn, qib_rc_rcv()
    1910  diff = qib_cmp24(psn, qp->r_psn); qib_rc_rcv()
    1912  if (qib_rc_rcv_error(ohdr, data, qp, opcode, psn, diff, rcd)) qib_rc_rcv()
    2146  e->psn = psn; OP()
    2215  e->psn = psn; OP()
    2216  e->lpsn = psn; OP()
    2236  qp->r_ack_psn = psn;
    2239  if (psn & (1 << 31))
    43  restart_sge(struct qib_sge_state *ss, struct qib_swqe *wqe, u32 psn, u32 pmtu) restart_sge() argument
    1399  qib_rc_rcv_resp(struct qib_ibport *ibp, struct qib_other_headers *ohdr, void *data, u32 tlen, struct qib_qp *qp, u32 opcode, u32 psn, u32 hdrsize, u32 pmtu, struct qib_ctxtdata *rcd) qib_rc_rcv_resp() argument
    1624  qib_rc_rcv_error(struct qib_other_headers *ohdr, void *data, struct qib_qp *qp, u32 opcode, u32 psn, int diff, struct qib_ctxtdata *rcd) qib_rc_rcv_error() argument
|
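Nearly every qib_rc.c hit above is doing modular arithmetic on 24-bit packet sequence numbers: QIB_PSN_MASK keeps values to 24 bits, qib_cmp24() gives a signed wraparound comparison (the "(((int) bth2 - (int) wqe->psn) << 8) >> 8" at line 618 is the same sign extension written out), and restart_sge() turns an acknowledged PSN distance into a byte offset. A minimal, self-contained sketch of that arithmetic; the helper names and bodies below are illustrative, not the kernel's:

#include <stdint.h>

#define PSN_MASK 0x00FFFFFFu	/* PSNs occupy the low 24 bits of BTH[2] */

/* Signed distance between two 24-bit PSNs, negative if a is "behind" b.
 * The kernel's qib_cmp24() gets the same result with a "<< 8 >> 8"
 * sign extension; here it is spelled out in portable C.
 */
static inline int32_t cmp24(uint32_t a, uint32_t b)
{
	int32_t d = (int32_t)((a - b) & PSN_MASK);

	return (d & 0x00800000) ? d - 0x01000000 : d;
}

/* Byte offset to restart a partially acknowledged request at, as in
 * restart_sge(): whole path-MTU-sized packets already covered by psn.
 */
static inline uint32_t restart_offset(uint32_t psn, uint32_t wqe_psn, uint32_t pmtu)
{
	return ((psn - wqe_psn) & PSN_MASK) * pmtu;
}

With this, a retransmit that resumes at PSN p of a request that started at wqe->psn simply skips restart_offset(p, wqe->psn, pmtu) bytes of the original payload.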
H A D | qib_uc.c |
    97  wqe->psn = qp->s_next_psn; qib_make_uc_req()
    248  u32 psn; qib_uc_rcv() local
    268  psn = be32_to_cpu(ohdr->bth[2]); qib_uc_rcv()
    272  if (unlikely(qib_cmp24(psn, qp->r_psn) != 0)) { qib_uc_rcv()
    277  qp->r_psn = psn; qib_uc_rcv()
|
H A D | qib_driver.c |
    315  u32 psn; qib_rcv_hdrerr() local
    346  psn = be32_to_cpu(ohdr->bth[2]); qib_rcv_hdrerr()
    384  diff = qib_cmp24(psn, qp->r_psn); qib_rcv_hdrerr()
|
H A D | qib_verbs.h |
    350  u32 psn; /* first packet sequence number */ member in struct:qib_swqe
    412  u32 psn; member in struct:qib_ack_entry
|
/linux-4.4.14/drivers/staging/rdma/ipath/ |
H A D | ipath_rc.c |
    43  u32 psn, u32 pmtu) restart_sge()
    47  len = ((psn - wqe->psn) & IPATH_PSN_MASK) * pmtu; restart_sge()
    139  qp->s_ack_rdma_psn = e->psn;
    152  bth2 = e->psn;
    299  wqe->psn = qp->s_next_psn; ipath_make_rc_req()
    319  wqe->lpsn = wqe->psn; ipath_make_rc_req()
    358  wqe->lpsn = wqe->psn; ipath_make_rc_req()
    431  wqe->lpsn = wqe->psn; ipath_make_rc_req()
    568  len = ((qp->s_psn - wqe->psn) & IPATH_PSN_MASK) * pmtu; ipath_make_rc_req()
    718  * @psn: the packet sequence number to restart at
    724  static void reset_psn(struct ipath_qp *qp, u32 psn) reset_psn() argument
    736  if (ipath_cmp24(psn, wqe->psn) <= 0) { reset_psn()
    751  diff = ipath_cmp24(psn, wqe->psn); reset_psn()
    794  qp->s_psn = psn; reset_psn()
    800  * @psn: packet sequence number for the request
    805  void ipath_restart_rc(struct ipath_qp *qp, u32 psn) ipath_restart_rc() argument
    832  dev->n_rc_resends += (qp->s_psn - psn) & IPATH_PSN_MASK; ipath_restart_rc()
    834  reset_psn(qp, psn); ipath_restart_rc()
    841  static inline void update_last_psn(struct ipath_qp *qp, u32 psn) update_last_psn() argument
    843  qp->s_last_psn = psn; update_last_psn()
    849  * @psn: the packet sequence number of the ACK
    857  static int do_rc_ack(struct ipath_qp *qp, u32 aeth, u32 psn, int opcode, do_rc_ack() argument
    885  ack_psn = psn; do_rc_ack()
    925  update_last_psn(qp, wqe->psn - 1); do_rc_ack()
    927  ipath_restart_rc(qp, wqe->psn); do_rc_ack()
    976  qp->s_psn = wqe->psn; do_rc_ack()
    1003  if (ipath_cmp24(qp->s_psn, psn) <= 0) { do_rc_ack()
    1004  reset_psn(qp, psn + 1); do_rc_ack()
    1007  } else if (ipath_cmp24(qp->s_psn, psn) <= 0) { do_rc_ack()
    1009  qp->s_psn = psn + 1; do_rc_ack()
    1014  update_last_psn(qp, psn); do_rc_ack()
    1030  update_last_psn(qp, psn - 1); do_rc_ack()
    1036  (qp->s_psn - psn) & IPATH_PSN_MASK; do_rc_ack()
    1038  reset_psn(qp, psn); do_rc_ack()
    1051  update_last_psn(qp, psn - 1); do_rc_ack()
    1062  ipath_restart_rc(qp, psn); do_rc_ack()
    1108  * @psn: the packet sequence number for this packet
    1122  u32 psn, u32 hdrsize, u32 pmtu, ipath_rc_rcv_resp()
    1140  if (ipath_cmp24(psn, qp->s_next_psn) >= 0) ipath_rc_rcv_resp()
    1144  diff = ipath_cmp24(psn, qp->s_last_psn); ipath_rc_rcv_resp()
    1185  if (!do_rc_ack(qp, aeth, psn, opcode, val) || ipath_rc_rcv_resp()
    1199  wqe, psn, pmtu); ipath_rc_rcv_resp()
    1204  if (unlikely(ipath_cmp24(psn, qp->s_last_psn + 1))) { OP()
    1235  update_last_psn(qp, psn);
    1245  if (!do_rc_ack(qp, aeth, psn, opcode, 0))
    1263  wqe, psn, pmtu);
    1268  if (unlikely(ipath_cmp24(psn, qp->s_last_psn + 1))) { OP()
    1298  (void) do_rc_ack(qp, aeth, psn,
    1325  * @psn: the packet sequence number for this packet
    1340  u32 psn, ipath_rc_rcv_error()
    1378  psn &= IPATH_PSN_MASK; ipath_rc_rcv_error()
    1403  if (ipath_cmp24(psn, e->psn) >= 0) { ipath_rc_rcv_error()
    1436  offset = ((psn - e->psn) & IPATH_PSN_MASK) * OP()
    1459  e->psn = psn; OP()
    1502  qp->r_ack_psn = qp->s_ack_queue[i].psn - 1;
    1576  u32 psn; ipath_rc_rcv() local
    1593  psn = be32_to_cpu(ohdr->bth[2]); ipath_rc_rcv()
    1605  psn = be32_to_cpu(((__be32 *) data)[0]); ipath_rc_rcv()
    1608  psn = be32_to_cpu(ohdr->bth[2]); ipath_rc_rcv()
    1620  ipath_rc_rcv_resp(dev, ohdr, data, tlen, qp, opcode, psn, ipath_rc_rcv()
    1626  diff = ipath_cmp24(psn, qp->r_psn); ipath_rc_rcv()
    1629  psn, diff, header_in_data)) ipath_rc_rcv()
    1848  e->psn = psn; OP()
    1916  e->psn = psn & IPATH_PSN_MASK; OP()
    1935  qp->r_ack_psn = psn;
    1938  if (psn & (1 << 31))
    42  restart_sge(struct ipath_sge_state *ss, struct ipath_swqe *wqe, u32 psn, u32 pmtu) restart_sge() argument
    1117  ipath_rc_rcv_resp(struct ipath_ibdev *dev, struct ipath_other_headers *ohdr, void *data, u32 tlen, struct ipath_qp *qp, u32 opcode, u32 psn, u32 hdrsize, u32 pmtu, int header_in_data) ipath_rc_rcv_resp() argument
    1335  ipath_rc_rcv_error(struct ipath_ibdev *dev, struct ipath_other_headers *ohdr, void *data, struct ipath_qp *qp, u32 opcode, u32 psn, int diff, int header_in_data) ipath_rc_rcv_error() argument
|
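The ipath variant mirrors the qib code, but the hits at 1593, 1605 and 1608 show the PSN word being read either from bth[2] in the parsed header or, when the chip delivered part of the header in the data buffer (header_in_data), from the first word of the payload. A hedged stand-in for that selection in plain C; types are simplified, and ntohl() is the userspace counterpart of the kernel's be32_to_cpu():

#include <stdint.h>
#include <arpa/inet.h>

/* Pick up the 32-bit word carrying the PSN: BTH[2] from the parsed header,
 * or the first payload word when the header spilled into the data buffer.
 */
static uint32_t rcv_psn_word(const uint32_t bth[3], const void *data,
			     int header_in_data)
{
	if (header_in_data)
		return ntohl(((const uint32_t *)data)[0]);
	return ntohl(bth[2]);
}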
H A D | ipath_uc.c |
    97  qp->s_psn = wqe->psn = qp->s_next_psn; ipath_make_uc_req()
    246  u32 psn; ipath_uc_rcv() local
    261  psn = be32_to_cpu(ohdr->bth[2]); ipath_uc_rcv()
    274  psn = be32_to_cpu(((__be32 *) data)[0]); ipath_uc_rcv()
    277  psn = be32_to_cpu(ohdr->bth[2]); ipath_uc_rcv()
    288  if (unlikely(ipath_cmp24(psn, qp->r_psn) != 0)) { ipath_uc_rcv()
    293  qp->r_psn = psn; ipath_uc_rcv()
|
H A D | ipath_verbs.h |
    287  u32 psn; /* first packet sequence number */ member in struct:ipath_swqe
    348  u32 psn; member in struct:ipath_ack_entry
    779  void ipath_restart_rc(struct ipath_qp *qp, u32 psn);
|
/linux-4.4.14/drivers/staging/rdma/hfi1/ |
H A D | rc.c |
    64  u32 psn, u32 pmtu) restart_sge()
    68  len = delta_psn(psn, wqe->psn) * pmtu; restart_sge()
    170  qp->s_ack_rdma_psn = e->psn;
    183  bth2 = mask_psn(e->psn);
    353  wqe->psn = qp->s_next_psn; hfi1_make_rc_req()
    373  wqe->lpsn = wqe->psn; hfi1_make_rc_req()
    412  wqe->lpsn = wqe->psn; hfi1_make_rc_req()
    486  wqe->lpsn = wqe->psn; hfi1_make_rc_req()
    630  len = (delta_psn(qp->s_psn, wqe->psn)) * pmtu; hfi1_make_rc_req()
    648  delta = delta_psn(bth2, wqe->psn); hfi1_make_rc_req()
    790  * @psn: the packet sequence number to restart at
    796  static void reset_psn(struct hfi1_qp *qp, u32 psn) reset_psn() argument
    808  if (cmp_psn(psn, wqe->psn) <= 0) { reset_psn()
    823  diff = cmp_psn(psn, wqe->psn); reset_psn()
    866  qp->s_psn = psn; reset_psn()
    882  static void restart_rc(struct hfi1_qp *qp, u32 psn, int wait) restart_rc() argument
    904  ibp->n_rc_resends += delta_psn(qp->s_psn, psn); restart_rc()
    911  reset_psn(qp, psn); restart_rc()
    957  * This would be psn+1 except when RDMA reads are present.
    959  static void reset_sending_psn(struct hfi1_qp *qp, u32 psn) reset_sending_psn() argument
    967  if (cmp_psn(psn, wqe->lpsn) <= 0) { reset_sending_psn()
    971  qp->s_sending_psn = psn + 1; reset_sending_psn()
    991  u32 psn; hfi1_rc_send_complete() local
    1010  psn = be32_to_cpu(ohdr->bth[2]); hfi1_rc_send_complete()
    1011  reset_sending_psn(qp, psn); hfi1_rc_send_complete()
    1017  if ((psn & IB_BTH_REQ_ACK) && qp->s_acked != qp->s_tail && hfi1_rc_send_complete()
    1051  trace_hfi1_rc_sendcomplete(qp, psn); hfi1_rc_send_complete()
    1061  static inline void update_last_psn(struct hfi1_qp *qp, u32 psn) update_last_psn() argument
    1063  qp->s_last_psn = psn; update_last_psn()
    1137  qp->s_psn = wqe->psn; do_rc_completion()
    1152  * @psn: the packet sequence number of the ACK
    1160  static int do_rc_ack(struct hfi1_qp *qp, u32 aeth, u32 psn, int opcode, do_rc_ack() argument
    1182  ack_psn = psn; do_rc_ack()
    1276  if (cmp_psn(qp->s_psn, psn) <= 0) do_rc_ack()
    1277  reset_psn(qp, psn + 1); do_rc_ack()
    1278  } else if (cmp_psn(qp->s_psn, psn) <= 0) { do_rc_ack()
    1280  qp->s_psn = psn + 1; do_rc_ack()
    1289  update_last_psn(qp, psn); do_rc_ack()
    1307  update_last_psn(qp, psn - 1); do_rc_ack()
    1309  ibp->n_rc_resends += delta_psn(qp->s_psn, psn); do_rc_ack()
    1311  reset_psn(qp, psn); do_rc_ack()
    1326  update_last_psn(qp, psn - 1); do_rc_ack()
    1337  restart_rc(qp, psn, 0); do_rc_ack()
    1383  static void rdma_seq_err(struct hfi1_qp *qp, struct hfi1_ibport *ibp, u32 psn, rdma_seq_err() argument
    1396  while (cmp_psn(psn, wqe->lpsn) > 0) { rdma_seq_err()
    1422  * @psn: the packet sequence number for this packet
    1433  u32 opcode, u32 psn, u32 hdrsize, u32 pmtu, rc_rcv_resp()
    1446  trace_hfi1_rc_ack(qp, psn); rc_rcv_resp()
    1449  if (cmp_psn(psn, qp->s_next_psn) >= 0) rc_rcv_resp()
    1453  diff = cmp_psn(psn, qp->s_last_psn); rc_rcv_resp()
    1469  if (cmp_psn(psn, qp->s_last_psn + 1) != 0) rc_rcv_resp()
    1491  if (!do_rc_ack(qp, aeth, psn, opcode, val, rcd) || rc_rcv_resp()
    1503  wqe, psn, pmtu); rc_rcv_resp()
    1508  if (unlikely(cmp_psn(psn, qp->s_last_psn + 1))) rc_rcv_resp()
    1537  update_last_psn(qp, psn); rc_rcv_resp()
    1544  if (!do_rc_ack(qp, aeth, psn, opcode, 0, rcd)) rc_rcv_resp()
    1561  wqe, psn, pmtu); rc_rcv_resp()
    1566  if (unlikely(cmp_psn(psn, qp->s_last_psn + 1))) rc_rcv_resp()
    1585  (void) do_rc_ack(qp, aeth, psn, rc_rcv_resp()
    1595  rdma_seq_err(qp, ibp, psn, rcd); rc_rcv_resp()
    1617  * @psn: the packet sequence number for this packet
    1627  struct hfi1_qp *qp, u32 opcode, u32 psn, int diff, rc_rcv_error()
    1636  trace_hfi1_rc_rcv_error(qp, psn); rc_rcv_error()
    1700  if (cmp_psn(psn, e->psn) >= 0) { rc_rcv_error()
    1702  cmp_psn(psn, e->lpsn) <= 0) rc_rcv_error()
    1728  offset = delta_psn(psn, e->psn) * qp->pmtu; OP()
    1750  e->psn = psn; OP()
    1775  if (!(psn & IB_BTH_REQ_ACK) || old_req)
    1949  u32 psn; hfi1_rc_rcv() local
    1980  psn = be32_to_cpu(ohdr->bth[2]); hfi1_rc_rcv()
    1991  rc_rcv_resp(ibp, ohdr, data, tlen, qp, opcode, psn, hfi1_rc_rcv()
    1999  diff = delta_psn(psn, qp->r_psn); hfi1_rc_rcv()
    2001  if (rc_rcv_error(ohdr, data, qp, opcode, psn, diff, rcd)) hfi1_rc_rcv()
    2233  e->psn = psn; OP()
    2305  e->psn = psn; OP()
    2306  e->lpsn = psn; OP()
    2329  qp->r_ack_psn = psn;
    2332  if (psn & (1 << 31))
    2394  u32 psn; hfi1_rc_hdrerr() local
    2405  psn = be32_to_cpu(ohdr->bth[2]); hfi1_rc_hdrerr()
    2410  diff = delta_psn(psn, qp->r_psn); hfi1_rc_hdrerr()
    63  restart_sge(struct hfi1_sge_state *ss, struct hfi1_swqe *wqe, u32 psn, u32 pmtu) restart_sge() argument
    1430  rc_rcv_resp(struct hfi1_ibport *ibp, struct hfi1_other_headers *ohdr, void *data, u32 tlen, struct hfi1_qp *qp, u32 opcode, u32 psn, u32 hdrsize, u32 pmtu, struct hfi1_ctxtdata *rcd) rc_rcv_resp() argument
    1626  rc_rcv_error(struct hfi1_other_headers *ohdr, void *data, struct hfi1_qp *qp, u32 opcode, u32 psn, int diff, struct hfi1_ctxtdata *rcd) rc_rcv_error() argument
|
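hfi1's rc.c is the same state machine with the mask arithmetic pushed into named helpers (mask_psn(), cmp_psn(), delta_psn()). The hits at 1017 and 2332 also show the other thing packed into the same BTH dword: bit 31 is the AckReq bit (IB_BTH_REQ_ACK, the "psn & (1 << 31)" test), while the PSN itself is only the low 24 bits. A small sketch of that layout; the constant names here are illustrative:

#include <stdbool.h>
#include <stdint.h>

#define BTH_PSN_MASK	0x00FFFFFFu	/* packet sequence number */
#define BTH_ACK_REQ	(1u << 31)	/* requester asked for an ACK */

static inline uint32_t bth2_psn(uint32_t bth2)
{
	return bth2 & BTH_PSN_MASK;	/* what mask_psn()/delta_psn() work on */
}

static inline bool bth2_ack_requested(uint32_t bth2)
{
	return (bth2 & BTH_ACK_REQ) != 0;	/* the "psn & (1 << 31)" test */
}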
H A D | trace.h |
    426  "f %d b %d qpn 0x%.6x a %d psn 0x%.8x"
    454  __field(u32, psn)
    506  __entry->psn =
    535  __entry->psn,
    1256  TP_PROTO(struct hfi1_qp *qp, u32 psn),
    1257  TP_ARGS(qp, psn),
    1262  __field(u32, psn)
    1273  __entry->psn = psn;
    1281  "[%s] qpn 0x%x s_flags 0x%x psn 0x%x s_psn 0x%x s_next_psn 0x%x s_sending_psn 0x%x sending_hpsn 0x%x r_psn 0x%x",
    1285  __entry->psn,
    1295  TP_PROTO(struct hfi1_qp *qp, u32 psn),
    1296  TP_ARGS(qp, psn)
    1300  TP_PROTO(struct hfi1_qp *qp, u32 psn),
    1301  TP_ARGS(qp, psn)
    1305  TP_PROTO(struct hfi1_qp *qp, u32 psn),
    1306  TP_ARGS(qp, psn)
    1310  TP_PROTO(struct hfi1_qp *qp, u32 psn),
    1311  TP_ARGS(qp, psn)
|
H A D | uc.c |
    115  wqe->psn = qp->s_next_psn; hfi1_make_uc_req()
    273  u32 psn; hfi1_uc_rcv() local
    318  psn = be32_to_cpu(ohdr->bth[2]); hfi1_uc_rcv()
    322  if (unlikely(cmp_psn(psn, qp->r_psn) != 0)) { hfi1_uc_rcv()
    327  qp->r_psn = psn; hfi1_uc_rcv()
|
H A D | user_sdma.c |
    1159  psn = val & mask; set_pkt_bth_psn() local
    1161  psn = (psn & ~BTH_SEQ_MASK) | ((psn + frags) & BTH_SEQ_MASK); set_pkt_bth_psn()
    1163  psn = psn + frags; set_pkt_bth_psn()
    1164  return psn & mask; set_pkt_bth_psn()
|
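set_pkt_bth_psn() in user_sdma.c advances the PSN of each generated packet by the number of fragments it consumed; depending on the packet type (line 1161 vs 1163), either only the low sequence bits move while the bits above BTH_SEQ_MASK are preserved, or the whole PSN advances. A sketch of that update with illustrative mask values rather than the driver's definitions:

#include <stdbool.h>
#include <stdint.h>

#define PSN_MASK	0x00FFFFFFu	/* illustrative: 24-bit PSN field */
#define SEQ_MASK	0x000007FFu	/* illustrative: low-order sequence bits */

static uint32_t next_pkt_psn(uint32_t psn, uint32_t frags, bool seq_only)
{
	if (seq_only)
		/* advance the sequence bits only, keep the upper bits */
		psn = (psn & ~SEQ_MASK) | ((psn + frags) & SEQ_MASK);
	else
		psn = psn + frags;
	return psn & PSN_MASK;
}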
H A D | verbs.h |
    357  u32 psn; /* first packet sequence number */ member in struct:hfi1_swqe
    419  u32 psn; member in struct:hfi1_ack_entry
    454  u32 s_ahgpsn; /* set to the psn in the copy of the header */
|
/linux-4.4.14/include/uapi/rdma/ |
H A D | ib_user_cm.h |
    135  __u32 psn; member in struct:ib_ucm_req
    159  __u32 psn; member in struct:ib_ucm_rep
|
/linux-4.4.14/drivers/infiniband/ulp/ipoib/ |
H A D | ipoib_cm.c |
    274  unsigned psn) ipoib_cm_modify_rx_qp()
    297  qp_attr.rq_psn = psn; ipoib_cm_modify_rx_qp()
    421  unsigned psn) ipoib_cm_send_rep()
    436  rep.starting_psn = psn; ipoib_cm_send_rep()
    445  unsigned psn; ipoib_cm_req_handler() local
    465  psn = prandom_u32() & 0xffffff; ipoib_cm_req_handler()
    466  ret = ipoib_cm_modify_rx_qp(dev, cm_id, p->qp, psn); ipoib_cm_req_handler()
    486  ret = ipoib_cm_send_rep(dev, cm_id, p->qp, &event->param.req_rcvd, psn); ipoib_cm_req_handler()
    272  ipoib_cm_modify_rx_qp(struct net_device *dev, struct ib_cm_id *cm_id, struct ib_qp *qp, unsigned psn) ipoib_cm_modify_rx_qp() argument
    419  ipoib_cm_send_rep(struct net_device *dev, struct ib_cm_id *cm_id, struct ib_qp *qp, struct ib_cm_req_event_param *req, unsigned psn) ipoib_cm_send_rep() argument
|
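On the IPoIB connected-mode passive side, the hits above show the PSN handshake in miniature: pick a random 24-bit value, program it as the receive queue's expected PSN (rq_psn), and advertise the same number as starting_psn in the CM REP so the active side begins sending there. A condensed, hypothetical helper along those lines; the real driver also walks the QP through INIT/RTR with several more attributes and checks every return value:

#include <linux/random.h>
#include <rdma/ib_cm.h>
#include <rdma/ib_verbs.h>

/* Hypothetical condensation, not the driver's code: seed both ends of the
 * connection with one random 24-bit starting PSN.
 */
static int seed_starting_psn(struct ib_qp *qp, struct ib_cm_rep_param *rep)
{
	struct ib_qp_attr qp_attr = {};
	u32 psn = prandom_u32() & 0xffffff;	/* PSNs are 24 bits wide */

	qp_attr.rq_psn = psn;			/* what the receiver expects first */
	rep->starting_psn = psn;		/* what the sender is told to use */
	return ib_modify_qp(qp, &qp_attr, IB_QP_RQ_PSN);
}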
/linux-4.4.14/drivers/infiniband/core/ |
H A D | ud_header.c |
    195  { STRUCT_FIELD(bth, psn),
|
H A D | ucm.c |
    763  param.starting_psn = cmd.psn; ib_ucm_send_req()
    808  param.starting_psn = cmd.psn; ib_ucm_send_rep()
|
/linux-4.4.14/include/rdma/ |
H A D | ib_pack.h |
    210  __be32 psn; member in struct:ib_unpacked_bth
|
/linux-4.4.14/drivers/infiniband/hw/mlx4/ |
H A D | qp.c |
    2198  sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1)); build_sriov_qp0_header()
    2425  sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1)); build_mlx_header()
|
/linux-4.4.14/drivers/infiniband/hw/mthca/ |
H A D | mthca_qp.c |
    1528  sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1)); build_mlx_header()
|
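The mlx4 and mthca hits above show the matching transmit side for the special QPs: a per-QP send_psn counter is stamped into the BTH PSN field, post-incremented, and masked to 24 bits so it wraps where a 24-bit comparison on the receiver expects it to. Stripped-down illustration in plain C; the kernel additionally byte-swaps the field with cpu_to_be32():

#include <stdint.h>

struct send_counter {
	uint32_t send_psn;	/* per-QP, increments once per packet */
};

/* Value destined for the BTH PSN field of the next packet. */
static uint32_t next_send_psn(struct send_counter *sc)
{
	return (sc->send_psn++) & ((1u << 24) - 1);
}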