/linux-4.4.14/drivers/infiniband/hw/qib/
D | qib_rc.c          | qp: argument of start_timer() and qib_make_rc_ack()  [all …]
D | qib_qp.c          | qp: argument of insert_qp()  [all …]
D | qib_uc.c          | qp: argument of qib_make_uc_req()  [all …]
D | qib_ruc.c         | qp: argument of qib_init_sge()  [all …]
D | qib_ud.c          | qp: local in qib_ud_loopback()  [all …]
D | qib_verbs.c       | qp: argument of qib_post_one_send()  [all …]
D | qib_verbs_mcast.c | qp: argument of qib_mcast_qp_alloc(); local in qib_mcast_qp_free() and qib_multicast_attach(); compared in qib_mcast_add()  [all …]
D | qib_driver.c      | qp: local in qib_rcv_hdrerr()  [all …]
D | qib_verbs.h       | qp: struct member (line 212); argument of get_swqe_ptr() and qib_send_ok(); in the qib_schedule_send() and qib_compute_aeth() prototypes  [all …]
D | qib_sdma.c        | qp: local in qib_sdma_verbs_send(); tx->qp used in complete_sdma_err_req()  [all …]
D | qib_keys.c        | qp: argument of qib_rkey_ok() and qib_reg_mr()
D | qib_mad.h         | qp: struct member (line 175)
D | qib_cq.c          | entry->qp used in qib_cq_enter() (line 91)

/linux-4.4.14/drivers/staging/rdma/hfi1/
D | rc.c          | qp: argument of start_timer() and make_rc_ack()  [all …]
D | qp.c          | qp: argument of insert_qp() and of the flush_tx_list() declaration; passed to trace_hfi1_qpinsert()  [all …]
D | uc.c          | qp: argument of hfi1_make_uc_req()  [all …]
D | ruc.c         | qp: argument of init_sge()  [all …]
D | ud.c          | qp: local in ud_loopback()  [all …]
D | qp.h          | qp: local in hfi1_lookup_qpn(); argument of clear_ahg()  [all …]
D | verbs.c       | qp: argument of post_one_send()  [all …]
D | verbs_mcast.c | qp: argument of mcast_qp_alloc(); local in mcast_qp_free() and hfi1_multicast_attach(); compared in mcast_add()  [all …]
D | driver.c      | qp: local in rcv_hdrerr()  [all …]
D | verbs.h       | qp: struct members (lines 224, 679); argument of get_swqe_ptr() and hfi1_send_ok(); in the hfi1_schedule_send() prototype  [all …]
D | trace.h       | qp: argument in TP_PROTO()/TP_ARGS() of the QP trace events  [all …]
D | diag.c        | qp: argument of snoop_send_dma_handler() and snoop_send_pio_handler()  [all …]
D | keys.c        | qp: argument of hfi1_rkey_ok()
D | Makefile      | qp.o listed among the driver objects (line 12)
D | hfi.h         | qp: struct member (line 331); parameter of the process_pio_send/process_dma_send callbacks and of the return_cnp(), snoop_send_dma_handler() and snoop_send_pio_handler() prototypes
D | pio.c         | qp: local in sc_piobufavail()

/linux-4.4.14/drivers/staging/rdma/ipath/
D | ipath_rc.c          | qp: argument of ipath_init_restart() and ipath_make_rc_ack()  [all …]
D | ipath_qp.c          | qp: argument of ipath_alloc_qpn() and ipath_free_qp()  [all …]
D | ipath_uc.c          | qp: argument of ipath_make_uc_req()  [all …]
D | ipath_ruc.c         | qp: argument of ipath_insert_rnr_queue() and ipath_init_sge()  [all …]
D | ipath_ud.c          | qp: local in ipath_ud_loopback()  [all …]
D | ipath_verbs.c       | qp: argument of ipath_post_one_send()  [all …]
D | ipath_verbs_mcast.c | qp: argument of ipath_mcast_qp_alloc(); local in ipath_mcast_qp_free(), ipath_multicast_attach() and ipath_multicast_detach(); compared in ipath_mcast_add()  [all …]
D | ipath_verbs.h       | qp: struct members (lines 158, 652); argument of get_swqe_ptr() and ipath_schedule_send()  [all …]
D | ipath_keys.c        | qp: argument of ipath_lkey_ok() and ipath_rkey_ok()
D | ipath_cq.c          | entry->qp used in ipath_cq_enter() (line 87)

/linux-4.4.14/drivers/ntb/
D | ntb_transport.c | qp: struct member (line 106) and parameter of the tx_handler/rx_handler callbacks; argument of the QP_TO_MW() macro; local in debugfs_read()  [all …]

/linux-4.4.14/drivers/infiniband/hw/mthca/
D | mthca_qp.c       | qp: argument of is_sqp(), is_qp0() and get_recv_wqe()  [all …]
D | mthca_provider.c | qp: local in mthca_create_qp()  [all …]
D | mthca_mcg.c      | qp[MTHCA_QP_PER_MGM]: struct member (line 43); used in mthca_multicast_attach() and mthca_multicast_detach()
D | mthca_provider.h | qp: struct member (line 289); argument of to_msqp()
D | mthca_eq.c       | qp: member of the packed event struct (line 144); eqe->event.qp.qpn read in mthca_eq_int()
D | mthca_cq.c       | qp: argument of handle_error_cqe(); used in mthca_poll_one(); local in mthca_poll_cq()
D | mthca_dev.h      | qp: struct member (line 259); in the mthca_free_err_wqe(), mthca_free_qp() and other prototypes
D | mthca_mad.c      | agent->qp->pd used in update_sm_ah() (line 89)

/linux-4.4.14/drivers/infiniband/hw/mlx4/
D | qp.c      | qp: struct member (line 82); argument of is_tunnel_qp() and is_sqp(); used in to_msqp()  [all …]
D | mad.c     | qp: used in mlx4_MAD_IFC(), update_sm_ah(), mlx4_ib_send_to_slave(), mlx4_ib_demux_mad(), ib_process_mad() and mlx4_ib_post_pv_qp_buf()  [all …]
D | cq.c      | qp: argument of use_tunnel_data() and mlx4_ib_qp_sw_comp(); local in mlx4_ib_poll_sw_comp()  [all …]
D | main.c    | qp: argument of __mlx4_ib_default_rules_match(), __mlx4_ib_create_default_rules() and __mlx4_ib_create_flow(); quotas.qp read in mlx4_ib_query_device()  [all …]
D | Makefile  | qp.o listed in the mlx4_ib-y objects (line 3)
D | mlx4_ib.h | qp: struct members (lines 409, 435); in the mlx4_ib_bind_mw() and mlx4_ib_destroy_qp() prototypes
D | mr.c      | qp: argument of mlx4_ib_bind_mw()

/linux-4.4.14/drivers/staging/rdma/amso1100/
D | c2_qp.c       | qp: argument of c2_set_qp_state() and c2_qp_modify()  [all …]
D | c2_ae.c       | qp: local in c2_ae_event()  [all …]
D | c2_cm.c       | qp: local in c2_llp_connect() and c2_llp_accept()  [all …]
D | c2_provider.c | qp: local in c2_add_ref(), c2_rem_ref() and c2_get_qp()  [all …]
D | c2_cq.c       | qp: argument of c2_cq_clean(); local in c2_poll_one()
D | c2.h          | qp: in the c2_free_qp(), c2_qp_modify(), c2_qp_set_read_limits() and c2_cq_clean() prototypes, among others
D | c2_vq.h       | qp: struct member (line 47)
D | c2_intr.c     | req->qp used in handle_vq() (line 185)
D | c2_vq.c       | r->qp cleared in vq_req_alloc() (line 113)

/linux-4.4.14/net/ipv4/
D | ip_fragment.c   | qp (struct ipq *): in the ip_frag_reasm() declaration; local in ip4_frag_match() and ip4_frag_init()  [all …]
D | inet_fragment.c | qp (struct inet_frag_queue *): local in inet_frag_intern()  [all …]

/linux-4.4.14/drivers/infiniband/hw/mlx5/
D | qp.c      | qp: argument of get_wqe(), get_recv_wqe(), mlx5_get_send_wqe() and mlx5_ib_read_user_wqe()  [all …]
D | odp.c     | qp: argument of mlx5_ib_page_fault_resume(), pagefault_single_data_segment() and pagefault_data_segments()  [all …]
D | cq.c      | qp: argument of handle_responder(), is_atomic_response() and mlx5_get_atomic_laddr()  [all …]
D | mlx5_ib.h | qp: struct member (line 355); in the mlx5_ib_destroy_qp(), mlx5_get_send_wqe(), mlx5_ib_read_user_wqe(), mlx5_ib_mr_pfault_handler(), mlx5_ib_odp_create_qp() and mlx5_ib_qp_disable/enable_pagefaults() prototypes and their inline stubs  [all …]
D | main.c    | umrc.qp used in destroy_umrc_res(); qp: local in create_umr_res()  [all …]
D | Makefile  | qp.o listed in the mlx5_ib-y objects (line 3)
D | mr.c      | umrc->qp passed to ib_post_send() in reg_umr(), mlx5_ib_update_mtt() and unreg_umr()

/linux-4.4.14/drivers/scsi/bnx2i/
D | bnx2i_hwi.c | ep->qp used in bnx2i_arm_cq_event_coalescing() and bnx2i_get_rq_buf()  [all …]
D | bnx2i.h     | qp (struct qp_info): struct member (line 760)

/linux-4.4.14/drivers/misc/mic/scif/
D | scif_nodeqp.c | qp: argument of scif_setup_qp_connect()  [all …]
D | scif_epd.c    | qp: local in scif_cleanup_ep_qp()  [all …]
D | scif_nm.c     | qp: local in scif_free_qp() and scif_cleanup_qp()  [all …]
D | scif_nodeqp.h | qp: in the scif_nodeqp_intrhandler(), scif_loopb_msg_handler(), scif_setup_qp_connect() and scif_setup_qp_accept() prototypes, among others
D | scif_api.c    | ep->qp_info.qp used in scif_open(), scif_listen(), scif_conn_func(), __scif_connect() and scif_accept()  [all …]
D | scif_epd.h    | qp: struct member (line 63)

/linux-4.4.14/drivers/staging/rdma/ehca/
D | ehca_uverbs.c   | qp: argument of ehca_mmap_qp()  [all …]
D | ehca_reqs.c     | qp: argument of ehca_write_swqe()  [all …]
D | ehca_irq.c      | qp: local in print_error_data(); argument of dispatch_qp_event()  [all …]
D | ehca_iverbs.h   | qp: in the ehca_bind_mw(), ehca_destroy_qp(), ehca_query_qp(), ehca_post_send(), ehca_post_recv(), ehca_attach_mcast(), ehca_detach_mcast() and ehca_add_to_err_list() prototypes
D | hipz_fns_core.h | qp: argument of hipz_update_sqa() and hipz_update_rqa()
D | ehca_cq.c       | qp: argument of ehca_cq_assign_qp(); local in ehca_cq_unassign_qp() and ehca_cq_get_qp()
D | ehca_classes.h  | qp: argument of the IS_SRQ()/HAS_SQ()/HAS_RQ() macros; in the ehca_cq_assign_qp() prototype
D | ehca_qp.c       | qp: argument of ehca_add_to_err_list() and ehca_query_qp()  [all …]
D | hcp_if.c        | qp: argument of hipz_h_destroy_qp()
D | hcp_if.h        | qp: in the hipz_h_destroy_qp() prototype (line 161)

/linux-4.4.14/drivers/infiniband/hw/ocrdma/
D | ocrdma_verbs.c | qp: argument of ocrdma_add_qpn_map(), ocrdma_del_qpn_map() and ocrdma_copy_qp_uresp()  [all …]
D | ocrdma_hw.c    | qp: argument of ocrdma_process_qpcat_error(); local in ocrdma_dispatch_ibevent()  [all …]
D | ocrdma_verbs.h | qp: in the ocrdma_del_flush_qp() prototype (line 107)

/linux-4.4.14/drivers/infiniband/core/
D | verbs.c       | qp: local in __ib_shared_qp_event_handler() and __ib_open_qp(); argument of __ib_insert_xrcd_qp()  [all …]
D | iwcm.c        | qp: argument of iwcm_modify_qp_err() and iwcm_modify_qp_sqd(); local in iw_cm_disconnect()  [all …]
D | uverbs_cmd.c  | qp: argument of put_qp_read() and put_qp_write(); local in create_qp(); wc->qp read in copy_wc_to_user()  [all …]
D | mad.c         | qp: argument of build_smp_wc() and ib_redirect_mad_qp(); used in ib_register_mad_agent(), ib_register_mad_snoop(), handle_outgoing_dr_smp(), ib_create_send_mad() and ib_send_mad()  [all …]
D | uverbs_main.c | qp: argument of ib_uverbs_detach_umcast(); local in ib_uverbs_cleanup_ucontext(); event->element.qp used in ib_uverbs_qp_event_handler()
D | cma.c         | qp: argument of cma_init_ud_qp() and cma_init_conn_qp(); local in rdma_create_qp()  [all …]
D | iwcm.h        | qp: struct member (line 50)
D | core_priv.h   | qp: in the ib_resolve_eth_dmac() prototype (line 49)
D | mad_priv.h    | qp: struct member (line 186)
D | agent.c       | agent->qp->pd used in agent_send_response() (line 102)

/linux-4.4.14/include/linux/
D | ntb_transport.h | qp: parameter of the rx_handler/tx_handler callbacks and of the ntb_transport_qp_num(), ntb_transport_max_size(), ntb_transport_free_queue(), ntb_transport_rx_enqueue(), ntb_transport_tx_enqueue(), ntb_transport_rx_remove(), ntb_transport_link_up() and ntb_transport_link_down() prototypes  [all …]

/linux-4.4.14/drivers/net/
D | ntb_netdev.c | qp: struct member (line 77); argument of ntb_netdev_rx_handler(), __ntb_netdev_maybe_stop_tx() and ntb_netdev_maybe_stop_tx(); dev->qp used in ntb_netdev_event_handler()  [all …]

/linux-4.4.14/drivers/net/ethernet/mellanox/mlx5/core/
D | qp.c      | qp: local in mlx5_rsc_event() and mlx5_eq_pagefault(); argument of mlx5_core_create_qp()  [all …]
D | debugfs.c | qp: argument of qp_read_field(), mlx5_debug_qp_add() and mlx5_debug_qp_remove()
D | Makefile  | qp.o listed among the driver objects (line 4)

/linux-4.4.14/drivers/net/ethernet/ibm/ehea/ |
D | ehea_qmr.c | 377 static int ehea_qp_alloc_register(struct ehea_qp *qp, struct hw_queue *hw_queue, in ehea_qp_alloc_register() argument 398 qp->fw_handle, rpage, 1); in ehea_qp_alloc_register() 422 struct ehea_qp *qp; in ehea_create_qp() local 427 qp = kzalloc(sizeof(*qp), GFP_KERNEL); in ehea_create_qp() 428 if (!qp) in ehea_create_qp() 431 qp->adapter = adapter; in ehea_create_qp() 434 &qp->fw_handle, &qp->epas); in ehea_create_qp() 445 ret = ehea_qp_alloc_register(qp, &qp->hw_squeue, init_attr->nr_sq_pages, in ehea_create_qp() 454 ret = ehea_qp_alloc_register(qp, &qp->hw_rqueue1, in ehea_create_qp() 465 ret = ehea_qp_alloc_register(qp, &qp->hw_rqueue2, in ehea_create_qp() [all …]
|
D | ehea_hw.h | 218 static inline void ehea_update_sqa(struct ehea_qp *qp, u16 nr_wqes) in ehea_update_sqa() argument 220 struct h_epa epa = qp->epas.kernel; in ehea_update_sqa() 225 static inline void ehea_update_rq3a(struct ehea_qp *qp, u16 nr_wqes) in ehea_update_rq3a() argument 227 struct h_epa epa = qp->epas.kernel; in ehea_update_rq3a() 232 static inline void ehea_update_rq2a(struct ehea_qp *qp, u16 nr_wqes) in ehea_update_rq2a() argument 234 struct h_epa epa = qp->epas.kernel; in ehea_update_rq2a() 239 static inline void ehea_update_rq1a(struct ehea_qp *qp, u16 nr_wqes) in ehea_update_rq1a() argument 241 struct h_epa epa = qp->epas.kernel; in ehea_update_rq1a()
|
D | ehea_qmr.h | 306 static inline struct ehea_rwqe *ehea_get_next_rwqe(struct ehea_qp *qp, in ehea_get_next_rwqe() argument 312 queue = &qp->hw_rqueue1; in ehea_get_next_rwqe() 314 queue = &qp->hw_rqueue2; in ehea_get_next_rwqe() 316 queue = &qp->hw_rqueue3; in ehea_get_next_rwqe() 339 static inline struct ehea_cqe *ehea_poll_rq1(struct ehea_qp *qp, int *wqe_index) in ehea_poll_rq1() argument 341 struct hw_queue *queue = &qp->hw_rqueue1; in ehea_poll_rq1() 352 static inline void ehea_inc_rq1(struct ehea_qp *qp) in ehea_inc_rq1() argument 354 hw_qeit_inc(&qp->hw_rqueue1); in ehea_inc_rq1() 386 int ehea_destroy_qp(struct ehea_qp *qp);
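The inline helpers above are the receive-refill primitives used by ehea_refill_rq_def() in ehea_main.c below: fetch the next receive WQE slot, fill it, and finally ring the doorbell for however many entries were added. Condensed sketch (illustrative; buffer mapping and SKB handling omitted):

    for (i = 0; i < nr_of_wqes; i++) {
            struct ehea_rwqe *rwqe = ehea_get_next_rwqe(qp, rq_nr);
            /* ... fill rwqe->sg_list[0] with a freshly mapped buffer ... */
            adder++;
    }
    if (rq_nr == 2)
            ehea_update_rq2a(pr->qp, adder);        /* doorbell: 'adder' new RQ2 entries */
    else
            ehea_update_rq3a(pr->qp, adder);        /* doorbell: 'adder' new RQ3 entries */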
|
D | ehea_main.c | 207 arr[i++].fwh = pr->qp->fw_handle; in ehea_update_firmware_handles() 430 ehea_update_rq1a(pr->qp, adder); in ehea_refill_rq1() 450 ehea_update_rq1a(pr->qp, i - 1); in ehea_init_fill_rq1() 458 struct ehea_qp *qp = pr->qp; in ehea_refill_rq_def() local 500 rwqe = ehea_get_next_rwqe(qp, rq_nr); in ehea_refill_rq_def() 520 ehea_update_rq2a(pr->qp, adder); in ehea_refill_rq_def() 522 ehea_update_rq3a(pr->qp, adder); in ehea_refill_rq_def() 654 pr->qp->init_attr.qp_nr); in ehea_treat_poll_error() 669 struct ehea_qp *qp = pr->qp; in ehea_proc_rwqes() local 685 cqe = ehea_poll_rq1(qp, &wqe_index); in ehea_proc_rwqes() [all …]
|
D | ehea.h | 363 struct ehea_qp *qp; member
|
/linux-4.4.14/drivers/net/ethernet/mellanox/mlx4/ |
D | qp.c | 52 struct mlx4_qp *qp; in mlx4_qp_event() local 56 qp = __mlx4_qp_lookup(dev, qpn); in mlx4_qp_event() 57 if (qp) in mlx4_qp_event() 58 atomic_inc(&qp->refcount); in mlx4_qp_event() 62 if (!qp) { in mlx4_qp_event() 67 qp->event(qp, event_type); in mlx4_qp_event() 69 if (atomic_dec_and_test(&qp->refcount)) in mlx4_qp_event() 70 complete(&qp->free); in mlx4_qp_event() 74 static int is_master_qp0(struct mlx4_dev *dev, struct mlx4_qp *qp, int *real_qp0, int *proxy_qp0) in is_master_qp0() argument 79 *proxy_qp0 = qp->qpn >= pf_proxy_offset && qp->qpn <= pf_proxy_offset + 1; in is_master_qp0() [all …]
|
D | mcg.c | 219 mgm->qp[members_count++] = cpu_to_be32(pqp->qpn & MGM_QPN_MASK); in new_steering_entry() 356 u32 qpn = be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK; in promisc_steering_entry() 478 if ((be32_to_cpu(mgm->qp[i]) & in add_promisc_qp() 502 mgm->qp[members_count++] = in add_promisc_qp() 526 mgm->qp[members_count++] = cpu_to_be32(dqp->qpn & MGM_QPN_MASK); in add_promisc_qp() 592 mgm->qp[members_count++] = cpu_to_be32(dqp->qpn & MGM_QPN_MASK); in remove_promisc_qp() 637 if ((be32_to_cpu(mgm->qp[i]) & in remove_promisc_qp() 653 mgm->qp[loc] = mgm->qp[members_count - 1]; in remove_promisc_qp() 654 mgm->qp[members_count - 1] = 0; in remove_promisc_qp() 1097 int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], in mlx4_qp_attach_common() argument [all …]
|
D | resource_tracker.c | 444 dev->quotas.qp = dev->caps.num_qps - dev->caps.reserved_qps - in mlx4_init_quotas() 454 dev->quotas.qp = in mlx4_init_quotas() 1428 enum res_qp_states state, struct res_qp **qp, in qp_res_start_move_to() argument 1482 if (qp) in qp_res_start_move_to() 1483 *qp = r; in qp_res_start_move_to() 2883 struct res_qp *qp; in mlx4_RST2INIT_QP_wrapper() local 2900 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0); in mlx4_RST2INIT_QP_wrapper() 2903 qp->local_qpn = local_qpn; in mlx4_RST2INIT_QP_wrapper() 2904 qp->sched_queue = 0; in mlx4_RST2INIT_QP_wrapper() 2905 qp->param3 = 0; in mlx4_RST2INIT_QP_wrapper() [all …]
|
D | en_resources.c | 92 int mlx4_en_change_mcast_lb(struct mlx4_en_priv *priv, struct mlx4_qp *qp, in mlx4_en_change_mcast_lb() argument 102 ret = mlx4_update_qp(priv->mdev->dev, qp->qpn, in mlx4_en_change_mcast_lb() 140 void mlx4_en_sqp_event(struct mlx4_qp *qp, enum mlx4_event event) in mlx4_en_sqp_event() argument
|
D | Makefile | 4 main.o mcg.o mr.o pd.o port.o profile.o qp.o reset.o sense.o \
|
D | en_tx.c | 123 err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->qp, GFP_KERNEL); in mlx4_en_create_tx_ring() 128 ring->qp.event = mlx4_en_sqp_event; in mlx4_en_create_tx_ring() 182 mlx4_qp_remove(mdev->dev, &ring->qp); in mlx4_en_destroy_tx_ring() 183 mlx4_qp_free(mdev->dev, &ring->qp); in mlx4_en_destroy_tx_ring() 210 ring->doorbell_qpn = cpu_to_be32(ring->qp.qpn << 8); in mlx4_en_activate_tx_ring() 219 &ring->qp, &ring->qp_state); in mlx4_en_activate_tx_ring() 233 MLX4_QP_STATE_RST, NULL, 0, 0, &ring->qp); in mlx4_en_deactivate_tx_ring()
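mlx4_en_create_tx_ring()/mlx4_en_activate_tx_ring() above show the full mlx4 QP lifecycle for a transmit ring. Condensed into one sequence, as a sketch under the assumption that qpn, wqres (with its MTT), the QP context and the qp/qp_state variables have already been prepared; error handling trimmed:

    err = mlx4_qp_alloc(mdev->dev, qpn, &qp, GFP_KERNEL);
    if (err)
            return err;
    qp.event = mlx4_en_sqp_event;                    /* async event callback */

    err = mlx4_qp_to_ready(mdev->dev, &wqres.mtt, &context, &qp, &qp_state);
    /* ... on success the ring is live: post descriptors, ring doorbell_qpn ... */

    mlx4_qp_modify(mdev->dev, NULL, qp_state, MLX4_QP_STATE_RST, NULL, 0, 0, &qp);
    mlx4_qp_remove(mdev->dev, &qp);                  /* unhook from the lookup table */
    mlx4_qp_free(mdev->dev, &qp);                    /* waits for references, frees the QPN */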
|
D | en_rx.c | 1125 struct mlx4_qp *qp) in mlx4_en_config_rss_qp() argument 1135 err = mlx4_qp_alloc(mdev->dev, qpn, qp, GFP_KERNEL); in mlx4_en_config_rss_qp() 1140 qp->event = mlx4_en_sqp_event; in mlx4_en_config_rss_qp() 1157 err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, context, qp, state); in mlx4_en_config_rss_qp() 1159 mlx4_qp_remove(mdev->dev, qp); in mlx4_en_config_rss_qp() 1160 mlx4_qp_free(mdev->dev, qp); in mlx4_en_config_rss_qp()
|
D | mlx4_en.h | 278 struct mlx4_qp qp; member 798 void mlx4_en_sqp_event(struct mlx4_qp *qp, enum mlx4_event event); 801 int mlx4_en_change_mcast_lb(struct mlx4_en_priv *priv, struct mlx4_qp *qp,
|
D | mlx4.h | 618 __be32 qp[MLX4_MAX_QP_PER_MGM]; member 1293 int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], 1295 int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], 1298 int mlx4_trans_to_dmfs_attach(struct mlx4_dev *dev, struct mlx4_qp *qp,
|
D | en_netdev.c | 502 struct mlx4_qp qp; in mlx4_en_uc_steer_add() local 505 qp.qpn = *qpn; in mlx4_en_uc_steer_add() 509 err = mlx4_unicast_attach(dev, &qp, gid, 0, MLX4_PROT_ETH); in mlx4_en_uc_steer_add() 553 struct mlx4_qp qp; in mlx4_en_uc_steer_release() local 556 qp.qpn = qpn; in mlx4_en_uc_steer_release() 560 mlx4_unicast_detach(dev, &qp, gid, MLX4_PROT_ETH); in mlx4_en_uc_steer_release()
|
/linux-4.4.14/drivers/infiniband/hw/cxgb4/ |
D | device.c | 230 struct c4iw_qp *qp = p; in dump_qp() local 235 if (id != qp->wq.sq.qid) in dump_qp() 242 if (qp->ep) { in dump_qp() 243 if (qp->ep->com.local_addr.ss_family == AF_INET) { in dump_qp() 245 &qp->ep->com.local_addr; in dump_qp() 247 &qp->ep->com.remote_addr; in dump_qp() 249 &qp->ep->com.mapped_local_addr; in dump_qp() 251 &qp->ep->com.mapped_remote_addr; in dump_qp() 257 qp->wq.sq.qid, qp->wq.rq.qid, in dump_qp() 258 (int)qp->attr.state, in dump_qp() [all …]
|
D | resource.c | 43 rdev->lldi.vr->qp.start, in c4iw_init_qid_table() 44 rdev->lldi.vr->qp.size, in c4iw_init_qid_table() 45 rdev->lldi.vr->qp.size, 0)) in c4iw_init_qid_table() 48 for (i = rdev->lldi.vr->qp.start; in c4iw_init_qid_table() 49 i < rdev->lldi.vr->qp.start + rdev->lldi.vr->qp.size; i++) in c4iw_init_qid_table()
|
D | cm.c | 154 c4iw_qp_rem_ref(&ep->com.qp->ibqp); in deref_qp() 161 c4iw_qp_add_ref(&ep->com.qp->ibqp); in ref_qp() 1529 err = c4iw_modify_qp(ep->com.qp->rhp, in process_mpa_reply() 1530 ep->com.qp, mask, &attrs, 1); in process_mpa_reply() 1544 err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, in process_mpa_reply() 1564 err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, in process_mpa_reply() 1761 BUG_ON(!ep->com.qp); in rx_data() 1765 __func__, ep->com.qp->wq.sq.qid, ep, in rx_data() 1768 c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, in rx_data() 2026 PDBG("%s qp %p cm_id %p\n", __func__, ep->com.qp, ep->com.cm_id); in c4iw_reconnect() [all …]
|
D | Makefile | 5 iw_cxgb4-y := device.o cm.o provider.o mem.o cq.o qp.o resource.o ev.o id_table.o
|
D | iw_cxgb4.h | 786 struct c4iw_qp *qp; member 950 int c4iw_bind_mw(struct ib_qp *qp, struct ib_mw *mw, 957 void c4iw_qp_add_ref(struct ib_qp *qp); 958 void c4iw_qp_rem_ref(struct ib_qp *qp);
|
D | qp.c | 686 void c4iw_qp_add_ref(struct ib_qp *qp) in c4iw_qp_add_ref() argument 688 PDBG("%s ib_qp %p\n", __func__, qp); in c4iw_qp_add_ref() 689 atomic_inc(&(to_c4iw_qp(qp)->refcnt)); in c4iw_qp_add_ref() 692 void c4iw_qp_rem_ref(struct ib_qp *qp) in c4iw_qp_rem_ref() argument 694 PDBG("%s ib_qp %p\n", __func__, qp); in c4iw_qp_rem_ref() 695 if (atomic_dec_and_test(&(to_c4iw_qp(qp)->refcnt))) in c4iw_qp_rem_ref() 696 wake_up(&(to_c4iw_qp(qp)->wait)); in c4iw_qp_rem_ref() 940 int c4iw_bind_mw(struct ib_qp *qp, struct ib_mw *mw, struct ib_mw_bind *mw_bind) in c4iw_bind_mw() argument
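The add_ref/rem_ref pair above is the usual last-reference-wakes-destroyer pattern: users hold a reference while the QP can still generate events, and the destroy path sleeps until the count reaches zero. Generic form (illustrative, not copied from the tree):

    atomic_inc(&qhp->refcnt);                        /* c4iw_qp_add_ref() */
    ...
    if (atomic_dec_and_test(&qhp->refcnt))           /* c4iw_qp_rem_ref() */
            wake_up(&qhp->wait);
    ...
    /* destroy path: drop its own reference, then wait for the rest */
    c4iw_qp_rem_ref(&qhp->ibqp);
    wait_event(qhp->wait, !atomic_read(&qhp->refcnt));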
|
D | ev.c | 109 event.element.qp = &qhp->ibqp; in post_qp_event()
|
D | provider.c | 329 props->max_qp = dev->rdev.lldi.vr->qp.size / 2; in c4iw_query_device() 337 props->max_cq = dev->rdev.lldi.vr->qp.size; in c4iw_query_device()
|
/linux-4.4.14/drivers/infiniband/ulp/ipoib/ |
D | ipoib_verbs.c | 60 ret = ib_modify_qp(priv->qp, qp_attr, IB_QP_QKEY); in ipoib_mcast_attach() 68 ret = ib_attach_mcast(priv->qp, mgid, mlid); in ipoib_mcast_attach() 96 ret = ib_modify_qp(priv->qp, &qp_attr, attr_mask); in ipoib_init_qp() 105 ret = ib_modify_qp(priv->qp, &qp_attr, attr_mask); in ipoib_init_qp() 115 ret = ib_modify_qp(priv->qp, &qp_attr, attr_mask); in ipoib_init_qp() 125 if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE)) in ipoib_init_qp() 211 priv->qp = ib_create_qp(priv->pd, &init_attr); in ipoib_transport_dev_init() 212 if (IS_ERR(priv->qp)) { in ipoib_transport_dev_init() 217 priv->dev->dev_addr[1] = (priv->qp->qp_num >> 16) & 0xff; in ipoib_transport_dev_init() 218 priv->dev->dev_addr[2] = (priv->qp->qp_num >> 8) & 0xff; in ipoib_transport_dev_init() [all …]
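ipoib_init_qp() above performs the canonical UD QP bring-up: RESET -> INIT (with qkey/port/pkey), INIT -> RTR, RTR -> RTS. A sketch of the same sequence with placeholder attribute values (illustrative; the driver uses its real interface qkey and pkey index):

    struct ib_qp_attr qp_attr;
    int ret;

    qp_attr.qp_state   = IB_QPS_INIT;
    qp_attr.qkey       = 0;
    qp_attr.port_num   = 1;
    qp_attr.pkey_index = 0;
    ret = ib_modify_qp(qp, &qp_attr,
                       IB_QP_STATE | IB_QP_QKEY | IB_QP_PORT | IB_QP_PKEY_INDEX);
    if (ret)
            return ret;

    qp_attr.qp_state = IB_QPS_RTR;
    ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE);
    if (ret)
            return ret;

    qp_attr.qp_state = IB_QPS_RTS;
    qp_attr.sq_psn   = 0;
    ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE | IB_QP_SQ_PSN);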
|
D | ipoib_cm.c | 128 ret = ib_post_recv(rx->qp, wr, &bad_wr); in ipoib_cm_post_receive_nonsrq() 226 if (ib_post_send(p->qp, &ipoib_cm_rx_drain_wr, &bad_wr)) in ipoib_cm_start_rx_drain() 273 struct ib_cm_id *cm_id, struct ib_qp *qp, in ipoib_cm_modify_rx_qp() argument 286 ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask); in ipoib_cm_modify_rx_qp() 298 ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask); in ipoib_cm_modify_rx_qp() 318 ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask); in ipoib_cm_modify_rx_qp() 420 struct ib_qp *qp, struct ib_cm_req_event_param *req, in ipoib_cm_send_rep() argument 427 data.qpn = cpu_to_be32(priv->qp->qp_num); in ipoib_cm_send_rep() 435 rep.qp_num = qp->qp_num; in ipoib_cm_send_rep() 459 p->qp = ipoib_cm_create_rx_qp(dev, p); in ipoib_cm_req_handler() [all …]
|
D | ipoib_ib.c | 113 ret = ib_post_recv(priv->qp, &priv->rx_wr, &bad_wr); in ipoib_ib_post_receive() 210 if (wc->slid == priv->local_lid && wc->src_qp == priv->qp->qp_num) in ipoib_ib_handle_rx_wc() 345 ret = ib_query_qp(priv->qp, &qp_attr, IB_QP_STATE, &query_init_attr); in ipoib_qp_state_validate_work() 352 __func__, priv->qp->qp_num, qp_attr.qp_state); in ipoib_qp_state_validate_work() 358 ret = ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE); in ipoib_qp_state_validate_work() 361 ret, priv->qp->qp_num); in ipoib_qp_state_validate_work() 365 __func__, priv->qp->qp_num); in ipoib_qp_state_validate_work() 368 priv->qp->qp_num, qp_attr.qp_state); in ipoib_qp_state_validate_work() 414 __func__, priv->qp->qp_num); in ipoib_ib_handle_tx_wc() 533 return ib_post_send(priv->qp, &priv->tx_wr.wr, &bad_wr); in post_send() [all …]
|
D | ipoib.h | 226 struct ib_qp *qp; member 237 struct ib_qp *qp; member 347 struct ib_qp *qp; member
|
/linux-4.4.14/drivers/misc/vmw_vmci/ |
D | vmci_queue_pair.c | 221 struct qp_entry qp; member 239 struct qp_entry qp; member 914 struct qp_entry *qp = qp_list_find(&qp_guest_endpoints, handle); in qp_guest_handle_to_entry() local 916 entry = qp ? container_of( in qp_guest_handle_to_entry() 917 qp, struct qp_guest_endpoint, qp) : NULL; in qp_guest_handle_to_entry() 928 struct qp_entry *qp = qp_list_find(&qp_broker_list, handle); in qp_broker_handle_to_entry() local 930 entry = qp ? container_of( in qp_broker_handle_to_entry() 931 qp, struct qp_broker_entry, qp) : NULL; in qp_broker_handle_to_entry() 986 entry->qp.peer = peer; in qp_guest_endpoint_create() 987 entry->qp.flags = flags; in qp_guest_endpoint_create() [all …]
|
/linux-4.4.14/drivers/scsi/lpfc/ |
D | lpfc_debugfs.c | 2002 struct lpfc_queue *qp = NULL; in lpfc_idiag_queinfo_read() local 2024 qp = phba->sli4_hba.hba_eq[x]; in lpfc_idiag_queinfo_read() 2025 if (!qp) in lpfc_idiag_queinfo_read() 2033 qp->q_cnt_1, qp->q_cnt_2, in lpfc_idiag_queinfo_read() 2034 qp->q_cnt_3, (unsigned long long)qp->q_cnt_4); in lpfc_idiag_queinfo_read() 2041 qp->queue_id, in lpfc_idiag_queinfo_read() 2042 qp->entry_count, in lpfc_idiag_queinfo_read() 2043 qp->entry_size, in lpfc_idiag_queinfo_read() 2044 qp->host_index, in lpfc_idiag_queinfo_read() 2045 qp->hba_index); in lpfc_idiag_queinfo_read() [all …]
|
/linux-4.4.14/lib/mpi/ |
D | mpih-div.c | 58 mpihelp_divrem(mpi_ptr_t qp, mpi_size_t qextra_limbs, in mpihelp_divrem() argument 87 qp += qextra_limbs; in mpihelp_divrem() 89 udiv_qrnnd(qp[i], n1, n1, np[i], d); in mpihelp_divrem() 90 qp -= qextra_limbs; in mpihelp_divrem() 93 udiv_qrnnd(qp[i], n1, n1, 0, d); in mpihelp_divrem() 135 qp[i] = q; in mpihelp_divrem() 156 qp[i] = q; in mpihelp_divrem() 229 qp[i] = q; in mpihelp_divrem()
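The single-limb branch of mpihelp_divrem() above is plain schoolbook division: walk the numerator from the most significant limb down, and at each step divide the running remainder concatenated with the next limb. A self-contained user-space sketch of that branch (illustrative, 32-bit limbs assumed):

    #include <stdint.h>
    #include <stddef.h>

    /* Divide an nlimbs-limb number n[] by a single limb d; quotient goes to q[],
     * the final remainder is returned. q[i]/rem play the role of udiv_qrnnd(). */
    static uint32_t divrem_1(uint32_t *q, const uint32_t *n, size_t nlimbs, uint32_t d)
    {
            uint64_t rem = 0;
            size_t i;

            for (i = nlimbs; i-- > 0; ) {
                    uint64_t cur = (rem << 32) | n[i];      /* two-limb dividend */
                    q[i] = (uint32_t)(cur / d);             /* quotient limb */
                    rem  = cur % d;                         /* carried remainder */
            }
            return (uint32_t)rem;
    }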
|
D | mpi-internal.h | 224 mpi_limb_t mpihelp_divrem(mpi_ptr_t qp, mpi_size_t qextra_limbs,
|
/linux-4.4.14/drivers/net/ethernet/sun/ |
D | sunqe.h | 298 #define TX_BUFFS_AVAIL(qp) \ argument 299 (((qp)->tx_old <= (qp)->tx_new) ? \ 300 (qp)->tx_old + (TX_RING_SIZE - 1) - (qp)->tx_new : \ 301 (qp)->tx_old - (qp)->tx_new - 1)
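TX_BUFFS_AVAIL() above is the standard ring-buffer free-slot count with one slot deliberately kept empty, so a full ring can be told apart from an empty one. The same computation as a helper, with a worked example (illustrative):

    static inline int tx_buffs_avail(int tx_old, int tx_new, int ring_size)
    {
            if (tx_old <= tx_new)
                    return tx_old + (ring_size - 1) - tx_new;
            return tx_old - tx_new - 1;
    }
    /* e.g. ring_size = 256: tx_old = 10, tx_new = 200 -> 65 slots free;
     * after the producer wraps, tx_old = 200, tx_new = 10 -> 189 slots free. */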
|
D | sunhme.c | 2139 struct quattro *qp = (struct quattro *) cookie; in quattro_sbus_interrupt() local 2143 struct net_device *dev = qp->happy_meals[i]; in quattro_sbus_interrupt() 2558 struct quattro *qp; in quattro_sbus_find() local 2561 qp = platform_get_drvdata(op); in quattro_sbus_find() 2562 if (qp) in quattro_sbus_find() 2563 return qp; in quattro_sbus_find() 2565 qp = kmalloc(sizeof(struct quattro), GFP_KERNEL); in quattro_sbus_find() 2566 if (qp != NULL) { in quattro_sbus_find() 2570 qp->happy_meals[i] = NULL; in quattro_sbus_find() 2572 qp->quattro_dev = child; in quattro_sbus_find() [all …]
|
D | sunqe.c | 937 struct sunqe *qp = platform_get_drvdata(op); in qec_sbus_remove() local 938 struct net_device *net_dev = qp->dev; in qec_sbus_remove() 942 of_iounmap(&op->resource[0], qp->qcregs, CREG_REG_SIZE); in qec_sbus_remove() 943 of_iounmap(&op->resource[1], qp->mregs, MREGS_REG_SIZE); in qec_sbus_remove() 945 qp->qe_block, qp->qblock_dvma); in qec_sbus_remove() 947 qp->buffers, qp->buffers_dvma); in qec_sbus_remove()
|
/linux-4.4.14/drivers/scsi/sym53c8xx_2/ |
D | sym_misc.h | 159 #define FOR_EACH_QUEUED_ELEMENT(head, qp) \ argument 160 for (qp = (head)->flink; qp != (head); qp = qp->flink)
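FOR_EACH_QUEUED_ELEMENT() above walks a circular doubly-linked queue whose head is a sentinel; sym_hipd.c below pairs it with sym_que_entry() to get back to the containing CCB. Typical usage sketch (queue name illustrative):

    SYM_QUEHEAD *qp;
    struct sym_ccb *cp;

    FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) {
            cp = sym_que_entry(qp, struct sym_ccb, link_ccbq);
            /* ... inspect or requeue cp ... */
    }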
|
D | sym_hipd.c | 1542 SYM_QUEHEAD *qp; local 1556 qp = sym_remque_head(&lp->waiting_ccbq); 1557 if (!qp) 1559 cp = sym_que_entry(qp, struct sym_ccb, link2_ccbq); 1563 sym_insque_head(qp, &lp->waiting_ccbq); 1572 sym_insque_head(qp, &lp->waiting_ccbq); 1581 sym_insque_tail(qp, &lp->started_ccbq); 1643 SYM_QUEHEAD *qp; local 1646 while ((qp = sym_remque_head(&np->comp_ccbq)) != NULL) { 1648 cp = sym_que_entry(qp, struct sym_ccb, link_ccbq); [all …]
|
/linux-4.4.14/arch/ia64/kernel/ |
D | kprobes.c | 194 int qp; in unsupported_inst() local 196 qp = kprobe_inst & 0x3f; in unsupported_inst() 198 if (slot == 1 && qp) { in unsupported_inst() 205 qp = 0; in unsupported_inst() 237 if (slot == 1 && qp) { in unsupported_inst() 243 qp = 0; in unsupported_inst() 272 if (slot == 1 && qp) { in unsupported_inst() 279 qp = 0; in unsupported_inst() 284 if (slot == 1 && qp) { in unsupported_inst() 290 qp = 0; in unsupported_inst() [all …]
|
D | unwind_decoder.c | 122 unsigned char byte1, byte2, abreg, qp; in unw_decode_x3() local 129 qp = (byte1 & 0x3f); in unw_decode_x3() 133 UNW_DEC_SPILL_SPREL_P(X3, qp, t, abreg, off, arg); in unw_decode_x3() 135 UNW_DEC_SPILL_PSPREL_P(X3, qp, t, abreg, off, arg); in unw_decode_x3() 142 unsigned char byte1, byte2, byte3, qp, abreg, x, ytreg; in unw_decode_x4() local 148 qp = (byte1 & 0x3f); in unw_decode_x4() 154 UNW_DEC_RESTORE_P(X4, qp, t, abreg, arg); in unw_decode_x4() 156 UNW_DEC_SPILL_REG_P(X4, qp, t, abreg, x, ytreg, arg); in unw_decode_x4()
|
D | brl_emu.c | 58 unsigned long opcode, btype, qp, offset, cpl; in ia64_emulate_brl() local 83 qp = ((bundle[1] >> 23) & 0x3f); in ia64_emulate_brl() 89 tmp_taken = regs->pr & (1L << qp); in ia64_emulate_brl()
|
D | unwind.c | 1071 desc_is_active (unsigned char qp, unw_word t, struct unw_state_record *sr) in desc_is_active() argument 1075 if (qp > 0) { in desc_is_active() 1076 if ((sr->pr_val & (1UL << qp)) == 0) in desc_is_active() 1078 sr->pr_mask |= (1UL << qp); in desc_is_active() 1084 desc_restore_p (unsigned char qp, unw_word t, unsigned char abreg, struct unw_state_record *sr) in desc_restore_p() argument 1088 if (!desc_is_active(qp, t, sr)) in desc_restore_p() 1098 desc_spill_reg_p (unsigned char qp, unw_word t, unsigned char abreg, unsigned char x, in desc_spill_reg_p() argument 1104 if (!desc_is_active(qp, t, sr)) in desc_spill_reg_p() 1119 desc_spill_psprel_p (unsigned char qp, unw_word t, unsigned char abreg, unw_word pspoff, in desc_spill_psprel_p() argument 1124 if (!desc_is_active(qp, t, sr)) in desc_spill_psprel_p() [all …]
|
/linux-4.4.14/drivers/crypto/ |
D | n2_core.c | 233 static inline bool n2_should_run_async(struct spu_queue *qp, int this_len) 236 qp->head != qp->tail) 477 static unsigned long wait_for_tail(struct spu_queue *qp) in wait_for_tail() argument 482 hv_ret = sun4v_ncs_gethead(qp->qhandle, &head); in wait_for_tail() 487 if (head == qp->tail) { in wait_for_tail() 488 qp->head = head; in wait_for_tail() 495 static unsigned long submit_and_wait_for_tail(struct spu_queue *qp, in submit_and_wait_for_tail() argument 498 unsigned long hv_ret = spu_queue_submit(qp, ent); in submit_and_wait_for_tail() 501 hv_ret = wait_for_tail(qp); in submit_and_wait_for_tail() 514 struct spu_queue *qp; in n2_do_async_digest() local [all …]
|
/linux-4.4.14/drivers/infiniband/hw/cxgb3/ |
D | iwch_cm.c | 681 ep->com.qp = NULL; in close_complete_upcall() 713 ep->com.qp = NULL; in peer_abort_upcall() 742 ep->com.qp = NULL; in connect_reply_upcall() 925 err = iwch_modify_qp(ep->com.qp->rhp, in process_mpa_reply() 926 ep->com.qp, mask, &attrs, 1); in process_mpa_reply() 930 if (peer2peer && iwch_rqes_posted(ep->com.qp) == 0) { in process_mpa_reply() 1484 iwch_modify_qp(ep->com.qp->rhp, ep->com.qp, in peer_close() 1497 if (ep->com.cm_id && ep->com.qp) { in peer_close() 1499 iwch_modify_qp(ep->com.qp->rhp, ep->com.qp, in peer_close() 1592 if (ep->com.cm_id && ep->com.qp) { in peer_abort() [all …]
|
D | iwch_provider.h | 184 void iwch_qp_add_ref(struct ib_qp *qp); 185 void iwch_qp_rem_ref(struct ib_qp *qp); 333 int iwch_bind_mw(struct ib_qp *qp,
|
D | iwch_cm.h | 156 struct iwch_qp *qp; member
|
D | iwch_provider.c | 1114 void iwch_qp_add_ref(struct ib_qp *qp) in iwch_qp_add_ref() argument 1116 PDBG("%s ib_qp %p\n", __func__, qp); in iwch_qp_add_ref() 1117 atomic_inc(&(to_iwch_qp(qp)->refcnt)); in iwch_qp_add_ref() 1120 void iwch_qp_rem_ref(struct ib_qp *qp) in iwch_qp_rem_ref() argument 1122 PDBG("%s ib_qp %p\n", __func__, qp); in iwch_qp_rem_ref() 1123 if (atomic_dec_and_test(&(to_iwch_qp(qp)->refcnt))) in iwch_qp_rem_ref() 1124 wake_up(&(to_iwch_qp(qp)->wait)); in iwch_qp_rem_ref()
|
D | iwch_cq.c | 82 wc->qp = &qhp->ibqp; in iwch_poll_cq_one()
|
D | iwch_ev.c | 93 event.element.qp = &qhp->ibqp; in post_qp_event()
|
D | iwch_qp.c | 529 int iwch_bind_mw(struct ib_qp *qp, in iwch_bind_mw() argument 547 qhp = to_iwch_qp(qp); in iwch_bind_mw() 769 return iwch_cxgb3_ofld_send(ep->com.qp->rhp->rdev.t3cdev_p, skb); in iwch_post_zb_read()
|
/linux-4.4.14/include/rdma/ |
D | iw_cm.h | 106 void (*add_ref)(struct ib_qp *qp); 108 void (*rem_ref)(struct ib_qp *qp); 160 void iw_cm_unbind_qp(struct iw_cm_id *cm_id, struct ib_qp *qp);
|
D | ib_verbs.h | 485 struct ib_qp *qp; member 766 struct ib_qp *qp; member 1501 struct ib_qp *qp; member 1694 int (*modify_qp)(struct ib_qp *qp, 1698 int (*query_qp)(struct ib_qp *qp, 1702 int (*destroy_qp)(struct ib_qp *qp); 1703 int (*post_send)(struct ib_qp *qp, 1706 int (*post_recv)(struct ib_qp *qp, 1762 int (*bind_mw)(struct ib_qp *qp, 1774 int (*attach_mcast)(struct ib_qp *qp, [all …]
|
D | ib_mad.h | 473 struct ib_qp *qp; member 691 struct ib_mad_agent *ib_redirect_mad_qp(struct ib_qp *qp,
|
D | rdma_cm.h | 152 struct ib_qp *qp; member
|
/linux-4.4.14/drivers/scsi/ |
D | qlogicpti.h | 504 #define for_each_qlogicpti(qp) \ argument 505 for((qp) = qptichain; (qp); (qp) = (qp)->next)
|
D | ncr53c8xx.c | 4430 struct list_head *qp; in ncr_start_next_ccb() local 4437 qp = ncr_list_pop(&lp->wait_ccbq); in ncr_start_next_ccb() 4438 if (!qp) in ncr_start_next_ccb() 4441 cp = list_entry(qp, struct ccb, link_ccbq); in ncr_start_next_ccb() 4442 list_add_tail(qp, &lp->busy_ccbq); in ncr_start_next_ccb() 6546 struct list_head *qp; in ncr_sir_to_redo() local 6560 qp = lp->busy_ccbq.prev; in ncr_sir_to_redo() 6561 while (qp != &lp->busy_ccbq) { in ncr_sir_to_redo() 6562 cp2 = list_entry(qp, struct ccb, link_ccbq); in ncr_sir_to_redo() 6563 qp = qp->prev; in ncr_sir_to_redo() [all …]
|
/linux-4.4.14/include/linux/mlx5/ |
D | qp.h | 620 struct mlx5_core_qp *qp, 626 struct mlx5_core_qp *qp); 628 struct mlx5_core_qp *qp); 629 int mlx5_core_qp_query(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp, 636 int mlx5_debug_qp_add(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp); 637 void mlx5_debug_qp_remove(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp);
|
/linux-4.4.14/arch/sparc/kernel/ |
D | ds.c | 995 struct ds_queue_entry *qp, *tmp; in process_ds_work() local 1003 list_for_each_entry_safe(qp, tmp, &todo, list) { in process_ds_work() 1004 struct ds_data *dpkt = (struct ds_data *) qp->req; in process_ds_work() 1005 struct ds_info *dp = qp->dp; in process_ds_work() 1007 int req_len = qp->req_len; in process_ds_work() 1021 list_del(&qp->list); in process_ds_work() 1022 kfree(qp); in process_ds_work() 1048 struct ds_queue_entry *qp; in ds_data() local 1050 qp = kmalloc(sizeof(struct ds_queue_entry) + len, GFP_ATOMIC); in ds_data() 1051 if (!qp) { in ds_data() [all …]
|
/linux-4.4.14/net/9p/ |
D | trans_rdma.c | 95 struct ib_qp *qp; member 383 if (rdma->qp && !IS_ERR(rdma->qp)) in rdma_destroy_trans() 384 ib_destroy_qp(rdma->qp); in rdma_destroy_trans() 420 return ib_post_recv(rdma->qp, &wr, &bad_wr); in post_recv() 524 err = ib_post_send(rdma->qp, &wr, &bad_wr); in rdma_request() 726 rdma->qp = rdma->cm_id->qp; in rdma_create_trans()
|
/linux-4.4.14/drivers/infiniband/ulp/iser/ |
D | iser_verbs.c | 523 ib_conn->qp = ib_conn->cma_id->qp; in iser_create_ib_conn_res() 526 ib_conn->cma_id->qp); in iser_create_ib_conn_res() 641 iser_conn, ib_conn->cma_id, ib_conn->qp); in iser_free_ib_conn_res() 643 if (ib_conn->qp != NULL) { in iser_free_ib_conn_res() 646 ib_conn->qp = NULL; in iser_free_ib_conn_res() 727 err = ib_post_send(ib_conn->qp, &ib_conn->beacon, &bad_wr); in iser_conn_terminate() 877 (void)ib_query_qp(cma_id->qp, &attr, ~0, &init_attr); in iser_connected_handler() 878 iser_info("remote qpn:%x my qpn:%x\n", attr.dest_qp_num, cma_id->qp->qp_num); in iser_connected_handler() 1063 ib_ret = ib_post_recv(ib_conn->qp, &rx_wr, &rx_wr_failed); in iser_post_recvl() 1092 ib_ret = ib_post_recv(ib_conn->qp, ib_conn->rx_wr, &rx_wr_failed); in iser_post_recvm() [all …]
|
D | iscsi_iser.h | 485 struct ib_qp *qp; member
|
/linux-4.4.14/include/linux/mlx4/ |
D | qp.h | 474 int sqd_event, struct mlx4_qp *qp); 476 int mlx4_qp_query(struct mlx4_dev *dev, struct mlx4_qp *qp, 481 struct mlx4_qp *qp, enum mlx4_qp_state *qp_state); 488 void mlx4_qp_remove(struct mlx4_dev *dev, struct mlx4_qp *qp);
|
D | device.h | 811 int qp; member 884 } __packed qp; member 1102 int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp, 1104 void mlx4_qp_free(struct mlx4_dev *dev, struct mlx4_qp *qp); 1115 int mlx4_unicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], 1117 int mlx4_unicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], 1119 int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], 1122 int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
|
/linux-4.4.14/drivers/infiniband/hw/usnic/ |
D | usnic_ib_verbs.h | 48 int usnic_ib_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr, 62 int usnic_ib_destroy_qp(struct ib_qp *qp);
|
D | usnic_ib_verbs.c | 370 int usnic_ib_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr, in usnic_ib_query_qp() argument 383 qp_grp = to_uqp_grp(qp); in usnic_ib_query_qp() 543 int usnic_ib_destroy_qp(struct ib_qp *qp) in usnic_ib_destroy_qp() argument 550 qp_grp = to_uqp_grp(qp); in usnic_ib_destroy_qp()
|
/linux-4.4.14/drivers/atm/ |
D | firestream.c | 631 static int qp; variable 651 pq[qp].cmd = cmd; in submit_queue() 652 pq[qp].p0 = p1; in submit_queue() 653 pq[qp].p1 = p2; in submit_queue() 654 pq[qp].p2 = p3; in submit_queue() 655 qp++; in submit_queue() 656 if (qp >= 60) qp = 0; in submit_queue() 1950 i, pq[qp].cmd, pq[qp].p0, pq[qp].p1, pq[qp].p2); in firestream_remove_one() 1951 qp++; in firestream_remove_one() 1952 if (qp >= 60) qp = 0; in firestream_remove_one()
|
/linux-4.4.14/drivers/media/pci/solo6x10/ |
D | solo6x10-enc.c | 182 unsigned int qp) in solo_s_jpeg_qp() argument 187 if ((ch > 31) || (qp > 3)) in solo_s_jpeg_qp() 206 solo_dev->jpeg_qp[idx] |= (qp & 3) << ch; in solo_s_jpeg_qp()
|
D | solo6x10.h | 157 u8 mode, gop, qp, interlaced, interval; member 378 unsigned int qp);
|
D | solo6x10-v4l2-enc.c | 261 solo_reg_write(solo_dev, SOLO_VE_CH_QP(ch), solo_enc->qp); in solo_enc_on() 266 solo_reg_write(solo_dev, SOLO_VE_CH_QP_E(ch), solo_enc->qp); in solo_enc_on() 1103 solo_enc->qp = ctrl->val; in solo_s_ctrl() 1104 solo_reg_write(solo_dev, SOLO_VE_CH_QP(solo_enc->ch), solo_enc->qp); in solo_s_ctrl() 1105 solo_reg_write(solo_dev, SOLO_VE_CH_QP_E(solo_enc->ch), solo_enc->qp); in solo_s_ctrl() 1289 solo_enc->qp = SOLO_DEFAULT_QP; in solo_enc_alloc()
|
/linux-4.4.14/mm/ |
D | mempolicy.c | 490 struct queue_pages *qp = walk->private; in queue_pages_pte_range() local 491 unsigned long flags = qp->flags; in queue_pages_pte_range() 514 if (node_isset(nid, *qp->nmask) == !!(flags & MPOL_MF_INVERT)) in queue_pages_pte_range() 518 migrate_page_add(page, qp->pagelist, flags); in queue_pages_pte_range() 530 struct queue_pages *qp = walk->private; in queue_pages_hugetlb() local 531 unsigned long flags = qp->flags; in queue_pages_hugetlb() 543 if (node_isset(nid, *qp->nmask) == !!(flags & MPOL_MF_INVERT)) in queue_pages_hugetlb() 548 isolate_huge_page(page, qp->pagelist); in queue_pages_hugetlb() 590 struct queue_pages *qp = walk->private; in queue_pages_test_walk() local 592 unsigned long flags = qp->flags; in queue_pages_test_walk() [all …]
|
/linux-4.4.14/drivers/infiniband/ulp/srp/ |
D | ib_srp.c | 259 struct ib_qp *qp) in srp_init_qp() argument 280 ret = ib_modify_qp(qp, attr, in srp_init_qp() 467 ret = ib_modify_qp(ch->qp, &attr, IB_QP_STATE); in srp_destroy_qp() 473 ret = ib_post_recv(ch->qp, &wr, &bad_wr); in srp_destroy_qp() 479 ib_destroy_qp(ch->qp); in srp_destroy_qp() 488 struct ib_qp *qp; in srp_create_ch_ib() local 530 qp = ib_create_qp(dev->pd, init_attr); in srp_create_ch_ib() 531 if (IS_ERR(qp)) { in srp_create_ch_ib() 532 ret = PTR_ERR(qp); in srp_create_ch_ib() 536 ret = srp_init_qp(target, qp); in srp_create_ch_ib() [all …]
|
D | ib_srp.h | 147 struct ib_qp *qp; member
|
/linux-4.4.14/drivers/infiniband/ulp/isert/ |
D | ib_isert.c | 176 return cma_id->qp; in isert_create_qp() 186 isert_conn->qp = isert_create_qp(isert_conn, comp, cma_id); in isert_conn_setup_qp() 187 if (IS_ERR(isert_conn->qp)) { in isert_conn_setup_qp() 188 ret = PTR_ERR(isert_conn->qp); in isert_conn_setup_qp() 765 if (isert_conn->qp) { in isert_connect_release() 766 struct isert_comp *comp = isert_conn->qp->recv_cq->cq_context; in isert_connect_release() 769 ib_destroy_qp(isert_conn->qp); in isert_connect_release() 783 struct isert_conn *isert_conn = cma_id->qp->qp_context; in isert_connected_handler() 901 struct isert_conn *isert_conn = cma_id->qp->qp_context; in isert_disconnected_handler() 928 struct isert_conn *isert_conn = cma_id->qp->qp_context; in isert_connect_error() [all …]
|
D | ib_isert.h | 166 struct ib_qp *qp; member
|
/linux-4.4.14/drivers/infiniband/ulp/srpt/ |
D | ib_srpt.c | 466 ah = ib_create_ah_from_wc(mad_agent->qp->pd, mad_wc->wc, in srpt_mad_recv_handler() 829 ret = ib_post_send(ch->qp, &wr, &bad_wr); in srpt_post_send() 950 static int srpt_init_ch_qp(struct srpt_rdma_ch *ch, struct ib_qp *qp) in srpt_init_ch_qp() argument 965 ret = ib_modify_qp(qp, attr, in srpt_init_ch_qp() 984 static int srpt_ch_qp_rtr(struct srpt_rdma_ch *ch, struct ib_qp *qp) in srpt_ch_qp_rtr() argument 997 ret = ib_modify_qp(qp, &qp_attr, attr_mask); in srpt_ch_qp_rtr() 1014 static int srpt_ch_qp_rts(struct srpt_rdma_ch *ch, struct ib_qp *qp) in srpt_ch_qp_rts() argument 1027 ret = ib_modify_qp(qp, &qp_attr, attr_mask); in srpt_ch_qp_rts() 1041 return ib_modify_qp(ch->qp, &qp_attr, IB_QP_STATE); in srpt_ch_qp_err() 2053 ch->qp = ib_create_qp(sdev->pd, qp_init); in srpt_create_ch_ib() [all …]
|
D | ib_srpt.h | 305 struct ib_qp *qp; member
|
/linux-4.4.14/arch/ia64/include/asm/ |
D | kprobes.h | 41 unsigned long long qp : 6; member
|
/linux-4.4.14/net/sunrpc/xprtrdma/ |
D | verbs.c | 308 ib_query_qp(ia->ri_id->qp, attr, in rpcrdma_conn_upcall() 551 if (ia->ri_id->qp) in rpcrdma_ia_close() 713 if (ia->ri_id->qp) in rpcrdma_ep_destroy() 719 if (ia->ri_id->qp) { in rpcrdma_ep_destroy() 721 ia->ri_id->qp = NULL; in rpcrdma_ep_destroy() 1289 rc = ib_post_send(ia->ri_id->qp, &send_wr, &send_wr_fail); in rpcrdma_ep_post() 1318 rc = ib_post_recv(ia->ri_id->qp, &recv_wr, &recv_wr_fail); in rpcrdma_ep_post_recv()
|
D | frwr_ops.c | 396 rc = ib_post_send(ia->ri_id->qp, ®_wr.wr, &bad_wr); in frwr_op_map() 442 rc = ib_post_send(ia->ri_id->qp, &invalidate_wr, &bad_wr); in frwr_op_unmap()
|
D | svc_rdma_transport.c | 246 event->element.qp); in qp_event_handler() 258 event->element.qp); in qp_event_handler() 992 newxprt->sc_qp = newxprt->sc_cm_id->qp; in svc_rdma_accept()
|
/linux-4.4.14/net/sched/ |
D | sch_api.c | 142 struct Qdisc_ops *q, **qp; in register_qdisc() local 146 for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next) in register_qdisc() 172 *qp = qops; in register_qdisc() 186 struct Qdisc_ops *q, **qp; in unregister_qdisc() local 190 for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next) in unregister_qdisc() 194 *qp = q->next; in unregister_qdisc()
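register_qdisc()/unregister_qdisc() above use the pointer-to-pointer walk: 'qp' always points at the link field to be updated, so appending at the tail and unlinking need no separate "previous" pointer. The idiom in isolation (illustrative, not kernel code):

    struct node { struct node *next; };

    static void list_append(struct node **head, struct node *n)
    {
            struct node **qp = head, *q;

            while ((q = *qp) != NULL)
                    qp = &q->next;
            n->next = NULL;
            *qp = n;                /* append at the tail, as register_qdisc() does */
    }

    static void list_unlink(struct node **head, struct node *n)
    {
            struct node **qp = head, *q;

            while ((q = *qp) != NULL && q != n)
                    qp = &q->next;
            if (q)
                    *qp = q->next;  /* splice out, as unregister_qdisc() does */
    }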
|
/linux-4.4.14/net/rds/ |
D | iw_cm.c | 586 ic->i_cm_id ? ic->i_cm_id->qp : NULL); in rds_iw_conn_shutdown() 601 if (ic->i_cm_id->qp) { in rds_iw_conn_shutdown() 603 ib_modify_qp(ic->i_cm_id->qp, &qp_attr, IB_QP_STATE); in rds_iw_conn_shutdown() 633 if (ic->i_cm_id->qp) in rds_iw_conn_shutdown()
|
D | ib_cm.c | 86 ret = ib_modify_qp(ic->i_cm_id->qp, attr, IB_QP_MIN_RNR_TIMER); in rds_ib_tune_rnr() 144 err = ib_modify_qp(ic->i_cm_id->qp, &qp_attr, IB_QP_STATE); in rds_ib_cm_connect_complete() 716 ic->i_cm_id ? ic->i_cm_id->qp : NULL); in rds_ib_conn_shutdown() 747 if (ic->i_cm_id->qp) in rds_ib_conn_shutdown()
|
D | iw_rdma.c | 693 ret = ib_post_send(ibmr->cm_id->qp, ®_wr.wr, &failed_wr); 706 if (!ibmr->cm_id->qp || !ibmr->mr) 716 ret = ib_post_send(ibmr->cm_id->qp, &s_wr, &failed_wr);
|
D | ib_send.c | 725 ret = ib_post_send(ic->i_cm_id->qp, &first->s_wr, &failed_wr); in rds_ib_xmit() 822 ret = ib_post_send(ic->i_cm_id->qp, &send->s_atomic_wr.wr, &failed_wr); in rds_ib_xmit_atomic() 962 ret = ib_post_send(ic->i_cm_id->qp, &first->s_rdma_wr.wr, &failed_wr); in rds_ib_xmit_rdma()
|
D | iw_recv.c | 251 ret = ib_post_recv(ic->i_cm_id->qp, &recv->r_wr, &failed_wr); in rds_iw_recv_refill() 448 ret = ib_post_send(ic->i_cm_id->qp, &ic->i_ack_wr, &failed_wr); in rds_iw_send_ack()
|
D | ib_recv.c | 407 ret = ib_post_recv(ic->i_cm_id->qp, &recv->r_wr, &failed_wr); in rds_ib_recv_refill() 659 ret = ib_post_send(ic->i_cm_id->qp, &ic->i_ack_wr, &failed_wr); in rds_ib_send_ack()
|
D | iw_send.c | 743 ret = ib_post_send(ic->i_cm_id->qp, &first->s_send_wr, &failed_wr); in rds_iw_xmit() 959 ret = ib_post_send(ic->i_cm_id->qp, &first->s_rdma_wr.wr, &failed_wr); in rds_iw_xmit_rdma()
|
/linux-4.4.14/arch/powerpc/platforms/cell/spufs/ |
D | file.c | 2157 struct mfc_cq_sr *qp, *spuqp; in __spufs_dma_info_read() local 2166 qp = &info.dma_info_command_data[i]; in __spufs_dma_info_read() 2169 qp->mfc_cq_data0_RW = spuqp->mfc_cq_data0_RW; in __spufs_dma_info_read() 2170 qp->mfc_cq_data1_RW = spuqp->mfc_cq_data1_RW; in __spufs_dma_info_read() 2171 qp->mfc_cq_data2_RW = spuqp->mfc_cq_data2_RW; in __spufs_dma_info_read() 2172 qp->mfc_cq_data3_RW = spuqp->mfc_cq_data3_RW; in __spufs_dma_info_read() 2209 struct mfc_cq_sr *qp, *puqp; in __spufs_proxydma_info_read() local 2223 qp = &info.proxydma_info_command_data[i]; in __spufs_proxydma_info_read() 2226 qp->mfc_cq_data0_RW = puqp->mfc_cq_data0_RW; in __spufs_proxydma_info_read() 2227 qp->mfc_cq_data1_RW = puqp->mfc_cq_data1_RW; in __spufs_proxydma_info_read() [all …]
|
/linux-4.4.14/drivers/scsi/pm8001/ |
D | pm8001_sas.c | 216 uint32_t *qp = (uint32_t *)(((char *) in pm8001_phy_control() local 220 phy->invalid_dword_count = qp[0]; in pm8001_phy_control() 221 phy->running_disparity_error_count = qp[1]; in pm8001_phy_control() 222 phy->loss_of_dword_sync_count = qp[3]; in pm8001_phy_control() 223 phy->phy_reset_problem_count = qp[4]; in pm8001_phy_control()
|
/linux-4.4.14/net/openvswitch/ |
D | flow.c | 311 struct qtag_prefix *qp; in parse_vlan() local 320 qp = (struct qtag_prefix *) skb->data; in parse_vlan() 321 key->eth.tci = qp->tci | htons(VLAN_TAG_PRESENT); in parse_vlan()
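parse_vlan() above reads the 802.1Q tag as a two-field prefix and stores the TCI with the VLAN_TAG_PRESENT bit forced on, so a frame tagged with VID 0 remains distinguishable from an untagged one. Sketch of that read (illustrative; the length and TPID checks are omitted):

    struct qtag_prefix {
            __be16 eth_type;        /* ETH_P_8021Q */
            __be16 tci;
    };

    struct qtag_prefix *qp = (struct qtag_prefix *)skb->data;
    __be16 tci = qp->tci | htons(VLAN_TAG_PRESENT);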
|
/linux-4.4.14/drivers/net/ethernet/chelsio/cxgb4/ |
D | cxgb4_uld.h | 227 struct cxgb4_range qp; member
|
/linux-4.4.14/drivers/net/ethernet/intel/i40e/ |
D | i40e_main.c | 3079 u32 qp; in i40e_vsi_configure_msix() local 3085 qp = vsi->base_queue; in i40e_vsi_configure_msix() 3103 wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), qp); in i40e_vsi_configure_msix() 3110 (qp << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT)| in i40e_vsi_configure_msix() 3114 wr32(hw, I40E_QINT_RQCTL(qp), val); in i40e_vsi_configure_msix() 3119 ((qp+1) << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT)| in i40e_vsi_configure_msix() 3128 wr32(hw, I40E_QINT_TQCTL(qp), val); in i40e_vsi_configure_msix() 3129 qp++; in i40e_vsi_configure_msix() 3961 u32 val, qp; in i40e_vsi_free_irq() local 3994 qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK) in i40e_vsi_free_irq() [all …]
|