Files in linux-4.1.27 that reference the identifier qp, grouped by directory. Each line names the functions and structures in which qp appears; entries ending in … are truncated match listings.

/linux-4.1.27/drivers/infiniband/hw/qib/
  qib_rc.c: start_timer() arms the RC retransmit timer (sets QIB_S_TIMER, points s_timer at rc_timeout(), add_timer()); qib_make_rc_ack() walks qp->s_ack_queue …
  qib_qp.c: insert_qp() takes a reference and RCU-publishes the QP into ibp->qp0/qp1 or the dev->qp_table hash chain …
  qib_uc.c: qib_make_uc_req() checks the QP state under s_lock, handles QIB_S_WAIT_DMA and flushes pending WQEs with qib_send_complete() …
  qib_ruc.c: qib_init_sge() builds qp->r_sge from a receive WQE and reports failures through qib_cq_enter() …
  qib_ud.c: qib_ud_loopback() looks up the destination QP by QPN and validates its type, state and P_Key …
  qib_verbs.c: qib_post_one_send() validates state, SGE count, opcode and rd_atomic limits before queuing the WQE at qp->s_head …
  qib_verbs_mcast.c: qib_mcast_qp_alloc()/qib_mcast_qp_free() hold per-QP multicast references; qib_mcast_add(), qib_multicast_attach() …
  qib_driver.c: qib_rcv_hdrerr() looks up the QP, compares the PSN against qp->r_psn and sets r_nak_state …
  qib_verbs.h: qp struct member; inline get_swqe_ptr() and qib_send_ok(); declarations of qib_schedule_send() and qib_compute_aeth() …
  qib_sdma.c: complete_sdma_err_req() and qib_sdma_verbs_send() track s_dma_busy and call qib_error_qp() on RC errors …
  qib_keys.c: qib_rkey_ok() and qib_fast_reg_mr() validate keys against qp->ibqp.pd
  qib_mad.h: __be32 qp member
  qib_cq.c: qib_cq_enter() records entry->qp->qp_num in the user CQ queue
  qib.h: struct qib_qp *qp member
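The start_timer() lines from qib_rc.c above show the usual pattern for arming a per-QP retransmit timer with the 4.1-era kernel timer API. A minimal sketch of that pattern follows; the struct layout, the flag value and the rc_timeout() body are abbreviations for illustration, not the driver's actual definitions, and the timer is assumed to have been set up with init_timer() when the QP was created.

#include <linux/timer.h>
#include <linux/jiffies.h>

#define DEMO_S_TIMER 0x01		/* illustrative flag value, not the driver's */

struct demo_qp {			/* abbreviated stand-in for struct qib_qp */
	unsigned long s_flags;
	unsigned long timeout_jiffies;	/* retransmit timeout, precomputed in jiffies */
	struct timer_list s_timer;	/* assumed init_timer()'d at QP creation */
};

static void rc_timeout(unsigned long arg)
{
	struct demo_qp *qp = (struct demo_qp *)arg;

	/* the real handler restarts the send state machine; elided here */
	qp->s_flags &= ~DEMO_S_TIMER;
}

static void start_timer(struct demo_qp *qp)
{
	qp->s_flags |= DEMO_S_TIMER;		/* mark the timer as armed */
	qp->s_timer.function = rc_timeout;	/* 4.1-era API: handler takes an unsigned long */
	qp->s_timer.data = (unsigned long)qp;
	qp->s_timer.expires = jiffies + qp->timeout_jiffies;
	add_timer(&qp->s_timer);		/* timer must not already be pending */
}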
/linux-4.1.27/drivers/infiniband/hw/ipath/
  ipath_rc.c: ipath_init_restart() rebuilds s_sge from the WQE and re-queues the QP on dev->timerwait; ipath_make_rc_ack() walks s_ack_queue …
  ipath_qp.c: ipath_alloc_qpn() assigns a QPN and chains the QP into the QP table; ipath_free_qp() unlinks it …
  ipath_uc.c: ipath_make_uc_req() mirrors the qib UC send path (state checks, IPATH_S_WAIT_DMA, flush completions) …
  ipath_ruc.c: ipath_insert_rnr_queue() orders the QP by s_rnr_timeout on dev->rnrwait; ipath_init_sge() validates receive SGEs …
  ipath_ud.c: ipath_ud_loopback() looks up the destination QP, checks state and qkey, then fills its RQ or SRQ …
  ipath_verbs.c: ipath_post_one_send() validates state, SGE count and opcode before queuing the WQE …
  ipath_verbs_mcast.c: ipath_mcast_qp_alloc()/ipath_mcast_qp_free(); ipath_mcast_add(), ipath_multicast_attach(), ipath_multicast_detach() …
  ipath_verbs.h: qp struct members; inline get_swqe_ptr() and ipath_schedule_send() …
  ipath_keys.c: ipath_lkey_ok() and ipath_rkey_ok() validate keys against qp->ibqp.pd
  ipath_cq.c: ipath_cq_enter() records entry->qp->qp_num in the user CQ queue
  ipath_driver.c: ipath_kreceive() extracts the destination QPN from BTH word 1 for debug output
/linux-4.1.27/drivers/ntb/
  ntb_transport.c: qp struct member and the rx/tx handler callback members; QP_TO_MW() maps a queue pair to a memory window; debugfs_read() dumps per-QP rx/tx statistics …
/linux-4.1.27/drivers/infiniband/hw/mthca/
  mthca_qp.c: is_sqp()/is_qp0() test the QPN against qp_table.sqp_start; get_recv_wqe() resolves a WQE address in direct or paged queue buffers …
  mthca_provider.c: mthca_create_qp() allocates the QP and copies doorbell indices and the lkey from the user command …
  mthca_mcg.c: __be32 qp[MTHCA_QP_PER_MGM] member; attach/detach scan the member array, using the high bit of each entry as the occupied flag
  mthca_provider.h: struct mthca_qp qp member; to_msqp() container_of() helper
  mthca_eq.c: packed qp event member; mthca_eq_int() dispatches QP events by QPN to mthca_qp_event()
  mthca_cq.c: handle_error_cqe(), mthca_poll_one() and mthca_poll_cq() resolve the QP for each CQE via dev->qp_table.qp
  mthca_dev.h: struct mthca_array qp member; declarations of mthca_free_err_wqe(), mthca_free_qp() and other QP prototypes
  mthca_mad.c: update_sm_ah() creates an AH on the send agent's qp->pd
  mthca_cmd.c: mthca_MAD_IFC() writes in_wc->qp->qp_num into the MAD_IFC mailbox
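The mthca_mcg.c lines above manipulate a multicast group member block in which each slot holds a big-endian QPN with the top bit used as the "slot occupied" flag. The following sketch restates that encoding over an abbreviated stand-in structure; DEMO_QP_PER_MGM, the helper names and the simplified scan are illustrative, only the cpu_to_be32(qpn | (1 << 31)) encoding is taken from the listing.

#include <linux/types.h>
#include <asm/byteorder.h>

#define DEMO_QP_PER_MGM 8		/* stand-in for MTHCA_QP_PER_MGM */
#define DEMO_MGM_VALID (1u << 31)	/* high bit marks an occupied slot */

struct demo_mgm {			/* abbreviated multicast group member block */
	__be32 qp[DEMO_QP_PER_MGM];
};

/* Return the slot already holding qpn, else the first free slot, else -1. */
static int demo_mgm_find_slot(struct demo_mgm *mgm, u32 qpn)
{
	int free = -1;
	int i;

	for (i = 0; i < DEMO_QP_PER_MGM; ++i) {
		if (mgm->qp[i] == cpu_to_be32(qpn | DEMO_MGM_VALID))
			return i;			/* QP already attached */
		if (!(mgm->qp[i] & cpu_to_be32(DEMO_MGM_VALID)) && free < 0)
			free = i;			/* remember first empty slot */
	}
	return free;
}

static void demo_mgm_attach(struct demo_mgm *mgm, int slot, u32 qpn)
{
	mgm->qp[slot] = cpu_to_be32(qpn | DEMO_MGM_VALID);
}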
/linux-4.1.27/drivers/infiniband/hw/mlx4/
  qp.c: struct mlx4_ib_sqp embeds a qp member; to_msqp(), is_tunnel_qp() and is_sqp() classify QPNs against the proxy and tunnel ranges …
  mad.c: the MAD paths carry the QP through mlx4_MAD_IFC(), update_sm_ah(), mlx4_ib_send_to_slave(), mlx4_ib_demux_mad(), ib_process_mad() and mlx4_ib_post_pv_qp_buf() …
  cq.c: use_tunnel_data(), mlx4_ib_qp_sw_comp() and mlx4_ib_poll_sw_comp() generate software completions per QP send/recv queue …
  main.c: mlx4_ib_query_device() reports quotas.qp as max_qp; flow-steering helpers __mlx4_ib_default_rules_match(), __mlx4_ib_create_default_rules() and __mlx4_ib_create_flow() key rules by the QP number …
  Makefile: qp.o listed among the mlx4_ib objects
  mlx4_ib.h: struct ib_qp *qp member and mlx4_ib_demux_pv_qp qp[2] member; declarations of mlx4_ib_bind_mw() and mlx4_ib_destroy_qp()
  mr.c: mlx4_ib_bind_mw() posts the bind work request on the supplied QP
/linux-4.1.27/net/ipv4/
  ip_fragment.c: ip4_frag_match() compares the reassembly key (id, saddr, daddr, protocol, user); ip4_frag_init() seeds a new queue; ip_frag_reasm() is declared …
  inet_fragment.c: inet_frag_intern() searches the hash chain with f->match(), taking a reference on an existing queue or inserting the new one and arming its timer …
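ip4_frag_match() above decides whether an arriving fragment belongs to an existing reassembly queue by comparing the whole five-part key. The sketch below restates that comparison over abbreviated stand-ins for struct ipq and the lookup argument; the field names follow the listing, the structures are trimmed for illustration.

#include <linux/types.h>

struct demo_frag_key {			/* stand-in for the ip4 lookup argument */
	__be16 id;
	__be32 saddr;
	__be32 daddr;
	u8 protocol;
	u32 user;
};

struct demo_ipq {			/* abbreviated stand-in for struct ipq */
	__be16 id;			/* fragment ID from the IP header */
	__be32 saddr;
	__be32 daddr;
	u8 protocol;
	u32 user;			/* reassembly "user" (which path queued it) */
};

/* A fragment matches a queue only if every component of the key is equal. */
static bool demo_ip4_frag_match(const struct demo_ipq *qp,
				const struct demo_frag_key *key)
{
	return qp->id == key->id &&
	       qp->saddr == key->saddr &&
	       qp->daddr == key->daddr &&
	       qp->protocol == key->protocol &&
	       qp->user == key->user;
}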
/linux-4.1.27/drivers/infiniband/hw/amso1100/
  c2_qp.c: c2_set_qp_state() and c2_qp_modify() drive QP state changes through adapter verbs requests, under qp->lock …
  c2_ae.c: c2_ae_event() maps async events to the owning QP and its iw_cm_id …
  c2_cm.c: c2_llp_connect() and c2_llp_accept() bind the cm_id to the QP and set its read limits …
  c2_provider.c: c2_add_ref()/c2_rem_ref() manage the QP reference count; c2_get_qp() looks a QP up by QPN …
  c2_cq.c: c2_cq_clean() drops CQEs belonging to a QP; c2_poll_one() resolves the QP for each completion
  c2.h: declarations of c2_free_qp(), c2_qp_modify(), c2_qp_set_read_limits() and c2_cq_clean()
  c2_vq.h: struct c2_qp *qp member
  c2_intr.c: handle_vq() updates the QP state from a verbs reply
  c2_vq.c: vq_req_alloc() initialises the request's qp pointer
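c2_add_ref()/c2_rem_ref() above (and the equivalent code in the qib and ipath multicast paths) pair a plain atomic reference count with a wait queue so that teardown can sleep until the last user lets go. A hedged sketch of that pattern follows; the init and destroy helpers are hypothetical additions for context, only the inc/dec-and-wake pair mirrors the listing.

#include <linux/atomic.h>
#include <linux/wait.h>

struct demo_qp {
	atomic_t refcount;
	wait_queue_head_t wait;
};

static void demo_qp_init(struct demo_qp *qp)
{
	atomic_set(&qp->refcount, 1);		/* creation reference, dropped in destroy */
	init_waitqueue_head(&qp->wait);
}

static void demo_qp_add_ref(struct demo_qp *qp)
{
	atomic_inc(&qp->refcount);
}

static void demo_qp_rem_ref(struct demo_qp *qp)
{
	/* wake a destroyer that may be sleeping in demo_qp_destroy() */
	if (atomic_dec_and_test(&qp->refcount))
		wake_up(&qp->wait);
}

/* Hypothetical teardown: wait until every outstanding reference is gone. */
static void demo_qp_destroy(struct demo_qp *qp)
{
	demo_qp_rem_ref(qp);			/* drop the creation reference */
	wait_event(qp->wait, !atomic_read(&qp->refcount));
	/* now safe to free the QP's resources */
}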
/linux-4.1.27/drivers/infiniband/hw/mlx5/
  qp.c: get_wqe()/get_recv_wqe()/mlx5_get_send_wqe() compute WQE addresses from queue offsets; mlx5_ib_read_user_wqe() reads a WQE out of a user QP's umem …
  odp.c: on-demand-paging fault handling around the QP: mlx5_ib_page_fault_resume(), pagefault_single_data_segment(), pagefault_data_segments() …
  mlx5_ib.h: struct ib_qp *qp member; declarations of mlx5_ib_destroy_qp(), mlx5_get_send_wqe(), mlx5_ib_read_user_wqe() and the ODP page-fault hooks …
  cq.c: handle_responder(), is_atomic_response(), mlx5_get_atomic_laddr() and handle_atomic() resolve completions against the QP's work queues …
  main.c: create_umr_res()/destroy_umrc_res() create, ready and tear down the internal UMR QP …
  Makefile: qp.o listed among the mlx5_ib objects
  mr.c: reg_umr(), mlx5_ib_update_mtt() and unreg_umr() post UMR work requests on umrc->qp
/linux-4.1.27/drivers/scsi/bnx2i/
  bnx2i_hwi.c: bnx2i_arm_cq_event_coalescing() and bnx2i_get_rq_buf() operate on the endpoint's ep->qp CQ and RQ bookkeeping …
  bnx2i.h: struct qp_info qp member
/linux-4.1.27/drivers/infiniband/hw/ehca/
  ehca_uverbs.c: ehca_mmap_qp() maps the QP's firmware galpas and its send/receive queues into user space …
  ehca_reqs.c: ehca_write_swqe() builds a send WQE, checking the SGE count against the squeue and handling UD/SMI specifics …
  ehca_irq.c: print_error_data() and dispatch_qp_event() report QP and SRQ async events …
  ehca_iverbs.h: declarations of ehca_bind_mw(), ehca_destroy_qp(), ehca_query_qp(), ehca_post_send(), ehca_post_recv(), ehca_attach_mcast(), ehca_detach_mcast() and ehca_add_to_err_list()
  hipz_fns_core.h: hipz_update_sqa()/hipz_update_rqa() ring the SQ/RQ adder doorbells through the QP galpas
  ehca_cq.c: ehca_cq_assign_qp(), ehca_cq_unassign_qp() and ehca_cq_get_qp() maintain the CQ's QP hash table
  ehca_classes.h: IS_SRQ()/HAS_SQ()/HAS_RQ() macros on qp->ext_type; ehca_cq_assign_qp() declaration
  ehca_qp.c: ehca_add_to_err_list() queues the QP on its CQ error lists; ehca_query_qp() …
  hcp_if.c: hipz_h_destroy_qp() tears down the firmware QP handle
  hcp_if.h: hipz_h_destroy_qp() declaration
/linux-4.1.27/drivers/infiniband/hw/ocrdma/
  ocrdma_verbs.c: ocrdma_add_qpn_map()/ocrdma_del_qpn_map() maintain dev->qp_tbl; ocrdma_copy_qp_uresp() fills the user QP response …
  ocrdma_hw.c: ocrdma_process_qpcat_error() and ocrdma_dispatch_ibevent() translate hardware events into IB QP events …
  ocrdma_main.c: ocrdma_close() walks dev->qp_tbl, moves non-GSI QPs to the error state and raises the fatal event
  ocrdma_verbs.h: ocrdma_del_flush_qp() declaration
/linux-4.1.27/drivers/infiniband/core/
  verbs.c: __ib_shared_qp_event_handler() fans an event out over a shared QP's open_list; __ib_insert_xrcd_qp() and __ib_open_qp() manage XRC target QPs …
  iwcm.c: iwcm_modify_qp_err()/iwcm_modify_qp_sqd() and iw_cm_disconnect() drive the iWARP connection's QP through ib_modify_qp() …
  uverbs_cmd.c: put_qp_read()/put_qp_write() release QP uobjects; copy_wc_to_user() copies wc->qp->qp_num; ib_uverbs_create_qp() …
  mad.c: MAD agents record the port QP in agent.qp; build_smp_wc(), handle_outgoing_dr_smp(), ib_send_mad(), ib_redirect_mad_qp(), ib_mad_recv_done_handler() …
  uverbs_main.c: ib_uverbs_detach_umcast(), ib_uverbs_cleanup_ucontext() (ib_close_qp()/ib_destroy_qp()) and ib_uverbs_qp_event_handler()
  cma.c: cma_init_ud_qp()/cma_init_conn_qp() walk a new QP through INIT, RTR and RTS; rdma_create_qp() …
  core_priv.h: ib_resolve_eth_l2_attrs() declaration
  iwcm.h: struct ib_qp *qp member
  mad_priv.h: struct ib_qp *qp member
  agent.c: agent_send_response() creates an AH on agent->qp->pd
  mad_rmpp.c: alloc_response_msg() and create_rmpp_recv() create AHs on the agent QP's PD
  sa_query.c: update_sm_ah() creates the SM AH on the port agent's qp->pd
  user_mad.c: ib_umad_write() creates an AH on agent->qp->pd
  cm.c: cm_alloc_msg() and cm_alloc_response_msg() create AHs on the MAD agent's qp->pd
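cma.c above shows the verbs-consumer side of QP state management: cma_init_ud_qp() brings a UD queue pair from RESET through INIT and RTR to RTS with successive ib_modify_qp() calls. The sketch below reproduces that sequence for a consumer that fills the attributes itself rather than through rdma_init_qp_attr(); the P_Key index, port and PSN values are placeholders and error unwinding is reduced to early returns.

#include <linux/string.h>
#include <rdma/ib_verbs.h>

/* Bring a freshly created UD QP to the ready-to-send state:
 * RESET -> INIT -> RTR -> RTS.  Values are placeholders, not recommendations. */
static int demo_init_ud_qp(struct ib_qp *qp, u8 port_num, u32 qkey)
{
	struct ib_qp_attr attr;
	int ret;

	memset(&attr, 0, sizeof(attr));
	attr.qp_state = IB_QPS_INIT;
	attr.pkey_index = 0;			/* placeholder P_Key table slot */
	attr.port_num = port_num;
	attr.qkey = qkey;
	ret = ib_modify_qp(qp, &attr,
			   IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_QKEY);
	if (ret)
		return ret;

	attr.qp_state = IB_QPS_RTR;		/* UD needs no path information here */
	ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
	if (ret)
		return ret;

	attr.qp_state = IB_QPS_RTS;
	attr.sq_psn = 0;			/* initial send PSN, placeholder */
	return ib_modify_qp(qp, &attr, IB_QP_STATE | IB_QP_SQ_PSN);
}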
/linux-4.1.27/include/linux/
  ntb.h: NTB transport client API: rx_handler/tx_handler callback members and prototypes for ntb_transport_qp_num(), ntb_transport_max_size(), ntb_transport_free_queue(), ntb_transport_rx_enqueue(), ntb_transport_tx_enqueue(), ntb_transport_rx_remove(), ntb_transport_link_up() and ntb_transport_link_down() …
/linux-4.1.27/drivers/net/ethernet/mellanox/mlx5/core/
  qp.c: mlx5_rsc_event() and mlx5_eq_pagefault() dispatch events and page faults to the QP's handlers; mlx5_core_create_qp() allocates the firmware QP …
  debugfs.c: qp_read_field() queries QP state for debugfs; mlx5_debug_qp_add()/mlx5_debug_qp_remove()
  Makefile: qp.o listed among the mlx5_core objects
/linux-4.1.27/drivers/net/
  ntb_netdev.c: the netdev keeps an ntb_transport_qp; ntb_netdev_rx_handler()/ntb_netdev_tx_handler(), ntb_netdev_start_xmit() and ntb_netdev_open() enqueue buffers and bring the transport link up …
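ntb_netdev.c above is the in-tree consumer of the queue-pair API declared in include/linux/ntb.h (listed earlier): it pre-posts receive buffers, brings the transport link up, and moves frames with the enqueue calls. A condensed sketch of that open/transmit flow follows; the queue pair is assumed to have been created elsewhere with the transport's create call and the client's handler table (both truncated from the header listing), and buffer accounting and error handling are heavily simplified.

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ntb.h>

#define DEMO_RX_RING 64			/* illustrative number of pre-posted buffers */

static int demo_ntb_open(struct net_device *ndev, struct ntb_transport_qp *qp)
{
	struct sk_buff *skb;
	int i, rc;

	/* Pre-post receive buffers before declaring the link up. */
	for (i = 0; i < DEMO_RX_RING; i++) {
		skb = netdev_alloc_skb(ndev, ndev->mtu + ETH_HLEN);
		if (!skb)
			return -ENOMEM;
		rc = ntb_transport_rx_enqueue(qp, skb, skb->data,
					      ndev->mtu + ETH_HLEN);
		if (rc) {
			dev_kfree_skb(skb);
			return rc;
		}
	}

	ntb_transport_link_up(qp);	/* tell the peer we are ready */
	return 0;
}

static netdev_tx_t demo_ntb_xmit(struct sk_buff *skb, struct net_device *ndev,
				 struct ntb_transport_qp *qp)
{
	/* The skb is handed to the transport; the client's tx_handler callback
	 * releases it once the copy across the NTB window has completed. */
	if (ntb_transport_tx_enqueue(qp, skb, skb->data, skb->len)) {
		ndev->stats.tx_errors++;
		return NETDEV_TX_BUSY;
	}
	return NETDEV_TX_OK;
}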
/linux-4.1.27/drivers/net/ethernet/ibm/ehea/
  ehea_qmr.c: ehea_qp_alloc_register() and ehea_create_qp() allocate and register the firmware QP and its queues …
  ehea_hw.h: ehea_update_sqa()/ehea_update_rq1a()/ehea_update_rq2a()/ehea_update_rq3a() ring the queue adder doorbells through the QP's EPAs
  ehea_qmr.h: ehea_get_next_rwqe(), ehea_poll_rq1() and ehea_inc_rq1() walk the receive queues; ehea_destroy_qp() declaration
  ehea_main.c: ehea_update_firmware_handles(), ehea_refill_rq1()/ehea_refill_rq_def() and ehea_proc_rwqes() drive pr->qp in the data path …
  ehea.h: struct ehea_qp *qp member
/linux-4.1.27/drivers/net/ethernet/mellanox/mlx4/
  qp.c: mlx4_qp_event() looks up the QP, takes a reference and dispatches the event; is_master_qp0() classifies proxy QP0 numbers …
  mcg.c: multicast and steering entries store member QPNs in mgm->qp[]; add_promisc_qp(), remove_promisc_qp(), mlx4_qp_attach_common() …
  resource_tracker.c: mlx4_init_quotas(), qp_res_start_move_to() and mlx4_RST2INIT_QP_wrapper() track per-slave QP resources …
  Makefile: qp.o listed among the mlx4_core objects
  en_tx.c: mlx4_en_create_tx_ring()/mlx4_en_destroy_tx_ring() and mlx4_en_activate_tx_ring()/mlx4_en_deactivate_tx_ring() manage the TX ring's QP
  en_resources.c: mlx4_en_sqp_event() QP event callback
  en_rx.c: mlx4_en_config_rss_qp() allocates an RSS QP and brings it to ready
  mlx4.h: __be32 qp[MLX4_MAX_QP_PER_MGM] member; declarations of mlx4_qp_attach_common(), mlx4_qp_detach_common() and mlx4_trans_to_dmfs_attach()
  mlx4_en.h: struct mlx4_qp qp member; mlx4_en_sqp_event() declaration
  en_netdev.c: mlx4_en_uc_steer_add()/mlx4_en_uc_steer_release() attach and detach a unicast steering QP
  fw.c: mlx4_opreq_action() detaches or attaches the QPs listed in a firmware MGM operation request
  eq.c: mlx4_eq_int() dispatches QP events by QPN to mlx4_qp_event()
/linux-4.1.27/drivers/infiniband/hw/cxgb4/
  device.c: dump_qp() prints QP state and endpoint addresses for debugfs …
  resource.c: c4iw_init_qid_table() seeds the QID allocator from lldi.vr->qp
  cm.c: ref_qp()/deref_qp() and the MPA and connection paths (process_mpa_reply(), rx_data(), c4iw_reconnect()) modify the connection's QP …
  Makefile: qp.o listed among the iw_cxgb4 objects
  iw_cxgb4.h: struct c4iw_qp *qp member; declarations of c4iw_bind_mw(), c4iw_qp_add_ref() and c4iw_qp_rem_ref()
  qp.c: c4iw_qp_add_ref()/c4iw_qp_rem_ref() QP reference counting (wake_up() on the last put); c4iw_bind_mw()
  ev.c: post_qp_event() raises an IB event for the QP
  provider.c: c4iw_query_device() derives max_qp and max_cq from lldi.vr->qp.size
  cq.c: c4iw_poll_cq_one() sets wc->qp to the owning QP
/linux-4.1.27/drivers/infiniband/ulp/ipoib/ |
D | ipoib_verbs.c | 60 ret = ib_modify_qp(priv->qp, qp_attr, IB_QP_QKEY); in ipoib_mcast_attach() 68 ret = ib_attach_mcast(priv->qp, mgid, mlid); in ipoib_mcast_attach() 96 ret = ib_modify_qp(priv->qp, &qp_attr, attr_mask); in ipoib_init_qp() 105 ret = ib_modify_qp(priv->qp, &qp_attr, attr_mask); in ipoib_init_qp() 115 ret = ib_modify_qp(priv->qp, &qp_attr, attr_mask); in ipoib_init_qp() 125 if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE)) in ipoib_init_qp() 213 priv->qp = ib_create_qp(priv->pd, &init_attr); in ipoib_transport_dev_init() 214 if (IS_ERR(priv->qp)) { in ipoib_transport_dev_init() 219 priv->dev->dev_addr[1] = (priv->qp->qp_num >> 16) & 0xff; in ipoib_transport_dev_init() 220 priv->dev->dev_addr[2] = (priv->qp->qp_num >> 8) & 0xff; in ipoib_transport_dev_init() [all …]
|
D | ipoib_cm.c | 128 ret = ib_post_recv(rx->qp, wr, &bad_wr); in ipoib_cm_post_receive_nonsrq() 226 if (ib_post_send(p->qp, &ipoib_cm_rx_drain_wr, &bad_wr)) in ipoib_cm_start_rx_drain() 273 struct ib_cm_id *cm_id, struct ib_qp *qp, in ipoib_cm_modify_rx_qp() argument 286 ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask); in ipoib_cm_modify_rx_qp() 298 ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask); in ipoib_cm_modify_rx_qp() 318 ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask); in ipoib_cm_modify_rx_qp() 420 struct ib_qp *qp, struct ib_cm_req_event_param *req, in ipoib_cm_send_rep() argument 427 data.qpn = cpu_to_be32(priv->qp->qp_num); in ipoib_cm_send_rep() 435 rep.qp_num = qp->qp_num; in ipoib_cm_send_rep() 459 p->qp = ipoib_cm_create_rx_qp(dev, p); in ipoib_cm_req_handler() [all …]
|
D | ipoib_ib.c | 113 ret = ib_post_recv(priv->qp, &priv->rx_wr, &bad_wr); in ipoib_ib_post_receive() 210 if (wc->slid == priv->local_lid && wc->src_qp == priv->qp->qp_num) in ipoib_ib_handle_rx_wc() 345 ret = ib_query_qp(priv->qp, &qp_attr, IB_QP_STATE, &query_init_attr); in ipoib_qp_state_validate_work() 352 __func__, priv->qp->qp_num, qp_attr.qp_state); in ipoib_qp_state_validate_work() 358 ret = ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE); in ipoib_qp_state_validate_work() 361 ret, priv->qp->qp_num); in ipoib_qp_state_validate_work() 365 __func__, priv->qp->qp_num); in ipoib_qp_state_validate_work() 368 priv->qp->qp_num, qp_attr.qp_state); in ipoib_qp_state_validate_work() 414 __func__, priv->qp->qp_num); in ipoib_ib_handle_tx_wc() 547 return ib_post_send(priv->qp, &priv->tx_wr, &bad_wr); in post_send() [all …]
|
D | ipoib.h | 226 struct ib_qp *qp; member 237 struct ib_qp *qp; member 348 struct ib_qp *qp; member
|
D | ipoib_multicast.c | 670 ret = ib_detach_mcast(priv->qp, &mcast->mcmember.mgid, in ipoib_mcast_leave()
|
/linux-4.1.27/drivers/misc/vmw_vmci/ |
D | vmci_queue_pair.c | 221 struct qp_entry qp; member 239 struct qp_entry qp; member 914 struct qp_entry *qp = qp_list_find(&qp_guest_endpoints, handle); in qp_guest_handle_to_entry() local 916 entry = qp ? container_of( in qp_guest_handle_to_entry() 917 qp, struct qp_guest_endpoint, qp) : NULL; in qp_guest_handle_to_entry() 928 struct qp_entry *qp = qp_list_find(&qp_broker_list, handle); in qp_broker_handle_to_entry() local 930 entry = qp ? container_of( in qp_broker_handle_to_entry() 931 qp, struct qp_broker_entry, qp) : NULL; in qp_broker_handle_to_entry() 986 entry->qp.peer = peer; in qp_guest_endpoint_create() 987 entry->qp.flags = flags; in qp_guest_endpoint_create() [all …]
|
/linux-4.1.27/drivers/scsi/lpfc/ |
D | lpfc_debugfs.c | 2002 struct lpfc_queue *qp = NULL; in lpfc_idiag_queinfo_read() local 2024 qp = phba->sli4_hba.hba_eq[x]; in lpfc_idiag_queinfo_read() 2025 if (!qp) in lpfc_idiag_queinfo_read() 2033 qp->q_cnt_1, qp->q_cnt_2, in lpfc_idiag_queinfo_read() 2034 qp->q_cnt_3, (unsigned long long)qp->q_cnt_4); in lpfc_idiag_queinfo_read() 2041 qp->queue_id, in lpfc_idiag_queinfo_read() 2042 qp->entry_count, in lpfc_idiag_queinfo_read() 2043 qp->entry_size, in lpfc_idiag_queinfo_read() 2044 qp->host_index, in lpfc_idiag_queinfo_read() 2045 qp->hba_index); in lpfc_idiag_queinfo_read() [all …]
|
/linux-4.1.27/lib/mpi/ |
D | mpih-div.c | 58 mpihelp_divrem(mpi_ptr_t qp, mpi_size_t qextra_limbs, in mpihelp_divrem() argument 87 qp += qextra_limbs; in mpihelp_divrem() 89 udiv_qrnnd(qp[i], n1, n1, np[i], d); in mpihelp_divrem() 90 qp -= qextra_limbs; in mpihelp_divrem() 93 udiv_qrnnd(qp[i], n1, n1, 0, d); in mpihelp_divrem() 135 qp[i] = q; in mpihelp_divrem() 156 qp[i] = q; in mpihelp_divrem() 229 qp[i] = q; in mpihelp_divrem()
|
D | mpi-internal.h | 224 mpi_limb_t mpihelp_divrem(mpi_ptr_t qp, mpi_size_t qextra_limbs,
|
/linux-4.1.27/drivers/net/ethernet/sun/ |
D | sunqe.h | 298 #define TX_BUFFS_AVAIL(qp) \ argument 299 (((qp)->tx_old <= (qp)->tx_new) ? \ 300 (qp)->tx_old + (TX_RING_SIZE - 1) - (qp)->tx_new : \ 301 (qp)->tx_old - (qp)->tx_new - 1)
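TX_BUFFS_AVAIL() computes the free descriptors in a producer/consumer ring while always keeping one slot unused, so that tx_old == tx_new can only mean "empty". A self-contained sketch of the same arithmetic, with a ring size of 16 assumed purely for the demo:

/* Sketch of the TX_BUFFS_AVAIL() arithmetic from sunqe.h. */
#include <stdio.h>

#define TX_RING_SIZE 16

struct ring { int tx_old, tx_new; };	/* tx_old = next to reclaim, tx_new = next to fill */

static int tx_buffs_avail(const struct ring *r)
{
	return (r->tx_old <= r->tx_new)
		? r->tx_old + (TX_RING_SIZE - 1) - r->tx_new
		: r->tx_old - r->tx_new - 1;
}

int main(void)
{
	struct ring empty = { 0, 0 };	/* nothing queued: 15 usable slots */
	struct ring wrap  = { 5, 3 };	/* producer has wrapped past the end */

	printf("%d %d\n", tx_buffs_avail(&empty), tx_buffs_avail(&wrap));	/* 15 1 */
	return 0;
}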
|
D | sunhme.c | 2139 struct quattro *qp = (struct quattro *) cookie; in quattro_sbus_interrupt() local 2143 struct net_device *dev = qp->happy_meals[i]; in quattro_sbus_interrupt() 2558 struct quattro *qp; in quattro_sbus_find() local 2561 qp = platform_get_drvdata(op); in quattro_sbus_find() 2562 if (qp) in quattro_sbus_find() 2563 return qp; in quattro_sbus_find() 2565 qp = kmalloc(sizeof(struct quattro), GFP_KERNEL); in quattro_sbus_find() 2566 if (qp != NULL) { in quattro_sbus_find() 2570 qp->happy_meals[i] = NULL; in quattro_sbus_find() 2572 qp->quattro_dev = child; in quattro_sbus_find() [all …]
|
D | sunqe.c | 937 struct sunqe *qp = platform_get_drvdata(op); in qec_sbus_remove() local 938 struct net_device *net_dev = qp->dev; in qec_sbus_remove() 942 of_iounmap(&op->resource[0], qp->qcregs, CREG_REG_SIZE); in qec_sbus_remove() 943 of_iounmap(&op->resource[1], qp->mregs, MREGS_REG_SIZE); in qec_sbus_remove() 945 qp->qe_block, qp->qblock_dvma); in qec_sbus_remove() 947 qp->buffers, qp->buffers_dvma); in qec_sbus_remove()
|
/linux-4.1.27/drivers/scsi/sym53c8xx_2/ |
D | sym_misc.h | 159 #define FOR_EACH_QUEUED_ELEMENT(head, qp) \ argument 160 for (qp = (head)->flink; qp != (head); qp = qp->flink)
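FOR_EACH_QUEUED_ELEMENT() walks a circular queue whose head is a sentinel node: iteration follows ->flink until it returns to the head. A minimal sketch of that idiom, with an illustrative node type rather than the driver's real SYM_QUEHEAD/ccb layout:

#include <stdio.h>

struct quehead { struct quehead *flink; };

#define FOR_EACH_QUEUED_ELEMENT(head, qp) \
	for ((qp) = (head)->flink; (qp) != (head); (qp) = (qp)->flink)

int main(void)
{
	struct quehead head, a, b;
	struct quehead *qp;
	int n = 0;

	/* circular forward links: head -> a -> b -> head */
	head.flink = &a;
	a.flink = &b;
	b.flink = &head;

	FOR_EACH_QUEUED_ELEMENT(&head, qp)
		n++;

	printf("queued elements: %d\n", n);	/* prints 2 */
	return 0;
}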
|
D | sym_hipd.c | 1542 SYM_QUEHEAD *qp; local 1556 qp = sym_remque_head(&lp->waiting_ccbq); 1557 if (!qp) 1559 cp = sym_que_entry(qp, struct sym_ccb, link2_ccbq); 1563 sym_insque_head(qp, &lp->waiting_ccbq); 1572 sym_insque_head(qp, &lp->waiting_ccbq); 1581 sym_insque_tail(qp, &lp->started_ccbq); 1643 SYM_QUEHEAD *qp; local 1646 while ((qp = sym_remque_head(&np->comp_ccbq)) != NULL) { 1648 cp = sym_que_entry(qp, struct sym_ccb, link_ccbq); [all …]
|
D | sym_glue.c | 598 SYM_QUEHEAD *qp; in sym_eh_handler() local 637 FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) { in sym_eh_handler() 638 struct sym_ccb *cp = sym_que_entry(qp, struct sym_ccb, link_ccbq); in sym_eh_handler()
|
/linux-4.1.27/arch/ia64/kernel/ |
D | kprobes.c | 194 int qp; in unsupported_inst() local 196 qp = kprobe_inst & 0x3f; in unsupported_inst() 198 if (slot == 1 && qp) { in unsupported_inst() 205 qp = 0; in unsupported_inst() 237 if (slot == 1 && qp) { in unsupported_inst() 243 qp = 0; in unsupported_inst() 272 if (slot == 1 && qp) { in unsupported_inst() 279 qp = 0; in unsupported_inst() 284 if (slot == 1 && qp) { in unsupported_inst() 290 qp = 0; in unsupported_inst() [all …]
|
D | unwind_decoder.c | 122 unsigned char byte1, byte2, abreg, qp; in unw_decode_x3() local 129 qp = (byte1 & 0x3f); in unw_decode_x3() 133 UNW_DEC_SPILL_SPREL_P(X3, qp, t, abreg, off, arg); in unw_decode_x3() 135 UNW_DEC_SPILL_PSPREL_P(X3, qp, t, abreg, off, arg); in unw_decode_x3() 142 unsigned char byte1, byte2, byte3, qp, abreg, x, ytreg; in unw_decode_x4() local 148 qp = (byte1 & 0x3f); in unw_decode_x4() 154 UNW_DEC_RESTORE_P(X4, qp, t, abreg, arg); in unw_decode_x4() 156 UNW_DEC_SPILL_REG_P(X4, qp, t, abreg, x, ytreg, arg); in unw_decode_x4()
|
D | brl_emu.c | 58 unsigned long opcode, btype, qp, offset, cpl; in ia64_emulate_brl() local 83 qp = ((bundle[1] >> 23) & 0x3f); in ia64_emulate_brl() 89 tmp_taken = regs->pr & (1L << qp); in ia64_emulate_brl()
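The kprobes, unwind and brl_emu code above all extract the IA-64 6-bit qualifying predicate (qp) and, where needed, test the matching bit of the predicate register to decide whether the instruction takes effect. A small sketch of that extract-and-test step, assuming the bit position 23 shown in the brl_emu.c excerpt and made-up sample values:

#include <stdio.h>

int main(void)
{
	unsigned long slot1 = 0x1aUL << 23;	/* pretend instruction word, qp field = 0x1a */
	unsigned long pr    = 1UL << 0x1a;	/* predicate register file with p26 set      */

	unsigned int qp = (slot1 >> 23) & 0x3f;		/* 6-bit predicate register number   */
	int taken = (pr & (1UL << qp)) != 0;		/* instruction executes only if set  */

	printf("qp=%u taken=%d\n", qp, taken);
	return 0;
}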
|
D | paravirt_patch.c | 31 unsigned long long qp : 6; member 355 unsigned long qp: 6; member 414 unsigned long qp: 6; member
|
D | unwind.c | 1071 desc_is_active (unsigned char qp, unw_word t, struct unw_state_record *sr) in desc_is_active() argument 1075 if (qp > 0) { in desc_is_active() 1076 if ((sr->pr_val & (1UL << qp)) == 0) in desc_is_active() 1078 sr->pr_mask |= (1UL << qp); in desc_is_active() 1084 desc_restore_p (unsigned char qp, unw_word t, unsigned char abreg, struct unw_state_record *sr) in desc_restore_p() argument 1088 if (!desc_is_active(qp, t, sr)) in desc_restore_p() 1098 desc_spill_reg_p (unsigned char qp, unw_word t, unsigned char abreg, unsigned char x, in desc_spill_reg_p() argument 1104 if (!desc_is_active(qp, t, sr)) in desc_spill_reg_p() 1119 desc_spill_psprel_p (unsigned char qp, unw_word t, unsigned char abreg, unw_word pspoff, in desc_spill_psprel_p() argument 1124 if (!desc_is_active(qp, t, sr)) in desc_spill_psprel_p() [all …]
|
D | unaligned.c | 166 unsigned long qp:6; /* [0:5] */ member 1385 "ld.x6=0x%x ld.m=%d ld.op=%d\n", opcode, u.insn.qp, u.insn.r1, u.insn.imm, in ia64_handle_unaligned()
|
/linux-4.1.27/drivers/crypto/ |
D | n2_core.c | 233 static inline bool n2_should_run_async(struct spu_queue *qp, int this_len) 236 qp->head != qp->tail) 477 static unsigned long wait_for_tail(struct spu_queue *qp) in wait_for_tail() argument 482 hv_ret = sun4v_ncs_gethead(qp->qhandle, &head); in wait_for_tail() 487 if (head == qp->tail) { in wait_for_tail() 488 qp->head = head; in wait_for_tail() 495 static unsigned long submit_and_wait_for_tail(struct spu_queue *qp, in submit_and_wait_for_tail() argument 498 unsigned long hv_ret = spu_queue_submit(qp, ent); in submit_and_wait_for_tail() 501 hv_ret = wait_for_tail(qp); in submit_and_wait_for_tail() 514 struct spu_queue *qp; in n2_do_async_digest() local [all …]
|
/linux-4.1.27/include/rdma/ |
D | ib_verbs.h | 419 struct ib_qp *qp; member 701 struct ib_qp *qp; member 1406 struct ib_qp *qp; member 1549 int (*modify_qp)(struct ib_qp *qp, 1553 int (*query_qp)(struct ib_qp *qp, 1557 int (*destroy_qp)(struct ib_qp *qp); 1558 int (*post_send)(struct ib_qp *qp, 1561 int (*post_recv)(struct ib_qp *qp, 1619 int (*bind_mw)(struct ib_qp *qp, 1631 int (*attach_mcast)(struct ib_qp *qp, [all …]
|
D | iw_cm.h | 105 void (*add_ref)(struct ib_qp *qp); 107 void (*rem_ref)(struct ib_qp *qp); 159 void iw_cm_unbind_qp(struct iw_cm_id *cm_id, struct ib_qp *qp);
|
D | ib_mad.h | 368 struct ib_qp *qp; member 582 struct ib_mad_agent *ib_redirect_mad_qp(struct ib_qp *qp,
|
D | rdma_cm.h | 150 struct ib_qp *qp; member
|
/linux-4.1.27/drivers/infiniband/hw/cxgb3/ |
D | iwch_cm.c | 681 ep->com.qp = NULL; in close_complete_upcall() 713 ep->com.qp = NULL; in peer_abort_upcall() 742 ep->com.qp = NULL; in connect_reply_upcall() 925 err = iwch_modify_qp(ep->com.qp->rhp, in process_mpa_reply() 926 ep->com.qp, mask, &attrs, 1); in process_mpa_reply() 930 if (peer2peer && iwch_rqes_posted(ep->com.qp) == 0) { in process_mpa_reply() 1484 iwch_modify_qp(ep->com.qp->rhp, ep->com.qp, in peer_close() 1497 if (ep->com.cm_id && ep->com.qp) { in peer_close() 1499 iwch_modify_qp(ep->com.qp->rhp, ep->com.qp, in peer_close() 1592 if (ep->com.cm_id && ep->com.qp) { in peer_abort() [all …]
|
D | iwch_provider.h | 182 void iwch_qp_add_ref(struct ib_qp *qp); 183 void iwch_qp_rem_ref(struct ib_qp *qp); 331 int iwch_bind_mw(struct ib_qp *qp,
|
D | iwch_cm.h | 156 struct iwch_qp *qp; member
|
D | iwch_provider.c | 1085 void iwch_qp_add_ref(struct ib_qp *qp) in iwch_qp_add_ref() argument 1087 PDBG("%s ib_qp %p\n", __func__, qp); in iwch_qp_add_ref() 1088 atomic_inc(&(to_iwch_qp(qp)->refcnt)); in iwch_qp_add_ref() 1091 void iwch_qp_rem_ref(struct ib_qp *qp) in iwch_qp_rem_ref() argument 1093 PDBG("%s ib_qp %p\n", __func__, qp); in iwch_qp_rem_ref() 1094 if (atomic_dec_and_test(&(to_iwch_qp(qp)->refcnt))) in iwch_qp_rem_ref() 1095 wake_up(&(to_iwch_qp(qp)->wait)); in iwch_qp_rem_ref()
|
D | iwch_cq.c | 82 wc->qp = &qhp->ibqp; in iwch_poll_cq_one()
|
D | iwch_ev.c | 93 event.element.qp = &qhp->ibqp; in post_qp_event()
|
D | iwch_qp.c | 528 int iwch_bind_mw(struct ib_qp *qp, in iwch_bind_mw() argument 546 qhp = to_iwch_qp(qp); in iwch_bind_mw() 768 return iwch_cxgb3_ofld_send(ep->com.qp->rhp->rdev.t3cdev_p, skb); in iwch_post_zb_read()
|
/linux-4.1.27/drivers/scsi/ |
D | qlogicpti.h | 504 #define for_each_qlogicpti(qp) \ argument 505 for((qp) = qptichain; (qp); (qp) = (qp)->next)
|
D | ncr53c8xx.c | 4430 struct list_head *qp; in ncr_start_next_ccb() local 4437 qp = ncr_list_pop(&lp->wait_ccbq); in ncr_start_next_ccb() 4438 if (!qp) in ncr_start_next_ccb() 4441 cp = list_entry(qp, struct ccb, link_ccbq); in ncr_start_next_ccb() 4442 list_add_tail(qp, &lp->busy_ccbq); in ncr_start_next_ccb() 6546 struct list_head *qp; in ncr_sir_to_redo() local 6560 qp = lp->busy_ccbq.prev; in ncr_sir_to_redo() 6561 while (qp != &lp->busy_ccbq) { in ncr_sir_to_redo() 6562 cp2 = list_entry(qp, struct ccb, link_ccbq); in ncr_sir_to_redo() 6563 qp = qp->prev; in ncr_sir_to_redo() [all …]
|
/linux-4.1.27/include/linux/mlx5/ |
D | qp.h | 595 struct mlx5_core_qp *qp, 601 struct mlx5_core_qp *qp); 603 struct mlx5_core_qp *qp); 604 int mlx5_core_qp_query(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp, 611 int mlx5_debug_qp_add(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp); 612 void mlx5_debug_qp_remove(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp);
|
/linux-4.1.27/arch/sparc/kernel/ |
D | ds.c | 995 struct ds_queue_entry *qp, *tmp; in process_ds_work() local 1003 list_for_each_entry_safe(qp, tmp, &todo, list) { in process_ds_work() 1004 struct ds_data *dpkt = (struct ds_data *) qp->req; in process_ds_work() 1005 struct ds_info *dp = qp->dp; in process_ds_work() 1007 int req_len = qp->req_len; in process_ds_work() 1021 list_del(&qp->list); in process_ds_work() 1022 kfree(qp); in process_ds_work() 1048 struct ds_queue_entry *qp; in ds_data() local 1050 qp = kmalloc(sizeof(struct ds_queue_entry) + len, GFP_ATOMIC); in ds_data() 1051 if (!qp) { in ds_data() [all …]
|
/linux-4.1.27/drivers/infiniband/ulp/iser/ |
D | iser_verbs.c | 508 ib_conn->qp = ib_conn->cma_id->qp; in iser_create_ib_conn_res() 511 ib_conn->cma_id->qp); in iser_create_ib_conn_res() 626 iser_conn, ib_conn->cma_id, ib_conn->qp); in iser_free_ib_conn_res() 628 if (ib_conn->qp != NULL) { in iser_free_ib_conn_res() 631 ib_conn->qp = NULL; in iser_free_ib_conn_res() 712 err = ib_post_send(ib_conn->qp, &ib_conn->beacon, &bad_wr); in iser_conn_terminate() 835 (void)ib_query_qp(cma_id->qp, &attr, ~0, &init_attr); in iser_connected_handler() 836 iser_info("remote qpn:%x my qpn:%x\n", attr.dest_qp_num, cma_id->qp->qp_num); in iser_connected_handler() 1020 ib_ret = ib_post_recv(ib_conn->qp, &rx_wr, &rx_wr_failed); in iser_post_recvl() 1049 ib_ret = ib_post_recv(ib_conn->qp, ib_conn->rx_wr, &rx_wr_failed); in iser_post_recvm() [all …]
|
D | iscsi_iser.h | 432 struct ib_qp *qp; member
|
D | iser_memory.c | 693 ret = ib_post_send(ib_conn->qp, wr, &bad_wr); in iser_reg_sig_mr() 769 ret = ib_post_send(ib_conn->qp, wr, &bad_wr); in iser_fast_reg_mr()
|
/linux-4.1.27/net/9p/ |
D | trans_rdma.c | 95 struct ib_qp *qp; member 388 if (rdma->qp && !IS_ERR(rdma->qp)) in rdma_destroy_trans() 389 ib_destroy_qp(rdma->qp); in rdma_destroy_trans() 425 return ib_post_recv(rdma->qp, &wr, &bad_wr); in post_recv() 529 err = ib_post_send(rdma->qp, &wr, &bad_wr); in rdma_request() 746 rdma->qp = rdma->cm_id->qp; in rdma_create_trans()
|
/linux-4.1.27/include/linux/mlx4/ |
D | qp.h | 459 int sqd_event, struct mlx4_qp *qp); 461 int mlx4_qp_query(struct mlx4_dev *dev, struct mlx4_qp *qp, 466 struct mlx4_qp *qp, enum mlx4_qp_state *qp_state); 473 void mlx4_qp_remove(struct mlx4_dev *dev, struct mlx4_qp *qp);
|
D | device.h | 786 int qp; member 852 } __packed qp; member 1069 int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp, 1071 void mlx4_qp_free(struct mlx4_dev *dev, struct mlx4_qp *qp); 1082 int mlx4_unicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], 1084 int mlx4_unicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], 1086 int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], 1089 int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
|
/linux-4.1.27/drivers/infiniband/hw/usnic/ |
D | usnic_ib_verbs.h | 30 int usnic_ib_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr, 44 int usnic_ib_destroy_qp(struct ib_qp *qp);
|
D | usnic_ib_verbs.c | 351 int usnic_ib_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr, in usnic_ib_query_qp() argument 364 qp_grp = to_uqp_grp(qp); in usnic_ib_query_qp() 524 int usnic_ib_destroy_qp(struct ib_qp *qp) in usnic_ib_destroy_qp() argument 531 qp_grp = to_uqp_grp(qp); in usnic_ib_destroy_qp()
|
D | usnic_ib_qp_grp.c | 478 ib_event.element.qp = &qp_grp->ibqp; in usnic_ib_qp_grp_modify()
|
/linux-4.1.27/drivers/atm/ |
D | firestream.c | 631 static int qp; variable 651 pq[qp].cmd = cmd; in submit_queue() 652 pq[qp].p0 = p1; in submit_queue() 653 pq[qp].p1 = p2; in submit_queue() 654 pq[qp].p2 = p3; in submit_queue() 655 qp++; in submit_queue() 656 if (qp >= 60) qp = 0; in submit_queue() 1950 i, pq[qp].cmd, pq[qp].p0, pq[qp].p1, pq[qp].p2); in firestream_remove_one() 1951 qp++; in firestream_remove_one() 1952 if (qp >= 60) qp = 0; in firestream_remove_one()
|
/linux-4.1.27/Documentation/RCU/ |
D | trace.txt | 59 …0!c=30455 g=30456 pq=1/0 qp=1 dt=126535/140000000000000/0 df=2002 of=4 ql=0/0 qs=N... b=10 ci=7457… 60 …1!c=30719 g=30720 pq=1/0 qp=0 dt=132007/140000000000000/0 df=1874 of=10 ql=0/0 qs=N... b=10 ci=123… 61 …2!c=30150 g=30151 pq=1/1 qp=1 dt=138537/140000000000000/0 df=1707 of=8 ql=0/0 qs=N... b=10 ci=8013… 62 …3 c=31249 g=31250 pq=1/1 qp=0 dt=107255/140000000000000/0 df=1749 of=6 ql=0/450 qs=NRW. b=10 ci=15… 63 …4!c=29502 g=29503 pq=1/0 qp=1 dt=83647/140000000000000/0 df=965 of=5 ql=0/0 qs=N... b=10 ci=65643 … 64 …5 c=31201 g=31202 pq=1/0 qp=1 dt=70422/0/0 df=535 of=7 ql=0/0 qs=.... b=10 ci=58500 nci=0 co=764 c… 65 …6!c=30253 g=30254 pq=1/0 qp=1 dt=95363/140000000000000/0 df=780 of=5 ql=0/0 qs=N... b=10 ci=100607… 66 …7 c=31178 g=31178 pq=1/0 qp=0 dt=91536/0/0 df=547 of=4 ql=0/0 qs=.... b=10 ci=109819 nci=0 co=1115… 101 o "qp" indicates that RCU still expects a quiescent state from 103 well have qp=1, which is OK: RCU is still ignoring them. [all …]
|
/linux-4.1.27/drivers/media/pci/solo6x10/ |
D | solo6x10-enc.c | 182 unsigned int qp) in solo_s_jpeg_qp() argument 187 if ((ch > 31) || (qp > 3)) in solo_s_jpeg_qp() 206 solo_dev->jpeg_qp[idx] |= (qp & 3) << ch; in solo_s_jpeg_qp()
|
D | solo6x10.h | 157 u8 mode, gop, qp, interlaced, interval; member 402 unsigned int qp);
|
D | solo6x10-v4l2-enc.c | 261 solo_reg_write(solo_dev, SOLO_VE_CH_QP(ch), solo_enc->qp); in solo_enc_on() 266 solo_reg_write(solo_dev, SOLO_VE_CH_QP_E(ch), solo_enc->qp); in solo_enc_on() 1097 solo_enc->qp = ctrl->val; in solo_s_ctrl() 1098 solo_reg_write(solo_dev, SOLO_VE_CH_QP(solo_enc->ch), solo_enc->qp); in solo_s_ctrl() 1099 solo_reg_write(solo_dev, SOLO_VE_CH_QP_E(solo_enc->ch), solo_enc->qp); in solo_s_ctrl() 1283 solo_enc->qp = SOLO_DEFAULT_QP; in solo_enc_alloc()
|
/linux-4.1.27/mm/ |
D | mempolicy.c | 490 struct queue_pages *qp = walk->private; in queue_pages_pte_range() local 491 unsigned long flags = qp->flags; in queue_pages_pte_range() 514 if (node_isset(nid, *qp->nmask) == !!(flags & MPOL_MF_INVERT)) in queue_pages_pte_range() 518 migrate_page_add(page, qp->pagelist, flags); in queue_pages_pte_range() 530 struct queue_pages *qp = walk->private; in queue_pages_hugetlb() local 531 unsigned long flags = qp->flags; in queue_pages_hugetlb() 543 if (node_isset(nid, *qp->nmask) == !!(flags & MPOL_MF_INVERT)) in queue_pages_hugetlb() 548 isolate_huge_page(page, qp->pagelist); in queue_pages_hugetlb() 590 struct queue_pages *qp = walk->private; in queue_pages_test_walk() local 592 unsigned long flags = qp->flags; in queue_pages_test_walk() [all …]
|
/linux-4.1.27/drivers/infiniband/ulp/srp/ |
D | ib_srp.c | 260 struct ib_qp *qp) in srp_init_qp() argument 281 ret = ib_modify_qp(qp, attr, in srp_init_qp() 476 ret = ib_modify_qp(ch->qp, &attr, IB_QP_STATE); in srp_destroy_qp() 482 ret = ib_post_recv(ch->qp, &wr, &bad_wr); in srp_destroy_qp() 488 ib_destroy_qp(ch->qp); in srp_destroy_qp() 497 struct ib_qp *qp; in srp_create_ch_ib() local 534 qp = ib_create_qp(dev->pd, init_attr); in srp_create_ch_ib() 535 if (IS_ERR(qp)) { in srp_create_ch_ib() 536 ret = PTR_ERR(qp); in srp_create_ch_ib() 540 ret = srp_init_qp(target, qp); in srp_create_ch_ib() [all …]
|
D | ib_srp.h | 147 struct ib_qp *qp; member
|
/linux-4.1.27/drivers/infiniband/ulp/srpt/ |
D | ib_srpt.c | 469 ah = ib_create_ah_from_wc(mad_agent->qp->pd, mad_wc->wc, in srpt_mad_recv_handler() 830 ret = ib_post_send(ch->qp, &wr, &bad_wr); in srpt_post_send() 951 static int srpt_init_ch_qp(struct srpt_rdma_ch *ch, struct ib_qp *qp) in srpt_init_ch_qp() argument 966 ret = ib_modify_qp(qp, attr, in srpt_init_ch_qp() 985 static int srpt_ch_qp_rtr(struct srpt_rdma_ch *ch, struct ib_qp *qp) in srpt_ch_qp_rtr() argument 998 ret = ib_modify_qp(qp, &qp_attr, attr_mask); in srpt_ch_qp_rtr() 1015 static int srpt_ch_qp_rts(struct srpt_rdma_ch *ch, struct ib_qp *qp) in srpt_ch_qp_rts() argument 1028 ret = ib_modify_qp(qp, &qp_attr, attr_mask); in srpt_ch_qp_rts() 1042 return ib_modify_qp(ch->qp, &qp_attr, IB_QP_STATE); in srpt_ch_qp_err() 2056 ch->qp = ib_create_qp(sdev->pd, qp_init); in srpt_create_ch_ib() [all …]
|
D | ib_srpt.h | 306 struct ib_qp *qp; member
|
/linux-4.1.27/arch/ia64/include/asm/ |
D | kprobes.h | 41 unsigned long long qp : 6; member
|
/linux-4.1.27/drivers/infiniband/ulp/isert/ |
D | ib_isert.c | 180 return cma_id->qp; in isert_create_qp() 190 isert_conn->qp = isert_create_qp(isert_conn, comp, cma_id); in isert_conn_setup_qp() 191 if (IS_ERR(isert_conn->qp)) { in isert_conn_setup_qp() 192 ret = PTR_ERR(isert_conn->qp); in isert_conn_setup_qp() 812 if (isert_conn->qp) { in isert_connect_release() 813 struct isert_comp *comp = isert_conn->qp->recv_cq->cq_context; in isert_connect_release() 816 ib_destroy_qp(isert_conn->qp); in isert_connect_release() 830 struct isert_conn *isert_conn = cma_id->qp->qp_context; in isert_connected_handler() 931 struct isert_conn *isert_conn = cma_id->qp->qp_context; in isert_disconnected_handler() 960 struct isert_conn *isert_conn = cma_id->qp->qp_context; in isert_connect_error() [all …]
|
D | ib_isert.h | 172 struct ib_qp *qp; member
|
/linux-4.1.27/net/sched/ |
D | sch_api.c | 142 struct Qdisc_ops *q, **qp; in register_qdisc() local 146 for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next) in register_qdisc() 172 *qp = qops; in register_qdisc() 186 struct Qdisc_ops *q, **qp; in unregister_qdisc() local 190 for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next) in unregister_qdisc() 194 *qp = q->next; in unregister_qdisc()
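register_qdisc()/unregister_qdisc() use the pointer-to-pointer walk: qp always points at the link to patch, so appending at the tail and unlinking a node need no special case for the list head. A sketch of that idiom with illustrative types, not the real Qdisc_ops layout:

#include <stdio.h>
#include <stddef.h>

struct ops { const char *id; struct ops *next; };

static struct ops *ops_base;

static void register_ops(struct ops *n)
{
	struct ops *q, **qp;

	for (qp = &ops_base; (q = *qp) != NULL; qp = &q->next)
		;			/* walk to the terminating NULL link */
	n->next = NULL;
	*qp = n;			/* patch the tail link */
}

static void unregister_ops(struct ops *n)
{
	struct ops *q, **qp;

	for (qp = &ops_base; (q = *qp) != NULL; qp = &q->next)
		if (q == n) {
			*qp = q->next;	/* unlink without tracking a "prev" node */
			break;
		}
}

int main(void)
{
	struct ops a = { "pfifo", NULL }, b = { "htb", NULL };

	register_ops(&a);
	register_ops(&b);
	unregister_ops(&a);
	printf("head: %s\n", ops_base ? ops_base->id : "(empty)");	/* prints "htb" */
	return 0;
}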
|
/linux-4.1.27/net/rds/ |
D | ib_cm.c | 116 ret = ib_modify_qp(ic->i_cm_id->qp, attr, IB_QP_MIN_RNR_TIMER); in rds_ib_tune_rnr() 174 err = ib_modify_qp(ic->i_cm_id->qp, &qp_attr, IB_QP_STATE); in rds_ib_cm_connect_complete() 640 ic->i_cm_id ? ic->i_cm_id->qp : NULL); in rds_ib_conn_shutdown() 692 if (ic->i_cm_id->qp) in rds_ib_conn_shutdown()
|
D | iw_cm.c | 582 ic->i_cm_id ? ic->i_cm_id->qp : NULL); in rds_iw_conn_shutdown() 597 if (ic->i_cm_id->qp) { in rds_iw_conn_shutdown() 599 ib_modify_qp(ic->i_cm_id->qp, &qp_attr, IB_QP_STATE); in rds_iw_conn_shutdown() 629 if (ic->i_cm_id->qp) in rds_iw_conn_shutdown()
|
D | iw_rdma.c | 725 ret = ib_post_send(ibmr->cm_id->qp, &f_wr, &failed_wr); 738 if (!ibmr->cm_id->qp || !ibmr->mr) 748 ret = ib_post_send(ibmr->cm_id->qp, &s_wr, &failed_wr);
|
D | ib_send.c | 757 ret = ib_post_send(ic->i_cm_id->qp, &first->s_wr, &failed_wr); in rds_ib_xmit() 854 ret = ib_post_send(ic->i_cm_id->qp, &send->s_wr, &failed_wr); in rds_ib_xmit_atomic() 991 ret = ib_post_send(ic->i_cm_id->qp, &first->s_wr, &failed_wr); in rds_ib_xmit_rdma()
|
D | iw_recv.c | 251 ret = ib_post_recv(ic->i_cm_id->qp, &recv->r_wr, &failed_wr); in rds_iw_recv_refill() 448 ret = ib_post_send(ic->i_cm_id->qp, &ic->i_ack_wr, &failed_wr); in rds_iw_send_ack()
|
D | ib_recv.c | 381 ret = ib_post_recv(ic->i_cm_id->qp, &recv->r_wr, &failed_wr); in rds_ib_recv_refill() 617 ret = ib_post_send(ic->i_cm_id->qp, &ic->i_ack_wr, &failed_wr); in rds_ib_send_ack()
|
D | iw_send.c | 747 ret = ib_post_send(ic->i_cm_id->qp, &first->s_wr, &failed_wr); in rds_iw_xmit() 952 ret = ib_post_send(ic->i_cm_id->qp, &first->s_wr, &failed_wr); in rds_iw_xmit_rdma()
|
/linux-4.1.27/arch/powerpc/platforms/cell/spufs/ |
D | file.c | 2212 struct mfc_cq_sr *qp, *spuqp; in __spufs_dma_info_read() local 2221 qp = &info.dma_info_command_data[i]; in __spufs_dma_info_read() 2224 qp->mfc_cq_data0_RW = spuqp->mfc_cq_data0_RW; in __spufs_dma_info_read() 2225 qp->mfc_cq_data1_RW = spuqp->mfc_cq_data1_RW; in __spufs_dma_info_read() 2226 qp->mfc_cq_data2_RW = spuqp->mfc_cq_data2_RW; in __spufs_dma_info_read() 2227 qp->mfc_cq_data3_RW = spuqp->mfc_cq_data3_RW; in __spufs_dma_info_read() 2264 struct mfc_cq_sr *qp, *puqp; in __spufs_proxydma_info_read() local 2278 qp = &info.proxydma_info_command_data[i]; in __spufs_proxydma_info_read() 2281 qp->mfc_cq_data0_RW = puqp->mfc_cq_data0_RW; in __spufs_proxydma_info_read() 2282 qp->mfc_cq_data1_RW = puqp->mfc_cq_data1_RW; in __spufs_proxydma_info_read() [all …]
|
/linux-4.1.27/net/sunrpc/xprtrdma/ |
D | verbs.c | 447 ib_query_qp(ia->ri_id->qp, attr, in rpcrdma_conn_upcall() 706 if (ia->ri_id->qp) in rpcrdma_ia_close() 859 if (ia->ri_id->qp) { in rpcrdma_ep_destroy() 862 ia->ri_id->qp = NULL; in rpcrdma_ep_destroy() 1245 rc = ib_post_send(ia->ri_id->qp, &invalidate_wr, &bad_wr); in rpcrdma_retry_local_inv() 1615 rc = ib_post_send(ia->ri_id->qp, &send_wr, &send_wr_fail); in rpcrdma_ep_post() 1644 rc = ib_post_recv(ia->ri_id->qp, &recv_wr, &recv_wr_fail); in rpcrdma_ep_post_recv()
|
D | frwr_ops.c | 237 rc = ib_post_send(ia->ri_id->qp, &fastreg_wr, &bad_wr); in frwr_op_map() 280 rc = ib_post_send(ia->ri_id->qp, &invalidate_wr, &bad_wr); in frwr_op_unmap()
|
D | svc_rdma_transport.c | 195 event->event, event->element.qp); in qp_event_handler() 206 event->event, event->element.qp); in qp_event_handler() 953 newxprt->sc_qp = newxprt->sc_cm_id->qp; in svc_rdma_accept()
|
/linux-4.1.27/drivers/scsi/pm8001/ |
D | pm8001_sas.c | 216 uint32_t *qp = (uint32_t *)(((char *) in pm8001_phy_control() local 220 phy->invalid_dword_count = qp[0]; in pm8001_phy_control() 221 phy->running_disparity_error_count = qp[1]; in pm8001_phy_control() 222 phy->loss_of_dword_sync_count = qp[3]; in pm8001_phy_control() 223 phy->phy_reset_problem_count = qp[4]; in pm8001_phy_control()
|
/linux-4.1.27/net/openvswitch/ |
D | flow.c | 304 struct qtag_prefix *qp; in parse_vlan() local 313 qp = (struct qtag_prefix *) skb->data; in parse_vlan() 314 key->eth.tci = qp->tci | htons(VLAN_TAG_PRESENT); in parse_vlan()
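parse_vlan() copies the tag control information (TCI) out of the 802.1Q prefix and ORs in the kernel's VLAN_TAG_PRESENT marker, which is not part of the on-wire tag. A sketch of what the TCI itself encodes (3-bit PCP, 1-bit DEI, 12-bit VLAN ID); the sample frame bytes are made up:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* two TCI bytes as they appear on the wire (network byte order) */
	uint8_t wire[2] = { 0x60, 0x64 };		/* PCP 3, DEI 0, VID 100 */
	unsigned int tci = (unsigned int)(wire[0] << 8 | wire[1]);

	printf("pcp=%u dei=%u vid=%u\n",
	       tci >> 13, (tci >> 12) & 1u, tci & 0xfffu);
	return 0;
}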
|
/linux-4.1.27/drivers/net/ethernet/chelsio/cxgb4/ |
D | cxgb4_uld.h | 218 struct cxgb4_range qp; member
|
D | t4_hw.c | 2956 __be32 *qp = &cmd.iq0_to_iq2; in t4_config_rss_range() local 2977 *qp++ = htonl(v); in t4_config_rss_range()
|
/linux-4.1.27/drivers/net/ethernet/intel/i40e/ |
D | i40e_main.c | 2802 u32 qp; in i40e_vsi_configure_msix() local 2808 qp = vsi->base_queue; in i40e_vsi_configure_msix() 2822 wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), qp); in i40e_vsi_configure_msix() 2827 (qp << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT)| in i40e_vsi_configure_msix() 2831 wr32(hw, I40E_QINT_RQCTL(qp), val); in i40e_vsi_configure_msix() 2836 ((qp+1) << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT)| in i40e_vsi_configure_msix() 2845 wr32(hw, I40E_QINT_TQCTL(qp), val); in i40e_vsi_configure_msix() 2846 qp++; in i40e_vsi_configure_msix() 3689 u32 val, qp; in i40e_vsi_free_irq() local 3722 qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK) in i40e_vsi_free_irq() [all …]
|
/linux-4.1.27/drivers/net/ethernet/chelsio/cxgb4vf/ |
D | t4vf_hw.c | 955 __be32 *qp = &cmd.iq0_to_iq2; in t4vf_config_rss_range() local 996 *qp++ = cpu_to_be32(FW_RSS_IND_TBL_CMD_IQ0_V(qbuf[0]) | in t4vf_config_rss_range()
|
/linux-4.1.27/drivers/staging/lustre/lnet/klnds/o2iblnd/ |
D | o2iblnd.h | 769 ib_modify_qp(conn->ibc_cmid->qp, in kiblnd_abort_receives()
|
D | o2iblnd_cb.c | 182 rc = ib_post_recv(conn->ibc_cmid->qp, &rx->rx_wrq, &bad_wrq); in kiblnd_post_rx() 881 rc = ib_post_send(conn->ibc_cmid->qp, in kiblnd_post_tx_locked()
|