Searched refs: qp (Results 1 - 200 of 250), sorted by relevance


/linux-4.4.14/drivers/infiniband/hw/qib/
qib_qp.c
223 static void insert_qp(struct qib_ibdev *dev, struct qib_qp *qp) insert_qp() argument
225 struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num); insert_qp()
227 unsigned n = qpn_hash(dev, qp->ibqp.qp_num); insert_qp()
229 atomic_inc(&qp->refcount); insert_qp()
232 if (qp->ibqp.qp_num == 0) insert_qp()
233 rcu_assign_pointer(ibp->qp0, qp); insert_qp()
234 else if (qp->ibqp.qp_num == 1) insert_qp()
235 rcu_assign_pointer(ibp->qp1, qp); insert_qp()
237 qp->next = dev->qp_table[n]; insert_qp()
238 rcu_assign_pointer(dev->qp_table[n], qp); insert_qp()
248 static void remove_qp(struct qib_ibdev *dev, struct qib_qp *qp) remove_qp() argument
250 struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num); remove_qp()
251 unsigned n = qpn_hash(dev, qp->ibqp.qp_num); remove_qp()
258 lockdep_is_held(&dev->qpt_lock)) == qp) { remove_qp()
261 lockdep_is_held(&dev->qpt_lock)) == qp) { remove_qp()
272 if (q == qp) { remove_qp()
274 rcu_dereference_protected(qp->next, remove_qp()
284 atomic_dec(&qp->refcount); remove_qp()
299 struct qib_qp *qp; qib_free_all_qps() local
317 qp = rcu_dereference_protected(dev->qp_table[n], qib_free_all_qps()
321 for (; qp; qp = rcu_dereference_protected(qp->next, qib_free_all_qps()
341 struct qib_qp *qp = NULL; qib_lookup_qpn() local
346 qp = rcu_dereference(ibp->qp0); qib_lookup_qpn()
348 qp = rcu_dereference(ibp->qp1); qib_lookup_qpn()
349 if (qp) qib_lookup_qpn()
350 atomic_inc(&qp->refcount); qib_lookup_qpn()
355 for (qp = rcu_dereference(dev->qp_table[n]); qp; qib_lookup_qpn()
356 qp = rcu_dereference(qp->next)) qib_lookup_qpn()
357 if (qp->ibqp.qp_num == qpn) { qib_lookup_qpn()
358 atomic_inc(&qp->refcount); qib_lookup_qpn()
363 return qp; qib_lookup_qpn()
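
The insert_qp()/remove_qp()/qib_lookup_qpn() hits above show the usual RCU-protected hash-table pattern: writers publish with rcu_assign_pointer() under qpt_lock, readers walk the bucket under rcu_read_lock() and pin the QP with a reference before leaving the read-side critical section. Below is a condensed sketch of the reader side only, using just the fields visible in the excerpts; the real function also special-cases QP0/QP1 via ibp->qp0 and ibp->qp1, which is omitted here.

/* Sketch of the RCU reader side; field names taken from the excerpts above. */
static struct qib_qp *lookup_qpn_sketch(struct qib_ibdev *dev, u32 qpn)
{
	struct qib_qp *qp;
	unsigned n = qpn_hash(dev, qpn);

	rcu_read_lock();
	for (qp = rcu_dereference(dev->qp_table[n]); qp;
	     qp = rcu_dereference(qp->next))
		if (qp->ibqp.qp_num == qpn) {
			atomic_inc(&qp->refcount);	/* pin until the caller drops it */
			break;
		}
	rcu_read_unlock();
	return qp;
}
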
368 * @qp: the QP to reset
371 static void qib_reset_qp(struct qib_qp *qp, enum ib_qp_type type) qib_reset_qp() argument
373 qp->remote_qpn = 0; qib_reset_qp()
374 qp->qkey = 0; qib_reset_qp()
375 qp->qp_access_flags = 0; qib_reset_qp()
376 atomic_set(&qp->s_dma_busy, 0); qib_reset_qp()
377 qp->s_flags &= QIB_S_SIGNAL_REQ_WR; qib_reset_qp()
378 qp->s_hdrwords = 0; qib_reset_qp()
379 qp->s_wqe = NULL; qib_reset_qp()
380 qp->s_draining = 0; qib_reset_qp()
381 qp->s_next_psn = 0; qib_reset_qp()
382 qp->s_last_psn = 0; qib_reset_qp()
383 qp->s_sending_psn = 0; qib_reset_qp()
384 qp->s_sending_hpsn = 0; qib_reset_qp()
385 qp->s_psn = 0; qib_reset_qp()
386 qp->r_psn = 0; qib_reset_qp()
387 qp->r_msn = 0; qib_reset_qp()
389 qp->s_state = IB_OPCODE_RC_SEND_LAST; qib_reset_qp()
390 qp->r_state = IB_OPCODE_RC_SEND_LAST; qib_reset_qp()
392 qp->s_state = IB_OPCODE_UC_SEND_LAST; qib_reset_qp()
393 qp->r_state = IB_OPCODE_UC_SEND_LAST; qib_reset_qp()
395 qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE; qib_reset_qp()
396 qp->r_nak_state = 0; qib_reset_qp()
397 qp->r_aflags = 0; qib_reset_qp()
398 qp->r_flags = 0; qib_reset_qp()
399 qp->s_head = 0; qib_reset_qp()
400 qp->s_tail = 0; qib_reset_qp()
401 qp->s_cur = 0; qib_reset_qp()
402 qp->s_acked = 0; qib_reset_qp()
403 qp->s_last = 0; qib_reset_qp()
404 qp->s_ssn = 1; qib_reset_qp()
405 qp->s_lsn = 0; qib_reset_qp()
406 qp->s_mig_state = IB_MIG_MIGRATED; qib_reset_qp()
407 memset(qp->s_ack_queue, 0, sizeof(qp->s_ack_queue)); qib_reset_qp()
408 qp->r_head_ack_queue = 0; qib_reset_qp()
409 qp->s_tail_ack_queue = 0; qib_reset_qp()
410 qp->s_num_rd_atomic = 0; qib_reset_qp()
411 if (qp->r_rq.wq) { qib_reset_qp()
412 qp->r_rq.wq->head = 0; qib_reset_qp()
413 qp->r_rq.wq->tail = 0; qib_reset_qp()
415 qp->r_sge.num_sge = 0; qib_reset_qp()
418 static void clear_mr_refs(struct qib_qp *qp, int clr_sends) clear_mr_refs() argument
422 if (test_and_clear_bit(QIB_R_REWIND_SGE, &qp->r_aflags)) clear_mr_refs()
423 qib_put_ss(&qp->s_rdma_read_sge); clear_mr_refs()
425 qib_put_ss(&qp->r_sge); clear_mr_refs()
428 while (qp->s_last != qp->s_head) { clear_mr_refs()
429 struct qib_swqe *wqe = get_swqe_ptr(qp, qp->s_last); clear_mr_refs()
437 if (qp->ibqp.qp_type == IB_QPT_UD || clear_mr_refs()
438 qp->ibqp.qp_type == IB_QPT_SMI || clear_mr_refs()
439 qp->ibqp.qp_type == IB_QPT_GSI) clear_mr_refs()
441 if (++qp->s_last >= qp->s_size) clear_mr_refs()
442 qp->s_last = 0; clear_mr_refs()
444 if (qp->s_rdma_mr) { clear_mr_refs()
445 qib_put_mr(qp->s_rdma_mr); clear_mr_refs()
446 qp->s_rdma_mr = NULL; clear_mr_refs()
450 if (qp->ibqp.qp_type != IB_QPT_RC) clear_mr_refs()
453 for (n = 0; n < ARRAY_SIZE(qp->s_ack_queue); n++) { clear_mr_refs()
454 struct qib_ack_entry *e = &qp->s_ack_queue[n]; clear_mr_refs()
466 * @qp: the QP to put into the error state
474 int qib_error_qp(struct qib_qp *qp, enum ib_wc_status err) qib_error_qp() argument
476 struct qib_ibdev *dev = to_idev(qp->ibqp.device); qib_error_qp()
480 if (qp->state == IB_QPS_ERR || qp->state == IB_QPS_RESET) qib_error_qp()
483 qp->state = IB_QPS_ERR; qib_error_qp()
485 if (qp->s_flags & (QIB_S_TIMER | QIB_S_WAIT_RNR)) { qib_error_qp()
486 qp->s_flags &= ~(QIB_S_TIMER | QIB_S_WAIT_RNR); qib_error_qp()
487 del_timer(&qp->s_timer); qib_error_qp()
490 if (qp->s_flags & QIB_S_ANY_WAIT_SEND) qib_error_qp()
491 qp->s_flags &= ~QIB_S_ANY_WAIT_SEND; qib_error_qp()
494 if (!list_empty(&qp->iowait) && !(qp->s_flags & QIB_S_BUSY)) { qib_error_qp()
495 qp->s_flags &= ~QIB_S_ANY_WAIT_IO; qib_error_qp()
496 list_del_init(&qp->iowait); qib_error_qp()
500 if (!(qp->s_flags & QIB_S_BUSY)) { qib_error_qp()
501 qp->s_hdrwords = 0; qib_error_qp()
502 if (qp->s_rdma_mr) { qib_error_qp()
503 qib_put_mr(qp->s_rdma_mr); qib_error_qp()
504 qp->s_rdma_mr = NULL; qib_error_qp()
506 if (qp->s_tx) { qib_error_qp()
507 qib_put_txreq(qp->s_tx); qib_error_qp()
508 qp->s_tx = NULL; qib_error_qp()
513 if (qp->s_last != qp->s_head) qib_error_qp()
514 qib_schedule_send(qp); qib_error_qp()
516 clear_mr_refs(qp, 0); qib_error_qp()
519 wc.qp = &qp->ibqp; qib_error_qp()
522 if (test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags)) { qib_error_qp()
523 wc.wr_id = qp->r_wr_id; qib_error_qp()
525 qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1); qib_error_qp()
529 if (qp->r_rq.wq) { qib_error_qp()
534 spin_lock(&qp->r_rq.lock); qib_error_qp()
537 wq = qp->r_rq.wq; qib_error_qp()
539 if (head >= qp->r_rq.size) qib_error_qp()
542 if (tail >= qp->r_rq.size) qib_error_qp()
545 wc.wr_id = get_rwqe_ptr(&qp->r_rq, tail)->wr_id; qib_error_qp()
546 if (++tail >= qp->r_rq.size) qib_error_qp()
548 qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1); qib_error_qp()
552 spin_unlock(&qp->r_rq.lock); qib_error_qp()
553 } else if (qp->ibqp.event_handler) qib_error_qp()
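
The qib_error_qp() hits show how a QP moved to IB_QPS_ERR flushes its receive queue: every RWQE posted between the consumer (tail) and producer (head) indices is completed with a flush status. The sketch below restates that loop; the wc status/opcode values and the final write-back of the consumer index are assumptions consistent with the flush semantics, and the index clamping the driver does against r_rq.size is only noted in a comment.

/* Simplified flush of posted receive WQEs when the QP enters the error state. */
static void flush_recv_queue_sketch(struct qib_qp *qp)
{
	struct qib_rwq *wq = qp->r_rq.wq;
	struct ib_wc wc = { };
	u32 head, tail;

	if (!wq)
		return;
	wc.qp = &qp->ibqp;
	wc.status = IB_WC_WR_FLUSH_ERR;		/* assumed status for flushed RWQEs */
	wc.opcode = IB_WC_RECV;
	spin_lock(&qp->r_rq.lock);
	head = wq->head;			/* the driver also clamps these to r_rq.size */
	tail = wq->tail;
	while (tail != head) {
		wc.wr_id = get_rwqe_ptr(&qp->r_rq, tail)->wr_id;
		if (++tail >= qp->r_rq.size)
			tail = 0;
		qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
	}
	wq->tail = tail;			/* assumed write-back of the consumer index */
	spin_unlock(&qp->r_rq.lock);
}
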
573 struct qib_qp *qp = to_iqp(ibqp); qib_modify_qp() local
581 spin_lock_irq(&qp->r_lock); qib_modify_qp()
582 spin_lock(&qp->s_lock); qib_modify_qp()
585 attr->cur_qp_state : qp->state; qib_modify_qp()
595 if (qib_check_ah(qp->ibqp.device, &attr->ah_attr)) qib_modify_qp()
602 if (qib_check_ah(qp->ibqp.device, &attr->alt_ah_attr)) qib_modify_qp()
617 if (qp->ibqp.qp_type == IB_QPT_SMI || qib_modify_qp()
618 qp->ibqp.qp_type == IB_QPT_GSI || qib_modify_qp()
638 * that to a small mtu. We'll set qp->path_mtu qib_modify_qp()
645 int mtu, pidx = qp->port_num - 1; qib_modify_qp()
676 if (qp->s_mig_state == IB_MIG_ARMED) qib_modify_qp()
681 if (qp->s_mig_state == IB_MIG_REARM) qib_modify_qp()
685 if (qp->s_mig_state == IB_MIG_ARMED) qib_modify_qp()
697 if (qp->state != IB_QPS_RESET) { qib_modify_qp()
698 qp->state = IB_QPS_RESET; qib_modify_qp()
700 if (!list_empty(&qp->iowait)) qib_modify_qp()
701 list_del_init(&qp->iowait); qib_modify_qp()
703 qp->s_flags &= ~(QIB_S_TIMER | QIB_S_ANY_WAIT); qib_modify_qp()
704 spin_unlock(&qp->s_lock); qib_modify_qp()
705 spin_unlock_irq(&qp->r_lock); qib_modify_qp()
707 cancel_work_sync(&qp->s_work); qib_modify_qp()
708 del_timer_sync(&qp->s_timer); qib_modify_qp()
709 wait_event(qp->wait_dma, !atomic_read(&qp->s_dma_busy)); qib_modify_qp()
710 if (qp->s_tx) { qib_modify_qp()
711 qib_put_txreq(qp->s_tx); qib_modify_qp()
712 qp->s_tx = NULL; qib_modify_qp()
714 remove_qp(dev, qp); qib_modify_qp()
715 wait_event(qp->wait, !atomic_read(&qp->refcount)); qib_modify_qp()
716 spin_lock_irq(&qp->r_lock); qib_modify_qp()
717 spin_lock(&qp->s_lock); qib_modify_qp()
718 clear_mr_refs(qp, 1); qib_modify_qp()
719 qib_reset_qp(qp, ibqp->qp_type); qib_modify_qp()
725 qp->r_flags &= ~QIB_R_COMM_EST; qib_modify_qp()
726 qp->state = new_state; qib_modify_qp()
730 qp->s_draining = qp->s_last != qp->s_cur; qib_modify_qp()
731 qp->state = new_state; qib_modify_qp()
735 if (qp->ibqp.qp_type == IB_QPT_RC) qib_modify_qp()
737 qp->state = new_state; qib_modify_qp()
741 lastwqe = qib_error_qp(qp, IB_WC_WR_FLUSH_ERR); qib_modify_qp()
745 qp->state = new_state; qib_modify_qp()
750 qp->s_pkey_index = attr->pkey_index; qib_modify_qp()
753 qp->port_num = attr->port_num; qib_modify_qp()
756 qp->remote_qpn = attr->dest_qp_num; qib_modify_qp()
759 qp->s_next_psn = attr->sq_psn & QIB_PSN_MASK; qib_modify_qp()
760 qp->s_psn = qp->s_next_psn; qib_modify_qp()
761 qp->s_sending_psn = qp->s_next_psn; qib_modify_qp()
762 qp->s_last_psn = qp->s_next_psn - 1; qib_modify_qp()
763 qp->s_sending_hpsn = qp->s_last_psn; qib_modify_qp()
767 qp->r_psn = attr->rq_psn & QIB_PSN_MASK; qib_modify_qp()
770 qp->qp_access_flags = attr->qp_access_flags; qib_modify_qp()
773 qp->remote_ah_attr = attr->ah_attr; qib_modify_qp()
774 qp->s_srate = attr->ah_attr.static_rate; qib_modify_qp()
778 qp->alt_ah_attr = attr->alt_ah_attr; qib_modify_qp()
779 qp->s_alt_pkey_index = attr->alt_pkey_index; qib_modify_qp()
783 qp->s_mig_state = attr->path_mig_state; qib_modify_qp()
785 qp->remote_ah_attr = qp->alt_ah_attr; qib_modify_qp()
786 qp->port_num = qp->alt_ah_attr.port_num; qib_modify_qp()
787 qp->s_pkey_index = qp->s_alt_pkey_index; qib_modify_qp()
792 qp->path_mtu = pmtu; qib_modify_qp()
793 qp->pmtu = ib_mtu_enum_to_int(pmtu); qib_modify_qp()
797 qp->s_retry_cnt = attr->retry_cnt; qib_modify_qp()
798 qp->s_retry = attr->retry_cnt; qib_modify_qp()
802 qp->s_rnr_retry_cnt = attr->rnr_retry; qib_modify_qp()
803 qp->s_rnr_retry = attr->rnr_retry; qib_modify_qp()
807 qp->r_min_rnr_timer = attr->min_rnr_timer; qib_modify_qp()
810 qp->timeout = attr->timeout; qib_modify_qp()
811 qp->timeout_jiffies = qib_modify_qp()
812 usecs_to_jiffies((4096UL * (1UL << qp->timeout)) / qib_modify_qp()
817 qp->qkey = attr->qkey; qib_modify_qp()
820 qp->r_max_rd_atomic = attr->max_dest_rd_atomic; qib_modify_qp()
823 qp->s_max_rd_atomic = attr->max_rd_atomic; qib_modify_qp()
825 spin_unlock(&qp->s_lock); qib_modify_qp()
826 spin_unlock_irq(&qp->r_lock); qib_modify_qp()
829 insert_qp(dev, qp); qib_modify_qp()
832 ev.device = qp->ibqp.device; qib_modify_qp()
833 ev.element.qp = &qp->ibqp; qib_modify_qp()
835 qp->ibqp.event_handler(&ev, qp->ibqp.qp_context); qib_modify_qp()
838 ev.device = qp->ibqp.device; qib_modify_qp()
839 ev.element.qp = &qp->ibqp; qib_modify_qp()
841 qp->ibqp.event_handler(&ev, qp->ibqp.qp_context); qib_modify_qp()
847 spin_unlock(&qp->s_lock); qib_modify_qp()
848 spin_unlock_irq(&qp->r_lock); qib_modify_qp()
858 struct qib_qp *qp = to_iqp(ibqp); qib_query_qp() local
860 attr->qp_state = qp->state; qib_query_qp()
862 attr->path_mtu = qp->path_mtu; qib_query_qp()
863 attr->path_mig_state = qp->s_mig_state; qib_query_qp()
864 attr->qkey = qp->qkey; qib_query_qp()
865 attr->rq_psn = qp->r_psn & QIB_PSN_MASK; qib_query_qp()
866 attr->sq_psn = qp->s_next_psn & QIB_PSN_MASK; qib_query_qp()
867 attr->dest_qp_num = qp->remote_qpn; qib_query_qp()
868 attr->qp_access_flags = qp->qp_access_flags; qib_query_qp()
869 attr->cap.max_send_wr = qp->s_size - 1; qib_query_qp()
870 attr->cap.max_recv_wr = qp->ibqp.srq ? 0 : qp->r_rq.size - 1; qib_query_qp()
871 attr->cap.max_send_sge = qp->s_max_sge; qib_query_qp()
872 attr->cap.max_recv_sge = qp->r_rq.max_sge; qib_query_qp()
874 attr->ah_attr = qp->remote_ah_attr; qib_query_qp()
875 attr->alt_ah_attr = qp->alt_ah_attr; qib_query_qp()
876 attr->pkey_index = qp->s_pkey_index; qib_query_qp()
877 attr->alt_pkey_index = qp->s_alt_pkey_index; qib_query_qp()
879 attr->sq_draining = qp->s_draining; qib_query_qp()
880 attr->max_rd_atomic = qp->s_max_rd_atomic; qib_query_qp()
881 attr->max_dest_rd_atomic = qp->r_max_rd_atomic; qib_query_qp()
882 attr->min_rnr_timer = qp->r_min_rnr_timer; qib_query_qp()
883 attr->port_num = qp->port_num; qib_query_qp()
884 attr->timeout = qp->timeout; qib_query_qp()
885 attr->retry_cnt = qp->s_retry_cnt; qib_query_qp()
886 attr->rnr_retry = qp->s_rnr_retry_cnt; qib_query_qp()
887 attr->alt_port_num = qp->alt_ah_attr.port_num; qib_query_qp()
888 attr->alt_timeout = qp->alt_timeout; qib_query_qp()
890 init_attr->event_handler = qp->ibqp.event_handler; qib_query_qp()
891 init_attr->qp_context = qp->ibqp.qp_context; qib_query_qp()
892 init_attr->send_cq = qp->ibqp.send_cq; qib_query_qp()
893 init_attr->recv_cq = qp->ibqp.recv_cq; qib_query_qp()
894 init_attr->srq = qp->ibqp.srq; qib_query_qp()
896 if (qp->s_flags & QIB_S_SIGNAL_REQ_WR) qib_query_qp()
900 init_attr->qp_type = qp->ibqp.qp_type; qib_query_qp()
901 init_attr->port_num = qp->port_num; qib_query_qp()
907 * @qp: the queue pair to compute the AETH for
911 __be32 qib_compute_aeth(struct qib_qp *qp) qib_compute_aeth() argument
913 u32 aeth = qp->r_msn & QIB_MSN_MASK; qib_compute_aeth()
915 if (qp->ibqp.srq) { qib_compute_aeth()
924 struct qib_rwq *wq = qp->r_rq.wq; qib_compute_aeth()
930 if (head >= qp->r_rq.size) qib_compute_aeth()
933 if (tail >= qp->r_rq.size) qib_compute_aeth()
942 credits += qp->r_rq.size; qib_compute_aeth()
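
qib_compute_aeth() advertises receive credits in the AETH. For a QP with its own receive queue the credit count is the number of posted-but-unconsumed RWQEs, derived from the producer (head) and consumer (tail) indices visible above, with a wrapped difference corrected by adding the queue size back. A minimal sketch of that arithmetic follows; the real function then encodes the count into the AETH credit field, which is not shown in the listing.

/* Number of RWQEs still available in a circular receive queue (sketch). */
static u32 rq_credits_sketch(struct qib_rwq *wq, u32 size)
{
	u32 head = wq->head;
	u32 tail = wq->tail;
	u32 credits;

	if (head >= size)		/* indices are sanitized, as in the excerpts */
		head = 0;
	if (tail >= size)
		tail = 0;
	credits = head - tail;		/* RWQEs posted but not yet consumed ... */
	if ((int)credits < 0)
		credits += size;	/* ... with wrap-around handled */
	return credits;
}
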
979 struct qib_qp *qp; qib_create_qp() local
1039 sz = sizeof(*qp); qib_create_qp()
1045 sg_list_sz = sizeof(*qp->r_sg_list) * qib_create_qp()
1048 sg_list_sz = sizeof(*qp->r_sg_list) * qib_create_qp()
1050 qp = kzalloc(sz + sg_list_sz, gfp); qib_create_qp()
1051 if (!qp) { qib_create_qp()
1055 RCU_INIT_POINTER(qp->next, NULL); qib_create_qp()
1056 qp->s_hdr = kzalloc(sizeof(*qp->s_hdr), gfp); qib_create_qp()
1057 if (!qp->s_hdr) { qib_create_qp()
1061 qp->timeout_jiffies = qib_create_qp()
1062 usecs_to_jiffies((4096UL * (1UL << qp->timeout)) / qib_create_qp()
1067 qp->r_rq.size = init_attr->cap.max_recv_wr + 1; qib_create_qp()
1068 qp->r_rq.max_sge = init_attr->cap.max_recv_sge; qib_create_qp()
1069 sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) + qib_create_qp()
1072 qp->r_rq.wq = vmalloc_user( qib_create_qp()
1074 qp->r_rq.size * sz); qib_create_qp()
1076 qp->r_rq.wq = __vmalloc( qib_create_qp()
1078 qp->r_rq.size * sz, qib_create_qp()
1081 if (!qp->r_rq.wq) { qib_create_qp()
1088 * ib_create_qp() will initialize qp->ibqp qib_create_qp()
1089 * except for qp->ibqp.qp_num. qib_create_qp()
1091 spin_lock_init(&qp->r_lock); qib_create_qp()
1092 spin_lock_init(&qp->s_lock); qib_create_qp()
1093 spin_lock_init(&qp->r_rq.lock); qib_create_qp()
1094 atomic_set(&qp->refcount, 0); qib_create_qp()
1095 init_waitqueue_head(&qp->wait); qib_create_qp()
1096 init_waitqueue_head(&qp->wait_dma); qib_create_qp()
1097 init_timer(&qp->s_timer); qib_create_qp()
1098 qp->s_timer.data = (unsigned long)qp; qib_create_qp()
1099 INIT_WORK(&qp->s_work, qib_do_send); qib_create_qp()
1100 INIT_LIST_HEAD(&qp->iowait); qib_create_qp()
1101 INIT_LIST_HEAD(&qp->rspwait); qib_create_qp()
1102 qp->state = IB_QPS_RESET; qib_create_qp()
1103 qp->s_wq = swq; qib_create_qp()
1104 qp->s_size = init_attr->cap.max_send_wr + 1; qib_create_qp()
1105 qp->s_max_sge = init_attr->cap.max_send_sge; qib_create_qp()
1107 qp->s_flags = QIB_S_SIGNAL_REQ_WR; qib_create_qp()
1114 vfree(qp->r_rq.wq); qib_create_qp()
1117 qp->ibqp.qp_num = err; qib_create_qp()
1118 qp->port_num = init_attr->port_num; qib_create_qp()
1119 qib_reset_qp(qp, init_attr->qp_type); qib_create_qp()
1135 if (!qp->r_rq.wq) { qib_create_qp()
1145 u32 s = sizeof(struct qib_rwq) + qp->r_rq.size * sz; qib_create_qp()
1147 qp->ip = qib_create_mmap_info(dev, s, qib_create_qp()
1149 qp->r_rq.wq); qib_create_qp()
1150 if (!qp->ip) { qib_create_qp()
1155 err = ib_copy_to_udata(udata, &(qp->ip->offset), qib_create_qp()
1156 sizeof(qp->ip->offset)); qib_create_qp()
1174 if (qp->ip) { qib_create_qp()
1176 list_add(&qp->ip->pending_mmaps, &dev->pending_mmaps); qib_create_qp()
1180 ret = &qp->ibqp; qib_create_qp()
1184 if (qp->ip) qib_create_qp()
1185 kref_put(&qp->ip->ref, qib_release_mmap_info); qib_create_qp()
1187 vfree(qp->r_rq.wq); qib_create_qp()
1188 free_qpn(&dev->qpn_table, qp->ibqp.qp_num); qib_create_qp()
1190 kfree(qp->s_hdr); qib_create_qp()
1191 kfree(qp); qib_create_qp()
1209 struct qib_qp *qp = to_iqp(ibqp); qib_destroy_qp() local
1213 spin_lock_irq(&qp->s_lock); qib_destroy_qp()
1214 if (qp->state != IB_QPS_RESET) { qib_destroy_qp()
1215 qp->state = IB_QPS_RESET; qib_destroy_qp()
1217 if (!list_empty(&qp->iowait)) qib_destroy_qp()
1218 list_del_init(&qp->iowait); qib_destroy_qp()
1220 qp->s_flags &= ~(QIB_S_TIMER | QIB_S_ANY_WAIT); qib_destroy_qp()
1221 spin_unlock_irq(&qp->s_lock); qib_destroy_qp()
1222 cancel_work_sync(&qp->s_work); qib_destroy_qp()
1223 del_timer_sync(&qp->s_timer); qib_destroy_qp()
1224 wait_event(qp->wait_dma, !atomic_read(&qp->s_dma_busy)); qib_destroy_qp()
1225 if (qp->s_tx) { qib_destroy_qp()
1226 qib_put_txreq(qp->s_tx); qib_destroy_qp()
1227 qp->s_tx = NULL; qib_destroy_qp()
1229 remove_qp(dev, qp); qib_destroy_qp()
1230 wait_event(qp->wait, !atomic_read(&qp->refcount)); qib_destroy_qp()
1231 clear_mr_refs(qp, 1); qib_destroy_qp()
1233 spin_unlock_irq(&qp->s_lock); qib_destroy_qp()
1236 free_qpn(&dev->qpn_table, qp->ibqp.qp_num); qib_destroy_qp()
1241 if (qp->ip) qib_destroy_qp()
1242 kref_put(&qp->ip->ref, qib_release_mmap_info); qib_destroy_qp()
1244 vfree(qp->r_rq.wq); qib_destroy_qp()
1245 vfree(qp->s_wq); qib_destroy_qp()
1246 kfree(qp->s_hdr); qib_destroy_qp()
1247 kfree(qp); qib_destroy_qp()
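
qib_destroy_qp() (and the RESET path in qib_modify_qp()) tears a QP down in a fixed order: force the state to RESET under s_lock, drop off any I/O wait list, cancel the send work and timer, wait for in-flight DMA, unhook the QP from the lookup table, wait for the reference count to drop to zero, and only then free memory. The commented restatement below is condensed from the hits above; dev comes from to_idev(qp->ibqp.device) as in the qib_error_qp() hit, and the pending-lock juggling around the iowait list is simplified.

/* Condensed teardown order from the qib_destroy_qp() hits above. */
struct qib_ibdev *dev = to_idev(qp->ibqp.device);

spin_lock_irq(&qp->s_lock);
if (qp->state != IB_QPS_RESET) {
	qp->state = IB_QPS_RESET;
	if (!list_empty(&qp->iowait))		/* stop waiting for send resources */
		list_del_init(&qp->iowait);
	qp->s_flags &= ~(QIB_S_TIMER | QIB_S_ANY_WAIT);
	spin_unlock_irq(&qp->s_lock);
	cancel_work_sync(&qp->s_work);		/* no further qib_do_send() runs */
	del_timer_sync(&qp->s_timer);
	wait_event(qp->wait_dma, !atomic_read(&qp->s_dma_busy));
	if (qp->s_tx) {
		qib_put_txreq(qp->s_tx);
		qp->s_tx = NULL;
	}
	remove_qp(dev, qp);			/* new lookups can no longer find it */
	wait_event(qp->wait, !atomic_read(&qp->refcount));
	clear_mr_refs(qp, 1);			/* release MR references held by WQEs */
} else {
	spin_unlock_irq(&qp->s_lock);
}

free_qpn(&dev->qpn_table, qp->ibqp.qp_num);
if (qp->ip)					/* user-mapped receive queue bookkeeping */
	kref_put(&qp->ip->ref, qib_release_mmap_info);
vfree(qp->r_rq.wq);
vfree(qp->s_wq);
kfree(qp->s_hdr);
kfree(qp);
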
1278 * @qp: the qp who's send work queue to flush
1283 void qib_get_credit(struct qib_qp *qp, u32 aeth) qib_get_credit() argument
1293 if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT)) { qib_get_credit()
1294 qp->s_flags |= QIB_S_UNLIMITED_CREDIT; qib_get_credit()
1295 if (qp->s_flags & QIB_S_WAIT_SSN_CREDIT) { qib_get_credit()
1296 qp->s_flags &= ~QIB_S_WAIT_SSN_CREDIT; qib_get_credit()
1297 qib_schedule_send(qp); qib_get_credit()
1300 } else if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT)) { qib_get_credit()
1303 if (qib_cmp24(credit, qp->s_lsn) > 0) { qib_get_credit()
1304 qp->s_lsn = credit; qib_get_credit()
1305 if (qp->s_flags & QIB_S_WAIT_SSN_CREDIT) { qib_get_credit()
1306 qp->s_flags &= ~QIB_S_WAIT_SSN_CREDIT; qib_get_credit()
1307 qib_schedule_send(qp); qib_get_credit()
1317 struct qib_qp *qp; member in struct:qib_qp_iter
1343 struct qib_qp *pqp = iter->qp; qib_qp_iter_next()
1344 struct qib_qp *qp; qib_qp_iter_next() local
1348 qp = rcu_dereference(pqp->next); qib_qp_iter_next()
1350 qp = rcu_dereference(dev->qp_table[n]); qib_qp_iter_next()
1351 pqp = qp; qib_qp_iter_next()
1352 if (qp) { qib_qp_iter_next()
1353 iter->qp = qp; qib_qp_iter_next()
1368 struct qib_qp *qp = iter->qp; qib_qp_iter_print() local
1370 wqe = get_swqe_ptr(qp, qp->s_last); qib_qp_iter_print()
1374 qp->ibqp.qp_num, qib_qp_iter_print()
1375 qp_type_str[qp->ibqp.qp_type], qib_qp_iter_print()
1376 qp->state, qib_qp_iter_print()
1378 qp->s_hdrwords, qib_qp_iter_print()
1379 qp->s_flags, qib_qp_iter_print()
1380 atomic_read(&qp->s_dma_busy), qib_qp_iter_print()
1381 !list_empty(&qp->iowait), qib_qp_iter_print()
1382 qp->timeout, qib_qp_iter_print()
1384 qp->s_lsn, qib_qp_iter_print()
1385 qp->s_last_psn, qib_qp_iter_print()
1386 qp->s_psn, qp->s_next_psn, qib_qp_iter_print()
1387 qp->s_sending_psn, qp->s_sending_hpsn, qib_qp_iter_print()
1388 qp->s_last, qp->s_acked, qp->s_cur, qib_qp_iter_print()
1389 qp->s_tail, qp->s_head, qp->s_size, qib_qp_iter_print()
1390 qp->remote_qpn, qib_qp_iter_print()
1391 qp->remote_ah_attr.dlid); qib_qp_iter_print()
qib_rc.c
57 static void start_timer(struct qib_qp *qp) start_timer() argument
59 qp->s_flags |= QIB_S_TIMER; start_timer()
60 qp->s_timer.function = rc_timeout; start_timer()
61 /* 4.096 usec. * (1 << qp->timeout) */ start_timer()
62 qp->s_timer.expires = jiffies + qp->timeout_jiffies; start_timer()
63 add_timer(&qp->s_timer); start_timer()
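
start_timer() arms the RC retransmit timer from the precomputed qp->timeout_jiffies. The conversion into jiffies is only partly visible in the qib_create_qp()/qib_modify_qp() hits, so the sketch below restates only the arming pattern plus the spec-level formula from the comment above (4.096 usec * 2^qp->timeout); the worked number in the comment is plain arithmetic, not taken from the driver.

/*
 * Arming pattern from the start_timer() hits.  The expiry follows the IB
 * local-ACK-timeout rule of 4.096 usec * 2^qp->timeout; e.g. timeout = 14
 * gives roughly 4.096 us * 16384 ~= 67 ms, precomputed into timeout_jiffies.
 */
qp->s_flags |= QIB_S_TIMER;
qp->s_timer.function = rc_timeout;
qp->s_timer.expires = jiffies + qp->timeout_jiffies;
add_timer(&qp->s_timer);
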
69 * @qp: a pointer to the QP
77 static int qib_make_rc_ack(struct qib_ibdev *dev, struct qib_qp *qp, qib_make_rc_ack() argument
87 if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) qib_make_rc_ack()
93 switch (qp->s_ack_state) { qib_make_rc_ack()
96 e = &qp->s_ack_queue[qp->s_tail_ack_queue]; qib_make_rc_ack()
108 if (++qp->s_tail_ack_queue > QIB_MAX_RDMA_ATOMIC) qib_make_rc_ack()
109 qp->s_tail_ack_queue = 0; qib_make_rc_ack()
114 if (qp->r_head_ack_queue == qp->s_tail_ack_queue) { OP()
115 if (qp->s_flags & QIB_S_ACK_PENDING) OP()
120 e = &qp->s_ack_queue[qp->s_tail_ack_queue];
130 qp->s_tail_ack_queue = qp->r_head_ack_queue;
134 qp->s_rdma_mr = e->rdma_sge.mr;
135 if (qp->s_rdma_mr)
136 qib_get_mr(qp->s_rdma_mr);
137 qp->s_ack_rdma_sge.sge = e->rdma_sge;
138 qp->s_ack_rdma_sge.num_sge = 1;
139 qp->s_cur_sge = &qp->s_ack_rdma_sge;
142 qp->s_ack_state = OP(RDMA_READ_RESPONSE_FIRST);
144 qp->s_ack_state = OP(RDMA_READ_RESPONSE_ONLY);
147 ohdr->u.aeth = qib_compute_aeth(qp);
149 qp->s_ack_rdma_psn = e->psn;
150 bth2 = qp->s_ack_rdma_psn++ & QIB_PSN_MASK;
153 qp->s_cur_sge = NULL;
155 qp->s_ack_state = OP(ATOMIC_ACKNOWLEDGE);
156 ohdr->u.at.aeth = qib_compute_aeth(qp);
165 bth0 = qp->s_ack_state << 24;
169 qp->s_ack_state = OP(RDMA_READ_RESPONSE_MIDDLE);
172 qp->s_cur_sge = &qp->s_ack_rdma_sge;
173 qp->s_rdma_mr = qp->s_ack_rdma_sge.sge.mr;
174 if (qp->s_rdma_mr)
175 qib_get_mr(qp->s_rdma_mr);
176 len = qp->s_ack_rdma_sge.sge.sge_length;
180 ohdr->u.aeth = qib_compute_aeth(qp);
182 qp->s_ack_state = OP(RDMA_READ_RESPONSE_LAST);
183 e = &qp->s_ack_queue[qp->s_tail_ack_queue];
186 bth0 = qp->s_ack_state << 24;
187 bth2 = qp->s_ack_rdma_psn++ & QIB_PSN_MASK;
198 qp->s_ack_state = OP(SEND_ONLY);
199 qp->s_flags &= ~QIB_S_ACK_PENDING;
200 qp->s_cur_sge = NULL;
201 if (qp->s_nak_state)
203 cpu_to_be32((qp->r_msn & QIB_MSN_MASK) |
204 (qp->s_nak_state <<
207 ohdr->u.aeth = qib_compute_aeth(qp);
211 bth2 = qp->s_ack_psn & QIB_PSN_MASK;
213 qp->s_rdma_ack_cnt++;
214 qp->s_hdrwords = hwords;
215 qp->s_cur_size = len;
216 qib_make_ruc_header(qp, ohdr, bth0, bth2);
220 qp->s_ack_state = OP(ACKNOWLEDGE);
221 qp->s_flags &= ~(QIB_S_RESP_PENDING | QIB_S_ACK_PENDING);
227 * @qp: a pointer to the QP
231 int qib_make_rc_req(struct qib_qp *qp) qib_make_rc_req() argument
233 struct qib_ibdev *dev = to_idev(qp->ibqp.device); qib_make_rc_req()
241 u32 pmtu = qp->pmtu; qib_make_rc_req()
247 ohdr = &qp->s_hdr->u.oth; qib_make_rc_req()
248 if (qp->remote_ah_attr.ah_flags & IB_AH_GRH) qib_make_rc_req()
249 ohdr = &qp->s_hdr->u.l.oth; qib_make_rc_req()
255 spin_lock_irqsave(&qp->s_lock, flags); qib_make_rc_req()
258 if ((qp->s_flags & QIB_S_RESP_PENDING) && qib_make_rc_req()
259 qib_make_rc_ack(dev, qp, ohdr, pmtu)) qib_make_rc_req()
262 if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_SEND_OK)) { qib_make_rc_req()
263 if (!(ib_qib_state_ops[qp->state] & QIB_FLUSH_SEND)) qib_make_rc_req()
266 if (qp->s_last == qp->s_head) qib_make_rc_req()
269 if (atomic_read(&qp->s_dma_busy)) { qib_make_rc_req()
270 qp->s_flags |= QIB_S_WAIT_DMA; qib_make_rc_req()
273 wqe = get_swqe_ptr(qp, qp->s_last); qib_make_rc_req()
274 qib_send_complete(qp, wqe, qp->s_last != qp->s_acked ? qib_make_rc_req()
280 if (qp->s_flags & (QIB_S_WAIT_RNR | QIB_S_WAIT_ACK)) qib_make_rc_req()
283 if (qib_cmp24(qp->s_psn, qp->s_sending_hpsn) <= 0) { qib_make_rc_req()
284 if (qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) <= 0) { qib_make_rc_req()
285 qp->s_flags |= QIB_S_WAIT_PSN; qib_make_rc_req()
288 qp->s_sending_psn = qp->s_psn; qib_make_rc_req()
289 qp->s_sending_hpsn = qp->s_psn - 1; qib_make_rc_req()
297 wqe = get_swqe_ptr(qp, qp->s_cur); qib_make_rc_req()
298 switch (qp->s_state) { qib_make_rc_req()
300 if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_NEXT_SEND_OK)) qib_make_rc_req()
310 if (qp->s_cur == qp->s_tail) { qib_make_rc_req()
312 if (qp->s_tail == qp->s_head) qib_make_rc_req()
319 qp->s_num_rd_atomic) { qib_make_rc_req()
320 qp->s_flags |= QIB_S_WAIT_FENCE; qib_make_rc_req()
323 wqe->psn = qp->s_next_psn; qib_make_rc_req()
332 ss = &qp->s_sge; qib_make_rc_req()
333 bth2 = qp->s_psn & QIB_PSN_MASK; qib_make_rc_req()
338 if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT) && qib_make_rc_req()
339 qib_cmp24(wqe->ssn, qp->s_lsn + 1) > 0) { qib_make_rc_req()
340 qp->s_flags |= QIB_S_WAIT_SSN_CREDIT; qib_make_rc_req()
346 qp->s_state = OP(SEND_FIRST); qib_make_rc_req()
351 qp->s_state = OP(SEND_ONLY); qib_make_rc_req()
353 qp->s_state = OP(SEND_ONLY_WITH_IMMEDIATE); qib_make_rc_req()
361 if (++qp->s_cur == qp->s_size) qib_make_rc_req()
362 qp->s_cur = 0; qib_make_rc_req()
366 if (newreq && !(qp->s_flags & QIB_S_UNLIMITED_CREDIT)) qib_make_rc_req()
367 qp->s_lsn++; qib_make_rc_req()
371 if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT) && qib_make_rc_req()
372 qib_cmp24(wqe->ssn, qp->s_lsn + 1) > 0) { qib_make_rc_req()
373 qp->s_flags |= QIB_S_WAIT_SSN_CREDIT; qib_make_rc_req()
386 qp->s_state = OP(RDMA_WRITE_FIRST); qib_make_rc_req()
391 qp->s_state = OP(RDMA_WRITE_ONLY); qib_make_rc_req()
393 qp->s_state = OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE); qib_make_rc_req()
402 if (++qp->s_cur == qp->s_size) qib_make_rc_req()
403 qp->s_cur = 0; qib_make_rc_req()
412 if (qp->s_num_rd_atomic >= qib_make_rc_req()
413 qp->s_max_rd_atomic) { qib_make_rc_req()
414 qp->s_flags |= QIB_S_WAIT_RDMAR; qib_make_rc_req()
417 qp->s_num_rd_atomic++; qib_make_rc_req()
418 if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT)) qib_make_rc_req()
419 qp->s_lsn++; qib_make_rc_req()
425 qp->s_next_psn += (len - 1) / pmtu; qib_make_rc_req()
426 wqe->lpsn = qp->s_next_psn++; qib_make_rc_req()
434 qp->s_state = OP(RDMA_READ_REQUEST); qib_make_rc_req()
439 if (++qp->s_cur == qp->s_size) qib_make_rc_req()
440 qp->s_cur = 0; qib_make_rc_req()
450 if (qp->s_num_rd_atomic >= qib_make_rc_req()
451 qp->s_max_rd_atomic) { qib_make_rc_req()
452 qp->s_flags |= QIB_S_WAIT_RDMAR; qib_make_rc_req()
455 qp->s_num_rd_atomic++; qib_make_rc_req()
456 if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT)) qib_make_rc_req()
457 qp->s_lsn++; qib_make_rc_req()
461 qp->s_state = OP(COMPARE_SWAP); qib_make_rc_req()
467 qp->s_state = OP(FETCH_ADD); qib_make_rc_req()
482 if (++qp->s_cur == qp->s_size) qib_make_rc_req()
483 qp->s_cur = 0; qib_make_rc_req()
489 qp->s_sge.sge = wqe->sg_list[0]; qib_make_rc_req()
490 qp->s_sge.sg_list = wqe->sg_list + 1; qib_make_rc_req()
491 qp->s_sge.num_sge = wqe->wr.num_sge; qib_make_rc_req()
492 qp->s_sge.total_len = wqe->length; qib_make_rc_req()
493 qp->s_len = wqe->length; qib_make_rc_req()
495 qp->s_tail++; qib_make_rc_req()
496 if (qp->s_tail >= qp->s_size) qib_make_rc_req()
497 qp->s_tail = 0; qib_make_rc_req()
500 qp->s_psn = wqe->lpsn + 1; qib_make_rc_req()
502 qp->s_psn++; qib_make_rc_req()
503 if (qib_cmp24(qp->s_psn, qp->s_next_psn) > 0) qib_make_rc_req()
504 qp->s_next_psn = qp->s_psn; qib_make_rc_req()
510 * qp->s_state is normally set to the opcode of the qib_make_rc_req()
518 qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn, pmtu); qib_make_rc_req()
521 qp->s_state = OP(SEND_MIDDLE); qib_make_rc_req()
524 bth2 = qp->s_psn++ & QIB_PSN_MASK; qib_make_rc_req()
525 if (qib_cmp24(qp->s_psn, qp->s_next_psn) > 0) qib_make_rc_req()
526 qp->s_next_psn = qp->s_psn; qib_make_rc_req()
527 ss = &qp->s_sge; qib_make_rc_req()
528 len = qp->s_len; qib_make_rc_req()
534 qp->s_state = OP(SEND_LAST); qib_make_rc_req()
536 qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE); qib_make_rc_req()
544 qp->s_cur++; qib_make_rc_req()
545 if (qp->s_cur >= qp->s_size) qib_make_rc_req()
546 qp->s_cur = 0; qib_make_rc_req()
551 * qp->s_state is normally set to the opcode of the qib_make_rc_req()
559 qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn, pmtu); qib_make_rc_req()
562 qp->s_state = OP(RDMA_WRITE_MIDDLE); qib_make_rc_req()
565 bth2 = qp->s_psn++ & QIB_PSN_MASK; qib_make_rc_req()
566 if (qib_cmp24(qp->s_psn, qp->s_next_psn) > 0) qib_make_rc_req()
567 qp->s_next_psn = qp->s_psn; qib_make_rc_req()
568 ss = &qp->s_sge; qib_make_rc_req()
569 len = qp->s_len; qib_make_rc_req()
575 qp->s_state = OP(RDMA_WRITE_LAST); qib_make_rc_req()
577 qp->s_state = OP(RDMA_WRITE_LAST_WITH_IMMEDIATE); qib_make_rc_req()
585 qp->s_cur++; qib_make_rc_req()
586 if (qp->s_cur >= qp->s_size) qib_make_rc_req()
587 qp->s_cur = 0; qib_make_rc_req()
592 * qp->s_state is normally set to the opcode of the qib_make_rc_req()
600 len = ((qp->s_psn - wqe->psn) & QIB_PSN_MASK) * pmtu; qib_make_rc_req()
606 qp->s_state = OP(RDMA_READ_REQUEST); qib_make_rc_req()
608 bth2 = (qp->s_psn & QIB_PSN_MASK) | IB_BTH_REQ_ACK; qib_make_rc_req()
609 qp->s_psn = wqe->lpsn + 1; qib_make_rc_req()
612 qp->s_cur++; qib_make_rc_req()
613 if (qp->s_cur == qp->s_size) qib_make_rc_req()
614 qp->s_cur = 0; qib_make_rc_req()
617 qp->s_sending_hpsn = bth2; qib_make_rc_req()
621 if (qp->s_flags & QIB_S_SEND_ONE) { qib_make_rc_req()
622 qp->s_flags &= ~QIB_S_SEND_ONE; qib_make_rc_req()
623 qp->s_flags |= QIB_S_WAIT_ACK; qib_make_rc_req()
626 qp->s_len -= len; qib_make_rc_req()
627 qp->s_hdrwords = hwords; qib_make_rc_req()
628 qp->s_cur_sge = ss; qib_make_rc_req()
629 qp->s_cur_size = len; qib_make_rc_req()
630 qib_make_ruc_header(qp, ohdr, bth0 | (qp->s_state << 24), bth2); qib_make_rc_req()
636 qp->s_flags &= ~QIB_S_BUSY; qib_make_rc_req()
638 spin_unlock_irqrestore(&qp->s_lock, flags); qib_make_rc_req()
644 * @qp: a pointer to the QP
650 void qib_send_rc_ack(struct qib_qp *qp) qib_send_rc_ack() argument
652 struct qib_devdata *dd = dd_from_ibdev(qp->ibqp.device); qib_send_rc_ack()
653 struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num); qib_send_rc_ack()
666 spin_lock_irqsave(&qp->s_lock, flags); qib_send_rc_ack()
668 if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) qib_send_rc_ack()
672 if ((qp->s_flags & QIB_S_RESP_PENDING) || qp->s_rdma_ack_cnt) qib_send_rc_ack()
680 if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) { qib_send_rc_ack()
682 &qp->remote_ah_attr.grh, hwords, 0); qib_send_rc_ack()
687 bth0 = qib_get_pkey(ibp, qp->s_pkey_index) | (OP(ACKNOWLEDGE) << 24); qib_send_rc_ack()
688 if (qp->s_mig_state == IB_MIG_MIGRATED) qib_send_rc_ack()
690 if (qp->r_nak_state) qib_send_rc_ack()
691 ohdr->u.aeth = cpu_to_be32((qp->r_msn & QIB_MSN_MASK) | qib_send_rc_ack()
692 (qp->r_nak_state << qib_send_rc_ack()
695 ohdr->u.aeth = qib_compute_aeth(qp); qib_send_rc_ack()
696 lrh0 |= ibp->sl_to_vl[qp->remote_ah_attr.sl] << 12 | qib_send_rc_ack()
697 qp->remote_ah_attr.sl << 4; qib_send_rc_ack()
699 hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid); qib_send_rc_ack()
701 hdr.lrh[3] = cpu_to_be16(ppd->lid | qp->remote_ah_attr.src_path_bits); qib_send_rc_ack()
703 ohdr->bth[1] = cpu_to_be32(qp->remote_qpn); qib_send_rc_ack()
704 ohdr->bth[2] = cpu_to_be32(qp->r_ack_psn & QIB_PSN_MASK); qib_send_rc_ack()
706 spin_unlock_irqrestore(&qp->s_lock, flags); qib_send_rc_ack()
713 qp->s_srate, lrh0 >> 12); qib_send_rc_ack()
726 spin_lock_irqsave(&qp->s_lock, flags); qib_send_rc_ack()
761 if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) { qib_send_rc_ack()
763 qp->s_flags |= QIB_S_ACK_PENDING | QIB_S_RESP_PENDING; qib_send_rc_ack()
764 qp->s_nak_state = qp->r_nak_state; qib_send_rc_ack()
765 qp->s_ack_psn = qp->r_ack_psn; qib_send_rc_ack()
768 qib_schedule_send(qp); qib_send_rc_ack()
771 spin_unlock_irqrestore(&qp->s_lock, flags); qib_send_rc_ack()
778 * @qp: the QP
785 static void reset_psn(struct qib_qp *qp, u32 psn) reset_psn() argument
787 u32 n = qp->s_acked; reset_psn()
788 struct qib_swqe *wqe = get_swqe_ptr(qp, n); reset_psn()
791 qp->s_cur = n; reset_psn()
798 qp->s_state = OP(SEND_LAST); reset_psn()
807 if (++n == qp->s_size) reset_psn()
809 if (n == qp->s_tail) reset_psn()
811 wqe = get_swqe_ptr(qp, n); reset_psn()
815 qp->s_cur = n; reset_psn()
821 qp->s_state = OP(SEND_LAST); reset_psn()
835 qp->s_state = OP(RDMA_READ_RESPONSE_FIRST); reset_psn()
840 qp->s_state = OP(RDMA_READ_RESPONSE_LAST); reset_psn()
844 qp->s_state = OP(RDMA_READ_RESPONSE_MIDDLE); reset_psn()
852 qp->s_state = OP(SEND_LAST); reset_psn()
855 qp->s_psn = psn; reset_psn()
861 if ((qib_cmp24(qp->s_psn, qp->s_sending_hpsn) <= 0) && reset_psn()
862 (qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) <= 0)) reset_psn()
863 qp->s_flags |= QIB_S_WAIT_PSN; reset_psn()
870 static void qib_restart_rc(struct qib_qp *qp, u32 psn, int wait) qib_restart_rc() argument
872 struct qib_swqe *wqe = get_swqe_ptr(qp, qp->s_acked); qib_restart_rc()
875 if (qp->s_retry == 0) { qib_restart_rc()
876 if (qp->s_mig_state == IB_MIG_ARMED) { qib_restart_rc()
877 qib_migrate_qp(qp); qib_restart_rc()
878 qp->s_retry = qp->s_retry_cnt; qib_restart_rc()
879 } else if (qp->s_last == qp->s_acked) { qib_restart_rc()
880 qib_send_complete(qp, wqe, IB_WC_RETRY_EXC_ERR); qib_restart_rc()
881 qib_error_qp(qp, IB_WC_WR_FLUSH_ERR); qib_restart_rc()
886 qp->s_retry--; qib_restart_rc()
888 ibp = to_iport(qp->ibqp.device, qp->port_num); qib_restart_rc()
892 ibp->n_rc_resends += (qp->s_psn - psn) & QIB_PSN_MASK; qib_restart_rc()
894 qp->s_flags &= ~(QIB_S_WAIT_FENCE | QIB_S_WAIT_RDMAR | qib_restart_rc()
898 qp->s_flags |= QIB_S_SEND_ONE; qib_restart_rc()
899 reset_psn(qp, psn); qib_restart_rc()
907 struct qib_qp *qp = (struct qib_qp *)arg; rc_timeout() local
911 spin_lock_irqsave(&qp->r_lock, flags); rc_timeout()
912 spin_lock(&qp->s_lock); rc_timeout()
913 if (qp->s_flags & QIB_S_TIMER) { rc_timeout()
914 ibp = to_iport(qp->ibqp.device, qp->port_num); rc_timeout()
916 qp->s_flags &= ~QIB_S_TIMER; rc_timeout()
917 del_timer(&qp->s_timer); rc_timeout()
918 qib_restart_rc(qp, qp->s_last_psn + 1, 1); rc_timeout()
919 qib_schedule_send(qp); rc_timeout()
921 spin_unlock(&qp->s_lock); rc_timeout()
922 spin_unlock_irqrestore(&qp->r_lock, flags); rc_timeout()
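
rc_timeout() is the expiry side of that timer: it takes r_lock and then s_lock (the same order as the receive path), confirms QIB_S_TIMER is still set, clears it, and restarts the send engine from the first unacked PSN. A commented restatement of the handler body from the hits above; the per-port bookkeeping at line 914/915 is elided in the listing and therefore dropped here.

/* Expiry handler for the RC retransmit timer (pattern from the rc_timeout() hits). */
static void rc_timeout_sketch(unsigned long arg)
{
	struct qib_qp *qp = (struct qib_qp *)arg;
	unsigned long flags;

	spin_lock_irqsave(&qp->r_lock, flags);	/* r_lock first, then s_lock */
	spin_lock(&qp->s_lock);
	if (qp->s_flags & QIB_S_TIMER) {
		qp->s_flags &= ~QIB_S_TIMER;
		del_timer(&qp->s_timer);
		qib_restart_rc(qp, qp->s_last_psn + 1, 1);	/* resend from first unacked PSN */
		qib_schedule_send(qp);
	}
	spin_unlock(&qp->s_lock);
	spin_unlock_irqrestore(&qp->r_lock, flags);
}
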
930 struct qib_qp *qp = (struct qib_qp *)arg; qib_rc_rnr_retry() local
933 spin_lock_irqsave(&qp->s_lock, flags); qib_rc_rnr_retry()
934 if (qp->s_flags & QIB_S_WAIT_RNR) { qib_rc_rnr_retry()
935 qp->s_flags &= ~QIB_S_WAIT_RNR; qib_rc_rnr_retry()
936 del_timer(&qp->s_timer); qib_rc_rnr_retry()
937 qib_schedule_send(qp); qib_rc_rnr_retry()
939 spin_unlock_irqrestore(&qp->s_lock, flags); qib_rc_rnr_retry()
943 * Set qp->s_sending_psn to the next PSN after the given one.
946 static void reset_sending_psn(struct qib_qp *qp, u32 psn) reset_sending_psn() argument
949 u32 n = qp->s_last; reset_sending_psn()
953 wqe = get_swqe_ptr(qp, n); reset_sending_psn()
956 qp->s_sending_psn = wqe->lpsn + 1; reset_sending_psn()
958 qp->s_sending_psn = psn + 1; reset_sending_psn()
961 if (++n == qp->s_size) reset_sending_psn()
963 if (n == qp->s_tail) reset_sending_psn()
971 void qib_rc_send_complete(struct qib_qp *qp, struct qib_ib_header *hdr) qib_rc_send_complete() argument
980 if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_OR_FLUSH_SEND)) qib_rc_send_complete()
992 WARN_ON(!qp->s_rdma_ack_cnt); qib_rc_send_complete()
993 qp->s_rdma_ack_cnt--; qib_rc_send_complete()
998 reset_sending_psn(qp, psn); qib_rc_send_complete()
1004 if ((psn & IB_BTH_REQ_ACK) && qp->s_acked != qp->s_tail && qib_rc_send_complete()
1005 !(qp->s_flags & (QIB_S_TIMER | QIB_S_WAIT_RNR | QIB_S_WAIT_PSN)) && qib_rc_send_complete()
1006 (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) qib_rc_send_complete()
1007 start_timer(qp); qib_rc_send_complete()
1009 while (qp->s_last != qp->s_acked) { qib_rc_send_complete()
1010 wqe = get_swqe_ptr(qp, qp->s_last); qib_rc_send_complete()
1011 if (qib_cmp24(wqe->lpsn, qp->s_sending_psn) >= 0 && qib_rc_send_complete()
1012 qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) <= 0) qib_rc_send_complete()
1020 if (!(qp->s_flags & QIB_S_SIGNAL_REQ_WR) || qib_rc_send_complete()
1027 wc.qp = &qp->ibqp; qib_rc_send_complete()
1028 qib_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 0); qib_rc_send_complete()
1030 if (++qp->s_last >= qp->s_size) qib_rc_send_complete()
1031 qp->s_last = 0; qib_rc_send_complete()
1037 if (qp->s_flags & QIB_S_WAIT_PSN && qib_rc_send_complete()
1038 qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) > 0) { qib_rc_send_complete()
1039 qp->s_flags &= ~QIB_S_WAIT_PSN; qib_rc_send_complete()
1040 qp->s_sending_psn = qp->s_psn; qib_rc_send_complete()
1041 qp->s_sending_hpsn = qp->s_psn - 1; qib_rc_send_complete()
1042 qib_schedule_send(qp); qib_rc_send_complete()
1046 static inline void update_last_psn(struct qib_qp *qp, u32 psn) update_last_psn() argument
1048 qp->s_last_psn = psn; update_last_psn()
1056 static struct qib_swqe *do_rc_completion(struct qib_qp *qp, do_rc_completion() argument
1068 if (qib_cmp24(wqe->lpsn, qp->s_sending_psn) < 0 || do_rc_completion()
1069 qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) > 0) { do_rc_completion()
1076 if (!(qp->s_flags & QIB_S_SIGNAL_REQ_WR) || do_rc_completion()
1083 wc.qp = &qp->ibqp; do_rc_completion()
1084 qib_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 0); do_rc_completion()
1086 if (++qp->s_last >= qp->s_size) do_rc_completion()
1087 qp->s_last = 0; do_rc_completion()
1091 qp->s_retry = qp->s_retry_cnt; do_rc_completion()
1092 update_last_psn(qp, wqe->lpsn); do_rc_completion()
1099 if (qp->s_acked == qp->s_cur) { do_rc_completion()
1100 if (++qp->s_cur >= qp->s_size) do_rc_completion()
1101 qp->s_cur = 0; do_rc_completion()
1102 qp->s_acked = qp->s_cur; do_rc_completion()
1103 wqe = get_swqe_ptr(qp, qp->s_cur); do_rc_completion()
1104 if (qp->s_acked != qp->s_tail) { do_rc_completion()
1105 qp->s_state = OP(SEND_LAST); do_rc_completion()
1106 qp->s_psn = wqe->psn; do_rc_completion()
1109 if (++qp->s_acked >= qp->s_size) do_rc_completion()
1110 qp->s_acked = 0; do_rc_completion()
1111 if (qp->state == IB_QPS_SQD && qp->s_acked == qp->s_cur) do_rc_completion()
1112 qp->s_draining = 0; do_rc_completion()
1113 wqe = get_swqe_ptr(qp, qp->s_acked); do_rc_completion()
1120 * @qp: the QP the ACK came in on
1129 static int do_rc_ack(struct qib_qp *qp, u32 aeth, u32 psn, int opcode, do_rc_ack() argument
1140 if (qp->s_flags & (QIB_S_TIMER | QIB_S_WAIT_RNR)) { do_rc_ack()
1141 qp->s_flags &= ~(QIB_S_TIMER | QIB_S_WAIT_RNR); do_rc_ack()
1142 del_timer(&qp->s_timer); do_rc_ack()
1154 wqe = get_swqe_ptr(qp, qp->s_acked); do_rc_ack()
1155 ibp = to_iport(qp->ibqp.device, qp->port_num); do_rc_ack()
1189 if (!(qp->r_flags & QIB_R_RDMAR_SEQ)) { do_rc_ack()
1190 qp->r_flags |= QIB_R_RDMAR_SEQ; do_rc_ack()
1191 qib_restart_rc(qp, qp->s_last_psn + 1, 0); do_rc_ack()
1192 if (list_empty(&qp->rspwait)) { do_rc_ack()
1193 qp->r_flags |= QIB_R_RSP_SEND; do_rc_ack()
1194 atomic_inc(&qp->refcount); do_rc_ack()
1195 list_add_tail(&qp->rspwait, do_rc_ack()
1210 if (qp->s_num_rd_atomic && do_rc_ack()
1214 qp->s_num_rd_atomic--; do_rc_ack()
1216 if ((qp->s_flags & QIB_S_WAIT_FENCE) && do_rc_ack()
1217 !qp->s_num_rd_atomic) { do_rc_ack()
1218 qp->s_flags &= ~(QIB_S_WAIT_FENCE | do_rc_ack()
1220 qib_schedule_send(qp); do_rc_ack()
1221 } else if (qp->s_flags & QIB_S_WAIT_RDMAR) { do_rc_ack()
1222 qp->s_flags &= ~(QIB_S_WAIT_RDMAR | do_rc_ack()
1224 qib_schedule_send(qp); do_rc_ack()
1227 wqe = do_rc_completion(qp, wqe, ibp); do_rc_ack()
1228 if (qp->s_acked == qp->s_tail) do_rc_ack()
1235 if (qp->s_acked != qp->s_tail) { do_rc_ack()
1240 start_timer(qp); do_rc_ack()
1245 if (qib_cmp24(qp->s_psn, psn) <= 0) do_rc_ack()
1246 reset_psn(qp, psn + 1); do_rc_ack()
1247 } else if (qib_cmp24(qp->s_psn, psn) <= 0) { do_rc_ack()
1248 qp->s_state = OP(SEND_LAST); do_rc_ack()
1249 qp->s_psn = psn + 1; do_rc_ack()
1251 if (qp->s_flags & QIB_S_WAIT_ACK) { do_rc_ack()
1252 qp->s_flags &= ~QIB_S_WAIT_ACK; do_rc_ack()
1253 qib_schedule_send(qp); do_rc_ack()
1255 qib_get_credit(qp, aeth); do_rc_ack()
1256 qp->s_rnr_retry = qp->s_rnr_retry_cnt; do_rc_ack()
1257 qp->s_retry = qp->s_retry_cnt; do_rc_ack()
1258 update_last_psn(qp, psn); do_rc_ack()
1264 if (qp->s_acked == qp->s_tail) do_rc_ack()
1266 if (qp->s_flags & QIB_S_WAIT_RNR) do_rc_ack()
1268 if (qp->s_rnr_retry == 0) { do_rc_ack()
1272 if (qp->s_rnr_retry_cnt < 7) do_rc_ack()
1273 qp->s_rnr_retry--; do_rc_ack()
1276 update_last_psn(qp, psn - 1); do_rc_ack()
1278 ibp->n_rc_resends += (qp->s_psn - psn) & QIB_PSN_MASK; do_rc_ack()
1280 reset_psn(qp, psn); do_rc_ack()
1282 qp->s_flags &= ~(QIB_S_WAIT_SSN_CREDIT | QIB_S_WAIT_ACK); do_rc_ack()
1283 qp->s_flags |= QIB_S_WAIT_RNR; do_rc_ack()
1284 qp->s_timer.function = qib_rc_rnr_retry; do_rc_ack()
1285 qp->s_timer.expires = jiffies + usecs_to_jiffies( do_rc_ack()
1288 add_timer(&qp->s_timer); do_rc_ack()
1292 if (qp->s_acked == qp->s_tail) do_rc_ack()
1295 update_last_psn(qp, psn - 1); do_rc_ack()
1306 qib_restart_rc(qp, psn, 0); do_rc_ack()
1307 qib_schedule_send(qp); do_rc_ack()
1324 if (qp->s_last == qp->s_acked) { do_rc_ack()
1325 qib_send_complete(qp, wqe, status); do_rc_ack()
1326 qib_error_qp(qp, IB_WC_WR_FLUSH_ERR); do_rc_ack()
1334 qp->s_retry = qp->s_retry_cnt; do_rc_ack()
1335 qp->s_rnr_retry = qp->s_rnr_retry_cnt; do_rc_ack()
1352 static void rdma_seq_err(struct qib_qp *qp, struct qib_ibport *ibp, u32 psn, rdma_seq_err() argument
1358 if (qp->s_flags & (QIB_S_TIMER | QIB_S_WAIT_RNR)) { rdma_seq_err()
1359 qp->s_flags &= ~(QIB_S_TIMER | QIB_S_WAIT_RNR); rdma_seq_err()
1360 del_timer(&qp->s_timer); rdma_seq_err()
1363 wqe = get_swqe_ptr(qp, qp->s_acked); rdma_seq_err()
1370 wqe = do_rc_completion(qp, wqe, ibp); rdma_seq_err()
1374 qp->r_flags |= QIB_R_RDMAR_SEQ; rdma_seq_err()
1375 qib_restart_rc(qp, qp->s_last_psn + 1, 0); rdma_seq_err()
1376 if (list_empty(&qp->rspwait)) { rdma_seq_err()
1377 qp->r_flags |= QIB_R_RSP_SEND; rdma_seq_err()
1378 atomic_inc(&qp->refcount); rdma_seq_err()
1379 list_add_tail(&qp->rspwait, &rcd->qp_wait_list); rdma_seq_err()
1389 * @qp: the QP for this packet
1402 struct qib_qp *qp, qib_rc_rcv_resp()
1421 if ((qib_cmp24(psn, qp->s_sending_psn) >= 0) && qib_rc_rcv_resp()
1422 (qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) <= 0)) { qib_rc_rcv_resp()
1428 if (!(qp->s_flags & QIB_S_BUSY)) { qib_rc_rcv_resp()
1439 spin_lock_irqsave(&qp->s_lock, flags); qib_rc_rcv_resp()
1440 if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) qib_rc_rcv_resp()
1444 if (qib_cmp24(psn, qp->s_next_psn) >= 0) qib_rc_rcv_resp()
1448 diff = qib_cmp24(psn, qp->s_last_psn); qib_rc_rcv_resp()
1454 qib_get_credit(qp, aeth); qib_rc_rcv_resp()
1463 if (qp->r_flags & QIB_R_RDMAR_SEQ) { qib_rc_rcv_resp()
1464 if (qib_cmp24(psn, qp->s_last_psn + 1) != 0) qib_rc_rcv_resp()
1466 qp->r_flags &= ~QIB_R_RDMAR_SEQ; qib_rc_rcv_resp()
1469 if (unlikely(qp->s_acked == qp->s_tail)) qib_rc_rcv_resp()
1471 wqe = get_swqe_ptr(qp, qp->s_acked); qib_rc_rcv_resp()
1486 if (!do_rc_ack(qp, aeth, psn, opcode, val, rcd) || qib_rc_rcv_resp()
1490 wqe = get_swqe_ptr(qp, qp->s_acked); qib_rc_rcv_resp()
1498 qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge, qib_rc_rcv_resp()
1504 if (unlikely(qib_cmp24(psn, qp->s_last_psn + 1))) qib_rc_rcv_resp()
1511 if (unlikely(pmtu >= qp->s_rdma_read_len)) qib_rc_rcv_resp()
1516 * 4.096 usec. * (1 << qp->timeout) qib_rc_rcv_resp()
1518 qp->s_flags |= QIB_S_TIMER; qib_rc_rcv_resp()
1519 mod_timer(&qp->s_timer, jiffies + qp->timeout_jiffies); qib_rc_rcv_resp()
1520 if (qp->s_flags & QIB_S_WAIT_ACK) { qib_rc_rcv_resp()
1521 qp->s_flags &= ~QIB_S_WAIT_ACK; qib_rc_rcv_resp()
1522 qib_schedule_send(qp); qib_rc_rcv_resp()
1526 qp->s_retry = qp->s_retry_cnt; qib_rc_rcv_resp()
1532 qp->s_rdma_read_len -= pmtu; qib_rc_rcv_resp()
1533 update_last_psn(qp, psn); qib_rc_rcv_resp()
1534 spin_unlock_irqrestore(&qp->s_lock, flags); qib_rc_rcv_resp()
1535 qib_copy_sge(&qp->s_rdma_read_sge, data, pmtu, 0); qib_rc_rcv_resp()
1540 if (!do_rc_ack(qp, aeth, psn, opcode, 0, rcd)) qib_rc_rcv_resp()
1556 wqe = get_swqe_ptr(qp, qp->s_acked); qib_rc_rcv_resp()
1557 qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge, qib_rc_rcv_resp()
1563 if (unlikely(qib_cmp24(psn, qp->s_last_psn + 1))) qib_rc_rcv_resp()
1578 if (unlikely(tlen != qp->s_rdma_read_len)) qib_rc_rcv_resp()
1581 qib_copy_sge(&qp->s_rdma_read_sge, data, tlen, 0); qib_rc_rcv_resp()
1582 WARN_ON(qp->s_rdma_read_sge.num_sge); qib_rc_rcv_resp()
1583 (void) do_rc_ack(qp, aeth, psn, qib_rc_rcv_resp()
1593 rdma_seq_err(qp, ibp, psn, rcd); qib_rc_rcv_resp()
1599 if (qp->s_last == qp->s_acked) { qib_rc_rcv_resp()
1600 qib_send_complete(qp, wqe, status); qib_rc_rcv_resp()
1601 qib_error_qp(qp, IB_WC_WR_FLUSH_ERR); qib_rc_rcv_resp()
1604 spin_unlock_irqrestore(&qp->s_lock, flags); qib_rc_rcv_resp()
1613 * @qp: the QP for this packet
1626 struct qib_qp *qp, qib_rc_rcv_error()
1632 struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num); qib_rc_rcv_error()
1644 if (!qp->r_nak_state) { qib_rc_rcv_error()
1646 qp->r_nak_state = IB_NAK_PSN_ERROR; qib_rc_rcv_error()
1648 qp->r_ack_psn = qp->r_psn; qib_rc_rcv_error()
1654 if (list_empty(&qp->rspwait)) { qib_rc_rcv_error()
1655 qp->r_flags |= QIB_R_RSP_NAK; qib_rc_rcv_error()
1656 atomic_inc(&qp->refcount); qib_rc_rcv_error()
1657 list_add_tail(&qp->rspwait, &rcd->qp_wait_list); qib_rc_rcv_error()
1683 spin_lock_irqsave(&qp->s_lock, flags); qib_rc_rcv_error()
1685 for (i = qp->r_head_ack_queue; ; i = prev) { qib_rc_rcv_error()
1686 if (i == qp->s_tail_ack_queue) qib_rc_rcv_error()
1692 if (prev == qp->r_head_ack_queue) { qib_rc_rcv_error()
1696 e = &qp->s_ack_queue[prev]; qib_rc_rcv_error()
1702 if (prev == qp->s_tail_ack_queue && qib_rc_rcv_error()
1730 qp->pmtu; OP()
1743 ok = qib_rkey_ok(qp, &e->rdma_sge, len, vaddr, rkey, OP()
1755 qp->s_tail_ack_queue = prev; OP()
1768 qp->s_tail_ack_queue = prev; OP()
1783 if (i == qp->r_head_ack_queue) {
1784 spin_unlock_irqrestore(&qp->s_lock, flags);
1785 qp->r_nak_state = 0;
1786 qp->r_ack_psn = qp->r_psn - 1;
1794 if (!(qp->s_flags & QIB_S_RESP_PENDING)) {
1795 spin_unlock_irqrestore(&qp->s_lock, flags);
1796 qp->r_nak_state = 0;
1797 qp->r_ack_psn = qp->s_ack_queue[i].psn - 1;
1804 qp->s_tail_ack_queue = i;
1807 qp->s_ack_state = OP(ACKNOWLEDGE);
1808 qp->s_flags |= QIB_S_RESP_PENDING;
1809 qp->r_nak_state = 0;
1810 qib_schedule_send(qp);
1813 spin_unlock_irqrestore(&qp->s_lock, flags);
1821 void qib_rc_error(struct qib_qp *qp, enum ib_wc_status err) qib_rc_error() argument
1826 spin_lock_irqsave(&qp->s_lock, flags); qib_rc_error()
1827 lastwqe = qib_error_qp(qp, err); qib_rc_error()
1828 spin_unlock_irqrestore(&qp->s_lock, flags); qib_rc_error()
1833 ev.device = qp->ibqp.device; qib_rc_error()
1834 ev.element.qp = &qp->ibqp; qib_rc_error()
1836 qp->ibqp.event_handler(&ev, qp->ibqp.qp_context); qib_rc_error()
1840 static inline void qib_update_ack_queue(struct qib_qp *qp, unsigned n) qib_update_ack_queue() argument
1847 qp->s_tail_ack_queue = next; qib_update_ack_queue()
1848 qp->s_ack_state = OP(ACKNOWLEDGE); qib_update_ack_queue()
1858 * @qp: the QP for this packet
1865 int has_grh, void *data, u32 tlen, struct qib_qp *qp) qib_rc_rcv()
1874 u32 pmtu = qp->pmtu; qib_rc_rcv()
1890 if (qib_ruc_check_hdr(ibp, hdr, has_grh, qp, opcode)) qib_rc_rcv()
1904 qib_rc_rcv_resp(ibp, ohdr, data, tlen, qp, opcode, psn, qib_rc_rcv()
1910 diff = qib_cmp24(psn, qp->r_psn); qib_rc_rcv()
1912 if (qib_rc_rcv_error(ohdr, data, qp, opcode, psn, diff, rcd)) qib_rc_rcv()
1918 switch (qp->r_state) { qib_rc_rcv()
1951 if (qp->state == IB_QPS_RTR && !(qp->r_flags & QIB_R_COMM_EST)) { qib_rc_rcv()
1952 qp->r_flags |= QIB_R_COMM_EST; qib_rc_rcv()
1953 if (qp->ibqp.event_handler) { qib_rc_rcv()
1956 ev.device = qp->ibqp.device; qib_rc_rcv()
1957 ev.element.qp = &qp->ibqp; qib_rc_rcv()
1959 qp->ibqp.event_handler(&ev, qp->ibqp.qp_context); qib_rc_rcv()
1966 ret = qib_get_rwqe(qp, 0); qib_rc_rcv()
1971 qp->r_rcv_len = 0; qib_rc_rcv()
1979 qp->r_rcv_len += pmtu; qib_rc_rcv()
1980 if (unlikely(qp->r_rcv_len > qp->r_len)) qib_rc_rcv()
1982 qib_copy_sge(&qp->r_sge, data, pmtu, 1); qib_rc_rcv()
1987 ret = qib_get_rwqe(qp, 1); qib_rc_rcv()
1996 ret = qib_get_rwqe(qp, 0); qib_rc_rcv()
2001 qp->r_rcv_len = 0; qib_rc_rcv()
2025 wc.byte_len = tlen + qp->r_rcv_len; qib_rc_rcv()
2026 if (unlikely(wc.byte_len > qp->r_len)) qib_rc_rcv()
2028 qib_copy_sge(&qp->r_sge, data, tlen, 1); qib_rc_rcv()
2029 qib_put_ss(&qp->r_sge); qib_rc_rcv()
2030 qp->r_msn++; qib_rc_rcv()
2031 if (!test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags)) qib_rc_rcv()
2033 wc.wr_id = qp->r_wr_id; qib_rc_rcv()
2040 wc.qp = &qp->ibqp; qib_rc_rcv()
2041 wc.src_qp = qp->remote_qpn; qib_rc_rcv()
2042 wc.slid = qp->remote_ah_attr.dlid; qib_rc_rcv()
2043 wc.sl = qp->remote_ah_attr.sl; qib_rc_rcv()
2050 qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, qib_rc_rcv()
2058 if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE))) qib_rc_rcv()
2063 qp->r_len = be32_to_cpu(reth->length); qib_rc_rcv()
2064 qp->r_rcv_len = 0; qib_rc_rcv()
2065 qp->r_sge.sg_list = NULL; qib_rc_rcv()
2066 if (qp->r_len != 0) { qib_rc_rcv()
2072 ok = qib_rkey_ok(qp, &qp->r_sge.sge, qp->r_len, vaddr, qib_rc_rcv()
2076 qp->r_sge.num_sge = 1; qib_rc_rcv()
2078 qp->r_sge.num_sge = 0; qib_rc_rcv()
2079 qp->r_sge.sge.mr = NULL; qib_rc_rcv()
2080 qp->r_sge.sge.vaddr = NULL; qib_rc_rcv()
2081 qp->r_sge.sge.length = 0; qib_rc_rcv()
2082 qp->r_sge.sge.sge_length = 0; qib_rc_rcv()
2088 ret = qib_get_rwqe(qp, 1); qib_rc_rcv()
2103 if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ))) OP()
2105 next = qp->r_head_ack_queue + 1; OP()
2109 spin_lock_irqsave(&qp->s_lock, flags); OP()
2110 if (unlikely(next == qp->s_tail_ack_queue)) { OP()
2111 if (!qp->s_ack_queue[next].sent) OP()
2113 qib_update_ack_queue(qp, next); OP()
2115 e = &qp->s_ack_queue[qp->r_head_ack_queue]; OP()
2128 ok = qib_rkey_ok(qp, &e->rdma_sge, len, vaddr, OP()
2137 qp->r_psn += (len - 1) / pmtu; OP()
2147 e->lpsn = qp->r_psn; OP()
2153 qp->r_msn++; OP()
2154 qp->r_psn++; OP()
2155 qp->r_state = opcode; OP()
2156 qp->r_nak_state = 0; OP()
2157 qp->r_head_ack_queue = next; OP()
2160 qp->s_flags |= QIB_S_RESP_PENDING; OP()
2161 qib_schedule_send(qp); OP()
2176 if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC))) OP()
2178 next = qp->r_head_ack_queue + 1; OP()
2181 spin_lock_irqsave(&qp->s_lock, flags); OP()
2182 if (unlikely(next == qp->s_tail_ack_queue)) { OP()
2183 if (!qp->s_ack_queue[next].sent) OP()
2185 qib_update_ack_queue(qp, next); OP()
2187 e = &qp->s_ack_queue[qp->r_head_ack_queue]; OP()
2199 if (unlikely(!qib_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64), OP()
2204 maddr = (atomic64_t *) qp->r_sge.sge.vaddr; OP()
2208 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr, OP()
2211 qib_put_mr(qp->r_sge.sge.mr); OP()
2212 qp->r_sge.num_sge = 0; OP()
2217 qp->r_msn++; OP()
2218 qp->r_psn++; OP()
2219 qp->r_state = opcode; OP()
2220 qp->r_nak_state = 0; OP()
2221 qp->r_head_ack_queue = next; OP()
2224 qp->s_flags |= QIB_S_RESP_PENDING; OP()
2225 qib_schedule_send(qp); OP()
2234 qp->r_psn++;
2235 qp->r_state = opcode;
2236 qp->r_ack_psn = psn;
2237 qp->r_nak_state = 0;
2244 qp->r_nak_state = IB_RNR_NAK | qp->r_min_rnr_timer;
2245 qp->r_ack_psn = qp->r_psn;
2247 if (list_empty(&qp->rspwait)) {
2248 qp->r_flags |= QIB_R_RSP_NAK;
2249 atomic_inc(&qp->refcount);
2250 list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
2255 qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
2256 qp->r_nak_state = IB_NAK_REMOTE_OPERATIONAL_ERROR;
2257 qp->r_ack_psn = qp->r_psn;
2259 if (list_empty(&qp->rspwait)) {
2260 qp->r_flags |= QIB_R_RSP_NAK;
2261 atomic_inc(&qp->refcount);
2262 list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
2267 spin_unlock_irqrestore(&qp->s_lock, flags);
2269 qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
2270 qp->r_nak_state = IB_NAK_INVALID_REQUEST;
2271 qp->r_ack_psn = qp->r_psn;
2273 if (list_empty(&qp->rspwait)) {
2274 qp->r_flags |= QIB_R_RSP_NAK;
2275 atomic_inc(&qp->refcount);
2276 list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
2281 spin_unlock_irqrestore(&qp->s_lock, flags);
2283 qib_rc_error(qp, IB_WC_LOC_PROT_ERR);
2284 qp->r_nak_state = IB_NAK_REMOTE_ACCESS_ERROR;
2285 qp->r_ack_psn = qp->r_psn;
2287 qib_send_rc_ack(qp);
2291 spin_unlock_irqrestore(&qp->s_lock, flags);
1399 qib_rc_rcv_resp(struct qib_ibport *ibp, struct qib_other_headers *ohdr, void *data, u32 tlen, struct qib_qp *qp, u32 opcode, u32 psn, u32 hdrsize, u32 pmtu, struct qib_ctxtdata *rcd) qib_rc_rcv_resp() argument
1624 qib_rc_rcv_error(struct qib_other_headers *ohdr, void *data, struct qib_qp *qp, u32 opcode, u32 psn, int diff, struct qib_ctxtdata *rcd) qib_rc_rcv_error() argument
1864 qib_rc_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr, int has_grh, void *data, u32 tlen, struct qib_qp *qp) qib_rc_rcv() argument
qib_uc.c
42 * @qp: a pointer to the QP
46 int qib_make_uc_req(struct qib_qp *qp) qib_make_uc_req() argument
54 u32 pmtu = qp->pmtu; qib_make_uc_req()
57 spin_lock_irqsave(&qp->s_lock, flags); qib_make_uc_req()
59 if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_SEND_OK)) { qib_make_uc_req()
60 if (!(ib_qib_state_ops[qp->state] & QIB_FLUSH_SEND)) qib_make_uc_req()
63 if (qp->s_last == qp->s_head) qib_make_uc_req()
66 if (atomic_read(&qp->s_dma_busy)) { qib_make_uc_req()
67 qp->s_flags |= QIB_S_WAIT_DMA; qib_make_uc_req()
70 wqe = get_swqe_ptr(qp, qp->s_last); qib_make_uc_req()
71 qib_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR); qib_make_uc_req()
75 ohdr = &qp->s_hdr->u.oth; qib_make_uc_req()
76 if (qp->remote_ah_attr.ah_flags & IB_AH_GRH) qib_make_uc_req()
77 ohdr = &qp->s_hdr->u.l.oth; qib_make_uc_req()
84 wqe = get_swqe_ptr(qp, qp->s_cur); qib_make_uc_req()
85 qp->s_wqe = NULL; qib_make_uc_req()
86 switch (qp->s_state) { qib_make_uc_req()
88 if (!(ib_qib_state_ops[qp->state] & qib_make_uc_req()
92 if (qp->s_cur == qp->s_head) qib_make_uc_req()
97 wqe->psn = qp->s_next_psn; qib_make_uc_req()
98 qp->s_psn = qp->s_next_psn; qib_make_uc_req()
99 qp->s_sge.sge = wqe->sg_list[0]; qib_make_uc_req()
100 qp->s_sge.sg_list = wqe->sg_list + 1; qib_make_uc_req()
101 qp->s_sge.num_sge = wqe->wr.num_sge; qib_make_uc_req()
102 qp->s_sge.total_len = wqe->length; qib_make_uc_req()
104 qp->s_len = len; qib_make_uc_req()
109 qp->s_state = OP(SEND_FIRST); qib_make_uc_req()
114 qp->s_state = OP(SEND_ONLY); qib_make_uc_req()
116 qp->s_state = qib_make_uc_req()
124 qp->s_wqe = wqe; qib_make_uc_req()
125 if (++qp->s_cur >= qp->s_size) qib_make_uc_req()
126 qp->s_cur = 0; qib_make_uc_req()
138 qp->s_state = OP(RDMA_WRITE_FIRST); qib_make_uc_req()
143 qp->s_state = OP(RDMA_WRITE_ONLY); qib_make_uc_req()
145 qp->s_state = qib_make_uc_req()
153 qp->s_wqe = wqe; qib_make_uc_req()
154 if (++qp->s_cur >= qp->s_size) qib_make_uc_req()
155 qp->s_cur = 0; qib_make_uc_req()
164 qp->s_state = OP(SEND_MIDDLE); qib_make_uc_req()
167 len = qp->s_len; qib_make_uc_req()
173 qp->s_state = OP(SEND_LAST); qib_make_uc_req()
175 qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE); qib_make_uc_req()
182 qp->s_wqe = wqe; qib_make_uc_req()
183 if (++qp->s_cur >= qp->s_size) qib_make_uc_req()
184 qp->s_cur = 0; qib_make_uc_req()
188 qp->s_state = OP(RDMA_WRITE_MIDDLE); qib_make_uc_req()
191 len = qp->s_len; qib_make_uc_req()
197 qp->s_state = OP(RDMA_WRITE_LAST); qib_make_uc_req()
199 qp->s_state = qib_make_uc_req()
207 qp->s_wqe = wqe; qib_make_uc_req()
208 if (++qp->s_cur >= qp->s_size) qib_make_uc_req()
209 qp->s_cur = 0; qib_make_uc_req()
212 qp->s_len -= len; qib_make_uc_req()
213 qp->s_hdrwords = hwords; qib_make_uc_req()
214 qp->s_cur_sge = &qp->s_sge; qib_make_uc_req()
215 qp->s_cur_size = len; qib_make_uc_req()
216 qib_make_ruc_header(qp, ohdr, bth0 | (qp->s_state << 24), qib_make_uc_req()
217 qp->s_next_psn++ & QIB_PSN_MASK); qib_make_uc_req()
223 qp->s_flags &= ~QIB_S_BUSY; qib_make_uc_req()
225 spin_unlock_irqrestore(&qp->s_lock, flags); qib_make_uc_req()
236 * @qp: the QP for this packet.
243 int has_grh, void *data, u32 tlen, struct qib_qp *qp) qib_uc_rcv()
251 u32 pmtu = qp->pmtu; qib_uc_rcv()
265 if (qib_ruc_check_hdr(ibp, hdr, has_grh, qp, opcode)) qib_uc_rcv()
272 if (unlikely(qib_cmp24(psn, qp->r_psn) != 0)) { qib_uc_rcv()
277 qp->r_psn = psn; qib_uc_rcv()
279 if (qp->r_state == OP(SEND_FIRST) || qib_uc_rcv()
280 qp->r_state == OP(SEND_MIDDLE)) { qib_uc_rcv()
281 set_bit(QIB_R_REWIND_SGE, &qp->r_aflags); qib_uc_rcv()
282 qp->r_sge.num_sge = 0; qib_uc_rcv()
284 qib_put_ss(&qp->r_sge); qib_uc_rcv()
285 qp->r_state = OP(SEND_LAST); qib_uc_rcv()
303 switch (qp->r_state) { qib_uc_rcv()
331 if (qp->state == IB_QPS_RTR && !(qp->r_flags & QIB_R_COMM_EST)) { qib_uc_rcv()
332 qp->r_flags |= QIB_R_COMM_EST; qib_uc_rcv()
333 if (qp->ibqp.event_handler) { qib_uc_rcv()
336 ev.device = qp->ibqp.device; qib_uc_rcv()
337 ev.element.qp = &qp->ibqp; qib_uc_rcv()
339 qp->ibqp.event_handler(&ev, qp->ibqp.qp_context); qib_uc_rcv()
349 if (test_and_clear_bit(QIB_R_REWIND_SGE, &qp->r_aflags)) qib_uc_rcv()
350 qp->r_sge = qp->s_rdma_read_sge; qib_uc_rcv()
352 ret = qib_get_rwqe(qp, 0); qib_uc_rcv()
358 * qp->s_rdma_read_sge will be the owner qib_uc_rcv()
361 qp->s_rdma_read_sge = qp->r_sge; qib_uc_rcv()
363 qp->r_rcv_len = 0; qib_uc_rcv()
373 qp->r_rcv_len += pmtu; qib_uc_rcv()
374 if (unlikely(qp->r_rcv_len > qp->r_len)) qib_uc_rcv()
376 qib_copy_sge(&qp->r_sge, data, pmtu, 0); qib_uc_rcv()
398 wc.byte_len = tlen + qp->r_rcv_len; qib_uc_rcv()
399 if (unlikely(wc.byte_len > qp->r_len)) qib_uc_rcv()
402 qib_copy_sge(&qp->r_sge, data, tlen, 0); qib_uc_rcv()
403 qib_put_ss(&qp->s_rdma_read_sge); qib_uc_rcv()
405 wc.wr_id = qp->r_wr_id; qib_uc_rcv()
407 wc.qp = &qp->ibqp; qib_uc_rcv()
408 wc.src_qp = qp->remote_qpn; qib_uc_rcv()
409 wc.slid = qp->remote_ah_attr.dlid; qib_uc_rcv()
410 wc.sl = qp->remote_ah_attr.sl; qib_uc_rcv()
417 qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, qib_uc_rcv()
426 if (unlikely(!(qp->qp_access_flags & OP()
432 qp->r_len = be32_to_cpu(reth->length);
433 qp->r_rcv_len = 0;
434 qp->r_sge.sg_list = NULL;
435 if (qp->r_len != 0) {
441 ok = qib_rkey_ok(qp, &qp->r_sge.sge, qp->r_len,
445 qp->r_sge.num_sge = 1;
447 qp->r_sge.num_sge = 0;
448 qp->r_sge.sge.mr = NULL;
449 qp->r_sge.sge.vaddr = NULL;
450 qp->r_sge.sge.length = 0;
451 qp->r_sge.sge.sge_length = 0;
464 qp->r_rcv_len += pmtu;
465 if (unlikely(qp->r_rcv_len > qp->r_len))
467 qib_copy_sge(&qp->r_sge, data, pmtu, 1);
484 if (unlikely(tlen + qp->r_rcv_len != qp->r_len))
486 if (test_and_clear_bit(QIB_R_REWIND_SGE, &qp->r_aflags))
487 qib_put_ss(&qp->s_rdma_read_sge);
489 ret = qib_get_rwqe(qp, 1);
495 wc.byte_len = qp->r_len;
497 qib_copy_sge(&qp->r_sge, data, tlen, 1);
498 qib_put_ss(&qp->r_sge);
511 if (unlikely(tlen + qp->r_rcv_len != qp->r_len))
513 qib_copy_sge(&qp->r_sge, data, tlen, 1);
514 qib_put_ss(&qp->r_sge);
521 qp->r_psn++;
522 qp->r_state = opcode;
526 set_bit(QIB_R_REWIND_SGE, &qp->r_aflags);
527 qp->r_sge.num_sge = 0;
533 qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
242 qib_uc_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr, int has_grh, void *data, u32 tlen, struct qib_qp *qp) qib_uc_rcv() argument
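The UC receive path above only accepts a packet when qib_cmp24(psn, qp->r_psn) says the PSN is in sequence; the comparison helper itself falls outside these results. The standalone sketch below shows one portable way to order 24-bit sequence numbers across wraparound, by reducing the difference modulo 2^24 and treating differences below 2^23 as "ahead"; cmp24_sketch is an illustrative name, not the driver's helper.

#include <assert.h>
#include <stdint.h>

/* Order two 24-bit sequence numbers modulo 2^24: 0 if equal, positive
 * if a is ahead of b by the shorter distance, negative otherwise.
 * Illustrative only; not the qib driver's definition. */
static int cmp24_sketch(uint32_t a, uint32_t b)
{
	uint32_t d = (a - b) & 0xffffffu;	/* difference modulo 2^24 */

	if (d == 0)
		return 0;
	return d < 0x800000u ? 1 : -1;
}

int main(void)
{
	assert(cmp24_sketch(5, 5) == 0);
	assert(cmp24_sketch(6, 5) > 0);
	/* 0x000001 counts as "ahead of" 0xffffff once the counter wraps */
	assert(cmp24_sketch(0x000001, 0xffffff) > 0);
	assert(cmp24_sketch(0xffffff, 0x000001) < 0);
	return 0;
}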
H A Dqib_ruc.c82 static int qib_init_sge(struct qib_qp *qp, struct qib_rwqe *wqe) qib_init_sge() argument
90 rkt = &to_idev(qp->ibqp.device)->lk_table; qib_init_sge()
91 pd = to_ipd(qp->ibqp.srq ? qp->ibqp.srq->pd : qp->ibqp.pd); qib_init_sge()
92 ss = &qp->r_sge; qib_init_sge()
93 ss->sg_list = qp->r_sg_list; qib_init_sge()
94 qp->r_len = 0; qib_init_sge()
102 qp->r_len += wqe->sg_list[i].length; qib_init_sge()
106 ss->total_len = qp->r_len; qib_init_sge()
121 wc.qp = &qp->ibqp; qib_init_sge()
123 qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1); qib_init_sge()
131 * @qp: the QP
132 * @wr_id_only: update qp->r_wr_id only, not qp->r_sge
139 int qib_get_rwqe(struct qib_qp *qp, int wr_id_only) qib_get_rwqe() argument
150 if (qp->ibqp.srq) { qib_get_rwqe()
151 srq = to_isrq(qp->ibqp.srq); qib_get_rwqe()
157 rq = &qp->r_rq; qib_get_rwqe()
161 if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) { qib_get_rwqe()
186 if (!wr_id_only && !qib_init_sge(qp, wqe)) { qib_get_rwqe()
190 qp->r_wr_id = wqe->wr_id; qib_get_rwqe()
193 set_bit(QIB_R_WRID_VALID, &qp->r_aflags); qib_get_rwqe()
213 ev.device = qp->ibqp.device; qib_get_rwqe()
214 ev.element.srq = qp->ibqp.srq; qib_get_rwqe()
230 void qib_migrate_qp(struct qib_qp *qp) qib_migrate_qp() argument
234 qp->s_mig_state = IB_MIG_MIGRATED; qib_migrate_qp()
235 qp->remote_ah_attr = qp->alt_ah_attr; qib_migrate_qp()
236 qp->port_num = qp->alt_ah_attr.port_num; qib_migrate_qp()
237 qp->s_pkey_index = qp->s_alt_pkey_index; qib_migrate_qp()
239 ev.device = qp->ibqp.device; qib_migrate_qp()
240 ev.element.qp = &qp->ibqp; qib_migrate_qp()
242 qp->ibqp.event_handler(&ev, qp->ibqp.qp_context); qib_migrate_qp()
269 int has_grh, struct qib_qp *qp, u32 bth0) qib_ruc_check_hdr()
274 if (qp->s_mig_state == IB_MIG_ARMED && (bth0 & IB_BTH_MIG_REQ)) { qib_ruc_check_hdr()
276 if (qp->alt_ah_attr.ah_flags & IB_AH_GRH) qib_ruc_check_hdr()
279 if (!(qp->alt_ah_attr.ah_flags & IB_AH_GRH)) qib_ruc_check_hdr()
281 guid = get_sguid(ibp, qp->alt_ah_attr.grh.sgid_index); qib_ruc_check_hdr()
285 qp->alt_ah_attr.grh.dgid.global.subnet_prefix, qib_ruc_check_hdr()
286 qp->alt_ah_attr.grh.dgid.global.interface_id)) qib_ruc_check_hdr()
290 qib_get_pkey(ibp, qp->s_alt_pkey_index))) { qib_ruc_check_hdr()
294 0, qp->ibqp.qp_num, qib_ruc_check_hdr()
299 if (be16_to_cpu(hdr->lrh[3]) != qp->alt_ah_attr.dlid || qib_ruc_check_hdr()
300 ppd_from_ibp(ibp)->port != qp->alt_ah_attr.port_num) qib_ruc_check_hdr()
302 spin_lock_irqsave(&qp->s_lock, flags); qib_ruc_check_hdr()
303 qib_migrate_qp(qp); qib_ruc_check_hdr()
304 spin_unlock_irqrestore(&qp->s_lock, flags); qib_ruc_check_hdr()
307 if (qp->remote_ah_attr.ah_flags & IB_AH_GRH) qib_ruc_check_hdr()
310 if (!(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) qib_ruc_check_hdr()
313 qp->remote_ah_attr.grh.sgid_index); qib_ruc_check_hdr()
317 qp->remote_ah_attr.grh.dgid.global.subnet_prefix, qib_ruc_check_hdr()
318 qp->remote_ah_attr.grh.dgid.global.interface_id)) qib_ruc_check_hdr()
322 qib_get_pkey(ibp, qp->s_pkey_index))) { qib_ruc_check_hdr()
326 0, qp->ibqp.qp_num, qib_ruc_check_hdr()
331 if (be16_to_cpu(hdr->lrh[3]) != qp->remote_ah_attr.dlid || qib_ruc_check_hdr()
332 ppd_from_ibp(ibp)->port != qp->port_num) qib_ruc_check_hdr()
334 if (qp->s_mig_state == IB_MIG_REARM && qib_ruc_check_hdr()
336 qp->s_mig_state = IB_MIG_ARMED; qib_ruc_check_hdr()
359 struct qib_qp *qp; qib_ruc_loopback() local
374 qp = qib_lookup_qpn(ibp, sqp->remote_qpn); qib_ruc_loopback()
410 if (!qp || !(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) || qib_ruc_loopback()
411 qp->ibqp.qp_type != sqp->ibqp.qp_type) { qib_ruc_loopback()
438 ret = qib_get_rwqe(qp, 0); qib_ruc_loopback()
446 if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE))) qib_ruc_loopback()
450 ret = qib_get_rwqe(qp, 1); qib_ruc_loopback()
457 if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE))) qib_ruc_loopback()
461 if (unlikely(!qib_rkey_ok(qp, &qp->r_sge.sge, wqe->length, qib_ruc_loopback()
466 qp->r_sge.sg_list = NULL; qib_ruc_loopback()
467 qp->r_sge.num_sge = 1; qib_ruc_loopback()
468 qp->r_sge.total_len = wqe->length; qib_ruc_loopback()
472 if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ))) qib_ruc_loopback()
474 if (unlikely(!qib_rkey_ok(qp, &sqp->s_sge.sge, wqe->length, qib_ruc_loopback()
482 qp->r_sge.sge = wqe->sg_list[0]; qib_ruc_loopback()
483 qp->r_sge.sg_list = wqe->sg_list + 1; qib_ruc_loopback()
484 qp->r_sge.num_sge = wqe->wr.num_sge; qib_ruc_loopback()
485 qp->r_sge.total_len = wqe->length; qib_ruc_loopback()
490 if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC))) qib_ruc_loopback()
492 if (unlikely(!qib_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64), qib_ruc_loopback()
498 maddr = (atomic64_t *) qp->r_sge.sge.vaddr; qib_ruc_loopback()
503 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr, qib_ruc_loopback()
505 qib_put_mr(qp->r_sge.sge.mr); qib_ruc_loopback()
506 qp->r_sge.num_sge = 0; qib_ruc_loopback()
523 qib_copy_sge(&qp->r_sge, sge->vaddr, len, release); qib_ruc_loopback()
546 qib_put_ss(&qp->r_sge); qib_ruc_loopback()
548 if (!test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags)) qib_ruc_loopback()
555 wc.wr_id = qp->r_wr_id; qib_ruc_loopback()
558 wc.qp = &qp->ibqp; qib_ruc_loopback()
559 wc.src_qp = qp->remote_qpn; qib_ruc_loopback()
560 wc.slid = qp->remote_ah_attr.dlid; qib_ruc_loopback()
561 wc.sl = qp->remote_ah_attr.sl; qib_ruc_loopback()
564 qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, qib_ruc_loopback()
577 if (qp->ibqp.qp_type == IB_QPT_UC) qib_ruc_loopback()
596 usecs_to_jiffies(ib_qib_rnr_table[qp->r_min_rnr_timer]); qib_ruc_loopback()
615 qib_rc_error(qp, wc.status); qib_ruc_loopback()
629 ev.element.qp = &sqp->ibqp; qib_ruc_loopback()
640 if (qp && atomic_dec_and_test(&qp->refcount)) qib_ruc_loopback()
641 wake_up(&qp->wait); qib_ruc_loopback()
675 void qib_make_ruc_header(struct qib_qp *qp, struct qib_other_headers *ohdr, qib_make_ruc_header() argument
678 struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num); qib_make_ruc_header()
684 extra_bytes = -qp->s_cur_size & 3; qib_make_ruc_header()
685 nwords = (qp->s_cur_size + extra_bytes) >> 2; qib_make_ruc_header()
687 if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) { qib_make_ruc_header()
688 qp->s_hdrwords += qib_make_grh(ibp, &qp->s_hdr->u.l.grh, qib_make_ruc_header()
689 &qp->remote_ah_attr.grh, qib_make_ruc_header()
690 qp->s_hdrwords, nwords); qib_make_ruc_header()
693 lrh0 |= ibp->sl_to_vl[qp->remote_ah_attr.sl] << 12 | qib_make_ruc_header()
694 qp->remote_ah_attr.sl << 4; qib_make_ruc_header()
695 qp->s_hdr->lrh[0] = cpu_to_be16(lrh0); qib_make_ruc_header()
696 qp->s_hdr->lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid); qib_make_ruc_header()
697 qp->s_hdr->lrh[2] = cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC); qib_make_ruc_header()
698 qp->s_hdr->lrh[3] = cpu_to_be16(ppd_from_ibp(ibp)->lid | qib_make_ruc_header()
699 qp->remote_ah_attr.src_path_bits); qib_make_ruc_header()
700 bth0 |= qib_get_pkey(ibp, qp->s_pkey_index); qib_make_ruc_header()
702 if (qp->s_mig_state == IB_MIG_MIGRATED) qib_make_ruc_header()
705 ohdr->bth[1] = cpu_to_be32(qp->remote_qpn); qib_make_ruc_header()
720 struct qib_qp *qp = container_of(work, struct qib_qp, s_work); qib_do_send() local
721 struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num); qib_do_send()
723 int (*make_req)(struct qib_qp *qp); qib_do_send()
726 if ((qp->ibqp.qp_type == IB_QPT_RC || qib_do_send()
727 qp->ibqp.qp_type == IB_QPT_UC) && qib_do_send()
728 (qp->remote_ah_attr.dlid & ~((1 << ppd->lmc) - 1)) == ppd->lid) { qib_do_send()
729 qib_ruc_loopback(qp); qib_do_send()
733 if (qp->ibqp.qp_type == IB_QPT_RC) qib_do_send()
735 else if (qp->ibqp.qp_type == IB_QPT_UC) qib_do_send()
740 spin_lock_irqsave(&qp->s_lock, flags); qib_do_send()
743 if (!qib_send_ok(qp)) { qib_do_send()
744 spin_unlock_irqrestore(&qp->s_lock, flags); qib_do_send()
748 qp->s_flags |= QIB_S_BUSY; qib_do_send()
750 spin_unlock_irqrestore(&qp->s_lock, flags); qib_do_send()
754 if (qp->s_hdrwords != 0) { qib_do_send()
759 if (qib_verbs_send(qp, qp->s_hdr, qp->s_hdrwords, qib_do_send()
760 qp->s_cur_sge, qp->s_cur_size)) qib_do_send()
763 qp->s_hdrwords = 0; qib_do_send()
765 } while (make_req(qp)); qib_do_send()
771 void qib_send_complete(struct qib_qp *qp, struct qib_swqe *wqe, qib_send_complete() argument
777 if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_OR_FLUSH_SEND)) qib_send_complete()
785 if (qp->ibqp.qp_type == IB_QPT_UD || qib_send_complete()
786 qp->ibqp.qp_type == IB_QPT_SMI || qib_send_complete()
787 qp->ibqp.qp_type == IB_QPT_GSI) qib_send_complete()
791 if (!(qp->s_flags & QIB_S_SIGNAL_REQ_WR) || qib_send_complete()
800 wc.qp = &qp->ibqp; qib_send_complete()
803 qib_cq_enter(to_icq(qp->ibqp.send_cq), &wc, qib_send_complete()
807 last = qp->s_last; qib_send_complete()
809 if (++last >= qp->s_size) qib_send_complete()
811 qp->s_last = last; qib_send_complete()
812 if (qp->s_acked == old_last) qib_send_complete()
813 qp->s_acked = last; qib_send_complete()
814 if (qp->s_cur == old_last) qib_send_complete()
815 qp->s_cur = last; qib_send_complete()
816 if (qp->s_tail == old_last) qib_send_complete()
817 qp->s_tail = last; qib_send_complete()
818 if (qp->state == IB_QPS_SQD && last == qp->s_cur) qib_send_complete()
819 qp->s_draining = 0; qib_send_complete()
268 qib_ruc_check_hdr(struct qib_ibport *ibp, struct qib_ib_header *hdr, int has_grh, struct qib_qp *qp, u32 bth0) qib_ruc_check_hdr() argument
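A pattern that recurs throughout these results is reference counting on the QP: qib_lookup_qpn() and the response paths take a reference with atomic_inc(&qp->refcount), each finished path drops it with atomic_dec_and_test() plus wake_up(&qp->wait), and teardown waits for the count to reach zero. The sketch below is a userspace analogue using C11 atomics; struct qp_ref, qp_hold() and qp_put() are made-up names, and the kernel's wait-queue handshake is reduced to a boolean return.

#include <stdatomic.h>
#include <stdio.h>

/* Userspace analogue of the hold/put pattern above: lookups take a
 * reference, each finished path drops one, and whoever drops the last
 * reference is the one allowed to signal "safe to free".  A sketch only;
 * the driver pairs atomic_dec_and_test() with a wait queue instead. */
struct qp_ref {
	atomic_int refcount;
};

static void qp_hold(struct qp_ref *qp)
{
	atomic_fetch_add(&qp->refcount, 1);
}

static int qp_put(struct qp_ref *qp)
{
	/* non-zero when this call dropped the final reference */
	return atomic_fetch_sub(&qp->refcount, 1) == 1;
}

int main(void)
{
	struct qp_ref qp;

	atomic_init(&qp.refcount, 1);	/* owner's initial reference */
	qp_hold(&qp);			/* e.g. qib_lookup_qpn() */
	qp_put(&qp);			/* e.g. end of qib_ruc_loopback() */
	if (qp_put(&qp))		/* owner's final put at destroy time */
		puts("last reference dropped; safe to free");
	return 0;
}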
H A Dqib_ud.c53 struct qib_qp *qp; qib_ud_loopback() local
62 qp = qib_lookup_qpn(ibp, swqe->ud_wr.remote_qpn); qib_ud_loopback()
63 if (!qp) { qib_ud_loopback()
70 dqptype = qp->ibqp.qp_type == IB_QPT_GSI ? qib_ud_loopback()
71 IB_QPT_UD : qp->ibqp.qp_type; qib_ud_loopback()
74 !(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) { qib_ud_loopback()
82 if (qp->ibqp.qp_num > 1) { qib_ud_loopback()
88 pkey2 = qib_get_pkey(ibp, qp->s_pkey_index); qib_ud_loopback()
94 sqp->ibqp.qp_num, qp->ibqp.qp_num, qib_ud_loopback()
106 if (qp->ibqp.qp_num) { qib_ud_loopback()
111 if (unlikely(qkey != qp->qkey)) { qib_ud_loopback()
118 sqp->ibqp.qp_num, qp->ibqp.qp_num, qib_ud_loopback()
138 spin_lock_irqsave(&qp->r_lock, flags); qib_ud_loopback()
143 if (qp->r_flags & QIB_R_REUSE_SGE) qib_ud_loopback()
144 qp->r_flags &= ~QIB_R_REUSE_SGE; qib_ud_loopback()
148 ret = qib_get_rwqe(qp, 0); qib_ud_loopback()
150 qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR); qib_ud_loopback()
154 if (qp->ibqp.qp_num == 0) qib_ud_loopback()
160 if (unlikely(wc.byte_len > qp->r_len)) { qib_ud_loopback()
161 qp->r_flags |= QIB_R_REUSE_SGE; qib_ud_loopback()
167 qib_copy_sge(&qp->r_sge, &ah_attr->grh, qib_ud_loopback()
171 qib_skip_sge(&qp->r_sge, sizeof(struct ib_grh), 1); qib_ud_loopback()
184 qib_copy_sge(&qp->r_sge, sge->vaddr, len, 1); qib_ud_loopback()
204 qib_put_ss(&qp->r_sge); qib_ud_loopback()
205 if (!test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags)) qib_ud_loopback()
207 wc.wr_id = qp->r_wr_id; qib_ud_loopback()
210 wc.qp = &qp->ibqp; qib_ud_loopback()
212 wc.pkey_index = qp->ibqp.qp_type == IB_QPT_GSI ? qib_ud_loopback()
217 wc.port_num = qp->port_num; qib_ud_loopback()
219 qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, qib_ud_loopback()
223 spin_unlock_irqrestore(&qp->r_lock, flags); qib_ud_loopback()
225 if (atomic_dec_and_test(&qp->refcount)) qib_ud_loopback()
226 wake_up(&qp->wait); qib_ud_loopback()
231 * @qp: the QP
235 int qib_make_ud_req(struct qib_qp *qp) qib_make_ud_req() argument
251 spin_lock_irqsave(&qp->s_lock, flags); qib_make_ud_req()
253 if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_NEXT_SEND_OK)) { qib_make_ud_req()
254 if (!(ib_qib_state_ops[qp->state] & QIB_FLUSH_SEND)) qib_make_ud_req()
257 if (qp->s_last == qp->s_head) qib_make_ud_req()
260 if (atomic_read(&qp->s_dma_busy)) { qib_make_ud_req()
261 qp->s_flags |= QIB_S_WAIT_DMA; qib_make_ud_req()
264 wqe = get_swqe_ptr(qp, qp->s_last); qib_make_ud_req()
265 qib_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR); qib_make_ud_req()
269 if (qp->s_cur == qp->s_head) qib_make_ud_req()
272 wqe = get_swqe_ptr(qp, qp->s_cur); qib_make_ud_req()
273 next_cur = qp->s_cur + 1; qib_make_ud_req()
274 if (next_cur >= qp->s_size) qib_make_ud_req()
278 ibp = to_iport(qp->ibqp.device, qp->port_num); qib_make_ud_req()
297 if (atomic_read(&qp->s_dma_busy)) { qib_make_ud_req()
298 qp->s_flags |= QIB_S_WAIT_DMA; qib_make_ud_req()
301 qp->s_cur = next_cur; qib_make_ud_req()
302 spin_unlock_irqrestore(&qp->s_lock, flags); qib_make_ud_req()
303 qib_ud_loopback(qp, wqe); qib_make_ud_req()
304 spin_lock_irqsave(&qp->s_lock, flags); qib_make_ud_req()
305 qib_send_complete(qp, wqe, IB_WC_SUCCESS); qib_make_ud_req()
310 qp->s_cur = next_cur; qib_make_ud_req()
315 qp->s_hdrwords = 7; qib_make_ud_req()
316 qp->s_cur_size = wqe->length; qib_make_ud_req()
317 qp->s_cur_sge = &qp->s_sge; qib_make_ud_req()
318 qp->s_srate = ah_attr->static_rate; qib_make_ud_req()
319 qp->s_wqe = wqe; qib_make_ud_req()
320 qp->s_sge.sge = wqe->sg_list[0]; qib_make_ud_req()
321 qp->s_sge.sg_list = wqe->sg_list + 1; qib_make_ud_req()
322 qp->s_sge.num_sge = wqe->wr.num_sge; qib_make_ud_req()
323 qp->s_sge.total_len = wqe->length; qib_make_ud_req()
327 qp->s_hdrwords += qib_make_grh(ibp, &qp->s_hdr->u.l.grh, qib_make_ud_req()
329 qp->s_hdrwords, nwords); qib_make_ud_req()
331 ohdr = &qp->s_hdr->u.l.oth; qib_make_ud_req()
339 ohdr = &qp->s_hdr->u.oth; qib_make_ud_req()
342 qp->s_hdrwords++; qib_make_ud_req()
348 if (qp->ibqp.qp_type == IB_QPT_SMI) qib_make_ud_req()
352 qp->s_hdr->lrh[0] = cpu_to_be16(lrh0); qib_make_ud_req()
353 qp->s_hdr->lrh[1] = cpu_to_be16(ah_attr->dlid); /* DEST LID */ qib_make_ud_req()
354 qp->s_hdr->lrh[2] = cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC); qib_make_ud_req()
358 qp->s_hdr->lrh[3] = cpu_to_be16(lid); qib_make_ud_req()
360 qp->s_hdr->lrh[3] = IB_LID_PERMISSIVE; qib_make_ud_req()
364 bth0 |= qp->ibqp.qp_type == IB_QPT_SMI ? QIB_DEFAULT_P_KEY : qib_make_ud_req()
365 qib_get_pkey(ibp, qp->ibqp.qp_type == IB_QPT_GSI ? qib_make_ud_req()
366 wqe->ud_wr.pkey_index : qp->s_pkey_index); qib_make_ud_req()
375 ohdr->bth[2] = cpu_to_be32(qp->s_next_psn++ & QIB_PSN_MASK); qib_make_ud_req()
381 qp->qkey : wqe->ud_wr.remote_qkey); qib_make_ud_req()
382 ohdr->u.ud.deth[1] = cpu_to_be32(qp->ibqp.qp_num); qib_make_ud_req()
389 qp->s_flags &= ~QIB_S_BUSY; qib_make_ud_req()
391 spin_unlock_irqrestore(&qp->s_lock, flags); qib_make_ud_req()
422 * @qp: the QP the packet came on
429 int has_grh, void *data, u32 tlen, struct qib_qp *qp) qib_ud_rcv()
465 if (qp->ibqp.qp_num) { qib_ud_rcv()
469 if (qp->ibqp.qp_num > 1) { qib_ud_rcv()
473 pkey2 = qib_get_pkey(ibp, qp->s_pkey_index); qib_ud_rcv()
479 src_qp, qp->ibqp.qp_num, qib_ud_rcv()
484 if (unlikely(qkey != qp->qkey)) { qib_ud_rcv()
487 src_qp, qp->ibqp.qp_num, qib_ud_rcv()
492 if (unlikely(qp->ibqp.qp_num == 1 && qib_ud_rcv()
514 if (qp->ibqp.qp_num > 1 && qib_ud_rcv()
534 if (qp->r_flags & QIB_R_REUSE_SGE) qib_ud_rcv()
535 qp->r_flags &= ~QIB_R_REUSE_SGE; qib_ud_rcv()
539 ret = qib_get_rwqe(qp, 0); qib_ud_rcv()
541 qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR); qib_ud_rcv()
545 if (qp->ibqp.qp_num == 0) qib_ud_rcv()
551 if (unlikely(wc.byte_len > qp->r_len)) { qib_ud_rcv()
552 qp->r_flags |= QIB_R_REUSE_SGE; qib_ud_rcv()
556 qib_copy_sge(&qp->r_sge, &hdr->u.l.grh, qib_ud_rcv()
560 qib_skip_sge(&qp->r_sge, sizeof(struct ib_grh), 1); qib_ud_rcv()
561 qib_copy_sge(&qp->r_sge, data, wc.byte_len - sizeof(struct ib_grh), 1); qib_ud_rcv()
562 qib_put_ss(&qp->r_sge); qib_ud_rcv()
563 if (!test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags)) qib_ud_rcv()
565 wc.wr_id = qp->r_wr_id; qib_ud_rcv()
569 wc.qp = &qp->ibqp; qib_ud_rcv()
571 wc.pkey_index = qp->ibqp.qp_type == IB_QPT_GSI ? qib_ud_rcv()
581 wc.port_num = qp->port_num; qib_ud_rcv()
583 qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, qib_ud_rcv()
428 qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr, int has_grh, void *data, u32 tlen, struct qib_qp *qp) qib_ud_rcv() argument
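The UD loopback and receive paths above reject packets whose P_Key or Q_Key does not match the destination QP. For the P_Key side they rely on the usual InfiniBand matching rule: the low 15 bits must be equal and non-zero, and at least one of the two keys must carry the full-membership bit. The sketch below encodes that rule; pkey_match_sketch is an illustrative helper, not the driver's own check.

#include <assert.h>
#include <stdint.h>

/* InfiniBand-style P_Key match: base (low 15 bits) equal and non-zero,
 * and at least one side a full member (bit 15 set).  A sketch of the
 * rule the checks above depend on, not the driver's code. */
static int pkey_match_sketch(uint16_t a, uint16_t b)
{
	uint16_t base_a = a & 0x7fff;
	uint16_t base_b = b & 0x7fff;

	return base_a && base_a == base_b && ((a | b) & 0x8000);
}

int main(void)
{
	assert(pkey_match_sketch(0xffff, 0xffff));	/* full / full */
	assert(pkey_match_sketch(0x7fff, 0xffff));	/* limited / full */
	assert(!pkey_match_sketch(0x7fff, 0x7fff));	/* limited / limited */
	assert(!pkey_match_sketch(0xffff, 0xfffe));	/* different base */
	return 0;
}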
H A Dqib_verbs.c334 * @qp: the QP to post on
337 static int qib_post_one_send(struct qib_qp *qp, struct ib_send_wr *wr, qib_post_one_send() argument
350 spin_lock_irqsave(&qp->s_lock, flags); qib_post_one_send()
353 if (unlikely(!(ib_qib_state_ops[qp->state] & QIB_POST_SEND_OK))) qib_post_one_send()
357 if (wr->num_sge > qp->s_max_sge) qib_post_one_send()
366 if (qib_reg_mr(qp, reg_wr(wr))) qib_post_one_send()
368 } else if (qp->ibqp.qp_type == IB_QPT_UC) { qib_post_one_send()
371 } else if (qp->ibqp.qp_type != IB_QPT_RC) { qib_post_one_send()
377 if (qp->ibqp.pd != ud_wr(wr)->ah->pd) qib_post_one_send()
386 else if (wr->opcode >= IB_WR_RDMA_READ && !qp->s_max_rd_atomic) qib_post_one_send()
389 next = qp->s_head + 1; qib_post_one_send()
390 if (next >= qp->s_size) qib_post_one_send()
392 if (next == qp->s_last) { qib_post_one_send()
397 rkt = &to_idev(qp->ibqp.device)->lk_table; qib_post_one_send()
398 pd = to_ipd(qp->ibqp.pd); qib_post_one_send()
399 wqe = get_swqe_ptr(qp, qp->s_head); qib_post_one_send()
401 if (qp->ibqp.qp_type != IB_QPT_UC && qib_post_one_send()
402 qp->ibqp.qp_type != IB_QPT_RC) qib_post_one_send()
437 if (qp->ibqp.qp_type == IB_QPT_UC || qib_post_one_send()
438 qp->ibqp.qp_type == IB_QPT_RC) { qib_post_one_send()
441 } else if (wqe->length > (dd_from_ibdev(qp->ibqp.device)->pport + qib_post_one_send()
442 qp->port_num - 1)->ibmtu) qib_post_one_send()
446 wqe->ssn = qp->s_ssn++; qib_post_one_send()
447 qp->s_head = next; qib_post_one_send()
463 dd_from_ibdev(qp->ibqp.device)->pport + qp->port_num - 1)) { qib_post_one_send()
464 qib_schedule_send(qp); qib_post_one_send()
467 spin_unlock_irqrestore(&qp->s_lock, flags); qib_post_one_send()
482 struct qib_qp *qp = to_iqp(ibqp); qib_post_send() local
487 err = qib_post_one_send(qp, wr, &scheduled); qib_post_send()
496 qib_do_send(&qp->s_work); qib_post_send()
513 struct qib_qp *qp = to_iqp(ibqp); qib_post_receive() local
514 struct qib_rwq *wq = qp->r_rq.wq; qib_post_receive()
519 if (!(ib_qib_state_ops[qp->state] & QIB_POST_RECV_OK) || !wq) { qib_post_receive()
530 if ((unsigned) wr->num_sge > qp->r_rq.max_sge) { qib_post_receive()
536 spin_lock_irqsave(&qp->r_rq.lock, flags); qib_post_receive()
538 if (next >= qp->r_rq.size) qib_post_receive()
541 spin_unlock_irqrestore(&qp->r_rq.lock, flags); qib_post_receive()
547 wqe = get_rwqe_ptr(&qp->r_rq, wq->head); qib_post_receive()
555 spin_unlock_irqrestore(&qp->r_rq.lock, flags); qib_post_receive()
570 * @qp: the QP the packet came on
577 int has_grh, void *data, u32 tlen, struct qib_qp *qp) qib_qp_rcv()
581 spin_lock(&qp->r_lock); qib_qp_rcv()
584 if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) { qib_qp_rcv()
589 switch (qp->ibqp.qp_type) { qib_qp_rcv()
596 qib_ud_rcv(ibp, hdr, has_grh, data, tlen, qp); qib_qp_rcv()
600 qib_rc_rcv(rcd, hdr, has_grh, data, tlen, qp); qib_qp_rcv()
604 qib_uc_rcv(ibp, hdr, has_grh, data, tlen, qp); qib_qp_rcv()
612 spin_unlock(&qp->r_lock); qib_qp_rcv()
631 struct qib_qp *qp; qib_ib_rcv() local
684 qib_qp_rcv(rcd, hdr, 1, data, tlen, p->qp); qib_ib_rcv()
702 qp = qib_lookup_qpn(ibp, qp_num); qib_ib_rcv()
703 if (!qp) qib_ib_rcv()
705 rcd->lookaside_qp = qp; qib_ib_rcv()
708 qp = rcd->lookaside_qp; qib_ib_rcv()
710 qib_qp_rcv(rcd, hdr, lnh == QIB_LRH_GRH, data, tlen, qp); qib_ib_rcv()
726 struct qib_qp *qp = NULL; mem_timer() local
731 qp = list_entry(list->next, struct qib_qp, iowait); mem_timer()
732 list_del_init(&qp->iowait); mem_timer()
733 atomic_inc(&qp->refcount); mem_timer()
739 if (qp) { mem_timer()
740 spin_lock_irqsave(&qp->s_lock, flags); mem_timer()
741 if (qp->s_flags & QIB_S_WAIT_KMEM) { mem_timer()
742 qp->s_flags &= ~QIB_S_WAIT_KMEM; mem_timer()
743 qib_schedule_send(qp); mem_timer()
745 spin_unlock_irqrestore(&qp->s_lock, flags); mem_timer()
746 if (atomic_dec_and_test(&qp->refcount)) mem_timer()
747 wake_up(&qp->wait); mem_timer()
945 struct qib_qp *qp) __get_txreq()
950 spin_lock_irqsave(&qp->s_lock, flags); __get_txreq()
958 spin_unlock_irqrestore(&qp->s_lock, flags); __get_txreq()
961 if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK && __get_txreq()
962 list_empty(&qp->iowait)) { __get_txreq()
964 qp->s_flags |= QIB_S_WAIT_TX; __get_txreq()
965 list_add_tail(&qp->iowait, &dev->txwait); __get_txreq()
967 qp->s_flags &= ~QIB_S_BUSY; __get_txreq()
969 spin_unlock_irqrestore(&qp->s_lock, flags); __get_txreq()
976 struct qib_qp *qp) get_txreq()
992 tx = __get_txreq(dev, qp); get_txreq()
1000 struct qib_qp *qp; qib_put_txreq() local
1003 qp = tx->qp; qib_put_txreq()
1004 dev = to_idev(qp->ibqp.device); qib_put_txreq()
1006 if (atomic_dec_and_test(&qp->refcount)) qib_put_txreq()
1007 wake_up(&qp->wait); qib_put_txreq()
1027 qp = list_entry(dev->txwait.next, struct qib_qp, iowait); qib_put_txreq()
1028 list_del_init(&qp->iowait); qib_put_txreq()
1029 atomic_inc(&qp->refcount); qib_put_txreq()
1032 spin_lock_irqsave(&qp->s_lock, flags); qib_put_txreq()
1033 if (qp->s_flags & QIB_S_WAIT_TX) { qib_put_txreq()
1034 qp->s_flags &= ~QIB_S_WAIT_TX; qib_put_txreq()
1035 qib_schedule_send(qp); qib_put_txreq()
1037 spin_unlock_irqrestore(&qp->s_lock, flags); qib_put_txreq()
1039 if (atomic_dec_and_test(&qp->refcount)) qib_put_txreq()
1040 wake_up(&qp->wait); qib_put_txreq()
1053 struct qib_qp *qp, *nqp; qib_verbs_sdma_desc_avail() local
1063 list_for_each_entry_safe(qp, nqp, &dev->dmawait, iowait) { qib_verbs_sdma_desc_avail()
1064 if (qp->port_num != ppd->port) qib_verbs_sdma_desc_avail()
1068 if (qp->s_tx->txreq.sg_count > avail) qib_verbs_sdma_desc_avail()
1070 avail -= qp->s_tx->txreq.sg_count; qib_verbs_sdma_desc_avail()
1071 list_del_init(&qp->iowait); qib_verbs_sdma_desc_avail()
1072 atomic_inc(&qp->refcount); qib_verbs_sdma_desc_avail()
1073 qps[n++] = qp; qib_verbs_sdma_desc_avail()
1079 qp = qps[i]; qib_verbs_sdma_desc_avail()
1080 spin_lock(&qp->s_lock); qib_verbs_sdma_desc_avail()
1081 if (qp->s_flags & QIB_S_WAIT_DMA_DESC) { qib_verbs_sdma_desc_avail()
1082 qp->s_flags &= ~QIB_S_WAIT_DMA_DESC; qib_verbs_sdma_desc_avail()
1083 qib_schedule_send(qp); qib_verbs_sdma_desc_avail()
1085 spin_unlock(&qp->s_lock); qib_verbs_sdma_desc_avail()
1086 if (atomic_dec_and_test(&qp->refcount)) qib_verbs_sdma_desc_avail()
1087 wake_up(&qp->wait); qib_verbs_sdma_desc_avail()
1098 struct qib_qp *qp = tx->qp; sdma_complete() local
1100 spin_lock(&qp->s_lock); sdma_complete()
1102 qib_send_complete(qp, tx->wqe, IB_WC_SUCCESS); sdma_complete()
1103 else if (qp->ibqp.qp_type == IB_QPT_RC) { sdma_complete()
1109 struct qib_ibdev *dev = to_idev(qp->ibqp.device); sdma_complete()
1113 qib_rc_send_complete(qp, hdr); sdma_complete()
1115 if (atomic_dec_and_test(&qp->s_dma_busy)) { sdma_complete()
1116 if (qp->state == IB_QPS_RESET) sdma_complete()
1117 wake_up(&qp->wait_dma); sdma_complete()
1118 else if (qp->s_flags & QIB_S_WAIT_DMA) { sdma_complete()
1119 qp->s_flags &= ~QIB_S_WAIT_DMA; sdma_complete()
1120 qib_schedule_send(qp); sdma_complete()
1123 spin_unlock(&qp->s_lock); sdma_complete()
1128 static int wait_kmem(struct qib_ibdev *dev, struct qib_qp *qp) wait_kmem() argument
1133 spin_lock_irqsave(&qp->s_lock, flags); wait_kmem()
1134 if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) { wait_kmem()
1136 if (list_empty(&qp->iowait)) { wait_kmem()
1139 qp->s_flags |= QIB_S_WAIT_KMEM; wait_kmem()
1140 list_add_tail(&qp->iowait, &dev->memwait); wait_kmem()
1143 qp->s_flags &= ~QIB_S_BUSY; wait_kmem()
1146 spin_unlock_irqrestore(&qp->s_lock, flags); wait_kmem()
1151 static int qib_verbs_send_dma(struct qib_qp *qp, struct qib_ib_header *hdr, qib_verbs_send_dma() argument
1155 struct qib_ibdev *dev = to_idev(qp->ibqp.device); qib_verbs_send_dma()
1157 struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num); qib_verbs_send_dma()
1165 tx = qp->s_tx; qib_verbs_send_dma()
1167 qp->s_tx = NULL; qib_verbs_send_dma()
1173 tx = get_txreq(dev, qp); qib_verbs_send_dma()
1177 control = dd->f_setpbc_control(ppd, plen, qp->s_srate, qib_verbs_send_dma()
1179 tx->qp = qp; qib_verbs_send_dma()
1180 atomic_inc(&qp->refcount); qib_verbs_send_dma()
1181 tx->wqe = qp->s_wqe; qib_verbs_send_dma()
1182 tx->mr = qp->s_rdma_mr; qib_verbs_send_dma()
1183 if (qp->s_rdma_mr) qib_verbs_send_dma()
1184 qp->s_rdma_mr = NULL; qib_verbs_send_dma()
1241 ret = wait_kmem(dev, qp); qib_verbs_send_dma()
1255 static int no_bufs_available(struct qib_qp *qp) no_bufs_available() argument
1257 struct qib_ibdev *dev = to_idev(qp->ibqp.device); no_bufs_available()
1268 spin_lock_irqsave(&qp->s_lock, flags); no_bufs_available()
1269 if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) { no_bufs_available()
1271 if (list_empty(&qp->iowait)) { no_bufs_available()
1273 qp->s_flags |= QIB_S_WAIT_PIO; no_bufs_available()
1274 list_add_tail(&qp->iowait, &dev->piowait); no_bufs_available()
1279 qp->s_flags &= ~QIB_S_BUSY; no_bufs_available()
1282 spin_unlock_irqrestore(&qp->s_lock, flags); no_bufs_available()
1286 static int qib_verbs_send_pio(struct qib_qp *qp, struct qib_ib_header *ibhdr, qib_verbs_send_pio() argument
1290 struct qib_devdata *dd = dd_from_ibdev(qp->ibqp.device); qib_verbs_send_pio()
1291 struct qib_pportdata *ppd = dd->pport + qp->port_num - 1; qib_verbs_send_pio()
1301 control = dd->f_setpbc_control(ppd, plen, qp->s_srate, qib_verbs_send_pio()
1306 return no_bufs_available(qp); qib_verbs_send_pio()
1367 if (qp->s_rdma_mr) { qib_verbs_send_pio()
1368 qib_put_mr(qp->s_rdma_mr); qib_verbs_send_pio()
1369 qp->s_rdma_mr = NULL; qib_verbs_send_pio()
1371 if (qp->s_wqe) { qib_verbs_send_pio()
1372 spin_lock_irqsave(&qp->s_lock, flags); qib_verbs_send_pio()
1373 qib_send_complete(qp, qp->s_wqe, IB_WC_SUCCESS); qib_verbs_send_pio()
1374 spin_unlock_irqrestore(&qp->s_lock, flags); qib_verbs_send_pio()
1375 } else if (qp->ibqp.qp_type == IB_QPT_RC) { qib_verbs_send_pio()
1376 spin_lock_irqsave(&qp->s_lock, flags); qib_verbs_send_pio()
1377 qib_rc_send_complete(qp, ibhdr); qib_verbs_send_pio()
1378 spin_unlock_irqrestore(&qp->s_lock, flags); qib_verbs_send_pio()
1385 * @qp: the QP to send on
1392 * Return non-zero and clear qp->s_flags QIB_S_BUSY otherwise.
1394 int qib_verbs_send(struct qib_qp *qp, struct qib_ib_header *hdr, qib_verbs_send() argument
1397 struct qib_devdata *dd = dd_from_ibdev(qp->ibqp.device); qib_verbs_send()
1413 if (qp->ibqp.qp_type == IB_QPT_SMI || qib_verbs_send()
1415 ret = qib_verbs_send_pio(qp, hdr, hdrwords, ss, len, qib_verbs_send()
1418 ret = qib_verbs_send_dma(qp, hdr, hdrwords, ss, len, qib_verbs_send()
1528 struct qib_qp *qp; qib_ib_piobufavail() local
1545 qp = list_entry(list->next, struct qib_qp, iowait); qib_ib_piobufavail()
1546 list_del_init(&qp->iowait); qib_ib_piobufavail()
1547 atomic_inc(&qp->refcount); qib_ib_piobufavail()
1548 qps[n++] = qp; qib_ib_piobufavail()
1555 qp = qps[i]; qib_ib_piobufavail()
1557 spin_lock_irqsave(&qp->s_lock, flags); qib_ib_piobufavail()
1558 if (qp->s_flags & QIB_S_WAIT_PIO) { qib_ib_piobufavail()
1559 qp->s_flags &= ~QIB_S_WAIT_PIO; qib_ib_piobufavail()
1560 qib_schedule_send(qp); qib_ib_piobufavail()
1562 spin_unlock_irqrestore(&qp->s_lock, flags); qib_ib_piobufavail()
1565 if (atomic_dec_and_test(&qp->refcount)) qib_ib_piobufavail()
1566 wake_up(&qp->wait); qib_ib_piobufavail()
2373 void qib_schedule_send(struct qib_qp *qp) qib_schedule_send() argument
2375 if (qib_send_ok(qp)) { qib_schedule_send()
2377 to_iport(qp->ibqp.device, qp->port_num); qib_schedule_send()
2380 queue_work(ppd->qib_wq, &qp->s_work); qib_schedule_send()
576 qib_qp_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr, int has_grh, void *data, u32 tlen, struct qib_qp *qp) qib_qp_rcv() argument
944 __get_txreq(struct qib_ibdev *dev, struct qib_qp *qp) __get_txreq() argument
975 get_txreq(struct qib_ibdev *dev, struct qib_qp *qp) get_txreq() argument
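qib_post_one_send() above treats the software send queue as a ring of indices: posting advances s_head, completions advance s_last, and the queue is full when advancing s_head would land on s_last, so one slot is always kept free to distinguish full from empty. The sketch below isolates that index arithmetic with invented names (ring_idx, ring_full, ring_empty).

#include <assert.h>

/* Minimal ring-index bookkeeping in the style of s_head/s_last/s_size
 * above; names and sizes are illustrative. */
struct ring_idx {
	unsigned head;	/* next slot to fill (cf. qp->s_head) */
	unsigned last;	/* oldest un-retired slot (cf. qp->s_last) */
	unsigned size;	/* number of slots (cf. qp->s_size) */
};

static int ring_full(const struct ring_idx *r)
{
	unsigned next = r->head + 1;

	if (next >= r->size)
		next = 0;
	return next == r->last;
}

static int ring_empty(const struct ring_idx *r)
{
	return r->head == r->last;
}

int main(void)
{
	struct ring_idx r = { .head = 0, .last = 0, .size = 4 };

	assert(ring_empty(&r) && !ring_full(&r));
	r.head = 3;			/* three entries posted */
	assert(ring_full(&r));		/* the fourth slot stays free */
	return 0;
}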
H A Dqib_verbs_mcast.c40 * @qp: the QP to link
42 static struct qib_mcast_qp *qib_mcast_qp_alloc(struct qib_qp *qp) qib_mcast_qp_alloc() argument
50 mqp->qp = qp; qib_mcast_qp_alloc()
51 atomic_inc(&qp->refcount); qib_mcast_qp_alloc()
59 struct qib_qp *qp = mqp->qp; qib_mcast_qp_free() local
62 if (atomic_dec_and_test(&qp->refcount)) qib_mcast_qp_free()
63 wake_up(&qp->wait); qib_mcast_qp_free()
182 if (p->qp == mqp->qp) { qib_mcast_add()
227 struct qib_qp *qp = to_iqp(ibqp); qib_multicast_attach() local
234 if (ibqp->qp_num <= 1 || qp->state == IB_QPS_RESET) { qib_multicast_attach()
248 mqp = qib_mcast_qp_alloc(qp); qib_multicast_attach()
254 ibp = to_iport(ibqp->device, qp->port_num); qib_multicast_attach()
285 struct qib_qp *qp = to_iqp(ibqp); qib_multicast_detach() local
287 struct qib_ibport *ibp = to_iport(ibqp->device, qp->port_num); qib_multicast_detach()
294 if (ibqp->qp_num <= 1 || qp->state == IB_QPS_RESET) qib_multicast_detach()
320 if (p->qp != qp) qib_multicast_detach()
H A Dqib_driver.c309 struct qib_qp *qp = NULL; qib_rcv_hdrerr() local
353 qp = qib_lookup_qpn(ibp, qp_num); qib_rcv_hdrerr()
354 if (!qp) qib_rcv_hdrerr()
361 spin_lock(&qp->r_lock); qib_rcv_hdrerr()
364 if (!(ib_qib_state_ops[qp->state] & qib_rcv_hdrerr()
370 switch (qp->ibqp.qp_type) { qib_rcv_hdrerr()
376 qp, qib_rcv_hdrerr()
384 diff = qib_cmp24(psn, qp->r_psn); qib_rcv_hdrerr()
385 if (!qp->r_nak_state && diff >= 0) { qib_rcv_hdrerr()
387 qp->r_nak_state = qib_rcv_hdrerr()
390 qp->r_ack_psn = qp->r_psn; qib_rcv_hdrerr()
399 if (list_empty(&qp->rspwait)) { qib_rcv_hdrerr()
400 qp->r_flags |= qib_rcv_hdrerr()
403 &qp->refcount); qib_rcv_hdrerr()
405 &qp->rspwait, qib_rcv_hdrerr()
421 spin_unlock(&qp->r_lock); qib_rcv_hdrerr()
426 if (atomic_dec_and_test(&qp->refcount)) qib_rcv_hdrerr()
427 wake_up(&qp->wait); qib_rcv_hdrerr()
459 struct qib_qp *qp, *nqp; qib_kreceive() local
568 list_for_each_entry_safe(qp, nqp, &rcd->qp_wait_list, rspwait) { qib_kreceive()
569 list_del_init(&qp->rspwait); qib_kreceive()
570 if (qp->r_flags & QIB_R_RSP_NAK) { qib_kreceive()
571 qp->r_flags &= ~QIB_R_RSP_NAK; qib_kreceive()
572 qib_send_rc_ack(qp); qib_kreceive()
574 if (qp->r_flags & QIB_R_RSP_SEND) { qib_kreceive()
577 qp->r_flags &= ~QIB_R_RSP_SEND; qib_kreceive()
578 spin_lock_irqsave(&qp->s_lock, flags); qib_kreceive()
579 if (ib_qib_state_ops[qp->state] & qib_kreceive()
581 qib_schedule_send(qp); qib_kreceive()
582 spin_unlock_irqrestore(&qp->s_lock, flags); qib_kreceive()
584 if (atomic_dec_and_test(&qp->refcount)) qib_kreceive()
585 wake_up(&qp->wait); qib_kreceive()
H A Dqib_verbs.h212 struct qib_qp *qp; member in struct:qib_mcast_qp
340 * in qp->s_max_sge.
360 * and stored in qp->r_rq.max_sge (or srq->rq.max_sge).
617 static inline struct qib_swqe *get_swqe_ptr(struct qib_qp *qp, get_swqe_ptr() argument
620 return (struct qib_swqe *)((char *)qp->s_wq + get_swqe_ptr()
622 qp->s_max_sge * get_swqe_ptr()
855 static inline int qib_send_ok(struct qib_qp *qp) qib_send_ok() argument
857 return !(qp->s_flags & (QIB_S_BUSY | QIB_S_ANY_WAIT_IO)) && qib_send_ok()
858 (qp->s_hdrwords || (qp->s_flags & QIB_S_RESP_PENDING) || qib_send_ok()
859 !(qp->s_flags & QIB_S_ANY_WAIT_SEND)); qib_send_ok()
865 void qib_schedule_send(struct qib_qp *qp);
916 __be32 qib_compute_aeth(struct qib_qp *qp);
926 int qib_error_qp(struct qib_qp *qp, enum ib_wc_status err);
952 void qib_get_credit(struct qib_qp *qp, u32 aeth);
960 int qib_verbs_send(struct qib_qp *qp, struct qib_ib_header *hdr,
969 int has_grh, void *data, u32 tlen, struct qib_qp *qp);
972 int has_grh, void *data, u32 tlen, struct qib_qp *qp);
980 void qib_rc_send_complete(struct qib_qp *qp, struct qib_ib_header *hdr);
982 void qib_rc_error(struct qib_qp *qp, enum ib_wc_status err);
984 int qib_post_ud_send(struct qib_qp *qp, struct ib_send_wr *wr);
987 int has_grh, void *data, u32 tlen, struct qib_qp *qp);
996 int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge,
1053 int qib_reg_mr(struct qib_qp *qp, struct ib_reg_wr *wr);
1099 int qib_get_rwqe(struct qib_qp *qp, int wr_id_only);
1101 void qib_migrate_qp(struct qib_qp *qp);
1104 int has_grh, struct qib_qp *qp, u32 bth0);
1109 void qib_make_ruc_header(struct qib_qp *qp, struct qib_other_headers *ohdr,
1114 void qib_send_complete(struct qib_qp *qp, struct qib_swqe *wqe,
1117 void qib_send_rc_ack(struct qib_qp *qp);
1119 int qib_make_rc_req(struct qib_qp *qp);
1121 int qib_make_uc_req(struct qib_qp *qp);
1123 int qib_make_ud_req(struct qib_qp *qp);
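get_swqe_ptr() above indexes a send work queue whose entries are a fixed header followed by a per-QP number of SGEs, so the element stride has to be computed at run time rather than coming from sizeof alone. The sketch below reproduces that layout with invented structures (swqe_sketch, sge_sketch); it illustrates the addressing scheme only and does not use the driver's types.

#include <stdio.h>
#include <stdlib.h>

/* Entries are a fixed header plus a variable trailing SGE array, so the
 * stride is header size + max_sge * SGE size. */
struct sge_sketch  { void *vaddr; unsigned length; };
struct swqe_sketch { unsigned long wr_id; int num_sge;
		     struct sge_sketch sg_list[]; /* flexible array */ };

static struct swqe_sketch *swqe_at(void *base, unsigned n, unsigned max_sge)
{
	size_t stride = sizeof(struct swqe_sketch) +
			max_sge * sizeof(struct sge_sketch);

	return (struct swqe_sketch *)((char *)base + (size_t)n * stride);
}

int main(void)
{
	unsigned max_sge = 3, entries = 8;
	size_t stride = sizeof(struct swqe_sketch) +
			max_sge * sizeof(struct sge_sketch);
	void *wq = calloc(entries, stride);

	swqe_at(wq, 5, max_sge)->wr_id = 42;
	printf("wr_id at slot 5: %lu\n", swqe_at(wq, 5, max_sge)->wr_id);
	free(wq);
	return 0;
}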
H A Dqib_keys.c241 * @qp: qp for validation
252 int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge, qib_rkey_ok() argument
255 struct qib_lkey_table *rkt = &to_idev(qp->ibqp.device)->lk_table; qib_rkey_ok()
266 struct qib_pd *pd = to_ipd(qp->ibqp.pd); qib_rkey_ok()
289 if (unlikely(!mr || mr->lkey != rkey || qp->ibqp.pd != mr->pd)) qib_rkey_ok()
341 int qib_reg_mr(struct qib_qp *qp, struct ib_reg_wr *wr) qib_reg_mr() argument
343 struct qib_lkey_table *rkt = &to_idev(qp->ibqp.device)->lk_table; qib_reg_mr()
344 struct qib_pd *pd = to_ipd(qp->ibqp.pd); qib_reg_mr()
361 if (unlikely(mrg == NULL || qp->ibqp.pd != mrg->pd)) qib_reg_mr()
H A Dqib_sdma.c516 atomic_inc(&tx->qp->s_dma_busy); complete_sdma_err_req()
539 struct qib_qp *qp; qib_sdma_verbs_send() local
648 atomic_inc(&tx->qp->s_dma_busy); qib_sdma_verbs_send()
665 qp = tx->qp; qib_sdma_verbs_send()
667 spin_lock(&qp->r_lock); qib_sdma_verbs_send()
668 spin_lock(&qp->s_lock); qib_sdma_verbs_send()
669 if (qp->ibqp.qp_type == IB_QPT_RC) { qib_sdma_verbs_send()
671 if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) qib_sdma_verbs_send()
672 qib_error_qp(qp, IB_WC_GENERAL_ERR); qib_sdma_verbs_send()
673 } else if (qp->s_wqe) qib_sdma_verbs_send()
674 qib_send_complete(qp, qp->s_wqe, IB_WC_GENERAL_ERR); qib_sdma_verbs_send()
675 spin_unlock(&qp->s_lock); qib_sdma_verbs_send()
676 spin_unlock(&qp->r_lock); qib_sdma_verbs_send()
681 qp = tx->qp; qib_sdma_verbs_send()
682 spin_lock(&qp->s_lock); qib_sdma_verbs_send()
683 if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) { qib_sdma_verbs_send()
693 qp->s_tx = tx; qib_sdma_verbs_send()
696 if (list_empty(&qp->iowait)) { qib_sdma_verbs_send()
701 qp->s_flags |= QIB_S_WAIT_DMA_DESC; qib_sdma_verbs_send()
702 list_add_tail(&qp->iowait, &dev->dmawait); qib_sdma_verbs_send()
705 qp->s_flags &= ~QIB_S_BUSY; qib_sdma_verbs_send()
706 spin_unlock(&qp->s_lock); qib_sdma_verbs_send()
709 spin_unlock(&qp->s_lock); qib_sdma_verbs_send()
H A Dqib_cq.c49 * This may be called with qp->s_lock held.
91 wc->uqueue[head].qp_num = entry->qp->qp_num; qib_cq_enter()
H A Dqib_mad.h175 __be32 qp; member in struct:ib_cc_trap_key_violation_attr
/linux-4.4.14/drivers/staging/rdma/hfi1/
H A Dqp.c59 #include "qp.h"
70 static void flush_tx_list(struct hfi1_qp *qp);
232 static void insert_qp(struct hfi1_ibdev *dev, struct hfi1_qp *qp) insert_qp() argument
234 struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num); insert_qp()
237 atomic_inc(&qp->refcount); insert_qp()
240 if (qp->ibqp.qp_num <= 1) { insert_qp()
241 rcu_assign_pointer(ibp->qp[qp->ibqp.qp_num], qp); insert_qp()
243 u32 n = qpn_hash(dev->qp_dev, qp->ibqp.qp_num); insert_qp()
245 qp->next = dev->qp_dev->qp_table[n]; insert_qp()
246 rcu_assign_pointer(dev->qp_dev->qp_table[n], qp); insert_qp()
247 trace_hfi1_qpinsert(qp, n); insert_qp()
257 static void remove_qp(struct hfi1_ibdev *dev, struct hfi1_qp *qp) remove_qp() argument
259 struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num); remove_qp()
260 u32 n = qpn_hash(dev->qp_dev, qp->ibqp.qp_num); remove_qp()
266 if (rcu_dereference_protected(ibp->qp[0], remove_qp()
267 lockdep_is_held(&dev->qp_dev->qpt_lock)) == qp) { remove_qp()
268 RCU_INIT_POINTER(ibp->qp[0], NULL); remove_qp()
269 } else if (rcu_dereference_protected(ibp->qp[1], remove_qp()
270 lockdep_is_held(&dev->qp_dev->qpt_lock)) == qp) { remove_qp()
271 RCU_INIT_POINTER(ibp->qp[1], NULL); remove_qp()
282 if (q == qp) { remove_qp()
284 rcu_dereference_protected(qp->next, remove_qp()
287 trace_hfi1_qpremove(qp, n); remove_qp()
295 if (atomic_dec_and_test(&qp->refcount)) remove_qp()
296 wake_up(&qp->wait); remove_qp()
311 struct hfi1_qp *qp; free_all_qps() local
320 if (rcu_dereference(ibp->qp[0])) free_all_qps()
322 if (rcu_dereference(ibp->qp[1])) free_all_qps()
331 qp = rcu_dereference_protected(dev->qp_dev->qp_table[n], free_all_qps()
335 for (; qp; qp = rcu_dereference_protected(qp->next, free_all_qps()
347 * @qp: the QP to reset
350 static void reset_qp(struct hfi1_qp *qp, enum ib_qp_type type) reset_qp() argument
352 qp->remote_qpn = 0; reset_qp()
353 qp->qkey = 0; reset_qp()
354 qp->qp_access_flags = 0; reset_qp()
356 &qp->s_iowait, reset_qp()
361 qp->s_flags &= HFI1_S_SIGNAL_REQ_WR; reset_qp()
362 qp->s_hdrwords = 0; reset_qp()
363 qp->s_wqe = NULL; reset_qp()
364 qp->s_draining = 0; reset_qp()
365 qp->s_next_psn = 0; reset_qp()
366 qp->s_last_psn = 0; reset_qp()
367 qp->s_sending_psn = 0; reset_qp()
368 qp->s_sending_hpsn = 0; reset_qp()
369 qp->s_psn = 0; reset_qp()
370 qp->r_psn = 0; reset_qp()
371 qp->r_msn = 0; reset_qp()
373 qp->s_state = IB_OPCODE_RC_SEND_LAST; reset_qp()
374 qp->r_state = IB_OPCODE_RC_SEND_LAST; reset_qp()
376 qp->s_state = IB_OPCODE_UC_SEND_LAST; reset_qp()
377 qp->r_state = IB_OPCODE_UC_SEND_LAST; reset_qp()
379 qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE; reset_qp()
380 qp->r_nak_state = 0; reset_qp()
381 qp->r_aflags = 0; reset_qp()
382 qp->r_flags = 0; reset_qp()
383 qp->s_head = 0; reset_qp()
384 qp->s_tail = 0; reset_qp()
385 qp->s_cur = 0; reset_qp()
386 qp->s_acked = 0; reset_qp()
387 qp->s_last = 0; reset_qp()
388 qp->s_ssn = 1; reset_qp()
389 qp->s_lsn = 0; reset_qp()
390 clear_ahg(qp); reset_qp()
391 qp->s_mig_state = IB_MIG_MIGRATED; reset_qp()
392 memset(qp->s_ack_queue, 0, sizeof(qp->s_ack_queue)); reset_qp()
393 qp->r_head_ack_queue = 0; reset_qp()
394 qp->s_tail_ack_queue = 0; reset_qp()
395 qp->s_num_rd_atomic = 0; reset_qp()
396 if (qp->r_rq.wq) { reset_qp()
397 qp->r_rq.wq->head = 0; reset_qp()
398 qp->r_rq.wq->tail = 0; reset_qp()
400 qp->r_sge.num_sge = 0; reset_qp()
403 static void clear_mr_refs(struct hfi1_qp *qp, int clr_sends) clear_mr_refs() argument
407 if (test_and_clear_bit(HFI1_R_REWIND_SGE, &qp->r_aflags)) clear_mr_refs()
408 hfi1_put_ss(&qp->s_rdma_read_sge); clear_mr_refs()
410 hfi1_put_ss(&qp->r_sge); clear_mr_refs()
413 while (qp->s_last != qp->s_head) { clear_mr_refs()
414 struct hfi1_swqe *wqe = get_swqe_ptr(qp, qp->s_last); clear_mr_refs()
422 if (qp->ibqp.qp_type == IB_QPT_UD || clear_mr_refs()
423 qp->ibqp.qp_type == IB_QPT_SMI || clear_mr_refs()
424 qp->ibqp.qp_type == IB_QPT_GSI) clear_mr_refs()
426 if (++qp->s_last >= qp->s_size) clear_mr_refs()
427 qp->s_last = 0; clear_mr_refs()
429 if (qp->s_rdma_mr) { clear_mr_refs()
430 hfi1_put_mr(qp->s_rdma_mr); clear_mr_refs()
431 qp->s_rdma_mr = NULL; clear_mr_refs()
435 if (qp->ibqp.qp_type != IB_QPT_RC) clear_mr_refs()
438 for (n = 0; n < ARRAY_SIZE(qp->s_ack_queue); n++) { clear_mr_refs()
439 struct hfi1_ack_entry *e = &qp->s_ack_queue[n]; clear_mr_refs()
451 * @qp: the QP to put into the error state
459 int hfi1_error_qp(struct hfi1_qp *qp, enum ib_wc_status err) hfi1_error_qp() argument
461 struct hfi1_ibdev *dev = to_idev(qp->ibqp.device); hfi1_error_qp()
465 if (qp->state == IB_QPS_ERR || qp->state == IB_QPS_RESET) hfi1_error_qp()
468 qp->state = IB_QPS_ERR; hfi1_error_qp()
470 if (qp->s_flags & (HFI1_S_TIMER | HFI1_S_WAIT_RNR)) { hfi1_error_qp()
471 qp->s_flags &= ~(HFI1_S_TIMER | HFI1_S_WAIT_RNR); hfi1_error_qp()
472 del_timer(&qp->s_timer); hfi1_error_qp()
475 if (qp->s_flags & HFI1_S_ANY_WAIT_SEND) hfi1_error_qp()
476 qp->s_flags &= ~HFI1_S_ANY_WAIT_SEND; hfi1_error_qp()
479 if (!list_empty(&qp->s_iowait.list) && !(qp->s_flags & HFI1_S_BUSY)) { hfi1_error_qp()
480 qp->s_flags &= ~HFI1_S_ANY_WAIT_IO; hfi1_error_qp()
481 list_del_init(&qp->s_iowait.list); hfi1_error_qp()
482 if (atomic_dec_and_test(&qp->refcount)) hfi1_error_qp()
483 wake_up(&qp->wait); hfi1_error_qp()
487 if (!(qp->s_flags & HFI1_S_BUSY)) { hfi1_error_qp()
488 qp->s_hdrwords = 0; hfi1_error_qp()
489 if (qp->s_rdma_mr) { hfi1_error_qp()
490 hfi1_put_mr(qp->s_rdma_mr); hfi1_error_qp()
491 qp->s_rdma_mr = NULL; hfi1_error_qp()
493 flush_tx_list(qp); hfi1_error_qp()
497 if (qp->s_last != qp->s_head) hfi1_error_qp()
498 hfi1_schedule_send(qp); hfi1_error_qp()
500 clear_mr_refs(qp, 0); hfi1_error_qp()
503 wc.qp = &qp->ibqp; hfi1_error_qp()
506 if (test_and_clear_bit(HFI1_R_WRID_VALID, &qp->r_aflags)) { hfi1_error_qp()
507 wc.wr_id = qp->r_wr_id; hfi1_error_qp()
509 hfi1_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1); hfi1_error_qp()
513 if (qp->r_rq.wq) { hfi1_error_qp()
518 spin_lock(&qp->r_rq.lock); hfi1_error_qp()
521 wq = qp->r_rq.wq; hfi1_error_qp()
523 if (head >= qp->r_rq.size) hfi1_error_qp()
526 if (tail >= qp->r_rq.size) hfi1_error_qp()
529 wc.wr_id = get_rwqe_ptr(&qp->r_rq, tail)->wr_id; hfi1_error_qp()
530 if (++tail >= qp->r_rq.size) hfi1_error_qp()
532 hfi1_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1); hfi1_error_qp()
536 spin_unlock(&qp->r_rq.lock); hfi1_error_qp()
537 } else if (qp->ibqp.event_handler) hfi1_error_qp()
544 static void flush_tx_list(struct hfi1_qp *qp) flush_tx_list() argument
546 while (!list_empty(&qp->s_iowait.tx_head)) { flush_tx_list()
550 &qp->s_iowait.tx_head, flush_tx_list()
559 static void flush_iowait(struct hfi1_qp *qp) flush_iowait() argument
561 struct hfi1_ibdev *dev = to_idev(qp->ibqp.device); flush_iowait()
565 if (!list_empty(&qp->s_iowait.list)) { flush_iowait()
566 list_del_init(&qp->s_iowait.list); flush_iowait()
567 if (atomic_dec_and_test(&qp->refcount)) flush_iowait()
568 wake_up(&qp->wait); flush_iowait()
613 struct hfi1_qp *qp = to_iqp(ibqp); hfi1_modify_qp() local
622 spin_lock_irq(&qp->r_lock); hfi1_modify_qp()
623 spin_lock(&qp->s_lock); hfi1_modify_qp()
626 attr->cur_qp_state : qp->state; hfi1_modify_qp()
636 if (hfi1_check_ah(qp->ibqp.device, &attr->ah_attr)) hfi1_modify_qp()
643 if (hfi1_check_ah(qp->ibqp.device, &attr->alt_ah_attr)) hfi1_modify_qp()
658 if (qp->ibqp.qp_type == IB_QPT_SMI || hfi1_modify_qp()
659 qp->ibqp.qp_type == IB_QPT_GSI || hfi1_modify_qp()
679 * that to a small mtu. We'll set qp->path_mtu hfi1_modify_qp()
685 int mtu, pidx = qp->port_num - 1; hfi1_modify_qp()
700 if (qp->s_mig_state == IB_MIG_ARMED) hfi1_modify_qp()
705 if (qp->s_mig_state == IB_MIG_REARM) hfi1_modify_qp()
709 if (qp->s_mig_state == IB_MIG_ARMED) hfi1_modify_qp()
721 if (qp->state != IB_QPS_RESET) { hfi1_modify_qp()
722 qp->state = IB_QPS_RESET; hfi1_modify_qp()
723 flush_iowait(qp); hfi1_modify_qp()
724 qp->s_flags &= ~(HFI1_S_TIMER | HFI1_S_ANY_WAIT); hfi1_modify_qp()
725 spin_unlock(&qp->s_lock); hfi1_modify_qp()
726 spin_unlock_irq(&qp->r_lock); hfi1_modify_qp()
728 cancel_work_sync(&qp->s_iowait.iowork); hfi1_modify_qp()
729 del_timer_sync(&qp->s_timer); hfi1_modify_qp()
730 iowait_sdma_drain(&qp->s_iowait); hfi1_modify_qp()
731 flush_tx_list(qp); hfi1_modify_qp()
732 remove_qp(dev, qp); hfi1_modify_qp()
733 wait_event(qp->wait, !atomic_read(&qp->refcount)); hfi1_modify_qp()
734 spin_lock_irq(&qp->r_lock); hfi1_modify_qp()
735 spin_lock(&qp->s_lock); hfi1_modify_qp()
736 clear_mr_refs(qp, 1); hfi1_modify_qp()
737 clear_ahg(qp); hfi1_modify_qp()
738 reset_qp(qp, ibqp->qp_type); hfi1_modify_qp()
744 qp->r_flags &= ~HFI1_R_COMM_EST; hfi1_modify_qp()
745 qp->state = new_state; hfi1_modify_qp()
749 qp->s_draining = qp->s_last != qp->s_cur; hfi1_modify_qp()
750 qp->state = new_state; hfi1_modify_qp()
754 if (qp->ibqp.qp_type == IB_QPT_RC) hfi1_modify_qp()
756 qp->state = new_state; hfi1_modify_qp()
760 lastwqe = hfi1_error_qp(qp, IB_WC_WR_FLUSH_ERR); hfi1_modify_qp()
764 qp->state = new_state; hfi1_modify_qp()
769 qp->s_pkey_index = attr->pkey_index; hfi1_modify_qp()
772 qp->port_num = attr->port_num; hfi1_modify_qp()
775 qp->remote_qpn = attr->dest_qp_num; hfi1_modify_qp()
778 qp->s_next_psn = attr->sq_psn & PSN_MODIFY_MASK; hfi1_modify_qp()
779 qp->s_psn = qp->s_next_psn; hfi1_modify_qp()
780 qp->s_sending_psn = qp->s_next_psn; hfi1_modify_qp()
781 qp->s_last_psn = qp->s_next_psn - 1; hfi1_modify_qp()
782 qp->s_sending_hpsn = qp->s_last_psn; hfi1_modify_qp()
786 qp->r_psn = attr->rq_psn & PSN_MODIFY_MASK; hfi1_modify_qp()
789 qp->qp_access_flags = attr->qp_access_flags; hfi1_modify_qp()
792 qp->remote_ah_attr = attr->ah_attr; hfi1_modify_qp()
793 qp->s_srate = attr->ah_attr.static_rate; hfi1_modify_qp()
794 qp->srate_mbps = ib_rate_to_mbps(qp->s_srate); hfi1_modify_qp()
798 qp->alt_ah_attr = attr->alt_ah_attr; hfi1_modify_qp()
799 qp->s_alt_pkey_index = attr->alt_pkey_index; hfi1_modify_qp()
803 qp->s_mig_state = attr->path_mig_state; hfi1_modify_qp()
805 qp->remote_ah_attr = qp->alt_ah_attr; hfi1_modify_qp()
806 qp->port_num = qp->alt_ah_attr.port_num; hfi1_modify_qp()
807 qp->s_pkey_index = qp->s_alt_pkey_index; hfi1_modify_qp()
808 qp->s_flags |= HFI1_S_AHG_CLEAR; hfi1_modify_qp()
818 ibp = &dd->pport[qp->port_num - 1].ibport_data; hfi1_modify_qp()
820 sc = ibp->sl_to_sc[qp->remote_ah_attr.sl]; hfi1_modify_qp()
828 qp->path_mtu = pmtu; hfi1_modify_qp()
829 qp->pmtu = mtu; hfi1_modify_qp()
833 qp->s_retry_cnt = attr->retry_cnt; hfi1_modify_qp()
834 qp->s_retry = attr->retry_cnt; hfi1_modify_qp()
838 qp->s_rnr_retry_cnt = attr->rnr_retry; hfi1_modify_qp()
839 qp->s_rnr_retry = attr->rnr_retry; hfi1_modify_qp()
843 qp->r_min_rnr_timer = attr->min_rnr_timer; hfi1_modify_qp()
846 qp->timeout = attr->timeout; hfi1_modify_qp()
847 qp->timeout_jiffies = hfi1_modify_qp()
848 usecs_to_jiffies((4096UL * (1UL << qp->timeout)) / hfi1_modify_qp()
853 qp->qkey = attr->qkey; hfi1_modify_qp()
856 qp->r_max_rd_atomic = attr->max_dest_rd_atomic; hfi1_modify_qp()
859 qp->s_max_rd_atomic = attr->max_rd_atomic; hfi1_modify_qp()
861 spin_unlock(&qp->s_lock); hfi1_modify_qp()
862 spin_unlock_irq(&qp->r_lock); hfi1_modify_qp()
865 insert_qp(dev, qp); hfi1_modify_qp()
868 ev.device = qp->ibqp.device; hfi1_modify_qp()
869 ev.element.qp = &qp->ibqp; hfi1_modify_qp()
871 qp->ibqp.event_handler(&ev, qp->ibqp.qp_context); hfi1_modify_qp()
874 ev.device = qp->ibqp.device; hfi1_modify_qp()
875 ev.element.qp = &qp->ibqp; hfi1_modify_qp()
877 qp->ibqp.event_handler(&ev, qp->ibqp.qp_context); hfi1_modify_qp()
883 spin_unlock(&qp->s_lock); hfi1_modify_qp()
884 spin_unlock_irq(&qp->r_lock); hfi1_modify_qp()
894 struct hfi1_qp *qp = to_iqp(ibqp); hfi1_query_qp() local
896 attr->qp_state = qp->state; hfi1_query_qp()
898 attr->path_mtu = qp->path_mtu; hfi1_query_qp()
899 attr->path_mig_state = qp->s_mig_state; hfi1_query_qp()
900 attr->qkey = qp->qkey; hfi1_query_qp()
901 attr->rq_psn = mask_psn(qp->r_psn); hfi1_query_qp()
902 attr->sq_psn = mask_psn(qp->s_next_psn); hfi1_query_qp()
903 attr->dest_qp_num = qp->remote_qpn; hfi1_query_qp()
904 attr->qp_access_flags = qp->qp_access_flags; hfi1_query_qp()
905 attr->cap.max_send_wr = qp->s_size - 1; hfi1_query_qp()
906 attr->cap.max_recv_wr = qp->ibqp.srq ? 0 : qp->r_rq.size - 1; hfi1_query_qp()
907 attr->cap.max_send_sge = qp->s_max_sge; hfi1_query_qp()
908 attr->cap.max_recv_sge = qp->r_rq.max_sge; hfi1_query_qp()
910 attr->ah_attr = qp->remote_ah_attr; hfi1_query_qp()
911 attr->alt_ah_attr = qp->alt_ah_attr; hfi1_query_qp()
912 attr->pkey_index = qp->s_pkey_index; hfi1_query_qp()
913 attr->alt_pkey_index = qp->s_alt_pkey_index; hfi1_query_qp()
915 attr->sq_draining = qp->s_draining; hfi1_query_qp()
916 attr->max_rd_atomic = qp->s_max_rd_atomic; hfi1_query_qp()
917 attr->max_dest_rd_atomic = qp->r_max_rd_atomic; hfi1_query_qp()
918 attr->min_rnr_timer = qp->r_min_rnr_timer; hfi1_query_qp()
919 attr->port_num = qp->port_num; hfi1_query_qp()
920 attr->timeout = qp->timeout; hfi1_query_qp()
921 attr->retry_cnt = qp->s_retry_cnt; hfi1_query_qp()
922 attr->rnr_retry = qp->s_rnr_retry_cnt; hfi1_query_qp()
923 attr->alt_port_num = qp->alt_ah_attr.port_num; hfi1_query_qp()
924 attr->alt_timeout = qp->alt_timeout; hfi1_query_qp()
926 init_attr->event_handler = qp->ibqp.event_handler; hfi1_query_qp()
927 init_attr->qp_context = qp->ibqp.qp_context; hfi1_query_qp()
928 init_attr->send_cq = qp->ibqp.send_cq; hfi1_query_qp()
929 init_attr->recv_cq = qp->ibqp.recv_cq; hfi1_query_qp()
930 init_attr->srq = qp->ibqp.srq; hfi1_query_qp()
932 if (qp->s_flags & HFI1_S_SIGNAL_REQ_WR) hfi1_query_qp()
936 init_attr->qp_type = qp->ibqp.qp_type; hfi1_query_qp()
937 init_attr->port_num = qp->port_num; hfi1_query_qp()
943 * @qp: the queue pair to compute the AETH for
947 __be32 hfi1_compute_aeth(struct hfi1_qp *qp) hfi1_compute_aeth() argument
949 u32 aeth = qp->r_msn & HFI1_MSN_MASK; hfi1_compute_aeth()
951 if (qp->ibqp.srq) { hfi1_compute_aeth()
960 struct hfi1_rwq *wq = qp->r_rq.wq; hfi1_compute_aeth()
966 if (head >= qp->r_rq.size) hfi1_compute_aeth()
969 if (tail >= qp->r_rq.size) hfi1_compute_aeth()
979 credits += qp->r_rq.size; hfi1_compute_aeth()
1016 struct hfi1_qp *qp; hfi1_create_qp() local
1067 sz = sizeof(*qp); hfi1_create_qp()
1073 sg_list_sz = sizeof(*qp->r_sg_list) * hfi1_create_qp()
1076 sg_list_sz = sizeof(*qp->r_sg_list) * hfi1_create_qp()
1078 qp = kzalloc(sz + sg_list_sz, GFP_KERNEL); hfi1_create_qp()
1079 if (!qp) { hfi1_create_qp()
1083 RCU_INIT_POINTER(qp->next, NULL); hfi1_create_qp()
1084 qp->s_hdr = kzalloc(sizeof(*qp->s_hdr), GFP_KERNEL); hfi1_create_qp()
1085 if (!qp->s_hdr) { hfi1_create_qp()
1089 qp->timeout_jiffies = hfi1_create_qp()
1090 usecs_to_jiffies((4096UL * (1UL << qp->timeout)) / hfi1_create_qp()
1095 qp->r_rq.size = init_attr->cap.max_recv_wr + 1; hfi1_create_qp()
1096 qp->r_rq.max_sge = init_attr->cap.max_recv_sge; hfi1_create_qp()
1097 sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) + hfi1_create_qp()
1099 qp->r_rq.wq = vmalloc_user(sizeof(struct hfi1_rwq) + hfi1_create_qp()
1100 qp->r_rq.size * sz); hfi1_create_qp()
1101 if (!qp->r_rq.wq) { hfi1_create_qp()
1108 * ib_create_qp() will initialize qp->ibqp hfi1_create_qp()
1109 * except for qp->ibqp.qp_num. hfi1_create_qp()
1111 spin_lock_init(&qp->r_lock); hfi1_create_qp()
1112 spin_lock_init(&qp->s_lock); hfi1_create_qp()
1113 spin_lock_init(&qp->r_rq.lock); hfi1_create_qp()
1114 atomic_set(&qp->refcount, 0); hfi1_create_qp()
1115 init_waitqueue_head(&qp->wait); hfi1_create_qp()
1116 init_timer(&qp->s_timer); hfi1_create_qp()
1117 qp->s_timer.data = (unsigned long)qp; hfi1_create_qp()
1118 INIT_LIST_HEAD(&qp->rspwait); hfi1_create_qp()
1119 qp->state = IB_QPS_RESET; hfi1_create_qp()
1120 qp->s_wq = swq; hfi1_create_qp()
1121 qp->s_size = init_attr->cap.max_send_wr + 1; hfi1_create_qp()
1122 qp->s_max_sge = init_attr->cap.max_send_sge; hfi1_create_qp()
1124 qp->s_flags = HFI1_S_SIGNAL_REQ_WR; hfi1_create_qp()
1131 vfree(qp->r_rq.wq); hfi1_create_qp()
1134 qp->ibqp.qp_num = err; hfi1_create_qp()
1135 qp->port_num = init_attr->port_num; hfi1_create_qp()
1136 reset_qp(qp, init_attr->qp_type); hfi1_create_qp()
1153 if (!qp->r_rq.wq) { hfi1_create_qp()
1163 u32 s = sizeof(struct hfi1_rwq) + qp->r_rq.size * sz; hfi1_create_qp()
1165 qp->ip = hfi1_create_mmap_info(dev, s, hfi1_create_qp()
1167 qp->r_rq.wq); hfi1_create_qp()
1168 if (!qp->ip) { hfi1_create_qp()
1173 err = ib_copy_to_udata(udata, &(qp->ip->offset), hfi1_create_qp()
1174 sizeof(qp->ip->offset)); hfi1_create_qp()
1192 if (qp->ip) { hfi1_create_qp()
1194 list_add(&qp->ip->pending_mmaps, &dev->pending_mmaps); hfi1_create_qp()
1198 ret = &qp->ibqp; hfi1_create_qp()
1209 qp->allowed_ops = IB_OPCODE_UD_SEND_ONLY & OPCODE_QP_MASK; hfi1_create_qp()
1212 qp->allowed_ops = IB_OPCODE_RC_SEND_ONLY & OPCODE_QP_MASK; hfi1_create_qp()
1215 qp->allowed_ops = IB_OPCODE_UC_SEND_ONLY & OPCODE_QP_MASK; hfi1_create_qp()
1225 if (qp->ip) hfi1_create_qp()
1226 kref_put(&qp->ip->ref, hfi1_release_mmap_info); hfi1_create_qp()
1228 vfree(qp->r_rq.wq); hfi1_create_qp()
1229 free_qpn(&dev->qp_dev->qpn_table, qp->ibqp.qp_num); hfi1_create_qp()
1231 kfree(qp->s_hdr); hfi1_create_qp()
1232 kfree(qp); hfi1_create_qp()
1250 struct hfi1_qp *qp = to_iqp(ibqp); hfi1_destroy_qp() local
1254 spin_lock_irq(&qp->r_lock); hfi1_destroy_qp()
1255 spin_lock(&qp->s_lock); hfi1_destroy_qp()
1256 if (qp->state != IB_QPS_RESET) { hfi1_destroy_qp()
1257 qp->state = IB_QPS_RESET; hfi1_destroy_qp()
1258 flush_iowait(qp); hfi1_destroy_qp()
1259 qp->s_flags &= ~(HFI1_S_TIMER | HFI1_S_ANY_WAIT); hfi1_destroy_qp()
1260 spin_unlock(&qp->s_lock); hfi1_destroy_qp()
1261 spin_unlock_irq(&qp->r_lock); hfi1_destroy_qp()
1262 cancel_work_sync(&qp->s_iowait.iowork); hfi1_destroy_qp()
1263 del_timer_sync(&qp->s_timer); hfi1_destroy_qp()
1264 iowait_sdma_drain(&qp->s_iowait); hfi1_destroy_qp()
1265 flush_tx_list(qp); hfi1_destroy_qp()
1266 remove_qp(dev, qp); hfi1_destroy_qp()
1267 wait_event(qp->wait, !atomic_read(&qp->refcount)); hfi1_destroy_qp()
1268 spin_lock_irq(&qp->r_lock); hfi1_destroy_qp()
1269 spin_lock(&qp->s_lock); hfi1_destroy_qp()
1270 clear_mr_refs(qp, 1); hfi1_destroy_qp()
1271 clear_ahg(qp); hfi1_destroy_qp()
1273 spin_unlock(&qp->s_lock); hfi1_destroy_qp()
1274 spin_unlock_irq(&qp->r_lock); hfi1_destroy_qp()
1277 free_qpn(&dev->qp_dev->qpn_table, qp->ibqp.qp_num); hfi1_destroy_qp()
1282 if (qp->ip) hfi1_destroy_qp()
1283 kref_put(&qp->ip->ref, hfi1_release_mmap_info); hfi1_destroy_qp()
1285 vfree(qp->r_rq.wq); hfi1_destroy_qp()
1286 vfree(qp->s_wq); hfi1_destroy_qp()
1287 kfree(qp->s_hdr); hfi1_destroy_qp()
1288 kfree(qp); hfi1_destroy_qp()
1349 * @qp: the qp whose send work queue to flush
1354 void hfi1_get_credit(struct hfi1_qp *qp, u32 aeth) hfi1_get_credit() argument
1364 if (!(qp->s_flags & HFI1_S_UNLIMITED_CREDIT)) { hfi1_get_credit()
1365 qp->s_flags |= HFI1_S_UNLIMITED_CREDIT; hfi1_get_credit()
1366 if (qp->s_flags & HFI1_S_WAIT_SSN_CREDIT) { hfi1_get_credit()
1367 qp->s_flags &= ~HFI1_S_WAIT_SSN_CREDIT; hfi1_get_credit()
1368 hfi1_schedule_send(qp); hfi1_get_credit()
1371 } else if (!(qp->s_flags & HFI1_S_UNLIMITED_CREDIT)) { hfi1_get_credit()
1374 if (cmp_msn(credit, qp->s_lsn) > 0) { hfi1_get_credit()
1375 qp->s_lsn = credit; hfi1_get_credit()
1376 if (qp->s_flags & HFI1_S_WAIT_SSN_CREDIT) { hfi1_get_credit()
1377 qp->s_flags &= ~HFI1_S_WAIT_SSN_CREDIT; hfi1_get_credit()
1378 hfi1_schedule_send(qp); hfi1_get_credit()
1384 void hfi1_qp_wakeup(struct hfi1_qp *qp, u32 flag) hfi1_qp_wakeup() argument
1388 spin_lock_irqsave(&qp->s_lock, flags); hfi1_qp_wakeup()
1389 if (qp->s_flags & flag) { hfi1_qp_wakeup()
1390 qp->s_flags &= ~flag; hfi1_qp_wakeup()
1391 trace_hfi1_qpwakeup(qp, flag); hfi1_qp_wakeup()
1392 hfi1_schedule_send(qp); hfi1_qp_wakeup()
1394 spin_unlock_irqrestore(&qp->s_lock, flags); hfi1_qp_wakeup()
1396 if (atomic_dec_and_test(&qp->refcount)) hfi1_qp_wakeup()
1397 wake_up(&qp->wait); hfi1_qp_wakeup()
1407 struct hfi1_qp *qp; iowait_sleep() local
1412 qp = tx->qp; iowait_sleep()
1414 spin_lock_irqsave(&qp->s_lock, flags); iowait_sleep()
1415 if (ib_hfi1_state_ops[qp->state] & HFI1_PROCESS_RECV_OK) { iowait_sleep()
1428 if (list_empty(&qp->s_iowait.list)) { iowait_sleep()
1430 to_iport(qp->ibqp.device, qp->port_num); iowait_sleep()
1433 qp->s_flags |= HFI1_S_WAIT_DMA_DESC; iowait_sleep()
1434 list_add_tail(&qp->s_iowait.list, &sde->dmawait); iowait_sleep()
1435 trace_hfi1_qpsleep(qp, HFI1_S_WAIT_DMA_DESC); iowait_sleep()
1436 atomic_inc(&qp->refcount); iowait_sleep()
1439 qp->s_flags &= ~HFI1_S_BUSY; iowait_sleep()
1440 spin_unlock_irqrestore(&qp->s_lock, flags); iowait_sleep()
1443 spin_unlock_irqrestore(&qp->s_lock, flags); iowait_sleep()
1449 spin_unlock_irqrestore(&qp->s_lock, flags); iowait_sleep()
1456 struct hfi1_qp *qp = container_of(wait, struct hfi1_qp, s_iowait); iowait_wakeup() local
1459 hfi1_qp_wakeup(qp, HFI1_S_WAIT_DMA_DESC); iowait_wakeup()
1516 * qp_to_sdma_engine - map a qp to a send engine
1517 * @qp: the QP
 1521 * A send engine for the qp, or NULL for an SMI type qp. qp_to_sdma_engine()
1523 struct sdma_engine *qp_to_sdma_engine(struct hfi1_qp *qp, u8 sc5) qp_to_sdma_engine() argument
1525 struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device); qp_to_sdma_engine()
1530 switch (qp->ibqp.qp_type) { qp_to_sdma_engine()
1539 sde = sdma_select_engine_sc(dd, qp->ibqp.qp_num >> dd->qos_shift, sc5); qp_to_sdma_engine()
1545 struct hfi1_qp *qp; member in struct:qp_iter
1573 struct hfi1_qp *pqp = iter->qp; qp_iter_next()
1574 struct hfi1_qp *qp; qp_iter_next() local
1579 * real hash table. Since the qp code sets qp_iter_next()
1580 * the qp->next hash link to NULL, this works just fine. qp_iter_next()
1584 * n = 0..iter->specials is the special qp indices qp_iter_next()
1592 qp = rcu_dereference(pqp->next); qp_iter_next()
1604 qp = rcu_dereference(ibp->qp[0]); qp_iter_next()
1606 qp = rcu_dereference(ibp->qp[1]); qp_iter_next()
1608 qp = rcu_dereference( qp_iter_next()
1613 pqp = qp; qp_iter_next()
1614 if (qp) { qp_iter_next()
1615 iter->qp = qp; qp_iter_next()
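
qp_iter_next() above visits the special QPs (QP0/QP1 per port) first and then every bucket of the hash table, following each entry's next link. A simplified sketch of that traversal order, with the RCU details omitted and all types and names invented for illustration:

    #include <stdio.h>
    #include <stddef.h>

    struct qp_node {
        unsigned qpn;
        struct qp_node *next;      /* hash-bucket chain, NULL-terminated */
    };

    #define NUM_SPECIALS 2         /* e.g. QP0 and QP1 of a single port */
    #define NUM_BUCKETS  4

    /* Visit specials first, then every chained entry of every bucket. */
    static void visit_all(struct qp_node *specials[NUM_SPECIALS],
                          struct qp_node *buckets[NUM_BUCKETS])
    {
        for (int n = 0; n < NUM_SPECIALS; n++)
            if (specials[n])
                printf("special qp %u\n", specials[n]->qpn);

        for (int n = 0; n < NUM_BUCKETS; n++)
            for (struct qp_node *qp = buckets[n]; qp; qp = qp->next)
                printf("hashed qp %u\n", qp->qpn);
    }

    int main(void)
    {
        struct qp_node qp0 = { 0, NULL }, qp1 = { 1, NULL };
        struct qp_node a = { 7, NULL };
        struct qp_node b = { 11, &a };                 /* chain: 11 -> 7 */
        struct qp_node *specials[NUM_SPECIALS] = { &qp0, &qp1 };
        struct qp_node *buckets[NUM_BUCKETS] = { NULL, &b, NULL, NULL };

        visit_all(specials, buckets);
        return 0;
    }

Treating the special QPs as extra table entries in front of the real buckets is what lets the iterator use a single index for both, as the comment in the driver notes.
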
1627 static int qp_idle(struct hfi1_qp *qp) qp_idle() argument
1630 qp->s_last == qp->s_acked && qp_idle()
1631 qp->s_acked == qp->s_cur && qp_idle()
1632 qp->s_cur == qp->s_tail && qp_idle()
1633 qp->s_tail == qp->s_head; qp_idle()
1639 struct hfi1_qp *qp = iter->qp; qp_iter_print() local
1642 sde = qp_to_sdma_engine(qp, qp->s_sc); qp_iter_print()
1643 wqe = get_swqe_ptr(qp, qp->s_last); qp_iter_print()
1647 qp_idle(qp) ? "I" : "B", qp_iter_print()
1648 qp->ibqp.qp_num, qp_iter_print()
1649 atomic_read(&qp->refcount), qp_iter_print()
1650 qp_type_str[qp->ibqp.qp_type], qp_iter_print()
1651 qp->state, qp_iter_print()
1653 qp->s_hdrwords, qp_iter_print()
1654 qp->s_flags, qp_iter_print()
1655 atomic_read(&qp->s_iowait.sdma_busy), qp_iter_print()
1656 !list_empty(&qp->s_iowait.list), qp_iter_print()
1657 qp->timeout, qp_iter_print()
1659 qp->s_lsn, qp_iter_print()
1660 qp->s_last_psn, qp_iter_print()
1661 qp->s_psn, qp->s_next_psn, qp_iter_print()
1662 qp->s_sending_psn, qp->s_sending_hpsn, qp_iter_print()
1663 qp->s_last, qp->s_acked, qp->s_cur, qp_iter_print()
1664 qp->s_tail, qp->s_head, qp->s_size, qp_iter_print()
1665 qp->remote_qpn, qp_iter_print()
1666 qp->remote_ah_attr.dlid, qp_iter_print()
1667 qp->remote_ah_attr.sl, qp_iter_print()
1668 qp->pmtu, qp_iter_print()
1669 qp->s_retry_cnt, qp_iter_print()
1670 qp->timeout, qp_iter_print()
1671 qp->s_rnr_retry_cnt, qp_iter_print()
1676 void qp_comm_est(struct hfi1_qp *qp) qp_comm_est() argument
1678 qp->r_flags |= HFI1_R_COMM_EST; qp_comm_est()
1679 if (qp->ibqp.event_handler) { qp_comm_est()
1682 ev.device = qp->ibqp.device; qp_comm_est()
1683 ev.element.qp = &qp->ibqp; qp_comm_est()
1685 qp->ibqp.event_handler(&ev, qp->ibqp.qp_context); qp_comm_est()
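
qp_comm_est() is an instance of the callback-with-opaque-context pattern used throughout the verbs layer: if the consumer registered an event handler, the driver fills in an event structure and invokes the handler with the consumer's qp_context pointer. A generic sketch of that dispatch follows; the struct and enum below are illustrative stand-ins, not the ib_* definitions.

    #include <stdio.h>

    enum demo_event_type { DEMO_EVENT_COMM_EST, DEMO_EVENT_QP_FATAL };

    struct demo_event {
        enum demo_event_type type;
        void *element;             /* the object the event refers to */
    };

    struct demo_qp {
        int qp_num;
        void (*event_handler)(struct demo_event *ev, void *context);
        void *qp_context;          /* opaque pointer owned by the consumer */
    };

    /* Driver side: raise "communication established" on the first packet. */
    static void demo_comm_est(struct demo_qp *qp)
    {
        if (qp->event_handler) {
            struct demo_event ev = {
                .type = DEMO_EVENT_COMM_EST,
                .element = qp,
            };
            qp->event_handler(&ev, qp->qp_context);
        }
    }

    /* Consumer side: the handler gets its own context back untouched. */
    static void my_handler(struct demo_event *ev, void *context)
    {
        printf("event %d, context=%s\n", ev->type, (const char *)context);
    }

    int main(void)
    {
        struct demo_qp qp = { 5, my_handler, "consumer state" };
        demo_comm_est(&qp);
        return 0;
    }
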
H A Drc.c54 #include "qp.h"
77 static void start_timer(struct hfi1_qp *qp) start_timer() argument
79 qp->s_flags |= HFI1_S_TIMER; start_timer()
80 qp->s_timer.function = rc_timeout; start_timer()
81 /* 4.096 usec. * (1 << qp->timeout) */ start_timer()
82 qp->s_timer.expires = jiffies + qp->timeout_jiffies; start_timer()
83 add_timer(&qp->s_timer); start_timer()
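
The timer above is armed with a precomputed timeout_jiffies derived from the 5-bit IB local-ACK-timeout field: the wire encoding means 4.096 usec * 2^timeout. A standalone sketch of that conversion is below; the HZ value and the round-up policy are assumptions for the demo (the kernel uses usecs_to_jiffies()), and the zero encoding, which the spec treats specially, is not handled here.

    #include <stdint.h>
    #include <stdio.h>

    #define HZ 250                 /* assumed tick rate for the demo */

    /* 5-bit IB timeout -> microseconds: 4.096 usec * 2^timeout,
     * computed in nanoseconds to stay in integer arithmetic. */
    static uint64_t ib_timeout_to_usec(unsigned timeout)
    {
        return (4096ull << (timeout & 0x1f)) / 1000;
    }

    /* Convert to timer ticks, rounding up so we never fire early. */
    static uint64_t usec_to_jiffies(uint64_t usec)
    {
        return (usec * HZ + 999999) / 1000000;
    }

    int main(void)
    {
        for (unsigned t = 0; t <= 31; t += 7)
            printf("timeout=%2u -> %llu usec, %llu jiffies at HZ=%d\n", t,
                   (unsigned long long)ib_timeout_to_usec(t),
                   (unsigned long long)usec_to_jiffies(ib_timeout_to_usec(t)),
                   HZ);
        return 0;
    }
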
89 * @qp: a pointer to the QP
97 static int make_rc_ack(struct hfi1_ibdev *dev, struct hfi1_qp *qp, make_rc_ack() argument
108 if (!(ib_hfi1_state_ops[qp->state] & HFI1_PROCESS_RECV_OK)) make_rc_ack()
114 switch (qp->s_ack_state) { make_rc_ack()
117 e = &qp->s_ack_queue[qp->s_tail_ack_queue]; make_rc_ack()
129 if (++qp->s_tail_ack_queue > HFI1_MAX_RDMA_ATOMIC) make_rc_ack()
130 qp->s_tail_ack_queue = 0; make_rc_ack()
135 if (qp->r_head_ack_queue == qp->s_tail_ack_queue) { OP()
136 if (qp->s_flags & HFI1_S_ACK_PENDING) OP()
141 e = &qp->s_ack_queue[qp->s_tail_ack_queue];
151 qp->s_tail_ack_queue = qp->r_head_ack_queue;
155 qp->s_rdma_mr = e->rdma_sge.mr;
156 if (qp->s_rdma_mr)
157 hfi1_get_mr(qp->s_rdma_mr);
158 qp->s_ack_rdma_sge.sge = e->rdma_sge;
159 qp->s_ack_rdma_sge.num_sge = 1;
160 qp->s_cur_sge = &qp->s_ack_rdma_sge;
163 qp->s_ack_state = OP(RDMA_READ_RESPONSE_FIRST);
165 qp->s_ack_state = OP(RDMA_READ_RESPONSE_ONLY);
168 ohdr->u.aeth = hfi1_compute_aeth(qp);
170 qp->s_ack_rdma_psn = e->psn;
171 bth2 = mask_psn(qp->s_ack_rdma_psn++);
174 qp->s_cur_sge = NULL;
176 qp->s_ack_state = OP(ATOMIC_ACKNOWLEDGE);
177 ohdr->u.at.aeth = hfi1_compute_aeth(qp);
186 bth0 = qp->s_ack_state << 24;
190 qp->s_ack_state = OP(RDMA_READ_RESPONSE_MIDDLE);
193 qp->s_cur_sge = &qp->s_ack_rdma_sge;
194 qp->s_rdma_mr = qp->s_ack_rdma_sge.sge.mr;
195 if (qp->s_rdma_mr)
196 hfi1_get_mr(qp->s_rdma_mr);
197 len = qp->s_ack_rdma_sge.sge.sge_length;
202 ohdr->u.aeth = hfi1_compute_aeth(qp);
204 qp->s_ack_state = OP(RDMA_READ_RESPONSE_LAST);
205 e = &qp->s_ack_queue[qp->s_tail_ack_queue];
208 bth0 = qp->s_ack_state << 24;
209 bth2 = mask_psn(qp->s_ack_rdma_psn++);
220 qp->s_ack_state = OP(SEND_ONLY);
221 qp->s_flags &= ~HFI1_S_ACK_PENDING;
222 qp->s_cur_sge = NULL;
223 if (qp->s_nak_state)
225 cpu_to_be32((qp->r_msn & HFI1_MSN_MASK) |
226 (qp->s_nak_state <<
229 ohdr->u.aeth = hfi1_compute_aeth(qp);
233 bth2 = mask_psn(qp->s_ack_psn);
235 qp->s_rdma_ack_cnt++;
236 qp->s_hdrwords = hwords;
237 qp->s_cur_size = len;
238 hfi1_make_ruc_header(qp, ohdr, bth0, bth2, middle);
242 qp->s_ack_state = OP(ACKNOWLEDGE);
248 qp->s_flags &= ~(HFI1_S_RESP_PENDING
256 * @qp: a pointer to the QP
260 int hfi1_make_rc_req(struct hfi1_qp *qp) hfi1_make_rc_req() argument
262 struct hfi1_ibdev *dev = to_idev(qp->ibqp.device); hfi1_make_rc_req()
271 u32 pmtu = qp->pmtu; hfi1_make_rc_req()
278 ohdr = &qp->s_hdr->ibh.u.oth; hfi1_make_rc_req()
279 if (qp->remote_ah_attr.ah_flags & IB_AH_GRH) hfi1_make_rc_req()
280 ohdr = &qp->s_hdr->ibh.u.l.oth; hfi1_make_rc_req()
286 spin_lock_irqsave(&qp->s_lock, flags); hfi1_make_rc_req()
289 if ((qp->s_flags & HFI1_S_RESP_PENDING) && hfi1_make_rc_req()
290 make_rc_ack(dev, qp, ohdr, pmtu)) hfi1_make_rc_req()
293 if (!(ib_hfi1_state_ops[qp->state] & HFI1_PROCESS_SEND_OK)) { hfi1_make_rc_req()
294 if (!(ib_hfi1_state_ops[qp->state] & HFI1_FLUSH_SEND)) hfi1_make_rc_req()
297 if (qp->s_last == qp->s_head) hfi1_make_rc_req()
300 if (atomic_read(&qp->s_iowait.sdma_busy)) { hfi1_make_rc_req()
301 qp->s_flags |= HFI1_S_WAIT_DMA; hfi1_make_rc_req()
304 clear_ahg(qp); hfi1_make_rc_req()
305 wqe = get_swqe_ptr(qp, qp->s_last); hfi1_make_rc_req()
306 hfi1_send_complete(qp, wqe, qp->s_last != qp->s_acked ? hfi1_make_rc_req()
312 if (qp->s_flags & (HFI1_S_WAIT_RNR | HFI1_S_WAIT_ACK)) hfi1_make_rc_req()
315 if (cmp_psn(qp->s_psn, qp->s_sending_hpsn) <= 0) { hfi1_make_rc_req()
316 if (cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) <= 0) { hfi1_make_rc_req()
317 qp->s_flags |= HFI1_S_WAIT_PSN; hfi1_make_rc_req()
320 qp->s_sending_psn = qp->s_psn; hfi1_make_rc_req()
321 qp->s_sending_hpsn = qp->s_psn - 1; hfi1_make_rc_req()
325 wqe = get_swqe_ptr(qp, qp->s_cur); hfi1_make_rc_req()
326 switch (qp->s_state) { hfi1_make_rc_req()
328 if (!(ib_hfi1_state_ops[qp->state] & HFI1_PROCESS_NEXT_SEND_OK)) hfi1_make_rc_req()
338 if (qp->s_cur == qp->s_tail) { hfi1_make_rc_req()
340 if (qp->s_tail == qp->s_head) { hfi1_make_rc_req()
341 clear_ahg(qp); hfi1_make_rc_req()
349 qp->s_num_rd_atomic) { hfi1_make_rc_req()
350 qp->s_flags |= HFI1_S_WAIT_FENCE; hfi1_make_rc_req()
353 wqe->psn = qp->s_next_psn; hfi1_make_rc_req()
362 ss = &qp->s_sge; hfi1_make_rc_req()
363 bth2 = mask_psn(qp->s_psn); hfi1_make_rc_req()
368 if (!(qp->s_flags & HFI1_S_UNLIMITED_CREDIT) && hfi1_make_rc_req()
369 cmp_msn(wqe->ssn, qp->s_lsn + 1) > 0) { hfi1_make_rc_req()
370 qp->s_flags |= HFI1_S_WAIT_SSN_CREDIT; hfi1_make_rc_req()
376 qp->s_state = OP(SEND_FIRST); hfi1_make_rc_req()
381 qp->s_state = OP(SEND_ONLY); hfi1_make_rc_req()
383 qp->s_state = OP(SEND_ONLY_WITH_IMMEDIATE); hfi1_make_rc_req()
391 if (++qp->s_cur == qp->s_size) hfi1_make_rc_req()
392 qp->s_cur = 0; hfi1_make_rc_req()
396 if (newreq && !(qp->s_flags & HFI1_S_UNLIMITED_CREDIT)) hfi1_make_rc_req()
397 qp->s_lsn++; hfi1_make_rc_req()
401 if (!(qp->s_flags & HFI1_S_UNLIMITED_CREDIT) && hfi1_make_rc_req()
402 cmp_msn(wqe->ssn, qp->s_lsn + 1) > 0) { hfi1_make_rc_req()
403 qp->s_flags |= HFI1_S_WAIT_SSN_CREDIT; hfi1_make_rc_req()
415 qp->s_state = OP(RDMA_WRITE_FIRST); hfi1_make_rc_req()
420 qp->s_state = OP(RDMA_WRITE_ONLY); hfi1_make_rc_req()
422 qp->s_state = hfi1_make_rc_req()
431 if (++qp->s_cur == qp->s_size) hfi1_make_rc_req()
432 qp->s_cur = 0; hfi1_make_rc_req()
441 if (qp->s_num_rd_atomic >= hfi1_make_rc_req()
442 qp->s_max_rd_atomic) { hfi1_make_rc_req()
443 qp->s_flags |= HFI1_S_WAIT_RDMAR; hfi1_make_rc_req()
446 qp->s_num_rd_atomic++; hfi1_make_rc_req()
447 if (!(qp->s_flags & HFI1_S_UNLIMITED_CREDIT)) hfi1_make_rc_req()
448 qp->s_lsn++; hfi1_make_rc_req()
454 qp->s_next_psn += (len - 1) / pmtu; hfi1_make_rc_req()
455 wqe->lpsn = qp->s_next_psn++; hfi1_make_rc_req()
462 qp->s_state = OP(RDMA_READ_REQUEST); hfi1_make_rc_req()
467 if (++qp->s_cur == qp->s_size) hfi1_make_rc_req()
468 qp->s_cur = 0; hfi1_make_rc_req()
478 if (qp->s_num_rd_atomic >= hfi1_make_rc_req()
479 qp->s_max_rd_atomic) { hfi1_make_rc_req()
480 qp->s_flags |= HFI1_S_WAIT_RDMAR; hfi1_make_rc_req()
483 qp->s_num_rd_atomic++; hfi1_make_rc_req()
484 if (!(qp->s_flags & HFI1_S_UNLIMITED_CREDIT)) hfi1_make_rc_req()
485 qp->s_lsn++; hfi1_make_rc_req()
489 qp->s_state = OP(COMPARE_SWAP); hfi1_make_rc_req()
495 qp->s_state = OP(FETCH_ADD); hfi1_make_rc_req()
510 if (++qp->s_cur == qp->s_size) hfi1_make_rc_req()
511 qp->s_cur = 0; hfi1_make_rc_req()
517 qp->s_sge.sge = wqe->sg_list[0]; hfi1_make_rc_req()
518 qp->s_sge.sg_list = wqe->sg_list + 1; hfi1_make_rc_req()
519 qp->s_sge.num_sge = wqe->wr.num_sge; hfi1_make_rc_req()
520 qp->s_sge.total_len = wqe->length; hfi1_make_rc_req()
521 qp->s_len = wqe->length; hfi1_make_rc_req()
523 qp->s_tail++; hfi1_make_rc_req()
524 if (qp->s_tail >= qp->s_size) hfi1_make_rc_req()
525 qp->s_tail = 0; hfi1_make_rc_req()
528 qp->s_psn = wqe->lpsn + 1; hfi1_make_rc_req()
530 qp->s_psn++; hfi1_make_rc_req()
531 if (cmp_psn(qp->s_psn, qp->s_next_psn) > 0) hfi1_make_rc_req()
532 qp->s_next_psn = qp->s_psn; hfi1_make_rc_req()
538 * qp->s_state is normally set to the opcode of the hfi1_make_rc_req()
546 qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn, pmtu); hfi1_make_rc_req()
549 qp->s_state = OP(SEND_MIDDLE); hfi1_make_rc_req()
552 bth2 = mask_psn(qp->s_psn++); hfi1_make_rc_req()
553 if (cmp_psn(qp->s_psn, qp->s_next_psn) > 0) hfi1_make_rc_req()
554 qp->s_next_psn = qp->s_psn; hfi1_make_rc_req()
555 ss = &qp->s_sge; hfi1_make_rc_req()
556 len = qp->s_len; hfi1_make_rc_req()
563 qp->s_state = OP(SEND_LAST); hfi1_make_rc_req()
565 qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE); hfi1_make_rc_req()
573 qp->s_cur++; hfi1_make_rc_req()
574 if (qp->s_cur >= qp->s_size) hfi1_make_rc_req()
575 qp->s_cur = 0; hfi1_make_rc_req()
580 * qp->s_state is normally set to the opcode of the hfi1_make_rc_req()
588 qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn, pmtu); hfi1_make_rc_req()
591 qp->s_state = OP(RDMA_WRITE_MIDDLE); hfi1_make_rc_req()
594 bth2 = mask_psn(qp->s_psn++); hfi1_make_rc_req()
595 if (cmp_psn(qp->s_psn, qp->s_next_psn) > 0) hfi1_make_rc_req()
596 qp->s_next_psn = qp->s_psn; hfi1_make_rc_req()
597 ss = &qp->s_sge; hfi1_make_rc_req()
598 len = qp->s_len; hfi1_make_rc_req()
605 qp->s_state = OP(RDMA_WRITE_LAST); hfi1_make_rc_req()
607 qp->s_state = OP(RDMA_WRITE_LAST_WITH_IMMEDIATE); hfi1_make_rc_req()
615 qp->s_cur++; hfi1_make_rc_req()
616 if (qp->s_cur >= qp->s_size) hfi1_make_rc_req()
617 qp->s_cur = 0; hfi1_make_rc_req()
622 * qp->s_state is normally set to the opcode of the hfi1_make_rc_req()
630 len = (delta_psn(qp->s_psn, wqe->psn)) * pmtu; hfi1_make_rc_req()
636 qp->s_state = OP(RDMA_READ_REQUEST); hfi1_make_rc_req()
638 bth2 = mask_psn(qp->s_psn) | IB_BTH_REQ_ACK; hfi1_make_rc_req()
639 qp->s_psn = wqe->lpsn + 1; hfi1_make_rc_req()
642 qp->s_cur++; hfi1_make_rc_req()
643 if (qp->s_cur == qp->s_size) hfi1_make_rc_req()
644 qp->s_cur = 0; hfi1_make_rc_req()
647 qp->s_sending_hpsn = bth2; hfi1_make_rc_req()
651 if (qp->s_flags & HFI1_S_SEND_ONE) { hfi1_make_rc_req()
652 qp->s_flags &= ~HFI1_S_SEND_ONE; hfi1_make_rc_req()
653 qp->s_flags |= HFI1_S_WAIT_ACK; hfi1_make_rc_req()
656 qp->s_len -= len; hfi1_make_rc_req()
657 qp->s_hdrwords = hwords; hfi1_make_rc_req()
658 qp->s_cur_sge = ss; hfi1_make_rc_req()
659 qp->s_cur_size = len; hfi1_make_rc_req()
661 qp, hfi1_make_rc_req()
663 bth0 | (qp->s_state << 24), hfi1_make_rc_req()
671 qp->s_flags &= ~HFI1_S_BUSY; hfi1_make_rc_req()
673 spin_unlock_irqrestore(&qp->s_lock, flags); hfi1_make_rc_req()
679 * @qp: a pointer to the QP
685 void hfi1_send_rc_ack(struct hfi1_ctxtdata *rcd, struct hfi1_qp *qp, hfi1_send_rc_ack() argument
688 struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num); hfi1_send_rc_ack()
703 if (qp->s_flags & HFI1_S_RESP_PENDING) hfi1_send_rc_ack()
708 if (qp->s_rdma_ack_cnt) hfi1_send_rc_ack()
714 if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) { hfi1_send_rc_ack()
716 &qp->remote_ah_attr.grh, hwords, 0); hfi1_send_rc_ack()
724 bth0 = hfi1_get_pkey(ibp, qp->s_pkey_index) | (OP(ACKNOWLEDGE) << 24); hfi1_send_rc_ack()
725 if (qp->s_mig_state == IB_MIG_MIGRATED) hfi1_send_rc_ack()
727 if (qp->r_nak_state) hfi1_send_rc_ack()
728 ohdr->u.aeth = cpu_to_be32((qp->r_msn & HFI1_MSN_MASK) | hfi1_send_rc_ack()
729 (qp->r_nak_state << hfi1_send_rc_ack()
732 ohdr->u.aeth = hfi1_compute_aeth(qp); hfi1_send_rc_ack()
733 sc5 = ibp->sl_to_sc[qp->remote_ah_attr.sl]; hfi1_send_rc_ack()
736 lrh0 |= (sc5 & 0xf) << 12 | (qp->remote_ah_attr.sl & 0xf) << 4; hfi1_send_rc_ack()
738 hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid); hfi1_send_rc_ack()
740 hdr.lrh[3] = cpu_to_be16(ppd->lid | qp->remote_ah_attr.src_path_bits); hfi1_send_rc_ack()
742 ohdr->bth[1] = cpu_to_be32(qp->remote_qpn); hfi1_send_rc_ack()
744 ohdr->bth[2] = cpu_to_be32(mask_psn(qp->r_ack_psn)); hfi1_send_rc_ack()
753 pbc = create_pbc(ppd, pbc_flags, qp->srate_mbps, vl, plen); hfi1_send_rc_ack()
766 trace_output_ibhdr(dd_from_ibdev(qp->ibqp.device), &hdr); hfi1_send_rc_ack()
775 spin_lock_irqsave(&qp->s_lock, flags); hfi1_send_rc_ack()
776 qp->s_flags |= HFI1_S_ACK_PENDING | HFI1_S_RESP_PENDING; hfi1_send_rc_ack()
777 qp->s_nak_state = qp->r_nak_state; hfi1_send_rc_ack()
778 qp->s_ack_psn = qp->r_ack_psn; hfi1_send_rc_ack()
780 qp->s_flags |= HFI1_S_ECN; hfi1_send_rc_ack()
783 hfi1_schedule_send(qp); hfi1_send_rc_ack()
784 spin_unlock_irqrestore(&qp->s_lock, flags); hfi1_send_rc_ack()
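
The ACK construction above packs the AETH dword: an 8-bit syndrome (an ACK credit code or a NAK reason) in the top byte and the 24-bit message sequence number below it. A small pack/unpack sketch, with constant names chosen for illustration:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define AETH_MSN_MASK   0x00ffffffu
    #define AETH_SYN_SHIFT  24

    static uint32_t aeth_pack(uint8_t syndrome, uint32_t msn)
    {
        return ((uint32_t)syndrome << AETH_SYN_SHIFT) | (msn & AETH_MSN_MASK);
    }

    static uint8_t aeth_syndrome(uint32_t aeth)
    {
        return aeth >> AETH_SYN_SHIFT;
    }

    static uint32_t aeth_msn(uint32_t aeth)
    {
        return aeth & AETH_MSN_MASK;
    }

    int main(void)
    {
        /* pack an arbitrary example syndrome together with msn 0x1234 */
        uint32_t aeth = aeth_pack(0x25, 0x1234);

        assert(aeth_syndrome(aeth) == 0x25);
        assert(aeth_msn(aeth) == 0x1234);
        printf("aeth=0x%08x syndrome=0x%02x msn=0x%06x\n",
               aeth, aeth_syndrome(aeth), aeth_msn(aeth));
        return 0;
    }

The dword is byte-swapped to big-endian (cpu_to_be32) before it goes on the wire, as in the lines above.
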
789 * @qp: the QP
796 static void reset_psn(struct hfi1_qp *qp, u32 psn) reset_psn() argument
798 u32 n = qp->s_acked; reset_psn()
799 struct hfi1_swqe *wqe = get_swqe_ptr(qp, n); reset_psn()
802 qp->s_cur = n; reset_psn()
809 qp->s_state = OP(SEND_LAST); reset_psn()
818 if (++n == qp->s_size) reset_psn()
820 if (n == qp->s_tail) reset_psn()
822 wqe = get_swqe_ptr(qp, n); reset_psn()
826 qp->s_cur = n; reset_psn()
832 qp->s_state = OP(SEND_LAST); reset_psn()
846 qp->s_state = OP(RDMA_READ_RESPONSE_FIRST); reset_psn()
851 qp->s_state = OP(RDMA_READ_RESPONSE_LAST); reset_psn()
855 qp->s_state = OP(RDMA_READ_RESPONSE_MIDDLE); reset_psn()
863 qp->s_state = OP(SEND_LAST); reset_psn()
866 qp->s_psn = psn; reset_psn()
872 if ((cmp_psn(qp->s_psn, qp->s_sending_hpsn) <= 0) && reset_psn()
873 (cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) <= 0)) reset_psn()
874 qp->s_flags |= HFI1_S_WAIT_PSN; reset_psn()
875 qp->s_flags &= ~HFI1_S_AHG_VALID; reset_psn()
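
reset_psn() and restart_rc() below rewind the sender by walking the circular send queue from s_acked until they find the request whose [psn, lpsn] range contains the PSN being restarted. A compact sketch of that search over a ring of work requests, using the same sign-extended 24-bit comparison shown earlier; all names are invented.

    #include <stdint.h>
    #include <stdio.h>

    struct demo_wqe { uint32_t psn, lpsn; };  /* first/last PSN of the request */

    static int cmp_psn24(uint32_t a, uint32_t b)
    {
        return ((int32_t)((a - b) << 8)) >> 8;  /* wrap-aware 24-bit compare */
    }

    /*
     * Walk the ring from 'acked' towards 'tail' and return the index of the
     * request containing 'psn', or -1 if psn lies beyond everything queued.
     */
    static int find_wqe(struct demo_wqe *q, unsigned size,
                        unsigned acked, unsigned tail, uint32_t psn)
    {
        unsigned n = acked;

        while (n != tail) {
            if (cmp_psn24(psn, q[n].lpsn) <= 0)
                return (int)n;          /* psn falls inside this request */
            if (++n == size)
                n = 0;                  /* circular queue wrap */
        }
        return -1;
    }

    int main(void)
    {
        struct demo_wqe q[4] = {
            { 0xfffffd, 0xffffff },     /* request spanning the 24-bit wrap */
            { 0x000000, 0x000004 },
            { 0x000005, 0x000005 },
            { 0, 0 }
        };
        printf("psn 0x000002 -> wqe %d\n", find_wqe(q, 4, 0, 3, 0x000002));
        printf("psn 0xfffffe -> wqe %d\n", find_wqe(q, 4, 0, 3, 0xfffffe));
        return 0;
    }

Once the containing request is found, the driver additionally restores s_state from the request's opcode, which this sketch leaves out.
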
882 static void restart_rc(struct hfi1_qp *qp, u32 psn, int wait) restart_rc() argument
884 struct hfi1_swqe *wqe = get_swqe_ptr(qp, qp->s_acked); restart_rc()
887 if (qp->s_retry == 0) { restart_rc()
888 if (qp->s_mig_state == IB_MIG_ARMED) { restart_rc()
889 hfi1_migrate_qp(qp); restart_rc()
890 qp->s_retry = qp->s_retry_cnt; restart_rc()
891 } else if (qp->s_last == qp->s_acked) { restart_rc()
892 hfi1_send_complete(qp, wqe, IB_WC_RETRY_EXC_ERR); restart_rc()
893 hfi1_error_qp(qp, IB_WC_WR_FLUSH_ERR); restart_rc()
898 qp->s_retry--; restart_rc()
900 ibp = to_iport(qp->ibqp.device, qp->port_num); restart_rc()
904 ibp->n_rc_resends += delta_psn(qp->s_psn, psn); restart_rc()
906 qp->s_flags &= ~(HFI1_S_WAIT_FENCE | HFI1_S_WAIT_RDMAR | restart_rc()
910 qp->s_flags |= HFI1_S_SEND_ONE; restart_rc()
911 reset_psn(qp, psn); restart_rc()
919 struct hfi1_qp *qp = (struct hfi1_qp *)arg; rc_timeout() local
923 spin_lock_irqsave(&qp->r_lock, flags); rc_timeout()
924 spin_lock(&qp->s_lock); rc_timeout()
925 if (qp->s_flags & HFI1_S_TIMER) { rc_timeout()
926 ibp = to_iport(qp->ibqp.device, qp->port_num); rc_timeout()
928 qp->s_flags &= ~HFI1_S_TIMER; rc_timeout()
929 del_timer(&qp->s_timer); rc_timeout()
930 trace_hfi1_rc_timeout(qp, qp->s_last_psn + 1); rc_timeout()
931 restart_rc(qp, qp->s_last_psn + 1, 1); rc_timeout()
932 hfi1_schedule_send(qp); rc_timeout()
934 spin_unlock(&qp->s_lock); rc_timeout()
935 spin_unlock_irqrestore(&qp->r_lock, flags); rc_timeout()
943 struct hfi1_qp *qp = (struct hfi1_qp *)arg; hfi1_rc_rnr_retry() local
946 spin_lock_irqsave(&qp->s_lock, flags); hfi1_rc_rnr_retry()
947 if (qp->s_flags & HFI1_S_WAIT_RNR) { hfi1_rc_rnr_retry()
948 qp->s_flags &= ~HFI1_S_WAIT_RNR; hfi1_rc_rnr_retry()
949 del_timer(&qp->s_timer); hfi1_rc_rnr_retry()
950 hfi1_schedule_send(qp); hfi1_rc_rnr_retry()
952 spin_unlock_irqrestore(&qp->s_lock, flags); hfi1_rc_rnr_retry()
956 * Set qp->s_sending_psn to the next PSN after the given one.
959 static void reset_sending_psn(struct hfi1_qp *qp, u32 psn) reset_sending_psn() argument
962 u32 n = qp->s_last; reset_sending_psn()
966 wqe = get_swqe_ptr(qp, n); reset_sending_psn()
969 qp->s_sending_psn = wqe->lpsn + 1; reset_sending_psn()
971 qp->s_sending_psn = psn + 1; reset_sending_psn()
974 if (++n == qp->s_size) reset_sending_psn()
976 if (n == qp->s_tail) reset_sending_psn()
984 void hfi1_rc_send_complete(struct hfi1_qp *qp, struct hfi1_ib_header *hdr) hfi1_rc_send_complete() argument
993 if (!(ib_hfi1_state_ops[qp->state] & HFI1_PROCESS_OR_FLUSH_SEND)) hfi1_rc_send_complete()
1005 WARN_ON(!qp->s_rdma_ack_cnt); hfi1_rc_send_complete()
1006 qp->s_rdma_ack_cnt--; hfi1_rc_send_complete()
1011 reset_sending_psn(qp, psn); hfi1_rc_send_complete()
1017 if ((psn & IB_BTH_REQ_ACK) && qp->s_acked != qp->s_tail && hfi1_rc_send_complete()
1018 !(qp->s_flags & hfi1_rc_send_complete()
1020 (ib_hfi1_state_ops[qp->state] & HFI1_PROCESS_RECV_OK)) hfi1_rc_send_complete()
1021 start_timer(qp); hfi1_rc_send_complete()
1023 while (qp->s_last != qp->s_acked) { hfi1_rc_send_complete()
1024 wqe = get_swqe_ptr(qp, qp->s_last); hfi1_rc_send_complete()
1025 if (cmp_psn(wqe->lpsn, qp->s_sending_psn) >= 0 && hfi1_rc_send_complete()
1026 cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) <= 0) hfi1_rc_send_complete()
1034 if (!(qp->s_flags & HFI1_S_SIGNAL_REQ_WR) || hfi1_rc_send_complete()
1041 wc.qp = &qp->ibqp; hfi1_rc_send_complete()
1042 hfi1_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 0); hfi1_rc_send_complete()
1044 if (++qp->s_last >= qp->s_size) hfi1_rc_send_complete()
1045 qp->s_last = 0; hfi1_rc_send_complete()
1051 trace_hfi1_rc_sendcomplete(qp, psn); hfi1_rc_send_complete()
1052 if (qp->s_flags & HFI1_S_WAIT_PSN && hfi1_rc_send_complete()
1053 cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) > 0) { hfi1_rc_send_complete()
1054 qp->s_flags &= ~HFI1_S_WAIT_PSN; hfi1_rc_send_complete()
1055 qp->s_sending_psn = qp->s_psn; hfi1_rc_send_complete()
1056 qp->s_sending_hpsn = qp->s_psn - 1; hfi1_rc_send_complete()
1057 hfi1_schedule_send(qp); hfi1_rc_send_complete()
1061 static inline void update_last_psn(struct hfi1_qp *qp, u32 psn) update_last_psn() argument
1063 qp->s_last_psn = psn; update_last_psn()
1071 static struct hfi1_swqe *do_rc_completion(struct hfi1_qp *qp, do_rc_completion() argument
1083 if (cmp_psn(wqe->lpsn, qp->s_sending_psn) < 0 || do_rc_completion()
1084 cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) > 0) { do_rc_completion()
1091 if (!(qp->s_flags & HFI1_S_SIGNAL_REQ_WR) || do_rc_completion()
1098 wc.qp = &qp->ibqp; do_rc_completion()
1099 hfi1_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 0); do_rc_completion()
1101 if (++qp->s_last >= qp->s_size) do_rc_completion()
1102 qp->s_last = 0; do_rc_completion()
1116 sc5 = ibp->sl_to_sc[qp->remote_ah_attr.sl]; do_rc_completion()
1117 engine = qp_to_sdma_engine(qp, sc5); do_rc_completion()
1122 qp->s_retry = qp->s_retry_cnt; do_rc_completion()
1123 update_last_psn(qp, wqe->lpsn); do_rc_completion()
1130 if (qp->s_acked == qp->s_cur) { do_rc_completion()
1131 if (++qp->s_cur >= qp->s_size) do_rc_completion()
1132 qp->s_cur = 0; do_rc_completion()
1133 qp->s_acked = qp->s_cur; do_rc_completion()
1134 wqe = get_swqe_ptr(qp, qp->s_cur); do_rc_completion()
1135 if (qp->s_acked != qp->s_tail) { do_rc_completion()
1136 qp->s_state = OP(SEND_LAST); do_rc_completion()
1137 qp->s_psn = wqe->psn; do_rc_completion()
1140 if (++qp->s_acked >= qp->s_size) do_rc_completion()
1141 qp->s_acked = 0; do_rc_completion()
1142 if (qp->state == IB_QPS_SQD && qp->s_acked == qp->s_cur) do_rc_completion()
1143 qp->s_draining = 0; do_rc_completion()
1144 wqe = get_swqe_ptr(qp, qp->s_acked); do_rc_completion()
1151 * @qp: the QP the ACK came in on
1160 static int do_rc_ack(struct hfi1_qp *qp, u32 aeth, u32 psn, int opcode, do_rc_ack() argument
1171 if (qp->s_flags & (HFI1_S_TIMER | HFI1_S_WAIT_RNR)) { do_rc_ack()
1172 qp->s_flags &= ~(HFI1_S_TIMER | HFI1_S_WAIT_RNR); do_rc_ack()
1173 del_timer(&qp->s_timer); do_rc_ack()
1185 wqe = get_swqe_ptr(qp, qp->s_acked); do_rc_ack()
1186 ibp = to_iport(qp->ibqp.device, qp->port_num); do_rc_ack()
1220 if (!(qp->r_flags & HFI1_R_RDMAR_SEQ)) { do_rc_ack()
1221 qp->r_flags |= HFI1_R_RDMAR_SEQ; do_rc_ack()
1222 restart_rc(qp, qp->s_last_psn + 1, 0); do_rc_ack()
1223 if (list_empty(&qp->rspwait)) { do_rc_ack()
1224 qp->r_flags |= HFI1_R_RSP_SEND; do_rc_ack()
1225 atomic_inc(&qp->refcount); do_rc_ack()
1226 list_add_tail(&qp->rspwait, do_rc_ack()
1241 if (qp->s_num_rd_atomic && do_rc_ack()
1245 qp->s_num_rd_atomic--; do_rc_ack()
1247 if ((qp->s_flags & HFI1_S_WAIT_FENCE) && do_rc_ack()
1248 !qp->s_num_rd_atomic) { do_rc_ack()
1249 qp->s_flags &= ~(HFI1_S_WAIT_FENCE | do_rc_ack()
1251 hfi1_schedule_send(qp); do_rc_ack()
1252 } else if (qp->s_flags & HFI1_S_WAIT_RDMAR) { do_rc_ack()
1253 qp->s_flags &= ~(HFI1_S_WAIT_RDMAR | do_rc_ack()
1255 hfi1_schedule_send(qp); do_rc_ack()
1258 wqe = do_rc_completion(qp, wqe, ibp); do_rc_ack()
1259 if (qp->s_acked == qp->s_tail) do_rc_ack()
1266 if (qp->s_acked != qp->s_tail) { do_rc_ack()
1271 start_timer(qp); do_rc_ack()
1276 if (cmp_psn(qp->s_psn, psn) <= 0) do_rc_ack()
1277 reset_psn(qp, psn + 1); do_rc_ack()
1278 } else if (cmp_psn(qp->s_psn, psn) <= 0) { do_rc_ack()
1279 qp->s_state = OP(SEND_LAST); do_rc_ack()
1280 qp->s_psn = psn + 1; do_rc_ack()
1282 if (qp->s_flags & HFI1_S_WAIT_ACK) { do_rc_ack()
1283 qp->s_flags &= ~HFI1_S_WAIT_ACK; do_rc_ack()
1284 hfi1_schedule_send(qp); do_rc_ack()
1286 hfi1_get_credit(qp, aeth); do_rc_ack()
1287 qp->s_rnr_retry = qp->s_rnr_retry_cnt; do_rc_ack()
1288 qp->s_retry = qp->s_retry_cnt; do_rc_ack()
1289 update_last_psn(qp, psn); do_rc_ack()
1295 if (qp->s_acked == qp->s_tail) do_rc_ack()
1297 if (qp->s_flags & HFI1_S_WAIT_RNR) do_rc_ack()
1299 if (qp->s_rnr_retry == 0) { do_rc_ack()
1303 if (qp->s_rnr_retry_cnt < 7) do_rc_ack()
1304 qp->s_rnr_retry--; do_rc_ack()
1307 update_last_psn(qp, psn - 1); do_rc_ack()
1309 ibp->n_rc_resends += delta_psn(qp->s_psn, psn); do_rc_ack()
1311 reset_psn(qp, psn); do_rc_ack()
1313 qp->s_flags &= ~(HFI1_S_WAIT_SSN_CREDIT | HFI1_S_WAIT_ACK); do_rc_ack()
1314 qp->s_flags |= HFI1_S_WAIT_RNR; do_rc_ack()
1315 qp->s_timer.function = hfi1_rc_rnr_retry; do_rc_ack()
1316 qp->s_timer.expires = jiffies + usecs_to_jiffies( do_rc_ack()
1319 add_timer(&qp->s_timer); do_rc_ack()
1323 if (qp->s_acked == qp->s_tail) do_rc_ack()
1326 update_last_psn(qp, psn - 1); do_rc_ack()
1337 restart_rc(qp, psn, 0); do_rc_ack()
1338 hfi1_schedule_send(qp); do_rc_ack()
1355 if (qp->s_last == qp->s_acked) { do_rc_ack()
1356 hfi1_send_complete(qp, wqe, status); do_rc_ack()
1357 hfi1_error_qp(qp, IB_WC_WR_FLUSH_ERR); do_rc_ack()
1365 qp->s_retry = qp->s_retry_cnt; do_rc_ack()
1366 qp->s_rnr_retry = qp->s_rnr_retry_cnt; do_rc_ack()
1383 static void rdma_seq_err(struct hfi1_qp *qp, struct hfi1_ibport *ibp, u32 psn, rdma_seq_err() argument
1389 if (qp->s_flags & (HFI1_S_TIMER | HFI1_S_WAIT_RNR)) { rdma_seq_err()
1390 qp->s_flags &= ~(HFI1_S_TIMER | HFI1_S_WAIT_RNR); rdma_seq_err()
1391 del_timer(&qp->s_timer); rdma_seq_err()
1394 wqe = get_swqe_ptr(qp, qp->s_acked); rdma_seq_err()
1401 wqe = do_rc_completion(qp, wqe, ibp); rdma_seq_err()
1405 qp->r_flags |= HFI1_R_RDMAR_SEQ; rdma_seq_err()
1406 restart_rc(qp, qp->s_last_psn + 1, 0); rdma_seq_err()
1407 if (list_empty(&qp->rspwait)) { rdma_seq_err()
1408 qp->r_flags |= HFI1_R_RSP_SEND; rdma_seq_err()
1409 atomic_inc(&qp->refcount); rdma_seq_err()
1410 list_add_tail(&qp->rspwait, &rcd->qp_wait_list); rdma_seq_err()
1420 * @qp: the QP for this packet
1432 void *data, u32 tlen, struct hfi1_qp *qp, rc_rcv_resp()
1444 spin_lock_irqsave(&qp->s_lock, flags); rc_rcv_resp()
1446 trace_hfi1_rc_ack(qp, psn); rc_rcv_resp()
1449 if (cmp_psn(psn, qp->s_next_psn) >= 0) rc_rcv_resp()
1453 diff = cmp_psn(psn, qp->s_last_psn); rc_rcv_resp()
1459 hfi1_get_credit(qp, aeth); rc_rcv_resp()
1468 if (qp->r_flags & HFI1_R_RDMAR_SEQ) { rc_rcv_resp()
1469 if (cmp_psn(psn, qp->s_last_psn + 1) != 0) rc_rcv_resp()
1471 qp->r_flags &= ~HFI1_R_RDMAR_SEQ; rc_rcv_resp()
1474 if (unlikely(qp->s_acked == qp->s_tail)) rc_rcv_resp()
1476 wqe = get_swqe_ptr(qp, qp->s_acked); rc_rcv_resp()
1491 if (!do_rc_ack(qp, aeth, psn, opcode, val, rcd) || rc_rcv_resp()
1494 wqe = get_swqe_ptr(qp, qp->s_acked); rc_rcv_resp()
1502 qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge, rc_rcv_resp()
1508 if (unlikely(cmp_psn(psn, qp->s_last_psn + 1))) rc_rcv_resp()
1515 if (unlikely(pmtu >= qp->s_rdma_read_len)) rc_rcv_resp()
1520 * 4.096 usec. * (1 << qp->timeout) rc_rcv_resp()
1522 qp->s_flags |= HFI1_S_TIMER; rc_rcv_resp()
1523 mod_timer(&qp->s_timer, jiffies + qp->timeout_jiffies); rc_rcv_resp()
1524 if (qp->s_flags & HFI1_S_WAIT_ACK) { rc_rcv_resp()
1525 qp->s_flags &= ~HFI1_S_WAIT_ACK; rc_rcv_resp()
1526 hfi1_schedule_send(qp); rc_rcv_resp()
1530 qp->s_retry = qp->s_retry_cnt; rc_rcv_resp()
1536 qp->s_rdma_read_len -= pmtu; rc_rcv_resp()
1537 update_last_psn(qp, psn); rc_rcv_resp()
1538 spin_unlock_irqrestore(&qp->s_lock, flags); rc_rcv_resp()
1539 hfi1_copy_sge(&qp->s_rdma_read_sge, data, pmtu, 0); rc_rcv_resp()
1544 if (!do_rc_ack(qp, aeth, psn, opcode, 0, rcd)) rc_rcv_resp()
1559 wqe = get_swqe_ptr(qp, qp->s_acked); rc_rcv_resp()
1560 qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge, rc_rcv_resp()
1566 if (unlikely(cmp_psn(psn, qp->s_last_psn + 1))) rc_rcv_resp()
1580 if (unlikely(tlen != qp->s_rdma_read_len)) rc_rcv_resp()
1583 hfi1_copy_sge(&qp->s_rdma_read_sge, data, tlen, 0); rc_rcv_resp()
1584 WARN_ON(qp->s_rdma_read_sge.num_sge); rc_rcv_resp()
1585 (void) do_rc_ack(qp, aeth, psn, rc_rcv_resp()
1595 rdma_seq_err(qp, ibp, psn, rcd); rc_rcv_resp()
1601 if (qp->s_last == qp->s_acked) { rc_rcv_resp()
1602 hfi1_send_complete(qp, wqe, status); rc_rcv_resp()
1603 hfi1_error_qp(qp, IB_WC_WR_FLUSH_ERR); rc_rcv_resp()
1606 spin_unlock_irqrestore(&qp->s_lock, flags); rc_rcv_resp()
1615 * @qp: the QP for this packet
1627 struct hfi1_qp *qp, u32 opcode, u32 psn, int diff, rc_rcv_error()
1630 struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num); rc_rcv_error()
1636 trace_hfi1_rc_rcv_error(qp, psn); rc_rcv_error()
1643 if (!qp->r_nak_state) { rc_rcv_error()
1645 qp->r_nak_state = IB_NAK_PSN_ERROR; rc_rcv_error()
1647 qp->r_ack_psn = qp->r_psn; rc_rcv_error()
1653 if (list_empty(&qp->rspwait)) { rc_rcv_error()
1654 qp->r_flags |= HFI1_R_RSP_NAK; rc_rcv_error()
1655 atomic_inc(&qp->refcount); rc_rcv_error()
1656 list_add_tail(&qp->rspwait, &rcd->qp_wait_list); rc_rcv_error()
1682 spin_lock_irqsave(&qp->s_lock, flags); rc_rcv_error()
1684 for (i = qp->r_head_ack_queue; ; i = prev) { rc_rcv_error()
1685 if (i == qp->s_tail_ack_queue) rc_rcv_error()
1691 if (prev == qp->r_head_ack_queue) { rc_rcv_error()
1695 e = &qp->s_ack_queue[prev]; rc_rcv_error()
1701 if (prev == qp->s_tail_ack_queue && rc_rcv_error()
1728 offset = delta_psn(psn, e->psn) * qp->pmtu; OP()
1741 ok = hfi1_rkey_ok(qp, &e->rdma_sge, len, vaddr, rkey, OP()
1753 qp->s_tail_ack_queue = prev; OP()
1766 qp->s_tail_ack_queue = prev; OP()
1781 if (i == qp->r_head_ack_queue) {
1782 spin_unlock_irqrestore(&qp->s_lock, flags);
1783 qp->r_nak_state = 0;
1784 qp->r_ack_psn = qp->r_psn - 1;
1792 qp->s_tail_ack_queue = i;
1795 qp->s_ack_state = OP(ACKNOWLEDGE);
1796 qp->s_flags |= HFI1_S_RESP_PENDING;
1797 qp->r_nak_state = 0;
1798 hfi1_schedule_send(qp);
1801 spin_unlock_irqrestore(&qp->s_lock, flags);
1809 void hfi1_rc_error(struct hfi1_qp *qp, enum ib_wc_status err) hfi1_rc_error() argument
1814 spin_lock_irqsave(&qp->s_lock, flags); hfi1_rc_error()
1815 lastwqe = hfi1_error_qp(qp, err); hfi1_rc_error()
1816 spin_unlock_irqrestore(&qp->s_lock, flags); hfi1_rc_error()
1821 ev.device = qp->ibqp.device; hfi1_rc_error()
1822 ev.element.qp = &qp->ibqp; hfi1_rc_error()
1824 qp->ibqp.event_handler(&ev, qp->ibqp.qp_context); hfi1_rc_error()
1828 static inline void update_ack_queue(struct hfi1_qp *qp, unsigned n) update_ack_queue() argument
1835 qp->s_tail_ack_queue = next; update_ack_queue()
1836 qp->s_ack_state = OP(ACKNOWLEDGE); update_ack_queue()
1930 * @qp: the QP for this packet
1943 struct hfi1_qp *qp = packet->qp; hfi1_rc_rcv() local
1944 struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num); hfi1_rc_rcv()
1952 u32 pmtu = qp->pmtu; hfi1_rc_rcv()
1960 if (hfi1_ruc_check_hdr(ibp, hdr, rcv_flags & HFI1_HAS_GRH, qp, bth0)) hfi1_rc_rcv()
1966 u16 rlid = qp->remote_ah_attr.dlid; hfi1_rc_rcv()
1969 lqpn = qp->ibqp.qp_num; hfi1_rc_rcv()
1970 rqpn = qp->remote_qpn; hfi1_rc_rcv()
1973 qp->remote_ah_attr.sl, hfi1_rc_rcv()
1991 rc_rcv_resp(ibp, ohdr, data, tlen, qp, opcode, psn, hfi1_rc_rcv()
1999 diff = delta_psn(psn, qp->r_psn); hfi1_rc_rcv()
2001 if (rc_rcv_error(ohdr, data, qp, opcode, psn, diff, rcd)) hfi1_rc_rcv()
2007 switch (qp->r_state) { hfi1_rc_rcv()
2040 if (qp->state == IB_QPS_RTR && !(qp->r_flags & HFI1_R_COMM_EST)) hfi1_rc_rcv()
2041 qp_comm_est(qp); hfi1_rc_rcv()
2046 ret = hfi1_get_rwqe(qp, 0); hfi1_rc_rcv()
2051 qp->r_rcv_len = 0; hfi1_rc_rcv()
2059 qp->r_rcv_len += pmtu; hfi1_rc_rcv()
2060 if (unlikely(qp->r_rcv_len > qp->r_len)) hfi1_rc_rcv()
2062 hfi1_copy_sge(&qp->r_sge, data, pmtu, 1); hfi1_rc_rcv()
2067 ret = hfi1_get_rwqe(qp, 1); hfi1_rc_rcv()
2076 ret = hfi1_get_rwqe(qp, 0); hfi1_rc_rcv()
2081 qp->r_rcv_len = 0; hfi1_rc_rcv()
2104 wc.byte_len = tlen + qp->r_rcv_len; hfi1_rc_rcv()
2105 if (unlikely(wc.byte_len > qp->r_len)) hfi1_rc_rcv()
2107 hfi1_copy_sge(&qp->r_sge, data, tlen, 1); hfi1_rc_rcv()
2108 hfi1_put_ss(&qp->r_sge); hfi1_rc_rcv()
2109 qp->r_msn++; hfi1_rc_rcv()
2110 if (!test_and_clear_bit(HFI1_R_WRID_VALID, &qp->r_aflags)) hfi1_rc_rcv()
2112 wc.wr_id = qp->r_wr_id; hfi1_rc_rcv()
2119 wc.qp = &qp->ibqp; hfi1_rc_rcv()
2120 wc.src_qp = qp->remote_qpn; hfi1_rc_rcv()
2121 wc.slid = qp->remote_ah_attr.dlid; hfi1_rc_rcv()
2133 wc.sl = qp->remote_ah_attr.sl; hfi1_rc_rcv()
2140 hfi1_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, hfi1_rc_rcv()
2147 if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE))) hfi1_rc_rcv()
2151 qp->r_len = be32_to_cpu(reth->length); hfi1_rc_rcv()
2152 qp->r_rcv_len = 0; hfi1_rc_rcv()
2153 qp->r_sge.sg_list = NULL; hfi1_rc_rcv()
2154 if (qp->r_len != 0) { hfi1_rc_rcv()
2160 ok = hfi1_rkey_ok(qp, &qp->r_sge.sge, qp->r_len, vaddr, hfi1_rc_rcv()
2164 qp->r_sge.num_sge = 1; hfi1_rc_rcv()
2166 qp->r_sge.num_sge = 0; hfi1_rc_rcv()
2167 qp->r_sge.sge.mr = NULL; hfi1_rc_rcv()
2168 qp->r_sge.sge.vaddr = NULL; hfi1_rc_rcv()
2169 qp->r_sge.sge.length = 0; hfi1_rc_rcv()
2170 qp->r_sge.sge.sge_length = 0; hfi1_rc_rcv()
2176 ret = hfi1_get_rwqe(qp, 1); hfi1_rc_rcv()
2190 if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ))) OP()
2192 next = qp->r_head_ack_queue + 1; OP()
2196 spin_lock_irqsave(&qp->s_lock, flags); OP()
2197 if (unlikely(next == qp->s_tail_ack_queue)) { OP()
2198 if (!qp->s_ack_queue[next].sent) OP()
2200 update_ack_queue(qp, next); OP()
2202 e = &qp->s_ack_queue[qp->r_head_ack_queue]; OP()
2215 ok = hfi1_rkey_ok(qp, &e->rdma_sge, len, vaddr, OP()
2224 qp->r_psn += (len - 1) / pmtu; OP()
2234 e->lpsn = qp->r_psn; OP()
2240 qp->r_msn++; OP()
2241 qp->r_psn++; OP()
2242 qp->r_state = opcode; OP()
2243 qp->r_nak_state = 0; OP()
2244 qp->r_head_ack_queue = next; OP()
2247 qp->s_flags |= HFI1_S_RESP_PENDING; OP()
2248 hfi1_schedule_send(qp); OP()
2250 spin_unlock_irqrestore(&qp->s_lock, flags); OP()
2266 if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC))) OP()
2268 next = qp->r_head_ack_queue + 1; OP()
2271 spin_lock_irqsave(&qp->s_lock, flags); OP()
2272 if (unlikely(next == qp->s_tail_ack_queue)) { OP()
2273 if (!qp->s_ack_queue[next].sent) OP()
2275 update_ack_queue(qp, next); OP()
2277 e = &qp->s_ack_queue[qp->r_head_ack_queue]; OP()
2289 if (unlikely(!hfi1_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64), OP()
2294 maddr = (atomic64_t *) qp->r_sge.sge.vaddr; OP()
2298 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr, OP()
2301 hfi1_put_mr(qp->r_sge.sge.mr); OP()
2302 qp->r_sge.num_sge = 0; OP()
2307 qp->r_msn++; OP()
2308 qp->r_psn++; OP()
2309 qp->r_state = opcode; OP()
2310 qp->r_nak_state = 0; OP()
2311 qp->r_head_ack_queue = next; OP()
2314 qp->s_flags |= HFI1_S_RESP_PENDING; OP()
2315 hfi1_schedule_send(qp); OP()
2317 spin_unlock_irqrestore(&qp->s_lock, flags); OP()
2327 qp->r_psn++;
2328 qp->r_state = opcode;
2329 qp->r_ack_psn = psn;
2330 qp->r_nak_state = 0;
2337 qp->r_nak_state = IB_RNR_NAK | qp->r_min_rnr_timer;
2338 qp->r_ack_psn = qp->r_psn;
2340 if (list_empty(&qp->rspwait)) {
2341 qp->r_flags |= HFI1_R_RSP_NAK;
2342 atomic_inc(&qp->refcount);
2343 list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
2348 hfi1_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
2349 qp->r_nak_state = IB_NAK_REMOTE_OPERATIONAL_ERROR;
2350 qp->r_ack_psn = qp->r_psn;
2352 if (list_empty(&qp->rspwait)) {
2353 qp->r_flags |= HFI1_R_RSP_NAK;
2354 atomic_inc(&qp->refcount);
2355 list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
2360 spin_unlock_irqrestore(&qp->s_lock, flags);
2362 hfi1_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
2363 qp->r_nak_state = IB_NAK_INVALID_REQUEST;
2364 qp->r_ack_psn = qp->r_psn;
2366 if (list_empty(&qp->rspwait)) {
2367 qp->r_flags |= HFI1_R_RSP_NAK;
2368 atomic_inc(&qp->refcount);
2369 list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
2374 spin_unlock_irqrestore(&qp->s_lock, flags);
2376 hfi1_rc_error(qp, IB_WC_LOC_PROT_ERR);
2377 qp->r_nak_state = IB_NAK_REMOTE_ACCESS_ERROR;
2378 qp->r_ack_psn = qp->r_psn;
2380 hfi1_send_rc_ack(rcd, qp, is_fecn);
2387 struct hfi1_qp *qp) hfi1_rc_hdrerr()
2391 struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num); hfi1_rc_hdrerr()
2402 if (hfi1_ruc_check_hdr(ibp, hdr, has_grh, qp, opcode)) hfi1_rc_hdrerr()
2410 diff = delta_psn(psn, qp->r_psn); hfi1_rc_hdrerr()
2411 if (!qp->r_nak_state && diff >= 0) { hfi1_rc_hdrerr()
2413 qp->r_nak_state = IB_NAK_PSN_ERROR; hfi1_rc_hdrerr()
2415 qp->r_ack_psn = qp->r_psn; hfi1_rc_hdrerr()
2424 if (list_empty(&qp->rspwait)) { hfi1_rc_hdrerr()
2425 qp->r_flags |= HFI1_R_RSP_NAK; hfi1_rc_hdrerr()
2426 atomic_inc(&qp->refcount); hfi1_rc_hdrerr()
2428 &qp->rspwait, hfi1_rc_hdrerr()
1430 rc_rcv_resp(struct hfi1_ibport *ibp, struct hfi1_other_headers *ohdr, void *data, u32 tlen, struct hfi1_qp *qp, u32 opcode, u32 psn, u32 hdrsize, u32 pmtu, struct hfi1_ctxtdata *rcd) rc_rcv_resp() argument
1626 rc_rcv_error(struct hfi1_other_headers *ohdr, void *data, struct hfi1_qp *qp, u32 opcode, u32 psn, int diff, struct hfi1_ctxtdata *rcd) rc_rcv_error() argument
2383 hfi1_rc_hdrerr( struct hfi1_ctxtdata *rcd, struct hfi1_ib_header *hdr, u32 rcv_flags, struct hfi1_qp *qp) hfi1_rc_hdrerr() argument
H A Duc.c53 #include "qp.h"
60 * @qp: a pointer to the QP
64 int hfi1_make_uc_req(struct hfi1_qp *qp) hfi1_make_uc_req() argument
72 u32 pmtu = qp->pmtu; hfi1_make_uc_req()
76 spin_lock_irqsave(&qp->s_lock, flags); hfi1_make_uc_req()
78 if (!(ib_hfi1_state_ops[qp->state] & HFI1_PROCESS_SEND_OK)) { hfi1_make_uc_req()
79 if (!(ib_hfi1_state_ops[qp->state] & HFI1_FLUSH_SEND)) hfi1_make_uc_req()
82 if (qp->s_last == qp->s_head) hfi1_make_uc_req()
85 if (atomic_read(&qp->s_iowait.sdma_busy)) { hfi1_make_uc_req()
86 qp->s_flags |= HFI1_S_WAIT_DMA; hfi1_make_uc_req()
89 clear_ahg(qp); hfi1_make_uc_req()
90 wqe = get_swqe_ptr(qp, qp->s_last); hfi1_make_uc_req()
91 hfi1_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR); hfi1_make_uc_req()
95 ohdr = &qp->s_hdr->ibh.u.oth; hfi1_make_uc_req()
96 if (qp->remote_ah_attr.ah_flags & IB_AH_GRH) hfi1_make_uc_req()
97 ohdr = &qp->s_hdr->ibh.u.l.oth; hfi1_make_uc_req()
100 wqe = get_swqe_ptr(qp, qp->s_cur); hfi1_make_uc_req()
101 qp->s_wqe = NULL; hfi1_make_uc_req()
102 switch (qp->s_state) { hfi1_make_uc_req()
104 if (!(ib_hfi1_state_ops[qp->state] & hfi1_make_uc_req()
108 if (qp->s_cur == qp->s_head) { hfi1_make_uc_req()
109 clear_ahg(qp); hfi1_make_uc_req()
115 wqe->psn = qp->s_next_psn; hfi1_make_uc_req()
116 qp->s_psn = qp->s_next_psn; hfi1_make_uc_req()
117 qp->s_sge.sge = wqe->sg_list[0]; hfi1_make_uc_req()
118 qp->s_sge.sg_list = wqe->sg_list + 1; hfi1_make_uc_req()
119 qp->s_sge.num_sge = wqe->wr.num_sge; hfi1_make_uc_req()
120 qp->s_sge.total_len = wqe->length; hfi1_make_uc_req()
122 qp->s_len = len; hfi1_make_uc_req()
127 qp->s_state = OP(SEND_FIRST); hfi1_make_uc_req()
132 qp->s_state = OP(SEND_ONLY); hfi1_make_uc_req()
134 qp->s_state = hfi1_make_uc_req()
142 qp->s_wqe = wqe; hfi1_make_uc_req()
143 if (++qp->s_cur >= qp->s_size) hfi1_make_uc_req()
144 qp->s_cur = 0; hfi1_make_uc_req()
156 qp->s_state = OP(RDMA_WRITE_FIRST); hfi1_make_uc_req()
161 qp->s_state = OP(RDMA_WRITE_ONLY); hfi1_make_uc_req()
163 qp->s_state = hfi1_make_uc_req()
171 qp->s_wqe = wqe; hfi1_make_uc_req()
172 if (++qp->s_cur >= qp->s_size) hfi1_make_uc_req()
173 qp->s_cur = 0; hfi1_make_uc_req()
182 qp->s_state = OP(SEND_MIDDLE); hfi1_make_uc_req()
185 len = qp->s_len; hfi1_make_uc_req()
192 qp->s_state = OP(SEND_LAST); hfi1_make_uc_req()
194 qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE); hfi1_make_uc_req()
201 qp->s_wqe = wqe; hfi1_make_uc_req()
202 if (++qp->s_cur >= qp->s_size) hfi1_make_uc_req()
203 qp->s_cur = 0; hfi1_make_uc_req()
207 qp->s_state = OP(RDMA_WRITE_MIDDLE); hfi1_make_uc_req()
210 len = qp->s_len; hfi1_make_uc_req()
217 qp->s_state = OP(RDMA_WRITE_LAST); hfi1_make_uc_req()
219 qp->s_state = hfi1_make_uc_req()
227 qp->s_wqe = wqe; hfi1_make_uc_req()
228 if (++qp->s_cur >= qp->s_size) hfi1_make_uc_req()
229 qp->s_cur = 0; hfi1_make_uc_req()
232 qp->s_len -= len; hfi1_make_uc_req()
233 qp->s_hdrwords = hwords; hfi1_make_uc_req()
234 qp->s_cur_sge = &qp->s_sge; hfi1_make_uc_req()
235 qp->s_cur_size = len; hfi1_make_uc_req()
236 hfi1_make_ruc_header(qp, ohdr, bth0 | (qp->s_state << 24), hfi1_make_uc_req()
237 mask_psn(qp->s_next_psn++), middle); hfi1_make_uc_req()
243 qp->s_flags &= ~HFI1_S_BUSY; hfi1_make_uc_req()
245 spin_unlock_irqrestore(&qp->s_lock, flags); hfi1_make_uc_req()
256 * @qp: the QP for this packet.
269 struct hfi1_qp *qp = packet->qp; hfi1_uc_rcv() local
276 u32 pmtu = qp->pmtu; hfi1_uc_rcv()
284 if (hfi1_ruc_check_hdr(ibp, hdr, has_grh, qp, opcode)) hfi1_uc_rcv()
296 rqpn = qp->remote_qpn; hfi1_uc_rcv()
298 sc5 = ibp->sl_to_sc[qp->remote_ah_attr.sl]; hfi1_uc_rcv()
309 u32 src_qp = qp->remote_qpn; hfi1_uc_rcv()
312 sc5 = ibp->sl_to_sc[qp->remote_ah_attr.sl]; hfi1_uc_rcv()
314 return_cnp(ibp, qp, src_qp, pkey, dlid, slid, sc5, grh); hfi1_uc_rcv()
322 if (unlikely(cmp_psn(psn, qp->r_psn) != 0)) { hfi1_uc_rcv()
327 qp->r_psn = psn; hfi1_uc_rcv()
329 if (qp->r_state == OP(SEND_FIRST) || hfi1_uc_rcv()
330 qp->r_state == OP(SEND_MIDDLE)) { hfi1_uc_rcv()
331 set_bit(HFI1_R_REWIND_SGE, &qp->r_aflags); hfi1_uc_rcv()
332 qp->r_sge.num_sge = 0; hfi1_uc_rcv()
334 hfi1_put_ss(&qp->r_sge); hfi1_uc_rcv()
335 qp->r_state = OP(SEND_LAST); hfi1_uc_rcv()
353 switch (qp->r_state) { hfi1_uc_rcv()
381 if (qp->state == IB_QPS_RTR && !(qp->r_flags & HFI1_R_COMM_EST)) hfi1_uc_rcv()
382 qp_comm_est(qp); hfi1_uc_rcv()
390 if (test_and_clear_bit(HFI1_R_REWIND_SGE, &qp->r_aflags)) hfi1_uc_rcv()
391 qp->r_sge = qp->s_rdma_read_sge; hfi1_uc_rcv()
393 ret = hfi1_get_rwqe(qp, 0); hfi1_uc_rcv()
399 * qp->s_rdma_read_sge will be the owner hfi1_uc_rcv()
402 qp->s_rdma_read_sge = qp->r_sge; hfi1_uc_rcv()
404 qp->r_rcv_len = 0; hfi1_uc_rcv()
414 qp->r_rcv_len += pmtu; hfi1_uc_rcv()
415 if (unlikely(qp->r_rcv_len > qp->r_len)) hfi1_uc_rcv()
417 hfi1_copy_sge(&qp->r_sge, data, pmtu, 0); hfi1_uc_rcv()
438 wc.byte_len = tlen + qp->r_rcv_len; hfi1_uc_rcv()
439 if (unlikely(wc.byte_len > qp->r_len)) hfi1_uc_rcv()
442 hfi1_copy_sge(&qp->r_sge, data, tlen, 0); hfi1_uc_rcv()
443 hfi1_put_ss(&qp->s_rdma_read_sge); hfi1_uc_rcv()
445 wc.wr_id = qp->r_wr_id; hfi1_uc_rcv()
447 wc.qp = &qp->ibqp; hfi1_uc_rcv()
448 wc.src_qp = qp->remote_qpn; hfi1_uc_rcv()
449 wc.slid = qp->remote_ah_attr.dlid; hfi1_uc_rcv()
461 wc.sl = qp->remote_ah_attr.sl; hfi1_uc_rcv()
468 hfi1_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, hfi1_uc_rcv()
477 if (unlikely(!(qp->qp_access_flags & OP()
482 qp->r_len = be32_to_cpu(reth->length);
483 qp->r_rcv_len = 0;
484 qp->r_sge.sg_list = NULL;
485 if (qp->r_len != 0) {
491 ok = hfi1_rkey_ok(qp, &qp->r_sge.sge, qp->r_len,
495 qp->r_sge.num_sge = 1;
497 qp->r_sge.num_sge = 0;
498 qp->r_sge.sge.mr = NULL;
499 qp->r_sge.sge.vaddr = NULL;
500 qp->r_sge.sge.length = 0;
501 qp->r_sge.sge.sge_length = 0;
514 qp->r_rcv_len += pmtu;
515 if (unlikely(qp->r_rcv_len > qp->r_len))
517 hfi1_copy_sge(&qp->r_sge, data, pmtu, 1);
533 if (unlikely(tlen + qp->r_rcv_len != qp->r_len))
535 if (test_and_clear_bit(HFI1_R_REWIND_SGE, &qp->r_aflags))
536 hfi1_put_ss(&qp->s_rdma_read_sge);
538 ret = hfi1_get_rwqe(qp, 1);
544 wc.byte_len = qp->r_len;
546 hfi1_copy_sge(&qp->r_sge, data, tlen, 1);
547 hfi1_put_ss(&qp->r_sge);
560 if (unlikely(tlen + qp->r_rcv_len != qp->r_len))
562 hfi1_copy_sge(&qp->r_sge, data, tlen, 1);
563 hfi1_put_ss(&qp->r_sge);
570 qp->r_psn++;
571 qp->r_state = opcode;
575 set_bit(HFI1_R_REWIND_SGE, &qp->r_aflags);
576 qp->r_sge.num_sge = 0;
582 hfi1_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
H A Druc.c55 #include "qp.h"
100 static int init_sge(struct hfi1_qp *qp, struct hfi1_rwqe *wqe) init_sge() argument
108 rkt = &to_idev(qp->ibqp.device)->lk_table; init_sge()
109 pd = to_ipd(qp->ibqp.srq ? qp->ibqp.srq->pd : qp->ibqp.pd); init_sge()
110 ss = &qp->r_sge; init_sge()
111 ss->sg_list = qp->r_sg_list; init_sge()
112 qp->r_len = 0; init_sge()
120 qp->r_len += wqe->sg_list[i].length; init_sge()
124 ss->total_len = qp->r_len; init_sge()
139 wc.qp = &qp->ibqp; init_sge()
141 hfi1_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1); init_sge()
149 * @qp: the QP
150 * @wr_id_only: update qp->r_wr_id only, not qp->r_sge
157 int hfi1_get_rwqe(struct hfi1_qp *qp, int wr_id_only) hfi1_get_rwqe() argument
168 if (qp->ibqp.srq) { hfi1_get_rwqe()
169 srq = to_isrq(qp->ibqp.srq); hfi1_get_rwqe()
175 rq = &qp->r_rq; hfi1_get_rwqe()
179 if (!(ib_hfi1_state_ops[qp->state] & HFI1_PROCESS_RECV_OK)) { hfi1_get_rwqe()
204 if (!wr_id_only && !init_sge(qp, wqe)) { hfi1_get_rwqe()
208 qp->r_wr_id = wqe->wr_id; hfi1_get_rwqe()
211 set_bit(HFI1_R_WRID_VALID, &qp->r_aflags); hfi1_get_rwqe()
231 ev.device = qp->ibqp.device; hfi1_get_rwqe()
232 ev.element.srq = qp->ibqp.srq; hfi1_get_rwqe()
248 void hfi1_migrate_qp(struct hfi1_qp *qp) hfi1_migrate_qp() argument
252 qp->s_mig_state = IB_MIG_MIGRATED; hfi1_migrate_qp()
253 qp->remote_ah_attr = qp->alt_ah_attr; hfi1_migrate_qp()
254 qp->port_num = qp->alt_ah_attr.port_num; hfi1_migrate_qp()
255 qp->s_pkey_index = qp->s_alt_pkey_index; hfi1_migrate_qp()
256 qp->s_flags |= HFI1_S_AHG_CLEAR; hfi1_migrate_qp()
258 ev.device = qp->ibqp.device; hfi1_migrate_qp()
259 ev.element.qp = &qp->ibqp; hfi1_migrate_qp()
261 qp->ibqp.event_handler(&ev, qp->ibqp.qp_context); hfi1_migrate_qp()
288 int has_grh, struct hfi1_qp *qp, u32 bth0) hfi1_ruc_check_hdr()
292 u8 sc5 = ibp->sl_to_sc[qp->remote_ah_attr.sl]; hfi1_ruc_check_hdr()
294 if (qp->s_mig_state == IB_MIG_ARMED && (bth0 & IB_BTH_MIG_REQ)) { hfi1_ruc_check_hdr()
296 if (qp->alt_ah_attr.ah_flags & IB_AH_GRH) hfi1_ruc_check_hdr()
299 if (!(qp->alt_ah_attr.ah_flags & IB_AH_GRH)) hfi1_ruc_check_hdr()
301 guid = get_sguid(ibp, qp->alt_ah_attr.grh.sgid_index); hfi1_ruc_check_hdr()
305 qp->alt_ah_attr.grh.dgid.global.subnet_prefix, hfi1_ruc_check_hdr()
306 qp->alt_ah_attr.grh.dgid.global.interface_id)) hfi1_ruc_check_hdr()
314 0, qp->ibqp.qp_num, hfi1_ruc_check_hdr()
319 if (be16_to_cpu(hdr->lrh[3]) != qp->alt_ah_attr.dlid || hfi1_ruc_check_hdr()
320 ppd_from_ibp(ibp)->port != qp->alt_ah_attr.port_num) hfi1_ruc_check_hdr()
322 spin_lock_irqsave(&qp->s_lock, flags); hfi1_ruc_check_hdr()
323 hfi1_migrate_qp(qp); hfi1_ruc_check_hdr()
324 spin_unlock_irqrestore(&qp->s_lock, flags); hfi1_ruc_check_hdr()
327 if (qp->remote_ah_attr.ah_flags & IB_AH_GRH) hfi1_ruc_check_hdr()
330 if (!(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) hfi1_ruc_check_hdr()
333 qp->remote_ah_attr.grh.sgid_index); hfi1_ruc_check_hdr()
337 qp->remote_ah_attr.grh.dgid.global.subnet_prefix, hfi1_ruc_check_hdr()
338 qp->remote_ah_attr.grh.dgid.global.interface_id)) hfi1_ruc_check_hdr()
346 0, qp->ibqp.qp_num, hfi1_ruc_check_hdr()
351 if (be16_to_cpu(hdr->lrh[3]) != qp->remote_ah_attr.dlid || hfi1_ruc_check_hdr()
352 ppd_from_ibp(ibp)->port != qp->port_num) hfi1_ruc_check_hdr()
354 if (qp->s_mig_state == IB_MIG_REARM && hfi1_ruc_check_hdr()
356 qp->s_mig_state = IB_MIG_ARMED; hfi1_ruc_check_hdr()
379 struct hfi1_qp *qp; ruc_loopback() local
396 qp = hfi1_lookup_qpn(ibp, sqp->remote_qpn); ruc_loopback()
432 if (!qp || !(ib_hfi1_state_ops[qp->state] & HFI1_PROCESS_RECV_OK) || ruc_loopback()
433 qp->ibqp.qp_type != sqp->ibqp.qp_type) { ruc_loopback()
460 ret = hfi1_get_rwqe(qp, 0); ruc_loopback()
468 if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE))) ruc_loopback()
472 ret = hfi1_get_rwqe(qp, 1); ruc_loopback()
479 if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE))) ruc_loopback()
483 if (unlikely(!hfi1_rkey_ok(qp, &qp->r_sge.sge, wqe->length, ruc_loopback()
488 qp->r_sge.sg_list = NULL; ruc_loopback()
489 qp->r_sge.num_sge = 1; ruc_loopback()
490 qp->r_sge.total_len = wqe->length; ruc_loopback()
494 if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ))) ruc_loopback()
496 if (unlikely(!hfi1_rkey_ok(qp, &sqp->s_sge.sge, wqe->length, ruc_loopback()
504 qp->r_sge.sge = wqe->sg_list[0]; ruc_loopback()
505 qp->r_sge.sg_list = wqe->sg_list + 1; ruc_loopback()
506 qp->r_sge.num_sge = wqe->wr.num_sge; ruc_loopback()
507 qp->r_sge.total_len = wqe->length; ruc_loopback()
512 if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC))) ruc_loopback()
514 if (unlikely(!hfi1_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64), ruc_loopback()
520 maddr = (atomic64_t *) qp->r_sge.sge.vaddr; ruc_loopback()
525 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr, ruc_loopback()
527 hfi1_put_mr(qp->r_sge.sge.mr); ruc_loopback()
528 qp->r_sge.num_sge = 0; ruc_loopback()
545 hfi1_copy_sge(&qp->r_sge, sge->vaddr, len, release); ruc_loopback()
568 hfi1_put_ss(&qp->r_sge); ruc_loopback()
570 if (!test_and_clear_bit(HFI1_R_WRID_VALID, &qp->r_aflags)) ruc_loopback()
577 wc.wr_id = qp->r_wr_id; ruc_loopback()
580 wc.qp = &qp->ibqp; ruc_loopback()
581 wc.src_qp = qp->remote_qpn; ruc_loopback()
582 wc.slid = qp->remote_ah_attr.dlid; ruc_loopback()
583 wc.sl = qp->remote_ah_attr.sl; ruc_loopback()
586 hfi1_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, ruc_loopback()
599 if (qp->ibqp.qp_type == IB_QPT_UC) ruc_loopback()
618 usecs_to_jiffies(ib_hfi1_rnr_table[qp->r_min_rnr_timer]); ruc_loopback()
637 hfi1_rc_error(qp, wc.status); ruc_loopback()
651 ev.element.qp = &sqp->ibqp; ruc_loopback()
702 * @qp: a pointer to QP
711 static inline void build_ahg(struct hfi1_qp *qp, u32 npsn) build_ahg() argument
713 if (unlikely(qp->s_flags & HFI1_S_AHG_CLEAR)) build_ahg()
714 clear_ahg(qp); build_ahg()
715 if (!(qp->s_flags & HFI1_S_AHG_VALID)) { build_ahg()
717 if (qp->s_ahgidx < 0) { build_ahg()
718 if (!qp->s_sde) build_ahg()
719 qp->s_sde = qp_to_sdma_engine(qp, qp->s_sc); build_ahg()
720 qp->s_ahgidx = sdma_ahg_alloc(qp->s_sde); build_ahg()
722 if (qp->s_ahgidx >= 0) { build_ahg()
723 qp->s_ahgpsn = npsn; build_ahg()
724 qp->s_hdr->tx_flags |= SDMA_TXREQ_F_AHG_COPY; build_ahg()
726 qp->s_hdr->sde = qp->s_sde; build_ahg()
727 qp->s_hdr->ahgidx = qp->s_ahgidx; build_ahg()
728 qp->s_flags |= HFI1_S_AHG_VALID; build_ahg()
732 if (qp->s_ahgidx >= 0) { build_ahg()
733 qp->s_hdr->tx_flags |= SDMA_TXREQ_F_USE_AHG; build_ahg()
734 qp->s_hdr->ahgidx = qp->s_ahgidx; build_ahg()
735 qp->s_hdr->ahgcount++; build_ahg()
736 qp->s_hdr->ahgdesc[0] = build_ahg()
743 (qp->s_ahgpsn & 0xffff0000)) { build_ahg()
744 qp->s_hdr->ahgcount++; build_ahg()
745 qp->s_hdr->ahgdesc[1] = build_ahg()
757 void hfi1_make_ruc_header(struct hfi1_qp *qp, struct hfi1_other_headers *ohdr, hfi1_make_ruc_header() argument
760 struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num); hfi1_make_ruc_header()
768 extra_bytes = -qp->s_cur_size & 3; hfi1_make_ruc_header()
769 nwords = (qp->s_cur_size + extra_bytes) >> 2; hfi1_make_ruc_header()
771 if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) { hfi1_make_ruc_header()
772 qp->s_hdrwords += hfi1_make_grh(ibp, &qp->s_hdr->ibh.u.l.grh, hfi1_make_ruc_header()
773 &qp->remote_ah_attr.grh, hfi1_make_ruc_header()
774 qp->s_hdrwords, nwords); hfi1_make_ruc_header()
778 sc5 = ibp->sl_to_sc[qp->remote_ah_attr.sl]; hfi1_make_ruc_header()
779 lrh0 |= (sc5 & 0xf) << 12 | (qp->remote_ah_attr.sl & 0xf) << 4; hfi1_make_ruc_header()
780 qp->s_sc = sc5; hfi1_make_ruc_header()
792 qp->s_hdr->tx_flags = 0; hfi1_make_ruc_header()
793 qp->s_hdr->ahgcount = 0; hfi1_make_ruc_header()
794 qp->s_hdr->ahgidx = 0; hfi1_make_ruc_header()
795 qp->s_hdr->sde = NULL; hfi1_make_ruc_header()
796 if (qp->s_mig_state == IB_MIG_MIGRATED) hfi1_make_ruc_header()
801 build_ahg(qp, bth2); hfi1_make_ruc_header()
803 qp->s_flags &= ~HFI1_S_AHG_VALID; hfi1_make_ruc_header()
804 qp->s_hdr->ibh.lrh[0] = cpu_to_be16(lrh0); hfi1_make_ruc_header()
805 qp->s_hdr->ibh.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid); hfi1_make_ruc_header()
806 qp->s_hdr->ibh.lrh[2] = hfi1_make_ruc_header()
807 cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC); hfi1_make_ruc_header()
808 qp->s_hdr->ibh.lrh[3] = cpu_to_be16(ppd_from_ibp(ibp)->lid | hfi1_make_ruc_header()
809 qp->remote_ah_attr.src_path_bits); hfi1_make_ruc_header()
810 bth0 |= hfi1_get_pkey(ibp, qp->s_pkey_index); hfi1_make_ruc_header()
813 bth1 = qp->remote_qpn; hfi1_make_ruc_header()
814 if (qp->s_flags & HFI1_S_ECN) { hfi1_make_ruc_header()
815 qp->s_flags &= ~HFI1_S_ECN; hfi1_make_ruc_header()
837 struct hfi1_qp *qp = container_of(wait, struct hfi1_qp, s_iowait); hfi1_do_send() local
838 struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num); hfi1_do_send()
840 int (*make_req)(struct hfi1_qp *qp); hfi1_do_send()
844 if ((qp->ibqp.qp_type == IB_QPT_RC || hfi1_do_send()
845 qp->ibqp.qp_type == IB_QPT_UC) && hfi1_do_send()
847 (qp->remote_ah_attr.dlid & ~((1 << ppd->lmc) - 1)) == ppd->lid) { hfi1_do_send()
848 ruc_loopback(qp); hfi1_do_send()
852 if (qp->ibqp.qp_type == IB_QPT_RC) hfi1_do_send()
854 else if (qp->ibqp.qp_type == IB_QPT_UC) hfi1_do_send()
859 spin_lock_irqsave(&qp->s_lock, flags); hfi1_do_send()
862 if (!hfi1_send_ok(qp)) { hfi1_do_send()
863 spin_unlock_irqrestore(&qp->s_lock, flags); hfi1_do_send()
867 qp->s_flags |= HFI1_S_BUSY; hfi1_do_send()
869 spin_unlock_irqrestore(&qp->s_lock, flags); hfi1_do_send()
874 if (qp->s_hdrwords != 0) { hfi1_do_send()
879 if (hfi1_verbs_send(qp, qp->s_hdr, qp->s_hdrwords, hfi1_do_send()
880 qp->s_cur_sge, qp->s_cur_size)) hfi1_do_send()
883 qp->s_hdrwords = 0; hfi1_do_send()
892 } while (make_req(qp)); hfi1_do_send()
898 void hfi1_send_complete(struct hfi1_qp *qp, struct hfi1_swqe *wqe, hfi1_send_complete() argument
904 if (!(ib_hfi1_state_ops[qp->state] & HFI1_PROCESS_OR_FLUSH_SEND)) hfi1_send_complete()
912 if (qp->ibqp.qp_type == IB_QPT_UD || hfi1_send_complete()
913 qp->ibqp.qp_type == IB_QPT_SMI || hfi1_send_complete()
914 qp->ibqp.qp_type == IB_QPT_GSI) hfi1_send_complete()
918 if (!(qp->s_flags & HFI1_S_SIGNAL_REQ_WR) || hfi1_send_complete()
927 wc.qp = &qp->ibqp; hfi1_send_complete()
930 hfi1_cq_enter(to_icq(qp->ibqp.send_cq), &wc, hfi1_send_complete()
934 last = qp->s_last; hfi1_send_complete()
936 if (++last >= qp->s_size) hfi1_send_complete()
938 qp->s_last = last; hfi1_send_complete()
939 if (qp->s_acked == old_last) hfi1_send_complete()
940 qp->s_acked = last; hfi1_send_complete()
941 if (qp->s_cur == old_last) hfi1_send_complete()
942 qp->s_cur = last; hfi1_send_complete()
943 if (qp->s_tail == old_last) hfi1_send_complete()
944 qp->s_tail = last; hfi1_send_complete()
945 if (qp->state == IB_QPS_SQD && last == qp->s_cur) hfi1_send_complete()
946 qp->s_draining = 0; hfi1_send_complete()
287 hfi1_ruc_check_hdr(struct hfi1_ibport *ibp, struct hfi1_ib_header *hdr, int has_grh, struct hfi1_qp *qp, u32 bth0) hfi1_ruc_check_hdr() argument
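
Both the loopback path above and the remote atomic handling in rc.c implement the two IB atomics the same way: FETCH_ADD returns the value that was at the target before the add, and COMPARE_SWAP stores the swap operand only if the prior value equals the compare operand, returning the prior value either way. A userspace sketch of those semantics with C11 atomics; the kernel path of course goes through rkey checks and MR mappings first.

    #include <assert.h>
    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    /* FETCH_ADD: add 'add' to *target, return the value seen before the add. */
    static uint64_t ib_fetch_add(_Atomic uint64_t *target, uint64_t add)
    {
        return atomic_fetch_add(target, add);
    }

    /* COMPARE_SWAP: store 'swap' only if *target == compare; return the old
     * value whether or not the swap happened. */
    static uint64_t ib_cmp_swap(_Atomic uint64_t *target,
                                uint64_t compare, uint64_t swap)
    {
        uint64_t expected = compare;

        atomic_compare_exchange_strong(target, &expected, swap);
        return expected;            /* holds the prior value either way */
    }

    int main(void)
    {
        _Atomic uint64_t mem = 10;

        assert(ib_fetch_add(&mem, 5) == 10 && mem == 15);
        assert(ib_cmp_swap(&mem, 15, 99) == 15 && mem == 99); /* match: swapped */
        assert(ib_cmp_swap(&mem, 15, 7) == 99 && mem == 99);  /* no match: kept */
        printf("IB atomic semantics verified\n");
        return 0;
    }
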
H A Dqp.h99 * the returned qp is no longer in use.
104 struct hfi1_qp *qp = NULL; __must_hold() local
107 qp = rcu_dereference(ibp->qp[qpn]); __must_hold()
112 for (qp = rcu_dereference(dev->qp_dev->qp_table[n]); qp; __must_hold()
113 qp = rcu_dereference(qp->next)) __must_hold()
114 if (qp->ibqp.qp_num == qpn) __must_hold()
117 return qp; __must_hold()
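
The lookup helper above runs under the RCU read lock: the caller hashes the QPN, walks the bucket chain, and must take a reference before leaving the read-side critical section so the QP cannot be freed underneath it. A rough userspace analogue using a reader-writer lock in place of RCU; the names and structure are illustrative only.

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    #define NUM_BUCKETS 8

    struct demo_qp {
        unsigned qpn;
        atomic_int refcount;
        struct demo_qp *next;
    };

    static struct demo_qp *buckets[NUM_BUCKETS];
    static pthread_rwlock_t table_lock = PTHREAD_RWLOCK_INITIALIZER;

    static unsigned hash_qpn(unsigned qpn) { return qpn % NUM_BUCKETS; }

    /* Look up a QP and return it with an extra reference, or NULL. */
    static struct demo_qp *lookup_qpn(unsigned qpn)
    {
        struct demo_qp *qp;

        pthread_rwlock_rdlock(&table_lock);   /* stands in for rcu_read_lock() */
        for (qp = buckets[hash_qpn(qpn)]; qp; qp = qp->next)
            if (qp->qpn == qpn) {
                atomic_fetch_add(&qp->refcount, 1);  /* pin before unlocking */
                break;
            }
        pthread_rwlock_unlock(&table_lock);
        return qp;
    }

    int main(void)
    {
        struct demo_qp qp = { .qpn = 42, .next = NULL };

        atomic_init(&qp.refcount, 1);
        pthread_rwlock_wrlock(&table_lock);
        buckets[hash_qpn(qp.qpn)] = &qp;      /* publish */
        pthread_rwlock_unlock(&table_lock);

        struct demo_qp *found = lookup_qpn(42);
        printf("found qpn=%u refcount=%d\n",
               found ? found->qpn : 0,
               found ? atomic_load(&found->refcount) : 0);
        return 0;
    }
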
121 * clear_ahg - reset ahg status in qp
122 * @qp - qp pointer
124 static inline void clear_ahg(struct hfi1_qp *qp) clear_ahg() argument
126 qp->s_hdr->ahgcount = 0; clear_ahg()
127 qp->s_flags &= ~(HFI1_S_AHG_VALID | HFI1_S_AHG_CLEAR); clear_ahg()
128 if (qp->s_sde && qp->s_ahgidx >= 0) clear_ahg()
129 sdma_ahg_free(qp->s_sde, qp->s_ahgidx); clear_ahg()
130 qp->s_ahgidx = -1; clear_ahg()
131 qp->s_sde = NULL; clear_ahg()
136 * @qp: the QP to put into the error state
144 int hfi1_error_qp(struct hfi1_qp *qp, enum ib_wc_status err);
163 * @qp: the queue pair to compute the AETH for
167 __be32 hfi1_compute_aeth(struct hfi1_qp *qp);
 195 * @qp: the qp whose send work queue to flush
200 void hfi1_get_credit(struct hfi1_qp *qp, u32 aeth);
216 * @qp: the QP
 217 * @flag: the wait flag on which the qp is stalled
219 void hfi1_qp_wakeup(struct hfi1_qp *qp, u32 flag);
221 struct sdma_engine *qp_to_sdma_engine(struct hfi1_qp *qp, u8 sc5);
233 * @iter: the iterator for the qp hash list
239 * @s: the seq_file to emit the qp information on
240 * @iter: the iterator for the qp hash list
246 * @qp: the QP
248 void qp_comm_est(struct hfi1_qp *qp);
H A Dud.c56 #include "qp.h"
72 struct hfi1_qp *qp; ud_loopback() local
83 qp = hfi1_lookup_qpn(ibp, swqe->ud_wr.remote_qpn); ud_loopback()
84 if (!qp) { ud_loopback()
92 dqptype = qp->ibqp.qp_type == IB_QPT_GSI ? ud_loopback()
93 IB_QPT_UD : qp->ibqp.qp_type; ud_loopback()
96 !(ib_hfi1_state_ops[qp->state] & HFI1_PROCESS_RECV_OK)) { ud_loopback()
104 if (qp->ibqp.qp_num > 1) { ud_loopback()
113 qp->s_pkey_index, slid))) { ud_loopback()
116 sqp->ibqp.qp_num, qp->ibqp.qp_num, ud_loopback()
128 if (qp->ibqp.qp_num) { ud_loopback()
133 if (unlikely(qkey != qp->qkey)) { ud_loopback()
140 sqp->ibqp.qp_num, qp->ibqp.qp_num, ud_loopback()
160 spin_lock_irqsave(&qp->r_lock, flags); ud_loopback()
165 if (qp->r_flags & HFI1_R_REUSE_SGE) ud_loopback()
166 qp->r_flags &= ~HFI1_R_REUSE_SGE; ud_loopback()
170 ret = hfi1_get_rwqe(qp, 0); ud_loopback()
172 hfi1_rc_error(qp, IB_WC_LOC_QP_OP_ERR); ud_loopback()
176 if (qp->ibqp.qp_num == 0) ud_loopback()
182 if (unlikely(wc.byte_len > qp->r_len)) { ud_loopback()
183 qp->r_flags |= HFI1_R_REUSE_SGE; ud_loopback()
189 hfi1_copy_sge(&qp->r_sge, &ah_attr->grh, ud_loopback()
193 hfi1_skip_sge(&qp->r_sge, sizeof(struct ib_grh), 1); ud_loopback()
206 hfi1_copy_sge(&qp->r_sge, sge->vaddr, len, 1); ud_loopback()
226 hfi1_put_ss(&qp->r_sge); ud_loopback()
227 if (!test_and_clear_bit(HFI1_R_WRID_VALID, &qp->r_aflags)) ud_loopback()
229 wc.wr_id = qp->r_wr_id; ud_loopback()
232 wc.qp = &qp->ibqp; ud_loopback()
234 if (qp->ibqp.qp_type == IB_QPT_GSI || qp->ibqp.qp_type == IB_QPT_SMI) { ud_loopback()
249 wc.port_num = qp->port_num; ud_loopback()
251 hfi1_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, ud_loopback()
255 spin_unlock_irqrestore(&qp->r_lock, flags); ud_loopback()
262 * @qp: the QP
266 int hfi1_make_ud_req(struct hfi1_qp *qp) hfi1_make_ud_req() argument
283 spin_lock_irqsave(&qp->s_lock, flags); hfi1_make_ud_req()
285 if (!(ib_hfi1_state_ops[qp->state] & HFI1_PROCESS_NEXT_SEND_OK)) { hfi1_make_ud_req()
286 if (!(ib_hfi1_state_ops[qp->state] & HFI1_FLUSH_SEND)) hfi1_make_ud_req()
289 if (qp->s_last == qp->s_head) hfi1_make_ud_req()
292 if (atomic_read(&qp->s_iowait.sdma_busy)) { hfi1_make_ud_req()
293 qp->s_flags |= HFI1_S_WAIT_DMA; hfi1_make_ud_req()
296 wqe = get_swqe_ptr(qp, qp->s_last); hfi1_make_ud_req()
297 hfi1_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR); hfi1_make_ud_req()
301 if (qp->s_cur == qp->s_head) hfi1_make_ud_req()
304 wqe = get_swqe_ptr(qp, qp->s_cur); hfi1_make_ud_req()
305 next_cur = qp->s_cur + 1; hfi1_make_ud_req()
306 if (next_cur >= qp->s_size) hfi1_make_ud_req()
310 ibp = to_iport(qp->ibqp.device, qp->port_num); hfi1_make_ud_req()
318 qp->ibqp.qp_type == IB_QPT_GSI)))) { hfi1_make_ud_req()
326 if (atomic_read(&qp->s_iowait.sdma_busy)) { hfi1_make_ud_req()
327 qp->s_flags |= HFI1_S_WAIT_DMA; hfi1_make_ud_req()
330 qp->s_cur = next_cur; hfi1_make_ud_req()
331 spin_unlock_irqrestore(&qp->s_lock, flags); hfi1_make_ud_req()
332 ud_loopback(qp, wqe); hfi1_make_ud_req()
333 spin_lock_irqsave(&qp->s_lock, flags); hfi1_make_ud_req()
334 hfi1_send_complete(qp, wqe, IB_WC_SUCCESS); hfi1_make_ud_req()
339 qp->s_cur = next_cur; hfi1_make_ud_req()
344 qp->s_hdrwords = 7; hfi1_make_ud_req()
345 qp->s_cur_size = wqe->length; hfi1_make_ud_req()
346 qp->s_cur_sge = &qp->s_sge; hfi1_make_ud_req()
347 qp->s_srate = ah_attr->static_rate; hfi1_make_ud_req()
348 qp->srate_mbps = ib_rate_to_mbps(qp->s_srate); hfi1_make_ud_req()
349 qp->s_wqe = wqe; hfi1_make_ud_req()
350 qp->s_sge.sge = wqe->sg_list[0]; hfi1_make_ud_req()
351 qp->s_sge.sg_list = wqe->sg_list + 1; hfi1_make_ud_req()
352 qp->s_sge.num_sge = wqe->wr.num_sge; hfi1_make_ud_req()
353 qp->s_sge.total_len = wqe->length; hfi1_make_ud_req()
357 qp->s_hdrwords += hfi1_make_grh(ibp, &qp->s_hdr->ibh.u.l.grh, hfi1_make_ud_req()
359 qp->s_hdrwords, nwords); hfi1_make_ud_req()
361 ohdr = &qp->s_hdr->ibh.u.l.oth; hfi1_make_ud_req()
369 ohdr = &qp->s_hdr->ibh.u.oth; hfi1_make_ud_req()
372 qp->s_hdrwords++; hfi1_make_ud_req()
379 if (qp->ibqp.qp_type == IB_QPT_SMI) { hfi1_make_ud_req()
381 qp->s_sc = 0xf; hfi1_make_ud_req()
384 qp->s_sc = sc5; hfi1_make_ud_req()
386 qp->s_hdr->ibh.lrh[0] = cpu_to_be16(lrh0); hfi1_make_ud_req()
387 qp->s_hdr->ibh.lrh[1] = cpu_to_be16(ah_attr->dlid); /* DEST LID */ hfi1_make_ud_req()
388 qp->s_hdr->ibh.lrh[2] = hfi1_make_ud_req()
389 cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC); hfi1_make_ud_req()
391 qp->s_hdr->ibh.lrh[3] = IB_LID_PERMISSIVE; hfi1_make_ud_req()
396 qp->s_hdr->ibh.lrh[3] = cpu_to_be16(lid); hfi1_make_ud_req()
398 qp->s_hdr->ibh.lrh[3] = IB_LID_PERMISSIVE; hfi1_make_ud_req()
403 if (qp->ibqp.qp_type == IB_QPT_GSI || qp->ibqp.qp_type == IB_QPT_SMI) hfi1_make_ud_req()
406 bth0 |= hfi1_get_pkey(ibp, qp->s_pkey_index); hfi1_make_ud_req()
409 ohdr->bth[2] = cpu_to_be32(mask_psn(qp->s_next_psn++)); hfi1_make_ud_req()
415 qp->qkey : wqe->ud_wr.remote_qkey); hfi1_make_ud_req()
416 ohdr->u.ud.deth[1] = cpu_to_be32(qp->ibqp.qp_num); hfi1_make_ud_req()
418 qp->s_hdr->ahgcount = 0; hfi1_make_ud_req()
419 qp->s_hdr->ahgidx = 0; hfi1_make_ud_req()
420 qp->s_hdr->tx_flags = 0; hfi1_make_ud_req()
421 qp->s_hdr->sde = NULL; hfi1_make_ud_req()
428 qp->s_flags &= ~HFI1_S_BUSY; hfi1_make_ud_req()
430 spin_unlock_irqrestore(&qp->s_lock, flags); hfi1_make_ud_req()
479 void return_cnp(struct hfi1_ibport *ibp, struct hfi1_qp *qp, u32 remote_qpn, return_cnp() argument
490 struct send_context *ctxt = qp_to_send_context(qp, sc5); return_cnp()
525 pbc = create_pbc(ppd, pbc_flags, qp->srate_mbps, vl, plen); return_cnp()
553 struct hfi1_qp *qp, u16 slid, struct opa_smp *smp) opa_smp_check()
635 * @qp: the QP the packet came on
657 struct hfi1_qp *qp = packet->qp; hfi1_ud_rcv() local
703 return_cnp(ibp, qp, src_qp, pkey, dlid, slid, sc5, grh); hfi1_ud_rcv()
719 if (qp->ibqp.qp_num) { hfi1_ud_rcv()
723 if (qp->ibqp.qp_num > 1) { hfi1_ud_rcv()
743 src_qp, qp->ibqp.qp_num, hfi1_ud_rcv()
754 if (unlikely(qkey != qp->qkey)) { hfi1_ud_rcv()
757 src_qp, qp->ibqp.qp_num, hfi1_ud_rcv()
762 if (unlikely(qp->ibqp.qp_num == 1 && hfi1_ud_rcv()
775 if (opa_smp_check(ibp, pkey, sc5, qp, slid, smp)) hfi1_ud_rcv()
792 if (qp->ibqp.qp_num > 1 && hfi1_ud_rcv()
812 if (qp->r_flags & HFI1_R_REUSE_SGE) hfi1_ud_rcv()
813 qp->r_flags &= ~HFI1_R_REUSE_SGE; hfi1_ud_rcv()
817 ret = hfi1_get_rwqe(qp, 0); hfi1_ud_rcv()
819 hfi1_rc_error(qp, IB_WC_LOC_QP_OP_ERR); hfi1_ud_rcv()
823 if (qp->ibqp.qp_num == 0) hfi1_ud_rcv()
829 if (unlikely(wc.byte_len > qp->r_len)) { hfi1_ud_rcv()
830 qp->r_flags |= HFI1_R_REUSE_SGE; hfi1_ud_rcv()
834 hfi1_copy_sge(&qp->r_sge, &hdr->u.l.grh, hfi1_ud_rcv()
838 hfi1_skip_sge(&qp->r_sge, sizeof(struct ib_grh), 1); hfi1_ud_rcv()
839 hfi1_copy_sge(&qp->r_sge, data, wc.byte_len - sizeof(struct ib_grh), 1); hfi1_ud_rcv()
840 hfi1_put_ss(&qp->r_sge); hfi1_ud_rcv()
841 if (!test_and_clear_bit(HFI1_R_WRID_VALID, &qp->r_aflags)) hfi1_ud_rcv()
843 wc.wr_id = qp->r_wr_id; hfi1_ud_rcv()
847 wc.qp = &qp->ibqp; hfi1_ud_rcv()
850 if (qp->ibqp.qp_type == IB_QPT_GSI || hfi1_ud_rcv()
851 qp->ibqp.qp_type == IB_QPT_SMI) { hfi1_ud_rcv()
858 qp->ibqp.qp_type); hfi1_ud_rcv()
876 wc.port_num = qp->port_num; hfi1_ud_rcv()
878 hfi1_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, hfi1_ud_rcv()
552 opa_smp_check(struct hfi1_ibport *ibp, u16 pkey, u8 sc5, struct hfi1_qp *qp, u16 slid, struct opa_smp *smp) opa_smp_check() argument
H A Dverbs.c65 #include "qp.h"
358 * @qp: the QP to post on
361 static int post_one_send(struct hfi1_qp *qp, struct ib_send_wr *wr) post_one_send() argument
370 struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device); post_one_send()
375 if (unlikely(wr->num_sge > qp->s_max_sge)) post_one_send()
378 ppd = &dd->pport[qp->port_num - 1]; post_one_send()
386 if (qp->ibqp.qp_type == IB_QPT_UC) { post_one_send()
389 } else if (qp->ibqp.qp_type != IB_QPT_RC) { post_one_send()
395 if (qp->ibqp.pd != ud_wr(wr)->ah->pd) post_one_send()
404 else if (wr->opcode >= IB_WR_RDMA_READ && !qp->s_max_rd_atomic) post_one_send()
407 next = qp->s_head + 1; post_one_send()
408 if (next >= qp->s_size) post_one_send()
410 if (next == qp->s_last) post_one_send()
413 rkt = &to_idev(qp->ibqp.device)->lk_table; post_one_send()
414 pd = to_ipd(qp->ibqp.pd); post_one_send()
415 wqe = get_swqe_ptr(qp, qp->s_head); post_one_send()
418 if (qp->ibqp.qp_type != IB_QPT_UC && post_one_send()
419 qp->ibqp.qp_type != IB_QPT_RC) post_one_send()
451 if (qp->ibqp.qp_type == IB_QPT_UC || post_one_send()
452 qp->ibqp.qp_type == IB_QPT_RC) { post_one_send()
460 wqe->ssn = qp->s_ssn++; post_one_send()
461 qp->s_head = next; post_one_send()
486 struct hfi1_qp *qp = to_iqp(ibqp); post_send() local
492 spin_lock_irqsave(&qp->s_lock, flags); post_send()
495 if (unlikely(!(ib_hfi1_state_ops[qp->state] & HFI1_POST_SEND_OK))) { post_send()
496 spin_unlock_irqrestore(&qp->s_lock, flags); post_send()
501 call_send = qp->s_head == qp->s_last && !wr->next; post_send()
504 err = post_one_send(qp, wr); post_send()
513 hfi1_schedule_send(qp); post_send()
514 spin_unlock_irqrestore(&qp->s_lock, flags); post_send()
516 hfi1_do_send(&qp->s_iowait.iowork); post_send()
531 struct hfi1_qp *qp = to_iqp(ibqp); post_receive() local
532 struct hfi1_rwq *wq = qp->r_rq.wq; post_receive()
537 if (!(ib_hfi1_state_ops[qp->state] & HFI1_POST_RECV_OK) || !wq) { post_receive()
548 if ((unsigned) wr->num_sge > qp->r_rq.max_sge) { post_receive()
554 spin_lock_irqsave(&qp->r_rq.lock, flags); post_receive()
556 if (next >= qp->r_rq.size) post_receive()
559 spin_unlock_irqrestore(&qp->r_rq.lock, flags); post_receive()
565 wqe = get_rwqe_ptr(&qp->r_rq, wq->head); post_receive()
573 spin_unlock_irqrestore(&qp->r_rq.lock, flags); post_receive()
588 if (!(ib_hfi1_state_ops[packet->qp->state] & HFI1_PROCESS_RECV_OK)) qp_ok()
590 if (((opcode & OPCODE_QP_MASK) == packet->qp->allowed_ops) || qp_ok()
657 packet->qp = p->qp; hfi1_ib_rcv()
658 spin_lock_irqsave(&packet->qp->r_lock, flags); hfi1_ib_rcv()
661 spin_unlock_irqrestore(&packet->qp->r_lock, flags); hfi1_ib_rcv()
671 packet->qp = hfi1_lookup_qpn(ibp, qp_num); hfi1_ib_rcv()
672 if (!packet->qp) { hfi1_ib_rcv()
676 spin_lock_irqsave(&packet->qp->r_lock, flags); hfi1_ib_rcv()
679 spin_unlock_irqrestore(&packet->qp->r_lock, flags); hfi1_ib_rcv()
696 struct hfi1_qp *qp = NULL; mem_timer() local
703 qp = container_of(wait, struct hfi1_qp, s_iowait); mem_timer()
704 list_del_init(&qp->s_iowait.list); mem_timer()
711 if (qp) mem_timer()
712 hfi1_qp_wakeup(qp, HFI1_S_WAIT_KMEM); mem_timer()
737 struct hfi1_qp *qp) __get_txreq()
744 spin_lock_irqsave(&qp->s_lock, flags); __get_txreq()
746 if (ib_hfi1_state_ops[qp->state] & HFI1_PROCESS_RECV_OK && __get_txreq()
747 list_empty(&qp->s_iowait.list)) { __get_txreq()
749 qp->s_flags |= HFI1_S_WAIT_TX; __get_txreq()
750 list_add_tail(&qp->s_iowait.list, &dev->txwait); __get_txreq()
751 trace_hfi1_qpsleep(qp, HFI1_S_WAIT_TX); __get_txreq()
752 atomic_inc(&qp->refcount); __get_txreq()
754 qp->s_flags &= ~HFI1_S_BUSY; __get_txreq()
756 spin_unlock_irqrestore(&qp->s_lock, flags); __get_txreq()
763 struct hfi1_qp *qp) get_txreq()
770 tx = __get_txreq(dev, qp); get_txreq()
774 tx->qp = qp; get_txreq()
781 struct hfi1_qp *qp; hfi1_put_txreq() local
785 qp = tx->qp; hfi1_put_txreq()
786 dev = to_idev(qp->ibqp.device); hfi1_put_txreq()
806 qp = container_of(wait, struct hfi1_qp, s_iowait); hfi1_put_txreq()
807 list_del_init(&qp->s_iowait.list); hfi1_put_txreq()
810 hfi1_qp_wakeup(qp, HFI1_S_WAIT_TX); hfi1_put_txreq()
827 struct hfi1_qp *qp = tx->qp; verbs_sdma_complete() local
829 spin_lock(&qp->s_lock); verbs_sdma_complete()
831 hfi1_send_complete(qp, tx->wqe, IB_WC_SUCCESS); verbs_sdma_complete()
832 else if (qp->ibqp.qp_type == IB_QPT_RC) { verbs_sdma_complete()
836 hfi1_rc_send_complete(qp, hdr); verbs_sdma_complete()
845 if (qp->s_flags & HFI1_S_WAIT_DMA) { verbs_sdma_complete()
846 qp->s_flags &= ~HFI1_S_WAIT_DMA; verbs_sdma_complete()
847 hfi1_schedule_send(qp); verbs_sdma_complete()
850 spin_unlock(&qp->s_lock); verbs_sdma_complete()
855 static int wait_kmem(struct hfi1_ibdev *dev, struct hfi1_qp *qp) wait_kmem() argument
860 spin_lock_irqsave(&qp->s_lock, flags); wait_kmem()
861 if (ib_hfi1_state_ops[qp->state] & HFI1_PROCESS_RECV_OK) { wait_kmem()
863 if (list_empty(&qp->s_iowait.list)) { wait_kmem()
866 qp->s_flags |= HFI1_S_WAIT_KMEM; wait_kmem()
867 list_add_tail(&qp->s_iowait.list, &dev->memwait); wait_kmem()
868 trace_hfi1_qpsleep(qp, HFI1_S_WAIT_KMEM); wait_kmem()
869 atomic_inc(&qp->refcount); wait_kmem()
872 qp->s_flags &= ~HFI1_S_BUSY; wait_kmem()
875 spin_unlock_irqrestore(&qp->s_lock, flags); wait_kmem()
1002 int hfi1_verbs_send_dma(struct hfi1_qp *qp, struct ahg_ib_header *ahdr, hfi1_verbs_send_dma() argument
1006 struct hfi1_ibdev *dev = to_idev(qp->ibqp.device); hfi1_verbs_send_dma()
1007 struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num); hfi1_verbs_send_dma()
1013 u8 sc5 = qp->s_sc; hfi1_verbs_send_dma()
1016 if (!list_empty(&qp->s_iowait.tx_head)) { hfi1_verbs_send_dma()
1018 &qp->s_iowait.tx_head, hfi1_verbs_send_dma()
1023 ret = sdma_send_txreq(tx->sde, &qp->s_iowait, stx); hfi1_verbs_send_dma()
1029 tx = get_txreq(dev, qp); hfi1_verbs_send_dma()
1033 if (!qp->s_hdr->sde) { hfi1_verbs_send_dma()
1034 tx->sde = sde = qp_to_sdma_engine(qp, sc5); hfi1_verbs_send_dma()
1038 tx->sde = sde = qp->s_hdr->sde; hfi1_verbs_send_dma()
1041 u32 vl = sc_to_vlt(dd_from_ibdev(qp->ibqp.device), sc5); hfi1_verbs_send_dma()
1046 pbc = create_pbc(ppd, pbc_flags, qp->srate_mbps, vl, plen); hfi1_verbs_send_dma()
1048 tx->wqe = qp->s_wqe; hfi1_verbs_send_dma()
1049 tx->mr = qp->s_rdma_mr; hfi1_verbs_send_dma()
1050 if (qp->s_rdma_mr) hfi1_verbs_send_dma()
1051 qp->s_rdma_mr = NULL; hfi1_verbs_send_dma()
1056 trace_output_ibhdr(dd_from_ibdev(qp->ibqp.device), &ahdr->ibh); hfi1_verbs_send_dma()
1057 ret = sdma_send_txreq(sde, &qp->s_iowait, &tx->txreq); hfi1_verbs_send_dma()
1070 return wait_kmem(dev, qp); hfi1_verbs_send_dma()
1079 static int no_bufs_available(struct hfi1_qp *qp, struct send_context *sc) no_bufs_available() argument
1092 spin_lock_irqsave(&qp->s_lock, flags); no_bufs_available()
1093 if (ib_hfi1_state_ops[qp->state] & HFI1_PROCESS_RECV_OK) { no_bufs_available()
1095 if (list_empty(&qp->s_iowait.list)) { no_bufs_available()
1100 qp->s_flags |= HFI1_S_WAIT_PIO; no_bufs_available()
1102 list_add_tail(&qp->s_iowait.list, &sc->piowait); no_bufs_available()
1103 trace_hfi1_qpsleep(qp, HFI1_S_WAIT_PIO); no_bufs_available()
1104 atomic_inc(&qp->refcount); no_bufs_available()
1110 qp->s_flags &= ~HFI1_S_BUSY; no_bufs_available()
1113 spin_unlock_irqrestore(&qp->s_lock, flags); no_bufs_available()
1117 struct send_context *qp_to_send_context(struct hfi1_qp *qp, u8 sc5) qp_to_send_context() argument
1119 struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device); qp_to_send_context()
1120 struct hfi1_pportdata *ppd = dd->pport + (qp->port_num - 1); qp_to_send_context()
1129 int hfi1_verbs_send_pio(struct hfi1_qp *qp, struct ahg_ib_header *ahdr, hfi1_verbs_send_pio() argument
1133 struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num); hfi1_verbs_send_pio()
1144 sc5 = qp->s_sc; hfi1_verbs_send_pio()
1145 sc = qp_to_send_context(qp, sc5); hfi1_verbs_send_pio()
1150 u32 vl = sc_to_vlt(dd_from_ibdev(qp->ibqp.device), sc5); hfi1_verbs_send_pio()
1153 pbc = create_pbc(ppd, pbc_flags, qp->srate_mbps, vl, plen); hfi1_verbs_send_pio()
1176 return no_bufs_available(qp, sc); hfi1_verbs_send_pio()
1199 trace_output_ibhdr(dd_from_ibdev(qp->ibqp.device), &ahdr->ibh); hfi1_verbs_send_pio()
1201 if (qp->s_rdma_mr) { hfi1_verbs_send_pio()
1202 hfi1_put_mr(qp->s_rdma_mr); hfi1_verbs_send_pio()
1203 qp->s_rdma_mr = NULL; hfi1_verbs_send_pio()
1207 if (qp->s_wqe) { hfi1_verbs_send_pio()
1208 spin_lock_irqsave(&qp->s_lock, flags); hfi1_verbs_send_pio()
1209 hfi1_send_complete(qp, qp->s_wqe, wc_status); hfi1_verbs_send_pio()
1210 spin_unlock_irqrestore(&qp->s_lock, flags); hfi1_verbs_send_pio()
1211 } else if (qp->ibqp.qp_type == IB_QPT_RC) { hfi1_verbs_send_pio()
1212 spin_lock_irqsave(&qp->s_lock, flags); hfi1_verbs_send_pio()
1213 hfi1_rc_send_complete(qp, &ahdr->ibh); hfi1_verbs_send_pio()
1214 spin_unlock_irqrestore(&qp->s_lock, flags); hfi1_verbs_send_pio()
1249 struct hfi1_qp *qp) egress_pkey_check()
1255 u8 lnh, sc5 = qp->s_sc; egress_pkey_check()
1278 /* The most likely matching pkey has index qp->s_pkey_index */ egress_pkey_check()
1280 ppd->pkeys[qp->s_pkey_index]))) { egress_pkey_check()
1305 * @qp: the QP to send on
1312 * Return non-zero and clear qp->s_flags HFI1_S_BUSY otherwise.
1314 int hfi1_verbs_send(struct hfi1_qp *qp, struct ahg_ib_header *ahdr, hfi1_verbs_send() argument
1317 struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device); hfi1_verbs_send()
1329 if ((qp->ibqp.qp_type == IB_QPT_SMI) || hfi1_verbs_send()
1333 ret = egress_pkey_check(dd->pport, &ahdr->ibh, qp); hfi1_verbs_send()
1346 spin_lock_irqsave(&qp->s_lock, flags); hfi1_verbs_send()
1347 hfi1_send_complete(qp, qp->s_wqe, IB_WC_GENERAL_ERR); hfi1_verbs_send()
1348 spin_unlock_irqrestore(&qp->s_lock, flags); hfi1_verbs_send()
1361 qp, ahdr, hdrwords, ss, len, plen, dwords, 0); hfi1_verbs_send()
1369 qp, ahdr, hdrwords, ss, len, plen, dwords, 0); hfi1_verbs_send()
1765 qp0 = rcu_dereference(ibp->qp[0]); hfi1_create_qp0_ah()
1898 RCU_INIT_POINTER(ibp->qp[0], NULL); init_ibport()
1899 RCU_INIT_POINTER(ibp->qp[1], NULL); init_ibport()
2141 void hfi1_schedule_send(struct hfi1_qp *qp) hfi1_schedule_send() argument
2143 if (hfi1_send_ok(qp)) { hfi1_schedule_send()
2145 to_iport(qp->ibqp.device, qp->port_num); hfi1_schedule_send()
2148 iowait_schedule(&qp->s_iowait, ppd->hfi1_wq); hfi1_schedule_send()
2156 if (packet->qp->ibqp.qp_type == IB_QPT_UC) hfi1_cnp_rcv()
2158 else if (packet->qp->ibqp.qp_type == IB_QPT_UD) hfi1_cnp_rcv()
736 __get_txreq(struct hfi1_ibdev *dev, struct hfi1_qp *qp) __get_txreq() argument
762 get_txreq(struct hfi1_ibdev *dev, struct hfi1_qp *qp) get_txreq() argument
1247 egress_pkey_check(struct hfi1_pportdata *ppd, struct hfi1_ib_header *hdr, struct hfi1_qp *qp) egress_pkey_check() argument
H A DMakefile12 qp.o qsfp.o rc.o ruc.o sdma.o srq.o sysfs.o trace.o twsi.o \
H A Dverbs_mcast.c57 * @qp: the QP to link
59 static struct hfi1_mcast_qp *mcast_qp_alloc(struct hfi1_qp *qp) mcast_qp_alloc() argument
67 mqp->qp = qp; mcast_qp_alloc()
68 atomic_inc(&qp->refcount); mcast_qp_alloc()
76 struct hfi1_qp *qp = mqp->qp; mcast_qp_free() local
79 if (atomic_dec_and_test(&qp->refcount)) mcast_qp_free()
80 wake_up(&qp->wait); mcast_qp_free()
199 if (p->qp == mqp->qp) { mcast_add()
244 struct hfi1_qp *qp = to_iqp(ibqp); hfi1_multicast_attach() local
251 if (ibqp->qp_num <= 1 || qp->state == IB_QPS_RESET) { hfi1_multicast_attach()
265 mqp = mcast_qp_alloc(qp); hfi1_multicast_attach()
271 ibp = to_iport(ibqp->device, qp->port_num); hfi1_multicast_attach()
302 struct hfi1_qp *qp = to_iqp(ibqp); hfi1_multicast_detach() local
304 struct hfi1_ibport *ibp = to_iport(ibqp->device, qp->port_num); hfi1_multicast_detach()
311 if (ibqp->qp_num <= 1 || qp->state == IB_QPS_RESET) { hfi1_multicast_detach()
340 if (p->qp != qp) hfi1_multicast_detach()
H A Dtrace.h297 TP_PROTO(struct hfi1_qp *qp, u32 flags),
298 TP_ARGS(qp, flags),
300 DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
306 DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device))
308 __entry->qpn = qp->ibqp.qp_num;
309 __entry->s_flags = qp->s_flags;
321 TP_PROTO(struct hfi1_qp *qp, u32 flags),
322 TP_ARGS(qp, flags));
325 TP_PROTO(struct hfi1_qp *qp, u32 flags),
326 TP_ARGS(qp, flags));
331 TP_PROTO(struct hfi1_qp *qp, u32 bucket),
332 TP_ARGS(qp, bucket),
334 DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
339 DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device))
340 __entry->qpn = qp->ibqp.qp_num;
352 TP_PROTO(struct hfi1_qp *qp, u32 bucket),
353 TP_ARGS(qp, bucket));
356 TP_PROTO(struct hfi1_qp *qp, u32 bucket),
357 TP_ARGS(qp, bucket));
1256 TP_PROTO(struct hfi1_qp *qp, u32 psn),
1257 TP_ARGS(qp, psn),
1259 DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
1270 DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device))
1271 __entry->qpn = qp->ibqp.qp_num;
1272 __entry->s_flags = qp->s_flags;
1274 __entry->s_psn = qp->s_psn;
1275 __entry->s_next_psn = qp->s_next_psn;
1276 __entry->s_sending_psn = qp->s_sending_psn;
1277 __entry->s_sending_hpsn = qp->s_sending_hpsn;
1278 __entry->r_psn = qp->r_psn;
1295 TP_PROTO(struct hfi1_qp *qp, u32 psn),
1296 TP_ARGS(qp, psn)
1300 TP_PROTO(struct hfi1_qp *qp, u32 psn),
1301 TP_ARGS(qp, psn)
1305 TP_PROTO(struct hfi1_qp *qp, u32 psn),
1306 TP_ARGS(qp, psn)
1310 TP_PROTO(struct hfi1_qp *qp, u32 psn),
1311 TP_ARGS(qp, psn)
H A Ddriver.c62 #include "qp.h"
304 struct hfi1_qp *qp; rcv_hdrerr() local
308 qp = hfi1_lookup_qpn(ibp, qp_num); rcv_hdrerr()
309 if (!qp) { rcv_hdrerr()
318 spin_lock_irqsave(&qp->r_lock, flags); rcv_hdrerr()
321 if (!(ib_hfi1_state_ops[qp->state] & rcv_hdrerr()
326 switch (qp->ibqp.qp_type) { rcv_hdrerr()
332 qp); rcv_hdrerr()
339 spin_unlock_irqrestore(&qp->r_lock, flags); rcv_hdrerr()
373 struct hfi1_qp *qp = NULL; rcv_hdrerr() local
385 qp = hfi1_lookup_qpn(ibp, lqpn); rcv_hdrerr()
386 if (qp == NULL) { rcv_hdrerr()
391 switch (qp->ibqp.qp_type) { rcv_hdrerr()
399 rqpn = qp->remote_qpn; rcv_hdrerr()
442 static void process_ecn(struct hfi1_qp *qp, struct hfi1_ib_header *hdr, process_ecn() argument
446 struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num); process_ecn()
451 switch (qp->ibqp.qp_type) { process_ecn()
476 return_cnp(ibp, qp, src_qpn, pkey, dlid, slid, sc5, grh); process_ecn()
569 struct hfi1_qp *qp; prescan_rxq() local
606 qp = hfi1_lookup_qpn(ibp, qpn); prescan_rxq()
608 if (qp == NULL) { prescan_rxq()
613 process_ecn(qp, hdr, ohdr, rhf, grh); prescan_rxq()
713 struct hfi1_qp *qp, *nqp; process_rcv_qp_work() local
722 list_for_each_entry_safe(qp, nqp, &rcd->qp_wait_list, rspwait) { process_rcv_qp_work()
723 list_del_init(&qp->rspwait); process_rcv_qp_work()
724 if (qp->r_flags & HFI1_R_RSP_NAK) { process_rcv_qp_work()
725 qp->r_flags &= ~HFI1_R_RSP_NAK; process_rcv_qp_work()
726 hfi1_send_rc_ack(rcd, qp, 0); process_rcv_qp_work()
728 if (qp->r_flags & HFI1_R_RSP_SEND) { process_rcv_qp_work()
731 qp->r_flags &= ~HFI1_R_RSP_SEND; process_rcv_qp_work()
732 spin_lock_irqsave(&qp->s_lock, flags); process_rcv_qp_work()
733 if (ib_hfi1_state_ops[qp->state] & process_rcv_qp_work()
735 hfi1_schedule_send(qp); process_rcv_qp_work()
736 spin_unlock_irqrestore(&qp->s_lock, flags); process_rcv_qp_work()
738 if (atomic_dec_and_test(&qp->refcount)) process_rcv_qp_work()
739 wake_up(&qp->wait); process_rcv_qp_work()
H A Dverbs.h224 struct hfi1_qp *qp; member in struct:hfi1_mcast_qp
348 * in qp->s_max_sge.
367 * and stored in qp->r_rq.max_sge (or srq->rq.max_sge).
628 static inline struct hfi1_swqe *get_swqe_ptr(struct hfi1_qp *qp, get_swqe_ptr() argument
631 return (struct hfi1_swqe *)((char *)qp->s_wq + get_swqe_ptr()
633 qp->s_max_sge * get_swqe_ptr()
679 struct hfi1_qp __rcu *qp[2]; member in struct:hfi1_ibport
839 static inline int hfi1_send_ok(struct hfi1_qp *qp) hfi1_send_ok() argument
841 return !(qp->s_flags & (HFI1_S_BUSY | HFI1_S_ANY_WAIT_IO)) && hfi1_send_ok()
842 (qp->s_hdrwords || (qp->s_flags & HFI1_S_RESP_PENDING) || hfi1_send_ok()
843 !(qp->s_flags & HFI1_S_ANY_WAIT_SEND)); hfi1_send_ok()
849 void hfi1_schedule_send(struct hfi1_qp *qp);
882 /* Number of bits to pay attention to in the opcode for checking qp type */
930 int hfi1_verbs_send(struct hfi1_qp *qp, struct ahg_ib_header *ahdr,
948 struct hfi1_qp *qp);
958 void hfi1_rc_send_complete(struct hfi1_qp *qp, struct hfi1_ib_header *hdr);
960 void hfi1_rc_error(struct hfi1_qp *qp, enum ib_wc_status err);
973 int hfi1_rkey_ok(struct hfi1_qp *qp, struct hfi1_sge *sge,
1070 int hfi1_get_rwqe(struct hfi1_qp *qp, int wr_id_only);
1072 void hfi1_migrate_qp(struct hfi1_qp *qp);
1075 int has_grh, struct hfi1_qp *qp, u32 bth0);
1080 void hfi1_make_ruc_header(struct hfi1_qp *qp, struct hfi1_other_headers *ohdr,
1085 void hfi1_send_complete(struct hfi1_qp *qp, struct hfi1_swqe *wqe,
1088 void hfi1_send_rc_ack(struct hfi1_ctxtdata *, struct hfi1_qp *qp, int is_fecn);
1090 int hfi1_make_rc_req(struct hfi1_qp *qp);
1092 int hfi1_make_uc_req(struct hfi1_qp *qp);
1094 int hfi1_make_ud_req(struct hfi1_qp *qp);
1104 int hfi1_verbs_send_dma(struct hfi1_qp *qp, struct ahg_ib_header *hdr,
1108 int hfi1_verbs_send_pio(struct hfi1_qp *qp, struct ahg_ib_header *hdr,
1112 struct send_context *qp_to_send_context(struct hfi1_qp *qp, u8 sc5);
H A Dkeys.c263 * @qp: qp for validation
274 int hfi1_rkey_ok(struct hfi1_qp *qp, struct hfi1_sge *sge, hfi1_rkey_ok() argument
277 struct hfi1_lkey_table *rkt = &to_idev(qp->ibqp.device)->lk_table; hfi1_rkey_ok()
288 struct hfi1_pd *pd = to_ipd(qp->ibqp.pd); hfi1_rkey_ok()
310 if (unlikely(!mr || mr->lkey != rkey || qp->ibqp.pd != mr->pd)) hfi1_rkey_ok()
H A Ddiag.c1621 int snoop_send_dma_handler(struct hfi1_qp *qp, struct ahg_ib_header *ibhdr, snoop_send_dma_handler() argument
1627 return hfi1_verbs_send_dma(qp, ibhdr, hdrwords, ss, len, plen, dwords, snoop_send_dma_handler()
1636 int snoop_send_pio_handler(struct hfi1_qp *qp, struct ahg_ib_header *ahdr, snoop_send_pio_handler() argument
1640 struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num); snoop_send_pio_handler()
1682 md.u.pbc = create_pbc(ppd, 0, qp->s_srate, vl, plen); snoop_send_pio_handler()
1766 if (qp->s_wqe) { snoop_send_pio_handler()
1767 spin_lock_irqsave(&qp->s_lock, flags); snoop_send_pio_handler()
1769 qp, snoop_send_pio_handler()
1770 qp->s_wqe, snoop_send_pio_handler()
1772 spin_unlock_irqrestore(&qp->s_lock, flags); snoop_send_pio_handler()
1773 } else if (qp->ibqp.qp_type == IB_QPT_RC) { snoop_send_pio_handler()
1774 spin_lock_irqsave(&qp->s_lock, flags); snoop_send_pio_handler()
1775 hfi1_rc_send_complete(qp, &ahdr->ibh); snoop_send_pio_handler()
1776 spin_unlock_irqrestore(&qp->s_lock, flags); snoop_send_pio_handler()
1786 return hfi1_verbs_send_pio(qp, ahdr, hdrwords, ss, len, plen, dwords, snoop_send_pio_handler()
H A Dcq.c65 * This may be called with qp->s_lock held.
107 wc->uqueue[head].qp_num = entry->qp->qp_num; hfi1_cq_enter()
H A Dhfi.h331 struct hfi1_qp *qp; member in struct:hfi1_packet
1064 int (*process_pio_send)(struct hfi1_qp *qp, struct ahg_ib_header *ibhdr,
1067 int (*process_dma_send)(struct hfi1_qp *qp, struct ahg_ib_header *ibhdr,
1225 void return_cnp(struct hfi1_ibport *ibp, struct hfi1_qp *qp, u32 remote_qpn,
1417 int snoop_send_dma_handler(struct hfi1_qp *qp, struct ahg_ib_header *ibhdr,
1420 int snoop_send_pio_handler(struct hfi1_qp *qp, struct ahg_ib_header *ibhdr,
/linux-4.4.14/drivers/staging/rdma/ipath/
H A Dipath_rc.c56 * ipath_init_restart - initialize the qp->s_sge after a restart
57 * @qp: the QP whose SGE we're restarting
62 static void ipath_init_restart(struct ipath_qp *qp, struct ipath_swqe *wqe) ipath_init_restart() argument
66 qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn, ipath_init_restart()
67 ib_mtu_enum_to_int(qp->path_mtu)); ipath_init_restart()
68 dev = to_idev(qp->ibqp.device); ipath_init_restart()
70 if (list_empty(&qp->timerwait)) ipath_init_restart()
71 list_add_tail(&qp->timerwait, ipath_init_restart()
78 * @qp: a pointer to the QP
86 static int ipath_make_rc_ack(struct ipath_ibdev *dev, struct ipath_qp *qp, ipath_make_rc_ack() argument
96 if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_RECV_OK)) ipath_make_rc_ack()
102 switch (qp->s_ack_state) { ipath_make_rc_ack()
111 if (++qp->s_tail_ack_queue > IPATH_MAX_RDMA_ATOMIC) ipath_make_rc_ack()
112 qp->s_tail_ack_queue = 0; ipath_make_rc_ack()
117 if (qp->r_head_ack_queue == qp->s_tail_ack_queue) { OP()
118 if (qp->s_flags & IPATH_S_ACK_PENDING) OP()
120 qp->s_ack_state = OP(ACKNOWLEDGE); OP()
124 e = &qp->s_ack_queue[qp->s_tail_ack_queue];
127 qp->s_ack_rdma_sge = e->rdma_sge;
128 qp->s_cur_sge = &qp->s_ack_rdma_sge;
132 qp->s_ack_state = OP(RDMA_READ_RESPONSE_FIRST);
134 qp->s_ack_state = OP(RDMA_READ_RESPONSE_ONLY);
137 ohdr->u.aeth = ipath_compute_aeth(qp);
139 qp->s_ack_rdma_psn = e->psn;
140 bth2 = qp->s_ack_rdma_psn++ & IPATH_PSN_MASK;
143 qp->s_cur_sge = NULL;
145 qp->s_ack_state = OP(ATOMIC_ACKNOWLEDGE);
146 ohdr->u.at.aeth = ipath_compute_aeth(qp);
155 bth0 = qp->s_ack_state << 24;
159 qp->s_ack_state = OP(RDMA_READ_RESPONSE_MIDDLE);
162 len = qp->s_ack_rdma_sge.sge.sge_length;
166 ohdr->u.aeth = ipath_compute_aeth(qp);
168 qp->s_ack_state = OP(RDMA_READ_RESPONSE_LAST);
169 qp->s_ack_queue[qp->s_tail_ack_queue].sent = 1;
171 bth0 = qp->s_ack_state << 24;
172 bth2 = qp->s_ack_rdma_psn++ & IPATH_PSN_MASK;
183 qp->s_ack_state = OP(SEND_ONLY);
184 qp->s_flags &= ~IPATH_S_ACK_PENDING;
185 qp->s_cur_sge = NULL;
186 if (qp->s_nak_state)
188 cpu_to_be32((qp->r_msn & IPATH_MSN_MASK) |
189 (qp->s_nak_state <<
192 ohdr->u.aeth = ipath_compute_aeth(qp);
196 bth2 = qp->s_ack_psn & IPATH_PSN_MASK;
198 qp->s_hdrwords = hwords;
199 qp->s_cur_size = len;
200 ipath_make_ruc_header(dev, qp, ohdr, bth0, bth2);
209 * @qp: a pointer to the QP
213 int ipath_make_rc_req(struct ipath_qp *qp) ipath_make_rc_req() argument
215 struct ipath_ibdev *dev = to_idev(qp->ibqp.device); ipath_make_rc_req()
223 u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu); ipath_make_rc_req()
228 ohdr = &qp->s_hdr.u.oth; ipath_make_rc_req()
229 if (qp->remote_ah_attr.ah_flags & IB_AH_GRH) ipath_make_rc_req()
230 ohdr = &qp->s_hdr.u.l.oth; ipath_make_rc_req()
236 spin_lock_irqsave(&qp->s_lock, flags); ipath_make_rc_req()
239 if ((qp->r_head_ack_queue != qp->s_tail_ack_queue || ipath_make_rc_req()
240 (qp->s_flags & IPATH_S_ACK_PENDING) || ipath_make_rc_req()
241 qp->s_ack_state != OP(ACKNOWLEDGE)) && ipath_make_rc_req()
242 ipath_make_rc_ack(dev, qp, ohdr, pmtu)) ipath_make_rc_req()
245 if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK)) { ipath_make_rc_req()
246 if (!(ib_ipath_state_ops[qp->state] & IPATH_FLUSH_SEND)) ipath_make_rc_req()
249 if (qp->s_last == qp->s_head) ipath_make_rc_req()
252 if (atomic_read(&qp->s_dma_busy)) { ipath_make_rc_req()
253 qp->s_flags |= IPATH_S_WAIT_DMA; ipath_make_rc_req()
256 wqe = get_swqe_ptr(qp, qp->s_last); ipath_make_rc_req()
257 ipath_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR); ipath_make_rc_req()
262 if (qp->s_rnr_timeout) { ipath_make_rc_req()
263 qp->s_flags |= IPATH_S_WAITING; ipath_make_rc_req()
272 wqe = get_swqe_ptr(qp, qp->s_cur); ipath_make_rc_req()
273 switch (qp->s_state) { ipath_make_rc_req()
275 if (!(ib_ipath_state_ops[qp->state] & ipath_make_rc_req()
286 if (qp->s_cur == qp->s_tail) { ipath_make_rc_req()
288 if (qp->s_tail == qp->s_head) ipath_make_rc_req()
295 qp->s_num_rd_atomic) { ipath_make_rc_req()
296 qp->s_flags |= IPATH_S_FENCE_PENDING; ipath_make_rc_req()
299 wqe->psn = qp->s_next_psn; ipath_make_rc_req()
308 ss = &qp->s_sge; ipath_make_rc_req()
314 if (qp->s_lsn != (u32) -1 && ipath_make_rc_req()
315 ipath_cmp24(wqe->ssn, qp->s_lsn + 1) > 0) { ipath_make_rc_req()
316 qp->s_flags |= IPATH_S_WAIT_SSN_CREDIT; ipath_make_rc_req()
322 qp->s_state = OP(SEND_FIRST); ipath_make_rc_req()
327 qp->s_state = OP(SEND_ONLY); ipath_make_rc_req()
329 qp->s_state = OP(SEND_ONLY_WITH_IMMEDIATE); ipath_make_rc_req()
337 if (++qp->s_cur == qp->s_size) ipath_make_rc_req()
338 qp->s_cur = 0; ipath_make_rc_req()
342 if (newreq && qp->s_lsn != (u32) -1) ipath_make_rc_req()
343 qp->s_lsn++; ipath_make_rc_req()
347 if (qp->s_lsn != (u32) -1 && ipath_make_rc_req()
348 ipath_cmp24(wqe->ssn, qp->s_lsn + 1) > 0) { ipath_make_rc_req()
349 qp->s_flags |= IPATH_S_WAIT_SSN_CREDIT; ipath_make_rc_req()
361 qp->s_state = OP(RDMA_WRITE_FIRST); ipath_make_rc_req()
366 qp->s_state = OP(RDMA_WRITE_ONLY); ipath_make_rc_req()
368 qp->s_state = ipath_make_rc_req()
377 if (++qp->s_cur == qp->s_size) ipath_make_rc_req()
378 qp->s_cur = 0; ipath_make_rc_req()
387 if (qp->s_num_rd_atomic >= ipath_make_rc_req()
388 qp->s_max_rd_atomic) { ipath_make_rc_req()
389 qp->s_flags |= IPATH_S_RDMAR_PENDING; ipath_make_rc_req()
392 qp->s_num_rd_atomic++; ipath_make_rc_req()
393 if (qp->s_lsn != (u32) -1) ipath_make_rc_req()
394 qp->s_lsn++; ipath_make_rc_req()
400 qp->s_next_psn += (len - 1) / pmtu; ipath_make_rc_req()
401 wqe->lpsn = qp->s_next_psn++; ipath_make_rc_req()
408 qp->s_state = OP(RDMA_READ_REQUEST); ipath_make_rc_req()
412 if (++qp->s_cur == qp->s_size) ipath_make_rc_req()
413 qp->s_cur = 0; ipath_make_rc_req()
423 if (qp->s_num_rd_atomic >= ipath_make_rc_req()
424 qp->s_max_rd_atomic) { ipath_make_rc_req()
425 qp->s_flags |= IPATH_S_RDMAR_PENDING; ipath_make_rc_req()
428 qp->s_num_rd_atomic++; ipath_make_rc_req()
429 if (qp->s_lsn != (u32) -1) ipath_make_rc_req()
430 qp->s_lsn++; ipath_make_rc_req()
434 qp->s_state = OP(COMPARE_SWAP); ipath_make_rc_req()
440 qp->s_state = OP(FETCH_ADD); ipath_make_rc_req()
454 if (++qp->s_cur == qp->s_size) ipath_make_rc_req()
455 qp->s_cur = 0; ipath_make_rc_req()
461 qp->s_sge.sge = wqe->sg_list[0]; ipath_make_rc_req()
462 qp->s_sge.sg_list = wqe->sg_list + 1; ipath_make_rc_req()
463 qp->s_sge.num_sge = wqe->wr.num_sge; ipath_make_rc_req()
464 qp->s_len = wqe->length; ipath_make_rc_req()
466 qp->s_tail++; ipath_make_rc_req()
467 if (qp->s_tail >= qp->s_size) ipath_make_rc_req()
468 qp->s_tail = 0; ipath_make_rc_req()
470 bth2 |= qp->s_psn & IPATH_PSN_MASK; ipath_make_rc_req()
472 qp->s_psn = wqe->lpsn + 1; ipath_make_rc_req()
474 qp->s_psn++; ipath_make_rc_req()
475 if (ipath_cmp24(qp->s_psn, qp->s_next_psn) > 0) ipath_make_rc_req()
476 qp->s_next_psn = qp->s_psn; ipath_make_rc_req()
484 if (list_empty(&qp->timerwait)) ipath_make_rc_req()
485 list_add_tail(&qp->timerwait, ipath_make_rc_req()
495 ipath_init_restart(qp, wqe); ipath_make_rc_req()
498 qp->s_state = OP(SEND_MIDDLE); ipath_make_rc_req()
501 bth2 = qp->s_psn++ & IPATH_PSN_MASK; ipath_make_rc_req()
502 if (ipath_cmp24(qp->s_psn, qp->s_next_psn) > 0) ipath_make_rc_req()
503 qp->s_next_psn = qp->s_psn; ipath_make_rc_req()
504 ss = &qp->s_sge; ipath_make_rc_req()
505 len = qp->s_len; ipath_make_rc_req()
511 qp->s_state = OP(SEND_LAST); ipath_make_rc_req()
513 qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE); ipath_make_rc_req()
521 qp->s_cur++; ipath_make_rc_req()
522 if (qp->s_cur >= qp->s_size) ipath_make_rc_req()
523 qp->s_cur = 0; ipath_make_rc_req()
531 ipath_init_restart(qp, wqe); ipath_make_rc_req()
534 qp->s_state = OP(RDMA_WRITE_MIDDLE); ipath_make_rc_req()
537 bth2 = qp->s_psn++ & IPATH_PSN_MASK; ipath_make_rc_req()
538 if (ipath_cmp24(qp->s_psn, qp->s_next_psn) > 0) ipath_make_rc_req()
539 qp->s_next_psn = qp->s_psn; ipath_make_rc_req()
540 ss = &qp->s_sge; ipath_make_rc_req()
541 len = qp->s_len; ipath_make_rc_req()
547 qp->s_state = OP(RDMA_WRITE_LAST); ipath_make_rc_req()
549 qp->s_state = OP(RDMA_WRITE_LAST_WITH_IMMEDIATE); ipath_make_rc_req()
557 qp->s_cur++; ipath_make_rc_req()
558 if (qp->s_cur >= qp->s_size) ipath_make_rc_req()
559 qp->s_cur = 0; ipath_make_rc_req()
567 ipath_init_restart(qp, wqe); ipath_make_rc_req()
568 len = ((qp->s_psn - wqe->psn) & IPATH_PSN_MASK) * pmtu; ipath_make_rc_req()
573 ohdr->u.rc.reth.length = cpu_to_be32(qp->s_len); ipath_make_rc_req()
574 qp->s_state = OP(RDMA_READ_REQUEST); ipath_make_rc_req()
576 bth2 = qp->s_psn & IPATH_PSN_MASK; ipath_make_rc_req()
577 qp->s_psn = wqe->lpsn + 1; ipath_make_rc_req()
580 qp->s_cur++; ipath_make_rc_req()
581 if (qp->s_cur == qp->s_size) ipath_make_rc_req()
582 qp->s_cur = 0; ipath_make_rc_req()
585 if (ipath_cmp24(qp->s_psn, qp->s_last_psn + IPATH_PSN_CREDIT - 1) >= 0) ipath_make_rc_req()
587 qp->s_len -= len; ipath_make_rc_req()
588 qp->s_hdrwords = hwords; ipath_make_rc_req()
589 qp->s_cur_sge = ss; ipath_make_rc_req()
590 qp->s_cur_size = len; ipath_make_rc_req()
591 ipath_make_ruc_header(dev, qp, ohdr, bth0 | (qp->s_state << 24), bth2); ipath_make_rc_req()
597 qp->s_flags &= ~IPATH_S_BUSY; ipath_make_rc_req()
599 spin_unlock_irqrestore(&qp->s_lock, flags); ipath_make_rc_req()
605 * @qp: a pointer to the QP
612 static void send_rc_ack(struct ipath_qp *qp) send_rc_ack() argument
614 struct ipath_ibdev *dev = to_idev(qp->ibqp.device); send_rc_ack()
624 spin_lock_irqsave(&qp->s_lock, flags); send_rc_ack()
627 if (qp->r_head_ack_queue != qp->s_tail_ack_queue || send_rc_ack()
628 (qp->s_flags & IPATH_S_ACK_PENDING) || send_rc_ack()
629 qp->s_ack_state != OP(ACKNOWLEDGE)) send_rc_ack()
632 spin_unlock_irqrestore(&qp->s_lock, flags); send_rc_ack()
648 spin_lock_irqsave(&qp->s_lock, flags); send_rc_ack()
657 if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) { send_rc_ack()
659 &qp->remote_ah_attr.grh, send_rc_ack()
665 bth0 = ipath_get_pkey(dd, qp->s_pkey_index) | send_rc_ack()
667 if (qp->r_nak_state) send_rc_ack()
668 ohdr->u.aeth = cpu_to_be32((qp->r_msn & IPATH_MSN_MASK) | send_rc_ack()
669 (qp->r_nak_state << send_rc_ack()
672 ohdr->u.aeth = ipath_compute_aeth(qp); send_rc_ack()
673 lrh0 |= qp->remote_ah_attr.sl << 4; send_rc_ack()
675 hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid); send_rc_ack()
678 qp->remote_ah_attr.src_path_bits); send_rc_ack()
680 ohdr->bth[1] = cpu_to_be32(qp->remote_qpn); send_rc_ack()
681 ohdr->bth[2] = cpu_to_be32(qp->r_ack_psn & IPATH_PSN_MASK); send_rc_ack()
701 if (ib_ipath_state_ops[qp->state] & IPATH_PROCESS_RECV_OK) { send_rc_ack()
703 qp->s_flags |= IPATH_S_ACK_PENDING; send_rc_ack()
704 qp->s_nak_state = qp->r_nak_state; send_rc_ack()
705 qp->s_ack_psn = qp->r_ack_psn; send_rc_ack()
708 ipath_schedule_send(qp); send_rc_ack()
710 spin_unlock_irqrestore(&qp->s_lock, flags); send_rc_ack()
717 * @qp: the QP
724 static void reset_psn(struct ipath_qp *qp, u32 psn) reset_psn() argument
726 u32 n = qp->s_last; reset_psn()
727 struct ipath_swqe *wqe = get_swqe_ptr(qp, n); reset_psn()
730 qp->s_cur = n; reset_psn()
737 qp->s_state = OP(SEND_LAST); reset_psn()
746 if (++n == qp->s_size) reset_psn()
748 if (n == qp->s_tail) reset_psn()
750 wqe = get_swqe_ptr(qp, n); reset_psn()
754 qp->s_cur = n; reset_psn()
760 qp->s_state = OP(SEND_LAST); reset_psn()
774 qp->s_state = OP(RDMA_READ_RESPONSE_FIRST); reset_psn()
779 qp->s_state = OP(RDMA_READ_RESPONSE_LAST); reset_psn()
783 qp->s_state = OP(RDMA_READ_RESPONSE_MIDDLE); reset_psn()
791 qp->s_state = OP(SEND_LAST); reset_psn()
794 qp->s_psn = psn; reset_psn()
799 * @qp: the QP to restart
805 void ipath_restart_rc(struct ipath_qp *qp, u32 psn) ipath_restart_rc() argument
807 struct ipath_swqe *wqe = get_swqe_ptr(qp, qp->s_last); ipath_restart_rc()
810 if (qp->s_retry == 0) { ipath_restart_rc()
811 ipath_send_complete(qp, wqe, IB_WC_RETRY_EXC_ERR); ipath_restart_rc()
812 ipath_error_qp(qp, IB_WC_WR_FLUSH_ERR); ipath_restart_rc()
815 qp->s_retry--; ipath_restart_rc()
821 dev = to_idev(qp->ibqp.device); ipath_restart_rc()
823 if (!list_empty(&qp->timerwait)) ipath_restart_rc()
824 list_del_init(&qp->timerwait); ipath_restart_rc()
825 if (!list_empty(&qp->piowait)) ipath_restart_rc()
826 list_del_init(&qp->piowait); ipath_restart_rc()
832 dev->n_rc_resends += (qp->s_psn - psn) & IPATH_PSN_MASK; ipath_restart_rc()
834 reset_psn(qp, psn); ipath_restart_rc()
835 ipath_schedule_send(qp); ipath_restart_rc()
841 static inline void update_last_psn(struct ipath_qp *qp, u32 psn) update_last_psn() argument
843 qp->s_last_psn = psn; update_last_psn()
848 * @qp: the QP the ACK came in on
857 static int do_rc_ack(struct ipath_qp *qp, u32 aeth, u32 psn, int opcode, do_rc_ack() argument
860 struct ipath_ibdev *dev = to_idev(qp->ibqp.device); do_rc_ack()
875 if (!list_empty(&qp->timerwait)) do_rc_ack()
876 list_del_init(&qp->timerwait); do_rc_ack()
888 wqe = get_swqe_ptr(qp, qp->s_last); do_rc_ack()
925 update_last_psn(qp, wqe->psn - 1); do_rc_ack()
927 ipath_restart_rc(qp, wqe->psn); do_rc_ack()
937 if (qp->s_num_rd_atomic && do_rc_ack()
941 qp->s_num_rd_atomic--; do_rc_ack()
943 if (((qp->s_flags & IPATH_S_FENCE_PENDING) && do_rc_ack()
944 !qp->s_num_rd_atomic) || do_rc_ack()
945 qp->s_flags & IPATH_S_RDMAR_PENDING) do_rc_ack()
946 ipath_schedule_send(qp); do_rc_ack()
949 if (!(qp->s_flags & IPATH_S_SIGNAL_REQ_WR) || do_rc_ack()
956 wc.qp = &qp->ibqp; do_rc_ack()
957 wc.src_qp = qp->remote_qpn; do_rc_ack()
958 wc.slid = qp->remote_ah_attr.dlid; do_rc_ack()
959 wc.sl = qp->remote_ah_attr.sl; do_rc_ack()
960 ipath_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 0); do_rc_ack()
962 qp->s_retry = qp->s_retry_cnt; do_rc_ack()
968 if (qp->s_last == qp->s_cur) { do_rc_ack()
969 if (++qp->s_cur >= qp->s_size) do_rc_ack()
970 qp->s_cur = 0; do_rc_ack()
971 qp->s_last = qp->s_cur; do_rc_ack()
972 if (qp->s_last == qp->s_tail) do_rc_ack()
974 wqe = get_swqe_ptr(qp, qp->s_cur); do_rc_ack()
975 qp->s_state = OP(SEND_LAST); do_rc_ack()
976 qp->s_psn = wqe->psn; do_rc_ack()
978 if (++qp->s_last >= qp->s_size) do_rc_ack()
979 qp->s_last = 0; do_rc_ack()
980 if (qp->state == IB_QPS_SQD && qp->s_last == qp->s_cur) do_rc_ack()
981 qp->s_draining = 0; do_rc_ack()
982 if (qp->s_last == qp->s_tail) do_rc_ack()
984 wqe = get_swqe_ptr(qp, qp->s_last); do_rc_ack()
992 if (qp->s_last != qp->s_tail) { do_rc_ack()
994 if (list_empty(&qp->timerwait)) do_rc_ack()
995 list_add_tail(&qp->timerwait, do_rc_ack()
1003 if (ipath_cmp24(qp->s_psn, psn) <= 0) { do_rc_ack()
1004 reset_psn(qp, psn + 1); do_rc_ack()
1005 ipath_schedule_send(qp); do_rc_ack()
1007 } else if (ipath_cmp24(qp->s_psn, psn) <= 0) { do_rc_ack()
1008 qp->s_state = OP(SEND_LAST); do_rc_ack()
1009 qp->s_psn = psn + 1; do_rc_ack()
1011 ipath_get_credit(qp, aeth); do_rc_ack()
1012 qp->s_rnr_retry = qp->s_rnr_retry_cnt; do_rc_ack()
1013 qp->s_retry = qp->s_retry_cnt; do_rc_ack()
1014 update_last_psn(qp, psn); do_rc_ack()
1020 if (qp->s_last == qp->s_tail) do_rc_ack()
1022 if (qp->s_rnr_retry == 0) { do_rc_ack()
1026 if (qp->s_rnr_retry_cnt < 7) do_rc_ack()
1027 qp->s_rnr_retry--; do_rc_ack()
1030 update_last_psn(qp, psn - 1); do_rc_ack()
1036 (qp->s_psn - psn) & IPATH_PSN_MASK; do_rc_ack()
1038 reset_psn(qp, psn); do_rc_ack()
1040 qp->s_rnr_timeout = do_rc_ack()
1043 ipath_insert_rnr_queue(qp); do_rc_ack()
1044 ipath_schedule_send(qp); do_rc_ack()
1048 if (qp->s_last == qp->s_tail) do_rc_ack()
1051 update_last_psn(qp, psn - 1); do_rc_ack()
1062 ipath_restart_rc(qp, psn); do_rc_ack()
1079 ipath_send_complete(qp, wqe, status); do_rc_ack()
1080 ipath_error_qp(qp, IB_WC_WR_FLUSH_ERR); do_rc_ack()
1087 qp->s_rnr_retry = qp->s_rnr_retry_cnt; do_rc_ack()
1106 * @qp: the QP for this packet
1120 struct ipath_qp *qp, ipath_rc_rcv_resp()
1133 spin_lock_irqsave(&qp->s_lock, flags); ipath_rc_rcv_resp()
1136 if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_RECV_OK)) ipath_rc_rcv_resp()
1140 if (ipath_cmp24(psn, qp->s_next_psn) >= 0) ipath_rc_rcv_resp()
1144 diff = ipath_cmp24(psn, qp->s_last_psn); ipath_rc_rcv_resp()
1155 ipath_get_credit(qp, aeth); ipath_rc_rcv_resp()
1160 if (unlikely(qp->s_last == qp->s_tail)) ipath_rc_rcv_resp()
1162 wqe = get_swqe_ptr(qp, qp->s_last); ipath_rc_rcv_resp()
1185 if (!do_rc_ack(qp, aeth, psn, opcode, val) || ipath_rc_rcv_resp()
1189 wqe = get_swqe_ptr(qp, qp->s_last); ipath_rc_rcv_resp()
1192 qp->r_flags &= ~IPATH_R_RDMAR_SEQ; ipath_rc_rcv_resp()
1198 qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge, ipath_rc_rcv_resp()
1204 if (unlikely(ipath_cmp24(psn, qp->s_last_psn + 1))) { OP()
1206 if (qp->r_flags & IPATH_R_RDMAR_SEQ) OP()
1208 qp->r_flags |= IPATH_R_RDMAR_SEQ; OP()
1209 ipath_restart_rc(qp, qp->s_last_psn + 1); OP()
1217 if (unlikely(pmtu >= qp->s_rdma_read_len))
1222 if (qp->s_rnr_timeout == 0 && !list_empty(&qp->timerwait))
1223 list_move_tail(&qp->timerwait,
1228 qp->s_retry = qp->s_retry_cnt;
1234 qp->s_rdma_read_len -= pmtu;
1235 update_last_psn(qp, psn);
1236 spin_unlock_irqrestore(&qp->s_lock, flags);
1237 ipath_copy_sge(&qp->s_rdma_read_sge, data, pmtu);
1245 if (!do_rc_ack(qp, aeth, psn, opcode, 0))
1261 wqe = get_swqe_ptr(qp, qp->s_last);
1262 qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge,
1268 if (unlikely(ipath_cmp24(psn, qp->s_last_psn + 1))) { OP()
1270 if (qp->r_flags & IPATH_R_RDMAR_SEQ) OP()
1272 qp->r_flags |= IPATH_R_RDMAR_SEQ; OP()
1273 ipath_restart_rc(qp, qp->s_last_psn + 1); OP()
1289 if (unlikely(tlen != qp->s_rdma_read_len))
1297 ipath_copy_sge(&qp->s_rdma_read_sge, data, tlen);
1298 (void) do_rc_ack(qp, aeth, psn,
1310 ipath_send_complete(qp, wqe, status);
1311 ipath_error_qp(qp, IB_WC_WR_FLUSH_ERR);
1313 spin_unlock_irqrestore(&qp->s_lock, flags);
1323 * @qp: the QP for this packet
1338 struct ipath_qp *qp, ipath_rc_rcv_error()
1355 if (!qp->r_nak_state) { ipath_rc_rcv_error()
1356 qp->r_nak_state = IB_NAK_PSN_ERROR; ipath_rc_rcv_error()
1358 qp->r_ack_psn = qp->r_psn; ipath_rc_rcv_error()
1382 spin_lock_irqsave(&qp->s_lock, flags); ipath_rc_rcv_error()
1384 if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_RECV_OK)) ipath_rc_rcv_error()
1387 for (i = qp->r_head_ack_queue; ; i = prev) { ipath_rc_rcv_error()
1388 if (i == qp->s_tail_ack_queue) ipath_rc_rcv_error()
1394 if (prev == qp->r_head_ack_queue) { ipath_rc_rcv_error()
1398 e = &qp->s_ack_queue[prev]; ipath_rc_rcv_error()
1404 if (prev == qp->s_tail_ack_queue) ipath_rc_rcv_error()
1437 ib_mtu_enum_to_int(qp->path_mtu); OP()
1446 ok = ipath_rkey_ok(qp, &e->rdma_sge, OP()
1460 qp->s_ack_state = OP(ACKNOWLEDGE); OP()
1461 qp->s_tail_ack_queue = prev; OP()
1474 qp->s_ack_state = OP(ACKNOWLEDGE); OP()
1475 qp->s_tail_ack_queue = prev; OP()
1486 if (i == qp->r_head_ack_queue) {
1487 spin_unlock_irqrestore(&qp->s_lock, flags);
1488 qp->r_nak_state = 0;
1489 qp->r_ack_psn = qp->r_psn - 1;
1497 if (qp->r_head_ack_queue == qp->s_tail_ack_queue &&
1498 !(qp->s_flags & IPATH_S_ACK_PENDING) &&
1499 qp->s_ack_state == OP(ACKNOWLEDGE)) {
1500 spin_unlock_irqrestore(&qp->s_lock, flags);
1501 qp->r_nak_state = 0;
1502 qp->r_ack_psn = qp->s_ack_queue[i].psn - 1;
1509 qp->s_ack_state = OP(ACKNOWLEDGE);
1510 qp->s_tail_ack_queue = i;
1513 qp->r_nak_state = 0;
1514 ipath_schedule_send(qp);
1517 spin_unlock_irqrestore(&qp->s_lock, flags);
1525 void ipath_rc_error(struct ipath_qp *qp, enum ib_wc_status err) ipath_rc_error() argument
1530 spin_lock_irqsave(&qp->s_lock, flags); ipath_rc_error()
1531 lastwqe = ipath_error_qp(qp, err); ipath_rc_error()
1532 spin_unlock_irqrestore(&qp->s_lock, flags); ipath_rc_error()
1537 ev.device = qp->ibqp.device; ipath_rc_error()
1538 ev.element.qp = &qp->ibqp; ipath_rc_error()
1540 qp->ibqp.event_handler(&ev, qp->ibqp.qp_context); ipath_rc_error()
1544 static inline void ipath_update_ack_queue(struct ipath_qp *qp, unsigned n) ipath_update_ack_queue() argument
1551 if (n == qp->s_tail_ack_queue) { ipath_update_ack_queue()
1552 qp->s_tail_ack_queue = next; ipath_update_ack_queue()
1553 qp->s_ack_state = OP(ACKNOWLEDGE); ipath_update_ack_queue()
1564 * @qp: the QP for this packet
1571 int has_grh, void *data, u32 tlen, struct ipath_qp *qp) ipath_rc_rcv()
1579 u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu); ipath_rc_rcv()
1586 if (unlikely(be16_to_cpu(hdr->lrh[3]) != qp->remote_ah_attr.dlid)) ipath_rc_rcv()
1620 ipath_rc_rcv_resp(dev, ohdr, data, tlen, qp, opcode, psn, ipath_rc_rcv()
1626 diff = ipath_cmp24(psn, qp->r_psn); ipath_rc_rcv()
1628 if (ipath_rc_rcv_error(dev, ohdr, data, qp, opcode, ipath_rc_rcv()
1635 switch (qp->r_state) { ipath_rc_rcv()
1673 if (!ipath_get_rwqe(qp, 0)) ipath_rc_rcv()
1675 qp->r_rcv_len = 0; ipath_rc_rcv()
1683 qp->r_rcv_len += pmtu; ipath_rc_rcv()
1684 if (unlikely(qp->r_rcv_len > qp->r_len)) ipath_rc_rcv()
1686 ipath_copy_sge(&qp->r_sge, data, pmtu); ipath_rc_rcv()
1691 if (!ipath_get_rwqe(qp, 1)) ipath_rc_rcv()
1697 if (!ipath_get_rwqe(qp, 0)) ipath_rc_rcv()
1699 qp->r_rcv_len = 0; ipath_rc_rcv()
1726 wc.byte_len = tlen + qp->r_rcv_len;
1727 if (unlikely(wc.byte_len > qp->r_len))
1729 ipath_copy_sge(&qp->r_sge, data, tlen);
1730 qp->r_msn++;
1731 if (!test_and_clear_bit(IPATH_R_WRID_VALID, &qp->r_aflags))
1733 wc.wr_id = qp->r_wr_id;
1740 wc.qp = &qp->ibqp;
1741 wc.src_qp = qp->remote_qpn;
1742 wc.slid = qp->remote_ah_attr.dlid;
1743 wc.sl = qp->remote_ah_attr.sl;
1745 ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
1753 if (unlikely(!(qp->qp_access_flags &
1765 qp->r_len = be32_to_cpu(reth->length);
1766 qp->r_rcv_len = 0;
1767 if (qp->r_len != 0) {
1773 ok = ipath_rkey_ok(qp, &qp->r_sge,
1774 qp->r_len, vaddr, rkey,
1779 qp->r_sge.sg_list = NULL;
1780 qp->r_sge.sge.mr = NULL;
1781 qp->r_sge.sge.vaddr = NULL;
1782 qp->r_sge.sge.length = 0;
1783 qp->r_sge.sge.sge_length = 0;
1789 if (!ipath_get_rwqe(qp, 1))
1798 if (unlikely(!(qp->qp_access_flags & OP()
1801 next = qp->r_head_ack_queue + 1; OP()
1804 spin_lock_irqsave(&qp->s_lock, flags); OP()
1806 if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_RECV_OK)) OP()
1808 if (unlikely(next == qp->s_tail_ack_queue)) { OP()
1809 if (!qp->s_ack_queue[next].sent) OP()
1811 ipath_update_ack_queue(qp, next); OP()
1813 e = &qp->s_ack_queue[qp->r_head_ack_queue]; OP()
1828 ok = ipath_rkey_ok(qp, &e->rdma_sge, len, vaddr, OP()
1837 qp->r_psn += (len - 1) / pmtu; OP()
1854 qp->r_msn++; OP()
1855 qp->r_psn++; OP()
1856 qp->r_state = opcode; OP()
1857 qp->r_nak_state = 0; OP()
1858 qp->r_head_ack_queue = next; OP()
1861 ipath_schedule_send(qp); OP()
1876 if (unlikely(!(qp->qp_access_flags & OP()
1879 next = qp->r_head_ack_queue + 1; OP()
1882 spin_lock_irqsave(&qp->s_lock, flags); OP()
1884 if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_RECV_OK)) OP()
1886 if (unlikely(next == qp->s_tail_ack_queue)) { OP()
1887 if (!qp->s_ack_queue[next].sent) OP()
1889 ipath_update_ack_queue(qp, next); OP()
1901 if (unlikely(!ipath_rkey_ok(qp, &qp->r_sge, OP()
1906 maddr = (atomic64_t *) qp->r_sge.sge.vaddr; OP()
1908 e = &qp->s_ack_queue[qp->r_head_ack_queue]; OP()
1911 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr, OP()
1917 qp->r_msn++; OP()
1918 qp->r_psn++; OP()
1919 qp->r_state = opcode; OP()
1920 qp->r_nak_state = 0; OP()
1921 qp->r_head_ack_queue = next; OP()
1924 ipath_schedule_send(qp); OP()
1933 qp->r_psn++;
1934 qp->r_state = opcode;
1935 qp->r_ack_psn = psn;
1936 qp->r_nak_state = 0;
1943 qp->r_nak_state = IB_RNR_NAK | qp->r_min_rnr_timer;
1944 qp->r_ack_psn = qp->r_psn;
1948 spin_unlock_irqrestore(&qp->s_lock, flags);
1950 ipath_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
1951 qp->r_nak_state = IB_NAK_INVALID_REQUEST;
1952 qp->r_ack_psn = qp->r_psn;
1956 spin_unlock_irqrestore(&qp->s_lock, flags);
1958 ipath_rc_error(qp, IB_WC_LOC_PROT_ERR);
1959 qp->r_nak_state = IB_NAK_REMOTE_ACCESS_ERROR;
1960 qp->r_ack_psn = qp->r_psn;
1962 send_rc_ack(qp);
1966 spin_unlock_irqrestore(&qp->s_lock, flags);
1117 ipath_rc_rcv_resp(struct ipath_ibdev *dev, struct ipath_other_headers *ohdr, void *data, u32 tlen, struct ipath_qp *qp, u32 opcode, u32 psn, u32 hdrsize, u32 pmtu, int header_in_data) ipath_rc_rcv_resp() argument
1335 ipath_rc_rcv_error(struct ipath_ibdev *dev, struct ipath_other_headers *ohdr, void *data, struct ipath_qp *qp, u32 opcode, u32 psn, int diff, int header_in_data) ipath_rc_rcv_error() argument
1570 ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr, int has_grh, void *data, u32 tlen, struct ipath_qp *qp) ipath_rc_rcv() argument
H A Dipath_uc.c42 * @qp: a pointer to the QP
46 int ipath_make_uc_req(struct ipath_qp *qp) ipath_make_uc_req() argument
54 u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu); ipath_make_uc_req()
57 spin_lock_irqsave(&qp->s_lock, flags); ipath_make_uc_req()
59 if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK)) { ipath_make_uc_req()
60 if (!(ib_ipath_state_ops[qp->state] & IPATH_FLUSH_SEND)) ipath_make_uc_req()
63 if (qp->s_last == qp->s_head) ipath_make_uc_req()
66 if (atomic_read(&qp->s_dma_busy)) { ipath_make_uc_req()
67 qp->s_flags |= IPATH_S_WAIT_DMA; ipath_make_uc_req()
70 wqe = get_swqe_ptr(qp, qp->s_last); ipath_make_uc_req()
71 ipath_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR); ipath_make_uc_req()
75 ohdr = &qp->s_hdr.u.oth; ipath_make_uc_req()
76 if (qp->remote_ah_attr.ah_flags & IB_AH_GRH) ipath_make_uc_req()
77 ohdr = &qp->s_hdr.u.l.oth; ipath_make_uc_req()
84 wqe = get_swqe_ptr(qp, qp->s_cur); ipath_make_uc_req()
85 qp->s_wqe = NULL; ipath_make_uc_req()
86 switch (qp->s_state) { ipath_make_uc_req()
88 if (!(ib_ipath_state_ops[qp->state] & ipath_make_uc_req()
92 if (qp->s_cur == qp->s_head) ipath_make_uc_req()
97 qp->s_psn = wqe->psn = qp->s_next_psn; ipath_make_uc_req()
98 qp->s_sge.sge = wqe->sg_list[0]; ipath_make_uc_req()
99 qp->s_sge.sg_list = wqe->sg_list + 1; ipath_make_uc_req()
100 qp->s_sge.num_sge = wqe->wr.num_sge; ipath_make_uc_req()
101 qp->s_len = len = wqe->length; ipath_make_uc_req()
106 qp->s_state = OP(SEND_FIRST); ipath_make_uc_req()
111 qp->s_state = OP(SEND_ONLY); ipath_make_uc_req()
113 qp->s_state = ipath_make_uc_req()
121 qp->s_wqe = wqe; ipath_make_uc_req()
122 if (++qp->s_cur >= qp->s_size) ipath_make_uc_req()
123 qp->s_cur = 0; ipath_make_uc_req()
135 qp->s_state = OP(RDMA_WRITE_FIRST); ipath_make_uc_req()
140 qp->s_state = OP(RDMA_WRITE_ONLY); ipath_make_uc_req()
142 qp->s_state = ipath_make_uc_req()
150 qp->s_wqe = wqe; ipath_make_uc_req()
151 if (++qp->s_cur >= qp->s_size) ipath_make_uc_req()
152 qp->s_cur = 0; ipath_make_uc_req()
161 qp->s_state = OP(SEND_MIDDLE); ipath_make_uc_req()
164 len = qp->s_len; ipath_make_uc_req()
170 qp->s_state = OP(SEND_LAST); ipath_make_uc_req()
172 qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE); ipath_make_uc_req()
179 qp->s_wqe = wqe; ipath_make_uc_req()
180 if (++qp->s_cur >= qp->s_size) ipath_make_uc_req()
181 qp->s_cur = 0; ipath_make_uc_req()
185 qp->s_state = OP(RDMA_WRITE_MIDDLE); ipath_make_uc_req()
188 len = qp->s_len; ipath_make_uc_req()
194 qp->s_state = OP(RDMA_WRITE_LAST); ipath_make_uc_req()
196 qp->s_state = ipath_make_uc_req()
204 qp->s_wqe = wqe; ipath_make_uc_req()
205 if (++qp->s_cur >= qp->s_size) ipath_make_uc_req()
206 qp->s_cur = 0; ipath_make_uc_req()
209 qp->s_len -= len; ipath_make_uc_req()
210 qp->s_hdrwords = hwords; ipath_make_uc_req()
211 qp->s_cur_sge = &qp->s_sge; ipath_make_uc_req()
212 qp->s_cur_size = len; ipath_make_uc_req()
213 ipath_make_ruc_header(to_idev(qp->ibqp.device), ipath_make_uc_req()
214 qp, ohdr, bth0 | (qp->s_state << 24), ipath_make_uc_req()
215 qp->s_next_psn++ & IPATH_PSN_MASK); ipath_make_uc_req()
221 qp->s_flags &= ~IPATH_S_BUSY; ipath_make_uc_req()
223 spin_unlock_irqrestore(&qp->s_lock, flags); ipath_make_uc_req()
234 * @qp: the QP for this packet.
241 int has_grh, void *data, u32 tlen, struct ipath_qp *qp) ipath_uc_rcv()
249 u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu); ipath_uc_rcv()
254 if (unlikely(be16_to_cpu(hdr->lrh[3]) != qp->remote_ah_attr.dlid)) ipath_uc_rcv()
288 if (unlikely(ipath_cmp24(psn, qp->r_psn) != 0)) { ipath_uc_rcv()
293 qp->r_psn = psn; ipath_uc_rcv()
295 qp->r_state = OP(SEND_LAST); ipath_uc_rcv()
314 switch (qp->r_state) { ipath_uc_rcv()
348 if (qp->r_flags & IPATH_R_REUSE_SGE) { OP()
349 qp->r_flags &= ~IPATH_R_REUSE_SGE; OP()
350 qp->r_sge = qp->s_rdma_read_sge; OP()
351 } else if (!ipath_get_rwqe(qp, 0)) {
356 qp->s_rdma_read_sge = qp->r_sge;
357 qp->r_rcv_len = 0;
366 qp->r_flags |= IPATH_R_REUSE_SGE; OP()
370 qp->r_rcv_len += pmtu;
371 if (unlikely(qp->r_rcv_len > qp->r_len)) {
372 qp->r_flags |= IPATH_R_REUSE_SGE;
376 ipath_copy_sge(&qp->r_sge, data, pmtu);
398 qp->r_flags |= IPATH_R_REUSE_SGE;
404 wc.byte_len = tlen + qp->r_rcv_len;
405 if (unlikely(wc.byte_len > qp->r_len)) {
406 qp->r_flags |= IPATH_R_REUSE_SGE;
412 ipath_copy_sge(&qp->r_sge, data, tlen);
413 wc.wr_id = qp->r_wr_id;
415 wc.qp = &qp->ibqp;
416 wc.src_qp = qp->remote_qpn;
417 wc.slid = qp->remote_ah_attr.dlid;
418 wc.sl = qp->remote_ah_attr.sl;
420 ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
437 qp->r_len = be32_to_cpu(reth->length);
438 qp->r_rcv_len = 0;
439 if (qp->r_len != 0) {
445 ok = ipath_rkey_ok(qp, &qp->r_sge, qp->r_len,
453 qp->r_sge.sg_list = NULL;
454 qp->r_sge.sge.mr = NULL;
455 qp->r_sge.sge.vaddr = NULL;
456 qp->r_sge.sge.length = 0;
457 qp->r_sge.sge.sge_length = 0;
459 if (unlikely(!(qp->qp_access_flags &
475 qp->r_rcv_len += pmtu;
476 if (unlikely(qp->r_rcv_len > qp->r_len)) {
480 ipath_copy_sge(&qp->r_sge, data, pmtu);
505 if (unlikely(tlen + qp->r_rcv_len != qp->r_len)) {
509 if (qp->r_flags & IPATH_R_REUSE_SGE)
510 qp->r_flags &= ~IPATH_R_REUSE_SGE;
511 else if (!ipath_get_rwqe(qp, 1)) {
515 wc.byte_len = qp->r_len;
531 if (unlikely(tlen + qp->r_rcv_len != qp->r_len)) {
535 ipath_copy_sge(&qp->r_sge, data, tlen);
543 qp->r_psn++;
544 qp->r_state = opcode;
240 ipath_uc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr, int has_grh, void *data, u32 tlen, struct ipath_qp *qp) ipath_uc_rcv() argument
H A Dipath_qp.c202 * @qp: the QP
208 static int ipath_alloc_qpn(struct ipath_qp_table *qpt, struct ipath_qp *qp, ipath_alloc_qpn() argument
217 qp->ibqp.qp_num = ret; ipath_alloc_qpn()
223 qp->next = qpt->table[ret]; ipath_alloc_qpn()
224 qpt->table[ret] = qp; ipath_alloc_qpn()
225 atomic_inc(&qp->refcount); ipath_alloc_qpn()
237 * @qp: the QP to remove
242 static void ipath_free_qp(struct ipath_qp_table *qpt, struct ipath_qp *qp) ipath_free_qp() argument
250 qpp = &qpt->table[qp->ibqp.qp_num % qpt->max]; ipath_free_qp()
252 if (q == qp) { ipath_free_qp()
253 *qpp = qp->next; ipath_free_qp()
254 qp->next = NULL; ipath_free_qp()
255 atomic_dec(&qp->refcount); ipath_free_qp()
273 struct ipath_qp *qp; ipath_free_all_qps() local
278 qp = qpt->table[n]; ipath_free_all_qps()
281 for (; qp; qp = qp->next) ipath_free_all_qps()
303 struct ipath_qp *qp; ipath_lookup_qpn() local
307 for (qp = qpt->table[qpn % qpt->max]; qp; qp = qp->next) { ipath_lookup_qpn()
308 if (qp->ibqp.qp_num == qpn) { ipath_lookup_qpn()
309 atomic_inc(&qp->refcount); ipath_lookup_qpn()
315 return qp; ipath_lookup_qpn()
320 * @qp: the QP to reset
323 static void ipath_reset_qp(struct ipath_qp *qp, enum ib_qp_type type) ipath_reset_qp() argument
325 qp->remote_qpn = 0; ipath_reset_qp()
326 qp->qkey = 0; ipath_reset_qp()
327 qp->qp_access_flags = 0; ipath_reset_qp()
328 atomic_set(&qp->s_dma_busy, 0); ipath_reset_qp()
329 qp->s_flags &= IPATH_S_SIGNAL_REQ_WR; ipath_reset_qp()
330 qp->s_hdrwords = 0; ipath_reset_qp()
331 qp->s_wqe = NULL; ipath_reset_qp()
332 qp->s_pkt_delay = 0; ipath_reset_qp()
333 qp->s_draining = 0; ipath_reset_qp()
334 qp->s_psn = 0; ipath_reset_qp()
335 qp->r_psn = 0; ipath_reset_qp()
336 qp->r_msn = 0; ipath_reset_qp()
338 qp->s_state = IB_OPCODE_RC_SEND_LAST; ipath_reset_qp()
339 qp->r_state = IB_OPCODE_RC_SEND_LAST; ipath_reset_qp()
341 qp->s_state = IB_OPCODE_UC_SEND_LAST; ipath_reset_qp()
342 qp->r_state = IB_OPCODE_UC_SEND_LAST; ipath_reset_qp()
344 qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE; ipath_reset_qp()
345 qp->r_nak_state = 0; ipath_reset_qp()
346 qp->r_aflags = 0; ipath_reset_qp()
347 qp->r_flags = 0; ipath_reset_qp()
348 qp->s_rnr_timeout = 0; ipath_reset_qp()
349 qp->s_head = 0; ipath_reset_qp()
350 qp->s_tail = 0; ipath_reset_qp()
351 qp->s_cur = 0; ipath_reset_qp()
352 qp->s_last = 0; ipath_reset_qp()
353 qp->s_ssn = 1; ipath_reset_qp()
354 qp->s_lsn = 0; ipath_reset_qp()
355 memset(qp->s_ack_queue, 0, sizeof(qp->s_ack_queue)); ipath_reset_qp()
356 qp->r_head_ack_queue = 0; ipath_reset_qp()
357 qp->s_tail_ack_queue = 0; ipath_reset_qp()
358 qp->s_num_rd_atomic = 0; ipath_reset_qp()
359 if (qp->r_rq.wq) { ipath_reset_qp()
360 qp->r_rq.wq->head = 0; ipath_reset_qp()
361 qp->r_rq.wq->tail = 0; ipath_reset_qp()
367 * @qp: the QP to put into the error state
376 int ipath_error_qp(struct ipath_qp *qp, enum ib_wc_status err) ipath_error_qp() argument
378 struct ipath_ibdev *dev = to_idev(qp->ibqp.device); ipath_error_qp()
382 if (qp->state == IB_QPS_ERR) ipath_error_qp()
385 qp->state = IB_QPS_ERR; ipath_error_qp()
388 if (!list_empty(&qp->timerwait)) ipath_error_qp()
389 list_del_init(&qp->timerwait); ipath_error_qp()
390 if (!list_empty(&qp->piowait)) ipath_error_qp()
391 list_del_init(&qp->piowait); ipath_error_qp()
395 if (qp->s_last != qp->s_head) ipath_error_qp()
396 ipath_schedule_send(qp); ipath_error_qp()
399 wc.qp = &qp->ibqp; ipath_error_qp()
402 if (test_and_clear_bit(IPATH_R_WRID_VALID, &qp->r_aflags)) { ipath_error_qp()
403 wc.wr_id = qp->r_wr_id; ipath_error_qp()
405 ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1); ipath_error_qp()
409 if (qp->r_rq.wq) { ipath_error_qp()
414 spin_lock(&qp->r_rq.lock); ipath_error_qp()
417 wq = qp->r_rq.wq; ipath_error_qp()
419 if (head >= qp->r_rq.size) ipath_error_qp()
422 if (tail >= qp->r_rq.size) ipath_error_qp()
425 wc.wr_id = get_rwqe_ptr(&qp->r_rq, tail)->wr_id; ipath_error_qp()
426 if (++tail >= qp->r_rq.size) ipath_error_qp()
428 ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1); ipath_error_qp()
432 spin_unlock(&qp->r_rq.lock); ipath_error_qp()
433 } else if (qp->ibqp.event_handler) ipath_error_qp()
453 struct ipath_qp *qp = to_iqp(ibqp); ipath_modify_qp() local
458 spin_lock_irq(&qp->s_lock); ipath_modify_qp()
461 attr->cur_qp_state : qp->state; ipath_modify_qp()
511 if (qp->state != IB_QPS_RESET) { ipath_modify_qp()
512 qp->state = IB_QPS_RESET; ipath_modify_qp()
514 if (!list_empty(&qp->timerwait)) ipath_modify_qp()
515 list_del_init(&qp->timerwait); ipath_modify_qp()
516 if (!list_empty(&qp->piowait)) ipath_modify_qp()
517 list_del_init(&qp->piowait); ipath_modify_qp()
519 qp->s_flags &= ~IPATH_S_ANY_WAIT; ipath_modify_qp()
520 spin_unlock_irq(&qp->s_lock); ipath_modify_qp()
522 tasklet_kill(&qp->s_task); ipath_modify_qp()
523 wait_event(qp->wait_dma, !atomic_read(&qp->s_dma_busy)); ipath_modify_qp()
524 spin_lock_irq(&qp->s_lock); ipath_modify_qp()
526 ipath_reset_qp(qp, ibqp->qp_type); ipath_modify_qp()
530 qp->s_draining = qp->s_last != qp->s_cur; ipath_modify_qp()
531 qp->state = new_state; ipath_modify_qp()
535 if (qp->ibqp.qp_type == IB_QPT_RC) ipath_modify_qp()
537 qp->state = new_state; ipath_modify_qp()
541 lastwqe = ipath_error_qp(qp, IB_WC_WR_FLUSH_ERR); ipath_modify_qp()
545 qp->state = new_state; ipath_modify_qp()
550 qp->s_pkey_index = attr->pkey_index; ipath_modify_qp()
553 qp->remote_qpn = attr->dest_qp_num; ipath_modify_qp()
556 qp->s_psn = qp->s_next_psn = attr->sq_psn; ipath_modify_qp()
557 qp->s_last_psn = qp->s_next_psn - 1; ipath_modify_qp()
561 qp->r_psn = attr->rq_psn; ipath_modify_qp()
564 qp->qp_access_flags = attr->qp_access_flags; ipath_modify_qp()
567 qp->remote_ah_attr = attr->ah_attr; ipath_modify_qp()
568 qp->s_dmult = ipath_ib_rate_to_mult(attr->ah_attr.static_rate); ipath_modify_qp()
572 qp->path_mtu = attr->path_mtu; ipath_modify_qp()
575 qp->s_retry = qp->s_retry_cnt = attr->retry_cnt; ipath_modify_qp()
578 qp->s_rnr_retry = attr->rnr_retry; ipath_modify_qp()
579 if (qp->s_rnr_retry > 7) ipath_modify_qp()
580 qp->s_rnr_retry = 7; ipath_modify_qp()
581 qp->s_rnr_retry_cnt = qp->s_rnr_retry; ipath_modify_qp()
585 qp->r_min_rnr_timer = attr->min_rnr_timer; ipath_modify_qp()
588 qp->timeout = attr->timeout; ipath_modify_qp()
591 qp->qkey = attr->qkey; ipath_modify_qp()
594 qp->r_max_rd_atomic = attr->max_dest_rd_atomic; ipath_modify_qp()
597 qp->s_max_rd_atomic = attr->max_rd_atomic; ipath_modify_qp()
599 spin_unlock_irq(&qp->s_lock); ipath_modify_qp()
604 ev.device = qp->ibqp.device; ipath_modify_qp()
605 ev.element.qp = &qp->ibqp; ipath_modify_qp()
607 qp->ibqp.event_handler(&ev, qp->ibqp.qp_context); ipath_modify_qp()
613 spin_unlock_irq(&qp->s_lock); ipath_modify_qp()
623 struct ipath_qp *qp = to_iqp(ibqp); ipath_query_qp() local
625 attr->qp_state = qp->state; ipath_query_qp()
627 attr->path_mtu = qp->path_mtu; ipath_query_qp()
629 attr->qkey = qp->qkey; ipath_query_qp()
630 attr->rq_psn = qp->r_psn; ipath_query_qp()
631 attr->sq_psn = qp->s_next_psn; ipath_query_qp()
632 attr->dest_qp_num = qp->remote_qpn; ipath_query_qp()
633 attr->qp_access_flags = qp->qp_access_flags; ipath_query_qp()
634 attr->cap.max_send_wr = qp->s_size - 1; ipath_query_qp()
635 attr->cap.max_recv_wr = qp->ibqp.srq ? 0 : qp->r_rq.size - 1; ipath_query_qp()
636 attr->cap.max_send_sge = qp->s_max_sge; ipath_query_qp()
637 attr->cap.max_recv_sge = qp->r_rq.max_sge; ipath_query_qp()
639 attr->ah_attr = qp->remote_ah_attr; ipath_query_qp()
641 attr->pkey_index = qp->s_pkey_index; ipath_query_qp()
644 attr->sq_draining = qp->s_draining; ipath_query_qp()
645 attr->max_rd_atomic = qp->s_max_rd_atomic; ipath_query_qp()
646 attr->max_dest_rd_atomic = qp->r_max_rd_atomic; ipath_query_qp()
647 attr->min_rnr_timer = qp->r_min_rnr_timer; ipath_query_qp()
649 attr->timeout = qp->timeout; ipath_query_qp()
650 attr->retry_cnt = qp->s_retry_cnt; ipath_query_qp()
651 attr->rnr_retry = qp->s_rnr_retry_cnt; ipath_query_qp()
655 init_attr->event_handler = qp->ibqp.event_handler; ipath_query_qp()
656 init_attr->qp_context = qp->ibqp.qp_context; ipath_query_qp()
657 init_attr->send_cq = qp->ibqp.send_cq; ipath_query_qp()
658 init_attr->recv_cq = qp->ibqp.recv_cq; ipath_query_qp()
659 init_attr->srq = qp->ibqp.srq; ipath_query_qp()
661 if (qp->s_flags & IPATH_S_SIGNAL_REQ_WR) ipath_query_qp()
665 init_attr->qp_type = qp->ibqp.qp_type; ipath_query_qp()
672 * @qp: the queue pair to compute the AETH for
676 __be32 ipath_compute_aeth(struct ipath_qp *qp) ipath_compute_aeth() argument
678 u32 aeth = qp->r_msn & IPATH_MSN_MASK; ipath_compute_aeth()
680 if (qp->ibqp.srq) { ipath_compute_aeth()
689 struct ipath_rwq *wq = qp->r_rq.wq; ipath_compute_aeth()
695 if (head >= qp->r_rq.size) ipath_compute_aeth()
698 if (tail >= qp->r_rq.size) ipath_compute_aeth()
707 credits += qp->r_rq.size; ipath_compute_aeth()
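ipath_compute_aeth() above derives the credit count it advertises from the head and tail of the shared receive queue; because those indices can live in a page mapped to user space, they are clamped to the ring size before use. A small self-contained sketch of that head/tail arithmetic (hypothetical names, and omitting the driver's logarithmic AETH credit encoding):

    #include <stdio.h>

    /* Hypothetical circular receive queue: 'size' slots, producer index 'head'
     * (next WQE to post), consumer index 'tail' (next WQE to consume). */
    struct rq_ring {
        unsigned size;
        unsigned head;
        unsigned tail;
    };

    /* Number of posted-but-unconsumed receive WQEs, i.e. the credits a
     * responder could advertise. Indices are sanitized before use, as above. */
    static unsigned rq_credits(const struct rq_ring *rq)
    {
        unsigned head = rq->head;
        unsigned tail = rq->tail;
        int credits;

        if (head >= rq->size)
            head = 0;
        if (tail >= rq->size)
            tail = 0;
        credits = (int)head - (int)tail;
        if (credits < 0)
            credits += rq->size;
        return (unsigned)credits;
    }

    int main(void)
    {
        struct rq_ring rq = { .size = 8, .head = 2, .tail = 6 };

        printf("credits = %u\n", rq_credits(&rq));   /* 4: head wrapped past tail */
        return 0;
    }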
744 struct ipath_qp *qp; ipath_create_qp() local
793 sz = sizeof(*qp); ipath_create_qp()
799 sg_list_sz = sizeof(*qp->r_sg_list) * ipath_create_qp()
802 sg_list_sz = sizeof(*qp->r_sg_list) * ipath_create_qp()
804 qp = kmalloc(sz + sg_list_sz, GFP_KERNEL); ipath_create_qp()
805 if (!qp) { ipath_create_qp()
812 qp->r_ud_sg_list = kmalloc(sg_list_sz, GFP_KERNEL); ipath_create_qp()
813 if (!qp->r_ud_sg_list) { ipath_create_qp()
818 qp->r_ud_sg_list = NULL; ipath_create_qp()
821 qp->r_rq.size = 0; ipath_create_qp()
822 qp->r_rq.max_sge = 0; ipath_create_qp()
823 qp->r_rq.wq = NULL; ipath_create_qp()
827 qp->r_rq.size = init_attr->cap.max_recv_wr + 1; ipath_create_qp()
828 qp->r_rq.max_sge = init_attr->cap.max_recv_sge; ipath_create_qp()
829 sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) + ipath_create_qp()
831 qp->r_rq.wq = vmalloc_user(sizeof(struct ipath_rwq) + ipath_create_qp()
832 qp->r_rq.size * sz); ipath_create_qp()
833 if (!qp->r_rq.wq) { ipath_create_qp()
840 * ib_create_qp() will initialize qp->ibqp ipath_create_qp()
841 * except for qp->ibqp.qp_num. ipath_create_qp()
843 spin_lock_init(&qp->s_lock); ipath_create_qp()
844 spin_lock_init(&qp->r_rq.lock); ipath_create_qp()
845 atomic_set(&qp->refcount, 0); ipath_create_qp()
846 init_waitqueue_head(&qp->wait); ipath_create_qp()
847 init_waitqueue_head(&qp->wait_dma); ipath_create_qp()
848 tasklet_init(&qp->s_task, ipath_do_send, (unsigned long)qp); ipath_create_qp()
849 INIT_LIST_HEAD(&qp->piowait); ipath_create_qp()
850 INIT_LIST_HEAD(&qp->timerwait); ipath_create_qp()
851 qp->state = IB_QPS_RESET; ipath_create_qp()
852 qp->s_wq = swq; ipath_create_qp()
853 qp->s_size = init_attr->cap.max_send_wr + 1; ipath_create_qp()
854 qp->s_max_sge = init_attr->cap.max_send_sge; ipath_create_qp()
856 qp->s_flags = IPATH_S_SIGNAL_REQ_WR; ipath_create_qp()
858 qp->s_flags = 0; ipath_create_qp()
860 err = ipath_alloc_qpn(&dev->qp_table, qp, ipath_create_qp()
864 vfree(qp->r_rq.wq); ipath_create_qp()
867 qp->ip = NULL; ipath_create_qp()
868 qp->s_tx = NULL; ipath_create_qp()
869 ipath_reset_qp(qp, init_attr->qp_type); ipath_create_qp()
885 if (!qp->r_rq.wq) { ipath_create_qp()
896 qp->r_rq.size * sz; ipath_create_qp()
898 qp->ip = ipath_create_qp()
901 qp->r_rq.wq); ipath_create_qp()
902 if (!qp->ip) { ipath_create_qp()
907 err = ib_copy_to_udata(udata, &(qp->ip->offset), ipath_create_qp()
908 sizeof(qp->ip->offset)); ipath_create_qp()
926 if (qp->ip) { ipath_create_qp()
928 list_add(&qp->ip->pending_mmaps, &dev->pending_mmaps); ipath_create_qp()
932 ret = &qp->ibqp; ipath_create_qp()
936 if (qp->ip) ipath_create_qp()
937 kref_put(&qp->ip->ref, ipath_release_mmap_info); ipath_create_qp()
939 vfree(qp->r_rq.wq); ipath_create_qp()
940 ipath_free_qp(&dev->qp_table, qp); ipath_create_qp()
941 free_qpn(&dev->qp_table, qp->ibqp.qp_num); ipath_create_qp()
943 kfree(qp->r_ud_sg_list); ipath_create_qp()
945 kfree(qp); ipath_create_qp()
963 struct ipath_qp *qp = to_iqp(ibqp); ipath_destroy_qp() local
967 spin_lock_irq(&qp->s_lock); ipath_destroy_qp()
968 if (qp->state != IB_QPS_RESET) { ipath_destroy_qp()
969 qp->state = IB_QPS_RESET; ipath_destroy_qp()
971 if (!list_empty(&qp->timerwait)) ipath_destroy_qp()
972 list_del_init(&qp->timerwait); ipath_destroy_qp()
973 if (!list_empty(&qp->piowait)) ipath_destroy_qp()
974 list_del_init(&qp->piowait); ipath_destroy_qp()
976 qp->s_flags &= ~IPATH_S_ANY_WAIT; ipath_destroy_qp()
977 spin_unlock_irq(&qp->s_lock); ipath_destroy_qp()
979 tasklet_kill(&qp->s_task); ipath_destroy_qp()
980 wait_event(qp->wait_dma, !atomic_read(&qp->s_dma_busy)); ipath_destroy_qp()
982 spin_unlock_irq(&qp->s_lock); ipath_destroy_qp()
984 ipath_free_qp(&dev->qp_table, qp); ipath_destroy_qp()
986 if (qp->s_tx) { ipath_destroy_qp()
987 atomic_dec(&qp->refcount); ipath_destroy_qp()
988 if (qp->s_tx->txreq.flags & IPATH_SDMA_TXREQ_F_FREEBUF) ipath_destroy_qp()
989 kfree(qp->s_tx->txreq.map_addr); ipath_destroy_qp()
991 list_add(&qp->s_tx->txreq.list, &dev->txreq_free); ipath_destroy_qp()
993 qp->s_tx = NULL; ipath_destroy_qp()
996 wait_event(qp->wait, !atomic_read(&qp->refcount)); ipath_destroy_qp()
999 free_qpn(&dev->qp_table, qp->ibqp.qp_num); ipath_destroy_qp()
1004 if (qp->ip) ipath_destroy_qp()
1005 kref_put(&qp->ip->ref, ipath_release_mmap_info); ipath_destroy_qp()
1007 vfree(qp->r_rq.wq); ipath_destroy_qp()
1008 kfree(qp->r_ud_sg_list); ipath_destroy_qp()
1009 vfree(qp->s_wq); ipath_destroy_qp()
1010 kfree(qp); ipath_destroy_qp()
1049 * @qp: the qp whose send work queue to flush
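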
1054 void ipath_get_credit(struct ipath_qp *qp, u32 aeth) ipath_get_credit() argument
1064 qp->s_lsn = (u32) -1; ipath_get_credit()
1065 else if (qp->s_lsn != (u32) -1) { ipath_get_credit()
1068 if (ipath_cmp24(credit, qp->s_lsn) > 0) ipath_get_credit()
1069 qp->s_lsn = credit; ipath_get_credit()
1073 if ((qp->s_flags & IPATH_S_WAIT_SSN_CREDIT) && ipath_get_credit()
1074 qp->s_cur != qp->s_head && ipath_get_credit()
1075 (qp->s_lsn == (u32) -1 || ipath_get_credit()
1076 ipath_cmp24(get_swqe_ptr(qp, qp->s_cur)->ssn, ipath_get_credit()
1077 qp->s_lsn + 1) <= 0)) ipath_get_credit()
1078 ipath_schedule_send(qp); ipath_get_credit()
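ipath_get_credit() above compares 24-bit credit sequence numbers with ipath_cmp24() and only ever moves the stored limit (s_lsn) forward, treating (u32) -1 as unlimited. A standalone sketch of that wrap-safe comparison and update, assuming the usual sign-extended 24-bit difference and an arithmetic right shift:

    #include <assert.h>
    #include <stdint.h>

    /* Compare two 24-bit sequence numbers that wrap: sign-extend the 24-bit
     * difference, so a negative result means 'a' is older than 'b'. */
    static int32_t psn_cmp24(uint32_t a, uint32_t b)
    {
        return (int32_t)((a - b) << 8) >> 8;
    }

    /* Hypothetical credit-limit update in the spirit of the hits above: only
     * move the stored limit forward, and treat (uint32_t)-1 as "unlimited". */
    static void update_credit_limit(uint32_t *s_lsn, uint32_t advertised)
    {
        if (*s_lsn != (uint32_t)-1 && psn_cmp24(advertised, *s_lsn) > 0)
            *s_lsn = advertised;
    }

    int main(void)
    {
        uint32_t lsn = 0xfffff0;                     /* near the 24-bit wrap */

        assert(psn_cmp24(0x000005, 0xfffff0) > 0);   /* 5 is 21 ahead, post-wrap */
        update_credit_limit(&lsn, 0x000005);
        assert(lsn == 0x000005);
        update_credit_limit(&lsn, 0xfffff0);         /* stale value is ignored */
        assert(lsn == 0x000005);
        return 0;
    }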
H A Dipath_ruc.c79 * @qp: the QP
86 void ipath_insert_rnr_queue(struct ipath_qp *qp) ipath_insert_rnr_queue() argument
88 struct ipath_ibdev *dev = to_idev(qp->ibqp.device); ipath_insert_rnr_queue()
93 list_add(&qp->timerwait, &dev->rnrwait); ipath_insert_rnr_queue()
99 while (qp->s_rnr_timeout >= nqp->s_rnr_timeout) { ipath_insert_rnr_queue()
100 qp->s_rnr_timeout -= nqp->s_rnr_timeout; ipath_insert_rnr_queue()
110 nqp->s_rnr_timeout -= qp->s_rnr_timeout; ipath_insert_rnr_queue()
111 list_add(&qp->timerwait, l); ipath_insert_rnr_queue()
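ipath_insert_rnr_queue() above keeps RNR-delayed QPs on a list sorted by expiry, with each entry storing only the delta from its predecessor, so the periodic timer only has to decrement the list head. A self-contained sketch of that delta-list insertion, using hypothetical node and function names:

    #include <stdio.h>

    /* Hypothetical node: 'timeout' is stored as a delta from the predecessor,
     * so a timer tick only ever decrements the entry at the head of the list. */
    struct rnr_node {
        unsigned timeout;        /* delta from the previous entry, in ticks */
        struct rnr_node *next;
    };

    /* Insert a node whose absolute timeout is 'ticks' into a delta-encoded,
     * sorted singly linked list. */
    static void rnr_insert(struct rnr_node **head, struct rnr_node *n, unsigned ticks)
    {
        struct rnr_node **pp = head;

        while (*pp && ticks >= (*pp)->timeout) {
            ticks -= (*pp)->timeout;   /* convert to a delta past this entry */
            pp = &(*pp)->next;
        }
        if (*pp)
            (*pp)->timeout -= ticks;   /* successor keeps its absolute expiry */
        n->timeout = ticks;
        n->next = *pp;
        *pp = n;
    }

    int main(void)
    {
        struct rnr_node a = {0}, b = {0}, c = {0};
        struct rnr_node *head = NULL;

        rnr_insert(&head, &a, 5);
        rnr_insert(&head, &b, 2);
        rnr_insert(&head, &c, 9);
        for (struct rnr_node *p = head; p; p = p->next)
            printf("delta %u\n", p->timeout);   /* prints 2, 3, 4 */
        return 0;
    }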
118 * @qp: the QP
122 int ipath_init_sge(struct ipath_qp *qp, struct ipath_rwqe *wqe, ipath_init_sge() argument
133 if (!ipath_lkey_ok(qp, j ? &ss->sg_list[j - 1] : &ss->sge, ipath_init_sge()
148 wc.qp = &qp->ibqp; ipath_init_sge()
150 ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1); ipath_init_sge()
158 * @qp: the QP
159 * @wr_id_only: update qp->r_wr_id only, not qp->r_sge
165 int ipath_get_rwqe(struct ipath_qp *qp, int wr_id_only) ipath_get_rwqe() argument
176 if (qp->ibqp.srq) { ipath_get_rwqe()
177 srq = to_isrq(qp->ibqp.srq); ipath_get_rwqe()
183 rq = &qp->r_rq; ipath_get_rwqe()
187 if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_RECV_OK)) { ipath_get_rwqe()
209 qp->r_sge.sg_list = qp->r_sg_list; ipath_get_rwqe()
210 } while (!ipath_init_sge(qp, wqe, &qp->r_len, &qp->r_sge)); ipath_get_rwqe()
211 qp->r_wr_id = wqe->wr_id; ipath_get_rwqe()
215 set_bit(IPATH_R_WRID_VALID, &qp->r_aflags); ipath_get_rwqe()
235 ev.device = qp->ibqp.device; ipath_get_rwqe()
236 ev.element.srq = qp->ibqp.srq; ipath_get_rwqe()
262 struct ipath_qp *qp; ipath_ruc_loopback() local
275 qp = ipath_lookup_qpn(&dev->qp_table, sqp->remote_qpn); ipath_ruc_loopback()
311 if (!qp || !(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_RECV_OK)) { ipath_ruc_loopback()
337 if (!ipath_get_rwqe(qp, 0)) ipath_ruc_loopback()
342 if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE))) ipath_ruc_loopback()
346 if (!ipath_get_rwqe(qp, 1)) ipath_ruc_loopback()
350 if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE))) ipath_ruc_loopback()
354 if (unlikely(!ipath_rkey_ok(qp, &qp->r_sge, wqe->length, ipath_ruc_loopback()
362 if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ))) ipath_ruc_loopback()
364 if (unlikely(!ipath_rkey_ok(qp, &sqp->s_sge, wqe->length, ipath_ruc_loopback()
369 qp->r_sge.sge = wqe->sg_list[0]; ipath_ruc_loopback()
370 qp->r_sge.sg_list = wqe->sg_list + 1; ipath_ruc_loopback()
371 qp->r_sge.num_sge = wqe->wr.num_sge; ipath_ruc_loopback()
376 if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC))) ipath_ruc_loopback()
378 if (unlikely(!ipath_rkey_ok(qp, &qp->r_sge, sizeof(u64), ipath_ruc_loopback()
384 maddr = (atomic64_t *) qp->r_sge.sge.vaddr; ipath_ruc_loopback()
389 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr, ipath_ruc_loopback()
407 ipath_copy_sge(&qp->r_sge, sge->vaddr, len); ipath_ruc_loopback()
428 if (!test_and_clear_bit(IPATH_R_WRID_VALID, &qp->r_aflags)) ipath_ruc_loopback()
435 wc.wr_id = qp->r_wr_id; ipath_ruc_loopback()
438 wc.qp = &qp->ibqp; ipath_ruc_loopback()
439 wc.src_qp = qp->remote_qpn; ipath_ruc_loopback()
440 wc.slid = qp->remote_ah_attr.dlid; ipath_ruc_loopback()
441 wc.sl = qp->remote_ah_attr.sl; ipath_ruc_loopback()
444 ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, ipath_ruc_loopback()
456 if (qp->ibqp.qp_type == IB_QPT_UC) ipath_ruc_loopback()
473 sqp->s_rnr_timeout = ib_ipath_rnr_table[qp->r_min_rnr_timer]; ipath_ruc_loopback()
487 ipath_rc_error(qp, wc.status); ipath_ruc_loopback()
501 ev.element.qp = &sqp->ibqp; ipath_ruc_loopback()
512 if (qp && atomic_dec_and_test(&qp->refcount)) ipath_ruc_loopback()
513 wake_up(&qp->wait); ipath_ruc_loopback()
516 static void want_buffer(struct ipath_devdata *dd, struct ipath_qp *qp) want_buffer() argument
519 qp->ibqp.qp_type == IB_QPT_SMI) { want_buffer()
533 * @qp: the QP that caused the problem
540 static int ipath_no_bufs_available(struct ipath_qp *qp, ipath_no_bufs_available() argument
552 spin_lock_irqsave(&qp->s_lock, flags); ipath_no_bufs_available()
553 if (ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK) { ipath_no_bufs_available()
555 qp->s_flags |= IPATH_S_WAITING; ipath_no_bufs_available()
556 qp->s_flags &= ~IPATH_S_BUSY; ipath_no_bufs_available()
558 if (list_empty(&qp->piowait)) ipath_no_bufs_available()
559 list_add_tail(&qp->piowait, &dev->piowait); ipath_no_bufs_available()
563 spin_unlock_irqrestore(&qp->s_lock, flags); ipath_no_bufs_available()
565 want_buffer(dev->dd, qp); ipath_no_bufs_available()
599 void ipath_make_ruc_header(struct ipath_ibdev *dev, struct ipath_qp *qp, ipath_make_ruc_header() argument
608 extra_bytes = -qp->s_cur_size & 3; ipath_make_ruc_header()
609 nwords = (qp->s_cur_size + extra_bytes) >> 2; ipath_make_ruc_header()
611 if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) { ipath_make_ruc_header()
612 qp->s_hdrwords += ipath_make_grh(dev, &qp->s_hdr.u.l.grh, ipath_make_ruc_header()
613 &qp->remote_ah_attr.grh, ipath_make_ruc_header()
614 qp->s_hdrwords, nwords); ipath_make_ruc_header()
617 lrh0 |= qp->remote_ah_attr.sl << 4; ipath_make_ruc_header()
618 qp->s_hdr.lrh[0] = cpu_to_be16(lrh0); ipath_make_ruc_header()
619 qp->s_hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid); ipath_make_ruc_header()
620 qp->s_hdr.lrh[2] = cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC); ipath_make_ruc_header()
621 qp->s_hdr.lrh[3] = cpu_to_be16(dev->dd->ipath_lid | ipath_make_ruc_header()
622 qp->remote_ah_attr.src_path_bits); ipath_make_ruc_header()
623 bth0 |= ipath_get_pkey(dev->dd, qp->s_pkey_index); ipath_make_ruc_header()
626 ohdr->bth[1] = cpu_to_be32(qp->remote_qpn); ipath_make_ruc_header()
640 struct ipath_qp *qp = (struct ipath_qp *)data; ipath_do_send() local
641 struct ipath_ibdev *dev = to_idev(qp->ibqp.device); ipath_do_send()
642 int (*make_req)(struct ipath_qp *qp); ipath_do_send()
645 if ((qp->ibqp.qp_type == IB_QPT_RC || ipath_do_send()
646 qp->ibqp.qp_type == IB_QPT_UC) && ipath_do_send()
647 qp->remote_ah_attr.dlid == dev->dd->ipath_lid) { ipath_do_send()
648 ipath_ruc_loopback(qp); ipath_do_send()
652 if (qp->ibqp.qp_type == IB_QPT_RC) ipath_do_send()
654 else if (qp->ibqp.qp_type == IB_QPT_UC) ipath_do_send()
659 spin_lock_irqsave(&qp->s_lock, flags); ipath_do_send()
662 if ((qp->s_flags & (IPATH_S_BUSY | IPATH_S_ANY_WAIT)) || ipath_do_send()
663 !(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_OR_FLUSH_SEND)) { ipath_do_send()
664 spin_unlock_irqrestore(&qp->s_lock, flags); ipath_do_send()
668 qp->s_flags |= IPATH_S_BUSY; ipath_do_send()
670 spin_unlock_irqrestore(&qp->s_lock, flags); ipath_do_send()
674 if (qp->s_hdrwords != 0) { ipath_do_send()
679 if (ipath_verbs_send(qp, &qp->s_hdr, qp->s_hdrwords, ipath_do_send()
680 qp->s_cur_sge, qp->s_cur_size)) { ipath_do_send()
681 if (ipath_no_bufs_available(qp, dev)) ipath_do_send()
686 qp->s_hdrwords = 0; ipath_do_send()
689 if (make_req(qp)) ipath_do_send()
698 void ipath_send_complete(struct ipath_qp *qp, struct ipath_swqe *wqe, ipath_send_complete() argument
703 if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_OR_FLUSH_SEND)) ipath_send_complete()
707 if (!(qp->s_flags & IPATH_S_SIGNAL_REQ_WR) || ipath_send_complete()
716 wc.qp = &qp->ibqp; ipath_send_complete()
719 ipath_cq_enter(to_icq(qp->ibqp.send_cq), &wc, ipath_send_complete()
723 old_last = last = qp->s_last; ipath_send_complete()
724 if (++last >= qp->s_size) ipath_send_complete()
726 qp->s_last = last; ipath_send_complete()
727 if (qp->s_cur == old_last) ipath_send_complete()
728 qp->s_cur = last; ipath_send_complete()
729 if (qp->s_tail == old_last) ipath_send_complete()
730 qp->s_tail = last; ipath_send_complete()
731 if (qp->state == IB_QPS_SQD && last == qp->s_cur) ipath_send_complete()
732 qp->s_draining = 0; ipath_send_complete()
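ipath_send_complete() above retires the oldest send WQE by advancing s_last around the circular send queue, dragging s_cur and s_tail forward if they still pointed at the retired slot. A minimal model of that cursor handling (hypothetical struct; the real driver also posts a completion and clears s_draining):

    #include <assert.h>

    /* Hypothetical cursors into a circular send queue: 'last' is the oldest
     * un-retired WQE, 'cur' the next one to transmit, 'tail' the next one to
     * hand to the DMA engine. */
    struct sq_cursors {
        unsigned size;
        unsigned last;
        unsigned cur;
        unsigned tail;
    };

    /* Retire the WQE at 'last'; any cursor still parked on the retired slot
     * is dragged forward so it never points behind 'last'. */
    static void sq_retire(struct sq_cursors *sq)
    {
        unsigned old_last = sq->last;
        unsigned next = old_last + 1;

        if (next >= sq->size)
            next = 0;
        sq->last = next;
        if (sq->cur == old_last)
            sq->cur = next;
        if (sq->tail == old_last)
            sq->tail = next;
    }

    int main(void)
    {
        struct sq_cursors sq = { .size = 4, .last = 3, .cur = 3, .tail = 1 };

        sq_retire(&sq);
        assert(sq.last == 0 && sq.cur == 0 && sq.tail == 1);
        return 0;
    }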
H A Dipath_ud.c52 struct ipath_qp *qp; ipath_ud_loopback() local
67 qp = ipath_lookup_qpn(&dev->qp_table, swqe->ud_wr.remote_qpn); ipath_ud_loopback()
68 if (!qp || !(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_RECV_OK)) { ipath_ud_loopback()
78 if (unlikely(qp->ibqp.qp_num && ipath_ud_loopback()
80 sqp->qkey : swqe->ud_wr.remote_qkey) != qp->qkey)) { ipath_ud_loopback()
106 if (qp->ibqp.srq) { ipath_ud_loopback()
107 srq = to_isrq(qp->ibqp.srq); ipath_ud_loopback()
113 rq = &qp->r_rq; ipath_ud_loopback()
133 rsge.sg_list = qp->r_ud_sg_list; ipath_ud_loopback()
134 if (!ipath_init_sge(qp, wqe, &rlen, &rsge)) { ipath_ud_loopback()
168 ev.device = qp->ibqp.device; ipath_ud_loopback()
169 ev.element.srq = qp->ibqp.srq; ipath_ud_loopback()
214 wc.qp = &qp->ibqp; ipath_ud_loopback()
226 ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, ipath_ud_loopback()
229 if (atomic_dec_and_test(&qp->refcount)) ipath_ud_loopback()
230 wake_up(&qp->wait); ipath_ud_loopback()
236 * @qp: the QP
240 int ipath_make_ud_req(struct ipath_qp *qp) ipath_make_ud_req() argument
242 struct ipath_ibdev *dev = to_idev(qp->ibqp.device); ipath_make_ud_req()
255 spin_lock_irqsave(&qp->s_lock, flags); ipath_make_ud_req()
257 if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_NEXT_SEND_OK)) { ipath_make_ud_req()
258 if (!(ib_ipath_state_ops[qp->state] & IPATH_FLUSH_SEND)) ipath_make_ud_req()
261 if (qp->s_last == qp->s_head) ipath_make_ud_req()
264 if (atomic_read(&qp->s_dma_busy)) { ipath_make_ud_req()
265 qp->s_flags |= IPATH_S_WAIT_DMA; ipath_make_ud_req()
268 wqe = get_swqe_ptr(qp, qp->s_last); ipath_make_ud_req()
269 ipath_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR); ipath_make_ud_req()
273 if (qp->s_cur == qp->s_head) ipath_make_ud_req()
276 wqe = get_swqe_ptr(qp, qp->s_cur); ipath_make_ud_req()
277 next_cur = qp->s_cur + 1; ipath_make_ud_req()
278 if (next_cur >= qp->s_size) ipath_make_ud_req()
299 if (atomic_read(&qp->s_dma_busy)) { ipath_make_ud_req()
300 qp->s_flags |= IPATH_S_WAIT_DMA; ipath_make_ud_req()
303 qp->s_cur = next_cur; ipath_make_ud_req()
304 spin_unlock_irqrestore(&qp->s_lock, flags); ipath_make_ud_req()
305 ipath_ud_loopback(qp, wqe); ipath_make_ud_req()
306 spin_lock_irqsave(&qp->s_lock, flags); ipath_make_ud_req()
307 ipath_send_complete(qp, wqe, IB_WC_SUCCESS); ipath_make_ud_req()
312 qp->s_cur = next_cur; ipath_make_ud_req()
317 qp->s_hdrwords = 7; ipath_make_ud_req()
318 qp->s_cur_size = wqe->length; ipath_make_ud_req()
319 qp->s_cur_sge = &qp->s_sge; ipath_make_ud_req()
320 qp->s_dmult = ah_attr->static_rate; ipath_make_ud_req()
321 qp->s_wqe = wqe; ipath_make_ud_req()
322 qp->s_sge.sge = wqe->sg_list[0]; ipath_make_ud_req()
323 qp->s_sge.sg_list = wqe->sg_list + 1; ipath_make_ud_req()
324 qp->s_sge.num_sge = wqe->ud_wr.wr.num_sge; ipath_make_ud_req()
328 qp->s_hdrwords += ipath_make_grh(dev, &qp->s_hdr.u.l.grh, ipath_make_ud_req()
330 qp->s_hdrwords, nwords); ipath_make_ud_req()
332 ohdr = &qp->s_hdr.u.l.oth; ipath_make_ud_req()
340 ohdr = &qp->s_hdr.u.oth; ipath_make_ud_req()
343 qp->s_hdrwords++; ipath_make_ud_req()
349 if (qp->ibqp.qp_type == IB_QPT_SMI) ipath_make_ud_req()
351 qp->s_hdr.lrh[0] = cpu_to_be16(lrh0); ipath_make_ud_req()
352 qp->s_hdr.lrh[1] = cpu_to_be16(ah_attr->dlid); /* DEST LID */ ipath_make_ud_req()
353 qp->s_hdr.lrh[2] = cpu_to_be16(qp->s_hdrwords + nwords + ipath_make_ud_req()
359 qp->s_hdr.lrh[3] = cpu_to_be16(lid); ipath_make_ud_req()
361 qp->s_hdr.lrh[3] = IB_LID_PERMISSIVE; ipath_make_ud_req()
365 bth0 |= qp->ibqp.qp_type == IB_QPT_SMI ? IPATH_DEFAULT_P_KEY : ipath_make_ud_req()
366 ipath_get_pkey(dev->dd, qp->s_pkey_index); ipath_make_ud_req()
375 ohdr->bth[2] = cpu_to_be32(qp->s_next_psn++ & IPATH_PSN_MASK); ipath_make_ud_req()
381 qp->qkey : wqe->ud_wr.remote_qkey); ipath_make_ud_req()
382 ohdr->u.ud.deth[1] = cpu_to_be32(qp->ibqp.qp_num); ipath_make_ud_req()
389 qp->s_flags &= ~IPATH_S_BUSY; ipath_make_ud_req()
391 spin_unlock_irqrestore(&qp->s_lock, flags); ipath_make_ud_req()
402 * @qp: the QP the packet came on
409 int has_grh, void *data, u32 tlen, struct ipath_qp *qp) ipath_ud_rcv()
452 if (qp->ibqp.qp_num) { ipath_ud_rcv()
458 if (unlikely(qkey != qp->qkey)) { ipath_ud_rcv()
479 if (qp->ibqp.qp_num > 1 && ipath_ud_rcv()
506 if (unlikely((qp->ibqp.qp_num == 0 && ipath_ud_rcv()
509 (qp->ibqp.qp_num == 1 && ipath_ud_rcv()
525 if (qp->r_flags & IPATH_R_REUSE_SGE) ipath_ud_rcv()
526 qp->r_flags &= ~IPATH_R_REUSE_SGE; ipath_ud_rcv()
527 else if (!ipath_get_rwqe(qp, 0)) { ipath_ud_rcv()
534 if (qp->ibqp.qp_num == 0) ipath_ud_rcv()
541 if (wc.byte_len > qp->r_len) { ipath_ud_rcv()
542 qp->r_flags |= IPATH_R_REUSE_SGE; ipath_ud_rcv()
547 ipath_copy_sge(&qp->r_sge, &hdr->u.l.grh, ipath_ud_rcv()
551 ipath_skip_sge(&qp->r_sge, sizeof(struct ib_grh)); ipath_ud_rcv()
552 ipath_copy_sge(&qp->r_sge, data, ipath_ud_rcv()
554 if (!test_and_clear_bit(IPATH_R_WRID_VALID, &qp->r_aflags)) ipath_ud_rcv()
556 wc.wr_id = qp->r_wr_id; ipath_ud_rcv()
560 wc.qp = &qp->ibqp; ipath_ud_rcv()
574 ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, ipath_ud_rcv()
408 ipath_ud_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr, int has_grh, void *data, u32 tlen, struct ipath_qp *qp) ipath_ud_rcv() argument
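The ipath_ud_rcv()/ipath_ud_loopback() hits above validate the qkey for everything except QP0, with the convention that a work-request qkey whose high bit is set means "use the sending QP's own qkey" (that is what the (int) ... < 0 cast above tests). A small standalone sketch of that check, with hypothetical names:

    #include <assert.h>
    #include <stdint.h>

    /* High bit set in the work request's qkey selects the sender's own qkey. */
    static uint32_t effective_qkey(uint32_t wr_qkey, uint32_t sender_qkey)
    {
        return (wr_qkey & 0x80000000u) ? sender_qkey : wr_qkey;
    }

    static int qkey_ok(uint32_t dest_qpn, uint32_t dest_qkey,
                       uint32_t wr_qkey, uint32_t sender_qkey)
    {
        if (dest_qpn == 0)            /* QP0 traffic is not qkey-checked here */
            return 1;
        return effective_qkey(wr_qkey, sender_qkey) == dest_qkey;
    }

    int main(void)
    {
        assert(qkey_ok(7, 0x12345, 0x12345, 0));          /* explicit match */
        assert(qkey_ok(7, 0x12345, 0x80000000u, 0x12345)); /* MSB set: sender's qkey */
        assert(!qkey_ok(7, 0x12345, 0x54321, 0));          /* mismatch is rejected */
        return 0;
    }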
H A Dipath_verbs.c333 * @qp: the QP to post on
336 static int ipath_post_one_send(struct ipath_qp *qp, struct ib_send_wr *wr) ipath_post_one_send() argument
345 struct ipath_devdata *dd = to_idev(qp->ibqp.device)->dd; ipath_post_one_send()
347 spin_lock_irqsave(&qp->s_lock, flags); ipath_post_one_send()
349 if (qp->ibqp.qp_type != IB_QPT_SMI && ipath_post_one_send()
356 if (unlikely(!(ib_ipath_state_ops[qp->state] & IPATH_POST_SEND_OK))) ipath_post_one_send()
360 if (wr->num_sge > qp->s_max_sge) ipath_post_one_send()
368 if (qp->ibqp.qp_type == IB_QPT_UC) { ipath_post_one_send()
371 } else if (qp->ibqp.qp_type == IB_QPT_UD) { ipath_post_one_send()
377 if (qp->ibqp.pd != ud_wr(wr)->ah->pd) ipath_post_one_send()
386 else if (wr->opcode >= IB_WR_RDMA_READ && !qp->s_max_rd_atomic) ipath_post_one_send()
389 next = qp->s_head + 1; ipath_post_one_send()
390 if (next >= qp->s_size) ipath_post_one_send()
392 if (next == qp->s_last) { ipath_post_one_send()
397 wqe = get_swqe_ptr(qp, qp->s_head); ipath_post_one_send()
399 if (qp->ibqp.qp_type != IB_QPT_UC && ipath_post_one_send()
400 qp->ibqp.qp_type != IB_QPT_RC) ipath_post_one_send()
422 ok = ipath_lkey_ok(qp, &wqe->sg_list[j], ipath_post_one_send()
431 if (qp->ibqp.qp_type == IB_QPT_UC || ipath_post_one_send()
432 qp->ibqp.qp_type == IB_QPT_RC) { ipath_post_one_send()
435 } else if (wqe->length > to_idev(qp->ibqp.device)->dd->ipath_ibmtu) ipath_post_one_send()
437 wqe->ssn = qp->s_ssn++; ipath_post_one_send()
438 qp->s_head = next; ipath_post_one_send()
446 spin_unlock_irqrestore(&qp->s_lock, flags); ipath_post_one_send()
461 struct ipath_qp *qp = to_iqp(ibqp); ipath_post_send() local
465 err = ipath_post_one_send(qp, wr); ipath_post_send()
473 ipath_do_send((unsigned long) qp); ipath_post_send()
490 struct ipath_qp *qp = to_iqp(ibqp); ipath_post_receive() local
491 struct ipath_rwq *wq = qp->r_rq.wq; ipath_post_receive()
496 if (!(ib_ipath_state_ops[qp->state] & IPATH_POST_RECV_OK) || !wq) { ipath_post_receive()
507 if ((unsigned) wr->num_sge > qp->r_rq.max_sge) { ipath_post_receive()
513 spin_lock_irqsave(&qp->r_rq.lock, flags); ipath_post_receive()
515 if (next >= qp->r_rq.size) ipath_post_receive()
518 spin_unlock_irqrestore(&qp->r_rq.lock, flags); ipath_post_receive()
524 wqe = get_rwqe_ptr(&qp->r_rq, wq->head); ipath_post_receive()
532 spin_unlock_irqrestore(&qp->r_rq.lock, flags); ipath_post_receive()
547 * @qp: the QP the packet came on
555 void *data, u32 tlen, struct ipath_qp *qp) ipath_qp_rcv()
558 if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_RECV_OK)) { ipath_qp_rcv()
563 switch (qp->ibqp.qp_type) { ipath_qp_rcv()
570 ipath_ud_rcv(dev, hdr, has_grh, data, tlen, qp); ipath_qp_rcv()
574 ipath_rc_rcv(dev, hdr, has_grh, data, tlen, qp); ipath_qp_rcv()
578 ipath_uc_rcv(dev, hdr, has_grh, data, tlen, qp); ipath_qp_rcv()
601 struct ipath_qp *qp; ipath_ib_rcv() local
657 ipath_qp_rcv(dev, hdr, 1, data, tlen, p->qp); ipath_ib_rcv()
665 qp = ipath_lookup_qpn(&dev->qp_table, qp_num); ipath_ib_rcv()
666 if (qp) { ipath_ib_rcv()
669 tlen, qp); ipath_ib_rcv()
674 if (atomic_dec_and_test(&qp->refcount)) ipath_ib_rcv()
675 wake_up(&qp->wait); ipath_ib_rcv()
695 struct ipath_qp *qp; ipath_ib_timer() local
708 qp = list_entry(last->next, struct ipath_qp, timerwait); ipath_ib_timer()
709 list_del_init(&qp->timerwait); ipath_ib_timer()
710 qp->timer_next = resend; ipath_ib_timer()
711 resend = qp; ipath_ib_timer()
712 atomic_inc(&qp->refcount); ipath_ib_timer()
716 qp = list_entry(last->next, struct ipath_qp, timerwait); ipath_ib_timer()
717 if (--qp->s_rnr_timeout == 0) { ipath_ib_timer()
719 list_del_init(&qp->timerwait); ipath_ib_timer()
720 qp->timer_next = rnr; ipath_ib_timer()
721 rnr = qp; ipath_ib_timer()
722 atomic_inc(&qp->refcount); ipath_ib_timer()
725 qp = list_entry(last->next, struct ipath_qp, ipath_ib_timer()
727 } while (qp->s_rnr_timeout == 0); ipath_ib_timer()
763 qp = resend; ipath_ib_timer()
764 resend = qp->timer_next; ipath_ib_timer()
766 spin_lock_irqsave(&qp->s_lock, flags); ipath_ib_timer()
767 if (qp->s_last != qp->s_tail && ipath_ib_timer()
768 ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK) { ipath_ib_timer()
770 ipath_restart_rc(qp, qp->s_last_psn + 1); ipath_ib_timer()
772 spin_unlock_irqrestore(&qp->s_lock, flags); ipath_ib_timer()
775 if (atomic_dec_and_test(&qp->refcount)) ipath_ib_timer()
776 wake_up(&qp->wait); ipath_ib_timer()
779 qp = rnr; ipath_ib_timer()
780 rnr = qp->timer_next; ipath_ib_timer()
782 spin_lock_irqsave(&qp->s_lock, flags); ipath_ib_timer()
783 if (ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK) ipath_ib_timer()
784 ipath_schedule_send(qp); ipath_ib_timer()
785 spin_unlock_irqrestore(&qp->s_lock, flags); ipath_ib_timer()
788 if (atomic_dec_and_test(&qp->refcount)) ipath_ib_timer()
789 wake_up(&qp->wait); ipath_ib_timer()
1044 struct ipath_qp *qp = tx->qp; sdma_complete() local
1045 struct ipath_ibdev *dev = to_idev(qp->ibqp.device); sdma_complete()
1050 if (atomic_dec_and_test(&qp->s_dma_busy)) { sdma_complete()
1051 spin_lock_irqsave(&qp->s_lock, flags); sdma_complete()
1053 ipath_send_complete(qp, tx->wqe, ibs); sdma_complete()
1054 if ((ib_ipath_state_ops[qp->state] & IPATH_FLUSH_SEND && sdma_complete()
1055 qp->s_last != qp->s_head) || sdma_complete()
1056 (qp->s_flags & IPATH_S_WAIT_DMA)) sdma_complete()
1057 ipath_schedule_send(qp); sdma_complete()
1058 spin_unlock_irqrestore(&qp->s_lock, flags); sdma_complete()
1059 wake_up(&qp->wait_dma); sdma_complete()
1061 spin_lock_irqsave(&qp->s_lock, flags); sdma_complete()
1062 ipath_send_complete(qp, tx->wqe, ibs); sdma_complete()
1063 spin_unlock_irqrestore(&qp->s_lock, flags); sdma_complete()
1070 if (atomic_dec_and_test(&qp->refcount)) sdma_complete()
1071 wake_up(&qp->wait); sdma_complete()
1074 static void decrement_dma_busy(struct ipath_qp *qp) decrement_dma_busy() argument
1078 if (atomic_dec_and_test(&qp->s_dma_busy)) { decrement_dma_busy()
1079 spin_lock_irqsave(&qp->s_lock, flags); decrement_dma_busy()
1080 if ((ib_ipath_state_ops[qp->state] & IPATH_FLUSH_SEND && decrement_dma_busy()
1081 qp->s_last != qp->s_head) || decrement_dma_busy()
1082 (qp->s_flags & IPATH_S_WAIT_DMA)) decrement_dma_busy()
1083 ipath_schedule_send(qp); decrement_dma_busy()
1084 spin_unlock_irqrestore(&qp->s_lock, flags); decrement_dma_busy()
1085 wake_up(&qp->wait_dma); decrement_dma_busy()
1110 static int ipath_verbs_send_dma(struct ipath_qp *qp, ipath_verbs_send_dma() argument
1115 struct ipath_ibdev *dev = to_idev(qp->ibqp.device); ipath_verbs_send_dma()
1123 tx = qp->s_tx; ipath_verbs_send_dma()
1125 qp->s_tx = NULL; ipath_verbs_send_dma()
1127 atomic_inc(&qp->s_dma_busy); ipath_verbs_send_dma()
1130 qp->s_tx = tx; ipath_verbs_send_dma()
1131 decrement_dma_busy(qp); ipath_verbs_send_dma()
1147 control = qp->s_pkt_delay; ipath_verbs_send_dma()
1148 qp->s_pkt_delay = ipath_pkt_delay(plen, dd->delay_mult, qp->s_dmult); ipath_verbs_send_dma()
1150 tx->qp = qp; ipath_verbs_send_dma()
1151 atomic_inc(&qp->refcount); ipath_verbs_send_dma()
1152 tx->wqe = qp->s_wqe; ipath_verbs_send_dma()
1183 atomic_inc(&qp->s_dma_busy); ipath_verbs_send_dma()
1189 qp->s_tx = tx; ipath_verbs_send_dma()
1190 decrement_dma_busy(qp); ipath_verbs_send_dma()
1211 atomic_inc(&qp->s_dma_busy); ipath_verbs_send_dma()
1221 qp->s_tx = tx; ipath_verbs_send_dma()
1222 decrement_dma_busy(qp); ipath_verbs_send_dma()
1228 if (atomic_dec_and_test(&qp->refcount)) ipath_verbs_send_dma()
1229 wake_up(&qp->wait); ipath_verbs_send_dma()
1235 static int ipath_verbs_send_pio(struct ipath_qp *qp, ipath_verbs_send_pio() argument
1240 struct ipath_devdata *dd = to_idev(qp->ibqp.device)->dd; ipath_verbs_send_pio()
1259 control = qp->s_pkt_delay; ipath_verbs_send_pio()
1260 qp->s_pkt_delay = ipath_pkt_delay(plen, dd->delay_mult, qp->s_dmult); ipath_verbs_send_pio()
1317 if (qp->s_wqe) { ipath_verbs_send_pio()
1318 spin_lock_irqsave(&qp->s_lock, flags); ipath_verbs_send_pio()
1319 ipath_send_complete(qp, qp->s_wqe, IB_WC_SUCCESS); ipath_verbs_send_pio()
1320 spin_unlock_irqrestore(&qp->s_lock, flags); ipath_verbs_send_pio()
1329 * @qp: the QP to send on
1335 int ipath_verbs_send(struct ipath_qp *qp, struct ipath_ib_header *hdr, ipath_verbs_send() argument
1338 struct ipath_devdata *dd = to_idev(qp->ibqp.device)->dd; ipath_verbs_send()
1354 if (qp->ibqp.qp_type == IB_QPT_SMI || ipath_verbs_send()
1356 ret = ipath_verbs_send_pio(qp, hdr, hdrwords, ss, len, ipath_verbs_send()
1359 ret = ipath_verbs_send_dma(qp, hdr, hdrwords, ss, len, ipath_verbs_send()
1474 struct ipath_qp *qp; ipath_ib_piobufavail() local
1485 qp = list_entry(list->next, struct ipath_qp, piowait); ipath_ib_piobufavail()
1486 list_del_init(&qp->piowait); ipath_ib_piobufavail()
1487 qp->pio_next = qplist; ipath_ib_piobufavail()
1488 qplist = qp; ipath_ib_piobufavail()
1489 atomic_inc(&qp->refcount); ipath_ib_piobufavail()
1494 qp = qplist; ipath_ib_piobufavail()
1495 qplist = qp->pio_next; ipath_ib_piobufavail()
1497 spin_lock_irqsave(&qp->s_lock, flags); ipath_ib_piobufavail()
1498 if (ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK) ipath_ib_piobufavail()
1499 ipath_schedule_send(qp); ipath_ib_piobufavail()
1500 spin_unlock_irqrestore(&qp->s_lock, flags); ipath_ib_piobufavail()
1503 if (atomic_dec_and_test(&qp->refcount)) ipath_ib_piobufavail()
1504 wake_up(&qp->wait); ipath_ib_piobufavail()
553 ipath_qp_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr, int has_grh, void *data, u32 tlen, struct ipath_qp *qp) ipath_qp_rcv() argument
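A pattern that runs through the ipath_verbs.c hits above: whenever a QP is placed on a list (piowait, timerwait, an SDMA descriptor) its refcount is bumped with atomic_inc(), every consumer drops it with atomic_dec_and_test() plus wake_up(), and the destroy path blocks in wait_event() until the count drains to zero. A user-space sketch of the same lifecycle using C11 atomics and a condition variable (a hypothetical stand-in; the kernel primitives differ):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    struct qp_obj {
        atomic_int refcount;
        pthread_mutex_t lock;
        pthread_cond_t drained;
    };

    static void qp_get(struct qp_obj *qp)
    {
        atomic_fetch_add(&qp->refcount, 1);            /* atomic_inc() */
    }

    static void qp_put(struct qp_obj *qp)
    {
        /* atomic_dec_and_test() + wake_up(): signal only on the last reference. */
        if (atomic_fetch_sub(&qp->refcount, 1) == 1) {
            pthread_mutex_lock(&qp->lock);
            pthread_cond_broadcast(&qp->drained);
            pthread_mutex_unlock(&qp->lock);
        }
    }

    static void qp_wait_idle(struct qp_obj *qp)
    {
        /* wait_event(qp->wait, !atomic_read(&qp->refcount)) equivalent. */
        pthread_mutex_lock(&qp->lock);
        while (atomic_load(&qp->refcount) != 0)
            pthread_cond_wait(&qp->drained, &qp->lock);
        pthread_mutex_unlock(&qp->lock);
    }

    int main(void)
    {
        struct qp_obj qp;

        atomic_init(&qp.refcount, 0);
        pthread_mutex_init(&qp.lock, NULL);
        pthread_cond_init(&qp.drained, NULL);

        qp_get(&qp);          /* e.g. queued on the piowait list */
        qp_put(&qp);          /* list processing finished */
        qp_wait_idle(&qp);    /* destroy path: now safe to tear the QP down */
        puts("refcount drained");
        return 0;
    }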
H A Dipath_verbs_mcast.c50 * @qp: the QP to link
52 static struct ipath_mcast_qp *ipath_mcast_qp_alloc(struct ipath_qp *qp) ipath_mcast_qp_alloc() argument
60 mqp->qp = qp; ipath_mcast_qp_alloc()
61 atomic_inc(&qp->refcount); ipath_mcast_qp_alloc()
69 struct ipath_qp *qp = mqp->qp; ipath_mcast_qp_free() local
72 if (atomic_dec_and_test(&qp->refcount)) ipath_mcast_qp_free()
73 wake_up(&qp->wait); ipath_mcast_qp_free()
192 if (p->qp == mqp->qp) { ipath_mcast_add()
237 struct ipath_qp *qp = to_iqp(ibqp); ipath_multicast_attach() local
252 mqp = ipath_mcast_qp_alloc(qp); ipath_multicast_attach()
286 struct ipath_qp *qp = to_iqp(ibqp); ipath_multicast_detach() local
318 if (p->qp != qp) ipath_multicast_detach()
H A Dipath_keys.c121 int ipath_lkey_ok(struct ipath_qp *qp, struct ipath_sge *isge, ipath_lkey_ok() argument
124 struct ipath_lkey_table *rkt = &to_idev(qp->ibqp.device)->lk_table; ipath_lkey_ok()
136 struct ipath_pd *pd = to_ipd(qp->ibqp.pd); ipath_lkey_ok()
151 qp->ibqp.pd != mr->pd)) { ipath_lkey_ok()
199 int ipath_rkey_ok(struct ipath_qp *qp, struct ipath_sge_state *ss, ipath_rkey_ok() argument
202 struct ipath_ibdev *dev = to_idev(qp->ibqp.device); ipath_rkey_ok()
216 struct ipath_pd *pd = to_ipd(qp->ibqp.pd); ipath_rkey_ok()
234 qp->ibqp.pd != mr->pd)) { ipath_rkey_ok()
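ipath_lkey_ok()/ipath_rkey_ok() above check that a requested [vaddr, vaddr+len) range lies inside the registered region and that the protection domain and access rights match before building an SGE. A simplified standalone version of just the range/permission arithmetic (hypothetical struct; the real code also walks the region's page maps and compares PDs):

    #include <assert.h>
    #include <stdint.h>

    struct mem_region {
        uint64_t base;    /* first valid byte */
        uint64_t length;  /* bytes registered */
        unsigned access;  /* bitmask of permitted operations */
    };

    static int rkey_range_ok(const struct mem_region *mr,
                             uint64_t vaddr, uint64_t len, unsigned want)
    {
        if ((mr->access & want) != want)
            return 0;                               /* operation not permitted */
        if (vaddr < mr->base)
            return 0;
        if (len > mr->length)
            return 0;
        return vaddr - mr->base <= mr->length - len;   /* fits, no overflow */
    }

    int main(void)
    {
        struct mem_region mr = { .base = 0x1000, .length = 0x2000, .access = 0x3 };

        assert(rkey_range_ok(&mr, 0x1000, 0x2000, 0x1));
        assert(!rkey_range_ok(&mr, 0x2fff, 0x10, 0x1));   /* runs past the end */
        assert(!rkey_range_ok(&mr, 0x1000, 0x10, 0x4));   /* missing access bit */
        return 0;
    }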
H A Dipath_verbs.h158 struct ipath_qp *qp; member in struct:ipath_mcast_qp
277 * in qp->s_max_sge.
297 * and stored in qp->r_rq.max_sge (or srq->rq.max_sge).
486 static inline struct ipath_swqe *get_swqe_ptr(struct ipath_qp *qp, get_swqe_ptr() argument
489 return (struct ipath_swqe *)((char *)qp->s_wq + get_swqe_ptr()
491 qp->s_max_sge * get_swqe_ptr()
652 struct ipath_qp *qp; member in struct:ipath_verbs_txreq
699 static inline void ipath_schedule_send(struct ipath_qp *qp) ipath_schedule_send() argument
701 if (qp->s_flags & IPATH_S_ANY_WAIT) ipath_schedule_send()
702 qp->s_flags &= ~IPATH_S_ANY_WAIT; ipath_schedule_send()
703 if (!(qp->s_flags & IPATH_S_BUSY)) ipath_schedule_send()
704 tasklet_hi_schedule(&qp->s_task); ipath_schedule_send()
740 __be32 ipath_compute_aeth(struct ipath_qp *qp);
750 int ipath_error_qp(struct ipath_qp *qp, enum ib_wc_status err);
762 void ipath_get_credit(struct ipath_qp *qp, u32 aeth);
766 int ipath_verbs_send(struct ipath_qp *qp, struct ipath_ib_header *hdr,
774 int has_grh, void *data, u32 tlen, struct ipath_qp *qp);
777 int has_grh, void *data, u32 tlen, struct ipath_qp *qp);
779 void ipath_restart_rc(struct ipath_qp *qp, u32 psn);
781 void ipath_rc_error(struct ipath_qp *qp, enum ib_wc_status err);
783 int ipath_post_ud_send(struct ipath_qp *qp, struct ib_send_wr *wr);
786 int has_grh, void *data, u32 tlen, struct ipath_qp *qp);
793 int ipath_lkey_ok(struct ipath_qp *qp, struct ipath_sge *isge,
796 int ipath_rkey_ok(struct ipath_qp *qp, struct ipath_sge_state *ss,
864 void ipath_insert_rnr_queue(struct ipath_qp *qp);
866 int ipath_init_sge(struct ipath_qp *qp, struct ipath_rwqe *wqe,
869 int ipath_get_rwqe(struct ipath_qp *qp, int wr_id_only);
874 void ipath_make_ruc_header(struct ipath_ibdev *dev, struct ipath_qp *qp,
880 void ipath_send_complete(struct ipath_qp *qp, struct ipath_swqe *wqe,
883 int ipath_make_rc_req(struct ipath_qp *qp);
885 int ipath_make_uc_req(struct ipath_qp *qp);
887 int ipath_make_ud_req(struct ipath_qp *qp);
H A Dipath_cq.c46 * This may be called with qp->s_lock held.
87 wc->uqueue[head].qp_num = entry->qp->qp_num; ipath_cq_enter()
/linux-4.4.14/drivers/infiniband/hw/mlx4/
H A DMakefile3 mlx4_ib-y := ah.o cq.o doorbell.o mad.o main.o mr.o qp.o srq.o mcg.o cm.o alias_GUID.o sysfs.o
H A Dqp.c45 #include <linux/mlx4/qp.h>
82 struct mlx4_ib_qp qp; member in struct:mlx4_ib_sqp
123 return container_of(mqp, struct mlx4_ib_sqp, qp); to_msqp()
126 static int is_tunnel_qp(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp) is_tunnel_qp() argument
131 return qp->mqp.qpn >= dev->dev->phys_caps.base_tunnel_sqpn && is_tunnel_qp()
132 qp->mqp.qpn < dev->dev->phys_caps.base_tunnel_sqpn + is_tunnel_qp()
136 static int is_sqp(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp) is_sqp() argument
143 qp->mqp.qpn >= dev->dev->phys_caps.base_sqpn && is_sqp()
144 qp->mqp.qpn <= dev->dev->phys_caps.base_sqpn + 3); is_sqp()
150 if (qp->mqp.qpn == dev->dev->caps.qp0_proxy[i] || is_sqp()
151 qp->mqp.qpn == dev->dev->caps.qp1_proxy[i]) { is_sqp()
161 static int is_qp0(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp) is_qp0() argument
168 qp->mqp.qpn >= dev->dev->phys_caps.base_sqpn && is_qp0()
169 qp->mqp.qpn <= dev->dev->phys_caps.base_sqpn + 1); is_qp0()
175 if (qp->mqp.qpn == dev->dev->caps.qp0_proxy[i]) { is_qp0()
184 static void *get_wqe(struct mlx4_ib_qp *qp, int offset) get_wqe() argument
186 return mlx4_buf_offset(&qp->buf, offset); get_wqe()
189 static void *get_recv_wqe(struct mlx4_ib_qp *qp, int n) get_recv_wqe() argument
191 return get_wqe(qp, qp->rq.offset + (n << qp->rq.wqe_shift)); get_recv_wqe()
194 static void *get_send_wqe(struct mlx4_ib_qp *qp, int n) get_send_wqe() argument
196 return get_wqe(qp, qp->sq.offset + (n << qp->sq.wqe_shift)); get_send_wqe()
208 static void stamp_send_wqe(struct mlx4_ib_qp *qp, int n, int size) stamp_send_wqe() argument
218 if (qp->sq_max_wqes_per_wr > 1) { stamp_send_wqe()
219 s = roundup(size, 1U << qp->sq.wqe_shift); stamp_send_wqe()
221 ind = (i >> qp->sq.wqe_shift) + n; stamp_send_wqe()
222 stamp = ind & qp->sq.wqe_cnt ? cpu_to_be32(0x7fffffff) : stamp_send_wqe()
224 buf = get_send_wqe(qp, ind & (qp->sq.wqe_cnt - 1)); stamp_send_wqe()
225 wqe = buf + (i & ((1 << qp->sq.wqe_shift) - 1)); stamp_send_wqe()
229 ctrl = buf = get_send_wqe(qp, n & (qp->sq.wqe_cnt - 1)); stamp_send_wqe()
238 static void post_nop_wqe(struct mlx4_ib_qp *qp, int n, int size) post_nop_wqe() argument
245 ctrl = wqe = get_send_wqe(qp, n & (qp->sq.wqe_cnt - 1)); post_nop_wqe()
248 if (qp->ibqp.qp_type == IB_QPT_UD) { post_nop_wqe()
252 av->port_pd = cpu_to_be32((qp->port << 24) | to_mpd(qp->ibqp.pd)->pdn); post_nop_wqe()
270 (n & qp->sq.wqe_cnt ? cpu_to_be32(1 << 31) : 0); post_nop_wqe()
272 stamp_send_wqe(qp, n + qp->sq_spare_wqes, size); post_nop_wqe()
276 static inline unsigned pad_wraparound(struct mlx4_ib_qp *qp, int ind) pad_wraparound() argument
278 unsigned s = qp->sq.wqe_cnt - (ind & (qp->sq.wqe_cnt - 1)); pad_wraparound()
279 if (unlikely(s < qp->sq_max_wqes_per_wr)) { pad_wraparound()
280 post_nop_wqe(qp, ind, s << qp->sq.wqe_shift); pad_wraparound()
286 static void mlx4_ib_qp_event(struct mlx4_qp *qp, enum mlx4_event type) mlx4_ib_qp_event() argument
289 struct ib_qp *ibqp = &to_mibqp(qp)->ibqp; mlx4_ib_qp_event()
292 to_mibqp(qp)->port = to_mibqp(qp)->alt_port; mlx4_ib_qp_event()
296 event.element.qp = ibqp; mlx4_ib_qp_event()
324 "on QP %06x\n", type, qp->qpn); mlx4_ib_qp_event()
379 int is_user, int has_rq, struct mlx4_ib_qp *qp) set_rq_size()
390 qp->rq.wqe_cnt = qp->rq.max_gs = 0; set_rq_size()
396 qp->rq.wqe_cnt = roundup_pow_of_two(max(1U, cap->max_recv_wr)); set_rq_size()
397 qp->rq.max_gs = roundup_pow_of_two(max(1U, cap->max_recv_sge)); set_rq_size()
398 qp->rq.wqe_shift = ilog2(qp->rq.max_gs * sizeof (struct mlx4_wqe_data_seg)); set_rq_size()
403 cap->max_recv_wr = qp->rq.max_post = qp->rq.wqe_cnt; set_rq_size()
404 cap->max_recv_sge = qp->rq.max_gs; set_rq_size()
406 cap->max_recv_wr = qp->rq.max_post = set_rq_size()
407 min(dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE, qp->rq.wqe_cnt); set_rq_size()
408 cap->max_recv_sge = min(qp->rq.max_gs, set_rq_size()
417 enum mlx4_ib_qp_type type, struct mlx4_ib_qp *qp) set_kernel_sq_size()
424 cap->max_inline_data + send_wqe_overhead(type, qp->flags) + set_kernel_sq_size()
439 send_wqe_overhead(type, qp->flags); set_kernel_sq_size()
476 qp->sq_signal_bits && BITS_PER_LONG == 64 && set_kernel_sq_size()
480 qp->sq.wqe_shift = ilog2(64); set_kernel_sq_size()
482 qp->sq.wqe_shift = ilog2(roundup_pow_of_two(s)); set_kernel_sq_size()
485 qp->sq_max_wqes_per_wr = DIV_ROUND_UP(s, 1U << qp->sq.wqe_shift); set_kernel_sq_size()
491 qp->sq_spare_wqes = (2048 >> qp->sq.wqe_shift) + qp->sq_max_wqes_per_wr; set_kernel_sq_size()
492 qp->sq.wqe_cnt = roundup_pow_of_two(cap->max_send_wr * set_kernel_sq_size()
493 qp->sq_max_wqes_per_wr + set_kernel_sq_size()
494 qp->sq_spare_wqes); set_kernel_sq_size()
496 if (qp->sq.wqe_cnt <= dev->dev->caps.max_wqes) set_kernel_sq_size()
499 if (qp->sq_max_wqes_per_wr <= 1) set_kernel_sq_size()
502 ++qp->sq.wqe_shift; set_kernel_sq_size()
505 qp->sq.max_gs = (min(dev->dev->caps.max_sq_desc_sz, set_kernel_sq_size()
506 (qp->sq_max_wqes_per_wr << qp->sq.wqe_shift)) - set_kernel_sq_size()
507 send_wqe_overhead(type, qp->flags)) / set_kernel_sq_size()
510 qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) + set_kernel_sq_size()
511 (qp->sq.wqe_cnt << qp->sq.wqe_shift); set_kernel_sq_size()
512 if (qp->rq.wqe_shift > qp->sq.wqe_shift) { set_kernel_sq_size()
513 qp->rq.offset = 0; set_kernel_sq_size()
514 qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift; set_kernel_sq_size()
516 qp->rq.offset = qp->sq.wqe_cnt << qp->sq.wqe_shift; set_kernel_sq_size()
517 qp->sq.offset = 0; set_kernel_sq_size()
520 cap->max_send_wr = qp->sq.max_post = set_kernel_sq_size()
521 (qp->sq.wqe_cnt - qp->sq_spare_wqes) / qp->sq_max_wqes_per_wr; set_kernel_sq_size()
522 cap->max_send_sge = min(qp->sq.max_gs, set_kernel_sq_size()
532 struct mlx4_ib_qp *qp, set_user_sq_size()
542 qp->sq.wqe_cnt = 1 << ucmd->log_sq_bb_count; set_user_sq_size()
543 qp->sq.wqe_shift = ucmd->log_sq_stride; set_user_sq_size()
545 qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) + set_user_sq_size()
546 (qp->sq.wqe_cnt << qp->sq.wqe_shift); set_user_sq_size()
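The set_rq_size()/set_kernel_sq_size() hits above size the mlx4 work queues by rounding the requested depth and SGE count up to powers of two and keeping the per-WQE stride as a shift, so get_recv_wqe()/get_send_wqe() can locate entry n with (n << wqe_shift). A standalone sketch of that sizing arithmetic; the 16-byte scatter-entry size and the helper names are assumptions for the example:

    #include <stdio.h>

    static unsigned roundup_pow2(unsigned v)
    {
        unsigned r = 1;

        while (r < v)
            r <<= 1;
        return r;
    }

    static unsigned ilog2_exact(unsigned v)
    {
        unsigned l = 0;

        while (v >>= 1)
            l++;
        return l;
    }

    /* Depth and scatter/gather count rounded to powers of two; the per-WQE
     * stride is kept as a shift so entry offsets are simple shifts. */
    static void size_recv_queue(unsigned max_recv_wr, unsigned max_recv_sge,
                                unsigned *wqe_cnt, unsigned *max_gs,
                                unsigned *wqe_shift)
    {
        const unsigned seg_bytes = 16;   /* assumed scatter-entry size */

        *wqe_cnt = roundup_pow2(max_recv_wr ? max_recv_wr : 1);
        *max_gs = roundup_pow2(max_recv_sge ? max_recv_sge : 1);
        *wqe_shift = ilog2_exact(*max_gs * seg_bytes);
    }

    int main(void)
    {
        unsigned cnt, gs, shift;

        size_recv_queue(100, 3, &cnt, &gs, &shift);
        printf("wqe_cnt=%u max_gs=%u wqe_shift=%u wqe3_offset=%u\n",
               cnt, gs, shift, 3u << shift);   /* 128, 4, 6, 192 */
        return 0;
    }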
551 static int alloc_proxy_bufs(struct ib_device *dev, struct mlx4_ib_qp *qp) alloc_proxy_bufs() argument
555 qp->sqp_proxy_rcv = alloc_proxy_bufs()
556 kmalloc(sizeof (struct mlx4_ib_buf) * qp->rq.wqe_cnt, alloc_proxy_bufs()
558 if (!qp->sqp_proxy_rcv) alloc_proxy_bufs()
560 for (i = 0; i < qp->rq.wqe_cnt; i++) { alloc_proxy_bufs()
561 qp->sqp_proxy_rcv[i].addr = alloc_proxy_bufs()
564 if (!qp->sqp_proxy_rcv[i].addr) alloc_proxy_bufs()
566 qp->sqp_proxy_rcv[i].map = alloc_proxy_bufs()
567 ib_dma_map_single(dev, qp->sqp_proxy_rcv[i].addr, alloc_proxy_bufs()
570 if (ib_dma_mapping_error(dev, qp->sqp_proxy_rcv[i].map)) { alloc_proxy_bufs()
571 kfree(qp->sqp_proxy_rcv[i].addr); alloc_proxy_bufs()
580 ib_dma_unmap_single(dev, qp->sqp_proxy_rcv[i].map, alloc_proxy_bufs()
583 kfree(qp->sqp_proxy_rcv[i].addr); alloc_proxy_bufs()
585 kfree(qp->sqp_proxy_rcv); alloc_proxy_bufs()
586 qp->sqp_proxy_rcv = NULL; alloc_proxy_bufs()
590 static void free_proxy_bufs(struct ib_device *dev, struct mlx4_ib_qp *qp) free_proxy_bufs() argument
594 for (i = 0; i < qp->rq.wqe_cnt; i++) { free_proxy_bufs()
595 ib_dma_unmap_single(dev, qp->sqp_proxy_rcv[i].map, free_proxy_bufs()
598 kfree(qp->sqp_proxy_rcv[i].addr); free_proxy_bufs()
600 kfree(qp->sqp_proxy_rcv); free_proxy_bufs()
622 struct mlx4_ib_qp *qp) mlx4_ib_free_qp_counter()
624 mutex_lock(&dev->counters_table[qp->port - 1].mutex); mlx4_ib_free_qp_counter()
625 mlx4_counter_free(dev->dev, qp->counter_index->index); mlx4_ib_free_qp_counter()
626 list_del(&qp->counter_index->list); mlx4_ib_free_qp_counter()
627 mutex_unlock(&dev->counters_table[qp->port - 1].mutex); mlx4_ib_free_qp_counter()
629 kfree(qp->counter_index); mlx4_ib_free_qp_counter()
630 qp->counter_index = NULL; mlx4_ib_free_qp_counter()
641 struct mlx4_ib_qp *qp; create_qp_common() local
646 /* When tunneling special qps, we use a plain UD qp */ create_qp_common()
694 qp = &sqp->qp; create_qp_common()
695 qp->pri.vid = 0xFFFF; create_qp_common()
696 qp->alt.vid = 0xFFFF; create_qp_common()
698 qp = kzalloc(sizeof (struct mlx4_ib_qp), gfp); create_qp_common()
699 if (!qp) create_qp_common()
701 qp->pri.vid = 0xFFFF; create_qp_common()
702 qp->alt.vid = 0xFFFF; create_qp_common()
705 qp = *caller_qp; create_qp_common()
707 qp->mlx4_ib_qp_type = qp_type; create_qp_common()
709 mutex_init(&qp->mutex); create_qp_common()
710 spin_lock_init(&qp->sq.lock); create_qp_common()
711 spin_lock_init(&qp->rq.lock); create_qp_common()
712 INIT_LIST_HEAD(&qp->gid_list); create_qp_common()
713 INIT_LIST_HEAD(&qp->steering_rules); create_qp_common()
715 qp->state = IB_QPS_RESET; create_qp_common()
717 qp->sq_signal_bits = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE); create_qp_common()
719 err = set_rq_size(dev, &init_attr->cap, !!pd->uobject, qp_has_rq(init_attr), qp); create_qp_common()
731 qp->sq_no_prefetch = ucmd.sq_no_prefetch; create_qp_common()
733 err = set_user_sq_size(dev, qp, &ucmd); create_qp_common()
737 qp->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr, create_qp_common()
738 qp->buf_size, 0, 0); create_qp_common()
739 if (IS_ERR(qp->umem)) { create_qp_common()
740 err = PTR_ERR(qp->umem); create_qp_common()
744 err = mlx4_mtt_init(dev->dev, ib_umem_page_count(qp->umem), create_qp_common()
745 ilog2(qp->umem->page_size), &qp->mtt); create_qp_common()
749 err = mlx4_ib_umem_write_mtt(dev, &qp->mtt, qp->umem); create_qp_common()
755 ucmd.db_addr, &qp->db); create_qp_common()
760 qp->sq_no_prefetch = 0; create_qp_common()
763 qp->flags |= MLX4_IB_QP_LSO; create_qp_common()
768 qp->flags |= MLX4_IB_QP_NETIF; create_qp_common()
773 err = set_kernel_sq_size(dev, &init_attr->cap, qp_type, qp); create_qp_common()
778 err = mlx4_db_alloc(dev->dev, &qp->db, 0, gfp); create_qp_common()
782 *qp->db.db = 0; create_qp_common()
785 if (mlx4_buf_alloc(dev->dev, qp->buf_size, PAGE_SIZE * 2, &qp->buf, gfp)) { create_qp_common()
790 err = mlx4_mtt_init(dev->dev, qp->buf.npages, qp->buf.page_shift, create_qp_common()
791 &qp->mtt); create_qp_common()
795 err = mlx4_buf_write_mtt(dev->dev, &qp->mtt, &qp->buf, gfp); create_qp_common()
799 qp->sq.wrid = kmalloc(qp->sq.wqe_cnt * sizeof(u64), gfp); create_qp_common()
800 if (!qp->sq.wrid) create_qp_common()
801 qp->sq.wrid = __vmalloc(qp->sq.wqe_cnt * sizeof(u64), create_qp_common()
803 qp->rq.wrid = kmalloc(qp->rq.wqe_cnt * sizeof(u64), gfp); create_qp_common()
804 if (!qp->rq.wrid) create_qp_common()
805 qp->rq.wrid = __vmalloc(qp->rq.wqe_cnt * sizeof(u64), create_qp_common()
807 if (!qp->sq.wrid || !qp->rq.wrid) { create_qp_common()
814 if (qp->mlx4_ib_qp_type & (MLX4_IB_QPT_PROXY_SMI_OWNER | create_qp_common()
816 if (alloc_proxy_bufs(pd->device, qp)) { create_qp_common()
832 if (qp->flags & MLX4_IB_QP_NETIF) create_qp_common()
842 qp->flags |= MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK; create_qp_common()
844 err = mlx4_qp_alloc(dev->dev, qpn, &qp->mqp, gfp); create_qp_common()
849 qp->mqp.qpn |= (1 << 23); create_qp_common()
856 qp->doorbell_qpn = swab32(qp->mqp.qpn << 8); create_qp_common()
858 qp->mqp.event = mlx4_ib_qp_event; create_qp_common()
860 *caller_qp = qp; create_qp_common()
868 list_add_tail(&qp->qps_list, &dev->qp_list); create_qp_common()
873 list_add_tail(&qp->cq_send_list, &mcq->send_qp_list); create_qp_common()
875 list_add_tail(&qp->cq_recv_list, &mcq->recv_qp_list); create_qp_common()
883 if (qp->flags & MLX4_IB_QP_NETIF) create_qp_common()
889 if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI) create_qp_common()
890 free_proxy_bufs(pd->device, qp); create_qp_common()
894 mlx4_ib_db_unmap_user(to_mucontext(pd->uobject->context), &qp->db); create_qp_common()
896 kvfree(qp->sq.wrid); create_qp_common()
897 kvfree(qp->rq.wrid); create_qp_common()
901 mlx4_mtt_cleanup(dev->dev, &qp->mtt); create_qp_common()
905 ib_umem_release(qp->umem); create_qp_common()
907 mlx4_buf_free(dev->dev, qp->buf_size, &qp->buf); create_qp_common()
911 mlx4_db_free(dev->dev, &qp->db); create_qp_common()
915 kfree(qp); create_qp_common()
963 static void del_gid_entries(struct mlx4_ib_qp *qp) del_gid_entries() argument
967 list_for_each_entry_safe(ge, tmp, &qp->gid_list, list) { del_gid_entries()
973 static struct mlx4_ib_pd *get_pd(struct mlx4_ib_qp *qp) get_pd() argument
975 if (qp->ibqp.qp_type == IB_QPT_XRC_TGT) get_pd()
976 return to_mpd(to_mxrcd(qp->ibqp.xrcd)->pd); get_pd()
978 return to_mpd(qp->ibqp.pd); get_pd()
981 static void get_cqs(struct mlx4_ib_qp *qp, get_cqs() argument
984 switch (qp->ibqp.qp_type) { get_cqs()
986 *send_cq = to_mcq(to_mxrcd(qp->ibqp.xrcd)->cq); get_cqs()
990 *send_cq = to_mcq(qp->ibqp.send_cq); get_cqs()
994 *send_cq = to_mcq(qp->ibqp.send_cq); get_cqs()
995 *recv_cq = to_mcq(qp->ibqp.recv_cq); get_cqs()
1000 static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp, destroy_qp_common() argument
1006 if (qp->state != IB_QPS_RESET) { destroy_qp_common()
1007 if (mlx4_qp_modify(dev->dev, NULL, to_mlx4_state(qp->state), destroy_qp_common()
1008 MLX4_QP_STATE_RST, NULL, 0, 0, &qp->mqp)) destroy_qp_common()
1010 qp->mqp.qpn); destroy_qp_common()
1011 if (qp->pri.smac || (!qp->pri.smac && qp->pri.smac_port)) { destroy_qp_common()
1012 mlx4_unregister_mac(dev->dev, qp->pri.smac_port, qp->pri.smac); destroy_qp_common()
1013 qp->pri.smac = 0; destroy_qp_common()
1014 qp->pri.smac_port = 0; destroy_qp_common()
1016 if (qp->alt.smac) { destroy_qp_common()
1017 mlx4_unregister_mac(dev->dev, qp->alt.smac_port, qp->alt.smac); destroy_qp_common()
1018 qp->alt.smac = 0; destroy_qp_common()
1020 if (qp->pri.vid < 0x1000) { destroy_qp_common()
1021 mlx4_unregister_vlan(dev->dev, qp->pri.vlan_port, qp->pri.vid); destroy_qp_common()
1022 qp->pri.vid = 0xFFFF; destroy_qp_common()
1023 qp->pri.candidate_vid = 0xFFFF; destroy_qp_common()
1024 qp->pri.update_vid = 0; destroy_qp_common()
1026 if (qp->alt.vid < 0x1000) { destroy_qp_common()
1027 mlx4_unregister_vlan(dev->dev, qp->alt.vlan_port, qp->alt.vid); destroy_qp_common()
1028 qp->alt.vid = 0xFFFF; destroy_qp_common()
1029 qp->alt.candidate_vid = 0xFFFF; destroy_qp_common()
1030 qp->alt.update_vid = 0; destroy_qp_common()
1034 get_cqs(qp, &send_cq, &recv_cq); destroy_qp_common()
1040 list_del(&qp->qps_list); destroy_qp_common()
1041 list_del(&qp->cq_send_list); destroy_qp_common()
1042 list_del(&qp->cq_recv_list); destroy_qp_common()
1044 __mlx4_ib_cq_clean(recv_cq, qp->mqp.qpn, destroy_qp_common()
1045 qp->ibqp.srq ? to_msrq(qp->ibqp.srq): NULL); destroy_qp_common()
1047 __mlx4_ib_cq_clean(send_cq, qp->mqp.qpn, NULL); destroy_qp_common()
1050 mlx4_qp_remove(dev->dev, &qp->mqp); destroy_qp_common()
1055 mlx4_qp_free(dev->dev, &qp->mqp); destroy_qp_common()
1057 if (!is_sqp(dev, qp) && !is_tunnel_qp(dev, qp)) { destroy_qp_common()
1058 if (qp->flags & MLX4_IB_QP_NETIF) destroy_qp_common()
1059 mlx4_ib_steer_qp_free(dev, qp->mqp.qpn, 1); destroy_qp_common()
1061 mlx4_qp_release_range(dev->dev, qp->mqp.qpn, 1); destroy_qp_common()
1064 mlx4_mtt_cleanup(dev->dev, &qp->mtt); destroy_qp_common()
1067 if (qp->rq.wqe_cnt) destroy_qp_common()
1068 mlx4_ib_db_unmap_user(to_mucontext(qp->ibqp.uobject->context), destroy_qp_common()
1069 &qp->db); destroy_qp_common()
1070 ib_umem_release(qp->umem); destroy_qp_common()
1072 kvfree(qp->sq.wrid); destroy_qp_common()
1073 kvfree(qp->rq.wrid); destroy_qp_common()
1074 if (qp->mlx4_ib_qp_type & (MLX4_IB_QPT_PROXY_SMI_OWNER | destroy_qp_common()
1076 free_proxy_bufs(&dev->ib_dev, qp); destroy_qp_common()
1077 mlx4_buf_free(dev->dev, qp->buf_size, &qp->buf); destroy_qp_common()
1078 if (qp->rq.wqe_cnt) destroy_qp_common()
1079 mlx4_db_free(dev->dev, &qp->db); destroy_qp_common()
1082 del_gid_entries(qp); destroy_qp_common()
1106 struct mlx4_ib_qp *qp = NULL; mlx4_ib_create_qp() local
1155 qp = kzalloc(sizeof *qp, gfp); mlx4_ib_create_qp()
1156 if (!qp) mlx4_ib_create_qp()
1158 qp->pri.vid = 0xFFFF; mlx4_ib_create_qp()
1159 qp->alt.vid = 0xFFFF; mlx4_ib_create_qp()
1164 udata, 0, &qp, gfp); mlx4_ib_create_qp()
1168 qp->ibqp.qp_num = qp->mqp.qpn; mlx4_ib_create_qp()
1169 qp->xrcdn = xrcdn; mlx4_ib_create_qp()
1182 &qp, gfp); mlx4_ib_create_qp()
1186 qp->port = init_attr->port_num; mlx4_ib_create_qp()
1187 qp->ibqp.qp_num = init_attr->qp_type == IB_QPT_SMI ? 0 : 1; mlx4_ib_create_qp()
1196 return &qp->ibqp; mlx4_ib_create_qp()
1199 int mlx4_ib_destroy_qp(struct ib_qp *qp) mlx4_ib_destroy_qp() argument
1201 struct mlx4_ib_dev *dev = to_mdev(qp->device); mlx4_ib_destroy_qp()
1202 struct mlx4_ib_qp *mqp = to_mqp(qp); mlx4_ib_destroy_qp()
1252 static __be32 to_mlx4_access_flags(struct mlx4_ib_qp *qp, const struct ib_qp_attr *attr, to_mlx4_access_flags() argument
1262 dest_rd_atomic = qp->resp_depth; to_mlx4_access_flags()
1267 access_flags = qp->atomic_rd_en; to_mlx4_access_flags()
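to_mlx4_access_flags() above folds the requested attributes over the QP's current state: a field is taken from attr only when its bit is set in attr_mask, otherwise the cached value (qp->resp_depth, qp->atomic_rd_en) is reused. A tiny standalone sketch of that mask-driven merge, with made-up flag and struct names:

    #include <stdio.h>

    enum {
        ATTR_MAX_DEST_RD_ATOMIC = 1 << 0,
        ATTR_ACCESS_FLAGS       = 1 << 1,
    };

    struct qp_cached { unsigned resp_depth; unsigned access_flags; };

    static struct qp_cached merge_attrs(struct qp_cached cur,
                                        struct qp_cached req, unsigned mask)
    {
        struct qp_cached out = cur;

        if (mask & ATTR_MAX_DEST_RD_ATOMIC)
            out.resp_depth = req.resp_depth;
        if (mask & ATTR_ACCESS_FLAGS)
            out.access_flags = req.access_flags;
        return out;
    }

    int main(void)
    {
        struct qp_cached cur = { .resp_depth = 4, .access_flags = 0x1 };
        struct qp_cached req = { .resp_depth = 16, .access_flags = 0x7 };
        struct qp_cached out = merge_attrs(cur, req, ATTR_ACCESS_FLAGS);

        /* Only the access flags change; resp_depth keeps the cached value. */
        printf("resp_depth=%u access_flags=%#x\n", out.resp_depth, out.access_flags);
        return 0;
    }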
1364 /* no current vlan tag in qp */ _mlx4_set_path()
1377 /* have current vlan tag. unregister it at modify-qp success */ _mlx4_set_path()
1416 static int mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_qp_attr *qp, mlx4_set_path() argument
1422 return _mlx4_set_path(dev, &qp->ah_attr, mlx4_set_path()
1429 const struct ib_qp_attr *qp, mlx4_set_alt_path()
1434 return _mlx4_set_path(dev, &qp->alt_ah_attr, mlx4_set_alt_path()
1440 static void update_mcg_macs(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp) update_mcg_macs() argument
1444 list_for_each_entry_safe(ge, tmp, &qp->gid_list, list) { update_mcg_macs()
1445 if (!ge->added && mlx4_ib_add_mc(dev, qp, &ge->gid)) { update_mcg_macs()
1447 ge->port = qp->port; update_mcg_macs()
1453 struct mlx4_ib_qp *qp, handle_eth_ud_smac_index()
1459 u64_mac = atomic64_read(&dev->iboe.mac[qp->port - 1]); handle_eth_ud_smac_index()
1461 context->pri_path.sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE | ((qp->port - 1) << 6); handle_eth_ud_smac_index()
1462 if (!qp->pri.smac && !qp->pri.smac_port) { handle_eth_ud_smac_index()
1463 smac_index = mlx4_register_mac(dev->dev, qp->port, u64_mac); handle_eth_ud_smac_index()
1465 qp->pri.candidate_smac_index = smac_index; handle_eth_ud_smac_index()
1466 qp->pri.candidate_smac = u64_mac; handle_eth_ud_smac_index()
1467 qp->pri.candidate_smac_port = qp->port; handle_eth_ud_smac_index()
1476 static int create_qp_lb_counter(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp) create_qp_lb_counter() argument
1482 if (rdma_port_get_link_layer(&dev->ib_dev, qp->port) != create_qp_lb_counter()
1484 !(qp->flags & MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK) || create_qp_lb_counter()
1500 qp->counter_index = new_counter_index; create_qp_lb_counter()
1502 mutex_lock(&dev->counters_table[qp->port - 1].mutex); create_qp_lb_counter()
1504 &dev->counters_table[qp->port - 1].counters_list); create_qp_lb_counter()
1505 mutex_unlock(&dev->counters_table[qp->port - 1].mutex); create_qp_lb_counter()
1515 struct mlx4_ib_qp *qp = to_mqp(ibqp); __mlx4_ib_modify_qp() local
1527 rdma_port_get_link_layer(&dev->ib_dev, qp->port) == __mlx4_ib_modify_qp()
1536 (to_mlx4_st(dev, qp->mlx4_ib_qp_type) << 16)); __mlx4_ib_modify_qp()
1560 if (qp->flags & MLX4_IB_QP_LSO) __mlx4_ib_modify_qp()
1575 if (qp->rq.wqe_cnt) __mlx4_ib_modify_qp()
1576 context->rq_size_stride = ilog2(qp->rq.wqe_cnt) << 3; __mlx4_ib_modify_qp()
1577 context->rq_size_stride |= qp->rq.wqe_shift - 4; __mlx4_ib_modify_qp()
1579 if (qp->sq.wqe_cnt) __mlx4_ib_modify_qp()
1580 context->sq_size_stride = ilog2(qp->sq.wqe_cnt) << 3; __mlx4_ib_modify_qp()
1581 context->sq_size_stride |= qp->sq.wqe_shift - 4; __mlx4_ib_modify_qp()
1583 if (new_state == IB_QPS_RESET && qp->counter_index) __mlx4_ib_modify_qp()
1584 mlx4_ib_free_qp_counter(dev, qp); __mlx4_ib_modify_qp()
1587 context->sq_size_stride |= !!qp->sq_no_prefetch << 7; __mlx4_ib_modify_qp()
1588 context->xrcd = cpu_to_be32((u32) qp->xrcdn); __mlx4_ib_modify_qp()
1593 if (qp->ibqp.uobject) __mlx4_ib_modify_qp()
1610 err = create_qp_lb_counter(dev, qp); __mlx4_ib_modify_qp()
1615 dev->counters_table[qp->port - 1].default_counter; __mlx4_ib_modify_qp()
1616 if (qp->counter_index) __mlx4_ib_modify_qp()
1617 counter_index = qp->counter_index->index; __mlx4_ib_modify_qp()
1622 if (qp->counter_index) { __mlx4_ib_modify_qp()
1632 if (qp->flags & MLX4_IB_QP_NETIF) { __mlx4_ib_modify_qp()
1633 mlx4_ib_steer_qp_reg(dev, qp, 1); __mlx4_ib_modify_qp()
1639 if (qp->mlx4_ib_qp_type & MLX4_IB_QPT_ANY_SRIOV) __mlx4_ib_modify_qp()
1647 attr_mask & IB_QP_PORT ? attr->port_num : qp->port; __mlx4_ib_modify_qp()
1671 if (mlx4_set_path(dev, attr, attr_mask, qp, &context->pri_path, __mlx4_ib_modify_qp()
1693 if (mlx4_set_alt_path(dev, attr, attr_mask, qp, __mlx4_ib_modify_qp()
1703 pd = get_pd(qp); __mlx4_ib_modify_qp()
1704 get_cqs(qp, &send_cq, &recv_cq); __mlx4_ib_modify_qp()
1711 if (!qp->ibqp.uobject) __mlx4_ib_modify_qp()
1742 context->params2 |= to_mlx4_access_flags(qp, attr, attr_mask); __mlx4_ib_modify_qp()
1756 /* proxy and tunnel qp qkeys will be changed in modify-qp wrappers */ __mlx4_ib_modify_qp()
1758 if (qp->mlx4_ib_qp_type & __mlx4_ib_modify_qp()
1763 !(qp->mlx4_ib_qp_type & MLX4_IB_QPT_ANY_SRIOV) && __mlx4_ib_modify_qp()
1780 if (qp->rq.wqe_cnt && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) __mlx4_ib_modify_qp()
1781 context->db_rec_addr = cpu_to_be64(qp->db.dma); __mlx4_ib_modify_qp()
1788 context->pri_path.sched_queue = (qp->port - 1) << 6; __mlx4_ib_modify_qp()
1789 if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_SMI || __mlx4_ib_modify_qp()
1790 qp->mlx4_ib_qp_type & __mlx4_ib_modify_qp()
1793 if (qp->mlx4_ib_qp_type != MLX4_IB_QPT_SMI) __mlx4_ib_modify_qp()
1796 if (qp->mlx4_ib_qp_type & MLX4_IB_QPT_ANY_SRIOV) __mlx4_ib_modify_qp()
1800 if (rdma_port_get_link_layer(&dev->ib_dev, qp->port) == __mlx4_ib_modify_qp()
1802 if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_TUN_GSI || __mlx4_ib_modify_qp()
1803 qp->mlx4_ib_qp_type == MLX4_IB_QPT_GSI) __mlx4_ib_modify_qp()
1806 if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_UD || __mlx4_ib_modify_qp()
1807 qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI || __mlx4_ib_modify_qp()
1808 qp->mlx4_ib_qp_type == MLX4_IB_QPT_TUN_GSI) { __mlx4_ib_modify_qp()
1809 err = handle_eth_ud_smac_index(dev, qp, context); __mlx4_ib_modify_qp()
1814 if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI) __mlx4_ib_modify_qp()
1815 dev->qp1_proxy[qp->port - 1] = qp; __mlx4_ib_modify_qp()
1820 if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET) { __mlx4_ib_modify_qp()
1832 &dev->ib_dev, qp->port) == __mlx4_ib_modify_qp()
1860 for (i = 0; i < qp->sq.wqe_cnt; ++i) { __mlx4_ib_modify_qp()
1861 ctrl = get_send_wqe(qp, i); __mlx4_ib_modify_qp()
1863 if (qp->sq_max_wqes_per_wr == 1) __mlx4_ib_modify_qp()
1864 ctrl->fence_size = 1 << (qp->sq.wqe_shift - 4); __mlx4_ib_modify_qp()
1866 stamp_send_wqe(qp, i, 1 << qp->sq.wqe_shift); __mlx4_ib_modify_qp()
1870 err = mlx4_qp_modify(dev->dev, &qp->mtt, to_mlx4_state(cur_state), __mlx4_ib_modify_qp()
1872 sqd_event, &qp->mqp); __mlx4_ib_modify_qp()
1876 qp->state = new_state; __mlx4_ib_modify_qp()
1879 qp->atomic_rd_en = attr->qp_access_flags; __mlx4_ib_modify_qp()
1881 qp->resp_depth = attr->max_dest_rd_atomic; __mlx4_ib_modify_qp()
1883 qp->port = attr->port_num; __mlx4_ib_modify_qp()
1884 update_mcg_macs(dev, qp); __mlx4_ib_modify_qp()
1887 qp->alt_port = attr->alt_port_num; __mlx4_ib_modify_qp()
1889 if (is_sqp(dev, qp)) __mlx4_ib_modify_qp()
1890 store_sqp_attrs(to_msqp(qp), attr, attr_mask); __mlx4_ib_modify_qp()
1896 if (is_qp0(dev, qp)) { __mlx4_ib_modify_qp()
1898 if (mlx4_INIT_PORT(dev->dev, qp->port)) __mlx4_ib_modify_qp()
1900 qp->port); __mlx4_ib_modify_qp()
1904 mlx4_CLOSE_PORT(dev->dev, qp->port); __mlx4_ib_modify_qp()
1913 mlx4_ib_cq_clean(recv_cq, qp->mqp.qpn, __mlx4_ib_modify_qp()
1916 mlx4_ib_cq_clean(send_cq, qp->mqp.qpn, NULL); __mlx4_ib_modify_qp()
1918 qp->rq.head = 0; __mlx4_ib_modify_qp()
1919 qp->rq.tail = 0; __mlx4_ib_modify_qp()
1920 qp->sq.head = 0; __mlx4_ib_modify_qp()
1921 qp->sq.tail = 0; __mlx4_ib_modify_qp()
1922 qp->sq_next_wqe = 0; __mlx4_ib_modify_qp()
1923 if (qp->rq.wqe_cnt) __mlx4_ib_modify_qp()
1924 *qp->db.db = 0; __mlx4_ib_modify_qp()
1926 if (qp->flags & MLX4_IB_QP_NETIF) __mlx4_ib_modify_qp()
1927 mlx4_ib_steer_qp_reg(dev, qp, 0); __mlx4_ib_modify_qp()
1929 if (qp->pri.smac || (!qp->pri.smac && qp->pri.smac_port)) { __mlx4_ib_modify_qp()
1930 mlx4_unregister_mac(dev->dev, qp->pri.smac_port, qp->pri.smac); __mlx4_ib_modify_qp()
1931 qp->pri.smac = 0; __mlx4_ib_modify_qp()
1932 qp->pri.smac_port = 0; __mlx4_ib_modify_qp()
1934 if (qp->alt.smac) { __mlx4_ib_modify_qp()
1935 mlx4_unregister_mac(dev->dev, qp->alt.smac_port, qp->alt.smac); __mlx4_ib_modify_qp()
1936 qp->alt.smac = 0; __mlx4_ib_modify_qp()
1938 if (qp->pri.vid < 0x1000) { __mlx4_ib_modify_qp()
1939 mlx4_unregister_vlan(dev->dev, qp->pri.vlan_port, qp->pri.vid); __mlx4_ib_modify_qp()
1940 qp->pri.vid = 0xFFFF; __mlx4_ib_modify_qp()
1941 qp->pri.candidate_vid = 0xFFFF; __mlx4_ib_modify_qp()
1942 qp->pri.update_vid = 0; __mlx4_ib_modify_qp()
1945 if (qp->alt.vid < 0x1000) { __mlx4_ib_modify_qp()
1946 mlx4_unregister_vlan(dev->dev, qp->alt.vlan_port, qp->alt.vid); __mlx4_ib_modify_qp()
1947 qp->alt.vid = 0xFFFF; __mlx4_ib_modify_qp()
1948 qp->alt.candidate_vid = 0xFFFF; __mlx4_ib_modify_qp()
1949 qp->alt.update_vid = 0; __mlx4_ib_modify_qp()
1953 if (err && qp->counter_index) __mlx4_ib_modify_qp()
1954 mlx4_ib_free_qp_counter(dev, qp); __mlx4_ib_modify_qp()
1956 mlx4_ib_steer_qp_reg(dev, qp, 0); __mlx4_ib_modify_qp()
1958 if (qp->pri.candidate_smac || __mlx4_ib_modify_qp()
1959 (!qp->pri.candidate_smac && qp->pri.candidate_smac_port)) { __mlx4_ib_modify_qp()
1961 mlx4_unregister_mac(dev->dev, qp->pri.candidate_smac_port, qp->pri.candidate_smac); __mlx4_ib_modify_qp()
1963 if (qp->pri.smac || (!qp->pri.smac && qp->pri.smac_port)) __mlx4_ib_modify_qp()
1964 mlx4_unregister_mac(dev->dev, qp->pri.smac_port, qp->pri.smac); __mlx4_ib_modify_qp()
1965 qp->pri.smac = qp->pri.candidate_smac; __mlx4_ib_modify_qp()
1966 qp->pri.smac_index = qp->pri.candidate_smac_index; __mlx4_ib_modify_qp()
1967 qp->pri.smac_port = qp->pri.candidate_smac_port; __mlx4_ib_modify_qp()
1969 qp->pri.candidate_smac = 0; __mlx4_ib_modify_qp()
1970 qp->pri.candidate_smac_index = 0; __mlx4_ib_modify_qp()
1971 qp->pri.candidate_smac_port = 0; __mlx4_ib_modify_qp()
1973 if (qp->alt.candidate_smac) { __mlx4_ib_modify_qp()
1975 mlx4_unregister_mac(dev->dev, qp->alt.candidate_smac_port, qp->alt.candidate_smac); __mlx4_ib_modify_qp()
1977 if (qp->alt.smac) __mlx4_ib_modify_qp()
1978 mlx4_unregister_mac(dev->dev, qp->alt.smac_port, qp->alt.smac); __mlx4_ib_modify_qp()
1979 qp->alt.smac = qp->alt.candidate_smac; __mlx4_ib_modify_qp()
1980 qp->alt.smac_index = qp->alt.candidate_smac_index; __mlx4_ib_modify_qp()
1981 qp->alt.smac_port = qp->alt.candidate_smac_port; __mlx4_ib_modify_qp()
1983 qp->alt.candidate_smac = 0; __mlx4_ib_modify_qp()
1984 qp->alt.candidate_smac_index = 0; __mlx4_ib_modify_qp()
1985 qp->alt.candidate_smac_port = 0; __mlx4_ib_modify_qp()
1988 if (qp->pri.update_vid) { __mlx4_ib_modify_qp()
1990 if (qp->pri.candidate_vid < 0x1000) __mlx4_ib_modify_qp()
1991 mlx4_unregister_vlan(dev->dev, qp->pri.candidate_vlan_port, __mlx4_ib_modify_qp()
1992 qp->pri.candidate_vid); __mlx4_ib_modify_qp()
1994 if (qp->pri.vid < 0x1000) __mlx4_ib_modify_qp()
1995 mlx4_unregister_vlan(dev->dev, qp->pri.vlan_port, __mlx4_ib_modify_qp()
1996 qp->pri.vid); __mlx4_ib_modify_qp()
1997 qp->pri.vid = qp->pri.candidate_vid; __mlx4_ib_modify_qp()
1998 qp->pri.vlan_port = qp->pri.candidate_vlan_port; __mlx4_ib_modify_qp()
1999 qp->pri.vlan_index = qp->pri.candidate_vlan_index; __mlx4_ib_modify_qp()
2001 qp->pri.candidate_vid = 0xFFFF; __mlx4_ib_modify_qp()
2002 qp->pri.update_vid = 0; __mlx4_ib_modify_qp()
2005 if (qp->alt.update_vid) { __mlx4_ib_modify_qp()
2007 if (qp->alt.candidate_vid < 0x1000) __mlx4_ib_modify_qp()
2008 mlx4_unregister_vlan(dev->dev, qp->alt.candidate_vlan_port, __mlx4_ib_modify_qp()
2009 qp->alt.candidate_vid); __mlx4_ib_modify_qp()
2011 if (qp->alt.vid < 0x1000) __mlx4_ib_modify_qp()
2012 mlx4_unregister_vlan(dev->dev, qp->alt.vlan_port, __mlx4_ib_modify_qp()
2013 qp->alt.vid); __mlx4_ib_modify_qp()
2014 qp->alt.vid = qp->alt.candidate_vid; __mlx4_ib_modify_qp()
2015 qp->alt.vlan_port = qp->alt.candidate_vlan_port; __mlx4_ib_modify_qp()
2016 qp->alt.vlan_index = qp->alt.candidate_vlan_index; __mlx4_ib_modify_qp()
2018 qp->alt.candidate_vid = 0xFFFF; __mlx4_ib_modify_qp()
2019 qp->alt.update_vid = 0; __mlx4_ib_modify_qp()
2029 struct mlx4_ib_qp *qp = to_mqp(ibqp); mlx4_ib_modify_qp() local
2033 mutex_lock(&qp->mutex); mlx4_ib_modify_qp()
2035 cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state; mlx4_ib_modify_qp()
2041 int port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port; mlx4_ib_modify_qp()
2086 int p = attr_mask & IB_QP_PORT ? attr->port_num : qp->port; mlx4_ib_modify_qp()
2125 mutex_unlock(&qp->mutex); mlx4_ib_modify_qp()
2146 struct mlx4_ib_dev *mdev = to_mdev(sqp->qp.ibqp.device); build_sriov_qp0_header()
2168 if (sqp->qp.mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_SMI_OWNER) build_sriov_qp0_header()
2173 if (sqp->qp.mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_SMI_OWNER) { build_sriov_qp0_header()
2190 ib_get_cached_pkey(ib_dev, sqp->qp.port, 0, &pkey); build_sriov_qp0_header()
2192 if (sqp->qp.mlx4_ib_qp_type == MLX4_IB_QPT_TUN_SMI_OWNER) build_sriov_qp0_header()
2196 cpu_to_be32(mdev->dev->caps.qp0_tunnel[sqp->qp.port - 1]); build_sriov_qp0_header()
2200 if (mlx4_get_parav_qkey(mdev->dev, sqp->qp.mqp.qpn, &qkey)) build_sriov_qp0_header()
2203 if (vf_get_qp0_qkey(mdev->dev, sqp->qp.mqp.qpn, &qkey)) build_sriov_qp0_header()
2207 sqp->ud_header.deth.source_qpn = cpu_to_be32(sqp->qp.mqp.qpn); build_sriov_qp0_header()
2268 struct ib_device *ib_dev = sqp->qp.ibqp.device; build_mlx_header()
2289 is_eth = rdma_port_get_link_layer(sqp->qp.ibqp.device, sqp->qp.port) == IB_LINK_LAYER_ETHERNET; build_mlx_header()
2340 to_mdev(ib_dev)->sriov.demux[sqp->qp.port - 1]. build_mlx_header()
2343 to_mdev(ib_dev)->sriov.demux[sqp->qp.port - 1]. build_mlx_header()
2358 mlx->flags |= cpu_to_be32((!sqp->qp.ibqp.qp_num ? MLX4_WQE_MLX_VL15 : 0) | build_mlx_header()
2395 u64 mac = atomic64_read(&to_mdev(ib_dev)->iboe.mac[sqp->qp.port - 1]); build_mlx_header()
2414 sqp->ud_header.lrh.virtual_lane = !sqp->qp.ibqp.qp_num ? 15 : 0; build_mlx_header()
2419 if (!sqp->qp.ibqp.qp_num) build_mlx_header()
2420 ib_get_cached_pkey(ib_dev, sqp->qp.port, sqp->pkey_index, &pkey); build_mlx_header()
2422 ib_get_cached_pkey(ib_dev, sqp->qp.port, wr->pkey_index, &pkey); build_mlx_header()
2428 sqp->ud_header.deth.source_qpn = cpu_to_be32(sqp->qp.ibqp.qp_num); build_mlx_header()
2707 struct mlx4_ib_qp *qp, unsigned *lso_seg_len, build_lso_seg()
2715 if (unlikely(!(qp->flags & MLX4_IB_QP_LSO) && build_lso_seg()
2716 wr->wr.num_sge > qp->sq.max_gs - (halign >> 4))) build_lso_seg()
2751 struct mlx4_ib_qp *qp = to_mqp(ibqp); mlx4_ib_post_send() local
2769 spin_lock_irqsave(&qp->sq.lock, flags); mlx4_ib_post_send()
2777 ind = qp->sq_next_wqe; mlx4_ib_post_send()
2783 if (mlx4_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) { mlx4_ib_post_send()
2789 if (unlikely(wr->num_sge > qp->sq.max_gs)) { mlx4_ib_post_send()
2795 ctrl = wqe = get_send_wqe(qp, ind & (qp->sq.wqe_cnt - 1)); mlx4_ib_post_send()
2796 qp->sq.wrid[(qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1)] = wr->wr_id; mlx4_ib_post_send()
2806 qp->sq_signal_bits; mlx4_ib_post_send()
2813 switch (qp->mlx4_ib_qp_type) { mlx4_ib_post_send()
2884 err = build_sriov_qp0_header(to_msqp(qp), ud_wr(wr), mlx4_ib_post_send()
2895 /* this is a UD qp used in MAD responses to slaves. */ mlx4_ib_post_send()
2908 err = build_lso_seg(wqe, ud_wr(wr), qp, &seglen, mlx4_ib_post_send()
2921 err = build_sriov_qp0_header(to_msqp(qp), ud_wr(wr), mlx4_ib_post_send()
2939 /* If we are tunneling special qps, this is a UD qp. mlx4_ib_post_send()
2941 * the tunnel qp, and then add a header with address mlx4_ib_post_send()
2945 qp->mlx4_ib_qp_type); mlx4_ib_post_send()
2955 err = build_mlx_header(to_msqp(qp), ud_wr(wr), ctrl, mlx4_ib_post_send()
2981 if (unlikely(qp->mlx4_ib_qp_type == MLX4_IB_QPT_SMI || mlx4_ib_post_send()
2982 qp->mlx4_ib_qp_type == MLX4_IB_QPT_GSI || mlx4_ib_post_send()
2983 qp->mlx4_ib_qp_type & mlx4_ib_post_send()
3017 (ind & qp->sq.wqe_cnt ? cpu_to_be32(1 << 31) : 0) | blh; mlx4_ib_post_send()
3019 stamp = ind + qp->sq_spare_wqes; mlx4_ib_post_send()
3020 ind += DIV_ROUND_UP(size * 16, 1U << qp->sq.wqe_shift); mlx4_ib_post_send()
3032 stamp_send_wqe(qp, stamp, size * 16); mlx4_ib_post_send()
3033 ind = pad_wraparound(qp, ind); mlx4_ib_post_send()
3039 qp->sq.head += nreq; mlx4_ib_post_send()
3047 writel(qp->doorbell_qpn, mlx4_ib_post_send()
3056 stamp_send_wqe(qp, stamp, size * 16); mlx4_ib_post_send()
3058 ind = pad_wraparound(qp, ind); mlx4_ib_post_send()
3059 qp->sq_next_wqe = ind; mlx4_ib_post_send()
3062 spin_unlock_irqrestore(&qp->sq.lock, flags); mlx4_ib_post_send()
3070 struct mlx4_ib_qp *qp = to_mqp(ibqp); mlx4_ib_post_recv() local
3080 max_gs = qp->rq.max_gs; mlx4_ib_post_recv()
3081 spin_lock_irqsave(&qp->rq.lock, flags); mlx4_ib_post_recv()
3090 ind = qp->rq.head & (qp->rq.wqe_cnt - 1); mlx4_ib_post_recv()
3093 if (mlx4_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) { mlx4_ib_post_recv()
3099 if (unlikely(wr->num_sge > qp->rq.max_gs)) { mlx4_ib_post_recv()
3105 scat = get_recv_wqe(qp, ind); mlx4_ib_post_recv()
3107 if (qp->mlx4_ib_qp_type & (MLX4_IB_QPT_PROXY_SMI_OWNER | mlx4_ib_post_recv()
3110 qp->sqp_proxy_rcv[ind].map, mlx4_ib_post_recv()
3117 scat->addr = cpu_to_be64(qp->sqp_proxy_rcv[ind].map); mlx4_ib_post_recv()
3131 qp->rq.wrid[ind] = wr->wr_id; mlx4_ib_post_recv()
3133 ind = (ind + 1) & (qp->rq.wqe_cnt - 1); mlx4_ib_post_recv()
3138 qp->rq.head += nreq; mlx4_ib_post_recv()
3146 *qp->db.db = cpu_to_be32(qp->rq.head & 0xffff); mlx4_ib_post_recv()
3149 spin_unlock_irqrestore(&qp->rq.lock, flags); mlx4_ib_post_recv()
3233 struct mlx4_ib_qp *qp = to_mqp(ibqp); mlx4_ib_query_qp() local
3238 mutex_lock(&qp->mutex); mlx4_ib_query_qp()
3240 if (qp->state == IB_QPS_RESET) { mlx4_ib_query_qp()
3245 err = mlx4_qp_query(dev->dev, &qp->mqp, &context); mlx4_ib_query_qp()
3253 qp->state = to_ib_qp_state(mlx4_state); mlx4_ib_query_qp()
3254 qp_attr->qp_state = qp->state; mlx4_ib_query_qp()
3265 if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC) { mlx4_ib_query_qp()
3274 qp_attr->port_num = qp->port; mlx4_ib_query_qp()
3278 /* qp_attr->en_sqd_async_notify is only applicable in modify qp */ mlx4_ib_query_qp()
3294 qp_attr->cap.max_recv_wr = qp->rq.wqe_cnt; mlx4_ib_query_qp()
3295 qp_attr->cap.max_recv_sge = qp->rq.max_gs; mlx4_ib_query_qp()
3298 qp_attr->cap.max_send_wr = qp->sq.wqe_cnt; mlx4_ib_query_qp()
3299 qp_attr->cap.max_send_sge = qp->sq.max_gs; mlx4_ib_query_qp()
3314 if (qp->flags & MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK) mlx4_ib_query_qp()
3317 if (qp->flags & MLX4_IB_QP_LSO) mlx4_ib_query_qp()
3320 if (qp->flags & MLX4_IB_QP_NETIF) mlx4_ib_query_qp()
3324 qp->sq_signal_bits == cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE) ? mlx4_ib_query_qp()
3328 mutex_unlock(&qp->mutex); mlx4_ib_query_qp()
378 set_rq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap, int is_user, int has_rq, struct mlx4_ib_qp *qp) set_rq_size() argument
416 set_kernel_sq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap, enum mlx4_ib_qp_type type, struct mlx4_ib_qp *qp) set_kernel_sq_size() argument
531 set_user_sq_size(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp, struct mlx4_ib_create_qp *ucmd) set_user_sq_size() argument
621 mlx4_ib_free_qp_counter(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp) mlx4_ib_free_qp_counter() argument
1428 mlx4_set_alt_path(struct mlx4_ib_dev *dev, const struct ib_qp_attr *qp, enum ib_qp_attr_mask qp_attr_mask, struct mlx4_ib_qp *mqp, struct mlx4_qp_path *path, u8 port) mlx4_set_alt_path() argument
1452 handle_eth_ud_smac_index(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp, struct mlx4_qp_context *context) handle_eth_ud_smac_index() argument
2706 build_lso_seg(struct mlx4_wqe_lso_seg *wqe, struct ib_ud_wr *wr, struct mlx4_ib_qp *qp, unsigned *lso_seg_len, __be32 *lso_hdr_sz, __be32 *blh) build_lso_seg() argument
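
The __mlx4_ib_modify_qp() lines above are the device-side half of the generic RESET->INIT->RTR->RTS transition that every verbs consumer drives through ib_modify_qp(). The sketch below shows the caller side for a kernel RC QP; it is a minimal illustration, and the remote LID/QPN/PSN plus the timeout/retry/MTU choices are placeholder assumptions rather than values taken from this driver.

#include <linux/string.h>
#include <rdma/ib_verbs.h>

/* Hedged sketch: bring a kernel RC QP from RESET to RTS.
 * remote_lid/remote_qpn/remote_psn are assumed to come from an
 * out-of-band exchange (e.g. RDMA CM); all numeric tunables here
 * are illustrative.
 */
static int rc_qp_to_rts(struct ib_qp *qp, u8 port, u16 remote_lid,
                        u32 remote_qpn, u32 remote_psn)
{
        struct ib_qp_attr attr;
        int ret;

        /* RESET -> INIT: pick port, pkey index and access rights */
        memset(&attr, 0, sizeof(attr));
        attr.qp_state        = IB_QPS_INIT;
        attr.pkey_index      = 0;
        attr.port_num        = port;
        attr.qp_access_flags = IB_ACCESS_REMOTE_READ | IB_ACCESS_REMOTE_WRITE;
        ret = ib_modify_qp(qp, &attr, IB_QP_STATE | IB_QP_PKEY_INDEX |
                           IB_QP_PORT | IB_QP_ACCESS_FLAGS);
        if (ret)
                return ret;

        /* INIT -> RTR: describe the remote peer and receive resources */
        memset(&attr, 0, sizeof(attr));
        attr.qp_state           = IB_QPS_RTR;
        attr.path_mtu           = IB_MTU_1024;
        attr.dest_qp_num        = remote_qpn;
        attr.rq_psn             = remote_psn;
        attr.max_dest_rd_atomic = 1;
        attr.min_rnr_timer      = 12;
        attr.ah_attr.dlid       = remote_lid;
        attr.ah_attr.sl         = 0;
        attr.ah_attr.port_num   = port;
        ret = ib_modify_qp(qp, &attr, IB_QP_STATE | IB_QP_AV |
                           IB_QP_PATH_MTU | IB_QP_DEST_QPN | IB_QP_RQ_PSN |
                           IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_MIN_RNR_TIMER);
        if (ret)
                return ret;

        /* RTR -> RTS: enable the send side */
        memset(&attr, 0, sizeof(attr));
        attr.qp_state      = IB_QPS_RTS;
        attr.timeout       = 14;
        attr.retry_cnt     = 7;
        attr.rnr_retry     = 7;
        attr.sq_psn        = 0;
        attr.max_rd_atomic = 1;
        return ib_modify_qp(qp, &attr, IB_QP_STATE | IB_QP_TIMEOUT |
                            IB_QP_RETRY_CNT | IB_QP_RNR_RETRY |
                            IB_QP_SQ_PSN | IB_QP_MAX_QP_RD_ATOMIC);
}

Each ib_modify_qp() call ends up in the driver's __mlx4_ib_modify_qp() (or the mlx5/mthca equivalent), which translates the attribute mask into a firmware QP context as in the listing above.
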
H A Dmad.c157 ext_info->my_qpn = cpu_to_be32(in_wc->qp->qp_num); mlx4_MAD_IFC()
200 new_ah = ib_create_ah(dev->send_agent[port_num - 1][0]->qp->pd, update_sm_ah()
480 /* check if proxy qp created */ mlx4_ib_send_to_slave()
485 tun_qp = &tun_ctx->qp[0]; mlx4_ib_send_to_slave()
487 tun_qp = &tun_ctx->qp[1]; mlx4_ib_send_to_slave()
506 src_qp = tun_qp->qp; mlx4_ib_send_to_slave()
641 err = mlx4_ib_send_to_slave(dev, slave, port, wc->qp->qp_type, wc, grh, mad); mlx4_ib_demux_mad()
643 pr_debug("failed sending to slave %d via tunnel qp (%d)\n", mlx4_ib_demux_mad()
712 err = mlx4_ib_send_to_slave(dev, slave, port, wc->qp->qp_type, wc, grh, mad); mlx4_ib_demux_mad()
714 pr_debug("failed sending to slave %d via tunnel qp (%d)\n", mlx4_ib_demux_mad()
727 if (in_wc && in_wc->qp->qp_num) { ib_process_mad()
732 in_wc->qp->qp_num, ib_process_mad()
1143 size = (tun_qp->qp->qp_type == IB_QPT_UD) ? mlx4_ib_post_pv_qp_buf()
1157 return ib_post_recv(tun_qp->qp, &recv_wr, &bad_recv_wr); mlx4_ib_post_pv_qp_buf()
1206 /* check if proxy qp created */ mlx4_ib_send_to_wire()
1212 sqp = &sqp_ctx->qp[0]; mlx4_ib_send_to_wire()
1216 sqp = &sqp_ctx->qp[1]; mlx4_ib_send_to_wire()
1220 send_qp = sqp->qp; mlx4_ib_send_to_wire()
1306 struct mlx4_ib_demux_pv_qp *tun_qp = &ctx->qp[MLX4_TUN_WRID_QPN(wc->wr_id)]; mlx4_ib_multiplex_mad()
1427 tun_qp = &ctx->qp[qp_type]; mlx4_ib_alloc_pv_bufs()
1521 tun_qp = &ctx->qp[qp_type]; mlx4_ib_free_pv_qp_bufs()
1558 tun_qp = &ctx->qp[MLX4_TUN_WRID_QPN(wc.wr_id)]; mlx4_ib_tunnel_comp_worker()
1624 tun_qp = &ctx->qp[qp_type]; create_pv_sqp()
1650 tun_qp->qp = ib_create_qp(ctx->pd, &qp_init_attr.init_attr); create_pv_sqp()
1651 if (IS_ERR(tun_qp->qp)) { create_pv_sqp()
1652 ret = PTR_ERR(tun_qp->qp); create_pv_sqp()
1653 tun_qp->qp = NULL; create_pv_sqp()
1671 ret = ib_modify_qp(tun_qp->qp, &attr, qp_attr_mask_INIT); create_pv_sqp()
1673 pr_err("Couldn't change %s qp state to INIT (%d)\n", create_pv_sqp()
1678 ret = ib_modify_qp(tun_qp->qp, &attr, IB_QP_STATE); create_pv_sqp()
1680 pr_err("Couldn't change %s qp state to RTR (%d)\n", create_pv_sqp()
1686 ret = ib_modify_qp(tun_qp->qp, &attr, IB_QP_STATE | IB_QP_SQ_PSN); create_pv_sqp()
1688 pr_err("Couldn't change %s qp state to RTS (%d)\n", create_pv_sqp()
1704 ib_destroy_qp(tun_qp->qp); create_pv_sqp()
1705 tun_qp->qp = NULL; create_pv_sqp()
1724 sqp = &ctx->qp[MLX4_TUN_WRID_QPN(wc.wr_id)]; mlx4_ib_sqp_comp_worker()
1880 ib_destroy_qp(ctx->qp[1].qp); create_pv_resources()
1881 ctx->qp[1].qp = NULL; create_pv_resources()
1886 ib_destroy_qp(ctx->qp[0].qp); create_pv_resources()
1887 ctx->qp[0].qp = NULL; create_pv_resources()
1918 ib_destroy_qp(ctx->qp[0].qp); destroy_pv_resources()
1919 ctx->qp[0].qp = NULL; destroy_pv_resources()
1922 ib_destroy_qp(ctx->qp[1].qp); destroy_pv_resources()
1923 ctx->qp[1].qp = NULL; destroy_pv_resources()
1944 /* destroy the tunnel qp resources */ mlx4_ib_tunnels_update()
1950 /* create the tunnel qp resources */ mlx4_ib_tunnels_update()
2050 ib_destroy_qp(sqp_ctx->qp[0].qp); mlx4_ib_free_sqp_ctx()
2051 sqp_ctx->qp[0].qp = NULL; mlx4_ib_free_sqp_ctx()
2054 ib_destroy_qp(sqp_ctx->qp[1].qp); mlx4_ib_free_sqp_ctx()
2055 sqp_ctx->qp[1].qp = NULL; mlx4_ib_free_sqp_ctx()
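
create_pv_sqp() above builds its tunnel/proxy QPs with the plain kernel verbs: ib_create_qp() followed by the IB_QPS_INIT/RTR/RTS modifies shown in the listing. A hedged sketch of that pattern for an ordinary UD QP follows; the PD, CQ, qkey and queue depths are assumed to be supplied by the caller and are illustrative only.

#include <linux/err.h>
#include <linux/string.h>
#include <rdma/ib_verbs.h>

/* Hedged sketch: create a kernel UD QP and move it to RTS, roughly the
 * shape of create_pv_sqp(). pd, cq and qkey are caller-provided.
 */
static struct ib_qp *create_ud_qp(struct ib_pd *pd, struct ib_cq *cq,
                                  u8 port, u32 qkey)
{
        struct ib_qp_init_attr init_attr;
        struct ib_qp_attr attr;
        struct ib_qp *qp;
        int ret;

        memset(&init_attr, 0, sizeof(init_attr));
        init_attr.send_cq          = cq;
        init_attr.recv_cq          = cq;
        init_attr.sq_sig_type      = IB_SIGNAL_ALL_WR;
        init_attr.qp_type          = IB_QPT_UD;
        init_attr.cap.max_send_wr  = 32;
        init_attr.cap.max_recv_wr  = 32;
        init_attr.cap.max_send_sge = 1;
        init_attr.cap.max_recv_sge = 1;

        qp = ib_create_qp(pd, &init_attr);
        if (IS_ERR(qp))
                return qp;

        memset(&attr, 0, sizeof(attr));
        attr.qp_state   = IB_QPS_INIT;
        attr.pkey_index = 0;
        attr.port_num   = port;
        attr.qkey       = qkey;
        ret = ib_modify_qp(qp, &attr, IB_QP_STATE | IB_QP_PKEY_INDEX |
                           IB_QP_PORT | IB_QP_QKEY);
        if (ret)
                goto err;

        attr.qp_state = IB_QPS_RTR;             /* UD: no path information needed */
        ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
        if (ret)
                goto err;

        attr.qp_state = IB_QPS_RTS;
        attr.sq_psn   = 0;
        ret = ib_modify_qp(qp, &attr, IB_QP_STATE | IB_QP_SQ_PSN);
        if (ret)
                goto err;

        return qp;

err:
        ib_destroy_qp(qp);
        return ERR_PTR(ret);
}
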
H A Dcq.c35 #include <linux/mlx4/qp.h>
579 static int use_tunnel_data(struct mlx4_ib_qp *qp, struct mlx4_ib_cq *cq, struct ib_wc *wc, use_tunnel_data() argument
584 ib_dma_sync_single_for_cpu(qp->ibqp.device, use_tunnel_data()
585 qp->sqp_proxy_rcv[tail].map, use_tunnel_data()
588 hdr = (struct mlx4_ib_proxy_sqp_hdr *) (qp->sqp_proxy_rcv[tail].addr); use_tunnel_data()
607 static void mlx4_ib_qp_sw_comp(struct mlx4_ib_qp *qp, int num_entries, mlx4_ib_qp_sw_comp() argument
614 wq = is_send ? &qp->sq : &qp->rq; mlx4_ib_qp_sw_comp()
626 wc->qp = &qp->ibqp; mlx4_ib_qp_sw_comp()
634 struct mlx4_ib_qp *qp; mlx4_ib_poll_sw_comp() local
640 list_for_each_entry(qp, &cq->send_qp_list, cq_send_list) { mlx4_ib_poll_sw_comp()
641 mlx4_ib_qp_sw_comp(qp, num_entries, wc + *npolled, npolled, 1); mlx4_ib_poll_sw_comp()
646 list_for_each_entry(qp, &cq->recv_qp_list, cq_recv_list) { mlx4_ib_poll_sw_comp()
647 mlx4_ib_qp_sw_comp(qp, num_entries, wc + *npolled, npolled, 0); mlx4_ib_poll_sw_comp()
732 wc->qp = &(*cur_qp)->ibqp; mlx4_ib_poll_one()
734 if (wc->qp->qp_type == IB_QPT_XRC_TGT) { mlx4_ib_poll_one()
852 is_eth = (rdma_port_get_link_layer(wc->qp->device, mlx4_ib_poll_one()
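
mlx4_ib_poll_one() and mlx4_ib_qp_sw_comp() above fill in wc->qp so that consumers can route each completion back to the owning QP. A hedged caller-side sketch of draining a CQ with ib_poll_cq() follows; handle_one_completion() is a hypothetical consumer callback, not part of this driver.

#include <linux/kernel.h>
#include <rdma/ib_verbs.h>

/* Hedged sketch: drain a CQ, dispatch per QP, then re-arm notification.
 * The re-poll after ib_req_notify_cq() closes the race with CQEs that
 * arrive between the last poll and the re-arm.
 */
static void handle_one_completion(struct ib_qp *qp, struct ib_wc *wc)
{
        if (wc->status != IB_WC_SUCCESS)
                pr_warn("QP 0x%x: completion failed with status %d\n",
                        qp->qp_num, wc->status);
}

static void drain_cq(struct ib_cq *cq)
{
        struct ib_wc wc[16];
        int n, i;

        do {
                while ((n = ib_poll_cq(cq, ARRAY_SIZE(wc), wc)) > 0)
                        for (i = 0; i < n; i++)
                                handle_one_completion(wc[i].qp, &wc[i]);
        } while (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
                                  IB_CQ_REPORT_MISSED_EVENTS) > 0);
}
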
H A Dmain.c54 #include <linux/mlx4/qp.h>
455 props->max_qp = dev->dev->quotas.qp; mlx4_ib_query_device()
1368 static int __mlx4_ib_default_rules_match(struct ib_qp *qp, __mlx4_ib_default_rules_match() argument
1374 u8 link_layer = rdma_port_get_link_layer(qp->device, flow_attr->port); __mlx4_ib_default_rules_match()
1424 struct ib_qp *qp, __mlx4_ib_create_default_rules()
1460 static int __mlx4_ib_create_flow(struct ib_qp *qp, struct ib_flow_attr *flow_attr, __mlx4_ib_create_flow() argument
1468 struct mlx4_ib_dev *mdev = to_mdev(qp->device); __mlx4_ib_create_flow()
1502 ctrl->qpn = cpu_to_be32(qp->qp_num); __mlx4_ib_create_flow()
1507 default_flow = __mlx4_ib_default_rules_match(qp, flow_attr); __mlx4_ib_create_flow()
1510 mdev, qp, default_table + default_flow, __mlx4_ib_create_flow()
1519 ret = parse_flow_attr(mdev->dev, qp->qp_num, ib_flow, __mlx4_ib_create_flow()
1555 static int mlx4_ib_tunnel_steer_add(struct ib_qp *qp, struct ib_flow_attr *flow_attr, mlx4_ib_tunnel_steer_add() argument
1560 struct mlx4_dev *dev = to_mdev(qp->device)->dev; mlx4_ib_tunnel_steer_add()
1573 err = mlx4_tunnel_steer_add(to_mdev(qp->device)->dev, ib_spec->eth.val.dst_mac, mlx4_ib_tunnel_steer_add()
1574 flow_attr->port, qp->qp_num, mlx4_ib_tunnel_steer_add()
1580 static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp, mlx4_ib_create_flow() argument
1587 struct mlx4_dev *dev = (to_mdev(qp->device))->dev; mlx4_ib_create_flow()
1622 err = __mlx4_ib_create_flow(qp, flow_attr, domain, type[i], mlx4_ib_create_flow()
1631 err = __mlx4_ib_create_flow(qp, flow_attr, mlx4_ib_create_flow()
1644 err = mlx4_ib_tunnel_steer_add(qp, flow_attr, mlx4_ib_create_flow()
1651 err = mlx4_ib_tunnel_steer_add(qp, flow_attr, mlx4_ib_create_flow()
1666 (void)__mlx4_ib_destroy_flow(to_mdev(qp->device)->dev, mlx4_ib_create_flow()
1672 (void)__mlx4_ib_destroy_flow(to_mdev(qp->device)->dev, mlx4_ib_create_flow()
1685 struct mlx4_ib_dev *mdev = to_mdev(flow_id->qp->device); mlx4_ib_destroy_flow()
1767 static struct mlx4_ib_gid_entry *find_gid_entry(struct mlx4_ib_qp *qp, u8 *raw) find_gid_entry() argument
1773 list_for_each_entry_safe(ge, tmp, &qp->gid_list, list) { find_gid_entry()
1938 struct mlx4_ib_qp *qp; mlx4_ib_update_qps() local
1951 qp = ibdev->qp1_proxy[port - 1]; mlx4_ib_update_qps()
1952 if (qp) { mlx4_ib_update_qps()
1957 mutex_lock(&qp->mutex); mlx4_ib_update_qps()
1958 old_smac = qp->pri.smac; mlx4_ib_update_qps()
1968 if (mlx4_update_qp(ibdev->dev, qp->mqp.qpn, MLX4_UPDATE_QP_SMAC, mlx4_ib_update_qps()
1974 if (qp->pri.smac_port) mlx4_ib_update_qps()
1976 qp->pri.smac = new_smac; mlx4_ib_update_qps()
1977 qp->pri.smac_port = port; mlx4_ib_update_qps()
1978 qp->pri.smac_index = new_smac_index; mlx4_ib_update_qps()
1984 if (qp) mlx4_ib_update_qps()
1985 mutex_unlock(&qp->mutex); mlx4_ib_update_qps()
2620 pr_err("failed to allocate memory for tunneling qp update\n"); do_slave_init()
2627 pr_err("failed to allocate memory for tunneling qp update work struct\n"); do_slave_init()
2667 /* Go over qp list reside on that ibdev, sync with create/destroy qp.*/ mlx4_ib_handle_catas_error()
1422 __mlx4_ib_create_default_rules( struct mlx4_ib_dev *mdev, struct ib_qp *qp, const struct default_rules *pdefault_rules, struct _rule_hw *mlx4_spec) __mlx4_ib_create_default_rules() argument
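
mlx4_ib_create_flow() and its helpers above implement the ib_create_flow() verb, which attaches a steering rule to a QP so that matching packets land in its receive queue. The sketch below is a hedged consumer-side illustration of the usual "ib_flow_attr immediately followed by its specs" layout; the port number, the destination-MAC-only match and the IB_FLOW_DOMAIN_USER domain are assumptions for the example.

#include <linux/if_ether.h>
#include <linux/string.h>
#include <rdma/ib_verbs.h>

/* Hedged sketch: steer traffic for one destination MAC to a QP. */
static struct ib_flow *steer_dmac_to_qp(struct ib_qp *qp, u8 port,
                                        const u8 *dmac)
{
        struct {
                struct ib_flow_attr     attr;
                struct ib_flow_spec_eth eth;
        } rule;

        memset(&rule, 0, sizeof(rule));
        rule.attr.type         = IB_FLOW_ATTR_NORMAL;
        rule.attr.size         = sizeof(rule);
        rule.attr.num_of_specs = 1;
        rule.attr.port         = port;

        rule.eth.type = IB_FLOW_SPEC_ETH;
        rule.eth.size = sizeof(rule.eth);
        memcpy(rule.eth.val.dst_mac,  dmac, ETH_ALEN);
        memset(rule.eth.mask.dst_mac, 0xff, ETH_ALEN);  /* exact match */

        return ib_create_flow(qp, &rule.attr, IB_FLOW_DOMAIN_USER);
}

The rule is released again with ib_destroy_flow(); on mlx4 both directions end up in the __mlx4_ib_create_flow()/__mlx4_ib_destroy_flow() helpers listed above.
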
H A Dmlx4_ib.h409 struct ib_qp *qp; member in struct:mlx4_ib_demux_pv_qp
435 struct mlx4_ib_demux_pv_qp qp[2]; member in struct:mlx4_ib_demux_pv_ctx
707 int mlx4_ib_bind_mw(struct ib_qp *qp, struct ib_mw *mw,
746 int mlx4_ib_destroy_qp(struct ib_qp *qp);
H A Dmr.c369 int mlx4_ib_bind_mw(struct ib_qp *qp, struct ib_mw *mw, mlx4_ib_bind_mw() argument
384 ret = mlx4_ib_post_send(qp, &wr.wr, &bad_wr); mlx4_ib_bind_mw()
H A Dsrq.c34 #include <linux/mlx4/qp.h>
/linux-4.4.14/drivers/infiniband/hw/mlx5/
H A DMakefile3 mlx5_ib-y := main.o cq.o doorbell.o qp.o mem.o srq.o mr.o ah.o mad.o
H A Dqp.c84 static void *get_wqe(struct mlx5_ib_qp *qp, int offset) get_wqe() argument
86 return mlx5_buf_offset(&qp->buf, offset); get_wqe()
89 static void *get_recv_wqe(struct mlx5_ib_qp *qp, int n) get_recv_wqe() argument
91 return get_wqe(qp, qp->rq.offset + (n << qp->rq.wqe_shift)); get_recv_wqe()
94 void *mlx5_get_send_wqe(struct mlx5_ib_qp *qp, int n) mlx5_get_send_wqe() argument
96 return get_wqe(qp, qp->sq.offset + (n << MLX5_IB_SQ_STRIDE)); mlx5_get_send_wqe()
102 * @qp: QP to copy from.
116 int mlx5_ib_read_user_wqe(struct mlx5_ib_qp *qp, int send, int wqe_index, mlx5_ib_read_user_wqe() argument
119 struct ib_device *ibdev = qp->ibqp.device; mlx5_ib_read_user_wqe()
121 struct mlx5_ib_wq *wq = send ? &qp->sq : &qp->rq; mlx5_ib_read_user_wqe()
124 struct ib_umem *umem = qp->umem; mlx5_ib_read_user_wqe()
131 qp->ibqp.qp_type); mlx5_ib_read_user_wqe()
170 static void mlx5_ib_qp_event(struct mlx5_core_qp *qp, int type) mlx5_ib_qp_event() argument
172 struct ib_qp *ibqp = &to_mibqp(qp)->ibqp; mlx5_ib_qp_event()
176 to_mibqp(qp)->port = to_mibqp(qp)->alt_port; mlx5_ib_qp_event()
180 event.element.qp = ibqp; mlx5_ib_qp_event()
207 pr_warn("mlx5_ib: Unexpected event type %d on QP %06x\n", type, qp->qpn); mlx5_ib_qp_event()
216 int has_rq, struct mlx5_ib_qp *qp, struct mlx5_ib_create_qp *ucmd) set_rq_size()
226 qp->rq.max_gs = 0; set_rq_size()
227 qp->rq.wqe_cnt = 0; set_rq_size()
228 qp->rq.wqe_shift = 0; set_rq_size()
231 qp->rq.wqe_cnt = ucmd->rq_wqe_count; set_rq_size()
232 qp->rq.wqe_shift = ucmd->rq_wqe_shift; set_rq_size()
233 qp->rq.max_gs = (1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) - qp->wq_sig; set_rq_size()
234 qp->rq.max_post = qp->rq.wqe_cnt; set_rq_size()
236 wqe_size = qp->wq_sig ? sizeof(struct mlx5_wqe_signature_seg) : 0; set_rq_size()
241 qp->rq.wqe_cnt = wq_size / wqe_size; set_rq_size()
249 qp->rq.wqe_shift = ilog2(wqe_size); set_rq_size()
250 qp->rq.max_gs = (1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) - qp->wq_sig; set_rq_size()
251 qp->rq.max_post = qp->rq.wqe_cnt; set_rq_size()
325 struct mlx5_ib_qp *qp) calc_sq_size()
344 qp->max_inline_data = wqe_size - sq_overhead(attr->qp_type) - calc_sq_size()
346 attr->cap.max_inline_data = qp->max_inline_data; calc_sq_size()
349 qp->signature_en = true; calc_sq_size()
352 qp->sq.wqe_cnt = wq_size / MLX5_SEND_WQE_BB; calc_sq_size()
353 if (qp->sq.wqe_cnt > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz))) { calc_sq_size()
355 qp->sq.wqe_cnt, calc_sq_size()
359 qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB); calc_sq_size()
360 qp->sq.max_gs = attr->cap.max_send_sge; calc_sq_size()
361 qp->sq.max_post = wq_size / wqe_size; calc_sq_size()
362 attr->cap.max_send_wr = qp->sq.max_post; calc_sq_size()
368 struct mlx5_ib_qp *qp, set_user_buf_size()
371 int desc_sz = 1 << qp->sq.wqe_shift; set_user_buf_size()
385 qp->sq.wqe_cnt = ucmd->sq_wqe_count; set_user_buf_size()
387 if (qp->sq.wqe_cnt > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz))) { set_user_buf_size()
389 qp->sq.wqe_cnt, set_user_buf_size()
394 qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) + set_user_buf_size()
395 (qp->sq.wqe_cnt << 6); set_user_buf_size()
594 struct mlx5_ib_qp *qp, struct ib_udata *udata, create_user_qp()
637 qp->rq.offset = 0; create_user_qp()
638 qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB); create_user_qp()
639 qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift; create_user_qp()
641 err = set_user_buf_size(dev, qp, &ucmd); create_user_qp()
645 if (ucmd.buf_addr && qp->buf_size) { create_user_qp()
646 qp->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr, create_user_qp()
647 qp->buf_size, 0, 0); create_user_qp()
648 if (IS_ERR(qp->umem)) { create_user_qp()
650 err = PTR_ERR(qp->umem); create_user_qp()
654 qp->umem = NULL; create_user_qp()
657 if (qp->umem) { create_user_qp()
658 mlx5_ib_cont_pages(qp->umem, ucmd.buf_addr, &npages, &page_shift, create_user_qp()
666 ucmd.buf_addr, qp->buf_size, npages, page_shift, ncont, offset); create_user_qp()
675 if (qp->umem) create_user_qp()
676 mlx5_ib_populate_pas(dev, qp->umem, page_shift, (*in)->pas, 0); create_user_qp()
683 qp->uuarn = uuarn; create_user_qp()
685 err = mlx5_ib_db_map_user(context, ucmd.db_addr, &qp->db); create_user_qp()
696 qp->create_type = MLX5_QP_USER; create_user_qp()
701 mlx5_ib_db_unmap_user(context, &qp->db); create_user_qp()
707 if (qp->umem) create_user_qp()
708 ib_umem_release(qp->umem); create_user_qp()
715 static void destroy_qp_user(struct ib_pd *pd, struct mlx5_ib_qp *qp) destroy_qp_user() argument
720 mlx5_ib_db_unmap_user(context, &qp->db); destroy_qp_user()
721 if (qp->umem) destroy_qp_user()
722 ib_umem_release(qp->umem); destroy_qp_user()
723 free_uuar(&context->uuari, qp->uuarn); destroy_qp_user()
728 struct mlx5_ib_qp *qp, create_kernel_qp()
750 qp->bf = &uuari->bfs[uuarn]; create_kernel_qp()
751 uar_index = qp->bf->uar->index; create_kernel_qp()
753 err = calc_sq_size(dev, init_attr, qp); create_kernel_qp()
759 qp->rq.offset = 0; create_kernel_qp()
760 qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift; create_kernel_qp()
761 qp->buf_size = err + (qp->rq.wqe_cnt << qp->rq.wqe_shift); create_kernel_qp()
763 err = mlx5_buf_alloc(dev->mdev, qp->buf_size, &qp->buf); create_kernel_qp()
769 qp->sq.qend = mlx5_get_send_wqe(qp, qp->sq.wqe_cnt); create_kernel_qp()
770 *inlen = sizeof(**in) + sizeof(*(*in)->pas) * qp->buf.npages; create_kernel_qp()
778 cpu_to_be32((qp->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT) << 24); create_kernel_qp()
783 mlx5_fill_page_array(&qp->buf, (*in)->pas); create_kernel_qp()
785 err = mlx5_db_alloc(dev->mdev, &qp->db); create_kernel_qp()
791 qp->sq.wrid = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.wrid), GFP_KERNEL); create_kernel_qp()
792 qp->sq.wr_data = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.wr_data), GFP_KERNEL); create_kernel_qp()
793 qp->rq.wrid = kmalloc(qp->rq.wqe_cnt * sizeof(*qp->rq.wrid), GFP_KERNEL); create_kernel_qp()
794 qp->sq.w_list = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.w_list), GFP_KERNEL); create_kernel_qp()
795 qp->sq.wqe_head = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.wqe_head), GFP_KERNEL); create_kernel_qp()
797 if (!qp->sq.wrid || !qp->sq.wr_data || !qp->rq.wrid || create_kernel_qp()
798 !qp->sq.w_list || !qp->sq.wqe_head) { create_kernel_qp()
802 qp->create_type = MLX5_QP_KERNEL; create_kernel_qp()
807 mlx5_db_free(dev->mdev, &qp->db); create_kernel_qp()
808 kfree(qp->sq.wqe_head); create_kernel_qp()
809 kfree(qp->sq.w_list); create_kernel_qp()
810 kfree(qp->sq.wrid); create_kernel_qp()
811 kfree(qp->sq.wr_data); create_kernel_qp()
812 kfree(qp->rq.wrid); create_kernel_qp()
818 mlx5_buf_free(dev->mdev, &qp->buf); create_kernel_qp()
825 static void destroy_qp_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp) destroy_qp_kernel() argument
827 mlx5_db_free(dev->mdev, &qp->db); destroy_qp_kernel()
828 kfree(qp->sq.wqe_head); destroy_qp_kernel()
829 kfree(qp->sq.w_list); destroy_qp_kernel()
830 kfree(qp->sq.wrid); destroy_qp_kernel()
831 kfree(qp->sq.wr_data); destroy_qp_kernel()
832 kfree(qp->rq.wrid); destroy_qp_kernel()
833 mlx5_buf_free(dev->mdev, &qp->buf); destroy_qp_kernel()
834 free_uuar(&dev->mdev->priv.uuari, qp->bf->uuarn); destroy_qp_kernel()
837 static __be32 get_rx_type(struct mlx5_ib_qp *qp, struct ib_qp_init_attr *attr) get_rx_type() argument
842 else if (!qp->has_rq) get_rx_type()
858 struct ib_udata *udata, struct mlx5_ib_qp *qp) create_qp_common()
868 mlx5_ib_odp_create_qp(qp); create_qp_common()
870 mutex_init(&qp->mutex); create_qp_common()
871 spin_lock_init(&qp->sq.lock); create_qp_common()
872 spin_lock_init(&qp->rq.lock); create_qp_common()
879 qp->flags |= MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK; create_qp_common()
884 qp->sq_signal_bits = MLX5_WQE_CTRL_CQ_UPDATE; create_qp_common()
892 qp->wq_sig = !!(ucmd.flags & MLX5_QP_FLAG_SIGNATURE); create_qp_common()
893 qp->scat_cqe = !!(ucmd.flags & MLX5_QP_FLAG_SCATTER_CQE); create_qp_common()
895 qp->wq_sig = !!wq_signature; create_qp_common()
898 qp->has_rq = qp_has_rq(init_attr); create_qp_common()
899 err = set_rq_size(dev, &init_attr->cap, qp->has_rq, create_qp_common()
900 qp, (pd && pd->uobject) ? &ucmd : NULL); create_qp_common()
911 if (ucmd.rq_wqe_shift != qp->rq.wqe_shift || create_qp_common()
912 ucmd.rq_wqe_count != qp->rq.wqe_cnt) { create_qp_common()
921 err = create_user_qp(dev, pd, qp, udata, &in, &resp, &inlen); create_qp_common()
925 err = create_kernel_qp(dev, init_attr, qp, &in, &inlen); create_qp_common()
937 qp->create_type = MLX5_QP_EMPTY; create_qp_common()
941 qp->port = init_attr->port_num; create_qp_common()
951 if (qp->wq_sig) create_qp_common()
954 if (qp->flags & MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK) create_qp_common()
957 if (qp->scat_cqe && is_connected(init_attr->qp_type)) { create_qp_common()
977 if (qp->rq.wqe_cnt) { create_qp_common()
978 in->ctx.rq_size_stride = (qp->rq.wqe_shift - 4); create_qp_common()
979 in->ctx.rq_size_stride |= ilog2(qp->rq.wqe_cnt) << 3; create_qp_common()
982 in->ctx.rq_type_srqn = get_rx_type(qp, init_attr); create_qp_common()
984 if (qp->sq.wqe_cnt) create_qp_common()
985 in->ctx.sq_crq_size |= cpu_to_be16(ilog2(qp->sq.wqe_cnt) << 11); create_qp_common()
1019 in->ctx.db_rec_addr = cpu_to_be64(qp->db.dma); create_qp_common()
1021 err = mlx5_core_create_qp(dev->mdev, &qp->mqp, in, inlen); create_qp_common()
1023 mlx5_ib_dbg(dev, "create qp failed\n"); create_qp_common()
1032 qp->doorbell_qpn = swab32(qp->mqp.qpn << 8); create_qp_common()
1034 qp->mqp.event = mlx5_ib_qp_event; create_qp_common()
1039 if (qp->create_type == MLX5_QP_USER) create_qp_common()
1040 destroy_qp_user(pd, qp); create_qp_common()
1041 else if (qp->create_type == MLX5_QP_KERNEL) create_qp_common()
1042 destroy_qp_kernel(dev, qp); create_qp_common()
1106 static struct mlx5_ib_pd *get_pd(struct mlx5_ib_qp *qp) get_pd() argument
1108 return to_mpd(qp->ibqp.pd); get_pd()
1111 static void get_cqs(struct mlx5_ib_qp *qp, get_cqs() argument
1114 switch (qp->ibqp.qp_type) { get_cqs()
1121 *send_cq = to_mcq(qp->ibqp.send_cq); get_cqs()
1132 *send_cq = to_mcq(qp->ibqp.send_cq); get_cqs()
1133 *recv_cq = to_mcq(qp->ibqp.recv_cq); get_cqs()
1145 static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp) destroy_qp_common() argument
1155 if (qp->state != IB_QPS_RESET) { destroy_qp_common()
1156 mlx5_ib_qp_disable_pagefaults(qp); destroy_qp_common()
1157 if (mlx5_core_qp_modify(dev->mdev, to_mlx5_state(qp->state), destroy_qp_common()
1158 MLX5_QP_STATE_RST, in, 0, &qp->mqp)) destroy_qp_common()
1160 qp->mqp.qpn); destroy_qp_common()
1163 get_cqs(qp, &send_cq, &recv_cq); destroy_qp_common()
1165 if (qp->create_type == MLX5_QP_KERNEL) { destroy_qp_common()
1167 __mlx5_ib_cq_clean(recv_cq, qp->mqp.qpn, destroy_qp_common()
1168 qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL); destroy_qp_common()
1170 __mlx5_ib_cq_clean(send_cq, qp->mqp.qpn, NULL); destroy_qp_common()
1174 err = mlx5_core_destroy_qp(dev->mdev, &qp->mqp); destroy_qp_common()
1176 mlx5_ib_warn(dev, "failed to destroy QP 0x%x\n", qp->mqp.qpn); destroy_qp_common()
1180 if (qp->create_type == MLX5_QP_KERNEL) destroy_qp_common()
1181 destroy_qp_kernel(dev, qp); destroy_qp_common()
1182 else if (qp->create_type == MLX5_QP_USER) destroy_qp_common()
1183 destroy_qp_user(&get_pd(qp)->ibpd, qp); destroy_qp_common()
1222 struct mlx5_ib_qp *qp; mlx5_ib_create_qp() local
1259 qp = kzalloc(sizeof(*qp), GFP_KERNEL); mlx5_ib_create_qp()
1260 if (!qp) mlx5_ib_create_qp()
1263 err = create_qp_common(dev, pd, init_attr, udata, qp); mlx5_ib_create_qp()
1266 kfree(qp); mlx5_ib_create_qp()
1271 qp->ibqp.qp_num = 0; mlx5_ib_create_qp()
1273 qp->ibqp.qp_num = 1; mlx5_ib_create_qp()
1275 qp->ibqp.qp_num = qp->mqp.qpn; mlx5_ib_create_qp()
1278 qp->ibqp.qp_num, qp->mqp.qpn, to_mcq(init_attr->recv_cq)->mcq.cqn, mlx5_ib_create_qp()
1281 qp->xrcdn = xrcdn; mlx5_ib_create_qp()
1290 mlx5_ib_dbg(dev, "unsupported qp type %d\n", mlx5_ib_create_qp()
1296 return &qp->ibqp; mlx5_ib_create_qp()
1299 int mlx5_ib_destroy_qp(struct ib_qp *qp) mlx5_ib_destroy_qp() argument
1301 struct mlx5_ib_dev *dev = to_mdev(qp->device); mlx5_ib_destroy_qp()
1302 struct mlx5_ib_qp *mqp = to_mqp(qp); mlx5_ib_destroy_qp()
1311 static __be32 to_mlx5_access_flags(struct mlx5_ib_qp *qp, const struct ib_qp_attr *attr, to_mlx5_access_flags() argument
1321 dest_rd_atomic = qp->resp_depth; to_mlx5_access_flags()
1326 access_flags = qp->atomic_rd_en; to_mlx5_access_flags()
1557 struct mlx5_ib_qp *qp = to_mqp(ibqp); __mlx5_ib_modify_qp() local
1620 context->pri_path.port = qp->port; __mlx5_ib_modify_qp()
1627 attr_mask & IB_QP_PORT ? attr->port_num : qp->port, __mlx5_ib_modify_qp()
1643 pd = get_pd(qp); __mlx5_ib_modify_qp()
1644 get_cqs(qp, &send_cq, &recv_cq); __mlx5_ib_modify_qp()
1673 context->params2 |= to_mlx5_access_flags(qp, attr, attr_mask); __mlx5_ib_modify_qp()
1684 if (qp->rq.wqe_cnt && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) __mlx5_ib_modify_qp()
1685 context->db_rec_addr = cpu_to_be64(qp->db.dma); __mlx5_ib_modify_qp()
1710 mlx5_ib_qp_disable_pagefaults(qp); __mlx5_ib_modify_qp()
1717 &qp->mqp); __mlx5_ib_modify_qp()
1722 mlx5_ib_qp_enable_pagefaults(qp); __mlx5_ib_modify_qp()
1724 qp->state = new_state; __mlx5_ib_modify_qp()
1727 qp->atomic_rd_en = attr->qp_access_flags; __mlx5_ib_modify_qp()
1729 qp->resp_depth = attr->max_dest_rd_atomic; __mlx5_ib_modify_qp()
1731 qp->port = attr->port_num; __mlx5_ib_modify_qp()
1733 qp->alt_port = attr->alt_port_num; __mlx5_ib_modify_qp()
1740 mlx5_ib_cq_clean(recv_cq, qp->mqp.qpn, __mlx5_ib_modify_qp()
1743 mlx5_ib_cq_clean(send_cq, qp->mqp.qpn, NULL); __mlx5_ib_modify_qp()
1745 qp->rq.head = 0; __mlx5_ib_modify_qp()
1746 qp->rq.tail = 0; __mlx5_ib_modify_qp()
1747 qp->sq.head = 0; __mlx5_ib_modify_qp()
1748 qp->sq.tail = 0; __mlx5_ib_modify_qp()
1749 qp->sq.cur_post = 0; __mlx5_ib_modify_qp()
1750 qp->sq.last_poll = 0; __mlx5_ib_modify_qp()
1751 qp->db.db[MLX5_RCV_DBR] = 0; __mlx5_ib_modify_qp()
1752 qp->db.db[MLX5_SND_DBR] = 0; __mlx5_ib_modify_qp()
1764 struct mlx5_ib_qp *qp = to_mqp(ibqp); mlx5_ib_modify_qp() local
1769 mutex_lock(&qp->mutex); mlx5_ib_modify_qp()
1771 cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state; mlx5_ib_modify_qp()
1785 port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port; mlx5_ib_modify_qp()
1809 mutex_unlock(&qp->mutex); mlx5_ib_modify_qp()
2078 static int set_data_inl_seg(struct mlx5_ib_qp *qp, struct ib_send_wr *wr, set_data_inl_seg() argument
2082 void *qend = qp->sq.qend; set_data_inl_seg()
2096 if (unlikely(inl > qp->max_inline_data)) set_data_inl_seg()
2104 wqe = mlx5_get_send_wqe(qp, 0); set_data_inl_seg()
2225 struct mlx5_ib_qp *qp, void **seg, int *size) set_sig_data_segment()
2309 if (unlikely((*seg == qp->sq.qend))) set_sig_data_segment()
2310 *seg = mlx5_get_send_wqe(qp, 0); set_sig_data_segment()
2319 if (unlikely((*seg == qp->sq.qend))) set_sig_data_segment()
2320 *seg = mlx5_get_send_wqe(qp, 0); set_sig_data_segment()
2357 static int set_sig_umr_wr(struct ib_send_wr *send_wr, struct mlx5_ib_qp *qp, set_sig_umr_wr() argument
2362 u32 pdn = get_pd(qp)->pdn; set_sig_umr_wr()
2368 unlikely(!sig_mr->sig) || unlikely(!qp->signature_en) || set_sig_umr_wr()
2390 if (unlikely((*seg == qp->sq.qend))) set_sig_umr_wr()
2391 *seg = mlx5_get_send_wqe(qp, 0); set_sig_umr_wr()
2396 if (unlikely((*seg == qp->sq.qend))) set_sig_umr_wr()
2397 *seg = mlx5_get_send_wqe(qp, 0); set_sig_umr_wr()
2399 ret = set_sig_data_segment(wr, qp, seg, size); set_sig_umr_wr()
2433 static int set_reg_wr(struct mlx5_ib_qp *qp, set_reg_wr() argument
2438 struct mlx5_ib_pd *pd = to_mpd(qp->ibqp.pd); set_reg_wr()
2441 mlx5_ib_warn(to_mdev(qp->ibqp.device), set_reg_wr()
2449 if (unlikely((*seg == qp->sq.qend))) set_reg_wr()
2450 *seg = mlx5_get_send_wqe(qp, 0); set_reg_wr()
2455 if (unlikely((*seg == qp->sq.qend))) set_reg_wr()
2456 *seg = mlx5_get_send_wqe(qp, 0); set_reg_wr()
2465 static void set_linv_wr(struct mlx5_ib_qp *qp, void **seg, int *size) set_linv_wr() argument
2470 if (unlikely((*seg == qp->sq.qend))) set_linv_wr()
2471 *seg = mlx5_get_send_wqe(qp, 0); set_linv_wr()
2475 if (unlikely((*seg == qp->sq.qend))) set_linv_wr()
2476 *seg = mlx5_get_send_wqe(qp, 0); set_linv_wr()
2479 static void dump_wqe(struct mlx5_ib_qp *qp, int idx, int size_16) dump_wqe() argument
2485 pr_debug("dump wqe at %p\n", mlx5_get_send_wqe(qp, tidx)); dump_wqe()
2488 void *buf = mlx5_get_send_wqe(qp, tidx); dump_wqe()
2489 tidx = (tidx + 1) & (qp->sq.wqe_cnt - 1); dump_wqe()
2500 unsigned bytecnt, struct mlx5_ib_qp *qp) mlx5_bf_copy()
2512 if (unlikely(src == qp->sq.qend)) mlx5_bf_copy()
2513 src = mlx5_get_send_wqe(qp, 0); mlx5_bf_copy()
2534 static int begin_wqe(struct mlx5_ib_qp *qp, void **seg, begin_wqe() argument
2541 if (unlikely(mlx5_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq))) { begin_wqe()
2546 *idx = qp->sq.cur_post & (qp->sq.wqe_cnt - 1); begin_wqe()
2547 *seg = mlx5_get_send_wqe(qp, *idx); begin_wqe()
2551 (*ctrl)->fm_ce_se = qp->sq_signal_bits | begin_wqe()
2563 static void finish_wqe(struct mlx5_ib_qp *qp, finish_wqe() argument
2571 ctrl->opmod_idx_opcode = cpu_to_be32(((u32)(qp->sq.cur_post) << 8) | finish_wqe()
2573 ctrl->qpn_ds = cpu_to_be32(size | (qp->mqp.qpn << 8)); finish_wqe()
2575 qp->fm_cache = next_fence; finish_wqe()
2576 if (unlikely(qp->wq_sig)) finish_wqe()
2579 qp->sq.wrid[idx] = wr_id; finish_wqe()
2580 qp->sq.w_list[idx].opcode = mlx5_opcode; finish_wqe()
2581 qp->sq.wqe_head[idx] = qp->sq.head + nreq; finish_wqe()
2582 qp->sq.cur_post += DIV_ROUND_UP(size * 16, MLX5_SEND_WQE_BB); finish_wqe()
2583 qp->sq.w_list[idx].next = qp->sq.cur_post; finish_wqe()
2592 struct mlx5_ib_qp *qp = to_mqp(ibqp); mlx5_ib_post_send() local
2596 struct mlx5_bf *bf = qp->bf; mlx5_ib_post_send()
2598 void *qend = qp->sq.qend; mlx5_ib_post_send()
2610 spin_lock_irqsave(&qp->sq.lock, flags); mlx5_ib_post_send()
2620 fence = qp->fm_cache; mlx5_ib_post_send()
2622 if (unlikely(num_sge > qp->sq.max_gs)) { mlx5_ib_post_send()
2629 err = begin_wqe(qp, &seg, &ctrl, wr, &idx, &size, nreq); mlx5_ib_post_send()
2664 qp->sq.wr_data[idx] = IB_WR_LOCAL_INV; mlx5_ib_post_send()
2666 set_linv_wr(qp, &seg, &size); mlx5_ib_post_send()
2672 qp->sq.wr_data[idx] = IB_WR_REG_MR; mlx5_ib_post_send()
2674 err = set_reg_wr(qp, reg_wr(wr), &seg, &size); mlx5_ib_post_send()
2683 qp->sq.wr_data[idx] = IB_WR_REG_SIG_MR; mlx5_ib_post_send()
2687 err = set_sig_umr_wr(wr, qp, &seg, &size); mlx5_ib_post_send()
2694 finish_wqe(qp, ctrl, size, idx, wr->wr_id, mlx5_ib_post_send()
2703 err = begin_wqe(qp, &seg, &ctrl, wr, mlx5_ib_post_send()
2721 finish_wqe(qp, ctrl, size, idx, wr->wr_id, mlx5_ib_post_send()
2724 err = begin_wqe(qp, &seg, &ctrl, wr, mlx5_ib_post_send()
2743 finish_wqe(qp, ctrl, size, idx, wr->wr_id, mlx5_ib_post_send()
2776 seg = mlx5_get_send_wqe(qp, 0); mlx5_ib_post_send()
2785 qp->sq.wr_data[idx] = MLX5_IB_WR_UMR; mlx5_ib_post_send()
2791 seg = mlx5_get_send_wqe(qp, 0); mlx5_ib_post_send()
2796 seg = mlx5_get_send_wqe(qp, 0); mlx5_ib_post_send()
2806 err = set_data_inl_seg(qp, wr, seg, &sz); mlx5_ib_post_send()
2818 seg = mlx5_get_send_wqe(qp, 0); mlx5_ib_post_send()
2829 finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq, mlx5_ib_post_send()
2834 dump_wqe(qp, idx, size); mlx5_ib_post_send()
2839 qp->sq.head += nreq; mlx5_ib_post_send()
2846 qp->db.db[MLX5_SND_DBR] = cpu_to_be32(qp->sq.cur_post); mlx5_ib_post_send()
2859 mlx5_bf_copy(bf->reg + bf->offset, (u64 *)ctrl, ALIGN(size * 16, 64), qp); mlx5_ib_post_send()
2876 spin_unlock_irqrestore(&qp->sq.lock, flags); mlx5_ib_post_send()
2889 struct mlx5_ib_qp *qp = to_mqp(ibqp); mlx5_ib_post_recv() local
2898 spin_lock_irqsave(&qp->rq.lock, flags); mlx5_ib_post_recv()
2900 ind = qp->rq.head & (qp->rq.wqe_cnt - 1); mlx5_ib_post_recv()
2903 if (mlx5_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) { mlx5_ib_post_recv()
2909 if (unlikely(wr->num_sge > qp->rq.max_gs)) { mlx5_ib_post_recv()
2915 scat = get_recv_wqe(qp, ind); mlx5_ib_post_recv()
2916 if (qp->wq_sig) mlx5_ib_post_recv()
2922 if (i < qp->rq.max_gs) { mlx5_ib_post_recv()
2928 if (qp->wq_sig) { mlx5_ib_post_recv()
2930 set_sig_seg(sig, (qp->rq.max_gs + 1) << 2); mlx5_ib_post_recv()
2933 qp->rq.wrid[ind] = wr->wr_id; mlx5_ib_post_recv()
2935 ind = (ind + 1) & (qp->rq.wqe_cnt - 1); mlx5_ib_post_recv()
2940 qp->rq.head += nreq; mlx5_ib_post_recv()
2947 *qp->db.db = cpu_to_be32(qp->rq.head & 0xffff); mlx5_ib_post_recv()
2950 spin_unlock_irqrestore(&qp->rq.lock, flags); mlx5_ib_post_recv()
3028 struct mlx5_ib_qp *qp = to_mqp(ibqp); mlx5_ib_query_qp() local
3042 mutex_lock(&qp->mutex); mlx5_ib_query_qp()
3049 err = mlx5_core_qp_query(dev->mdev, &qp->mqp, outb, sizeof(*outb)); mlx5_ib_query_qp()
3055 qp->state = to_ib_qp_state(mlx5_state); mlx5_ib_query_qp()
3056 qp_attr->qp_state = qp->state; mlx5_ib_query_qp()
3067 if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC) { mlx5_ib_query_qp()
3077 /* qp_attr->en_sqd_async_notify is only applicable in modify qp */ mlx5_ib_query_qp()
3091 qp_attr->cap.max_recv_wr = qp->rq.wqe_cnt; mlx5_ib_query_qp()
3092 qp_attr->cap.max_recv_sge = qp->rq.max_gs; mlx5_ib_query_qp()
3095 qp_attr->cap.max_send_wr = qp->sq.wqe_cnt; mlx5_ib_query_qp()
3096 qp_attr->cap.max_send_sge = qp->sq.max_gs; mlx5_ib_query_qp()
3110 if (qp->flags & MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK) mlx5_ib_query_qp()
3113 qp_init_attr->sq_sig_type = qp->sq_signal_bits & MLX5_WQE_CTRL_CQ_UPDATE ? mlx5_ib_query_qp()
3120 mutex_unlock(&qp->mutex); mlx5_ib_query_qp()
215 set_rq_size(struct mlx5_ib_dev *dev, struct ib_qp_cap *cap, int has_rq, struct mlx5_ib_qp *qp, struct mlx5_ib_create_qp *ucmd) set_rq_size() argument
324 calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr, struct mlx5_ib_qp *qp) calc_sq_size() argument
367 set_user_buf_size(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, struct mlx5_ib_create_qp *ucmd) set_user_buf_size() argument
593 create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd, struct mlx5_ib_qp *qp, struct ib_udata *udata, struct mlx5_create_qp_mbox_in **in, struct mlx5_ib_create_qp_resp *resp, int *inlen) create_user_qp() argument
726 create_kernel_qp(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *init_attr, struct mlx5_ib_qp *qp, struct mlx5_create_qp_mbox_in **in, int *inlen) create_kernel_qp() argument
856 create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd, struct ib_qp_init_attr *init_attr, struct ib_udata *udata, struct mlx5_ib_qp *qp) create_qp_common() argument
2224 set_sig_data_segment(struct ib_sig_handover_wr *wr, struct mlx5_ib_qp *qp, void **seg, int *size) set_sig_data_segment() argument
2499 mlx5_bf_copy(u64 __iomem *dst, u64 *src, unsigned bytecnt, struct mlx5_ib_qp *qp) mlx5_bf_copy() argument
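
mlx5_ib_post_send()/mlx5_ib_post_recv() above turn generic work requests into device WQEs (begin_wqe()/finish_wqe(), the doorbell write and the BlueFlame copy). The consumer side is just ib_post_send()/ib_post_recv() with an ib_sge describing DMA-mapped memory; a hedged sketch follows, assuming the caller already has a DMA address and an lkey (for example from ib_dma_map_single() plus a local DMA MR).

#include <linux/string.h>
#include <rdma/ib_verbs.h>

/* Hedged sketch: post one signaled SEND of a DMA-mapped buffer.
 * dma_addr, len and lkey are assumed to be prepared elsewhere.
 */
static int post_one_send(struct ib_qp *qp, u64 dma_addr, u32 len, u32 lkey,
                         u64 wr_id)
{
        struct ib_send_wr wr, *bad_wr;
        struct ib_sge sge;

        sge.addr   = dma_addr;
        sge.length = len;
        sge.lkey   = lkey;

        memset(&wr, 0, sizeof(wr));
        wr.wr_id      = wr_id;          /* echoed back in the completion */
        wr.sg_list    = &sge;
        wr.num_sge    = 1;
        wr.opcode     = IB_WR_SEND;
        wr.send_flags = IB_SEND_SIGNALED;

        return ib_post_send(qp, &wr, &bad_wr);
}

/* Matching receive: one scatter entry per WR. */
static int post_one_recv(struct ib_qp *qp, u64 dma_addr, u32 len, u32 lkey,
                         u64 wr_id)
{
        struct ib_recv_wr wr, *bad_wr;
        struct ib_sge sge;

        sge.addr   = dma_addr;
        sge.length = len;
        sge.lkey   = lkey;

        memset(&wr, 0, sizeof(wr));
        wr.wr_id   = wr_id;
        wr.sg_list = &sge;
        wr.num_sge = 1;

        return ib_post_recv(qp, &wr, &bad_wr);
}
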
H A Dodp.c154 static void mlx5_ib_page_fault_resume(struct mlx5_ib_qp *qp, mlx5_ib_page_fault_resume() argument
157 struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.pd->device); mlx5_ib_page_fault_resume()
158 int ret = mlx5_core_page_fault_resume(dev->mdev, qp->mqp.qpn, mlx5_ib_page_fault_resume()
163 qp->mqp.qpn); mlx5_ib_page_fault_resume()
178 static int pagefault_single_data_segment(struct mlx5_ib_qp *qp, pagefault_single_data_segment() argument
183 struct mlx5_ib_dev *mib_dev = to_mdev(qp->ibqp.pd->device); pagefault_single_data_segment()
212 if (mr->ibmr.pd != qp->ibqp.pd) { pagefault_single_data_segment()
295 * @qp the QP on which the fault occurred.
310 static int pagefault_data_segments(struct mlx5_ib_qp *qp, pagefault_data_segments() argument
323 if (receive_queue && qp->ibqp.srq) pagefault_data_segments()
369 ret = pagefault_single_data_segment(qp, pfault, key, io_virt, pagefault_data_segments()
384 struct mlx5_ib_qp *qp, struct mlx5_ib_pfault *pfault, mlx5_ib_mr_initiator_pfault_handler()
387 struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.pd->device); mlx5_ib_mr_initiator_pfault_handler()
404 wqe_index, qp->mqp.qpn); mlx5_ib_mr_initiator_pfault_handler()
414 wqe_index, qp->mqp.qpn, mlx5_ib_mr_initiator_pfault_handler()
421 if (qp->mqp.qpn != ctrl_qpn) { mlx5_ib_mr_initiator_pfault_handler()
423 wqe_index, qp->mqp.qpn, mlx5_ib_mr_initiator_pfault_handler()
434 switch (qp->ibqp.qp_type) { mlx5_ib_mr_initiator_pfault_handler()
477 qp->ibqp.qp_type, opcode); mlx5_ib_mr_initiator_pfault_handler()
489 struct mlx5_ib_qp *qp, struct mlx5_ib_pfault *pfault, mlx5_ib_mr_responder_pfault_handler()
492 struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.pd->device); mlx5_ib_mr_responder_pfault_handler()
493 struct mlx5_ib_wq *wq = &qp->rq; mlx5_ib_mr_responder_pfault_handler()
496 if (qp->ibqp.srq) { mlx5_ib_mr_responder_pfault_handler()
501 if (qp->wq_sig) { mlx5_ib_mr_responder_pfault_handler()
511 switch (qp->ibqp.qp_type) { mlx5_ib_mr_responder_pfault_handler()
520 qp->ibqp.qp_type); mlx5_ib_mr_responder_pfault_handler()
529 static void mlx5_ib_mr_wqe_pfault_handler(struct mlx5_ib_qp *qp, mlx5_ib_mr_wqe_pfault_handler() argument
532 struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.pd->device); mlx5_ib_mr_wqe_pfault_handler()
548 ret = mlx5_ib_read_user_wqe(qp, requestor, wqe_index, buffer, mlx5_ib_mr_wqe_pfault_handler()
552 -ret, wqe_index, qp->mqp.qpn); mlx5_ib_mr_wqe_pfault_handler()
559 ret = mlx5_ib_mr_initiator_pfault_handler(qp, pfault, &wqe, mlx5_ib_mr_wqe_pfault_handler()
562 ret = mlx5_ib_mr_responder_pfault_handler(qp, pfault, &wqe, mlx5_ib_mr_wqe_pfault_handler()
575 ret = pagefault_data_segments(qp, pfault, wqe, wqe_end, &bytes_mapped, mlx5_ib_mr_wqe_pfault_handler()
587 mlx5_ib_page_fault_resume(qp, pfault, resume_with_error); mlx5_ib_mr_wqe_pfault_handler()
589 qp->mqp.qpn, resume_with_error, pfault->mpfault.flags); mlx5_ib_mr_wqe_pfault_handler()
600 static void mlx5_ib_mr_rdma_pfault_handler(struct mlx5_ib_qp *qp, mlx5_ib_mr_rdma_pfault_handler() argument
638 ret = pagefault_single_data_segment(qp, pfault, rkey, address, length, mlx5_ib_mr_rdma_pfault_handler()
644 mlx5_ib_page_fault_resume(qp, pfault, 1); mlx5_ib_mr_rdma_pfault_handler()
648 mlx5_ib_page_fault_resume(qp, pfault, 0); mlx5_ib_mr_rdma_pfault_handler()
656 ret = pagefault_single_data_segment(qp, &dummy_pfault, rkey, mlx5_ib_mr_rdma_pfault_handler()
663 qp->ibqp.qp_num, address, prefetch_len); mlx5_ib_mr_rdma_pfault_handler()
668 void mlx5_ib_mr_pfault_handler(struct mlx5_ib_qp *qp, mlx5_ib_mr_pfault_handler() argument
675 mlx5_ib_mr_wqe_pfault_handler(qp, pfault); mlx5_ib_mr_pfault_handler()
678 mlx5_ib_mr_rdma_pfault_handler(qp, pfault); mlx5_ib_mr_pfault_handler()
683 mlx5_ib_page_fault_resume(qp, pfault, 1); mlx5_ib_mr_pfault_handler()
695 struct mlx5_ib_qp *qp = container_of(pfault, struct mlx5_ib_qp, mlx5_ib_qp_pfault_action() local
697 mlx5_ib_mr_pfault_handler(qp, pfault); mlx5_ib_qp_pfault_action()
700 void mlx5_ib_qp_disable_pagefaults(struct mlx5_ib_qp *qp) mlx5_ib_qp_disable_pagefaults() argument
704 spin_lock_irqsave(&qp->disable_page_faults_lock, flags); mlx5_ib_qp_disable_pagefaults()
705 qp->disable_page_faults = 1; mlx5_ib_qp_disable_pagefaults()
706 spin_unlock_irqrestore(&qp->disable_page_faults_lock, flags); mlx5_ib_qp_disable_pagefaults()
716 void mlx5_ib_qp_enable_pagefaults(struct mlx5_ib_qp *qp) mlx5_ib_qp_enable_pagefaults() argument
720 spin_lock_irqsave(&qp->disable_page_faults_lock, flags); mlx5_ib_qp_enable_pagefaults()
721 qp->disable_page_faults = 0; mlx5_ib_qp_enable_pagefaults()
722 spin_unlock_irqrestore(&qp->disable_page_faults_lock, flags); mlx5_ib_qp_enable_pagefaults()
725 static void mlx5_ib_pfault_handler(struct mlx5_core_qp *qp, mlx5_ib_pfault_handler() argument
735 struct mlx5_ib_qp *mibqp = to_mibqp(qp); mlx5_ib_pfault_handler()
749 void mlx5_ib_odp_create_qp(struct mlx5_ib_qp *qp) mlx5_ib_odp_create_qp() argument
753 qp->disable_page_faults = 1; mlx5_ib_odp_create_qp()
754 spin_lock_init(&qp->disable_page_faults_lock); mlx5_ib_odp_create_qp()
756 qp->mqp.pfault_handler = mlx5_ib_pfault_handler; mlx5_ib_odp_create_qp()
759 INIT_WORK(&qp->pagefaults[i].work, mlx5_ib_qp_pfault_action); mlx5_ib_odp_create_qp()
383 mlx5_ib_mr_initiator_pfault_handler( struct mlx5_ib_qp *qp, struct mlx5_ib_pfault *pfault, void **wqe, void **wqe_end, int wqe_length) mlx5_ib_mr_initiator_pfault_handler() argument
488 mlx5_ib_mr_responder_pfault_handler( struct mlx5_ib_qp *qp, struct mlx5_ib_pfault *pfault, void **wqe, void **wqe_end, int wqe_length) mlx5_ib_mr_responder_pfault_handler() argument
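
The on-demand paging handlers above only run when the device advertises ODP support. A hedged sketch of the capability check a consumer could do on this kernel (where ib_query_device() still exists) is shown below; finer-grained per-transport bits live in the returned odp_caps, which the sketch does not inspect.

#include <linux/kernel.h>
#include <rdma/ib_verbs.h>

/* Hedged sketch: check for on-demand paging support before relying on
 * page-faultable memory registrations.
 */
static bool device_supports_odp(struct ib_device *ibdev)
{
        struct ib_device_attr attr;

        if (ib_query_device(ibdev, &attr))
                return false;

        return !!(attr.device_cap_flags & IB_DEVICE_ON_DEMAND_PAGING);
}
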
H A Dcq.c172 struct mlx5_ib_qp *qp) handle_responder()
174 struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.device); handle_responder()
180 if (qp->ibqp.srq || qp->ibqp.xrcd) { handle_responder()
183 if (qp->ibqp.xrcd) { handle_responder()
188 srq = to_msrq(qp->ibqp.srq); handle_responder()
198 wq = &qp->rq; handle_responder()
231 if (unlikely(is_qp1(qp->ibqp.qp_type))) { handle_responder()
234 ib_find_cached_pkey(&dev->ib_dev, qp->port, pkey, handle_responder()
312 static int is_atomic_response(struct mlx5_ib_qp *qp, uint16_t idx) is_atomic_response() argument
319 static void *mlx5_get_atomic_laddr(struct mlx5_ib_qp *qp, uint16_t idx) mlx5_get_atomic_laddr() argument
324 dpseg = mlx5_get_send_wqe(qp, idx) + sizeof(struct mlx5_wqe_ctrl_seg) + mlx5_get_atomic_laddr()
331 static void handle_atomic(struct mlx5_ib_qp *qp, struct mlx5_cqe64 *cqe64, handle_atomic() argument
338 if (!is_atomic_response(qp, idx)) handle_atomic()
342 addr = mlx5_get_atomic_laddr(qp, idx); handle_atomic()
356 static void handle_atomics(struct mlx5_ib_qp *qp, struct mlx5_cqe64 *cqe64, handle_atomics() argument
362 idx = tail & (qp->sq.wqe_cnt - 1); handle_atomics()
363 handle_atomic(qp, cqe64, idx); handle_atomics()
367 tail = qp->sq.w_list[idx].next; handle_atomics()
369 tail = qp->sq.w_list[idx].next; handle_atomics()
370 qp->sq.last_poll = tail; handle_atomics()
471 wc->qp = &(*cur_qp)->ibqp; mlx5_poll_one()
171 handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe, struct mlx5_ib_qp *qp) handle_responder() argument
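
handle_responder() and mlx5_poll_one() above run in the poll path; the CQ they service is typically created with a completion handler that defers polling to process context and re-arms notification. A hedged sketch against this kernel's ib_create_cq() (which takes a struct ib_cq_init_attr) follows; my_cq_event() and the work item are hypothetical consumer pieces.

#include <linux/err.h>
#include <linux/workqueue.h>
#include <rdma/ib_verbs.h>

/* Hedged sketch: create a CQ whose completion handler only kicks a work
 * item; the actual ib_poll_cq() loop runs from that work function.
 */
static void my_cq_event(struct ib_cq *cq, void *cq_context)
{
        struct work_struct *poll_work = cq_context;

        schedule_work(poll_work);       /* poll + re-arm outside IRQ context */
}

static struct ib_cq *create_notify_cq(struct ib_device *ibdev,
                                      struct work_struct *poll_work,
                                      int nr_cqe)
{
        struct ib_cq_init_attr cq_attr = { .cqe = nr_cqe };
        struct ib_cq *cq;

        cq = ib_create_cq(ibdev, my_cq_event, NULL, poll_work, &cq_attr);
        if (IS_ERR(cq))
                return cq;

        /* arm once so the first completion raises an event */
        ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
        return cq;
}
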
H A Dmlx5_ib.h42 #include <linux/mlx5/qp.h>
192 /* serialize qp state modifications
355 struct ib_qp *qp; member in struct:umr_common
531 int mlx5_ib_destroy_qp(struct ib_qp *qp);
536 void *mlx5_get_send_wqe(struct mlx5_ib_qp *qp, int n);
537 int mlx5_ib_read_user_wqe(struct mlx5_ib_qp *qp, int send, int wqe_index,
612 void mlx5_ib_mr_pfault_handler(struct mlx5_ib_qp *qp,
614 void mlx5_ib_odp_create_qp(struct mlx5_ib_qp *qp);
619 void mlx5_ib_qp_disable_pagefaults(struct mlx5_ib_qp *qp);
620 void mlx5_ib_qp_enable_pagefaults(struct mlx5_ib_qp *qp);
630 static inline void mlx5_ib_odp_create_qp(struct mlx5_ib_qp *qp) {} mlx5_ib_odp_create_qp() argument
635 static inline void mlx5_ib_qp_disable_pagefaults(struct mlx5_ib_qp *qp) {} mlx5_ib_qp_disable_pagefaults() argument
636 static inline void mlx5_ib_qp_enable_pagefaults(struct mlx5_ib_qp *qp) {} mlx5_ib_qp_enable_pagefaults() argument
H A Dmain.c1066 mlx5_ib_destroy_qp(dev->umrc.qp); destroy_umrc_res()
1081 struct ib_qp *qp; create_umr_res() local
1116 qp = mlx5_ib_create_qp(pd, init_attr, NULL); create_umr_res()
1117 if (IS_ERR(qp)) { create_umr_res()
1119 ret = PTR_ERR(qp); create_umr_res()
1122 qp->device = &dev->ib_dev; create_umr_res()
1123 qp->real_qp = qp; create_umr_res()
1124 qp->uobject = NULL; create_umr_res()
1125 qp->qp_type = MLX5_IB_QPT_REG_UMR; create_umr_res()
1129 ret = mlx5_ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_PKEY_INDEX | create_umr_res()
1140 ret = mlx5_ib_modify_qp(qp, attr, IB_QP_STATE, NULL); create_umr_res()
1148 ret = mlx5_ib_modify_qp(qp, attr, IB_QP_STATE, NULL); create_umr_res()
1154 dev->umrc.qp = qp; create_umr_res()
1171 mlx5_ib_destroy_qp(qp); create_umr_res()
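
create_umr_res()/destroy_umrc_res() above build a private QP for UMR work and tear it down again on the error and cleanup paths. The teardown order, QP first, then CQ, then PD, is the general dependency rule for any verbs consumer; a hedged sketch:

#include <linux/kernel.h>
#include <rdma/ib_verbs.h>

/* Hedged sketch: release verbs resources in dependency order. A PD or CQ
 * that a live QP still references cannot be freed, so the QP goes first.
 */
static void teardown_verbs_resources(struct ib_qp *qp, struct ib_cq *cq,
                                     struct ib_pd *pd)
{
        if (qp && ib_destroy_qp(qp))
                pr_warn("ib_destroy_qp failed\n");
        if (cq && ib_destroy_cq(cq))
                pr_warn("ib_destroy_cq failed\n");
        if (pd && ib_dealloc_pd(pd))
                pr_warn("ib_dealloc_pd failed\n");
}
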
/linux-4.4.14/drivers/infiniband/hw/mthca/
H A Dmthca_qp.c195 static int is_sqp(struct mthca_dev *dev, struct mthca_qp *qp) is_sqp() argument
197 return qp->qpn >= dev->qp_table.sqp_start && is_sqp()
198 qp->qpn <= dev->qp_table.sqp_start + 3; is_sqp()
201 static int is_qp0(struct mthca_dev *dev, struct mthca_qp *qp) is_qp0() argument
203 return qp->qpn >= dev->qp_table.sqp_start && is_qp0()
204 qp->qpn <= dev->qp_table.sqp_start + 1; is_qp0()
207 static void *get_recv_wqe(struct mthca_qp *qp, int n) get_recv_wqe() argument
209 if (qp->is_direct) get_recv_wqe()
210 return qp->queue.direct.buf + (n << qp->rq.wqe_shift); get_recv_wqe()
212 return qp->queue.page_list[(n << qp->rq.wqe_shift) >> PAGE_SHIFT].buf + get_recv_wqe()
213 ((n << qp->rq.wqe_shift) & (PAGE_SIZE - 1)); get_recv_wqe()
216 static void *get_send_wqe(struct mthca_qp *qp, int n) get_send_wqe() argument
218 if (qp->is_direct) get_send_wqe()
219 return qp->queue.direct.buf + qp->send_wqe_offset + get_send_wqe()
220 (n << qp->sq.wqe_shift); get_send_wqe()
222 return qp->queue.page_list[(qp->send_wqe_offset + get_send_wqe()
223 (n << qp->sq.wqe_shift)) >> get_send_wqe()
225 ((qp->send_wqe_offset + (n << qp->sq.wqe_shift)) & get_send_wqe()
240 struct mthca_qp *qp; mthca_qp_event() local
244 qp = mthca_array_get(&dev->qp_table.qp, qpn & (dev->limits.num_qps - 1)); mthca_qp_event()
245 if (qp) mthca_qp_event()
246 ++qp->refcount; mthca_qp_event()
249 if (!qp) { mthca_qp_event()
256 qp->port = qp->alt_port; mthca_qp_event()
260 event.element.qp = &qp->ibqp; mthca_qp_event()
261 if (qp->ibqp.event_handler) mthca_qp_event()
262 qp->ibqp.event_handler(&event, qp->ibqp.qp_context); mthca_qp_event()
265 if (!--qp->refcount) mthca_qp_event()
266 wake_up(&qp->wait); mthca_qp_event()
327 static __be32 get_hw_access_flags(struct mthca_qp *qp, const struct ib_qp_attr *attr, get_hw_access_flags() argument
337 dest_rd_atomic = qp->resp_depth; get_hw_access_flags()
342 access_flags = qp->atomic_rd_en; get_hw_access_flags()
428 struct mthca_qp *qp = to_mqp(ibqp); mthca_query_qp() local
435 mutex_lock(&qp->mutex); mthca_query_qp()
437 if (qp->state == IB_QPS_RESET) { mthca_query_qp()
448 err = mthca_QUERY_QP(dev, qp->qpn, 0, mailbox); mthca_query_qp()
458 qp->state = to_ib_qp_state(mthca_state); mthca_query_qp()
459 qp_attr->qp_state = qp->state; mthca_query_qp()
470 if (qp->transport == RC || qp->transport == UC) { mthca_query_qp()
482 /* qp_attr->en_sqd_async_notify is only applicable in modify qp */ mthca_query_qp()
498 qp_attr->cap.max_send_wr = qp->sq.max; mthca_query_qp()
499 qp_attr->cap.max_recv_wr = qp->rq.max; mthca_query_qp()
500 qp_attr->cap.max_send_sge = qp->sq.max_gs; mthca_query_qp()
501 qp_attr->cap.max_recv_sge = qp->rq.max_gs; mthca_query_qp()
502 qp_attr->cap.max_inline_data = qp->max_inline_data; mthca_query_qp()
505 qp_init_attr->sq_sig_type = qp->sq_policy; mthca_query_qp()
511 mutex_unlock(&qp->mutex); mthca_query_qp()
548 struct mthca_qp *qp = to_mqp(ibqp); __mthca_modify_qp() local
565 (to_mthca_st(qp->transport) << 16)); __mthca_modify_qp()
586 if (qp->transport == MLX || qp->transport == UD) __mthca_modify_qp()
598 if (qp->rq.max) __mthca_modify_qp()
599 qp_context->rq_size_stride = ilog2(qp->rq.max) << 3; __mthca_modify_qp()
600 qp_context->rq_size_stride |= qp->rq.wqe_shift - 4; __mthca_modify_qp()
602 if (qp->sq.max) __mthca_modify_qp()
603 qp_context->sq_size_stride = ilog2(qp->sq.max) << 3; __mthca_modify_qp()
604 qp_context->sq_size_stride |= qp->sq.wqe_shift - 4; __mthca_modify_qp()
609 if (qp->ibqp.uobject) __mthca_modify_qp()
611 cpu_to_be32(to_mucontext(qp->ibqp.uobject->context)->uar.index); __mthca_modify_qp()
614 qp_context->local_qpn = cpu_to_be32(qp->qpn); __mthca_modify_qp()
619 if (qp->transport == MLX) __mthca_modify_qp()
621 cpu_to_be32(qp->port << 24); __mthca_modify_qp()
645 attr_mask & IB_QP_PORT ? attr->port_num : qp->port)) __mthca_modify_qp()
695 qp_context->wqe_lkey = cpu_to_be32(qp->mr.ibmr.lkey); __mthca_modify_qp()
699 if (qp->sq_policy == IB_SIGNAL_ALL_WR) __mthca_modify_qp()
722 qp_context->snd_wqe_base_l = cpu_to_be32(qp->send_wqe_offset); __mthca_modify_qp()
723 qp_context->snd_db_index = cpu_to_be32(qp->sq.db_index); __mthca_modify_qp()
735 qp_context->params2 |= get_hw_access_flags(qp, attr, attr_mask); __mthca_modify_qp()
755 ((qp->qpn & (dev->limits.num_qps - 1)) * MTHCA_RDB_ENTRY_SIZE << __mthca_modify_qp()
761 qp_context->rcv_db_index = cpu_to_be32(qp->rq.db_index); __mthca_modify_qp()
777 err = mthca_MODIFY_QP(dev, cur_state, new_state, qp->qpn, 0, __mthca_modify_qp()
785 qp->state = new_state; __mthca_modify_qp()
787 qp->atomic_rd_en = attr->qp_access_flags; __mthca_modify_qp()
789 qp->resp_depth = attr->max_dest_rd_atomic; __mthca_modify_qp()
791 qp->port = attr->port_num; __mthca_modify_qp()
793 qp->alt_port = attr->alt_port_num; __mthca_modify_qp()
795 if (is_sqp(dev, qp)) __mthca_modify_qp()
796 store_attrs(to_msqp(qp), attr, attr_mask); __mthca_modify_qp()
802 if (is_qp0(dev, qp)) { __mthca_modify_qp()
805 init_port(dev, qp->port); __mthca_modify_qp()
811 mthca_CLOSE_IB(dev, qp->port); __mthca_modify_qp()
818 if (new_state == IB_QPS_RESET && !qp->ibqp.uobject) { __mthca_modify_qp()
819 mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq), qp->qpn, __mthca_modify_qp()
820 qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL); __mthca_modify_qp()
821 if (qp->ibqp.send_cq != qp->ibqp.recv_cq) __mthca_modify_qp()
822 mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq), qp->qpn, NULL); __mthca_modify_qp()
824 mthca_wq_reset(&qp->sq); __mthca_modify_qp()
825 qp->sq.last = get_send_wqe(qp, qp->sq.max - 1); __mthca_modify_qp()
827 mthca_wq_reset(&qp->rq); __mthca_modify_qp()
828 qp->rq.last = get_recv_wqe(qp, qp->rq.max - 1); __mthca_modify_qp()
831 *qp->sq.db = 0; __mthca_modify_qp()
832 *qp->rq.db = 0; __mthca_modify_qp()
846 struct mthca_qp *qp = to_mqp(ibqp); mthca_modify_qp() local
850 mutex_lock(&qp->mutex); mthca_modify_qp()
854 spin_lock_irq(&qp->sq.lock); mthca_modify_qp()
855 spin_lock(&qp->rq.lock); mthca_modify_qp()
856 cur_state = qp->state; mthca_modify_qp()
857 spin_unlock(&qp->rq.lock); mthca_modify_qp()
858 spin_unlock_irq(&qp->sq.lock); mthca_modify_qp()
867 qp->transport, cur_state, new_state, mthca_modify_qp()
907 mutex_unlock(&qp->mutex); mthca_modify_qp()
911 static int mthca_max_data_size(struct mthca_dev *dev, struct mthca_qp *qp, int desc_sz) mthca_max_data_size() argument
919 switch (qp->transport) { mthca_max_data_size()
947 struct mthca_qp *qp) mthca_adjust_qp_caps()
949 int max_data_size = mthca_max_data_size(dev, qp, mthca_adjust_qp_caps()
951 1 << qp->sq.wqe_shift)); mthca_adjust_qp_caps()
953 qp->max_inline_data = mthca_max_inline_data(pd, max_data_size); mthca_adjust_qp_caps()
955 qp->sq.max_gs = min_t(int, dev->limits.max_sg, mthca_adjust_qp_caps()
957 qp->rq.max_gs = min_t(int, dev->limits.max_sg, mthca_adjust_qp_caps()
958 (min(dev->limits.max_desc_sz, 1 << qp->rq.wqe_shift) - mthca_adjust_qp_caps()
964 * Allocate and register buffer for WQEs. qp->rq.max, sq.max,
972 struct mthca_qp *qp) mthca_alloc_wqe_buf()
978 qp->rq.max_gs * sizeof (struct mthca_data_seg); mthca_alloc_wqe_buf()
983 for (qp->rq.wqe_shift = 6; 1 << qp->rq.wqe_shift < size; mthca_alloc_wqe_buf()
984 qp->rq.wqe_shift++) mthca_alloc_wqe_buf()
987 size = qp->sq.max_gs * sizeof (struct mthca_data_seg); mthca_alloc_wqe_buf()
988 switch (qp->transport) { mthca_alloc_wqe_buf()
1027 for (qp->sq.wqe_shift = 6; 1 << qp->sq.wqe_shift < size; mthca_alloc_wqe_buf()
1028 qp->sq.wqe_shift++) mthca_alloc_wqe_buf()
1031 qp->send_wqe_offset = ALIGN(qp->rq.max << qp->rq.wqe_shift, mthca_alloc_wqe_buf()
1032 1 << qp->sq.wqe_shift); mthca_alloc_wqe_buf()
1042 size = PAGE_ALIGN(qp->send_wqe_offset + mthca_alloc_wqe_buf()
1043 (qp->sq.max << qp->sq.wqe_shift)); mthca_alloc_wqe_buf()
1045 qp->wrid = kmalloc((qp->rq.max + qp->sq.max) * sizeof (u64), mthca_alloc_wqe_buf()
1047 if (!qp->wrid) mthca_alloc_wqe_buf()
1051 &qp->queue, &qp->is_direct, pd, 0, &qp->mr); mthca_alloc_wqe_buf()
1058 kfree(qp->wrid); mthca_alloc_wqe_buf()
1063 struct mthca_qp *qp) mthca_free_wqe_buf()
1065 mthca_buf_free(dev, PAGE_ALIGN(qp->send_wqe_offset + mthca_free_wqe_buf()
1066 (qp->sq.max << qp->sq.wqe_shift)), mthca_free_wqe_buf()
1067 &qp->queue, qp->is_direct, &qp->mr); mthca_free_wqe_buf()
1068 kfree(qp->wrid); mthca_free_wqe_buf()
1072 struct mthca_qp *qp) mthca_map_memfree()
1077 ret = mthca_table_get(dev, dev->qp_table.qp_table, qp->qpn); mthca_map_memfree()
1081 ret = mthca_table_get(dev, dev->qp_table.eqp_table, qp->qpn); mthca_map_memfree()
1086 qp->qpn << dev->qp_table.rdb_shift); mthca_map_memfree()
1095 mthca_table_put(dev, dev->qp_table.eqp_table, qp->qpn); mthca_map_memfree()
1098 mthca_table_put(dev, dev->qp_table.qp_table, qp->qpn); mthca_map_memfree()
1104 struct mthca_qp *qp) mthca_unmap_memfree()
1107 qp->qpn << dev->qp_table.rdb_shift); mthca_unmap_memfree()
1108 mthca_table_put(dev, dev->qp_table.eqp_table, qp->qpn); mthca_unmap_memfree()
1109 mthca_table_put(dev, dev->qp_table.qp_table, qp->qpn); mthca_unmap_memfree()
1113 struct mthca_qp *qp) mthca_alloc_memfree()
1116 qp->rq.db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_RQ, mthca_alloc_memfree()
1117 qp->qpn, &qp->rq.db); mthca_alloc_memfree()
1118 if (qp->rq.db_index < 0) mthca_alloc_memfree()
1121 qp->sq.db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_SQ, mthca_alloc_memfree()
1122 qp->qpn, &qp->sq.db); mthca_alloc_memfree()
1123 if (qp->sq.db_index < 0) { mthca_alloc_memfree()
1124 mthca_free_db(dev, MTHCA_DB_TYPE_RQ, qp->rq.db_index); mthca_alloc_memfree()
1133 struct mthca_qp *qp) mthca_free_memfree()
1136 mthca_free_db(dev, MTHCA_DB_TYPE_SQ, qp->sq.db_index); mthca_free_memfree()
1137 mthca_free_db(dev, MTHCA_DB_TYPE_RQ, qp->rq.db_index); mthca_free_memfree()
1146 struct mthca_qp *qp) mthca_alloc_qp_common()
1152 qp->refcount = 1; mthca_alloc_qp_common()
1153 init_waitqueue_head(&qp->wait); mthca_alloc_qp_common()
1154 mutex_init(&qp->mutex); mthca_alloc_qp_common()
1155 qp->state = IB_QPS_RESET; mthca_alloc_qp_common()
1156 qp->atomic_rd_en = 0; mthca_alloc_qp_common()
1157 qp->resp_depth = 0; mthca_alloc_qp_common()
1158 qp->sq_policy = send_policy; mthca_alloc_qp_common()
1159 mthca_wq_reset(&qp->sq); mthca_alloc_qp_common()
1160 mthca_wq_reset(&qp->rq); mthca_alloc_qp_common()
1162 spin_lock_init(&qp->sq.lock); mthca_alloc_qp_common()
1163 spin_lock_init(&qp->rq.lock); mthca_alloc_qp_common()
1165 ret = mthca_map_memfree(dev, qp); mthca_alloc_qp_common()
1169 ret = mthca_alloc_wqe_buf(dev, pd, qp); mthca_alloc_qp_common()
1171 mthca_unmap_memfree(dev, qp); mthca_alloc_qp_common()
1175 mthca_adjust_qp_caps(dev, pd, qp); mthca_alloc_qp_common()
1185 ret = mthca_alloc_memfree(dev, qp); mthca_alloc_qp_common()
1187 mthca_free_wqe_buf(dev, qp); mthca_alloc_qp_common()
1188 mthca_unmap_memfree(dev, qp); mthca_alloc_qp_common()
1195 qp->rq.max_gs * sizeof (struct mthca_data_seg)) / 16; mthca_alloc_qp_common()
1197 for (i = 0; i < qp->rq.max; ++i) { mthca_alloc_qp_common()
1198 next = get_recv_wqe(qp, i); mthca_alloc_qp_common()
1199 next->nda_op = cpu_to_be32(((i + 1) & (qp->rq.max - 1)) << mthca_alloc_qp_common()
1200 qp->rq.wqe_shift); mthca_alloc_qp_common()
1204 (void *) scatter < (void *) next + (1 << qp->rq.wqe_shift); mthca_alloc_qp_common()
1209 for (i = 0; i < qp->sq.max; ++i) { mthca_alloc_qp_common()
1210 next = get_send_wqe(qp, i); mthca_alloc_qp_common()
1211 next->nda_op = cpu_to_be32((((i + 1) & (qp->sq.max - 1)) << mthca_alloc_qp_common()
1212 qp->sq.wqe_shift) + mthca_alloc_qp_common()
1213 qp->send_wqe_offset); mthca_alloc_qp_common()
1216 for (i = 0; i < qp->rq.max; ++i) { mthca_alloc_qp_common()
1217 next = get_recv_wqe(qp, i); mthca_alloc_qp_common()
1218 next->nda_op = htonl((((i + 1) % qp->rq.max) << mthca_alloc_qp_common()
1219 qp->rq.wqe_shift) | 1); mthca_alloc_qp_common()
1224 qp->sq.last = get_send_wqe(qp, qp->sq.max - 1); mthca_alloc_qp_common()
1225 qp->rq.last = get_recv_wqe(qp, qp->rq.max - 1); mthca_alloc_qp_common()
1231 struct mthca_pd *pd, struct mthca_qp *qp) mthca_set_qp_size()
1233 int max_data_size = mthca_max_data_size(dev, qp, dev->limits.max_desc_sz); mthca_set_qp_size()
1247 if (qp->transport == MLX && cap->max_send_sge + 2 > dev->limits.max_sg) mthca_set_qp_size()
1251 qp->rq.max = cap->max_recv_wr ? mthca_set_qp_size()
1253 qp->sq.max = cap->max_send_wr ? mthca_set_qp_size()
1256 qp->rq.max = cap->max_recv_wr; mthca_set_qp_size()
1257 qp->sq.max = cap->max_send_wr; mthca_set_qp_size()
1260 qp->rq.max_gs = cap->max_recv_sge; mthca_set_qp_size()
1261 qp->sq.max_gs = max_t(int, cap->max_send_sge, mthca_set_qp_size()
1276 struct mthca_qp *qp) mthca_alloc_qp()
1281 case IB_QPT_RC: qp->transport = RC; break; mthca_alloc_qp()
1282 case IB_QPT_UC: qp->transport = UC; break; mthca_alloc_qp()
1283 case IB_QPT_UD: qp->transport = UD; break; mthca_alloc_qp()
1287 err = mthca_set_qp_size(dev, cap, pd, qp); mthca_alloc_qp()
1291 qp->qpn = mthca_alloc(&dev->qp_table.alloc); mthca_alloc_qp()
1292 if (qp->qpn == -1) mthca_alloc_qp()
1296 qp->port = 0; mthca_alloc_qp()
1299 send_policy, qp); mthca_alloc_qp()
1301 mthca_free(&dev->qp_table.alloc, qp->qpn); mthca_alloc_qp()
1306 mthca_array_set(&dev->qp_table.qp, mthca_alloc_qp()
1307 qp->qpn & (dev->limits.num_qps - 1), qp); mthca_alloc_qp()
1356 sqp->qp.transport = MLX; mthca_alloc_sqp()
1357 err = mthca_set_qp_size(dev, cap, pd, &sqp->qp); mthca_alloc_sqp()
1361 sqp->header_buf_size = sqp->qp.sq.max * MTHCA_UD_HEADER_SIZE; mthca_alloc_sqp()
1368 if (mthca_array_get(&dev->qp_table.qp, mqpn)) mthca_alloc_sqp()
1371 mthca_array_set(&dev->qp_table.qp, mqpn, sqp); mthca_alloc_sqp()
1377 sqp->qp.port = port; mthca_alloc_sqp()
1378 sqp->qp.qpn = mqpn; mthca_alloc_sqp()
1379 sqp->qp.transport = MLX; mthca_alloc_sqp()
1382 send_policy, &sqp->qp); mthca_alloc_sqp()
1398 mthca_array_clear(&dev->qp_table.qp, mqpn); mthca_alloc_sqp()
1410 static inline int get_qp_refcount(struct mthca_dev *dev, struct mthca_qp *qp) get_qp_refcount() argument
1415 c = qp->refcount; get_qp_refcount()
1422 struct mthca_qp *qp) mthca_free_qp()
1427 send_cq = to_mcq(qp->ibqp.send_cq); mthca_free_qp()
1428 recv_cq = to_mcq(qp->ibqp.recv_cq); mthca_free_qp()
1437 mthca_array_clear(&dev->qp_table.qp, mthca_free_qp()
1438 qp->qpn & (dev->limits.num_qps - 1)); mthca_free_qp()
1439 --qp->refcount; mthca_free_qp()
1444 wait_event(qp->wait, !get_qp_refcount(dev, qp)); mthca_free_qp()
1446 if (qp->state != IB_QPS_RESET) mthca_free_qp()
1447 mthca_MODIFY_QP(dev, qp->state, IB_QPS_RESET, qp->qpn, 0, mthca_free_qp()
1455 if (!qp->ibqp.uobject) { mthca_free_qp()
1456 mthca_cq_clean(dev, recv_cq, qp->qpn, mthca_free_qp()
1457 qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL); mthca_free_qp()
1459 mthca_cq_clean(dev, send_cq, qp->qpn, NULL); mthca_free_qp()
1461 mthca_free_memfree(dev, qp); mthca_free_qp()
1462 mthca_free_wqe_buf(dev, qp); mthca_free_qp()
1465 mthca_unmap_memfree(dev, qp); mthca_free_qp()
1467 if (is_sqp(dev, qp)) { mthca_free_qp()
1468 atomic_dec(&(to_mpd(qp->ibqp.pd)->sqp_count)); mthca_free_qp()
1470 to_msqp(qp)->header_buf_size, mthca_free_qp()
1471 to_msqp(qp)->header_buf, mthca_free_qp()
1472 to_msqp(qp)->header_dma); mthca_free_qp()
1474 mthca_free(&dev->qp_table.alloc, qp->qpn); mthca_free_qp()
1495 mlx->flags |= cpu_to_be32((!sqp->qp.ibqp.qp_num ? MTHCA_MLX_VL15 : 0) | build_mlx_header()
1516 sqp->ud_header.lrh.virtual_lane = !sqp->qp.ibqp.qp_num ? 15 : 0; build_mlx_header()
1520 if (!sqp->qp.ibqp.qp_num) build_mlx_header()
1521 ib_get_cached_pkey(&dev->ib_dev, sqp->qp.port, build_mlx_header()
1524 ib_get_cached_pkey(&dev->ib_dev, sqp->qp.port, build_mlx_header()
1531 sqp->ud_header.deth.source_qpn = cpu_to_be32(sqp->qp.ibqp.qp_num); build_mlx_header()
1538 data->lkey = cpu_to_be32(to_mpd(sqp->qp.ibqp.pd)->ntmr.ibmr.lkey); build_mlx_header()
1606 struct mthca_qp *qp = to_mqp(ibqp); mthca_tavor_post_send() local
1626 spin_lock_irqsave(&qp->sq.lock, flags); mthca_tavor_post_send()
1630 ind = qp->sq.next_ind; mthca_tavor_post_send()
1633 if (mthca_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) { mthca_tavor_post_send()
1635 " %d max, %d nreq)\n", qp->qpn, mthca_tavor_post_send()
1636 qp->sq.head, qp->sq.tail, mthca_tavor_post_send()
1637 qp->sq.max, nreq); mthca_tavor_post_send()
1643 wqe = get_send_wqe(qp, ind); mthca_tavor_post_send()
1644 prev_wqe = qp->sq.last; mthca_tavor_post_send()
1645 qp->sq.last = wqe; mthca_tavor_post_send()
1662 switch (qp->transport) { mthca_tavor_post_send()
1717 err = build_mlx_header(dev, to_msqp(qp), ind, ud_wr(wr), mthca_tavor_post_send()
1729 if (wr->num_sge > qp->sq.max_gs) { mthca_tavor_post_send()
1743 if (qp->transport == MLX) { mthca_tavor_post_send()
1751 qp->wrid[ind + qp->rq.max] = wr->wr_id; mthca_tavor_post_send()
1761 cpu_to_be32(((ind << qp->sq.wqe_shift) + mthca_tavor_post_send()
1762 qp->send_wqe_offset) | mthca_tavor_post_send()
1778 if (unlikely(ind >= qp->sq.max)) mthca_tavor_post_send()
1779 ind -= qp->sq.max; mthca_tavor_post_send()
1786 mthca_write64(((qp->sq.next_ind << qp->sq.wqe_shift) + mthca_tavor_post_send()
1787 qp->send_wqe_offset) | f0 | op0, mthca_tavor_post_send()
1788 (qp->qpn << 8) | size0, mthca_tavor_post_send()
1798 qp->sq.next_ind = ind; mthca_tavor_post_send()
1799 qp->sq.head += nreq; mthca_tavor_post_send()
1801 spin_unlock_irqrestore(&qp->sq.lock, flags); mthca_tavor_post_send()
1809 struct mthca_qp *qp = to_mqp(ibqp); mthca_tavor_post_receive() local
1827 spin_lock_irqsave(&qp->rq.lock, flags); mthca_tavor_post_receive()
1831 ind = qp->rq.next_ind; mthca_tavor_post_receive()
1834 if (mthca_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) { mthca_tavor_post_receive()
1836 " %d max, %d nreq)\n", qp->qpn, mthca_tavor_post_receive()
1837 qp->rq.head, qp->rq.tail, mthca_tavor_post_receive()
1838 qp->rq.max, nreq); mthca_tavor_post_receive()
1844 wqe = get_recv_wqe(qp, ind); mthca_tavor_post_receive()
1845 prev_wqe = qp->rq.last; mthca_tavor_post_receive()
1846 qp->rq.last = wqe; mthca_tavor_post_receive()
1855 if (unlikely(wr->num_sge > qp->rq.max_gs)) { mthca_tavor_post_receive()
1867 qp->wrid[ind] = wr->wr_id; mthca_tavor_post_receive()
1876 if (unlikely(ind >= qp->rq.max)) mthca_tavor_post_receive()
1877 ind -= qp->rq.max; mthca_tavor_post_receive()
1885 mthca_write64((qp->rq.next_ind << qp->rq.wqe_shift) | size0, mthca_tavor_post_receive()
1886 qp->qpn << 8, dev->kar + MTHCA_RECEIVE_DOORBELL, mthca_tavor_post_receive()
1889 qp->rq.next_ind = ind; mthca_tavor_post_receive()
1890 qp->rq.head += MTHCA_TAVOR_MAX_WQES_PER_RECV_DB; mthca_tavor_post_receive()
1898 mthca_write64((qp->rq.next_ind << qp->rq.wqe_shift) | size0, mthca_tavor_post_receive()
1899 qp->qpn << 8 | nreq, dev->kar + MTHCA_RECEIVE_DOORBELL, mthca_tavor_post_receive()
1903 qp->rq.next_ind = ind; mthca_tavor_post_receive()
1904 qp->rq.head += nreq; mthca_tavor_post_receive()
1912 spin_unlock_irqrestore(&qp->rq.lock, flags); mthca_tavor_post_receive()
1920 struct mthca_qp *qp = to_mqp(ibqp); mthca_arbel_post_send() local
1941 spin_lock_irqsave(&qp->sq.lock, flags); mthca_arbel_post_send()
1945 ind = qp->sq.head & (qp->sq.max - 1); mthca_arbel_post_send()
1952 ((qp->sq.head & 0xffff) << 8) | f0 | op0; mthca_arbel_post_send()
1954 qp->sq.head += MTHCA_ARBEL_MAX_WQES_PER_SEND_DB; mthca_arbel_post_send()
1961 *qp->sq.db = cpu_to_be32(qp->sq.head & 0xffff); mthca_arbel_post_send()
1969 mthca_write64(dbhi, (qp->qpn << 8) | size0, mthca_arbel_post_send()
1974 if (mthca_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) { mthca_arbel_post_send()
1976 " %d max, %d nreq)\n", qp->qpn, mthca_arbel_post_send()
1977 qp->sq.head, qp->sq.tail, mthca_arbel_post_send()
1978 qp->sq.max, nreq); mthca_arbel_post_send()
1984 wqe = get_send_wqe(qp, ind); mthca_arbel_post_send()
1985 prev_wqe = qp->sq.last; mthca_arbel_post_send()
1986 qp->sq.last = wqe; mthca_arbel_post_send()
2003 switch (qp->transport) { mthca_arbel_post_send()
2058 err = build_mlx_header(dev, to_msqp(qp), ind, ud_wr(wr), mthca_arbel_post_send()
2070 if (wr->num_sge > qp->sq.max_gs) { mthca_arbel_post_send()
2084 if (qp->transport == MLX) { mthca_arbel_post_send()
2092 qp->wrid[ind + qp->rq.max] = wr->wr_id; mthca_arbel_post_send()
2102 cpu_to_be32(((ind << qp->sq.wqe_shift) + mthca_arbel_post_send()
2103 qp->send_wqe_offset) | mthca_arbel_post_send()
2119 if (unlikely(ind >= qp->sq.max)) mthca_arbel_post_send()
2120 ind -= qp->sq.max; mthca_arbel_post_send()
2125 dbhi = (nreq << 24) | ((qp->sq.head & 0xffff) << 8) | f0 | op0; mthca_arbel_post_send()
2127 qp->sq.head += nreq; mthca_arbel_post_send()
2134 *qp->sq.db = cpu_to_be32(qp->sq.head & 0xffff); mthca_arbel_post_send()
2142 mthca_write64(dbhi, (qp->qpn << 8) | size0, dev->kar + MTHCA_SEND_DOORBELL, mthca_arbel_post_send()
2152 spin_unlock_irqrestore(&qp->sq.lock, flags); mthca_arbel_post_send()
2160 struct mthca_qp *qp = to_mqp(ibqp); mthca_arbel_post_receive() local
2168 spin_lock_irqsave(&qp->rq.lock, flags); mthca_arbel_post_receive()
2172 ind = qp->rq.head & (qp->rq.max - 1); mthca_arbel_post_receive()
2175 if (mthca_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) { mthca_arbel_post_receive()
2177 " %d max, %d nreq)\n", qp->qpn, mthca_arbel_post_receive()
2178 qp->rq.head, qp->rq.tail, mthca_arbel_post_receive()
2179 qp->rq.max, nreq); mthca_arbel_post_receive()
2185 wqe = get_recv_wqe(qp, ind); mthca_arbel_post_receive()
2191 if (unlikely(wr->num_sge > qp->rq.max_gs)) { mthca_arbel_post_receive()
2202 if (i < qp->rq.max_gs) mthca_arbel_post_receive()
2205 qp->wrid[ind] = wr->wr_id; mthca_arbel_post_receive()
2208 if (unlikely(ind >= qp->rq.max)) mthca_arbel_post_receive()
2209 ind -= qp->rq.max; mthca_arbel_post_receive()
2213 qp->rq.head += nreq; mthca_arbel_post_receive()
2220 *qp->rq.db = cpu_to_be32(qp->rq.head & 0xffff); mthca_arbel_post_receive()
2223 spin_unlock_irqrestore(&qp->rq.lock, flags); mthca_arbel_post_receive()
2227 void mthca_free_err_wqe(struct mthca_dev *dev, struct mthca_qp *qp, int is_send, mthca_free_err_wqe() argument
2236 if (qp->ibqp.srq && !is_send) { mthca_free_err_wqe()
2242 next = get_send_wqe(qp, index); mthca_free_err_wqe()
2244 next = get_recv_wqe(qp, index); mthca_free_err_wqe()
2274 err = mthca_array_init(&dev->qp_table.qp, mthca_init_qp_table()
2296 mthca_array_cleanup(&dev->qp_table.qp, dev->limits.num_qps); mthca_init_qp_table()
2309 mthca_array_cleanup(&dev->qp_table.qp, dev->limits.num_qps); mthca_cleanup_qp_table()
945 mthca_adjust_qp_caps(struct mthca_dev *dev, struct mthca_pd *pd, struct mthca_qp *qp) mthca_adjust_qp_caps() argument
970 mthca_alloc_wqe_buf(struct mthca_dev *dev, struct mthca_pd *pd, struct mthca_qp *qp) mthca_alloc_wqe_buf() argument
1062 mthca_free_wqe_buf(struct mthca_dev *dev, struct mthca_qp *qp) mthca_free_wqe_buf() argument
1071 mthca_map_memfree(struct mthca_dev *dev, struct mthca_qp *qp) mthca_map_memfree() argument
1103 mthca_unmap_memfree(struct mthca_dev *dev, struct mthca_qp *qp) mthca_unmap_memfree() argument
1112 mthca_alloc_memfree(struct mthca_dev *dev, struct mthca_qp *qp) mthca_alloc_memfree() argument
1132 mthca_free_memfree(struct mthca_dev *dev, struct mthca_qp *qp) mthca_free_memfree() argument
1141 mthca_alloc_qp_common(struct mthca_dev *dev, struct mthca_pd *pd, struct mthca_cq *send_cq, struct mthca_cq *recv_cq, enum ib_sig_type send_policy, struct mthca_qp *qp) mthca_alloc_qp_common() argument
1230 mthca_set_qp_size(struct mthca_dev *dev, struct ib_qp_cap *cap, struct mthca_pd *pd, struct mthca_qp *qp) mthca_set_qp_size() argument
1269 mthca_alloc_qp(struct mthca_dev *dev, struct mthca_pd *pd, struct mthca_cq *send_cq, struct mthca_cq *recv_cq, enum ib_qp_type type, enum ib_sig_type send_policy, struct ib_qp_cap *cap, struct mthca_qp *qp) mthca_alloc_qp() argument
1421 mthca_free_qp(struct mthca_dev *dev, struct mthca_qp *qp) mthca_free_qp() argument
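
mthca_alloc_wqe_buf() above sizes each work queue by rounding the per-WQE byte count up to a power of two (never below 64 bytes) and keeping only its log2 in rq.wqe_shift/sq.wqe_shift, then placing the send queue at the first sq-stride-aligned offset past the receive queue. A small sketch of those two computations, with illustrative names:

#include <linux/kernel.h>

/* Sketch of the sizing loops in mthca_alloc_wqe_buf(); names are illustrative. */
static unsigned int example_wqe_shift(unsigned int wqe_bytes)
{
	unsigned int shift = 6;			/* 1 << 6 = 64-byte minimum stride */

	while ((1U << shift) < wqe_bytes)
		shift++;
	/* equivalently: max(6, ilog2(roundup_pow_of_two(wqe_bytes))) */
	return shift;
}

static unsigned long example_send_wqe_offset(unsigned int rq_max, unsigned int rq_shift,
					     unsigned int sq_shift)
{
	/* The send queue starts where qp->send_wqe_offset points: the receive
	 * queue size rounded up to the send WQE stride. */
	return ALIGN((unsigned long)rq_max << rq_shift, 1UL << sq_shift);
}
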
H A Dmthca_provider.c519 struct mthca_qp *qp; mthca_create_qp() local
532 qp = kmalloc(sizeof *qp, GFP_KERNEL); mthca_create_qp()
533 if (!qp) mthca_create_qp()
540 kfree(qp); mthca_create_qp()
548 kfree(qp); mthca_create_qp()
560 kfree(qp); mthca_create_qp()
564 qp->mr.ibmr.lkey = ucmd.lkey; mthca_create_qp()
565 qp->sq.db_index = ucmd.sq_db_index; mthca_create_qp()
566 qp->rq.db_index = ucmd.rq_db_index; mthca_create_qp()
573 &init_attr->cap, qp); mthca_create_qp()
588 qp->ibqp.qp_num = qp->qpn; mthca_create_qp()
598 qp = kmalloc(sizeof (struct mthca_sqp), GFP_KERNEL); mthca_create_qp()
599 if (!qp) mthca_create_qp()
602 qp->ibqp.qp_num = init_attr->qp_type == IB_QPT_SMI ? 0 : 1; mthca_create_qp()
608 qp->ibqp.qp_num, init_attr->port_num, mthca_create_qp()
609 to_msqp(qp)); mthca_create_qp()
618 kfree(qp); mthca_create_qp()
622 init_attr->cap.max_send_wr = qp->sq.max; mthca_create_qp()
623 init_attr->cap.max_recv_wr = qp->rq.max; mthca_create_qp()
624 init_attr->cap.max_send_sge = qp->sq.max_gs; mthca_create_qp()
625 init_attr->cap.max_recv_sge = qp->rq.max_gs; mthca_create_qp()
626 init_attr->cap.max_inline_data = qp->max_inline_data; mthca_create_qp()
628 return &qp->ibqp; mthca_create_qp()
631 static int mthca_destroy_qp(struct ib_qp *qp) mthca_destroy_qp() argument
633 if (qp->uobject) { mthca_destroy_qp()
634 mthca_unmap_user_db(to_mdev(qp->device), mthca_destroy_qp()
635 &to_mucontext(qp->uobject->context)->uar, mthca_destroy_qp()
636 to_mucontext(qp->uobject->context)->db_tab, mthca_destroy_qp()
637 to_mqp(qp)->sq.db_index); mthca_destroy_qp()
638 mthca_unmap_user_db(to_mdev(qp->device), mthca_destroy_qp()
639 &to_mucontext(qp->uobject->context)->uar, mthca_destroy_qp()
640 to_mucontext(qp->uobject->context)->db_tab, mthca_destroy_qp()
641 to_mqp(qp)->rq.db_index); mthca_destroy_qp()
643 mthca_free_qp(to_mdev(qp->device), to_mqp(qp)); mthca_destroy_qp()
644 kfree(qp); mthca_destroy_qp()
H A Dmthca_mcg.c43 __be32 qp[MTHCA_QP_PER_MGM]; member in struct:mthca_mgm
165 if (mgm->qp[i] == cpu_to_be32(ibqp->qp_num | (1 << 31))) { mthca_multicast_attach()
170 } else if (!(mgm->qp[i] & cpu_to_be32(1 << 31))) { mthca_multicast_attach()
171 mgm->qp[i] = cpu_to_be32(ibqp->qp_num | (1 << 31)); mthca_multicast_attach()
242 if (mgm->qp[i] == cpu_to_be32(ibqp->qp_num | (1 << 31))) mthca_multicast_detach()
244 if (!(mgm->qp[i] & cpu_to_be32(1 << 31))) mthca_multicast_detach()
254 mgm->qp[loc] = mgm->qp[i - 1]; mthca_multicast_detach()
255 mgm->qp[i - 1] = 0; mthca_multicast_detach()
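
The attach/detach hits above rely on a simple slot convention: each mgm->qp[] entry holds a big-endian QPN with bit 31 set while the slot is in use, and a clear bit 31 marks the first free slot. A hedged sketch of the scan both functions perform; the helper name and nslots parameter are illustrative.

#include <linux/errno.h>
#include <linux/types.h>
#include <asm/byteorder.h>

static int example_mgm_find_qp(const __be32 *slots, int nslots, u32 qpn)
{
	int i;

	for (i = 0; i < nslots; i++) {
		if (slots[i] == cpu_to_be32(qpn | (1u << 31)))
			return i;		/* qpn is already a member */
		if (!(slots[i] & cpu_to_be32(1u << 31)))
			break;			/* first unused slot: search ends */
	}
	return -ENOENT;
}
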
H A Dmthca_provider.h139 * struct mthca_cq/qp also has its own lock. An individual qp lock
141 * a qp may be locked, with the cq with the lower cqn locked first.
144 * Each struct mthca_cq/qp also has an ref count, protected by the
151 * Finally, each struct mthca_cq/qp has a wait_queue_head_t for the
179 * - split cq/qp table lock into n separate (cache-aligned) locks,
289 struct mthca_qp qp; member in struct:mthca_sqp
339 static inline struct mthca_sqp *to_msqp(struct mthca_qp *qp) to_msqp() argument
341 return container_of(qp, struct mthca_sqp, qp); to_msqp()
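
The comments from mthca_provider.h above spell out the lock hierarchy: each CQ and QP carries its own spinlock, and when a QP is locked together with its CQs, the CQ with the lower cqn must be taken first and the QP's own lock last. A sketch of that ordering with illustrative struct and helper names (the real driver uses its own mthca_cq/mthca_qp types):

#include <linux/spinlock.h>

struct example_cq { spinlock_t lock; int cqn; };
struct example_qp { spinlock_t lock; };

/* Take both CQ locks in cqn order, then the QP lock last, per the rule above. */
static void example_lock_cqs_then_qp(struct example_cq *send_cq,
				     struct example_cq *recv_cq,
				     struct example_qp *qp)
{
	if (send_cq == recv_cq) {
		spin_lock_irq(&send_cq->lock);
	} else if (send_cq->cqn < recv_cq->cqn) {
		spin_lock_irq(&send_cq->lock);
		spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
	} else {
		spin_lock_irq(&recv_cq->lock);
		spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING);
	}
	spin_lock(&qp->lock);	/* always innermost */
}

Unlocking proceeds in the reverse order: the qp lock first, then the inner CQ lock, then the outer CQ lock with the matching irq-restoring variant.
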
H A Dmthca_eq.c144 } __attribute__((packed)) qp; member in union:mthca_eqe::__anon5264
282 mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff, mthca_eq_int()
287 mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff, mthca_eq_int()
292 mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff, mthca_eq_int()
297 mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff, mthca_eq_int()
307 mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff, mthca_eq_int()
312 mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff, mthca_eq_int()
317 mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff, mthca_eq_int()
322 mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff, mthca_eq_int()
H A Dmthca_dev.h259 struct mthca_array qp; member in struct:mthca_qp_table
541 void mthca_free_err_wqe(struct mthca_dev *dev, struct mthca_qp *qp, int is_send,
550 struct mthca_qp *qp);
560 void mthca_free_qp(struct mthca_dev *dev, struct mthca_qp *qp);
H A Dmthca_cq.c378 struct mthca_qp *qp, int wqe_index, int is_send, handle_error_cqe()
466 mthca_free_err_wqe(dev, qp, is_send, wqe_index, &dbd, &new_wqe); handle_error_cqe()
525 *cur_qp = mthca_array_get(&dev->qp_table.qp, mthca_poll_one()
536 entry->qp = &(*cur_qp)->ibqp; mthca_poll_one()
667 struct mthca_qp *qp = NULL; mthca_poll_cq() local
678 err = mthca_poll_one(dev, cq, &qp, mthca_poll_cq()
377 handle_error_cqe(struct mthca_dev *dev, struct mthca_cq *cq, struct mthca_qp *qp, int wqe_index, int is_send, struct mthca_err_cqe *cqe, struct ib_wc *entry, int *free_cqe) handle_error_cqe() argument
H A Dmthca_mad.c89 new_ah = ib_create_ah(dev->send_agent[port_num - 1][0]->qp->pd, update_sm_ah()
/linux-4.4.14/drivers/ntb/
H A Dntb_transport.c106 struct ntb_transport_qp *qp; member in struct:ntb_queue_entry
134 void (*tx_handler)(struct ntb_transport_qp *qp, void *qp_data,
144 void (*rx_handler)(struct ntb_transport_qp *qp, void *qp_data,
249 #define QP_TO_MW(nt, qp) ((qp) % nt->mw_count)
439 struct ntb_transport_qp *qp; debugfs_read() local
443 qp = filp->private_data; debugfs_read()
445 if (!qp || !qp->link_is_up) debugfs_read()
458 "rx_bytes - \t%llu\n", qp->rx_bytes); debugfs_read()
460 "rx_pkts - \t%llu\n", qp->rx_pkts); debugfs_read()
462 "rx_memcpy - \t%llu\n", qp->rx_memcpy); debugfs_read()
464 "rx_async - \t%llu\n", qp->rx_async); debugfs_read()
466 "rx_ring_empty - %llu\n", qp->rx_ring_empty); debugfs_read()
468 "rx_err_no_buf - %llu\n", qp->rx_err_no_buf); debugfs_read()
470 "rx_err_oflow - \t%llu\n", qp->rx_err_oflow); debugfs_read()
472 "rx_err_ver - \t%llu\n", qp->rx_err_ver); debugfs_read()
474 "rx_buff - \t0x%p\n", qp->rx_buff); debugfs_read()
476 "rx_index - \t%u\n", qp->rx_index); debugfs_read()
478 "rx_max_entry - \t%u\n\n", qp->rx_max_entry); debugfs_read()
481 "tx_bytes - \t%llu\n", qp->tx_bytes); debugfs_read()
483 "tx_pkts - \t%llu\n", qp->tx_pkts); debugfs_read()
485 "tx_memcpy - \t%llu\n", qp->tx_memcpy); debugfs_read()
487 "tx_async - \t%llu\n", qp->tx_async); debugfs_read()
489 "tx_ring_full - \t%llu\n", qp->tx_ring_full); debugfs_read()
491 "tx_err_no_buf - %llu\n", qp->tx_err_no_buf); debugfs_read()
493 "tx_mw - \t0x%p\n", qp->tx_mw); debugfs_read()
495 "tx_index (H) - \t%u\n", qp->tx_index); debugfs_read()
498 qp->remote_rx_info->entry); debugfs_read()
500 "tx_max_entry - \t%u\n", qp->tx_max_entry); debugfs_read()
503 ntb_transport_tx_free_entry(qp)); debugfs_read()
509 qp->tx_dma_chan ? "Yes" : "No"); debugfs_read()
512 qp->rx_dma_chan ? "Yes" : "No"); debugfs_read()
515 qp->link_is_up ? "Up" : "Down"); debugfs_read()
587 struct ntb_transport_qp *qp = &nt->qp_vec[qp_num]; ntb_transport_setup_qp_mw() local
608 qp->rx_buff = mw->virt_addr + rx_size * (qp_num / mw_count); ntb_transport_setup_qp_mw()
611 qp->remote_rx_info = qp->rx_buff + rx_size; ntb_transport_setup_qp_mw()
614 qp->rx_max_frame = min(transport_mtu, rx_size / 2); ntb_transport_setup_qp_mw()
615 qp->rx_max_entry = rx_size / qp->rx_max_frame; ntb_transport_setup_qp_mw()
616 qp->rx_index = 0; ntb_transport_setup_qp_mw()
618 qp->remote_rx_info->entry = qp->rx_max_entry - 1; ntb_transport_setup_qp_mw()
621 for (i = 0; i < qp->rx_max_entry; i++) { ntb_transport_setup_qp_mw()
622 void *offset = (qp->rx_buff + qp->rx_max_frame * (i + 1) - ntb_transport_setup_qp_mw()
627 qp->rx_pkts = 0; ntb_transport_setup_qp_mw()
628 qp->tx_pkts = 0; ntb_transport_setup_qp_mw()
629 qp->tx_index = 0; ntb_transport_setup_qp_mw()
709 static void ntb_qp_link_down_reset(struct ntb_transport_qp *qp) ntb_qp_link_down_reset() argument
711 qp->link_is_up = false; ntb_qp_link_down_reset()
713 qp->tx_index = 0; ntb_qp_link_down_reset()
714 qp->rx_index = 0; ntb_qp_link_down_reset()
715 qp->rx_bytes = 0; ntb_qp_link_down_reset()
716 qp->rx_pkts = 0; ntb_qp_link_down_reset()
717 qp->rx_ring_empty = 0; ntb_qp_link_down_reset()
718 qp->rx_err_no_buf = 0; ntb_qp_link_down_reset()
719 qp->rx_err_oflow = 0; ntb_qp_link_down_reset()
720 qp->rx_err_ver = 0; ntb_qp_link_down_reset()
721 qp->rx_memcpy = 0; ntb_qp_link_down_reset()
722 qp->rx_async = 0; ntb_qp_link_down_reset()
723 qp->tx_bytes = 0; ntb_qp_link_down_reset()
724 qp->tx_pkts = 0; ntb_qp_link_down_reset()
725 qp->tx_ring_full = 0; ntb_qp_link_down_reset()
726 qp->tx_err_no_buf = 0; ntb_qp_link_down_reset()
727 qp->tx_memcpy = 0; ntb_qp_link_down_reset()
728 qp->tx_async = 0; ntb_qp_link_down_reset()
731 static void ntb_qp_link_cleanup(struct ntb_transport_qp *qp) ntb_qp_link_cleanup() argument
733 struct ntb_transport_ctx *nt = qp->transport; ntb_qp_link_cleanup()
736 dev_info(&pdev->dev, "qp %d: Link Cleanup\n", qp->qp_num); ntb_qp_link_cleanup()
738 cancel_delayed_work_sync(&qp->link_work); ntb_qp_link_cleanup()
739 ntb_qp_link_down_reset(qp); ntb_qp_link_cleanup()
741 if (qp->event_handler) ntb_qp_link_cleanup()
742 qp->event_handler(qp->cb_data, qp->link_is_up); ntb_qp_link_cleanup()
747 struct ntb_transport_qp *qp = container_of(work, ntb_qp_link_cleanup_work() local
750 struct ntb_transport_ctx *nt = qp->transport; ntb_qp_link_cleanup_work()
752 ntb_qp_link_cleanup(qp); ntb_qp_link_cleanup_work()
755 schedule_delayed_work(&qp->link_work, ntb_qp_link_cleanup_work()
759 static void ntb_qp_link_down(struct ntb_transport_qp *qp) ntb_qp_link_down() argument
761 schedule_work(&qp->link_cleanup); ntb_qp_link_down()
766 struct ntb_transport_qp *qp; ntb_transport_link_cleanup() local
775 qp = &nt->qp_vec[i]; ntb_transport_link_cleanup()
776 ntb_qp_link_cleanup(qp); ntb_transport_link_cleanup()
777 cancel_work_sync(&qp->link_cleanup); ntb_transport_link_cleanup()
778 cancel_delayed_work_sync(&qp->link_work); ntb_transport_link_cleanup()
875 struct ntb_transport_qp *qp = &nt->qp_vec[i]; ntb_transport_link_work() local
879 if (qp->client_ready) ntb_transport_link_work()
880 schedule_delayed_work(&qp->link_work, 0); ntb_transport_link_work()
896 struct ntb_transport_qp *qp = container_of(work, ntb_qp_link_work() local
899 struct pci_dev *pdev = qp->ndev->pdev; ntb_qp_link_work()
900 struct ntb_transport_ctx *nt = qp->transport; ntb_qp_link_work()
907 ntb_peer_spad_write(nt->ndev, QP_LINKS, val | BIT(qp->qp_num)); ntb_qp_link_work()
909 /* query remote spad for qp ready bits */ ntb_qp_link_work()
914 if (val & BIT(qp->qp_num)) { ntb_qp_link_work()
915 dev_info(&pdev->dev, "qp %d: Link Up\n", qp->qp_num); ntb_qp_link_work()
916 qp->link_is_up = true; ntb_qp_link_work()
918 if (qp->event_handler) ntb_qp_link_work()
919 qp->event_handler(qp->cb_data, qp->link_is_up); ntb_qp_link_work()
921 tasklet_schedule(&qp->rxc_db_work); ntb_qp_link_work()
923 schedule_delayed_work(&qp->link_work, ntb_qp_link_work()
930 struct ntb_transport_qp *qp; ntb_transport_init_queue() local
942 qp = &nt->qp_vec[qp_num]; ntb_transport_init_queue()
943 qp->qp_num = qp_num; ntb_transport_init_queue()
944 qp->transport = nt; ntb_transport_init_queue()
945 qp->ndev = nt->ndev; ntb_transport_init_queue()
946 qp->client_ready = false; ntb_transport_init_queue()
947 qp->event_handler = NULL; ntb_transport_init_queue()
948 ntb_qp_link_down_reset(qp); ntb_transport_init_queue()
961 qp->tx_mw = nt->mw_vec[mw_num].vbase + qp_offset; ntb_transport_init_queue()
962 if (!qp->tx_mw) ntb_transport_init_queue()
965 qp->tx_mw_phys = mw_base + qp_offset; ntb_transport_init_queue()
966 if (!qp->tx_mw_phys) ntb_transport_init_queue()
970 qp->rx_info = qp->tx_mw + tx_size; ntb_transport_init_queue()
973 qp->tx_max_frame = min(transport_mtu, tx_size / 2); ntb_transport_init_queue()
974 qp->tx_max_entry = tx_size / qp->tx_max_frame; ntb_transport_init_queue()
979 snprintf(debugfs_name, 4, "qp%d", qp_num); ntb_transport_init_queue()
980 qp->debugfs_dir = debugfs_create_dir(debugfs_name, ntb_transport_init_queue()
983 qp->debugfs_stats = debugfs_create_file("stats", S_IRUSR, ntb_transport_init_queue()
984 qp->debugfs_dir, qp, ntb_transport_init_queue()
987 qp->debugfs_dir = NULL; ntb_transport_init_queue()
988 qp->debugfs_stats = NULL; ntb_transport_init_queue()
991 INIT_DELAYED_WORK(&qp->link_work, ntb_qp_link_work); ntb_transport_init_queue()
992 INIT_WORK(&qp->link_cleanup, ntb_qp_link_cleanup_work); ntb_transport_init_queue()
994 spin_lock_init(&qp->ntb_rx_q_lock); ntb_transport_init_queue()
995 spin_lock_init(&qp->ntb_tx_free_q_lock); ntb_transport_init_queue()
997 INIT_LIST_HEAD(&qp->rx_post_q); ntb_transport_init_queue()
998 INIT_LIST_HEAD(&qp->rx_pend_q); ntb_transport_init_queue()
999 INIT_LIST_HEAD(&qp->rx_free_q); ntb_transport_init_queue()
1000 INIT_LIST_HEAD(&qp->tx_free_q); ntb_transport_init_queue()
1002 tasklet_init(&qp->rxc_db_work, ntb_transport_rxc_db, ntb_transport_init_queue()
1003 (unsigned long)qp); ntb_transport_init_queue()
1132 struct ntb_transport_qp *qp; ntb_transport_free() local
1142 /* verify that all the qp's are freed */ ntb_transport_free()
1144 qp = &nt->qp_vec[i]; ntb_transport_free()
1146 ntb_transport_free_queue(qp); ntb_transport_free()
1147 debugfs_remove_recursive(qp->debugfs_dir); ntb_transport_free()
1165 static void ntb_complete_rxc(struct ntb_transport_qp *qp) ntb_complete_rxc() argument
1172 spin_lock_irqsave(&qp->ntb_rx_q_lock, irqflags); ntb_complete_rxc()
1174 while (!list_empty(&qp->rx_post_q)) { ntb_complete_rxc()
1175 entry = list_first_entry(&qp->rx_post_q, ntb_complete_rxc()
1181 iowrite32(entry->index, &qp->rx_info->entry); ntb_complete_rxc()
1186 list_move_tail(&entry->entry, &qp->rx_free_q); ntb_complete_rxc()
1188 spin_unlock_irqrestore(&qp->ntb_rx_q_lock, irqflags); ntb_complete_rxc()
1190 if (qp->rx_handler && qp->client_ready) ntb_complete_rxc()
1191 qp->rx_handler(qp, qp->cb_data, cb_data, len); ntb_complete_rxc()
1193 spin_lock_irqsave(&qp->ntb_rx_q_lock, irqflags); ntb_complete_rxc()
1196 spin_unlock_irqrestore(&qp->ntb_rx_q_lock, irqflags); ntb_complete_rxc()
1205 ntb_complete_rxc(entry->qp); ntb_rx_copy_callback()
1224 struct ntb_transport_qp *qp = entry->qp; ntb_async_rx() local
1225 struct dma_chan *chan = qp->rx_dma_chan; ntb_async_rx()
1282 qp->last_cookie = cookie; ntb_async_rx()
1284 qp->rx_async++; ntb_async_rx()
1294 qp->rx_memcpy++; ntb_async_rx()
1297 static int ntb_process_rxc(struct ntb_transport_qp *qp) ntb_process_rxc() argument
1303 offset = qp->rx_buff + qp->rx_max_frame * qp->rx_index; ntb_process_rxc()
1304 hdr = offset + qp->rx_max_frame - sizeof(struct ntb_payload_header); ntb_process_rxc()
1306 dev_dbg(&qp->ndev->pdev->dev, "qp %d: RX ver %u len %d flags %x\n", ntb_process_rxc()
1307 qp->qp_num, hdr->ver, hdr->len, hdr->flags); ntb_process_rxc()
1310 dev_dbg(&qp->ndev->pdev->dev, "done flag not set\n"); ntb_process_rxc()
1311 qp->rx_ring_empty++; ntb_process_rxc()
1316 dev_dbg(&qp->ndev->pdev->dev, "link down flag set\n"); ntb_process_rxc()
1317 ntb_qp_link_down(qp); ntb_process_rxc()
1322 if (hdr->ver != (u32)qp->rx_pkts) { ntb_process_rxc()
1323 dev_dbg(&qp->ndev->pdev->dev, ntb_process_rxc()
1325 qp->rx_pkts, hdr->ver); ntb_process_rxc()
1326 qp->rx_err_ver++; ntb_process_rxc()
1330 entry = ntb_list_mv(&qp->ntb_rx_q_lock, &qp->rx_pend_q, &qp->rx_post_q); ntb_process_rxc()
1332 dev_dbg(&qp->ndev->pdev->dev, "no receive buffer\n"); ntb_process_rxc()
1333 qp->rx_err_no_buf++; ntb_process_rxc()
1338 entry->index = qp->rx_index; ntb_process_rxc()
1341 dev_dbg(&qp->ndev->pdev->dev, ntb_process_rxc()
1344 qp->rx_err_oflow++; ntb_process_rxc()
1349 ntb_complete_rxc(qp); ntb_process_rxc()
1351 dev_dbg(&qp->ndev->pdev->dev, ntb_process_rxc()
1353 qp->rx_index, hdr->ver, hdr->len, entry->len); ntb_process_rxc()
1355 qp->rx_bytes += hdr->len; ntb_process_rxc()
1356 qp->rx_pkts++; ntb_process_rxc()
1363 qp->rx_index++; ntb_process_rxc()
1364 qp->rx_index %= qp->rx_max_entry; ntb_process_rxc()
1371 struct ntb_transport_qp *qp = (void *)data; ntb_transport_rxc_db() local
1374 dev_dbg(&qp->ndev->pdev->dev, "%s: doorbell %d received\n", ntb_transport_rxc_db()
1375 __func__, qp->qp_num); ntb_transport_rxc_db()
1380 for (i = 0; i < qp->rx_max_entry; i++) { ntb_transport_rxc_db()
1381 rc = ntb_process_rxc(qp); ntb_transport_rxc_db()
1386 if (i && qp->rx_dma_chan) ntb_transport_rxc_db()
1387 dma_async_issue_pending(qp->rx_dma_chan); ntb_transport_rxc_db()
1389 if (i == qp->rx_max_entry) { ntb_transport_rxc_db()
1391 tasklet_schedule(&qp->rxc_db_work); ntb_transport_rxc_db()
1392 } else if (ntb_db_read(qp->ndev) & BIT_ULL(qp->qp_num)) { ntb_transport_rxc_db()
1394 ntb_db_clear(qp->ndev, BIT_ULL(qp->qp_num)); ntb_transport_rxc_db()
1396 ntb_db_read(qp->ndev); ntb_transport_rxc_db()
1402 tasklet_schedule(&qp->rxc_db_work); ntb_transport_rxc_db()
1409 struct ntb_transport_qp *qp = entry->qp; ntb_tx_copy_callback() local
1414 ntb_peer_db_set(qp->ndev, BIT_ULL(qp->qp_num)); ntb_tx_copy_callback()
1421 qp->tx_bytes += entry->len; ntb_tx_copy_callback()
1423 if (qp->tx_handler) ntb_tx_copy_callback()
1424 qp->tx_handler(qp, qp->cb_data, entry->cb_data, ntb_tx_copy_callback()
1428 ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry, &qp->tx_free_q); ntb_tx_copy_callback()
1449 static void ntb_async_tx(struct ntb_transport_qp *qp, ntb_async_tx() argument
1454 struct dma_chan *chan = qp->tx_dma_chan; ntb_async_tx()
1464 offset = qp->tx_mw + qp->tx_max_frame * qp->tx_index; ntb_async_tx()
1465 hdr = offset + qp->tx_max_frame - sizeof(struct ntb_payload_header); ntb_async_tx()
1469 iowrite32((u32)qp->tx_pkts, &hdr->ver); ntb_async_tx()
1478 dest = qp->tx_mw_phys + qp->tx_max_frame * qp->tx_index; ntb_async_tx()
1513 qp->tx_async++; ntb_async_tx()
1522 qp->tx_memcpy++; ntb_async_tx()
1525 static int ntb_process_tx(struct ntb_transport_qp *qp, ntb_process_tx() argument
1528 if (qp->tx_index == qp->remote_rx_info->entry) { ntb_process_tx()
1529 qp->tx_ring_full++; ntb_process_tx()
1533 if (entry->len > qp->tx_max_frame - sizeof(struct ntb_payload_header)) { ntb_process_tx()
1534 if (qp->tx_handler) ntb_process_tx()
1535 qp->tx_handler(qp->cb_data, qp, NULL, -EIO); ntb_process_tx()
1537 ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry, ntb_process_tx()
1538 &qp->tx_free_q); ntb_process_tx()
1542 ntb_async_tx(qp, entry); ntb_process_tx()
1544 qp->tx_index++; ntb_process_tx()
1545 qp->tx_index %= qp->tx_max_entry; ntb_process_tx()
1547 qp->tx_pkts++; ntb_process_tx()
1552 static void ntb_send_link_down(struct ntb_transport_qp *qp) ntb_send_link_down() argument
1554 struct pci_dev *pdev = qp->ndev->pdev; ntb_send_link_down()
1558 if (!qp->link_is_up) ntb_send_link_down()
1561 dev_info(&pdev->dev, "qp %d: Send Link Down\n", qp->qp_num); ntb_send_link_down()
1564 entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q); ntb_send_link_down()
1578 rc = ntb_process_tx(qp, entry); ntb_send_link_down()
1581 qp->qp_num); ntb_send_link_down()
1583 ntb_qp_link_down_reset(qp); ntb_send_link_down()
1613 struct ntb_transport_qp *qp; ntb_transport_create_queue() local
1633 qp = &nt->qp_vec[free_queue]; ntb_transport_create_queue()
1634 qp_bit = BIT_ULL(qp->qp_num); ntb_transport_create_queue()
1638 qp->cb_data = data; ntb_transport_create_queue()
1639 qp->rx_handler = handlers->rx_handler; ntb_transport_create_queue()
1640 qp->tx_handler = handlers->tx_handler; ntb_transport_create_queue()
1641 qp->event_handler = handlers->event_handler; ntb_transport_create_queue()
1647 qp->tx_dma_chan = ntb_transport_create_queue()
1650 if (!qp->tx_dma_chan) ntb_transport_create_queue()
1653 qp->rx_dma_chan = ntb_transport_create_queue()
1656 if (!qp->rx_dma_chan) ntb_transport_create_queue()
1659 qp->tx_dma_chan = NULL; ntb_transport_create_queue()
1660 qp->rx_dma_chan = NULL; ntb_transport_create_queue()
1664 qp->tx_dma_chan ? "DMA" : "CPU"); ntb_transport_create_queue()
1667 qp->rx_dma_chan ? "DMA" : "CPU"); ntb_transport_create_queue()
1674 entry->qp = qp; ntb_transport_create_queue()
1675 ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry, ntb_transport_create_queue()
1676 &qp->rx_free_q); ntb_transport_create_queue()
1684 entry->qp = qp; ntb_transport_create_queue()
1685 ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry, ntb_transport_create_queue()
1686 &qp->tx_free_q); ntb_transport_create_queue()
1689 ntb_db_clear(qp->ndev, qp_bit); ntb_transport_create_queue()
1690 ntb_db_clear_mask(qp->ndev, qp_bit); ntb_transport_create_queue()
1692 dev_info(&pdev->dev, "NTB Transport QP %d created\n", qp->qp_num); ntb_transport_create_queue()
1694 return qp; ntb_transport_create_queue()
1697 while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q))) ntb_transport_create_queue()
1700 while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q))) ntb_transport_create_queue()
1702 if (qp->tx_dma_chan) ntb_transport_create_queue()
1703 dma_release_channel(qp->tx_dma_chan); ntb_transport_create_queue()
1704 if (qp->rx_dma_chan) ntb_transport_create_queue()
1705 dma_release_channel(qp->rx_dma_chan); ntb_transport_create_queue()
1714 * @qp: NTB queue to be freed
1718 void ntb_transport_free_queue(struct ntb_transport_qp *qp) ntb_transport_free_queue() argument
1724 if (!qp) ntb_transport_free_queue()
1727 pdev = qp->ndev->pdev; ntb_transport_free_queue()
1729 if (qp->tx_dma_chan) { ntb_transport_free_queue()
1730 struct dma_chan *chan = qp->tx_dma_chan; ntb_transport_free_queue()
1734 qp->tx_dma_chan = NULL; ntb_transport_free_queue()
1739 dma_sync_wait(chan, qp->last_cookie); ntb_transport_free_queue()
1744 if (qp->rx_dma_chan) { ntb_transport_free_queue()
1745 struct dma_chan *chan = qp->rx_dma_chan; ntb_transport_free_queue()
1749 qp->rx_dma_chan = NULL; ntb_transport_free_queue()
1754 dma_sync_wait(chan, qp->last_cookie); ntb_transport_free_queue()
1759 qp_bit = BIT_ULL(qp->qp_num); ntb_transport_free_queue()
1761 ntb_db_set_mask(qp->ndev, qp_bit); ntb_transport_free_queue()
1762 tasklet_disable(&qp->rxc_db_work); ntb_transport_free_queue()
1764 cancel_delayed_work_sync(&qp->link_work); ntb_transport_free_queue()
1766 qp->cb_data = NULL; ntb_transport_free_queue()
1767 qp->rx_handler = NULL; ntb_transport_free_queue()
1768 qp->tx_handler = NULL; ntb_transport_free_queue()
1769 qp->event_handler = NULL; ntb_transport_free_queue()
1771 while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q))) ntb_transport_free_queue()
1774 while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_pend_q))) { ntb_transport_free_queue()
1779 while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_post_q))) { ntb_transport_free_queue()
1784 while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q))) ntb_transport_free_queue()
1787 qp->transport->qp_bitmap_free |= qp_bit; ntb_transport_free_queue()
1789 dev_info(&pdev->dev, "NTB Transport QP %d freed\n", qp->qp_num); ntb_transport_free_queue()
1795 * @qp: NTB queue to be freed
1799 * shutdown of qp.
1803 void *ntb_transport_rx_remove(struct ntb_transport_qp *qp, unsigned int *len) ntb_transport_rx_remove() argument
1808 if (!qp || qp->client_ready) ntb_transport_rx_remove()
1811 entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_pend_q); ntb_transport_rx_remove()
1818 ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry, &qp->rx_free_q); ntb_transport_rx_remove()
1826 * @qp: NTB transport layer queue the entry is to be enqueued on
1836 int ntb_transport_rx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data, ntb_transport_rx_enqueue() argument
1841 if (!qp) ntb_transport_rx_enqueue()
1844 entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q); ntb_transport_rx_enqueue()
1853 ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry, &qp->rx_pend_q); ntb_transport_rx_enqueue()
1855 tasklet_schedule(&qp->rxc_db_work); ntb_transport_rx_enqueue()
1863 * @qp: NTB transport layer queue the entry is to be enqueued on
1870 * serialize access to the qp.
1874 int ntb_transport_tx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data, ntb_transport_tx_enqueue() argument
1880 if (!qp || !qp->link_is_up || !len) ntb_transport_tx_enqueue()
1883 entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q); ntb_transport_tx_enqueue()
1885 qp->tx_err_no_buf++; ntb_transport_tx_enqueue()
1894 rc = ntb_process_tx(qp, entry); ntb_transport_tx_enqueue()
1896 ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry, ntb_transport_tx_enqueue()
1897 &qp->tx_free_q); ntb_transport_tx_enqueue()
1905 * @qp: NTB transport layer queue to be enabled
1909 void ntb_transport_link_up(struct ntb_transport_qp *qp) ntb_transport_link_up() argument
1911 if (!qp) ntb_transport_link_up()
1914 qp->client_ready = true; ntb_transport_link_up()
1916 if (qp->transport->link_is_up) ntb_transport_link_up()
1917 schedule_delayed_work(&qp->link_work, 0); ntb_transport_link_up()
1923 * @qp: NTB transport layer queue to be disabled
1929 void ntb_transport_link_down(struct ntb_transport_qp *qp) ntb_transport_link_down() argument
1933 if (!qp) ntb_transport_link_down()
1936 qp->client_ready = false; ntb_transport_link_down()
1938 val = ntb_spad_read(qp->ndev, QP_LINKS); ntb_transport_link_down()
1940 ntb_peer_spad_write(qp->ndev, QP_LINKS, ntb_transport_link_down()
1941 val & ~BIT(qp->qp_num)); ntb_transport_link_down()
1943 if (qp->link_is_up) ntb_transport_link_down()
1944 ntb_send_link_down(qp); ntb_transport_link_down()
1946 cancel_delayed_work_sync(&qp->link_work); ntb_transport_link_down()
1952 * @qp: NTB transport layer queue to be queried
1958 bool ntb_transport_link_query(struct ntb_transport_qp *qp) ntb_transport_link_query() argument
1960 if (!qp) ntb_transport_link_query()
1963 return qp->link_is_up; ntb_transport_link_query()
1968 * ntb_transport_qp_num - Query the qp number
1969 * @qp: NTB transport layer queue to be queried
1971 * Query qp number of the NTB transport queue
1973 * RETURNS: a zero based number specifying the qp number
1975 unsigned char ntb_transport_qp_num(struct ntb_transport_qp *qp) ntb_transport_qp_num() argument
1977 if (!qp) ntb_transport_qp_num()
1980 return qp->qp_num; ntb_transport_qp_num()
1985 * ntb_transport_max_size - Query the max payload size of a qp
1986 * @qp: NTB transport layer queue to be queried
1988 * Query the maximum payload size permissible on the given qp
1990 * RETURNS: the max payload size of a qp
1992 unsigned int ntb_transport_max_size(struct ntb_transport_qp *qp) ntb_transport_max_size() argument
1998 if (!qp) ntb_transport_max_size()
2001 rx_chan = qp->rx_dma_chan; ntb_transport_max_size()
2002 tx_chan = qp->tx_dma_chan; ntb_transport_max_size()
2008 max_size = qp->tx_max_frame - sizeof(struct ntb_payload_header); ntb_transport_max_size()
2015 unsigned int ntb_transport_tx_free_entry(struct ntb_transport_qp *qp) ntb_transport_tx_free_entry() argument
2017 unsigned int head = qp->tx_index; ntb_transport_tx_free_entry()
2018 unsigned int tail = qp->remote_rx_info->entry; ntb_transport_tx_free_entry()
2020 return tail > head ? tail - head : qp->tx_max_entry + tail - head; ntb_transport_tx_free_entry()
2027 struct ntb_transport_qp *qp; ntb_transport_doorbell_callback() local
2036 qp = &nt->qp_vec[qp_num]; ntb_transport_doorbell_callback()
2038 tasklet_schedule(&qp->rxc_db_work); ntb_transport_doorbell_callback()
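
Taken together, the exported calls above (ntb_transport_create_queue(), ntb_transport_rx_enqueue()/tx_enqueue(), ntb_transport_link_up()/link_down(), ntb_transport_max_size()) form the client-facing qp API that drivers such as ntb_netdev build on. A rough client sketch follows; the handler bodies, buffer counts and names are illustrative, error handling is trimmed, and the ntb_queue_handlers layout is assumed to match include/linux/ntb_transport.h.

#include <linux/device.h>
#include <linux/ntb_transport.h>
#include <linux/slab.h>

static void example_rx_handler(struct ntb_transport_qp *qp, void *qp_data,
			       void *data, int len)
{
	/* 'data' is a buffer posted below; consume 'len' bytes, then repost it. */
	ntb_transport_rx_enqueue(qp, data, data, ntb_transport_max_size(qp));
}

static void example_tx_handler(struct ntb_transport_qp *qp, void *qp_data,
			       void *data, int len)
{
	/* Transmit of 'data' finished (len < 0 on error); it was kmalloc'd by
	 * the sender in this sketch, so release it here. */
	kfree(data);
}

static void example_event_handler(void *data, int link_is_up)
{
	/* Queue-level link state changed; start or stop submitting work. */
}

static const struct ntb_queue_handlers example_handlers = {
	.rx_handler	= example_rx_handler,
	.tx_handler	= example_tx_handler,
	.event_handler	= example_event_handler,
};

static struct ntb_transport_qp *example_open_qp(struct device *client_dev, void *priv)
{
	struct ntb_transport_qp *qp;
	unsigned int size;
	void *buf;
	int i;

	qp = ntb_transport_create_queue(priv, client_dev, &example_handlers);
	if (!qp)
		return NULL;

	/* Pre-post a handful of receive buffers sized to the qp's max payload. */
	size = ntb_transport_max_size(qp);
	for (i = 0; i < 8; i++) {
		buf = kmalloc(size, GFP_KERNEL);
		if (!buf)
			break;
		ntb_transport_rx_enqueue(qp, buf, buf, size);
	}

	ntb_transport_link_up(qp);	/* mark this client ready on the qp */
	return qp;
}

static int example_send(struct ntb_transport_qp *qp, void *buf, unsigned int len)
{
	/* On success the buffer belongs to the transport until tx_handler runs. */
	return ntb_transport_tx_enqueue(qp, buf, buf, len);
}
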
/linux-4.4.14/drivers/infiniband/hw/cxgb4/
H A DMakefile5 iw_cxgb4-y := device.o cm.o provider.o mem.o cq.o qp.o resource.o ev.o id_table.o
H A Ddevice.c230 struct c4iw_qp *qp = p; dump_qp() local
235 if (id != qp->wq.sq.qid) dump_qp()
242 if (qp->ep) { dump_qp()
243 if (qp->ep->com.local_addr.ss_family == AF_INET) { dump_qp()
245 &qp->ep->com.local_addr; dump_qp()
247 &qp->ep->com.remote_addr; dump_qp()
249 &qp->ep->com.mapped_local_addr; dump_qp()
251 &qp->ep->com.mapped_remote_addr; dump_qp()
254 "rc qp sq id %u rq id %u state %u " dump_qp()
257 qp->wq.sq.qid, qp->wq.rq.qid, dump_qp()
258 (int)qp->attr.state, dump_qp()
259 qp->wq.sq.flags & T4_SQ_ONCHIP, dump_qp()
260 qp->ep->hwtid, (int)qp->ep->com.state, dump_qp()
267 &qp->ep->com.local_addr; dump_qp()
269 &qp->ep->com.remote_addr; dump_qp()
272 &qp->ep->com.mapped_local_addr; dump_qp()
275 &qp->ep->com.mapped_remote_addr; dump_qp()
278 "rc qp sq id %u rq id %u state %u " dump_qp()
281 qp->wq.sq.qid, qp->wq.rq.qid, dump_qp()
282 (int)qp->attr.state, dump_qp()
283 qp->wq.sq.flags & T4_SQ_ONCHIP, dump_qp()
284 qp->ep->hwtid, (int)qp->ep->com.state, dump_qp()
294 "qp sq id %u rq id %u state %u onchip %u\n", dump_qp()
295 qp->wq.sq.qid, qp->wq.rq.qid, dump_qp()
296 (int)qp->attr.state, dump_qp()
297 qp->wq.sq.flags & T4_SQ_ONCHIP); dump_qp()
563 "ep %p cm_id %p qp %p state %d flags 0x%lx " dump_ep()
567 ep, ep->com.cm_id, ep->com.qp, dump_ep()
587 "ep %p cm_id %p qp %p state %d flags 0x%lx " dump_ep()
591 ep, ep->com.cm_id, ep->com.qp, dump_ep()
787 if (rdev->lldi.vr->qp.start != rdev->lldi.vr->cq.start || c4iw_rdev_open()
788 rdev->lldi.vr->qp.size != rdev->lldi.vr->cq.size) { c4iw_rdev_open()
789 pr_err(MOD "%s: unsupported qp and cq id ranges " c4iw_rdev_open()
790 "qp start %u size %u cq start %u size %u\n", c4iw_rdev_open()
791 pci_name(rdev->lldi.pdev), rdev->lldi.vr->qp.start, c4iw_rdev_open()
792 rdev->lldi.vr->qp.size, rdev->lldi.vr->cq.size, c4iw_rdev_open()
802 "qp qid start %u size %u cq qid start %u size %u\n", c4iw_rdev_open()
808 rdev->lldi.vr->qp.start, c4iw_rdev_open()
809 rdev->lldi.vr->qp.size, c4iw_rdev_open()
829 rdev->stats.qid.total = rdev->lldi.vr->qp.size; c4iw_rdev_open()
918 infop->vr->rq.size > 0 && infop->vr->qp.size > 0 && rdma_supported()
966 * For T4 devices with onchip qp mem, we map only that part c4iw_alloc()
1239 struct c4iw_qp *qp = p; disable_qp_db() local
1241 t4_disable_wq_db(&qp->wq); disable_qp_db()
1261 struct c4iw_qp *qp = p; enable_qp_db() local
1263 t4_enable_wq_db(&qp->wq); enable_qp_db()
1267 static void resume_rc_qp(struct c4iw_qp *qp) resume_rc_qp() argument
1269 spin_lock(&qp->lock); resume_rc_qp()
1270 t4_ring_sq_db(&qp->wq, qp->wq.sq.wq_pidx_inc, NULL); resume_rc_qp()
1271 qp->wq.sq.wq_pidx_inc = 0; resume_rc_qp()
1272 t4_ring_rq_db(&qp->wq, qp->wq.rq.wq_pidx_inc, NULL); resume_rc_qp()
1273 qp->wq.rq.wq_pidx_inc = 0; resume_rc_qp()
1274 spin_unlock(&qp->lock); resume_rc_qp()
1280 struct c4iw_qp *qp; resume_a_chunk() local
1283 qp = list_first_entry(&ctx->dev->db_fc_list, struct c4iw_qp, resume_a_chunk()
1285 list_del_init(&qp->db_fc_entry); resume_a_chunk()
1286 resume_rc_qp(qp); resume_a_chunk()
1342 struct c4iw_qp *qp = p; add_and_ref_qp() local
1344 c4iw_qp_add_ref(&qp->ibqp); add_and_ref_qp()
1345 qp_listp->qps[qp_listp->idx++] = qp; add_and_ref_qp()
1370 struct c4iw_qp *qp = qp_list->qps[idx]; recover_lost_dbs() local
1372 spin_lock_irq(&qp->rhp->lock); recover_lost_dbs()
1373 spin_lock(&qp->lock); recover_lost_dbs()
1374 ret = cxgb4_sync_txq_pidx(qp->rhp->rdev.lldi.ports[0], recover_lost_dbs()
1375 qp->wq.sq.qid, recover_lost_dbs()
1376 t4_sq_host_wq_pidx(&qp->wq), recover_lost_dbs()
1377 t4_sq_wq_size(&qp->wq)); recover_lost_dbs()
1382 pci_name(ctx->lldi.pdev), qp->wq.sq.qid); recover_lost_dbs()
1383 spin_unlock(&qp->lock); recover_lost_dbs()
1384 spin_unlock_irq(&qp->rhp->lock); recover_lost_dbs()
1387 qp->wq.sq.wq_pidx_inc = 0; recover_lost_dbs()
1389 ret = cxgb4_sync_txq_pidx(qp->rhp->rdev.lldi.ports[0], recover_lost_dbs()
1390 qp->wq.rq.qid, recover_lost_dbs()
1391 t4_rq_host_wq_pidx(&qp->wq), recover_lost_dbs()
1392 t4_rq_wq_size(&qp->wq)); recover_lost_dbs()
1398 pci_name(ctx->lldi.pdev), qp->wq.rq.qid); recover_lost_dbs()
1399 spin_unlock(&qp->lock); recover_lost_dbs()
1400 spin_unlock_irq(&qp->rhp->lock); recover_lost_dbs()
1403 qp->wq.rq.wq_pidx_inc = 0; recover_lost_dbs()
1404 spin_unlock(&qp->lock); recover_lost_dbs()
1405 spin_unlock_irq(&qp->rhp->lock); recover_lost_dbs()
1408 while (cxgb4_dbfifo_count(qp->rhp->rdev.lldi.ports[0], 1) > 0) { recover_lost_dbs()
1448 /* add and ref each qp so it doesn't get freed */ recover_queues()
H A Dresource.c43 rdev->lldi.vr->qp.start, c4iw_init_qid_table()
44 rdev->lldi.vr->qp.size, c4iw_init_qid_table()
45 rdev->lldi.vr->qp.size, 0)) c4iw_init_qid_table()
48 for (i = rdev->lldi.vr->qp.start; c4iw_init_qid_table()
49 i < rdev->lldi.vr->qp.start + rdev->lldi.vr->qp.size; i++) c4iw_init_qid_table()
126 * now put the same ids on the qp list since they all c4iw_get_cqid()
H A Dcm.c154 c4iw_qp_rem_ref(&ep->com.qp->ibqp); deref_qp()
161 c4iw_qp_add_ref(&ep->com.qp->ibqp); ref_qp()
1529 err = c4iw_modify_qp(ep->com.qp->rhp, process_mpa_reply()
1530 ep->com.qp, mask, &attrs, 1); process_mpa_reply()
1544 err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, process_mpa_reply()
1564 err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, process_mpa_reply()
1761 BUG_ON(!ep->com.qp); rx_data()
1765 __func__, ep->com.qp->wq.sq.qid, ep, rx_data()
1768 c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, rx_data()
2026 PDBG("%s qp %p cm_id %p\n", __func__, ep->com.qp, ep->com.cm_id); c4iw_reconnect()
2608 ret = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, peer_close()
2624 if (ep->com.cm_id && ep->com.qp) { peer_close()
2626 c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, peer_close()
2717 if (ep->com.cm_id && ep->com.qp) { peer_abort()
2719 ret = c4iw_modify_qp(ep->com.qp->rhp, peer_abort()
2720 ep->com.qp, C4IW_QP_ATTR_NEXT_STATE, peer_abort()
2724 "%s - qp <- error failed!\n", peer_abort()
2806 if ((ep->com.cm_id) && (ep->com.qp)) { close_con_rpl()
2808 c4iw_modify_qp(ep->com.qp->rhp, close_con_rpl()
2809 ep->com.qp, close_con_rpl()
2841 if (ep && ep->com.qp) { terminate()
2843 ep->com.qp->wq.sq.qid); terminate()
2845 c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, terminate()
2848 printk(KERN_WARNING MOD "TERM received tid %u no ep/qp\n", tid); terminate()
2921 struct c4iw_qp *qp = get_qhp(h, conn_param->qpn); c4iw_accept_cr() local
2932 BUG_ON(!qp); c4iw_accept_cr()
2984 ep->com.qp = qp; c4iw_accept_cr()
3001 err = c4iw_modify_qp(ep->com.qp->rhp, c4iw_accept_cr()
3002 ep->com.qp, mask, &attrs, 1); c4iw_accept_cr()
3126 ep->com.qp = get_qhp(dev, conn_param->qpn); c4iw_connect()
3127 if (!ep->com.qp) { c4iw_connect()
3133 PDBG("%s qpn 0x%x qp %p cm_id %p\n", __func__, conn_param->qpn, c4iw_connect()
3134 ep->com.qp, cm_id); c4iw_connect()
3958 if (ep->com.cm_id && ep->com.qp) { process_timeout()
3960 c4iw_modify_qp(ep->com.qp->rhp, process_timeout()
3961 ep->com.qp, C4IW_QP_ATTR_NEXT_STATE, process_timeout()
H A Dqp.c686 void c4iw_qp_add_ref(struct ib_qp *qp) c4iw_qp_add_ref() argument
688 PDBG("%s ib_qp %p\n", __func__, qp); c4iw_qp_add_ref()
689 atomic_inc(&(to_c4iw_qp(qp)->refcnt)); c4iw_qp_add_ref()
692 void c4iw_qp_rem_ref(struct ib_qp *qp) c4iw_qp_rem_ref() argument
694 PDBG("%s ib_qp %p\n", __func__, qp); c4iw_qp_rem_ref()
695 if (atomic_dec_and_test(&(to_c4iw_qp(qp)->refcnt))) c4iw_qp_rem_ref()
696 wake_up(&(to_c4iw_qp(qp)->wait)); c4iw_qp_rem_ref()
940 int c4iw_bind_mw(struct ib_qp *qp, struct ib_mw *mw, struct ib_mw_bind *mw_bind) c4iw_bind_mw() argument
1126 /* locking hierarchy: cq lock first, then qp lock. */ __flush_qp()
1143 /* locking hierarchy: cq lock first, then qp lock. */ __flush_qp()
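Editor's note: the c4iw_qp_add_ref()/c4iw_qp_rem_ref() hits above (and the wait_event() in c2_free_qp() further down this listing) follow one lifetime pattern: any path that may still touch the QP takes a reference, and the destroy path sleeps until the count drains to zero. Below is a minimal userspace model of that pattern, not the driver's code; the names my_qp, my_qp_get, my_qp_put and my_qp_destroy are hypothetical, and a mutex/condvar stands in for atomic_t plus a wait queue.

#include <pthread.h>
#include <stdio.h>

/* Simplified stand-in for the driver's QP object; names are hypothetical. */
struct my_qp {
	int refcnt;              /* models atomic_t refcnt                */
	pthread_mutex_t lock;    /* protects refcnt in this model         */
	pthread_cond_t drained;  /* models the wait queue (qp->wait)      */
};

static void my_qp_get(struct my_qp *qp)
{
	pthread_mutex_lock(&qp->lock);
	qp->refcnt++;            /* c4iw_qp_add_ref(): atomic_inc()       */
	pthread_mutex_unlock(&qp->lock);
}

static void my_qp_put(struct my_qp *qp)
{
	pthread_mutex_lock(&qp->lock);
	if (--qp->refcnt == 0)   /* c4iw_qp_rem_ref(): atomic_dec_and_test() */
		pthread_cond_broadcast(&qp->drained);   /* wake_up(&qp->wait) */
	pthread_mutex_unlock(&qp->lock);
}

static void my_qp_destroy(struct my_qp *qp)
{
	/* destroy path: drop the initial reference, then wait for the rest. */
	my_qp_put(qp);
	pthread_mutex_lock(&qp->lock);
	while (qp->refcnt != 0)
		pthread_cond_wait(&qp->drained, &qp->lock);
	pthread_mutex_unlock(&qp->lock);
}

int main(void)
{
	struct my_qp qp = { .refcnt = 1,
			    .lock = PTHREAD_MUTEX_INITIALIZER,
			    .drained = PTHREAD_COND_INITIALIZER };

	my_qp_get(&qp);      /* e.g. an endpoint holding the QP          */
	my_qp_put(&qp);      /* endpoint done                            */
	my_qp_destroy(&qp);  /* returns only once all references dropped */
	printf("qp drained\n");
	return 0;
}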
H A Dev.c109 event.element.qp = &qhp->ibqp; post_qp_event()
H A Diw_cxgb4.h786 struct c4iw_qp *qp; member in struct:c4iw_ep_common
950 int c4iw_bind_mw(struct ib_qp *qp, struct ib_mw *mw,
957 void c4iw_qp_add_ref(struct ib_qp *qp);
958 void c4iw_qp_rem_ref(struct ib_qp *qp);
H A Dprovider.c329 props->max_qp = dev->rdev.lldi.vr->qp.size / 2; c4iw_query_device()
337 props->max_cq = dev->rdev.lldi.vr->qp.size; c4iw_query_device()
/linux-4.4.14/net/ipv4/
H A Dip_fragment.c98 static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
125 const struct ipq *qp; ip4_frag_match() local
128 qp = container_of(q, struct ipq, q); ip4_frag_match()
129 return qp->id == arg->iph->id && ip4_frag_match()
130 qp->saddr == arg->iph->saddr && ip4_frag_match()
131 qp->daddr == arg->iph->daddr && ip4_frag_match()
132 qp->protocol == arg->iph->protocol && ip4_frag_match()
133 qp->user == arg->user && ip4_frag_match()
134 qp->vif == arg->vif; ip4_frag_match()
139 struct ipq *qp = container_of(q, struct ipq, q); ip4_frag_init() local
146 qp->protocol = arg->iph->protocol; ip4_frag_init()
147 qp->id = arg->iph->id; ip4_frag_init()
148 qp->ecn = ip4_frag_ecn(arg->iph->tos); ip4_frag_init()
149 qp->saddr = arg->iph->saddr; ip4_frag_init()
150 qp->daddr = arg->iph->daddr; ip4_frag_init()
151 qp->vif = arg->vif; ip4_frag_init()
152 qp->user = arg->user; ip4_frag_init()
153 qp->peer = sysctl_ipfrag_max_dist ? ip4_frag_init()
160 struct ipq *qp; ip4_frag_free() local
162 qp = container_of(q, struct ipq, q); ip4_frag_free()
163 if (qp->peer) ip4_frag_free()
164 inet_putpeer(qp->peer); ip4_frag_free()
197 struct ipq *qp; ip_expire() local
200 qp = container_of((struct inet_frag_queue *) arg, struct ipq, q); ip_expire()
201 net = container_of(qp->q.net, struct net, ipv4.frags); ip_expire()
203 spin_lock(&qp->q.lock); ip_expire()
205 if (qp->q.flags & INET_FRAG_COMPLETE) ip_expire()
208 ipq_kill(qp); ip_expire()
211 if (!inet_frag_evicting(&qp->q)) { ip_expire()
212 struct sk_buff *head = qp->q.fragments; ip_expire()
218 if (!(qp->q.flags & INET_FRAG_FIRST_IN) || !qp->q.fragments) ip_expire()
222 head->dev = dev_get_by_index_rcu(net, qp->iif); ip_expire()
236 if (frag_expire_skip_icmp(qp->user) && ip_expire()
246 spin_unlock(&qp->q.lock); ip_expire()
247 ipq_put(qp); ip_expire()
275 static int ip_frag_too_far(struct ipq *qp) ip_frag_too_far() argument
277 struct inet_peer *peer = qp->peer; ip_frag_too_far()
286 start = qp->rid; ip_frag_too_far()
288 qp->rid = end; ip_frag_too_far()
290 rc = qp->q.fragments && (end - start) > max; ip_frag_too_far()
295 net = container_of(qp->q.net, struct net, ipv4.frags); ip_frag_too_far()
302 static int ip_frag_reinit(struct ipq *qp) ip_frag_reinit() argument
307 if (!mod_timer(&qp->q.timer, jiffies + qp->q.net->timeout)) { ip_frag_reinit()
308 atomic_inc(&qp->q.refcnt); ip_frag_reinit()
312 fp = qp->q.fragments; ip_frag_reinit()
320 sub_frag_mem_limit(qp->q.net, sum_truesize); ip_frag_reinit()
322 qp->q.flags = 0; ip_frag_reinit()
323 qp->q.len = 0; ip_frag_reinit()
324 qp->q.meat = 0; ip_frag_reinit()
325 qp->q.fragments = NULL; ip_frag_reinit()
326 qp->q.fragments_tail = NULL; ip_frag_reinit()
327 qp->iif = 0; ip_frag_reinit()
328 qp->ecn = 0; ip_frag_reinit()
334 static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb) ip_frag_queue() argument
344 if (qp->q.flags & INET_FRAG_COMPLETE) ip_frag_queue()
348 unlikely(ip_frag_too_far(qp)) && ip_frag_queue()
349 unlikely(err = ip_frag_reinit(qp))) { ip_frag_queue()
350 ipq_kill(qp); ip_frag_queue()
370 if (end < qp->q.len || ip_frag_queue()
371 ((qp->q.flags & INET_FRAG_LAST_IN) && end != qp->q.len)) ip_frag_queue()
373 qp->q.flags |= INET_FRAG_LAST_IN; ip_frag_queue()
374 qp->q.len = end; ip_frag_queue()
381 if (end > qp->q.len) { ip_frag_queue()
383 if (qp->q.flags & INET_FRAG_LAST_IN) ip_frag_queue()
385 qp->q.len = end; ip_frag_queue()
403 prev = qp->q.fragments_tail; ip_frag_queue()
409 for (next = qp->q.fragments; next != NULL; next = next->next) { ip_frag_queue()
448 qp->q.meat -= i; ip_frag_queue()
463 qp->q.fragments = next; ip_frag_queue()
465 qp->q.meat -= free_it->len; ip_frag_queue()
466 sub_frag_mem_limit(qp->q.net, free_it->truesize); ip_frag_queue()
476 qp->q.fragments_tail = skb; ip_frag_queue()
480 qp->q.fragments = skb; ip_frag_queue()
484 qp->iif = dev->ifindex; ip_frag_queue()
487 qp->q.stamp = skb->tstamp; ip_frag_queue()
488 qp->q.meat += skb->len; ip_frag_queue()
489 qp->ecn |= ecn; ip_frag_queue()
490 add_frag_mem_limit(qp->q.net, skb->truesize); ip_frag_queue()
492 qp->q.flags |= INET_FRAG_FIRST_IN; ip_frag_queue()
496 if (fragsize > qp->q.max_size) ip_frag_queue()
497 qp->q.max_size = fragsize; ip_frag_queue()
500 fragsize > qp->max_df_size) ip_frag_queue()
501 qp->max_df_size = fragsize; ip_frag_queue()
503 if (qp->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) && ip_frag_queue()
504 qp->q.meat == qp->q.len) { ip_frag_queue()
508 err = ip_frag_reasm(qp, prev, dev); ip_frag_queue()
524 static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev, ip_frag_reasm() argument
527 struct net *net = container_of(qp->q.net, struct net, ipv4.frags); ip_frag_reasm()
529 struct sk_buff *fp, *head = qp->q.fragments; ip_frag_reasm()
535 ipq_kill(qp); ip_frag_reasm()
537 ecn = ip_frag_ecn_table[qp->ecn]; ip_frag_reasm()
551 qp->q.fragments_tail = fp; ip_frag_reasm()
554 skb_morph(head, qp->q.fragments); ip_frag_reasm()
555 head->next = qp->q.fragments->next; ip_frag_reasm()
557 consume_skb(qp->q.fragments); ip_frag_reasm()
558 qp->q.fragments = head; ip_frag_reasm()
566 len = ihlen + qp->q.len; ip_frag_reasm()
597 add_frag_mem_limit(qp->q.net, clone->truesize); ip_frag_reasm()
612 sub_frag_mem_limit(qp->q.net, head->truesize); ip_frag_reasm()
616 head->tstamp = qp->q.stamp; ip_frag_reasm()
617 IPCB(head)->frag_max_size = max(qp->max_df_size, qp->q.max_size); ip_frag_reasm()
631 if (qp->max_df_size == qp->q.max_size) { ip_frag_reasm()
641 qp->q.fragments = NULL; ip_frag_reasm()
642 qp->q.fragments_tail = NULL; ip_frag_reasm()
646 net_dbg_ratelimited("queue_glue: no memory for gluing queue %p\n", qp); ip_frag_reasm()
650 net_info_ratelimited("Oversized IP packet from %pI4\n", &qp->saddr); ip_frag_reasm()
661 struct ipq *qp; ip_defrag() local
667 qp = ip_find(net, ip_hdr(skb), user, vif); ip_defrag()
668 if (qp) { ip_defrag()
671 spin_lock(&qp->q.lock); ip_defrag()
673 ret = ip_frag_queue(qp, skb); ip_defrag()
675 spin_unlock(&qp->q.lock); ip_defrag()
676 ipq_put(qp); ip_defrag()
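Editor's note: in the ip_fragment.c hits above, ip4_frag_match() identifies which reassembly queue a fragment belongs to by comparing a 6-tuple (IP id, source, destination, protocol, defrag user, VIF). A small standalone sketch of that comparison follows; struct frag_key is a stand-in for struct ipq / the lookup key, not the kernel's actual types.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for the fields ip4_frag_match() compares. */
struct frag_key {
	uint16_t id;        /* IP identification field  */
	uint32_t saddr;     /* source address           */
	uint32_t daddr;     /* destination address      */
	uint8_t  protocol;  /* L4 protocol              */
	uint32_t user;      /* defragmentation user     */
	int      vif;       /* L3 master device index   */
};

/* Mirrors the checks in ip4_frag_match(): every field must match. */
static bool frag_match(const struct frag_key *q, const struct frag_key *arg)
{
	return q->id == arg->id &&
	       q->saddr == arg->saddr &&
	       q->daddr == arg->daddr &&
	       q->protocol == arg->protocol &&
	       q->user == arg->user &&
	       q->vif == arg->vif;
}

int main(void)
{
	struct frag_key a = { 0x1234, 0x0a000001, 0x0a000002, 17, 0, 0 };
	struct frag_key b = a;

	printf("match: %d\n", frag_match(&a, &b));  /* 1: same datagram   */
	b.id++;
	printf("match: %d\n", frag_match(&a, &b));  /* 0: different IP id */
	return 0;
}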
H A Dinet_fragment.c331 struct inet_frag_queue *qp; inet_frag_intern() local
338 hlist_for_each_entry(qp, &hb->chain, list) { inet_frag_intern()
339 if (qp->net == nf && f->match(qp, arg)) { inet_frag_intern()
340 atomic_inc(&qp->refcnt); inet_frag_intern()
344 return qp; inet_frag_intern()
348 qp = qp_in; inet_frag_intern()
349 if (!mod_timer(&qp->timer, jiffies + nf->timeout)) inet_frag_intern()
350 atomic_inc(&qp->refcnt); inet_frag_intern()
352 atomic_inc(&qp->refcnt); inet_frag_intern()
353 hlist_add_head(&qp->list, &hb->chain); inet_frag_intern()
357 return qp; inet_frag_intern()
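Editor's note: inet_frag_intern() above is a lookup-or-insert step: rescan the hash chain under the lock, and if an equal queue was already interned, take a reference on it instead of adding a duplicate; otherwise arm the timer, take the extra references, and link the new queue in. Below is a simplified single-threaded sketch of that decision; struct frag_queue and the integer key comparison are stand-ins for the kernel's structures and f->match() callback.

#include <stdio.h>

struct frag_queue {
	unsigned int key;            /* stands in for the (id, saddr, ...) tuple */
	int refcnt;
	struct frag_queue *next;     /* hash-chain link                          */
};

/* Models inet_frag_intern(): return the queue that ends up in the table,
 * bumping reference counts the way the hits above do. */
static struct frag_queue *frag_intern(struct frag_queue **chain,
				      struct frag_queue *q_in)
{
	for (struct frag_queue *q = *chain; q; q = q->next)
		if (q->key == q_in->key) {   /* f->match(qp, arg)            */
			q->refcnt++;         /* caller's reference            */
			return q;            /* caller then releases q_in     */
		}

	q_in->refcnt++;                      /* reference held by the timer   */
	q_in->refcnt++;                      /* reference held by the table   */
	q_in->next = *chain;
	*chain = q_in;                       /* hlist_add_head()              */
	return q_in;
}

int main(void)
{
	struct frag_queue *chain = NULL;
	struct frag_queue a = { 42, 1, NULL }, dup = { 42, 1, NULL };

	printf("interned %p refs=%d\n", (void *)frag_intern(&chain, &a), a.refcnt);
	printf("interned %p refs=%d\n", (void *)frag_intern(&chain, &dup), a.refcnt);
	return 0;
}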
/linux-4.4.14/drivers/staging/rdma/ehca/
H A Dehca_uverbs.c198 static int ehca_mmap_qp(struct vm_area_struct *vma, struct ehca_qp *qp, ehca_mmap_qp() argument
205 ehca_dbg(qp->ib_qp.device, "qp_num=%x fw", qp->ib_qp.qp_num); ehca_mmap_qp()
206 ret = ehca_mmap_fw(vma, &qp->galpas, &qp->mm_count_galpa); ehca_mmap_qp()
208 ehca_err(qp->ib_qp.device, ehca_mmap_qp()
210 ret, qp->ib_qp.qp_num); ehca_mmap_qp()
215 case 1: /* qp rqueue_addr */ ehca_mmap_qp()
216 ehca_dbg(qp->ib_qp.device, "qp_num=%x rq", qp->ib_qp.qp_num); ehca_mmap_qp()
217 ret = ehca_mmap_queue(vma, &qp->ipz_rqueue, ehca_mmap_qp()
218 &qp->mm_count_rqueue); ehca_mmap_qp()
220 ehca_err(qp->ib_qp.device, ehca_mmap_qp()
222 ret, qp->ib_qp.qp_num); ehca_mmap_qp()
227 case 2: /* qp squeue_addr */ ehca_mmap_qp()
228 ehca_dbg(qp->ib_qp.device, "qp_num=%x sq", qp->ib_qp.qp_num); ehca_mmap_qp()
229 ret = ehca_mmap_queue(vma, &qp->ipz_squeue, ehca_mmap_qp()
230 &qp->mm_count_squeue); ehca_mmap_qp()
232 ehca_err(qp->ib_qp.device, ehca_mmap_qp()
234 ret, qp->ib_qp.qp_num); ehca_mmap_qp()
240 ehca_err(qp->ib_qp.device, "bad resource type=%x qp=num=%x", ehca_mmap_qp()
241 rsrc_type, qp->ib_qp.qp_num); ehca_mmap_qp()
256 struct ehca_qp *qp; ehca_mmap() local
283 qp = idr_find(&ehca_qp_idr, idr_handle); ehca_mmap()
287 if (!qp) ehca_mmap()
290 uobject = IS_SRQ(qp) ? qp->ib_srq.uobject : qp->ib_qp.uobject; ehca_mmap()
294 ret = ehca_mmap_qp(vma, qp, rsrc_type); ehca_mmap()
296 ehca_err(qp->ib_qp.device, ehca_mmap()
298 ret, qp->ib_qp.qp_num); ehca_mmap()
H A Dehca_reqs.c154 static inline int ehca_write_swqe(struct ehca_qp *qp, ehca_write_swqe() argument
164 struct ehca_qmap_entry *qmap_entry = &qp->sq_map.map[sq_map_idx]; ehca_write_swqe()
167 (send_wr->num_sge > qp->ipz_squeue.act_nr_of_sg))) { ehca_write_swqe()
170 send_wr->num_sge, qp->ipz_squeue.act_nr_of_sg); ehca_write_swqe()
205 qp->init_attr.sq_sig_type == IB_SIGNAL_ALL_WR) ehca_write_swqe()
220 switch (qp->qp_type) { ehca_write_swqe()
228 remote_qkey = qp->qkey; ehca_write_swqe()
233 ehca_gen_err("ud_wr(send_wr) is NULL. qp=%p", qp); ehca_write_swqe()
237 ehca_gen_err("dest QP# is 0. qp=%x", qp->real_qp_num); ehca_write_swqe()
255 if (qp->qp_type == IB_QPT_SMI || ehca_write_swqe()
256 qp->qp_type == IB_QPT_GSI) ehca_write_swqe()
258 if (qp->qp_type == IB_QPT_GSI) { ehca_write_swqe()
295 qp->message_count = qp->packet_count = 0; ehca_write_swqe()
296 qp->unsol_ack_circ = 1; ehca_write_swqe()
299 qp->packet_count += (dma_length >> qp->mtu_shift) + 1; ehca_write_swqe()
304 ehca_gen_err("Invalid qptype=%x", qp->qp_type); ehca_write_swqe()
309 ehca_gen_dbg("SEND WQE written into queue qp=%p ", qp); ehca_write_swqe()
441 int ehca_post_send(struct ib_qp *qp, ehca_post_send() argument
445 struct ehca_qp *my_qp = container_of(qp, struct ehca_qp, ib_qp); ehca_post_send()
452 ehca_err(qp->device, "Invalid QP state qp_state=%d qpn=%x", ehca_post_send()
453 my_qp->state, qp->qp_num); ehca_post_send()
476 ehca_dbg(qp->device, "posted circ wr qp_num=%x", qp->qp_num); ehca_post_send()
494 ehca_dbg(qp->device, "ehca_qp=%p qp_num=%x wqe_cnt=%d ret=%i", ehca_post_send()
495 my_qp, qp->qp_num, wqe_cnt, ret); ehca_post_send()
584 int ehca_post_recv(struct ib_qp *qp, ehca_post_recv() argument
588 struct ehca_qp *my_qp = container_of(qp, struct ehca_qp, ib_qp); ehca_post_recv()
592 ehca_err(qp->device, "Invalid QP state qp_state=%d qpn=%x", ehca_post_recv()
593 my_qp->state, qp->qp_num); ehca_post_recv()
598 return internal_post_recv(my_qp, qp->device, recv_wr, bad_recv_wr); ehca_post_recv()
652 struct ehca_qp *qp; ehca_poll_cq_one() local
656 qp = ehca_cq_get_qp(my_cq, cqe->local_qp_number); ehca_poll_cq_one()
657 if (!qp) { ehca_poll_cq_one()
659 "could not find qp -> ignore cqe", ehca_poll_cq_one()
666 spin_lock_irqsave(&qp->spinlock_s, flags); ehca_poll_cq_one()
667 purgeflag = qp->sqerr_purgeflag; ehca_poll_cq_one()
668 spin_unlock_irqrestore(&qp->spinlock_s, flags); ehca_poll_cq_one()
682 qp->sqerr_purgeflag = 0; ehca_poll_cq_one()
706 wc->qp = &my_qp->ib_qp; ehca_poll_cq_one()
858 wc->qp = &my_qp->ib_qp; generate_flush_cqes()
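Editor's note: ehca_post_send()/ehca_post_recv() above refuse work requests unless the QP is in a state that allows posting, logging "Invalid QP state" and bailing out otherwise. A minimal sketch of that guard is shown below; the enum values, struct sketch_qp and sketch_post_send() are hypothetical simplifications, not the driver's checks verbatim.

#include <errno.h>
#include <stdio.h>

/* Hypothetical, simplified QP states; the driver uses the IB verbs states. */
enum qp_state { QPS_RESET, QPS_INIT, QPS_RTR, QPS_RTS, QPS_ERR };

struct sketch_qp {
	enum qp_state state;
	unsigned int wqe_cnt;
};

/* Models the state check at the top of the post_send path. */
static int sketch_post_send(struct sketch_qp *qp)
{
	if (qp->state != QPS_RTS) {
		fprintf(stderr, "Invalid QP state qp_state=%d\n", qp->state);
		return -EINVAL;
	}
	qp->wqe_cnt++;        /* here the real driver writes the WQE and rings the doorbell */
	return 0;
}

int main(void)
{
	struct sketch_qp qp = { .state = QPS_INIT };

	printf("post in INIT: %d\n", sketch_post_send(&qp));  /* -EINVAL */
	qp.state = QPS_RTS;
	printf("post in RTS:  %d\n", sketch_post_send(&qp));  /* 0       */
	return 0;
}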
H A Dehca_irq.c98 struct ehca_qp *qp = (struct ehca_qp *)data; print_error_data() local
106 qp->ib_qp.qp_num, resource); print_error_data()
179 static void dispatch_qp_event(struct ehca_shca *shca, struct ehca_qp *qp, dispatch_qp_event() argument
185 if (event_type == IB_EVENT_PATH_MIG && !qp->mig_armed) dispatch_qp_event()
191 if (qp->ext_type == EQPT_SRQ) { dispatch_qp_event()
192 if (!qp->ib_srq.event_handler) dispatch_qp_event()
195 event.element.srq = &qp->ib_srq; dispatch_qp_event()
196 qp->ib_srq.event_handler(&event, qp->ib_srq.srq_context); dispatch_qp_event()
198 if (!qp->ib_qp.event_handler) dispatch_qp_event()
201 event.element.qp = &qp->ib_qp; dispatch_qp_event()
202 qp->ib_qp.event_handler(&event, qp->ib_qp.qp_context); dispatch_qp_event()
209 struct ehca_qp *qp; qp_event_callback() local
213 qp = idr_find(&ehca_qp_idr, token); qp_event_callback()
214 if (qp) qp_event_callback()
215 atomic_inc(&qp->nr_events); qp_event_callback()
218 if (!qp) qp_event_callback()
222 ehca_error_data(shca, qp, qp->ipz_qp_handle.handle); qp_event_callback()
224 dispatch_qp_event(shca, qp, fatal && qp->ext_type == EQPT_SRQ ? qp_event_callback()
232 if (fatal && qp->ext_type == EQPT_SRQBASE) qp_event_callback()
233 dispatch_qp_event(shca, qp, IB_EVENT_QP_LAST_WQE_REACHED); qp_event_callback()
235 if (atomic_dec_and_test(&qp->nr_events)) qp_event_callback()
236 wake_up(&qp->wait_completion); qp_event_callback()
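Editor's note: dispatch_qp_event() above routes an async event either to the SRQ's handler or to the QP's handler depending on the extended QP type, and qp_event_callback() pins the QP with an event counter while doing so. The sketch below shows only the dispatch branch; struct sketch_qp, struct sketch_event and the handler typedef are stand-ins for the driver's types.

#include <stdio.h>

struct sketch_event { int type; void *element; };

typedef void (*event_handler_t)(struct sketch_event *ev, void *ctx);

/* Stand-ins for the qp/srq halves of struct ehca_qp. */
struct sketch_qp {
	int is_srq;                    /* models qp->ext_type == EQPT_SRQ */
	event_handler_t qp_handler;    /* models ib_qp.event_handler      */
	event_handler_t srq_handler;   /* models ib_srq.event_handler     */
	void *qp_ctx, *srq_ctx;
};

/* Mirrors the branch in dispatch_qp_event(): SRQ handler or QP handler. */
static void dispatch_event(struct sketch_qp *qp, struct sketch_event *ev)
{
	if (qp->is_srq) {
		if (qp->srq_handler)
			qp->srq_handler(ev, qp->srq_ctx);
	} else {
		if (qp->qp_handler)
			qp->qp_handler(ev, qp->qp_ctx);
	}
}

static void print_handler(struct sketch_event *ev, void *ctx)
{
	printf("event %d delivered to %s\n", ev->type, (const char *)ctx);
}

int main(void)
{
	struct sketch_qp qp = { 0, print_handler, print_handler, "qp", "srq" };
	struct sketch_event ev = { 1, NULL };

	dispatch_event(&qp, &ev);      /* "event 1 delivered to qp"  */
	qp.is_srq = 1;
	dispatch_event(&qp, &ev);      /* "event 1 delivered to srq" */
	return 0;
}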
H A Dehca_cq.c55 int ehca_cq_assign_qp(struct ehca_cq *cq, struct ehca_qp *qp) ehca_cq_assign_qp() argument
57 unsigned int qp_num = qp->real_qp_num; ehca_cq_assign_qp()
62 hlist_add_head(&qp->list_entries, &cq->qp_hashtab[key]); ehca_cq_assign_qp()
76 struct ehca_qp *qp; ehca_cq_unassign_qp() local
81 qp = hlist_entry(iter, struct ehca_qp, list_entries); ehca_cq_unassign_qp()
82 if (qp->real_qp_num == real_qp_num) { ehca_cq_unassign_qp()
85 "removed qp from cq .cq_num=%x real_qp_num=%x", ehca_cq_unassign_qp()
94 "qp not found cq_num=%x real_qp_num=%x", ehca_cq_unassign_qp()
105 struct ehca_qp *qp; ehca_cq_get_qp() local
107 qp = hlist_entry(iter, struct ehca_qp, list_entries); ehca_cq_get_qp()
108 if (qp->real_qp_num == real_qp_num) { ehca_cq_get_qp()
109 ret = qp; ehca_cq_get_qp()
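Editor's note: ehca_cq_assign_qp()/ehca_cq_get_qp() above keep a small per-CQ hash table keyed by the real QP number, so ehca_poll_cq_one() can resolve a CQE's local_qp_number back to the owning QP. Below is a standalone sketch of that lookup using a fixed-size bucket array with singly linked chains (the driver uses hlist); the bucket count and the sketch_* names are stand-ins.

#include <stdio.h>
#include <stddef.h>

#define QP_HASHTAB_LEN 8   /* bucket count is a stand-in value */

struct sketch_qp {
	unsigned int real_qp_num;
	struct sketch_qp *next;      /* chain link, models hlist_node */
};

struct sketch_cq {
	struct sketch_qp *buckets[QP_HASHTAB_LEN];
};

/* ehca_cq_assign_qp(): hash by QP number, push onto the bucket's chain. */
static void cq_assign_qp(struct sketch_cq *cq, struct sketch_qp *qp)
{
	unsigned int key = qp->real_qp_num % QP_HASHTAB_LEN;

	qp->next = cq->buckets[key];
	cq->buckets[key] = qp;
}

/* ehca_cq_get_qp(): walk the chain until the QP number matches. */
static struct sketch_qp *cq_get_qp(struct sketch_cq *cq, unsigned int qpn)
{
	for (struct sketch_qp *qp = cq->buckets[qpn % QP_HASHTAB_LEN];
	     qp; qp = qp->next)
		if (qp->real_qp_num == qpn)
			return qp;
	return NULL;
}

int main(void)
{
	struct sketch_cq cq = { { NULL } };
	struct sketch_qp a = { 17, NULL }, b = { 25, NULL };

	cq_assign_qp(&cq, &a);
	cq_assign_qp(&cq, &b);       /* 17 and 25 collide in bucket 1 */
	printf("found %u\n", cq_get_qp(&cq, 25)->real_qp_num);
	printf("miss:  %p\n", (void *)cq_get_qp(&cq, 99));
	return 0;
}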
H A Dhipz_fns_core.h61 static inline void hipz_update_sqa(struct ehca_qp *qp, u16 nr_wqes) hipz_update_sqa() argument
64 hipz_galpa_store_qp(qp->galpas.kernel, qpx_sqa, hipz_update_sqa()
68 static inline void hipz_update_rqa(struct ehca_qp *qp, u16 nr_wqes) hipz_update_rqa() argument
71 hipz_galpa_store_qp(qp->galpas.kernel, qpx_rqa, hipz_update_rqa()
H A Dehca_iverbs.h104 int ehca_bind_mw(struct ib_qp *qp, struct ib_mw *mw,
152 int ehca_destroy_qp(struct ib_qp *qp);
157 int ehca_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
160 int ehca_post_send(struct ib_qp *qp, struct ib_send_wr *send_wr,
163 int ehca_post_recv(struct ib_qp *qp, struct ib_recv_wr *recv_wr,
184 int ehca_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
186 int ehca_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
206 void ehca_add_to_err_list(struct ehca_qp *qp, int on_sq);
H A Dehca_qp.c58 * attributes not supported by query qp
64 * ehca (internal) qp state values
77 * qp state transitions as defined by IB Arch Rel 1.1 page 431
96 * returns ehca qp state corresponding to given ib qp state
123 * returns ib qp state corresponding to given ehca qp state
163 * returns ehca qp type corresponding to ib qp type
238 * ib qp type used by create_qp()
400 void ehca_add_to_err_list(struct ehca_qp *qp, int on_sq) ehca_add_to_err_list() argument
405 if (qp->ext_type == EQPT_LLQP) ehca_add_to_err_list()
409 list = &qp->send_cq->sqp_err_list; ehca_add_to_err_list()
410 node = &qp->sq_err_node; ehca_add_to_err_list()
412 list = &qp->recv_cq->rqp_err_list; ehca_add_to_err_list()
413 node = &qp->rq_err_node; ehca_add_to_err_list()
614 ehca_err(pd->device, "pd=%p not enough memory to alloc qp", pd); internal_create_qp()
651 ehca_err(pd->device, "Invalid number of qp"); internal_create_qp()
849 /* alloc array to cache subsequent modify qp parms internal_create_qp()
882 "Couldn't assign qp to send_cq ret=%i", ret); internal_create_qp()
1279 if (qp_cur_state == -EINVAL) { /* invalid qp state */ internal_modify_qp()
1335 "Invalid qp transition new_state=%x cur_state=%x " internal_modify_qp()
1346 ehca_err(ibqp->device, "Invalid new qp state=%x " internal_modify_qp()
1474 ehca_warn(ibqp->device, "Couldn't modify qp port=%x: " internal_modify_qp()
1887 int ehca_query_qp(struct ib_qp *qp, ehca_query_qp() argument
1891 struct ehca_qp *my_qp = container_of(qp, struct ehca_qp, ib_qp); ehca_query_qp()
1892 struct ehca_shca *shca = container_of(qp->device, struct ehca_shca, ehca_query_qp()
1900 ehca_err(qp->device, "Invalid attribute mask " ehca_query_qp()
1902 my_qp, qp->qp_num, qp_attr_mask); ehca_query_qp()
1908 ehca_err(qp->device, "Out of memory for qpcb " ehca_query_qp()
1909 "ehca_qp=%p qp_num=%x", my_qp, qp->qp_num); ehca_query_qp()
1920 ehca_err(qp->device, "hipz_h_query_qp() failed " ehca_query_qp()
1922 my_qp, qp->qp_num, h_ret); ehca_query_qp()
1931 ehca_err(qp->device, "Got invalid ehca_qp_state=%x " ehca_query_qp()
1933 qpcb->qp_state, my_qp, qp->qp_num); ehca_query_qp()
2024 ehca_dmp(qpcb, 4*70, "qp_num=%x", qp->qp_num); ehca_query_qp()
2159 ehca_err(dev, "Couldn't unassign qp from " internal_destroy_qp()
2228 int ehca_destroy_qp(struct ib_qp *qp) ehca_destroy_qp() argument
2230 return internal_destroy_qp(qp->device, ehca_destroy_qp()
2231 container_of(qp, struct ehca_qp, ib_qp), ehca_destroy_qp()
2232 qp->uobject); ehca_destroy_qp()
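Editor's note: several ehca_qp.c hits above are the comments on the helpers that translate between IB verbs QP states and the adapter's internal states (and validate transitions per IB Arch Rel 1.1). A tiny table-driven sketch of such a mapping is shown below; the enum values on the "hardware" side and the function names are hypothetical, not ehca's actual encoding.

#include <stdio.h>

/* IB-style states (subset) and a hypothetical internal encoding. */
enum ib_state { IB_RESET, IB_INIT, IB_RTR, IB_RTS, IB_ERR, IB_NR_STATES };
enum hw_state { HW_RESET = 1, HW_INIT = 2, HW_RTR = 3, HW_RTS = 5, HW_ERR = 128 };

/* One lookup table per direction; -1 marks an unmappable value. */
static const int ib2hw[IB_NR_STATES] = {
	[IB_RESET] = HW_RESET, [IB_INIT] = HW_INIT,
	[IB_RTR]   = HW_RTR,   [IB_RTS]  = HW_RTS, [IB_ERR] = HW_ERR,
};

static int map_ib_to_hw(int s)
{
	if (s < 0 || s >= IB_NR_STATES)
		return -1;                 /* invalid qp state */
	return ib2hw[s];
}

int main(void)
{
	printf("IB_RTS -> hw %d\n", map_ib_to_hw(IB_RTS));  /* 5  */
	printf("bogus  -> hw %d\n", map_ib_to_hw(42));      /* -1 */
	return 0;
}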
H A Dehca_classes.h153 /* struct to cache modify_qp()'s parms for GSI/SMI qp */
211 /* array to cache modify_qp()'s parms for GSI/SMI qp */
230 #define IS_SRQ(qp) (qp->ext_type == EQPT_SRQ)
231 #define HAS_SQ(qp) (qp->ext_type != EQPT_SRQ)
232 #define HAS_RQ(qp) (qp->ext_type != EQPT_SRQBASE)
478 int ehca_cq_assign_qp(struct ehca_cq *cq, struct ehca_qp *qp);
H A Dhcp_if.c583 struct ehca_qp *qp) hipz_h_destroy_qp()
588 ret = hcp_galpas_dtor(&qp->galpas); hipz_h_destroy_qp()
590 ehca_gen_err("Could not destruct qp->galpas"); hipz_h_destroy_qp()
597 qp->ipz_qp_handle.handle, /* r6 */ hipz_h_destroy_qp()
604 qp->ipz_qp_handle.handle, /* r5 */ hipz_h_destroy_qp()
582 hipz_h_destroy_qp(const struct ipz_adapter_handle adapter_handle, struct ehca_qp *qp) hipz_h_destroy_qp() argument
H A Dhcp_if.h161 struct ehca_qp *qp);
/linux-4.4.14/drivers/staging/rdma/amso1100/
H A Dc2_qp.c120 void c2_set_qp_state(struct c2_qp *qp, int c2_state) c2_set_qp_state() argument
124 pr_debug("%s: qp[%p] state modify %s --> %s\n", c2_set_qp_state()
126 qp, c2_set_qp_state()
127 to_ib_state_str(qp->state), c2_set_qp_state()
129 qp->state = new_state; c2_set_qp_state()
134 int c2_qp_modify(struct c2_dev *c2dev, struct c2_qp *qp, c2_qp_modify() argument
144 pr_debug("%s:%d qp=%p, %s --> %s\n", c2_qp_modify()
146 qp, c2_qp_modify()
147 to_ib_state_str(qp->state), c2_qp_modify()
157 wr.qp_handle = qp->adapter_handle; c2_qp_modify()
173 spin_lock_irqsave(&qp->lock, flags); c2_qp_modify()
174 if (qp->cm_id && qp->state == IB_QPS_RTS) { c2_qp_modify()
176 "qp=%p, cm_id=%p\n",qp,qp->cm_id); c2_qp_modify()
178 vq_req->cm_id = qp->cm_id; c2_qp_modify()
181 spin_unlock_irqrestore(&qp->lock, flags); c2_qp_modify()
225 qp->state = next_state; c2_qp_modify()
235 spin_lock_irqsave(&qp->lock, flags); c2_qp_modify()
236 if (vq_req->event==IW_CM_EVENT_CLOSE && qp->cm_id) { c2_qp_modify()
237 qp->cm_id->rem_ref(qp->cm_id); c2_qp_modify()
238 qp->cm_id = NULL; c2_qp_modify()
240 spin_unlock_irqrestore(&qp->lock, flags); c2_qp_modify()
246 pr_debug("%s:%d qp=%p, cur_state=%s\n", c2_qp_modify()
248 qp, c2_qp_modify()
249 to_ib_state_str(qp->state)); c2_qp_modify()
253 int c2_qp_set_read_limits(struct c2_dev *c2dev, struct c2_qp *qp, c2_qp_set_read_limits() argument
268 wr.qp_handle = qp->adapter_handle; c2_qp_set_read_limits()
302 static int destroy_qp(struct c2_dev *c2dev, struct c2_qp *qp) destroy_qp() argument
324 wr.qp_handle = qp->adapter_handle; destroy_qp()
331 spin_lock_irqsave(&qp->lock, flags); destroy_qp()
332 if (qp->cm_id && qp->state == IB_QPS_RTS) { destroy_qp()
334 "qp=%p, cm_id=%p\n",qp,qp->cm_id); destroy_qp()
336 vq_req->qp = qp; destroy_qp()
337 vq_req->cm_id = qp->cm_id; destroy_qp()
340 spin_unlock_irqrestore(&qp->lock, flags); destroy_qp()
368 spin_lock_irqsave(&qp->lock, flags); destroy_qp()
369 if (qp->cm_id) { destroy_qp()
370 qp->cm_id->rem_ref(qp->cm_id); destroy_qp()
371 qp->cm_id = NULL; destroy_qp()
373 spin_unlock_irqrestore(&qp->lock, flags); destroy_qp()
381 static int c2_alloc_qpn(struct c2_dev *c2dev, struct c2_qp *qp) c2_alloc_qpn() argument
388 ret = idr_alloc_cyclic(&c2dev->qp_table.idr, qp, 0, 0, GFP_NOWAIT); c2_alloc_qpn()
390 qp->qpn = ret; c2_alloc_qpn()
407 struct c2_qp *qp; c2_find_qpn() local
410 qp = idr_find(&c2dev->qp_table.idr, qpn); c2_find_qpn()
412 return qp; c2_find_qpn()
417 struct ib_qp_init_attr *qp_attrs, struct c2_qp *qp) c2_alloc_qp()
429 err = c2_alloc_qpn(c2dev, qp); c2_alloc_qp()
432 qp->ibqp.qp_num = qp->qpn; c2_alloc_qp()
433 qp->ibqp.qp_type = IB_QPT_RC; c2_alloc_qp()
436 qp->sq_mq.shared = c2_alloc_mqsp(c2dev, c2dev->kern_mqsp_pool, c2_alloc_qp()
437 &qp->sq_mq.shared_dma, GFP_KERNEL); c2_alloc_qp()
438 if (!qp->sq_mq.shared) { c2_alloc_qp()
443 qp->rq_mq.shared = c2_alloc_mqsp(c2dev, c2dev->kern_mqsp_pool, c2_alloc_qp()
444 &qp->rq_mq.shared_dma, GFP_KERNEL); c2_alloc_qp()
445 if (!qp->rq_mq.shared) { c2_alloc_qp()
472 wr.shared_sq_ht = cpu_to_be64(qp->sq_mq.shared_dma); c2_alloc_qp()
473 wr.shared_rq_ht = cpu_to_be64(qp->rq_mq.shared_dma); c2_alloc_qp()
477 wr.user_context = (unsigned long) qp; c2_alloc_qp()
506 atomic_set(&qp->refcount, 1); c2_alloc_qp()
507 qp->adapter_handle = reply->qp_handle; c2_alloc_qp()
508 qp->state = IB_QPS_RESET; c2_alloc_qp()
509 qp->send_sgl_depth = qp_attrs->cap.max_send_sge; c2_alloc_qp()
510 qp->rdma_write_sgl_depth = qp_attrs->cap.max_send_sge; c2_alloc_qp()
511 qp->recv_sgl_depth = qp_attrs->cap.max_recv_sge; c2_alloc_qp()
512 init_waitqueue_head(&qp->wait); c2_alloc_qp()
525 c2_mq_req_init(&qp->sq_mq, c2_alloc_qp()
544 c2_mq_req_init(&qp->rq_mq, c2_alloc_qp()
558 iounmap(qp->sq_mq.peer); c2_alloc_qp()
560 destroy_qp(c2dev, qp); c2_alloc_qp()
566 c2_free_mqsp(qp->rq_mq.shared); c2_alloc_qp()
568 c2_free_mqsp(qp->sq_mq.shared); c2_alloc_qp()
570 c2_free_qpn(c2dev, qp->qpn); c2_alloc_qp()
600 void c2_free_qp(struct c2_dev *c2dev, struct c2_qp *qp) c2_free_qp() argument
605 send_cq = to_c2cq(qp->ibqp.send_cq); c2_free_qp()
606 recv_cq = to_c2cq(qp->ibqp.recv_cq); c2_free_qp()
613 c2_free_qpn(c2dev, qp->qpn); c2_free_qp()
617 * Destroy qp in the rnic... c2_free_qp()
619 destroy_qp(c2dev, qp); c2_free_qp()
624 c2_cq_clean(c2dev, qp, send_cq->cqn); c2_free_qp()
626 c2_cq_clean(c2dev, qp, recv_cq->cqn); c2_free_qp()
631 iounmap(qp->sq_mq.peer); c2_free_qp()
632 iounmap(qp->rq_mq.peer); c2_free_qp()
633 c2_free_mqsp(qp->sq_mq.shared); c2_free_qp()
634 c2_free_mqsp(qp->rq_mq.shared); c2_free_qp()
636 atomic_dec(&qp->refcount); c2_free_qp()
637 wait_event(qp->wait, !atomic_read(&qp->refcount)); c2_free_qp()
754 * qp - ptr to user qp
762 static int qp_wr_post(struct c2_mq *q, union c2wr * wr, struct c2_qp *qp, u32 size) qp_wr_post() argument
794 struct c2_qp *qp = to_c2qp(ibqp); c2_post_send() local
804 if (qp->state > IB_QPS_RTS) { c2_post_send()
837 if (ib_wr->num_sge > qp->send_sgl_depth) { c2_post_send()
855 if (ib_wr->num_sge > qp->rdma_write_sgl_depth) { c2_post_send()
922 spin_lock_irqsave(&qp->lock, lock_flags); c2_post_send()
923 err = qp_wr_post(&qp->sq_mq, &wr, qp, msg_size); c2_post_send()
925 spin_unlock_irqrestore(&qp->lock, lock_flags); c2_post_send()
932 c2_activity(c2dev, qp->sq_mq.index, qp->sq_mq.hint_count); c2_post_send()
933 spin_unlock_irqrestore(&qp->lock, lock_flags); c2_post_send()
948 struct c2_qp *qp = to_c2qp(ibqp); c2_post_receive() local
953 if (qp->state > IB_QPS_RTS) { c2_post_receive()
965 if (ib_wr->num_sge > qp->recv_sgl_depth) { c2_post_receive()
993 spin_lock_irqsave(&qp->lock, lock_flags); c2_post_receive()
994 err = qp_wr_post(&qp->rq_mq, &wr, qp, qp->rq_mq.msg_size); c2_post_receive()
996 spin_unlock_irqrestore(&qp->lock, lock_flags); c2_post_receive()
1003 c2_activity(c2dev, qp->rq_mq.index, qp->rq_mq.hint_count); c2_post_receive()
1004 spin_unlock_irqrestore(&qp->lock, lock_flags); c2_post_receive()
415 c2_alloc_qp(struct c2_dev *c2dev, struct c2_pd *pd, struct ib_qp_init_attr *qp_attrs, struct c2_qp *qp) c2_alloc_qp() argument
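Editor's note: c2_alloc_qpn()/c2_find_qpn() above hand out QP numbers from an IDR and later look the QP back up by number. Below is a simplified standalone sketch using a plain array as the id-to-pointer map (the driver uses idr_alloc_cyclic()/idr_find() under a lock); the table size and the alloc_qpn()/find_qpn() names are stand-ins.

#include <stdio.h>
#include <stddef.h>

#define MAX_QPN 16   /* table size is a stand-in value */

struct sketch_qp { int qpn; };

static struct sketch_qp *qp_table[MAX_QPN];
static int next_qpn;   /* cyclic allocation cursor */

/* Models c2_alloc_qpn(): find a free slot, store the QP, return its number. */
static int alloc_qpn(struct sketch_qp *qp)
{
	for (int i = 0; i < MAX_QPN; i++) {
		int id = (next_qpn + i) % MAX_QPN;

		if (!qp_table[id]) {
			qp_table[id] = qp;
			qp->qpn = id;
			next_qpn = id + 1;
			return id;
		}
	}
	return -1;   /* table full */
}

/* Models c2_find_qpn(): direct lookup by QP number. */
static struct sketch_qp *find_qpn(int qpn)
{
	if (qpn < 0 || qpn >= MAX_QPN)
		return NULL;
	return qp_table[qpn];
}

int main(void)
{
	struct sketch_qp a, b;

	printf("a -> qpn %d\n", alloc_qpn(&a));
	printf("b -> qpn %d\n", alloc_qpn(&b));
	printf("lookup(1) == &b: %d\n", find_qpn(1) == &b);
	return 0;
}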
H A Dc2_ae.c186 struct c2_qp *qp = resource_user_context; c2_ae_event() local
187 struct iw_cm_id *cm_id = qp->cm_id; c2_ae_event()
191 pr_debug("event received, but cm_id is <nul>, qp=%p!\n", c2_ae_event()
192 qp); c2_ae_event()
205 c2_set_qp_state(qp, be32_to_cpu(wr->ae.ae_generic.qp_state)); c2_ae_event()
220 spin_lock_irqsave(&qp->lock, flags); c2_ae_event()
221 if (qp->cm_id) { c2_ae_event()
222 qp->cm_id->rem_ref(qp->cm_id); c2_ae_event()
223 qp->cm_id = NULL; c2_ae_event()
225 spin_unlock_irqrestore(&qp->lock, flags); c2_ae_event()
235 ib_event.element.qp = &qp->ibqp; c2_ae_event()
238 if (qp->ibqp.event_handler) c2_ae_event()
239 qp->ibqp.event_handler(&ib_event, c2_ae_event()
240 qp->ibqp. c2_ae_event()
249 spin_lock_irqsave(&qp->lock, flags); c2_ae_event()
250 if (qp->cm_id) { c2_ae_event()
251 qp->cm_id->rem_ref(qp->cm_id); c2_ae_event()
252 qp->cm_id = NULL; c2_ae_event()
254 spin_unlock_irqrestore(&qp->lock, flags); c2_ae_event()
265 event_id, qp, cm_id); c2_ae_event()
H A Dc2_cm.c45 struct c2_qp *qp; c2_llp_connect() local
57 qp = to_c2qp(ibqp); c2_llp_connect()
60 cm_id->provider_data = qp; c2_llp_connect()
62 qp->cm_id = cm_id; c2_llp_connect()
74 err = c2_qp_set_read_limits(c2dev, qp, iw_param->ord, iw_param->ird); c2_llp_connect()
96 wr->qp_handle = qp->adapter_handle; c2_llp_connect()
129 qp->cm_id = NULL; c2_llp_connect()
291 struct c2_qp *qp; c2_llp_accept() local
301 qp = to_c2qp(ibqp); c2_llp_accept()
304 err = c2_qp_set_read_limits(c2dev, qp, iw_param->ord, iw_param->ird); c2_llp_accept()
314 vq_req->qp = qp; c2_llp_accept()
329 wr->qp_handle = qp->adapter_handle; c2_llp_accept()
332 cm_id->provider_data = qp; c2_llp_accept()
334 qp->cm_id = cm_id; c2_llp_accept()
336 cm_id->provider_data = qp; c2_llp_accept()
377 c2_set_qp_state(qp, C2_QP_STATE_RTS); c2_llp_accept()
388 qp->cm_id = NULL; c2_llp_accept()
H A Dc2_provider.c207 struct c2_qp *qp; c2_add_ref() local
209 qp = to_c2qp(ibqp); c2_add_ref()
210 atomic_inc(&qp->refcount); c2_add_ref()
215 struct c2_qp *qp; c2_rem_ref() local
217 qp = to_c2qp(ibqp); c2_rem_ref()
218 if (atomic_dec_and_test(&qp->refcount)) c2_rem_ref()
219 wake_up(&qp->wait); c2_rem_ref()
225 struct c2_qp *qp; c2_get_qp() local
227 qp = c2_find_qpn(c2dev, qpn); c2_get_qp()
229 __func__, qp, qpn, device, c2_get_qp()
230 (qp?atomic_read(&qp->refcount):0)); c2_get_qp()
232 return (qp?&qp->ibqp:NULL); c2_get_qp()
239 struct c2_qp *qp; c2_create_qp() local
249 qp = kzalloc(sizeof(*qp), GFP_KERNEL); c2_create_qp()
250 if (!qp) { c2_create_qp()
254 spin_lock_init(&qp->lock); c2_create_qp()
260 to_c2pd(pd), init_attr, qp); c2_create_qp()
274 kfree(qp); c2_create_qp()
278 return &qp->ibqp; c2_create_qp()
283 struct c2_qp *qp = to_c2qp(ib_qp); c2_destroy_qp() local
285 pr_debug("%s:%u qp=%p,qp->state=%d\n", c2_destroy_qp()
286 __func__, __LINE__, ib_qp, qp->state); c2_destroy_qp()
287 c2_free_qp(to_c2dev(ib_qp->device), qp); c2_destroy_qp()
288 kfree(qp); c2_destroy_qp()
H A Dc2_cq.c82 void c2_cq_clean(struct c2_dev *c2dev, struct c2_qp *qp, u32 mq_index) c2_cq_clean() argument
100 if (msg->qp_user_context == (u64) (unsigned long) qp) { c2_cq_clean()
135 struct c2_qp *qp; c2_poll_one() local
144 * if the qp returned is null then this qp has already c2_poll_one()
148 while ((qp = c2_poll_one()
158 entry->qp = &qp->ibqp; c2_poll_one()
190 c2_mq_lconsume(&qp->rq_mq, 1); c2_poll_one()
192 c2_mq_lconsume(&qp->sq_mq, c2_poll_one()
H A Dc2.h489 struct ib_qp_init_attr *qp_attrs, struct c2_qp *qp);
490 extern void c2_free_qp(struct c2_dev *c2dev, struct c2_qp *qp);
492 extern int c2_qp_modify(struct c2_dev *c2dev, struct c2_qp *qp,
494 extern int c2_qp_set_read_limits(struct c2_dev *c2dev, struct c2_qp *qp,
516 extern void c2_cq_clean(struct c2_dev *c2dev, struct c2_qp *qp, u32 mq_index);
H A Dc2_vq.h47 struct c2_qp *qp; member in struct:c2_vq_req
H A Dc2_intr.c185 c2_set_qp_state(req->qp, handle_vq()
H A Dc2_vq.c113 r->qp = NULL; vq_req_alloc()
/linux-4.4.14/drivers/infiniband/hw/ocrdma/
H A Docrdma_verbs.c1196 static int ocrdma_add_qpn_map(struct ocrdma_dev *dev, struct ocrdma_qp *qp) ocrdma_add_qpn_map() argument
1200 if (qp->id < OCRDMA_MAX_QP && dev->qp_tbl[qp->id] == NULL) { ocrdma_add_qpn_map()
1201 dev->qp_tbl[qp->id] = qp; ocrdma_add_qpn_map()
1207 static void ocrdma_del_qpn_map(struct ocrdma_dev *dev, struct ocrdma_qp *qp) ocrdma_del_qpn_map() argument
1209 dev->qp_tbl[qp->id] = NULL; ocrdma_del_qpn_map()
1219 pr_err("%s(%d) unsupported qp type=0x%x requested\n", ocrdma_check_qp_params()
1285 static int ocrdma_copy_qp_uresp(struct ocrdma_qp *qp, ocrdma_copy_qp_uresp() argument
1292 struct ocrdma_pd *pd = qp->pd; ocrdma_copy_qp_uresp()
1298 uresp.qp_id = qp->id; ocrdma_copy_qp_uresp()
1299 uresp.sq_dbid = qp->sq.dbid; ocrdma_copy_qp_uresp()
1301 uresp.sq_page_size = PAGE_ALIGN(qp->sq.len); ocrdma_copy_qp_uresp()
1302 uresp.sq_page_addr[0] = virt_to_phys(qp->sq.va); ocrdma_copy_qp_uresp()
1303 uresp.num_wqe_allocated = qp->sq.max_cnt; ocrdma_copy_qp_uresp()
1305 uresp.rq_dbid = qp->rq.dbid; ocrdma_copy_qp_uresp()
1307 uresp.rq_page_size = PAGE_ALIGN(qp->rq.len); ocrdma_copy_qp_uresp()
1308 uresp.rq_page_addr[0] = virt_to_phys(qp->rq.va); ocrdma_copy_qp_uresp()
1309 uresp.num_rqe_allocated = qp->rq.max_cnt; ocrdma_copy_qp_uresp()
1317 if (qp->dpp_enabled) { ocrdma_copy_qp_uresp()
1344 static void ocrdma_set_qp_db(struct ocrdma_dev *dev, struct ocrdma_qp *qp, ocrdma_set_qp_db() argument
1348 qp->sq_db = dev->nic_info.db + ocrdma_set_qp_db()
1351 qp->rq_db = dev->nic_info.db + ocrdma_set_qp_db()
1355 qp->sq_db = dev->nic_info.db + ocrdma_set_qp_db()
1358 qp->rq_db = dev->nic_info.db + ocrdma_set_qp_db()
1364 static int ocrdma_alloc_wr_id_tbl(struct ocrdma_qp *qp) ocrdma_alloc_wr_id_tbl() argument
1366 qp->wqe_wr_id_tbl = ocrdma_alloc_wr_id_tbl()
1367 kzalloc(sizeof(*(qp->wqe_wr_id_tbl)) * qp->sq.max_cnt, ocrdma_alloc_wr_id_tbl()
1369 if (qp->wqe_wr_id_tbl == NULL) ocrdma_alloc_wr_id_tbl()
1371 qp->rqe_wr_id_tbl = ocrdma_alloc_wr_id_tbl()
1372 kzalloc(sizeof(u64) * qp->rq.max_cnt, GFP_KERNEL); ocrdma_alloc_wr_id_tbl()
1373 if (qp->rqe_wr_id_tbl == NULL) ocrdma_alloc_wr_id_tbl()
1379 static void ocrdma_set_qp_init_params(struct ocrdma_qp *qp, ocrdma_set_qp_init_params() argument
1383 qp->pd = pd; ocrdma_set_qp_init_params()
1384 spin_lock_init(&qp->q_lock); ocrdma_set_qp_init_params()
1385 INIT_LIST_HEAD(&qp->sq_entry); ocrdma_set_qp_init_params()
1386 INIT_LIST_HEAD(&qp->rq_entry); ocrdma_set_qp_init_params()
1388 qp->qp_type = attrs->qp_type; ocrdma_set_qp_init_params()
1389 qp->cap_flags = OCRDMA_QP_INB_RD | OCRDMA_QP_INB_WR; ocrdma_set_qp_init_params()
1390 qp->max_inline_data = attrs->cap.max_inline_data; ocrdma_set_qp_init_params()
1391 qp->sq.max_sges = attrs->cap.max_send_sge; ocrdma_set_qp_init_params()
1392 qp->rq.max_sges = attrs->cap.max_recv_sge; ocrdma_set_qp_init_params()
1393 qp->state = OCRDMA_QPS_RST; ocrdma_set_qp_init_params()
1394 qp->signaled = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR) ? true : false; ocrdma_set_qp_init_params()
1413 struct ocrdma_qp *qp; ocrdma_create_qp() local
1427 qp = kzalloc(sizeof(*qp), GFP_KERNEL); ocrdma_create_qp()
1428 if (!qp) { ocrdma_create_qp()
1432 ocrdma_set_qp_init_params(qp, pd, attrs); ocrdma_create_qp()
1434 qp->cap_flags |= (OCRDMA_QP_MW_BIND | OCRDMA_QP_LKEY0 | ocrdma_create_qp()
1438 status = ocrdma_mbx_create_qp(qp, attrs, ureq.enable_dpp_cq, ocrdma_create_qp()
1446 status = ocrdma_alloc_wr_id_tbl(qp); ocrdma_create_qp()
1451 status = ocrdma_add_qpn_map(dev, qp); ocrdma_create_qp()
1454 ocrdma_set_qp_db(dev, qp, pd); ocrdma_create_qp()
1456 status = ocrdma_copy_qp_uresp(qp, udata, dpp_offset, ocrdma_create_qp()
1463 qp->ibqp.qp_num = qp->id; ocrdma_create_qp()
1465 return &qp->ibqp; ocrdma_create_qp()
1468 ocrdma_del_qpn_map(dev, qp); ocrdma_create_qp()
1470 ocrdma_mbx_destroy_qp(dev, qp); ocrdma_create_qp()
1473 kfree(qp->wqe_wr_id_tbl); ocrdma_create_qp()
1474 kfree(qp->rqe_wr_id_tbl); ocrdma_create_qp()
1475 kfree(qp); ocrdma_create_qp()
1485 struct ocrdma_qp *qp; _ocrdma_modify_qp() local
1489 qp = get_ocrdma_qp(ibqp); _ocrdma_modify_qp()
1492 status = ocrdma_qp_state_change(qp, attr->qp_state, &old_qps); _ocrdma_modify_qp()
1498 status = ocrdma_mbx_modify_qp(dev, qp, attr, attr_mask); _ocrdma_modify_qp()
1508 struct ocrdma_qp *qp; ocrdma_modify_qp() local
1512 qp = get_ocrdma_qp(ibqp); ocrdma_modify_qp()
1518 spin_lock_irqsave(&qp->q_lock, flags); ocrdma_modify_qp()
1519 old_qps = get_ibqp_state(qp->state); ocrdma_modify_qp()
1524 spin_unlock_irqrestore(&qp->q_lock, flags); ocrdma_modify_qp()
1530 __func__, dev->id, attr_mask, qp->id, ibqp->qp_type, ocrdma_modify_qp()
1579 struct ocrdma_qp *qp = get_ocrdma_qp(ibqp); ocrdma_query_qp() local
1584 status = ocrdma_mbx_query_qp(dev, qp, &params); ocrdma_query_qp()
1588 if (qp->qp_type == IB_QPT_UD) ocrdma_query_qp()
1600 qp_attr->qp_access_flags = ocrdma_to_ib_qp_acc_flags(qp->cap_flags); ocrdma_query_qp()
1601 qp_attr->cap.max_send_wr = qp->sq.max_cnt - 1; ocrdma_query_qp()
1602 qp_attr->cap.max_recv_wr = qp->rq.max_cnt - 1; ocrdma_query_qp()
1603 qp_attr->cap.max_send_sge = qp->sq.max_sges; ocrdma_query_qp()
1604 qp_attr->cap.max_recv_sge = qp->rq.max_sges; ocrdma_query_qp()
1605 qp_attr->cap.max_inline_data = qp->max_inline_data; ocrdma_query_qp()
1611 qp_attr->ah_attr.grh.sgid_index = qp->sgid_idx; ocrdma_query_qp()
1654 ocrdma_qp_state_change(qp, qp_attr->qp_state, NULL); ocrdma_query_qp()
1672 static int is_hw_sq_empty(struct ocrdma_qp *qp) is_hw_sq_empty() argument
1674 return (qp->sq.tail == qp->sq.head); is_hw_sq_empty()
1677 static int is_hw_rq_empty(struct ocrdma_qp *qp) is_hw_rq_empty() argument
1679 return (qp->rq.tail == qp->rq.head); is_hw_rq_empty()
1704 static void ocrdma_discard_cqes(struct ocrdma_qp *qp, struct ocrdma_cq *cq) ocrdma_discard_cqes() argument
1716 * find the matching CQE for a given qp, ocrdma_discard_cqes()
1726 if (is_hw_sq_empty(qp) && (!qp->srq && is_hw_rq_empty(qp))) ocrdma_discard_cqes()
1736 /* check for matching qp */ ocrdma_discard_cqes()
1737 if (qpn == 0 || qpn != qp->id) ocrdma_discard_cqes()
1741 ocrdma_hwq_inc_tail(&qp->sq); ocrdma_discard_cqes()
1743 if (qp->srq) { ocrdma_discard_cqes()
1746 qp->srq->rq.max_wqe_idx; ocrdma_discard_cqes()
1749 spin_lock_irqsave(&qp->srq->q_lock, flags); ocrdma_discard_cqes()
1750 ocrdma_hwq_inc_tail(&qp->srq->rq); ocrdma_discard_cqes()
1751 ocrdma_srq_toggle_bit(qp->srq, wqe_idx - 1); ocrdma_discard_cqes()
1752 spin_unlock_irqrestore(&qp->srq->q_lock, flags); ocrdma_discard_cqes()
1755 ocrdma_hwq_inc_tail(&qp->rq); ocrdma_discard_cqes()
1769 void ocrdma_del_flush_qp(struct ocrdma_qp *qp) ocrdma_del_flush_qp() argument
1773 struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device); ocrdma_del_flush_qp()
1777 found = ocrdma_is_qp_in_sq_flushlist(qp->sq_cq, qp); ocrdma_del_flush_qp()
1779 list_del(&qp->sq_entry); ocrdma_del_flush_qp()
1780 if (!qp->srq) { ocrdma_del_flush_qp()
1781 found = ocrdma_is_qp_in_rq_flushlist(qp->rq_cq, qp); ocrdma_del_flush_qp()
1783 list_del(&qp->rq_entry); ocrdma_del_flush_qp()
1791 struct ocrdma_qp *qp; ocrdma_destroy_qp() local
1797 qp = get_ocrdma_qp(ibqp); ocrdma_destroy_qp()
1800 pd = qp->pd; ocrdma_destroy_qp()
1803 if (qp->state != OCRDMA_QPS_RST) { ocrdma_destroy_qp()
1813 (void) ocrdma_mbx_destroy_qp(dev, qp); ocrdma_destroy_qp()
1819 spin_lock_irqsave(&qp->sq_cq->cq_lock, flags); ocrdma_destroy_qp()
1820 if (qp->rq_cq && (qp->rq_cq != qp->sq_cq)) ocrdma_destroy_qp()
1821 spin_lock(&qp->rq_cq->cq_lock); ocrdma_destroy_qp()
1823 ocrdma_del_qpn_map(dev, qp); ocrdma_destroy_qp()
1825 if (qp->rq_cq && (qp->rq_cq != qp->sq_cq)) ocrdma_destroy_qp()
1826 spin_unlock(&qp->rq_cq->cq_lock); ocrdma_destroy_qp()
1827 spin_unlock_irqrestore(&qp->sq_cq->cq_lock, flags); ocrdma_destroy_qp()
1830 ocrdma_discard_cqes(qp, qp->sq_cq); ocrdma_destroy_qp()
1831 ocrdma_discard_cqes(qp, qp->rq_cq); ocrdma_destroy_qp()
1836 ocrdma_del_mmap(pd->uctx, (u64) qp->sq.pa, ocrdma_destroy_qp()
1837 PAGE_ALIGN(qp->sq.len)); ocrdma_destroy_qp()
1838 if (!qp->srq) ocrdma_destroy_qp()
1839 ocrdma_del_mmap(pd->uctx, (u64) qp->rq.pa, ocrdma_destroy_qp()
1840 PAGE_ALIGN(qp->rq.len)); ocrdma_destroy_qp()
1843 ocrdma_del_flush_qp(qp); ocrdma_destroy_qp()
1845 kfree(qp->wqe_wr_id_tbl); ocrdma_destroy_qp()
1846 kfree(qp->rqe_wr_id_tbl); ocrdma_destroy_qp()
1847 kfree(qp); ocrdma_destroy_qp()
1995 static void ocrdma_build_ud_hdr(struct ocrdma_qp *qp, ocrdma_build_ud_hdr() argument
2004 if (qp->qp_type == IB_QPT_GSI) ocrdma_build_ud_hdr()
2005 ud_hdr->qkey = qp->qkey; ocrdma_build_ud_hdr()
2040 static int ocrdma_build_inline_sges(struct ocrdma_qp *qp, ocrdma_build_inline_sges() argument
2048 if (wr->send_flags & IB_SEND_INLINE && qp->qp_type != IB_QPT_UD) { ocrdma_build_inline_sges()
2050 if (unlikely(hdr->total_len > qp->max_inline_data)) { ocrdma_build_inline_sges()
2053 qp->max_inline_data, hdr->total_len); ocrdma_build_inline_sges()
2080 static int ocrdma_build_send(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr, ocrdma_build_send() argument
2087 if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI) { ocrdma_build_send()
2088 ocrdma_build_ud_hdr(qp, hdr, wr); ocrdma_build_send()
2095 status = ocrdma_build_inline_sges(qp, hdr, sge, wr, wqe_size); ocrdma_build_send()
2099 static int ocrdma_build_write(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr, ocrdma_build_write() argument
2107 status = ocrdma_build_inline_sges(qp, hdr, sge, wr, wqe_size); ocrdma_build_write()
2117 static void ocrdma_build_read(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr, ocrdma_build_read() argument
2146 static int ocrdma_build_reg(struct ocrdma_qp *qp, ocrdma_build_reg() argument
2202 static void ocrdma_ring_sq_db(struct ocrdma_qp *qp) ocrdma_ring_sq_db() argument
2204 u32 val = qp->sq.dbid | (1 << OCRDMA_DB_SQ_SHIFT); ocrdma_ring_sq_db()
2206 iowrite32(val, qp->sq_db); ocrdma_ring_sq_db()
2213 struct ocrdma_qp *qp = get_ocrdma_qp(ibqp); ocrdma_post_send() local
2217 spin_lock_irqsave(&qp->q_lock, flags); ocrdma_post_send()
2218 if (qp->state != OCRDMA_QPS_RTS && qp->state != OCRDMA_QPS_SQD) { ocrdma_post_send()
2219 spin_unlock_irqrestore(&qp->q_lock, flags); ocrdma_post_send()
2225 if (qp->qp_type == IB_QPT_UD && ocrdma_post_send()
2232 if (ocrdma_hwq_free_cnt(&qp->sq) == 0 || ocrdma_post_send()
2233 wr->num_sge > qp->sq.max_sges) { ocrdma_post_send()
2238 hdr = ocrdma_hwq_head(&qp->sq); ocrdma_post_send()
2240 if (wr->send_flags & IB_SEND_SIGNALED || qp->signaled) ocrdma_post_send()
2255 ocrdma_build_send(qp, hdr, wr); ocrdma_post_send()
2261 status = ocrdma_build_send(qp, hdr, wr); ocrdma_post_send()
2268 status = ocrdma_build_write(qp, hdr, wr); ocrdma_post_send()
2271 ocrdma_build_read(qp, hdr, wr); ocrdma_post_send()
2282 status = ocrdma_build_reg(qp, hdr, reg_wr(wr)); ocrdma_post_send()
2292 if (wr->send_flags & IB_SEND_SIGNALED || qp->signaled) ocrdma_post_send()
2293 qp->wqe_wr_id_tbl[qp->sq.head].signaled = 1; ocrdma_post_send()
2295 qp->wqe_wr_id_tbl[qp->sq.head].signaled = 0; ocrdma_post_send()
2296 qp->wqe_wr_id_tbl[qp->sq.head].wrid = wr->wr_id; ocrdma_post_send()
2302 ocrdma_ring_sq_db(qp); ocrdma_post_send()
2305 ocrdma_hwq_inc_head(&qp->sq); ocrdma_post_send()
2308 spin_unlock_irqrestore(&qp->q_lock, flags); ocrdma_post_send()
2312 static void ocrdma_ring_rq_db(struct ocrdma_qp *qp) ocrdma_ring_rq_db() argument
2314 u32 val = qp->rq.dbid | (1 << OCRDMA_DB_RQ_SHIFT); ocrdma_ring_rq_db()
2316 iowrite32(val, qp->rq_db); ocrdma_ring_rq_db()
2345 struct ocrdma_qp *qp = get_ocrdma_qp(ibqp); ocrdma_post_recv() local
2348 spin_lock_irqsave(&qp->q_lock, flags); ocrdma_post_recv()
2349 if (qp->state == OCRDMA_QPS_RST || qp->state == OCRDMA_QPS_ERR) { ocrdma_post_recv()
2350 spin_unlock_irqrestore(&qp->q_lock, flags); ocrdma_post_recv()
2355 if (ocrdma_hwq_free_cnt(&qp->rq) == 0 || ocrdma_post_recv()
2356 wr->num_sge > qp->rq.max_sges) { ocrdma_post_recv()
2361 rqe = ocrdma_hwq_head(&qp->rq); ocrdma_post_recv()
2364 qp->rqe_wr_id_tbl[qp->rq.head] = wr->wr_id; ocrdma_post_recv()
2369 ocrdma_ring_rq_db(qp); ocrdma_post_recv()
2372 ocrdma_hwq_inc_head(&qp->rq); ocrdma_post_recv()
2375 spin_unlock_irqrestore(&qp->q_lock, flags); ocrdma_post_recv()
2523 static void ocrdma_update_wc(struct ocrdma_qp *qp, struct ib_wc *ibwc, ocrdma_update_wc() argument
2530 hdr = ocrdma_hwq_head_from_idx(&qp->sq, wqe_idx); ocrdma_update_wc()
2532 ibwc->wr_id = qp->wqe_wr_id_tbl[wqe_idx].wrid; ocrdma_update_wc()
2561 static void ocrdma_set_cqe_status_flushed(struct ocrdma_qp *qp, ocrdma_set_cqe_status_flushed() argument
2573 if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI) { ocrdma_set_cqe_status_flushed()
2594 struct ocrdma_qp *qp, int status) ocrdma_update_err_cqe()
2599 ibwc->qp = &qp->ibqp; ocrdma_update_err_cqe()
2602 ocrdma_flush_qp(qp); ocrdma_update_err_cqe()
2603 ocrdma_qp_state_change(qp, IB_QPS_ERR, NULL); ocrdma_update_err_cqe()
2608 if (!is_hw_rq_empty(qp) || !is_hw_sq_empty(qp)) { ocrdma_update_err_cqe()
2610 ocrdma_set_cqe_status_flushed(qp, cqe); ocrdma_update_err_cqe()
2616 struct ocrdma_qp *qp, int status) ocrdma_update_err_rcqe()
2619 ibwc->wr_id = qp->rqe_wr_id_tbl[qp->rq.tail]; ocrdma_update_err_rcqe()
2620 ocrdma_hwq_inc_tail(&qp->rq); ocrdma_update_err_rcqe()
2622 return ocrdma_update_err_cqe(ibwc, cqe, qp, status); ocrdma_update_err_rcqe()
2626 struct ocrdma_qp *qp, int status) ocrdma_update_err_scqe()
2628 ocrdma_update_wc(qp, ibwc, qp->sq.tail); ocrdma_update_err_scqe()
2629 ocrdma_hwq_inc_tail(&qp->sq); ocrdma_update_err_scqe()
2631 return ocrdma_update_err_cqe(ibwc, cqe, qp, status); ocrdma_update_err_scqe()
2635 static bool ocrdma_poll_err_scqe(struct ocrdma_qp *qp, ocrdma_poll_err_scqe() argument
2640 struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device); ocrdma_poll_err_scqe()
2649 if (is_hw_sq_empty(qp) && !is_hw_rq_empty(qp)) { ocrdma_poll_err_scqe()
2653 if (!qp->srq && (qp->sq_cq == qp->rq_cq)) { ocrdma_poll_err_scqe()
2656 expand = ocrdma_update_err_rcqe(ibwc, cqe, qp, status); ocrdma_poll_err_scqe()
2667 } else if (is_hw_sq_empty(qp)) { ocrdma_poll_err_scqe()
2674 expand = ocrdma_update_err_scqe(ibwc, cqe, qp, status); ocrdma_poll_err_scqe()
2679 static bool ocrdma_poll_success_scqe(struct ocrdma_qp *qp, ocrdma_poll_success_scqe() argument
2684 int tail = qp->sq.tail; ocrdma_poll_success_scqe()
2687 if (!qp->wqe_wr_id_tbl[tail].signaled) { ocrdma_poll_success_scqe()
2692 ibwc->qp = &qp->ibqp; ocrdma_poll_success_scqe()
2693 ocrdma_update_wc(qp, ibwc, tail); ocrdma_poll_success_scqe()
2697 OCRDMA_CQE_WQEIDX_MASK) & qp->sq.max_wqe_idx; ocrdma_poll_success_scqe()
2701 ocrdma_hwq_inc_tail(&qp->sq); ocrdma_poll_success_scqe()
2705 static bool ocrdma_poll_scqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe, ocrdma_poll_scqe() argument
2715 expand = ocrdma_poll_success_scqe(qp, cqe, ibwc, polled); ocrdma_poll_scqe()
2717 expand = ocrdma_poll_err_scqe(qp, cqe, ibwc, polled, stop); ocrdma_poll_scqe()
2739 struct ocrdma_qp *qp) ocrdma_update_free_srq_cqe()
2745 srq = get_ocrdma_srq(qp->ibqp.srq); ocrdma_update_free_srq_cqe()
2758 static bool ocrdma_poll_err_rcqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe, ocrdma_poll_err_rcqe() argument
2763 struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device); ocrdma_poll_err_rcqe()
2771 if (is_hw_rq_empty(qp) && !is_hw_sq_empty(qp)) { ocrdma_poll_err_rcqe()
2772 if (!qp->srq && (qp->sq_cq == qp->rq_cq)) { ocrdma_poll_err_rcqe()
2775 expand = ocrdma_update_err_scqe(ibwc, cqe, qp, status); ocrdma_poll_err_rcqe()
2781 } else if (is_hw_rq_empty(qp)) { ocrdma_poll_err_rcqe()
2788 expand = ocrdma_update_err_rcqe(ibwc, cqe, qp, status); ocrdma_poll_err_rcqe()
2793 static void ocrdma_poll_success_rcqe(struct ocrdma_qp *qp, ocrdma_poll_success_rcqe() argument
2797 ibwc->qp = &qp->ibqp; ocrdma_poll_success_rcqe()
2800 if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI) ocrdma_poll_success_rcqe()
2816 if (qp->ibqp.srq) { ocrdma_poll_success_rcqe()
2817 ocrdma_update_free_srq_cqe(ibwc, cqe, qp); ocrdma_poll_success_rcqe()
2819 ibwc->wr_id = qp->rqe_wr_id_tbl[qp->rq.tail]; ocrdma_poll_success_rcqe()
2820 ocrdma_hwq_inc_tail(&qp->rq); ocrdma_poll_success_rcqe()
2824 static bool ocrdma_poll_rcqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe, ocrdma_poll_rcqe() argument
2831 if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI) { ocrdma_poll_rcqe()
2842 ocrdma_poll_success_rcqe(qp, cqe, ibwc); ocrdma_poll_rcqe()
2844 expand = ocrdma_poll_err_rcqe(qp, cqe, ibwc, polled, stop, ocrdma_poll_rcqe()
2869 struct ocrdma_qp *qp = NULL; ocrdma_poll_hwcq() local
2884 qp = dev->qp_tbl[qpn]; ocrdma_poll_hwcq()
2885 BUG_ON(qp == NULL); ocrdma_poll_hwcq()
2888 expand = ocrdma_poll_scqe(qp, cqe, ibwc, &polled, ocrdma_poll_hwcq()
2891 expand = ocrdma_poll_rcqe(qp, cqe, ibwc, &polled, ocrdma_poll_hwcq()
2926 struct ocrdma_qp *qp, struct ib_wc *ibwc) ocrdma_add_err_cqe()
2931 if (is_hw_sq_empty(qp) && is_hw_rq_empty(qp)) ocrdma_add_err_cqe()
2933 if (!is_hw_sq_empty(qp) && qp->sq_cq == cq) { ocrdma_add_err_cqe()
2934 ocrdma_update_wc(qp, ibwc, qp->sq.tail); ocrdma_add_err_cqe()
2935 ocrdma_hwq_inc_tail(&qp->sq); ocrdma_add_err_cqe()
2936 } else if (!is_hw_rq_empty(qp) && qp->rq_cq == cq) { ocrdma_add_err_cqe()
2937 ibwc->wr_id = qp->rqe_wr_id_tbl[qp->rq.tail]; ocrdma_add_err_cqe()
2938 ocrdma_hwq_inc_tail(&qp->rq); ocrdma_add_err_cqe()
2957 struct ocrdma_qp *qp; ocrdma_poll_cq() local
2968 /* adapter returns single error cqe when qp moves to ocrdma_poll_cq()
2974 list_for_each_entry(qp, &cq->sq_head, sq_entry) { ocrdma_poll_cq()
2977 err_cqes = ocrdma_add_err_cqe(cq, cqes_to_poll, qp, wc); ocrdma_poll_cq()
2593 ocrdma_update_err_cqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe, struct ocrdma_qp *qp, int status) ocrdma_update_err_cqe() argument
2615 ocrdma_update_err_rcqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe, struct ocrdma_qp *qp, int status) ocrdma_update_err_rcqe() argument
2625 ocrdma_update_err_scqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe, struct ocrdma_qp *qp, int status) ocrdma_update_err_scqe() argument
2737 ocrdma_update_free_srq_cqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe, struct ocrdma_qp *qp) ocrdma_update_free_srq_cqe() argument
2925 ocrdma_add_err_cqe(struct ocrdma_cq *cq, int num_entries, struct ocrdma_qp *qp, struct ib_wc *ibwc) ocrdma_add_err_cqe() argument
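Editor's note: the is_hw_sq_empty()/is_hw_rq_empty() hits and the ocrdma_hwq_inc_head()/ocrdma_hwq_inc_tail() calls above treat each work queue as a ring with a producer head (advanced on post) and a consumer tail (advanced when the CQE is polled); the queue is empty when they meet. The sketch below models that bookkeeping with free-running counters instead of the driver's wrapped indices; struct sketch_hwq and its helpers are hypothetical.

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the driver's queue bookkeeping (ocrdma_qp->sq / ->rq). */
struct sketch_hwq {
	unsigned int head;     /* producer index, bumped on post       */
	unsigned int tail;     /* consumer index, bumped on completion */
	unsigned int max_cnt;  /* number of slots in the ring          */
};

static bool hwq_empty(const struct sketch_hwq *q)
{
	return q->head == q->tail;   /* is_hw_sq_empty()/is_hw_rq_empty() */
}

static unsigned int hwq_free_cnt(const struct sketch_hwq *q)
{
	/* valid while head - tail never exceeds max_cnt */
	return q->max_cnt - (q->head - q->tail);
}

static void hwq_inc_head(struct sketch_hwq *q) { q->head++; }  /* post */
static void hwq_inc_tail(struct sketch_hwq *q) { q->tail++; }  /* poll */

int main(void)
{
	struct sketch_hwq sq = { 0, 0, 4 };

	printf("empty=%d free=%u\n", hwq_empty(&sq), hwq_free_cnt(&sq));
	hwq_inc_head(&sq);                       /* one WQE posted       */
	printf("empty=%d free=%u\n", hwq_empty(&sq), hwq_free_cnt(&sq));
	hwq_inc_tail(&sq);                       /* its CQE was polled   */
	printf("empty=%d free=%u\n", hwq_empty(&sq), hwq_free_cnt(&sq));
	return 0;
}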
H A Docrdma_hw.c662 struct ocrdma_qp *qp) ocrdma_process_qpcat_error()
667 if (qp == NULL) ocrdma_process_qpcat_error()
669 ocrdma_qp_state_change(qp, new_ib_qps, &old_ib_qps); ocrdma_process_qpcat_error()
675 struct ocrdma_qp *qp = NULL; ocrdma_dispatch_ibevent() local
688 * Some FW version returns wrong qp or cq ids in CQEs. ocrdma_dispatch_ibevent()
694 qp = dev->qp_tbl[qpid]; ocrdma_dispatch_ibevent()
695 if (qp == NULL) { ocrdma_dispatch_ibevent()
730 ib_evt.element.qp = &qp->ibqp; ocrdma_dispatch_ibevent()
732 ocrdma_process_qpcat_error(dev, qp); ocrdma_dispatch_ibevent()
735 ib_evt.element.qp = &qp->ibqp; ocrdma_dispatch_ibevent()
739 ib_evt.element.qp = &qp->ibqp; ocrdma_dispatch_ibevent()
743 ib_evt.element.qp = &qp->ibqp; ocrdma_dispatch_ibevent()
753 ib_evt.element.srq = &qp->srq->ibsrq; ocrdma_dispatch_ibevent()
759 ib_evt.element.srq = &qp->srq->ibsrq; ocrdma_dispatch_ibevent()
765 ib_evt.element.qp = &qp->ibqp; ocrdma_dispatch_ibevent()
781 if (qp->ibqp.event_handler) ocrdma_dispatch_ibevent()
782 qp->ibqp.event_handler(&ib_evt, qp->ibqp.qp_context); ocrdma_dispatch_ibevent()
787 if (qp->srq->ibsrq.event_handler) ocrdma_dispatch_ibevent()
788 qp->srq->ibsrq.event_handler(&ib_evt, ocrdma_dispatch_ibevent()
789 qp->srq->ibsrq. ocrdma_dispatch_ibevent()
902 struct ocrdma_qp *qp; _ocrdma_qp_buddy_cq_handler() local
909 qp = list_entry(cur, struct ocrdma_qp, sq_entry); list_for_each()
911 qp = list_entry(cur, struct ocrdma_qp, rq_entry); list_for_each()
913 if (qp->srq) list_for_each()
918 if (qp->sq_cq == qp->rq_cq) list_for_each()
923 if (qp->sq_cq == cq) list_for_each()
924 bcq = qp->rq_cq; list_for_each()
926 bcq = qp->sq_cq; list_for_each()
2085 bool ocrdma_is_qp_in_sq_flushlist(struct ocrdma_cq *cq, struct ocrdma_qp *qp) ocrdma_is_qp_in_sq_flushlist() argument
2090 if (qp == tmp) { ocrdma_is_qp_in_sq_flushlist()
2098 bool ocrdma_is_qp_in_rq_flushlist(struct ocrdma_cq *cq, struct ocrdma_qp *qp) ocrdma_is_qp_in_rq_flushlist() argument
2103 if (qp == tmp) { ocrdma_is_qp_in_rq_flushlist()
2111 void ocrdma_flush_qp(struct ocrdma_qp *qp) ocrdma_flush_qp() argument
2115 struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device); ocrdma_flush_qp()
2118 found = ocrdma_is_qp_in_sq_flushlist(qp->sq_cq, qp); ocrdma_flush_qp()
2120 list_add_tail(&qp->sq_entry, &qp->sq_cq->sq_head); ocrdma_flush_qp()
2121 if (!qp->srq) { ocrdma_flush_qp()
2122 found = ocrdma_is_qp_in_rq_flushlist(qp->rq_cq, qp); ocrdma_flush_qp()
2124 list_add_tail(&qp->rq_entry, &qp->rq_cq->rq_head); ocrdma_flush_qp()
2129 static void ocrdma_init_hwq_ptr(struct ocrdma_qp *qp) ocrdma_init_hwq_ptr() argument
2131 qp->sq.head = 0; ocrdma_init_hwq_ptr()
2132 qp->sq.tail = 0; ocrdma_init_hwq_ptr()
2133 qp->rq.head = 0; ocrdma_init_hwq_ptr()
2134 qp->rq.tail = 0; ocrdma_init_hwq_ptr()
2137 int ocrdma_qp_state_change(struct ocrdma_qp *qp, enum ib_qp_state new_ib_state, ocrdma_qp_state_change() argument
2146 spin_lock_irqsave(&qp->q_lock, flags); ocrdma_qp_state_change()
2149 *old_ib_state = get_ibqp_state(qp->state); ocrdma_qp_state_change()
2150 if (new_state == qp->state) { ocrdma_qp_state_change()
2151 spin_unlock_irqrestore(&qp->q_lock, flags); ocrdma_qp_state_change()
2157 ocrdma_init_hwq_ptr(qp); ocrdma_qp_state_change()
2158 ocrdma_del_flush_qp(qp); ocrdma_qp_state_change()
2160 ocrdma_flush_qp(qp); ocrdma_qp_state_change()
2163 qp->state = new_state; ocrdma_qp_state_change()
2165 spin_unlock_irqrestore(&qp->q_lock, flags); ocrdma_qp_state_change()
2169 static u32 ocrdma_set_create_qp_mbx_access_flags(struct ocrdma_qp *qp) ocrdma_set_create_qp_mbx_access_flags() argument
2172 if (qp->cap_flags & OCRDMA_QP_INB_RD) ocrdma_set_create_qp_mbx_access_flags()
2174 if (qp->cap_flags & OCRDMA_QP_INB_WR) ocrdma_set_create_qp_mbx_access_flags()
2176 if (qp->cap_flags & OCRDMA_QP_MW_BIND) ocrdma_set_create_qp_mbx_access_flags()
2178 if (qp->cap_flags & OCRDMA_QP_LKEY0) ocrdma_set_create_qp_mbx_access_flags()
2180 if (qp->cap_flags & OCRDMA_QP_FAST_REG) ocrdma_set_create_qp_mbx_access_flags()
2187 struct ocrdma_qp *qp) ocrdma_set_create_qp_sq_cmd()
2192 struct ocrdma_pd *pd = qp->pd; ocrdma_set_create_qp_sq_cmd()
2209 qp->sq.max_cnt = max_wqe_allocated; ocrdma_set_create_qp_sq_cmd()
2212 qp->sq.va = dma_alloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL); ocrdma_set_create_qp_sq_cmd()
2213 if (!qp->sq.va) ocrdma_set_create_qp_sq_cmd()
2215 memset(qp->sq.va, 0, len); ocrdma_set_create_qp_sq_cmd()
2216 qp->sq.len = len; ocrdma_set_create_qp_sq_cmd()
2217 qp->sq.pa = pa; ocrdma_set_create_qp_sq_cmd()
2218 qp->sq.entry_size = dev->attr.wqe_size; ocrdma_set_create_qp_sq_cmd()
2232 cmd->max_wqe_rqe |= (ilog2(qp->sq.max_cnt) << ocrdma_set_create_qp_sq_cmd()
2243 struct ocrdma_qp *qp) ocrdma_set_create_qp_rq_cmd()
2248 struct ocrdma_pd *pd = qp->pd; ocrdma_set_create_qp_rq_cmd()
2260 qp->rq.max_cnt = max_rqe_allocated; ocrdma_set_create_qp_rq_cmd()
2263 qp->rq.va = dma_alloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL); ocrdma_set_create_qp_rq_cmd()
2264 if (!qp->rq.va) ocrdma_set_create_qp_rq_cmd()
2266 memset(qp->rq.va, 0, len); ocrdma_set_create_qp_rq_cmd()
2267 qp->rq.pa = pa; ocrdma_set_create_qp_rq_cmd()
2268 qp->rq.len = len; ocrdma_set_create_qp_rq_cmd()
2269 qp->rq.entry_size = dev->attr.rqe_size; ocrdma_set_create_qp_rq_cmd()
2280 cmd->max_wqe_rqe |= (ilog2(qp->rq.max_cnt) << ocrdma_set_create_qp_rq_cmd()
2291 struct ocrdma_qp *qp, ocrdma_set_create_qp_dpp_cmd()
2295 qp->dpp_enabled = true; ocrdma_set_create_qp_dpp_cmd()
2306 struct ocrdma_qp *qp) ocrdma_set_create_qp_ird_cmd()
2308 struct ocrdma_pd *pd = qp->pd; ocrdma_set_create_qp_ird_cmd()
2320 qp->ird_q_va = dma_alloc_coherent(&pdev->dev, ird_q_len, ocrdma_set_create_qp_ird_cmd()
2322 if (!qp->ird_q_va) ocrdma_set_create_qp_ird_cmd()
2324 memset(qp->ird_q_va, 0, ird_q_len); ocrdma_set_create_qp_ird_cmd()
2328 rqe = (struct ocrdma_hdr_wqe *)(qp->ird_q_va + ocrdma_set_create_qp_ird_cmd()
2340 struct ocrdma_qp *qp, ocrdma_get_create_qp_rsp()
2345 qp->id = rsp->qp_id & OCRDMA_CREATE_QP_RSP_QP_ID_MASK; ocrdma_get_create_qp_rsp()
2346 qp->rq.dbid = rsp->sq_rq_id & OCRDMA_CREATE_QP_RSP_RQ_ID_MASK; ocrdma_get_create_qp_rsp()
2347 qp->sq.dbid = rsp->sq_rq_id >> OCRDMA_CREATE_QP_RSP_SQ_ID_SHIFT; ocrdma_get_create_qp_rsp()
2348 qp->max_ird = rsp->max_ord_ird & OCRDMA_CREATE_QP_RSP_MAX_IRD_MASK; ocrdma_get_create_qp_rsp()
2349 qp->max_ord = (rsp->max_ord_ird >> OCRDMA_CREATE_QP_RSP_MAX_ORD_SHIFT); ocrdma_get_create_qp_rsp()
2350 qp->dpp_enabled = false; ocrdma_get_create_qp_rsp()
2352 qp->dpp_enabled = true; ocrdma_get_create_qp_rsp()
2365 qp->sq.max_cnt = max_wqe_allocated; ocrdma_get_create_qp_rsp()
2366 qp->sq.max_wqe_idx = max_wqe_allocated - 1; ocrdma_get_create_qp_rsp()
2369 qp->rq.max_cnt = max_rqe_allocated; ocrdma_get_create_qp_rsp()
2370 qp->rq.max_wqe_idx = max_rqe_allocated - 1; ocrdma_get_create_qp_rsp()
2374 int ocrdma_mbx_create_qp(struct ocrdma_qp *qp, struct ib_qp_init_attr *attrs, ocrdma_mbx_create_qp() argument
2380 struct ocrdma_pd *pd = qp->pd; ocrdma_mbx_create_qp()
2407 status = ocrdma_set_create_qp_sq_cmd(cmd, attrs, qp); ocrdma_mbx_create_qp()
2415 qp->srq = srq; ocrdma_mbx_create_qp()
2417 status = ocrdma_set_create_qp_rq_cmd(cmd, attrs, qp); ocrdma_mbx_create_qp()
2422 status = ocrdma_set_create_qp_ird_cmd(cmd, qp); ocrdma_mbx_create_qp()
2429 flags = ocrdma_set_create_qp_mbx_access_flags(qp); ocrdma_mbx_create_qp()
2441 qp->sq_cq = cq; ocrdma_mbx_create_qp()
2445 qp->rq_cq = cq; ocrdma_mbx_create_qp()
2449 ocrdma_set_create_qp_dpp_cmd(cmd, pd, qp, enable_dpp_cq, ocrdma_mbx_create_qp()
2457 ocrdma_get_create_qp_rsp(rsp, qp, attrs, dpp_offset, dpp_credit_lmt); ocrdma_mbx_create_qp()
2458 qp->state = OCRDMA_QPS_RST; ocrdma_mbx_create_qp()
2462 if (qp->rq.va) ocrdma_mbx_create_qp()
2463 dma_free_coherent(&pdev->dev, qp->rq.len, qp->rq.va, qp->rq.pa); ocrdma_mbx_create_qp()
2466 dma_free_coherent(&pdev->dev, qp->sq.len, qp->sq.va, qp->sq.pa); ocrdma_mbx_create_qp()
2473 int ocrdma_mbx_query_qp(struct ocrdma_dev *dev, struct ocrdma_qp *qp, ocrdma_mbx_query_qp() argument
2483 cmd->qp_id = qp->id; ocrdma_mbx_query_qp()
2494 static int ocrdma_set_av_params(struct ocrdma_qp *qp, ocrdma_set_av_params() argument
2505 struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device); ocrdma_set_av_params()
2534 qp->sgid_idx = ah_attr->grh.sgid_index; ocrdma_set_av_params()
2565 static int ocrdma_set_qp_params(struct ocrdma_qp *qp, ocrdma_set_qp_params() argument
2570 struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device); ocrdma_set_qp_params()
2578 qp->qkey = attrs->qkey; ocrdma_set_qp_params()
2583 status = ocrdma_set_av_params(qp, cmd, attrs, attr_mask); ocrdma_set_qp_params()
2586 } else if (qp->qp_type == IB_QPT_GSI || qp->qp_type == IB_QPT_UD) { ocrdma_set_qp_params()
2656 qp->max_ord = attrs->max_rd_atomic; ocrdma_set_qp_params()
2664 qp->max_ird = attrs->max_dest_rd_atomic; ocrdma_set_qp_params()
2667 cmd->params.max_ord_ird = (qp->max_ord << ocrdma_set_qp_params()
2669 (qp->max_ird & OCRDMA_QP_PARAMS_MAX_IRD_MASK); ocrdma_set_qp_params()
2674 int ocrdma_mbx_modify_qp(struct ocrdma_dev *dev, struct ocrdma_qp *qp, ocrdma_mbx_modify_qp() argument
2684 cmd->params.id = qp->id; ocrdma_mbx_modify_qp()
2694 (qp->state << OCRDMA_QP_PARAMS_STATE_SHIFT) & ocrdma_mbx_modify_qp()
2698 status = ocrdma_set_qp_params(qp, cmd, attrs, attr_mask); ocrdma_mbx_modify_qp()
2710 int ocrdma_mbx_destroy_qp(struct ocrdma_dev *dev, struct ocrdma_qp *qp) ocrdma_mbx_destroy_qp() argument
2719 cmd->qp_id = qp->id; ocrdma_mbx_destroy_qp()
2726 if (qp->sq.va) ocrdma_mbx_destroy_qp()
2727 dma_free_coherent(&pdev->dev, qp->sq.len, qp->sq.va, qp->sq.pa); ocrdma_mbx_destroy_qp()
2728 if (!qp->srq && qp->rq.va) ocrdma_mbx_destroy_qp()
2729 dma_free_coherent(&pdev->dev, qp->rq.len, qp->rq.va, qp->rq.pa); ocrdma_mbx_destroy_qp()
2730 if (qp->dpp_enabled) ocrdma_mbx_destroy_qp()
2731 qp->pd->num_dpp_qp++; ocrdma_mbx_destroy_qp()
661 ocrdma_process_qpcat_error(struct ocrdma_dev *dev, struct ocrdma_qp *qp) ocrdma_process_qpcat_error() argument
2185 ocrdma_set_create_qp_sq_cmd(struct ocrdma_create_qp_req *cmd, struct ib_qp_init_attr *attrs, struct ocrdma_qp *qp) ocrdma_set_create_qp_sq_cmd() argument
2241 ocrdma_set_create_qp_rq_cmd(struct ocrdma_create_qp_req *cmd, struct ib_qp_init_attr *attrs, struct ocrdma_qp *qp) ocrdma_set_create_qp_rq_cmd() argument
2289 ocrdma_set_create_qp_dpp_cmd(struct ocrdma_create_qp_req *cmd, struct ocrdma_pd *pd, struct ocrdma_qp *qp, u8 enable_dpp_cq, u16 dpp_cq_id) ocrdma_set_create_qp_dpp_cmd() argument
2305 ocrdma_set_create_qp_ird_cmd(struct ocrdma_create_qp_req *cmd, struct ocrdma_qp *qp) ocrdma_set_create_qp_ird_cmd() argument
2339 ocrdma_get_create_qp_rsp(struct ocrdma_create_qp_rsp *rsp, struct ocrdma_qp *qp, struct ib_qp_init_attr *attrs, u16 *dpp_offset, u16 *dpp_credit_lmt) ocrdma_get_create_qp_rsp() argument
ocrdma_verbs.h:107 void ocrdma_del_flush_qp(struct ocrdma_qp *qp);
/linux-4.4.14/drivers/infiniband/core/
verbs.c:536 struct ib_qp *qp = context; __ib_shared_qp_event_handler() local
539 spin_lock_irqsave(&qp->device->event_handler_lock, flags); __ib_shared_qp_event_handler()
540 list_for_each_entry(event->element.qp, &qp->open_list, open_list) __ib_shared_qp_event_handler()
541 if (event->element.qp->event_handler) __ib_shared_qp_event_handler()
542 event->element.qp->event_handler(event, event->element.qp->qp_context); __ib_shared_qp_event_handler()
543 spin_unlock_irqrestore(&qp->device->event_handler_lock, flags); __ib_shared_qp_event_handler()
546 static void __ib_insert_xrcd_qp(struct ib_xrcd *xrcd, struct ib_qp *qp) __ib_insert_xrcd_qp() argument
549 list_add(&qp->xrcd_list, &xrcd->tgt_qp_list); __ib_insert_xrcd_qp()
557 struct ib_qp *qp; __ib_open_qp() local
560 qp = kzalloc(sizeof *qp, GFP_KERNEL); __ib_open_qp()
561 if (!qp) __ib_open_qp()
564 qp->real_qp = real_qp; __ib_open_qp()
566 qp->device = real_qp->device; __ib_open_qp()
567 qp->event_handler = event_handler; __ib_open_qp()
568 qp->qp_context = qp_context; __ib_open_qp()
569 qp->qp_num = real_qp->qp_num; __ib_open_qp()
570 qp->qp_type = real_qp->qp_type; __ib_open_qp()
573 list_add(&qp->open_list, &real_qp->open_list); __ib_open_qp()
576 return qp; __ib_open_qp()
582 struct ib_qp *qp, *real_qp; ib_open_qp() local
587 qp = ERR_PTR(-EINVAL); ib_open_qp()
591 qp = __ib_open_qp(real_qp, qp_open_attr->event_handler, ib_open_qp()
597 return qp; ib_open_qp()
604 struct ib_qp *qp, *real_qp; ib_create_qp() local
608 qp = device->create_qp(pd, qp_init_attr, NULL); ib_create_qp()
610 if (!IS_ERR(qp)) { ib_create_qp()
611 qp->device = device; ib_create_qp()
612 qp->real_qp = qp; ib_create_qp()
613 qp->uobject = NULL; ib_create_qp()
614 qp->qp_type = qp_init_attr->qp_type; ib_create_qp()
616 atomic_set(&qp->usecnt, 0); ib_create_qp()
618 qp->event_handler = __ib_shared_qp_event_handler; ib_create_qp()
619 qp->qp_context = qp; ib_create_qp()
620 qp->pd = NULL; ib_create_qp()
621 qp->send_cq = qp->recv_cq = NULL; ib_create_qp()
622 qp->srq = NULL; ib_create_qp()
623 qp->xrcd = qp_init_attr->xrcd; ib_create_qp()
625 INIT_LIST_HEAD(&qp->open_list); ib_create_qp()
627 real_qp = qp; ib_create_qp()
628 qp = __ib_open_qp(real_qp, qp_init_attr->event_handler, ib_create_qp()
630 if (!IS_ERR(qp)) ib_create_qp()
635 qp->event_handler = qp_init_attr->event_handler; ib_create_qp()
636 qp->qp_context = qp_init_attr->qp_context; ib_create_qp()
638 qp->recv_cq = NULL; ib_create_qp()
639 qp->srq = NULL; ib_create_qp()
641 qp->recv_cq = qp_init_attr->recv_cq; ib_create_qp()
643 qp->srq = qp_init_attr->srq; ib_create_qp()
644 if (qp->srq) ib_create_qp()
648 qp->pd = pd; ib_create_qp()
649 qp->send_cq = qp_init_attr->send_cq; ib_create_qp()
650 qp->xrcd = NULL; ib_create_qp()
657 return qp; ib_create_qp()
997 int ib_resolve_eth_dmac(struct ib_qp *qp, ib_resolve_eth_dmac() argument
1003 if (qp_attr->ah_attr.port_num < rdma_start_port(qp->device) || ib_resolve_eth_dmac()
1004 qp_attr->ah_attr.port_num > rdma_end_port(qp->device)) ib_resolve_eth_dmac()
1007 if (!rdma_cap_eth_ah(qp->device, qp_attr->ah_attr.port_num)) ib_resolve_eth_dmac()
1018 ret = ib_query_gid(qp->device, ib_resolve_eth_dmac()
1045 int ib_modify_qp(struct ib_qp *qp, ib_modify_qp() argument
1051 ret = ib_resolve_eth_dmac(qp, qp_attr, &qp_attr_mask); ib_modify_qp()
1055 return qp->device->modify_qp(qp->real_qp, qp_attr, qp_attr_mask, NULL); ib_modify_qp()
1059 int ib_query_qp(struct ib_qp *qp, ib_query_qp() argument
1064 return qp->device->query_qp ? ib_query_qp()
1065 qp->device->query_qp(qp->real_qp, qp_attr, qp_attr_mask, qp_init_attr) : ib_query_qp()
1070 int ib_close_qp(struct ib_qp *qp) ib_close_qp() argument
1075 real_qp = qp->real_qp; ib_close_qp()
1076 if (real_qp == qp) ib_close_qp()
1080 list_del(&qp->open_list); ib_close_qp()
1084 kfree(qp); ib_close_qp()
1090 static int __ib_destroy_shared_qp(struct ib_qp *qp) __ib_destroy_shared_qp() argument
1096 real_qp = qp->real_qp; __ib_destroy_shared_qp()
1100 ib_close_qp(qp); __ib_destroy_shared_qp()
1118 int ib_destroy_qp(struct ib_qp *qp) ib_destroy_qp() argument
1125 if (atomic_read(&qp->usecnt)) ib_destroy_qp()
1128 if (qp->real_qp != qp) ib_destroy_qp()
1129 return __ib_destroy_shared_qp(qp); ib_destroy_qp()
1131 pd = qp->pd; ib_destroy_qp()
1132 scq = qp->send_cq; ib_destroy_qp()
1133 rcq = qp->recv_cq; ib_destroy_qp()
1134 srq = qp->srq; ib_destroy_qp()
1136 ret = qp->device->destroy_qp(qp); ib_destroy_qp()
1369 int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid) ib_attach_mcast() argument
1373 if (!qp->device->attach_mcast) ib_attach_mcast()
1375 if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UD) ib_attach_mcast()
1378 ret = qp->device->attach_mcast(qp, gid, lid); ib_attach_mcast()
1380 atomic_inc(&qp->usecnt); ib_attach_mcast()
1385 int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid) ib_detach_mcast() argument
1389 if (!qp->device->detach_mcast) ib_detach_mcast()
1391 if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UD) ib_detach_mcast()
1394 ret = qp->device->detach_mcast(qp, gid, lid); ib_detach_mcast()
1396 atomic_dec(&qp->usecnt); ib_detach_mcast()
1423 struct ib_qp *qp; ib_dealloc_xrcd() local
1430 qp = list_entry(xrcd->tgt_qp_list.next, struct ib_qp, xrcd_list); ib_dealloc_xrcd()
1431 ret = ib_destroy_qp(qp); ib_dealloc_xrcd()
1440 struct ib_flow *ib_create_flow(struct ib_qp *qp, ib_create_flow() argument
1445 if (!qp->device->create_flow) ib_create_flow()
1448 flow_id = qp->device->create_flow(qp, flow_attr, domain); ib_create_flow()
1450 atomic_inc(&qp->usecnt); ib_create_flow()
1458 struct ib_qp *qp = flow_id->qp; ib_destroy_flow() local
1460 err = qp->device->destroy_flow(flow_id); ib_destroy_flow()
1462 atomic_dec(&qp->usecnt); ib_destroy_flow()
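The verbs.c hits above cover the full life cycle of a kernel QP: ib_create_qp() wraps the driver's create_qp() and fills in the generic fields, ib_modify_qp() resolves the Ethernet DMAC when needed and forwards to the driver, and ib_destroy_qp()/ib_close_qp() tear the QP (or a shared XRC handle) back down. Below is a minimal, illustrative sketch of a kernel caller of that path; the protection domain and completion queue ("pd", "cq") are assumed to be allocated elsewhere, and the function name and sizing values are hypothetical.

/* Illustrative only: minimal use of the ib_create_qp()/ib_modify_qp() path above. */
#include <linux/err.h>
#include <rdma/ib_verbs.h>

static struct ib_qp *example_create_rc_qp(struct ib_pd *pd, struct ib_cq *cq)
{
	struct ib_qp_init_attr init_attr = {
		.send_cq      = cq,
		.recv_cq      = cq,
		.qp_type      = IB_QPT_RC,
		.sq_sig_type  = IB_SIGNAL_REQ_WR,
		.cap = {
			.max_send_wr  = 16,
			.max_recv_wr  = 16,
			.max_send_sge = 1,
			.max_recv_sge = 1,
		},
	};
	struct ib_qp_attr attr = {
		.qp_state	 = IB_QPS_INIT,
		.pkey_index	 = 0,
		.port_num	 = 1,
		.qp_access_flags = IB_ACCESS_REMOTE_WRITE,
	};
	struct ib_qp *qp;
	int ret;

	qp = ib_create_qp(pd, &init_attr);	/* ends up in device->create_qp() */
	if (IS_ERR(qp))
		return qp;

	/* RESET -> INIT; the later RTR/RTS transitions need path data from a CM */
	ret = ib_modify_qp(qp, &attr, IB_QP_STATE | IB_QP_PKEY_INDEX |
				      IB_QP_PORT | IB_QP_ACCESS_FLAGS);
	if (ret) {
		ib_destroy_qp(qp);
		return ERR_PTR(ret);
	}
	return qp;
}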
iwcm.c:245 static int iwcm_modify_qp_err(struct ib_qp *qp) iwcm_modify_qp_err() argument
249 if (!qp) iwcm_modify_qp_err()
253 return ib_modify_qp(qp, &qp_attr, IB_QP_STATE); iwcm_modify_qp_err()
260 static int iwcm_modify_qp_sqd(struct ib_qp *qp) iwcm_modify_qp_sqd() argument
264 BUG_ON(qp == NULL); iwcm_modify_qp_sqd()
266 return ib_modify_qp(qp, &qp_attr, IB_QP_STATE); iwcm_modify_qp_sqd()
286 struct ib_qp *qp = NULL; iw_cm_disconnect() local
299 if (cm_id_priv->qp) iw_cm_disconnect()
300 qp = cm_id_priv->qp; iw_cm_disconnect()
325 if (qp) { iw_cm_disconnect()
327 ret = iwcm_modify_qp_err(qp); iw_cm_disconnect()
329 ret = iwcm_modify_qp_sqd(qp); iw_cm_disconnect()
374 (void)iwcm_modify_qp_err(cm_id_priv->qp); destroy_cm_id()
399 if (cm_id_priv->qp) { destroy_cm_id()
400 cm_id_priv->id.device->iwcm->rem_ref(cm_id_priv->qp); destroy_cm_id()
401 cm_id_priv->qp = NULL; destroy_cm_id()
516 struct ib_qp *qp; iw_cm_accept() local
531 qp = cm_id->device->iwcm->get_qp(cm_id->device, iw_param->qpn); iw_cm_accept()
532 if (!qp) { iw_cm_accept()
538 cm_id->device->iwcm->add_ref(qp); iw_cm_accept()
539 cm_id_priv->qp = qp; iw_cm_accept()
548 if (cm_id_priv->qp) { iw_cm_accept()
549 cm_id->device->iwcm->rem_ref(qp); iw_cm_accept()
550 cm_id_priv->qp = NULL; iw_cm_accept()
573 struct ib_qp *qp; iw_cm_connect() local
592 qp = cm_id->device->iwcm->get_qp(cm_id->device, iw_param->qpn); iw_cm_connect()
593 if (!qp) { iw_cm_connect()
599 cm_id->device->iwcm->add_ref(qp); iw_cm_connect()
600 cm_id_priv->qp = qp; iw_cm_connect()
607 if (cm_id_priv->qp) { iw_cm_connect()
608 cm_id->device->iwcm->rem_ref(qp); iw_cm_connect()
609 cm_id_priv->qp = NULL; iw_cm_connect()
761 cm_id_priv->id.device->iwcm->rem_ref(cm_id_priv->qp); cm_conn_rep_handler()
762 cm_id_priv->qp = NULL; cm_conn_rep_handler()
811 if (cm_id_priv->qp) { cm_close_handler()
812 cm_id_priv->id.device->iwcm->rem_ref(cm_id_priv->qp); cm_close_handler()
813 cm_id_priv->qp = NULL; cm_close_handler()
uverbs_cmd.c:254 static void put_qp_read(struct ib_qp *qp) put_qp_read() argument
256 put_uobj_read(qp->uobject); put_qp_read()
259 static void put_qp_write(struct ib_qp *qp) put_qp_write() argument
261 put_uobj_write(qp->uobject); put_qp_write()
1602 tmp.qp_num = wc->qp->qp_num; copy_wc_to_user()
1763 struct ib_qp *qp; create_qp() local
1862 qp = ib_create_qp(pd, &attr); create_qp()
1864 qp = device->create_qp(pd, &attr, uhw); create_qp()
1866 if (IS_ERR(qp)) { create_qp()
1867 ret = PTR_ERR(qp); create_qp()
1872 qp->real_qp = qp; create_qp()
1873 qp->device = device; create_qp()
1874 qp->pd = pd; create_qp()
1875 qp->send_cq = attr.send_cq; create_qp()
1876 qp->recv_cq = attr.recv_cq; create_qp()
1877 qp->srq = attr.srq; create_qp()
1878 qp->event_handler = attr.event_handler; create_qp()
1879 qp->qp_context = attr.qp_context; create_qp()
1880 qp->qp_type = attr.qp_type; create_qp()
1881 atomic_set(&qp->usecnt, 0); create_qp()
1889 qp->uobject = &obj->uevent.uobject; create_qp()
1891 obj->uevent.uobject.object = qp; create_qp()
1897 resp.base.qpn = qp->qp_num; create_qp()
1941 ib_destroy_qp(qp); create_qp()
2076 struct ib_qp *qp; ib_uverbs_open_qp() local
2112 qp = ib_open_qp(xrcd, &attr); ib_uverbs_open_qp()
2113 if (IS_ERR(qp)) { ib_uverbs_open_qp()
2114 ret = PTR_ERR(qp); ib_uverbs_open_qp()
2118 qp->uobject = &obj->uevent.uobject; ib_uverbs_open_qp()
2120 obj->uevent.uobject.object = qp; ib_uverbs_open_qp()
2126 resp.qpn = qp->qp_num; ib_uverbs_open_qp()
2153 ib_destroy_qp(qp); ib_uverbs_open_qp()
2168 struct ib_qp *qp; ib_uverbs_query_qp() local
2183 qp = idr_read_qp(cmd.qp_handle, file->ucontext); ib_uverbs_query_qp()
2184 if (!qp) { ib_uverbs_query_qp()
2189 ret = ib_query_qp(qp, attr, cmd.attr_mask, init_attr); ib_uverbs_query_qp()
2191 put_qp_read(qp); ib_uverbs_query_qp()
2283 struct ib_qp *qp; ib_uverbs_modify_qp() local
2297 qp = idr_read_qp(cmd.qp_handle, file->ucontext); ib_uverbs_modify_qp()
2298 if (!qp) { ib_uverbs_modify_qp()
2349 if (qp->real_qp == qp) { ib_uverbs_modify_qp()
2350 ret = ib_resolve_eth_dmac(qp, attr, &cmd.attr_mask); ib_uverbs_modify_qp()
2353 ret = qp->device->modify_qp(qp, attr, ib_uverbs_modify_qp()
2354 modify_qp_mask(qp->qp_type, cmd.attr_mask), &udata); ib_uverbs_modify_qp()
2356 ret = ib_modify_qp(qp, attr, modify_qp_mask(qp->qp_type, cmd.attr_mask)); ib_uverbs_modify_qp()
2365 put_qp_read(qp); ib_uverbs_modify_qp()
2381 struct ib_qp *qp; ib_uverbs_destroy_qp() local
2393 qp = uobj->object; ib_uverbs_destroy_qp()
2401 ret = ib_destroy_qp(qp); ib_uverbs_destroy_qp()
2447 struct ib_qp *qp; ib_uverbs_post_send() local
2467 qp = idr_read_qp(cmd.qp_handle, file->ucontext); ib_uverbs_post_send()
2468 if (!qp) ib_uverbs_post_send()
2471 is_ud = qp->qp_type == IB_QPT_UD; ib_uverbs_post_send()
2597 ret = qp->device->post_send(qp->real_qp, wr, &bad_wr); ib_uverbs_post_send()
2610 put_qp_read(qp); ib_uverbs_post_send()
2719 struct ib_qp *qp; ib_uverbs_post_recv() local
2731 qp = idr_read_qp(cmd.qp_handle, file->ucontext); ib_uverbs_post_recv()
2732 if (!qp) ib_uverbs_post_recv()
2736 ret = qp->device->post_recv(qp->real_qp, wr, &bad_wr); ib_uverbs_post_recv()
2738 put_qp_read(qp); ib_uverbs_post_recv()
2946 struct ib_qp *qp; ib_uverbs_attach_mcast() local
2954 qp = idr_write_qp(cmd.qp_handle, file->ucontext); ib_uverbs_attach_mcast()
2955 if (!qp) ib_uverbs_attach_mcast()
2958 obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject); ib_uverbs_attach_mcast()
2976 ret = ib_attach_mcast(qp, &mcast->gid, cmd.mlid); ib_uverbs_attach_mcast()
2983 put_qp_write(qp); ib_uverbs_attach_mcast()
2995 struct ib_qp *qp; ib_uverbs_detach_mcast() local
3002 qp = idr_write_qp(cmd.qp_handle, file->ucontext); ib_uverbs_detach_mcast()
3003 if (!qp) ib_uverbs_detach_mcast()
3006 ret = ib_detach_mcast(qp, (union ib_gid *) cmd.gid, cmd.mlid); ib_uverbs_detach_mcast()
3010 obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject); ib_uverbs_detach_mcast()
3021 put_qp_write(qp); ib_uverbs_detach_mcast()
3080 struct ib_qp *qp; ib_uverbs_ex_create_flow() local
3141 qp = idr_read_qp(cmd.qp_handle, file->ucontext); ib_uverbs_ex_create_flow()
3142 if (!qp) { ib_uverbs_ex_create_flow()
3181 flow_id = ib_create_flow(qp, flow_attr, IB_FLOW_DOMAIN_USER); ib_uverbs_ex_create_flow()
3186 flow_id->qp = qp; ib_uverbs_ex_create_flow()
3202 put_qp_read(qp); ib_uverbs_ex_create_flow()
3221 put_qp_read(qp); ib_uverbs_ex_create_flow()
mad.c:327 if (!port_priv->qp_info[qpn].qp) { ib_register_mad_agent()
357 mad_agent_priv->agent.qp = port_priv->qp_info[qpn].qp; ib_register_mad_agent()
523 mad_snoop_priv->agent.qp = port_priv->qp_info[qpn].qp; ib_register_mad_snoop()
692 static void build_smp_wc(struct ib_qp *qp, build_smp_wc() argument
703 wc->qp = qp; build_smp_wc()
834 build_smp_wc(mad_agent_priv->agent.qp, handle_outgoing_dr_smp()
1031 mad_send_wr->sg_list[0].lkey = mad_agent->qp->pd->local_dma_lkey; ib_create_send_mad()
1040 mad_send_wr->sg_list[1].lkey = mad_agent->qp->pd->local_dma_lkey; ib_create_send_mad()
1182 ret = ib_post_send(mad_agent->qp, &mad_send_wr->send_wr.wr, ib_send_mad()
1327 struct ib_mad_agent *ib_redirect_mad_qp(struct ib_qp *qp, ib_redirect_mad_qp() argument
1798 u32 qp_num = qp_info->qp->qp_num; validate_mad()
2058 qp_info->qp->qp_num, handle_ib_smi()
2146 qp_info->qp->qp_num, handle_opa_smi()
2268 qp_info->qp->qp_num, ib_mad_recv_done_handler()
2287 qp_info->qp->qp_num, mad_size, opa); ib_mad_recv_done_handler()
2465 ret = ib_post_send(qp_info->qp, &queued_send_wr->send_wr.wr, ib_mad_send_done_handler()
2523 ret = ib_post_send(qp_info->qp, &mad_send_wr->send_wr.wr, mad_error_handler()
2537 ret = ib_modify_qp(qp_info->qp, attr, mad_error_handler()
2718 build_smp_wc(recv_mad_agent->agent.qp, local_completions()
2924 ret = ib_post_recv(qp_info->qp, &recv_wr, &bad_recv_wr); ib_mad_post_receive_mads()
2953 if (!qp_info->qp) cleanup_recv_queue()
2986 struct ib_qp *qp; ib_mad_port_start() local
3002 qp = port_priv->qp_info[i].qp; ib_mad_port_start()
3003 if (!qp) ib_mad_port_start()
3012 attr->qkey = (qp->qp_num == 0) ? 0 : IB_QP1_QKEY; ib_mad_port_start()
3013 ret = ib_modify_qp(qp, attr, IB_QP_STATE | ib_mad_port_start()
3023 ret = ib_modify_qp(qp, attr, IB_QP_STATE); ib_mad_port_start()
3033 ret = ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_SQ_PSN); ib_mad_port_start()
3051 if (!port_priv->qp_info[i].qp) ib_mad_port_start()
3073 event->event, qp_info->qp->qp_num); qp_event_handler()
3116 qp_info->qp = ib_create_qp(qp_info->port_priv->pd, &qp_init_attr); create_mad_qp()
3117 if (IS_ERR(qp_info->qp)) { create_mad_qp()
3121 ret = PTR_ERR(qp_info->qp); create_mad_qp()
3135 if (!qp_info->qp) destroy_mad_qp()
3138 ib_destroy_qp(qp_info->qp); destroy_mad_qp()
uverbs_main.c:197 static void ib_uverbs_detach_umcast(struct ib_qp *qp, ib_uverbs_detach_umcast() argument
203 ib_detach_mcast(qp, &mcast->gid, mcast->lid); ib_uverbs_detach_umcast()
242 struct ib_qp *qp = uobj->object; ib_uverbs_cleanup_ucontext() local
247 if (qp != qp->real_qp) { ib_uverbs_cleanup_ucontext()
248 ib_close_qp(qp); ib_uverbs_cleanup_ucontext()
250 ib_uverbs_detach_umcast(qp, uqp); ib_uverbs_cleanup_ucontext()
251 ib_destroy_qp(qp); ib_uverbs_cleanup_ucontext()
548 /* for XRC target qp's, check that qp is live */ ib_uverbs_qp_event_handler()
549 if (!event->element.qp->uobject || !event->element.qp->uobject->live) ib_uverbs_qp_event_handler()
552 uobj = container_of(event->element.qp->uobject, ib_uverbs_qp_event_handler()
iwcm.h:50 struct ib_qp *qp; member in struct:iwcm_id_private
core_priv.h:49 int ib_resolve_eth_dmac(struct ib_qp *qp,
cma.c:634 static int cma_init_ud_qp(struct rdma_id_private *id_priv, struct ib_qp *qp) cma_init_ud_qp() argument
644 ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask); cma_init_ud_qp()
649 ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE); cma_init_ud_qp()
655 ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE | IB_QP_SQ_PSN); cma_init_ud_qp()
660 static int cma_init_conn_qp(struct rdma_id_private *id_priv, struct ib_qp *qp) cma_init_conn_qp() argument
670 return ib_modify_qp(qp, &qp_attr, qp_attr_mask); cma_init_conn_qp()
677 struct ib_qp *qp; rdma_create_qp() local
684 qp = ib_create_qp(pd, qp_init_attr); rdma_create_qp()
685 if (IS_ERR(qp)) rdma_create_qp()
686 return PTR_ERR(qp); rdma_create_qp()
689 ret = cma_init_ud_qp(id_priv, qp); rdma_create_qp()
691 ret = cma_init_conn_qp(id_priv, qp); rdma_create_qp()
695 id->qp = qp; rdma_create_qp()
696 id_priv->qp_num = qp->qp_num; rdma_create_qp()
697 id_priv->srq = (qp->srq != NULL); rdma_create_qp()
700 ib_destroy_qp(qp); rdma_create_qp()
711 ib_destroy_qp(id_priv->id.qp); rdma_destroy_qp()
712 id_priv->id.qp = NULL; rdma_destroy_qp()
725 if (!id_priv->id.qp) { cma_modify_qp_rtr()
736 ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask); cma_modify_qp_rtr()
754 ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask); cma_modify_qp_rtr()
767 if (!id_priv->id.qp) { cma_modify_qp_rts()
779 ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask); cma_modify_qp_rts()
791 if (!id_priv->id.qp) { cma_modify_qp_err()
797 ret = ib_modify_qp(id_priv->id.qp, &qp_attr, IB_QP_STATE); cma_modify_qp_err()
1544 if (id_priv->id.qp) { cma_ib_handler()
3267 iw_param.qpn = id_priv->id.qp ? id_priv->qp_num : conn_param->qp_num; cma_connect_iw()
3290 if (!id->qp) { rdma_connect()
3359 if (id_priv->id.qp) { cma_accept_iw()
3401 if (!id->qp && conn_param) { rdma_accept()
3528 if (!status && id_priv->id.qp) cma_ib_mc_handler()
3529 status = ib_attach_mcast(id_priv->id.qp, &multicast->rec.mgid, cma_ib_mc_handler()
3783 if (id->qp) rdma_leave_multicast()
3784 ib_detach_mcast(id->qp, rdma_leave_multicast()
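The cma.c hits show that rdma_create_qp() allocates the QP with ib_create_qp(), drives it to INIT via cma_init_conn_qp()/cma_init_ud_qp(), and stores it in id->qp; the later RTR/RTS transitions happen inside the CM event path (cma_modify_qp_rtr()/cma_modify_qp_rts()). A rough sketch of how a consumer typically calls it follows; it assumes the cm_id already has a bound device and that "pd"/"cq" exist, and all example_* names are made up.

/* Illustrative only: a consumer driving rdma_create_qp() as indexed above. */
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>

static int example_setup_cm_qp(struct rdma_cm_id *id, struct ib_pd *pd,
			       struct ib_cq *cq)
{
	struct ib_qp_init_attr init_attr = {
		.send_cq      = cq,
		.recv_cq      = cq,
		.qp_type      = IB_QPT_RC,
		.sq_sig_type  = IB_SIGNAL_REQ_WR,
		.cap = {
			.max_send_wr  = 16,
			.max_recv_wr  = 16,
			.max_send_sge = 1,
			.max_recv_sge = 1,
		},
	};
	int ret;

	/* On success id->qp is set and the QP has already been moved to INIT */
	ret = rdma_create_qp(id, pd, &init_attr);
	if (ret)
		return ret;

	/* post receive buffers here, then rdma_connect()/rdma_accept();
	 * the CM moves the QP through RTR/RTS from its event handlers */
	return 0;
}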
mad_priv.h:186 struct ib_qp *qp; member in struct:ib_mad_qp_info
agent.c:102 ah = ib_create_ah_from_wc(agent->qp->pd, wc, grh, port_num); agent_send_response()
mad_rmpp.c:163 ah = ib_create_ah_from_wc(agent->qp->pd, recv_wc->wc, alloc_response_msg()
294 rmpp_recv->ah = ib_create_ah_from_wc(agent->agent.qp->pd, create_rmpp_recv()
/linux-4.4.14/include/linux/
ntb_transport.h:65 void (*rx_handler)(struct ntb_transport_qp *qp, void *qp_data,
67 void (*tx_handler)(struct ntb_transport_qp *qp, void *qp_data,
72 unsigned char ntb_transport_qp_num(struct ntb_transport_qp *qp);
73 unsigned int ntb_transport_max_size(struct ntb_transport_qp *qp);
77 void ntb_transport_free_queue(struct ntb_transport_qp *qp);
78 int ntb_transport_rx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
80 int ntb_transport_tx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
82 void *ntb_transport_rx_remove(struct ntb_transport_qp *qp, unsigned int *len);
83 void ntb_transport_link_up(struct ntb_transport_qp *qp);
84 void ntb_transport_link_down(struct ntb_transport_qp *qp);
85 bool ntb_transport_link_query(struct ntb_transport_qp *qp);
86 unsigned int ntb_transport_tx_free_entry(struct ntb_transport_qp *qp);
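These prototypes are the client-side surface of an NTB transport queue pair; the ntb_netdev.c hits further down this list are the canonical user. Below is a rough wiring sketch. The ntb_queue_handlers structure and the three-argument ntb_transport_create_queue() call are assumed from that usage rather than shown in full above, and the example_* names and buffer size are hypothetical.

/* Illustrative only: assumed wiring of the NTB transport queue-pair API. */
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/ntb_transport.h>

#define EXAMPLE_BUF_LEN	2048	/* hypothetical receive buffer size */

static void example_rx(struct ntb_transport_qp *qp, void *qp_data,
		       void *data, int len)
{
	/* "data" is the cookie given to ntb_transport_rx_enqueue(); "len"
	 * bytes were received into it.  Consume it, then repost. */
	if (ntb_transport_rx_enqueue(qp, data, data, EXAMPLE_BUF_LEN))
		pr_warn("example: failed to repost rx buffer\n");
}

static void example_tx(struct ntb_transport_qp *qp, void *qp_data,
		       void *data, int len)
{
	/* the buffer handed to ntb_transport_tx_enqueue() may now be reused */
}

static void example_event(void *qp_data, int status)
{
	/* queue-pair link state changed; see ntb_transport_link_query() */
}

static const struct ntb_queue_handlers example_handlers = {
	.rx_handler	= example_rx,
	.tx_handler	= example_tx,
	.event_handler	= example_event,
};

static struct ntb_transport_qp *example_open_qp(struct device *client_dev,
						void *rx_buf)
{
	struct ntb_transport_qp *qp;

	qp = ntb_transport_create_queue(NULL, client_dev, &example_handlers);
	if (!qp)
		return NULL;

	ntb_transport_rx_enqueue(qp, rx_buf, rx_buf, EXAMPLE_BUF_LEN);
	ntb_transport_link_up(qp);	/* tell the peer we are ready */
	return qp;
}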
/linux-4.4.14/drivers/net/ethernet/mellanox/mlx5/core/
Makefile:4 health.o mcg.o cq.o srq.o alloc.o qp.o port.o mr.o pd.o \
qp.c:37 #include <linux/mlx5/qp.h>
73 struct mlx5_core_qp *qp; mlx5_rsc_event() local
80 qp = (struct mlx5_core_qp *)common; mlx5_rsc_event()
81 qp->event(qp, event_type); mlx5_rsc_event()
97 struct mlx5_core_qp *qp = mlx5_eq_pagefault() local
101 if (!qp) { mlx5_eq_pagefault()
165 if (qp->pfault_handler) { mlx5_eq_pagefault()
166 qp->pfault_handler(qp, &pfault); mlx5_eq_pagefault()
181 struct mlx5_core_qp *qp, mlx5_core_create_qp()
213 qp->qpn = be32_to_cpu(out.qpn) & 0xffffff; mlx5_core_create_qp()
214 mlx5_core_dbg(dev, "qpn = 0x%x\n", qp->qpn); mlx5_core_create_qp()
216 qp->common.res = MLX5_RES_QP; mlx5_core_create_qp()
218 err = radix_tree_insert(&table->tree, qp->qpn, qp); mlx5_core_create_qp()
225 err = mlx5_debug_qp_add(dev, qp); mlx5_core_create_qp()
228 qp->qpn); mlx5_core_create_qp()
230 qp->pid = current->pid; mlx5_core_create_qp()
231 atomic_set(&qp->common.refcount, 1); mlx5_core_create_qp()
233 init_completion(&qp->common.free); mlx5_core_create_qp()
241 din.qpn = cpu_to_be32(qp->qpn); mlx5_core_create_qp()
249 struct mlx5_core_qp *qp) mlx5_core_destroy_qp()
257 mlx5_debug_qp_remove(dev, qp); mlx5_core_destroy_qp()
260 radix_tree_delete(&table->tree, qp->qpn); mlx5_core_destroy_qp()
263 mlx5_core_put_rsc((struct mlx5_core_rsc_common *)qp); mlx5_core_destroy_qp()
264 wait_for_completion(&qp->common.free); mlx5_core_destroy_qp()
269 in.qpn = cpu_to_be32(qp->qpn); mlx5_core_destroy_qp()
285 struct mlx5_core_qp *qp) mlx5_core_qp_modify()
335 in->qpn = cpu_to_be32(qp->qpn); mlx5_core_qp_modify()
359 int mlx5_core_qp_query(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp, mlx5_core_qp_query() argument
368 in.qpn = cpu_to_be32(qp->qpn); mlx5_core_qp_query()
180 mlx5_core_create_qp(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp, struct mlx5_create_qp_mbox_in *in, int inlen) mlx5_core_create_qp() argument
248 mlx5_core_destroy_qp(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp) mlx5_core_destroy_qp() argument
282 mlx5_core_qp_modify(struct mlx5_core_dev *dev, enum mlx5_qp_state cur_state, enum mlx5_qp_state new_state, struct mlx5_modify_qp_mbox_in *in, int sqd_event, struct mlx5_core_qp *qp) mlx5_core_qp_modify() argument
debugfs.c:35 #include <linux/mlx5/qp.h>
277 static u64 qp_read_field(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp, qp_read_field() argument
290 err = mlx5_core_qp_query(dev, qp, out, sizeof(*out)); qp_read_field()
292 mlx5_core_warn(dev, "failed to query qp\n"); qp_read_field()
300 param = qp->pid; qp_read_field()
536 int mlx5_debug_qp_add(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp) mlx5_debug_qp_add() argument
544 &qp->dbg, qp->qpn, qp_fields, mlx5_debug_qp_add()
545 ARRAY_SIZE(qp_fields), qp); mlx5_debug_qp_add()
547 qp->dbg = NULL; mlx5_debug_qp_add()
552 void mlx5_debug_qp_remove(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp) mlx5_debug_qp_remove() argument
557 if (qp->dbg) mlx5_debug_qp_remove()
558 rem_res_tree(qp->dbg); mlx5_debug_qp_remove()
/linux-4.4.14/drivers/scsi/bnx2i/
bnx2i_hwi.c:153 cq_db = (struct bnx2i_5771x_cq_db *) ep->qp.cq_pgtbl_virt; bnx2i_arm_cq_event_coalescing()
170 cq_index = ep->qp.cqe_exp_seq_sn + next_index - 1; bnx2i_arm_cq_event_coalescing()
171 if (cq_index > ep->qp.cqe_size * 2) bnx2i_arm_cq_event_coalescing()
172 cq_index -= ep->qp.cqe_size * 2; bnx2i_arm_cq_event_coalescing()
195 if (!bnx2i_conn->ep->qp.rqe_left) bnx2i_get_rq_buf()
198 bnx2i_conn->ep->qp.rqe_left--; bnx2i_get_rq_buf()
199 memcpy(ptr, (u8 *) bnx2i_conn->ep->qp.rq_cons_qe, len); bnx2i_get_rq_buf()
200 if (bnx2i_conn->ep->qp.rq_cons_qe == bnx2i_conn->ep->qp.rq_last_qe) { bnx2i_get_rq_buf()
201 bnx2i_conn->ep->qp.rq_cons_qe = bnx2i_conn->ep->qp.rq_first_qe; bnx2i_get_rq_buf()
202 bnx2i_conn->ep->qp.rq_cons_idx = 0; bnx2i_get_rq_buf()
204 bnx2i_conn->ep->qp.rq_cons_qe++; bnx2i_get_rq_buf()
205 bnx2i_conn->ep->qp.rq_cons_idx++; bnx2i_get_rq_buf()
220 writel(cpu_to_le32(msg), conn->ep->qp.ctx_base); bnx2i_ring_577xx_doorbell()
234 u16 hi_bit = (bnx2i_conn->ep->qp.rq_prod_idx & 0x8000); bnx2i_put_rq_buf()
237 ep->qp.rqe_left += count; bnx2i_put_rq_buf()
238 ep->qp.rq_prod_idx &= 0x7FFF; bnx2i_put_rq_buf()
239 ep->qp.rq_prod_idx += count; bnx2i_put_rq_buf()
241 if (ep->qp.rq_prod_idx > bnx2i_conn->hba->max_rqes) { bnx2i_put_rq_buf()
242 ep->qp.rq_prod_idx %= bnx2i_conn->hba->max_rqes; bnx2i_put_rq_buf()
244 ep->qp.rq_prod_idx |= 0x8000; bnx2i_put_rq_buf()
246 ep->qp.rq_prod_idx |= hi_bit; bnx2i_put_rq_buf()
249 rq_db = (struct bnx2i_5771x_sq_rq_db *) ep->qp.rq_pgtbl_virt; bnx2i_put_rq_buf()
250 rq_db->prod_idx = ep->qp.rq_prod_idx; bnx2i_put_rq_buf()
253 writew(ep->qp.rq_prod_idx, bnx2i_put_rq_buf()
254 ep->qp.ctx_base + CNIC_RECV_DOORBELL); bnx2i_put_rq_buf()
277 sq_db = (struct bnx2i_5771x_sq_rq_db *) ep->qp.sq_pgtbl_virt; bnx2i_ring_sq_dbell()
278 sq_db->prod_idx = ep->qp.sq_prod_idx; bnx2i_ring_sq_dbell()
281 writew(count, ep->qp.ctx_base + CNIC_SEND_DOORBELL); bnx2i_ring_sq_dbell()
300 if (bnx2i_conn->ep->qp.sq_prod_qe == bnx2i_ring_dbell_update_sq_params()
301 bnx2i_conn->ep->qp.sq_last_qe) bnx2i_ring_dbell_update_sq_params()
302 bnx2i_conn->ep->qp.sq_prod_qe = bnx2i_ring_dbell_update_sq_params()
303 bnx2i_conn->ep->qp.sq_first_qe; bnx2i_ring_dbell_update_sq_params()
305 bnx2i_conn->ep->qp.sq_prod_qe++; bnx2i_ring_dbell_update_sq_params()
307 if ((bnx2i_conn->ep->qp.sq_prod_qe + count) <= bnx2i_ring_dbell_update_sq_params()
308 bnx2i_conn->ep->qp.sq_last_qe) bnx2i_ring_dbell_update_sq_params()
309 bnx2i_conn->ep->qp.sq_prod_qe += count; bnx2i_ring_dbell_update_sq_params()
311 tmp_cnt = bnx2i_conn->ep->qp.sq_last_qe - bnx2i_ring_dbell_update_sq_params()
312 bnx2i_conn->ep->qp.sq_prod_qe; bnx2i_ring_dbell_update_sq_params()
313 bnx2i_conn->ep->qp.sq_prod_qe = bnx2i_ring_dbell_update_sq_params()
314 &bnx2i_conn->ep->qp.sq_first_qe[count - bnx2i_ring_dbell_update_sq_params()
318 bnx2i_conn->ep->qp.sq_prod_idx += count; bnx2i_ring_dbell_update_sq_params()
320 bnx2i_ring_sq_dbell(bnx2i_conn, bnx2i_conn->ep->qp.sq_prod_idx); bnx2i_ring_dbell_update_sq_params()
343 bnx2i_conn->ep->qp.sq_prod_qe; bnx2i_send_iscsi_login()
402 bnx2i_conn->ep->qp.sq_prod_qe; bnx2i_send_iscsi_tmf()
473 text_wqe = (struct bnx2i_text_request *) bnx2i_conn->ep->qp.sq_prod_qe; bnx2i_send_iscsi_text()
519 bnx2i_conn->ep->qp.sq_prod_qe; bnx2i_send_iscsi_scsicmd()
550 nopout_wqe = (struct bnx2i_nop_out_request *)ep->qp.sq_prod_qe; bnx2i_send_iscsi_nopout()
613 bnx2i_conn->ep->qp.sq_prod_qe; bnx2i_send_iscsi_logout()
747 (struct bnx2i_cleanup_request *)cmd->conn->ep->qp.sq_prod_qe; bnx2i_send_cmd_cleanup_req()
817 dma_addr = ep->qp.sq_pgtbl_phys; bnx2i_570x_send_conn_ofld_req()
821 dma_addr = ep->qp.cq_pgtbl_phys; bnx2i_570x_send_conn_ofld_req()
829 dma_addr = ep->qp.rq_pgtbl_phys; bnx2i_570x_send_conn_ofld_req()
833 ptbl = (u32 *) ep->qp.sq_pgtbl_virt; bnx2i_570x_send_conn_ofld_req()
838 ptbl = (u32 *) ep->qp.cq_pgtbl_virt; bnx2i_570x_send_conn_ofld_req()
878 dma_addr = ep->qp.sq_pgtbl_phys + ISCSI_SQ_DB_SIZE; bnx2i_5771x_send_conn_ofld_req()
882 dma_addr = ep->qp.cq_pgtbl_phys + ISCSI_CQ_DB_SIZE; bnx2i_5771x_send_conn_ofld_req()
890 dma_addr = ep->qp.rq_pgtbl_phys + ISCSI_RQ_DB_SIZE; bnx2i_5771x_send_conn_ofld_req()
894 ptbl = (u32 *)((u8 *)ep->qp.sq_pgtbl_virt + ISCSI_SQ_DB_SIZE); bnx2i_5771x_send_conn_ofld_req()
898 ptbl = (u32 *)((u8 *)ep->qp.cq_pgtbl_virt + ISCSI_CQ_DB_SIZE); bnx2i_5771x_send_conn_ofld_req()
907 ptbl = (u32 *)((u8 *)ep->qp.rq_pgtbl_virt + ISCSI_RQ_DB_SIZE); bnx2i_5771x_send_conn_ofld_req()
963 memset(ep->qp.sq_pgtbl_virt, 0, ep->qp.sq_pgtbl_size); setup_qp_page_tables()
964 num_pages = ep->qp.sq_mem_size / CNIC_PAGE_SIZE; setup_qp_page_tables()
965 page = ep->qp.sq_phys; setup_qp_page_tables()
968 ptbl = (u32 *)((u8 *)ep->qp.sq_pgtbl_virt + ISCSI_SQ_DB_SIZE); setup_qp_page_tables()
970 ptbl = (u32 *) ep->qp.sq_pgtbl_virt; setup_qp_page_tables()
991 memset(ep->qp.rq_pgtbl_virt, 0, ep->qp.rq_pgtbl_size); setup_qp_page_tables()
992 num_pages = ep->qp.rq_mem_size / CNIC_PAGE_SIZE; setup_qp_page_tables()
993 page = ep->qp.rq_phys; setup_qp_page_tables()
996 ptbl = (u32 *)((u8 *)ep->qp.rq_pgtbl_virt + ISCSI_RQ_DB_SIZE); setup_qp_page_tables()
998 ptbl = (u32 *) ep->qp.rq_pgtbl_virt; setup_qp_page_tables()
1019 memset(ep->qp.cq_pgtbl_virt, 0, ep->qp.cq_pgtbl_size); setup_qp_page_tables()
1020 num_pages = ep->qp.cq_mem_size / CNIC_PAGE_SIZE; setup_qp_page_tables()
1021 page = ep->qp.cq_phys; setup_qp_page_tables()
1024 ptbl = (u32 *)((u8 *)ep->qp.cq_pgtbl_virt + ISCSI_CQ_DB_SIZE); setup_qp_page_tables()
1026 ptbl = (u32 *) ep->qp.cq_pgtbl_virt; setup_qp_page_tables()
1067 ep->qp.sq_mem_size = hba->max_sqes * BNX2I_SQ_WQE_SIZE; bnx2i_alloc_qp_resc()
1068 ep->qp.sq_mem_size = bnx2i_alloc_qp_resc()
1069 (ep->qp.sq_mem_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK; bnx2i_alloc_qp_resc()
1070 ep->qp.sq_pgtbl_size = bnx2i_alloc_qp_resc()
1071 (ep->qp.sq_mem_size / CNIC_PAGE_SIZE) * sizeof(void *); bnx2i_alloc_qp_resc()
1072 ep->qp.sq_pgtbl_size = bnx2i_alloc_qp_resc()
1073 (ep->qp.sq_pgtbl_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK; bnx2i_alloc_qp_resc()
1075 ep->qp.sq_pgtbl_virt = bnx2i_alloc_qp_resc()
1076 dma_alloc_coherent(&hba->pcidev->dev, ep->qp.sq_pgtbl_size, bnx2i_alloc_qp_resc()
1077 &ep->qp.sq_pgtbl_phys, GFP_KERNEL); bnx2i_alloc_qp_resc()
1078 if (!ep->qp.sq_pgtbl_virt) { bnx2i_alloc_qp_resc()
1080 ep->qp.sq_pgtbl_size); bnx2i_alloc_qp_resc()
1085 ep->qp.sq_virt = bnx2i_alloc_qp_resc()
1086 dma_alloc_coherent(&hba->pcidev->dev, ep->qp.sq_mem_size, bnx2i_alloc_qp_resc()
1087 &ep->qp.sq_phys, GFP_KERNEL); bnx2i_alloc_qp_resc()
1088 if (!ep->qp.sq_virt) { bnx2i_alloc_qp_resc()
1090 ep->qp.sq_mem_size); bnx2i_alloc_qp_resc()
1094 memset(ep->qp.sq_virt, 0x00, ep->qp.sq_mem_size); bnx2i_alloc_qp_resc()
1095 ep->qp.sq_first_qe = ep->qp.sq_virt; bnx2i_alloc_qp_resc()
1096 ep->qp.sq_prod_qe = ep->qp.sq_first_qe; bnx2i_alloc_qp_resc()
1097 ep->qp.sq_cons_qe = ep->qp.sq_first_qe; bnx2i_alloc_qp_resc()
1098 ep->qp.sq_last_qe = &ep->qp.sq_first_qe[hba->max_sqes - 1]; bnx2i_alloc_qp_resc()
1099 ep->qp.sq_prod_idx = 0; bnx2i_alloc_qp_resc()
1100 ep->qp.sq_cons_idx = 0; bnx2i_alloc_qp_resc()
1101 ep->qp.sqe_left = hba->max_sqes; bnx2i_alloc_qp_resc()
1104 ep->qp.cq_mem_size = hba->max_cqes * BNX2I_CQE_SIZE; bnx2i_alloc_qp_resc()
1105 ep->qp.cq_mem_size = bnx2i_alloc_qp_resc()
1106 (ep->qp.cq_mem_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK; bnx2i_alloc_qp_resc()
1107 ep->qp.cq_pgtbl_size = bnx2i_alloc_qp_resc()
1108 (ep->qp.cq_mem_size / CNIC_PAGE_SIZE) * sizeof(void *); bnx2i_alloc_qp_resc()
1109 ep->qp.cq_pgtbl_size = bnx2i_alloc_qp_resc()
1110 (ep->qp.cq_pgtbl_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK; bnx2i_alloc_qp_resc()
1112 ep->qp.cq_pgtbl_virt = bnx2i_alloc_qp_resc()
1113 dma_alloc_coherent(&hba->pcidev->dev, ep->qp.cq_pgtbl_size, bnx2i_alloc_qp_resc()
1114 &ep->qp.cq_pgtbl_phys, GFP_KERNEL); bnx2i_alloc_qp_resc()
1115 if (!ep->qp.cq_pgtbl_virt) { bnx2i_alloc_qp_resc()
1117 ep->qp.cq_pgtbl_size); bnx2i_alloc_qp_resc()
1122 ep->qp.cq_virt = bnx2i_alloc_qp_resc()
1123 dma_alloc_coherent(&hba->pcidev->dev, ep->qp.cq_mem_size, bnx2i_alloc_qp_resc()
1124 &ep->qp.cq_phys, GFP_KERNEL); bnx2i_alloc_qp_resc()
1125 if (!ep->qp.cq_virt) { bnx2i_alloc_qp_resc()
1127 ep->qp.cq_mem_size); bnx2i_alloc_qp_resc()
1130 memset(ep->qp.cq_virt, 0x00, ep->qp.cq_mem_size); bnx2i_alloc_qp_resc()
1132 ep->qp.cq_first_qe = ep->qp.cq_virt; bnx2i_alloc_qp_resc()
1133 ep->qp.cq_prod_qe = ep->qp.cq_first_qe; bnx2i_alloc_qp_resc()
1134 ep->qp.cq_cons_qe = ep->qp.cq_first_qe; bnx2i_alloc_qp_resc()
1135 ep->qp.cq_last_qe = &ep->qp.cq_first_qe[hba->max_cqes - 1]; bnx2i_alloc_qp_resc()
1136 ep->qp.cq_prod_idx = 0; bnx2i_alloc_qp_resc()
1137 ep->qp.cq_cons_idx = 0; bnx2i_alloc_qp_resc()
1138 ep->qp.cqe_left = hba->max_cqes; bnx2i_alloc_qp_resc()
1139 ep->qp.cqe_exp_seq_sn = ISCSI_INITIAL_SN; bnx2i_alloc_qp_resc()
1140 ep->qp.cqe_size = hba->max_cqes; bnx2i_alloc_qp_resc()
1143 cq_db = (struct bnx2i_5771x_cq_db *) ep->qp.cq_pgtbl_virt; bnx2i_alloc_qp_resc()
1147 ep->qp.rq_mem_size = hba->max_rqes * BNX2I_RQ_WQE_SIZE; bnx2i_alloc_qp_resc()
1148 ep->qp.rq_mem_size = bnx2i_alloc_qp_resc()
1149 (ep->qp.rq_mem_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK; bnx2i_alloc_qp_resc()
1150 ep->qp.rq_pgtbl_size = bnx2i_alloc_qp_resc()
1151 (ep->qp.rq_mem_size / CNIC_PAGE_SIZE) * sizeof(void *); bnx2i_alloc_qp_resc()
1152 ep->qp.rq_pgtbl_size = bnx2i_alloc_qp_resc()
1153 (ep->qp.rq_pgtbl_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK; bnx2i_alloc_qp_resc()
1155 ep->qp.rq_pgtbl_virt = bnx2i_alloc_qp_resc()
1156 dma_alloc_coherent(&hba->pcidev->dev, ep->qp.rq_pgtbl_size, bnx2i_alloc_qp_resc()
1157 &ep->qp.rq_pgtbl_phys, GFP_KERNEL); bnx2i_alloc_qp_resc()
1158 if (!ep->qp.rq_pgtbl_virt) { bnx2i_alloc_qp_resc()
1160 ep->qp.rq_pgtbl_size); bnx2i_alloc_qp_resc()
1165 ep->qp.rq_virt = bnx2i_alloc_qp_resc()
1166 dma_alloc_coherent(&hba->pcidev->dev, ep->qp.rq_mem_size, bnx2i_alloc_qp_resc()
1167 &ep->qp.rq_phys, GFP_KERNEL); bnx2i_alloc_qp_resc()
1168 if (!ep->qp.rq_virt) { bnx2i_alloc_qp_resc()
1170 ep->qp.rq_mem_size); bnx2i_alloc_qp_resc()
1174 ep->qp.rq_first_qe = ep->qp.rq_virt; bnx2i_alloc_qp_resc()
1175 ep->qp.rq_prod_qe = ep->qp.rq_first_qe; bnx2i_alloc_qp_resc()
1176 ep->qp.rq_cons_qe = ep->qp.rq_first_qe; bnx2i_alloc_qp_resc()
1177 ep->qp.rq_last_qe = &ep->qp.rq_first_qe[hba->max_rqes - 1]; bnx2i_alloc_qp_resc()
1178 ep->qp.rq_prod_idx = 0x8000; bnx2i_alloc_qp_resc()
1179 ep->qp.rq_cons_idx = 0; bnx2i_alloc_qp_resc()
1180 ep->qp.rqe_left = hba->max_rqes; bnx2i_alloc_qp_resc()
1202 if (ep->qp.ctx_base) { bnx2i_free_qp_resc()
1203 iounmap(ep->qp.ctx_base); bnx2i_free_qp_resc()
1204 ep->qp.ctx_base = NULL; bnx2i_free_qp_resc()
1207 if (ep->qp.sq_pgtbl_virt) { bnx2i_free_qp_resc()
1208 dma_free_coherent(&hba->pcidev->dev, ep->qp.sq_pgtbl_size, bnx2i_free_qp_resc()
1209 ep->qp.sq_pgtbl_virt, ep->qp.sq_pgtbl_phys); bnx2i_free_qp_resc()
1210 ep->qp.sq_pgtbl_virt = NULL; bnx2i_free_qp_resc()
1211 ep->qp.sq_pgtbl_phys = 0; bnx2i_free_qp_resc()
1213 if (ep->qp.sq_virt) { bnx2i_free_qp_resc()
1214 dma_free_coherent(&hba->pcidev->dev, ep->qp.sq_mem_size, bnx2i_free_qp_resc()
1215 ep->qp.sq_virt, ep->qp.sq_phys); bnx2i_free_qp_resc()
1216 ep->qp.sq_virt = NULL; bnx2i_free_qp_resc()
1217 ep->qp.sq_phys = 0; bnx2i_free_qp_resc()
1221 if (ep->qp.rq_pgtbl_virt) { bnx2i_free_qp_resc()
1222 dma_free_coherent(&hba->pcidev->dev, ep->qp.rq_pgtbl_size, bnx2i_free_qp_resc()
1223 ep->qp.rq_pgtbl_virt, ep->qp.rq_pgtbl_phys); bnx2i_free_qp_resc()
1224 ep->qp.rq_pgtbl_virt = NULL; bnx2i_free_qp_resc()
1225 ep->qp.rq_pgtbl_phys = 0; bnx2i_free_qp_resc()
1227 if (ep->qp.rq_virt) { bnx2i_free_qp_resc()
1228 dma_free_coherent(&hba->pcidev->dev, ep->qp.rq_mem_size, bnx2i_free_qp_resc()
1229 ep->qp.rq_virt, ep->qp.rq_phys); bnx2i_free_qp_resc()
1230 ep->qp.rq_virt = NULL; bnx2i_free_qp_resc()
1231 ep->qp.rq_phys = 0; bnx2i_free_qp_resc()
1235 if (ep->qp.cq_pgtbl_virt) { bnx2i_free_qp_resc()
1236 dma_free_coherent(&hba->pcidev->dev, ep->qp.cq_pgtbl_size, bnx2i_free_qp_resc()
1237 ep->qp.cq_pgtbl_virt, ep->qp.cq_pgtbl_phys); bnx2i_free_qp_resc()
1238 ep->qp.cq_pgtbl_virt = NULL; bnx2i_free_qp_resc()
1239 ep->qp.cq_pgtbl_phys = 0; bnx2i_free_qp_resc()
1241 if (ep->qp.cq_virt) { bnx2i_free_qp_resc()
1242 dma_free_coherent(&hba->pcidev->dev, ep->qp.cq_mem_size, bnx2i_free_qp_resc()
1243 ep->qp.cq_virt, ep->qp.cq_phys); bnx2i_free_qp_resc()
1244 ep->qp.cq_virt = NULL; bnx2i_free_qp_resc()
1245 ep->qp.cq_phys = 0; bnx2i_free_qp_resc()
1981 struct qp_info *qp; bnx2i_process_new_cqes() local
1989 qp = &bnx2i_conn->ep->qp; bnx2i_process_new_cqes()
1991 if (!qp->cq_virt) { bnx2i_process_new_cqes()
1997 nopin = (struct bnx2i_nop_in_msg *) qp->cq_cons_qe; bnx2i_process_new_cqes()
1998 if (nopin->cq_req_sn != qp->cqe_exp_seq_sn) bnx2i_process_new_cqes()
2024 qp->cq_cons_qe); bnx2i_process_new_cqes()
2028 qp->cq_cons_qe); bnx2i_process_new_cqes()
2032 qp->cq_cons_qe); bnx2i_process_new_cqes()
2036 qp->cq_cons_qe); bnx2i_process_new_cqes()
2040 qp->cq_cons_qe)) bnx2i_process_new_cqes()
2045 qp->cq_cons_qe); bnx2i_process_new_cqes()
2049 qp->cq_cons_qe); bnx2i_process_new_cqes()
2054 qp->cq_cons_qe); bnx2i_process_new_cqes()
2058 qp->cq_cons_qe); bnx2i_process_new_cqes()
2083 qp->cqe_exp_seq_sn++; bnx2i_process_new_cqes()
2084 if (qp->cqe_exp_seq_sn == (qp->cqe_size * 2 + 1)) bnx2i_process_new_cqes()
2085 qp->cqe_exp_seq_sn = ISCSI_INITIAL_SN; bnx2i_process_new_cqes()
2087 if (qp->cq_cons_qe == qp->cq_last_qe) { bnx2i_process_new_cqes()
2088 qp->cq_cons_qe = qp->cq_first_qe; bnx2i_process_new_cqes()
2089 qp->cq_cons_idx = 0; bnx2i_process_new_cqes()
2091 qp->cq_cons_qe++; bnx2i_process_new_cqes()
2092 qp->cq_cons_idx++; bnx2i_process_new_cqes()
2488 ep->qp.ctx_base = NULL; bnx2i_process_ofld_cmpl()
2744 ep->qp.ctx_base = ioremap_nocache(reg_base + reg_off, 4); bnx2i_map_ep_dbell_regs()
2763 ep->qp.ctx_base = ioremap_nocache(ep->hba->reg_base + reg_off, bnx2i_map_ep_dbell_regs()
2765 if (!ep->qp.ctx_base) bnx2i_map_ep_dbell_regs()
/linux-4.4.14/drivers/misc/mic/scif/
scif_nm.c:62 struct scif_qp *qp = scifdev->qpairs; scif_free_qp() local
64 if (!qp) scif_free_qp()
66 scif_unmap_single(qp->local_buf, scifdev, qp->inbound_q.size); scif_free_qp()
67 kfree(qp->inbound_q.rb_base); scif_free_qp()
68 scif_unmap_single(qp->local_qp, scifdev, sizeof(struct scif_qp)); scif_free_qp()
75 struct scif_qp *qp = &dev->qpairs[0]; scif_cleanup_qp() local
77 if (!qp) scif_cleanup_qp()
79 scif_iounmap((void *)qp->remote_qp, sizeof(struct scif_qp), dev); scif_cleanup_qp()
80 scif_iounmap((void *)qp->outbound_q.rb_base, scif_cleanup_qp()
82 qp->remote_qp = NULL; scif_cleanup_qp()
83 qp->local_write = 0; scif_cleanup_qp()
84 qp->inbound_q.current_write_offset = 0; scif_cleanup_qp()
85 qp->inbound_q.current_read_offset = 0; scif_cleanup_qp()
scif_nodeqp.c:29 * 2) scif_setup_qp(..) allocates the local qp and calls
105 int scif_setup_qp_connect(struct scif_qp *qp, dma_addr_t *qp_offset, scif_setup_qp_connect() argument
108 void *local_q = qp->inbound_q.rb_base; scif_setup_qp_connect()
112 spin_lock_init(&qp->send_lock); scif_setup_qp_connect()
113 spin_lock_init(&qp->recv_lock); scif_setup_qp_connect()
124 err = scif_map_single(&qp->local_buf, local_q, scifdev, local_size); scif_setup_qp_connect()
131 scif_rb_init(&qp->inbound_q, scif_setup_qp_connect()
133 &qp->local_write, scif_setup_qp_connect()
139 qp->inbound_q.read_ptr = NULL; scif_setup_qp_connect()
140 err = scif_map_single(qp_offset, qp, scif_setup_qp_connect()
144 qp->local_qp = *qp_offset; scif_setup_qp_connect()
147 scif_unmap_single(qp->local_buf, scifdev, local_size); scif_setup_qp_connect()
148 qp->local_buf = 0; scif_setup_qp_connect()
155 int scif_setup_qp_accept(struct scif_qp *qp, dma_addr_t *qp_offset, scif_setup_qp_accept() argument
165 spin_lock_init(&qp->send_lock); scif_setup_qp_accept()
166 spin_lock_init(&qp->recv_lock); scif_setup_qp_accept()
171 qp->remote_qp = remote_qp; scif_setup_qp_accept()
172 if (qp->remote_qp->magic != SCIFEP_MAGIC) { scif_setup_qp_accept()
176 qp->remote_buf = remote_qp->local_buf; scif_setup_qp_accept()
177 remote_size = qp->remote_qp->inbound_q.size; scif_setup_qp_accept()
178 remote_q = scif_ioremap(qp->remote_buf, remote_size, scifdev); scif_setup_qp_accept()
183 qp->remote_qp->local_write = 0; scif_setup_qp_accept()
188 scif_rb_init(&qp->outbound_q, scif_setup_qp_accept()
189 &qp->local_read, scif_setup_qp_accept()
190 &qp->remote_qp->local_write, scif_setup_qp_accept()
198 err = scif_map_single(&qp->local_buf, local_q, scifdev, local_size); scif_setup_qp_accept()
201 qp->remote_qp->local_read = 0; scif_setup_qp_accept()
206 scif_rb_init(&qp->inbound_q, scif_setup_qp_accept()
207 &qp->remote_qp->local_read, scif_setup_qp_accept()
208 &qp->local_write, scif_setup_qp_accept()
210 err = scif_map_single(qp_offset, qp, scifdev, scif_setup_qp_accept()
214 qp->local_qp = *qp_offset; scif_setup_qp_accept()
217 scif_unmap_single(qp->local_buf, scifdev, local_size); scif_setup_qp_accept()
218 qp->local_buf = 0; scif_setup_qp_accept()
223 qp->outbound_q.rb_base = NULL; scif_setup_qp_accept()
225 scif_iounmap(qp->remote_qp, sizeof(struct scif_qp), scifdev); scif_setup_qp_accept()
226 qp->remote_qp = NULL; scif_setup_qp_accept()
231 struct scif_qp *qp, u64 payload) scif_setup_qp_connect_response()
238 qp->remote_qp = scif_ioremap(payload, sizeof(struct scif_qp), scifdev); scif_setup_qp_connect_response()
240 if (!qp->remote_qp) { scif_setup_qp_connect_response()
245 if (qp->remote_qp->magic != SCIFEP_MAGIC) { scif_setup_qp_connect_response()
253 tmp_phys = qp->remote_qp->local_buf; scif_setup_qp_connect_response()
254 remote_size = qp->remote_qp->inbound_q.size; scif_setup_qp_connect_response()
260 qp->local_read = 0; scif_setup_qp_connect_response()
261 scif_rb_init(&qp->outbound_q, scif_setup_qp_connect_response()
262 &qp->local_read, scif_setup_qp_connect_response()
263 &qp->remote_qp->local_write, scif_setup_qp_connect_response()
270 qp->remote_qp->local_read = qp->inbound_q.current_read_offset; scif_setup_qp_connect_response()
275 scif_rb_init(&qp->inbound_q, scif_setup_qp_connect_response()
276 &qp->remote_qp->local_read, scif_setup_qp_connect_response()
277 &qp->local_write, scif_setup_qp_connect_response()
278 qp->inbound_q.rb_base, scif_setup_qp_connect_response()
279 get_count_order(qp->inbound_q.size)); scif_setup_qp_connect_response()
336 struct scif_qp *qp; scif_setup_qp() local
340 qp = kzalloc(sizeof(*qp), GFP_KERNEL); scif_setup_qp()
341 if (!qp) { scif_setup_qp()
345 qp->magic = SCIFEP_MAGIC; scif_setup_qp()
346 scifdev->qpairs = qp; scif_setup_qp()
347 err = scif_setup_qp_connect(qp, &scifdev->qp_dma_addr, scif_setup_qp()
620 struct scif_qp *qp = scifdev->qpairs; _scif_nodeqp_send() local
624 if (!qp) { _scif_nodeqp_send()
628 spin_lock(&qp->send_lock); _scif_nodeqp_send()
630 while ((err = scif_rb_write(&qp->outbound_q, _scif_nodeqp_send()
640 scif_rb_commit(&qp->outbound_q); _scif_nodeqp_send()
641 spin_unlock(&qp->send_lock); _scif_nodeqp_send()
861 struct scif_qp *qp = &peerdev->qpairs[0]; scif_poll_qp_state() local
863 if (qp->qp_state != SCIF_QP_ONLINE || scif_poll_qp_state()
864 qp->remote_qp->qp_state != SCIF_QP_ONLINE) { scif_poll_qp_state()
868 qp->qp_state); scif_poll_qp_state()
879 __func__, __LINE__, peerdev->node, qp->qp_state); scif_poll_qp_state()
880 qp->remote_qp->qp_state = SCIF_QP_OFFLINE; scif_poll_qp_state()
898 struct scif_qp *qp; scif_node_add_ack() local
912 * NODE_ADD for src and setup its end of the qp to dst scif_node_add_ack()
924 qp = &peerdev->qpairs[0]; scif_node_add_ack()
930 qp->remote_qp->qp_state = SCIF_QP_ONLINE; scif_node_add_ack()
1073 * @qp: Remote memory pointer
1083 struct scif_qp *qp, struct scifmsg *msg) scif_nodeqp_msg_handler()
1101 * @qp: Remote memory pointer
1107 void scif_nodeqp_intrhandler(struct scif_dev *scifdev, struct scif_qp *qp) scif_nodeqp_intrhandler() argument
1113 read_size = scif_rb_get_next(&qp->inbound_q, &msg, sizeof(msg)); scif_nodeqp_intrhandler()
1116 scif_nodeqp_msg_handler(scifdev, qp, &msg); scif_nodeqp_intrhandler()
1123 scif_rb_update_read_ptr(&qp->inbound_q); scif_nodeqp_intrhandler()
1140 struct scif_qp *qp = scifdev->qpairs; scif_loopb_wq_handler() local
1145 spin_lock(&qp->recv_lock); scif_loopb_wq_handler()
1152 spin_unlock(&qp->recv_lock); scif_loopb_wq_handler()
1155 scif_nodeqp_msg_handler(scifdev, qp, &msg->msg); scif_loopb_wq_handler()
1164 * @qp: Queue pair.
1194 scif_loopb_msg_handler(struct scif_dev *scifdev, struct scif_qp *qp) scif_loopb_msg_handler() argument
1203 read_size = scif_rb_get_next(&qp->inbound_q, &msg->msg, scif_loopb_msg_handler()
1207 scif_rb_update_read_ptr(&qp->inbound_q); scif_loopb_msg_handler()
1210 spin_lock(&qp->recv_lock); scif_loopb_msg_handler()
1212 spin_unlock(&qp->recv_lock); scif_loopb_msg_handler()
1214 scif_rb_update_read_ptr(&qp->inbound_q); scif_loopb_msg_handler()
1229 struct scif_qp *qp; scif_setup_loopback_qp() local
1251 qp = scifdev->qpairs; scif_setup_loopback_qp()
1252 qp->magic = SCIFEP_MAGIC; scif_setup_loopback_qp()
1253 spin_lock_init(&qp->send_lock); scif_setup_loopback_qp()
1254 spin_lock_init(&qp->recv_lock); scif_setup_loopback_qp()
1266 scif_rb_init(&qp->outbound_q, scif_setup_loopback_qp()
1267 &qp->local_read, scif_setup_loopback_qp()
1268 &qp->local_write, scif_setup_loopback_qp()
1271 scif_rb_init(&qp->inbound_q, scif_setup_loopback_qp()
1272 &qp->local_read, scif_setup_loopback_qp()
1273 &qp->local_write, scif_setup_loopback_qp()
230 scif_setup_qp_connect_response(struct scif_dev *scifdev, struct scif_qp *qp, u64 payload) scif_setup_qp_connect_response() argument
1082 scif_nodeqp_msg_handler(struct scif_dev *scifdev, struct scif_qp *qp, struct scifmsg *msg) scif_nodeqp_msg_handler() argument
scif_epd.c:23 struct scif_qp *qp = ep->qp_info.qp; scif_cleanup_ep_qp() local
25 if (qp->outbound_q.rb_base) { scif_cleanup_ep_qp()
26 scif_iounmap((void *)qp->outbound_q.rb_base, scif_cleanup_ep_qp()
27 qp->outbound_q.size, ep->remote_dev); scif_cleanup_ep_qp()
28 qp->outbound_q.rb_base = NULL; scif_cleanup_ep_qp()
30 if (qp->remote_qp) { scif_cleanup_ep_qp()
31 scif_iounmap((void *)qp->remote_qp, scif_cleanup_ep_qp()
33 qp->remote_qp = NULL; scif_cleanup_ep_qp()
35 if (qp->local_qp) { scif_cleanup_ep_qp()
36 scif_unmap_single(qp->local_qp, ep->remote_dev, scif_cleanup_ep_qp()
38 qp->local_qp = 0x0; scif_cleanup_ep_qp()
40 if (qp->local_buf) { scif_cleanup_ep_qp()
41 scif_unmap_single(qp->local_buf, ep->remote_dev, scif_cleanup_ep_qp()
43 qp->local_buf = 0; scif_cleanup_ep_qp()
50 struct scif_qp *qp = ep->qp_info.qp; scif_teardown_ep() local
52 if (qp) { scif_teardown_ep()
56 kfree(qp->inbound_q.rb_base); scif_teardown_ep()
57 kfree(qp); scif_teardown_ep()
scif_api.c:61 ep->qp_info.qp = kzalloc(sizeof(*ep->qp_info.qp), GFP_KERNEL); scif_open()
62 if (!ep->qp_info.qp) scif_open()
80 kfree(ep->qp_info.qp); scif_open()
430 * Listen status is complete so delete the qp information not needed scif_listen()
434 ep->qp_info.qp = NULL; scif_listen()
451 * allocates the local qp for the endpoint ring buffer and then sends
485 err = scif_setup_qp_connect(ep->qp_info.qp, &ep->qp_info.qp_offset, scif_conn_func()
528 ep->qp_info.qp, scif_conn_func()
694 ep->qp_info.qp->magic = SCIFEP_MAGIC; __scif_connect()
828 cep->qp_info.qp = kzalloc(sizeof(*cep->qp_info.qp), GFP_KERNEL); scif_accept()
829 if (!cep->qp_info.qp) { scif_accept()
838 cep->qp_info.qp->magic = SCIFEP_MAGIC; scif_accept()
844 err = scif_setup_qp_accept(cep->qp_info.qp, &cep->qp_info.qp_offset, scif_accept()
952 struct scif_qp *qp = ep->qp_info.qp; _scif_send() local
959 write_count = scif_rb_space(&qp->outbound_q); _scif_send()
963 ret = scif_rb_write(&qp->outbound_q, msg, _scif_send()
968 scif_rb_commit(&qp->outbound_q); _scif_send()
993 (scif_rb_space(&qp->outbound_q) >= _scif_send()
1015 struct scif_qp *qp = ep->qp_info.qp; _scif_recv() local
1022 read_count = scif_rb_count(&qp->inbound_q, remaining_len); _scif_recv()
1030 read_size = scif_rb_get_next(&qp->inbound_q, _scif_recv()
1039 scif_rb_update_read_ptr(&qp->inbound_q); _scif_recv()
1078 scif_rb_count(&qp->inbound_q, _scif_recv()
1355 if (scif_rb_count(&ep->qp_info.qp->inbound_q, 1)) __scif_pollfd()
1358 if (scif_rb_space(&ep->qp_info.qp->outbound_q)) __scif_pollfd()
scif_nodeqp.h:186 void scif_nodeqp_intrhandler(struct scif_dev *scifdev, struct scif_qp *qp);
187 int scif_loopb_msg_handler(struct scif_dev *scifdev, struct scif_qp *qp);
190 int scif_setup_qp_connect(struct scif_qp *qp, dma_addr_t *qp_offset,
192 int scif_setup_qp_accept(struct scif_qp *qp, dma_addr_t *qp_offset,
196 struct scif_qp *qp, u64 payload);
scif_epd.h:57 * @qp - Qpair for this endpoint
63 struct scif_qp *qp; member in struct:scif_endpt_qp_info
/linux-4.4.14/drivers/net/
ntb_netdev.c:77 struct ntb_transport_qp *qp; member in struct:ntb_netdev
92 ntb_transport_link_query(dev->qp)); ntb_netdev_event_handler()
95 if (ntb_transport_link_query(dev->qp)) ntb_netdev_event_handler()
102 static void ntb_netdev_rx_handler(struct ntb_transport_qp *qp, void *qp_data, ntb_netdev_rx_handler() argument
141 rc = ntb_transport_rx_enqueue(qp, skb, skb->data, ndev->mtu + ETH_HLEN); ntb_netdev_rx_handler()
150 struct ntb_transport_qp *qp, int size) __ntb_netdev_maybe_stop_tx()
160 if (likely(ntb_transport_tx_free_entry(qp) < size)) { __ntb_netdev_maybe_stop_tx()
170 struct ntb_transport_qp *qp, int size) ntb_netdev_maybe_stop_tx()
173 (ntb_transport_tx_free_entry(qp) >= size)) ntb_netdev_maybe_stop_tx()
176 return __ntb_netdev_maybe_stop_tx(ndev, qp, size); ntb_netdev_maybe_stop_tx()
179 static void ntb_netdev_tx_handler(struct ntb_transport_qp *qp, void *qp_data, ntb_netdev_tx_handler() argument
200 if (ntb_transport_tx_free_entry(dev->qp) >= tx_start) { ntb_netdev_tx_handler()
216 ntb_netdev_maybe_stop_tx(ndev, dev->qp, tx_stop); ntb_netdev_start_xmit()
218 rc = ntb_transport_tx_enqueue(dev->qp, skb, skb->data, skb->len); ntb_netdev_start_xmit()
223 ntb_netdev_maybe_stop_tx(ndev, dev->qp, tx_stop); ntb_netdev_start_xmit()
238 if (ntb_transport_tx_free_entry(dev->qp) < tx_stop) { ntb_netdev_tx_timer()
264 rc = ntb_transport_rx_enqueue(dev->qp, skb, skb->data, ntb_netdev_open()
275 ntb_transport_link_up(dev->qp); ntb_netdev_open()
281 while ((skb = ntb_transport_rx_remove(dev->qp, &len))) ntb_netdev_open()
292 ntb_transport_link_down(dev->qp); ntb_netdev_close()
294 while ((skb = ntb_transport_rx_remove(dev->qp, &len))) ntb_netdev_close()
308 if (new_mtu > ntb_transport_max_size(dev->qp) - ETH_HLEN) ntb_netdev_change_mtu()
317 ntb_transport_link_down(dev->qp); ntb_netdev_change_mtu()
322 for (i = 0; (skb = ntb_transport_rx_remove(dev->qp, &len)); i++) ntb_netdev_change_mtu()
332 rc = ntb_transport_rx_enqueue(dev->qp, skb, skb->data, ntb_netdev_change_mtu()
343 ntb_transport_link_up(dev->qp); ntb_netdev_change_mtu()
348 ntb_transport_link_down(dev->qp); ntb_netdev_change_mtu()
350 while ((skb = ntb_transport_rx_remove(dev->qp, &len))) ntb_netdev_change_mtu()
436 dev->qp = ntb_transport_create_queue(ndev, client_dev, ntb_netdev_probe()
438 if (!dev->qp) { ntb_netdev_probe()
443 ndev->mtu = ntb_transport_max_size(dev->qp) - ETH_HLEN; ntb_netdev_probe()
454 ntb_transport_free_queue(dev->qp); ntb_netdev_probe()
485 ntb_transport_free_queue(dev->qp); ntb_netdev_remove()
149 __ntb_netdev_maybe_stop_tx(struct net_device *netdev, struct ntb_transport_qp *qp, int size) __ntb_netdev_maybe_stop_tx() argument
169 ntb_netdev_maybe_stop_tx(struct net_device *ndev, struct ntb_transport_qp *qp, int size) ntb_netdev_maybe_stop_tx() argument
/linux-4.4.14/drivers/net/ethernet/mellanox/mlx4/
qp.c:40 #include <linux/mlx4/qp.h>
52 struct mlx4_qp *qp; mlx4_qp_event() local
56 qp = __mlx4_qp_lookup(dev, qpn); mlx4_qp_event()
57 if (qp) mlx4_qp_event()
58 atomic_inc(&qp->refcount); mlx4_qp_event()
62 if (!qp) { mlx4_qp_event()
67 qp->event(qp, event_type); mlx4_qp_event()
69 if (atomic_dec_and_test(&qp->refcount)) mlx4_qp_event()
70 complete(&qp->free); mlx4_qp_event()
74 static int is_master_qp0(struct mlx4_dev *dev, struct mlx4_qp *qp, int *real_qp0, int *proxy_qp0) is_master_qp0() argument
79 *proxy_qp0 = qp->qpn >= pf_proxy_offset && qp->qpn <= pf_proxy_offset + 1; is_master_qp0()
81 *real_qp0 = qp->qpn >= dev->phys_caps.base_sqpn && is_master_qp0()
82 qp->qpn <= dev->phys_caps.base_sqpn + 1; is_master_qp0()
91 int sqd_event, struct mlx4_qp *qp, int native) __mlx4_qp_modify()
145 ret = mlx4_cmd(dev, 0, qp->qpn, 2, __mlx4_qp_modify()
149 is_master_qp0(dev, qp, &real_qp0, &proxy_qp0)) { __mlx4_qp_modify()
150 port = (qp->qpn & 1) + 1; __mlx4_qp_modify()
174 cpu_to_be32(qp->qpn); __mlx4_qp_modify()
177 qp->qpn | (!!sqd_event << 31), __mlx4_qp_modify()
181 if (mlx4_is_master(dev) && is_master_qp0(dev, qp, &real_qp0, &proxy_qp0)) { __mlx4_qp_modify()
182 port = (qp->qpn & 1) + 1; __mlx4_qp_modify()
206 int sqd_event, struct mlx4_qp *qp) mlx4_qp_modify()
209 optpar, sqd_event, qp, 0); mlx4_qp_modify()
290 mlx4_warn(dev, "Failed to release qp range base:%d cnt:%d\n", mlx4_qp_release_range()
376 mlx4_warn(dev, "Failed to free icm of qp:%d\n", qpn); mlx4_qp_free_icm()
381 int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp, gfp_t gfp) mlx4_qp_alloc() argument
390 qp->qpn = qpn; mlx4_qp_alloc()
397 err = radix_tree_insert(&dev->qp_table_tree, qp->qpn & mlx4_qp_alloc()
398 (dev->caps.num_qps - 1), qp); mlx4_qp_alloc()
403 atomic_set(&qp->refcount, 1); mlx4_qp_alloc()
404 init_completion(&qp->free); mlx4_qp_alloc()
484 void mlx4_qp_remove(struct mlx4_dev *dev, struct mlx4_qp *qp) mlx4_qp_remove() argument
490 radix_tree_delete(&dev->qp_table_tree, qp->qpn & (dev->caps.num_qps - 1)); mlx4_qp_remove()
495 void mlx4_qp_free(struct mlx4_dev *dev, struct mlx4_qp *qp) mlx4_qp_free() argument
497 if (atomic_dec_and_test(&qp->refcount)) mlx4_qp_free()
498 complete(&qp->free); mlx4_qp_free()
499 wait_for_completion(&qp->free); mlx4_qp_free()
501 mlx4_qp_free_icm(dev, qp->qpn); mlx4_qp_free()
581 * (in which qp number bits 6 and/or 7 are set); the other set of subareas mlx4_create_zones()
583 * Currently, the values returned by the FW (A0 steering area starting qp number mlx4_create_zones()
820 /* In mfunc, calculate proxy and tunnel qp offsets for the PF here, mlx4_init_qp_table()
871 int mlx4_qp_query(struct mlx4_dev *dev, struct mlx4_qp *qp, mlx4_qp_query() argument
881 err = mlx4_cmd_box(dev, 0, mailbox->dma, qp->qpn, 0, mlx4_qp_query()
894 struct mlx4_qp *qp, enum mlx4_qp_state *qp_state) mlx4_qp_to_ready()
911 context, 0, 0, qp); mlx4_qp_to_ready()
87 __mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt, enum mlx4_qp_state cur_state, enum mlx4_qp_state new_state, struct mlx4_qp_context *context, enum mlx4_qp_optpar optpar, int sqd_event, struct mlx4_qp *qp, int native) __mlx4_qp_modify() argument
202 mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt, enum mlx4_qp_state cur_state, enum mlx4_qp_state new_state, struct mlx4_qp_context *context, enum mlx4_qp_optpar optpar, int sqd_event, struct mlx4_qp *qp) mlx4_qp_modify() argument
892 mlx4_qp_to_ready(struct mlx4_dev *dev, struct mlx4_mtt *mtt, struct mlx4_qp_context *context, struct mlx4_qp *qp, enum mlx4_qp_state *qp_state) mlx4_qp_to_ready() argument
Makefile:4 main.o mcg.o mr.o pd.o port.o profile.o qp.o reset.o sense.o \
mcg.c:174 /* If the given qpn is also a promisc qp, new_steering_entry()
219 mgm->qp[members_count++] = cpu_to_be32(pqp->qpn & MGM_QPN_MASK); new_steering_entry()
274 return 0; /* qp is already duplicated */ existing_steering_entry()
277 /* add the qp as a duplicate on this index */ existing_steering_entry()
302 /* if qp is not promisc, it cannot be duplicated */ check_duplicate_entry()
306 /* The qp is promisc qp so it is a duplicate on this index check_duplicate_entry()
356 u32 qpn = be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK; promisc_steering_entry()
358 /* the qp is not promisc, the entry can't be removed */ promisc_steering_entry()
478 if ((be32_to_cpu(mgm->qp[i]) & add_promisc_qp()
502 mgm->qp[members_count++] = add_promisc_qp()
526 mgm->qp[members_count++] = cpu_to_be32(dqp->qpn & MGM_QPN_MASK); add_promisc_qp()
592 mgm->qp[members_count++] = cpu_to_be32(dqp->qpn & MGM_QPN_MASK); remove_promisc_qp()
637 if ((be32_to_cpu(mgm->qp[i]) & remove_promisc_qp()
653 mgm->qp[loc] = mgm->qp[members_count - 1]; remove_promisc_qp()
654 mgm->qp[members_count - 1] = 0; remove_promisc_qp()
909 "port = %d prio = 0x%x qp = 0x%x ", mlx4_err_rule()
1097 int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], mlx4_qp_attach_common() argument
1152 if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qp->qpn) { mlx4_qp_attach_common()
1153 mlx4_dbg(dev, "QP %06x already a member of MGM\n", qp->qpn); mlx4_qp_attach_common()
1159 mgm->qp[members_count++] = cpu_to_be32((qp->qpn & MGM_QPN_MASK) | mlx4_qp_attach_common()
1162 mgm->qp[members_count++] = cpu_to_be32(qp->qpn & MGM_QPN_MASK); mlx4_qp_attach_common()
1188 index, qp->qpn); mlx4_qp_attach_common()
1191 index, qp->qpn); mlx4_qp_attach_common()
1207 int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], mlx4_qp_detach_common() argument
1242 check_duplicate_entry(dev, port, steer, index, qp->qpn) && mlx4_qp_detach_common()
1243 !promisc_steering_entry(dev, port, steer, index, qp->qpn, NULL)) mlx4_qp_detach_common()
1248 if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qp->qpn) { mlx4_qp_detach_common()
1254 mlx4_err(dev, "QP %06x not found in MGM\n", qp->qpn); mlx4_qp_detach_common()
1260 mgm->qp[loc] = mgm->qp[members_count - 1]; mlx4_qp_detach_common()
1261 mgm->qp[members_count - 1] = 0; mlx4_qp_detach_common()
1266 index, qp->qpn); mlx4_qp_detach_common()
1328 static int mlx4_QP_ATTACH(struct mlx4_dev *dev, struct mlx4_qp *qp, mlx4_QP_ATTACH() argument
1344 qpn = qp->qpn; mlx4_QP_ATTACH()
1360 int mlx4_trans_to_dmfs_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, mlx4_trans_to_dmfs_attach() argument
1377 rule.qpn = qp->qpn; mlx4_trans_to_dmfs_attach()
1400 int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], mlx4_multicast_attach() argument
1414 return mlx4_QP_ATTACH(dev, qp, gid, 1, mlx4_multicast_attach()
1416 return mlx4_qp_attach_common(dev, qp, gid, mlx4_multicast_attach()
1421 return mlx4_trans_to_dmfs_attach(dev, qp, gid, port, mlx4_multicast_attach()
1430 int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], mlx4_multicast_detach() argument
1443 return mlx4_QP_ATTACH(dev, qp, gid, 0, 0, prot); mlx4_multicast_detach()
1445 return mlx4_qp_detach_common(dev, qp, gid, prot, mlx4_multicast_detach()
1516 struct mlx4_qp *qp, u8 gid[16], mlx4_unicast_attach()
1523 return mlx4_QP_ATTACH(dev, qp, gid, 1, mlx4_unicast_attach()
1526 return mlx4_qp_attach_common(dev, qp, gid, block_mcast_loopback, mlx4_unicast_attach()
1531 int mlx4_unicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, mlx4_unicast_detach() argument
1538 return mlx4_QP_ATTACH(dev, qp, gid, 0, 0, prot); mlx4_unicast_detach()
1540 return mlx4_qp_detach_common(dev, qp, gid, prot, MLX4_UC_STEER); mlx4_unicast_detach()
1515 mlx4_unicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], int block_mcast_loopback, enum mlx4_protocol prot) mlx4_unicast_attach() argument
H A Den_resources.c36 #include <linux/mlx4/qp.h>
76 /* disable multicast loopback to qp with same counter */ mlx4_en_fill_qp_context()
87 en_dbg(HW, priv, "Setting RX qp %x tunnel mode to RX tunneled & non-tunneled\n", qpn); mlx4_en_fill_qp_context()
92 int mlx4_en_change_mcast_lb(struct mlx4_en_priv *priv, struct mlx4_qp *qp, mlx4_en_change_mcast_lb() argument
102 ret = mlx4_update_qp(priv->mdev->dev, qp->qpn, mlx4_en_change_mcast_lb()
140 void mlx4_en_sqp_event(struct mlx4_qp *qp, enum mlx4_event event) mlx4_en_sqp_event() argument
H A Dresource_tracker.c43 #include <linux/mlx4/qp.h>
118 /* saved qp params before VST enforcement in order to restore on VGT */
444 dev->quotas.qp = dev->caps.num_qps - dev->caps.reserved_qps - mlx4_init_quotas()
454 dev->quotas.qp = mlx4_init_quotas()
1263 pr_err("resource tracker: fail to remove qp, state %d, ref_count %d\n", remove_qp_ok()
1428 enum res_qp_states state, struct res_qp **qp, qp_res_start_move_to()
1482 if (qp) qp_res_start_move_to()
1483 *qp = r; qp_res_start_move_to()
2866 /* adjust qkey in qp context */ adjust_proxy_tun_qkey()
2883 struct res_qp *qp; mlx4_RST2INIT_QP_wrapper() local
2900 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0); mlx4_RST2INIT_QP_wrapper()
2903 qp->local_qpn = local_qpn; mlx4_RST2INIT_QP_wrapper()
2904 qp->sched_queue = 0; mlx4_RST2INIT_QP_wrapper()
2905 qp->param3 = 0; mlx4_RST2INIT_QP_wrapper()
2906 qp->vlan_control = 0; mlx4_RST2INIT_QP_wrapper()
2907 qp->fvl_rx = 0; mlx4_RST2INIT_QP_wrapper()
2908 qp->pri_path_fl = 0; mlx4_RST2INIT_QP_wrapper()
2909 qp->vlan_index = 0; mlx4_RST2INIT_QP_wrapper()
2910 qp->feup = 0; mlx4_RST2INIT_QP_wrapper()
2911 qp->qpc_flags = be32_to_cpu(qpc->flags); mlx4_RST2INIT_QP_wrapper()
2944 qp->mtt = mtt; mlx4_RST2INIT_QP_wrapper()
2946 qp->rcq = rcq; mlx4_RST2INIT_QP_wrapper()
2948 qp->scq = scq; mlx4_RST2INIT_QP_wrapper()
2956 qp->srq = srq; mlx4_RST2INIT_QP_wrapper()
3659 struct res_qp *qp; mlx4_GEN_QP_wrapper() local
3661 err = get_res(dev, slave, qpn, RES_QP, &qp); mlx4_GEN_QP_wrapper()
3664 if (qp->com.from_state != RES_QP_HW) { mlx4_GEN_QP_wrapper()
3748 struct res_qp *qp; mlx4_INIT2RTR_QP_wrapper() local
3772 err = get_res(dev, slave, qpn, RES_QP, &qp); mlx4_INIT2RTR_QP_wrapper()
3775 if (qp->com.from_state != RES_QP_HW) { mlx4_INIT2RTR_QP_wrapper()
3791 qp->sched_queue = orig_sched_queue; mlx4_INIT2RTR_QP_wrapper()
3792 qp->param3 = orig_param3; mlx4_INIT2RTR_QP_wrapper()
3793 qp->vlan_control = orig_vlan_control; mlx4_INIT2RTR_QP_wrapper()
3794 qp->fvl_rx = orig_fvl_rx; mlx4_INIT2RTR_QP_wrapper()
3795 qp->pri_path_fl = orig_pri_path_fl; mlx4_INIT2RTR_QP_wrapper()
3796 qp->vlan_index = orig_vlan_index; mlx4_INIT2RTR_QP_wrapper()
3797 qp->feup = orig_feup; mlx4_INIT2RTR_QP_wrapper()
3914 struct res_qp *qp; mlx4_2RST_QP_wrapper() local
3916 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED, &qp, 0); mlx4_2RST_QP_wrapper()
3923 atomic_dec(&qp->mtt->ref_count); mlx4_2RST_QP_wrapper()
3924 atomic_dec(&qp->rcq->ref_count); mlx4_2RST_QP_wrapper()
3925 atomic_dec(&qp->scq->ref_count); mlx4_2RST_QP_wrapper()
3926 if (qp->srq) mlx4_2RST_QP_wrapper()
3927 atomic_dec(&qp->srq->ref_count); mlx4_2RST_QP_wrapper()
3999 static int qp_attach(struct mlx4_dev *dev, int slave, struct mlx4_qp *qp, qp_attach() argument
4008 return mlx4_trans_to_dmfs_attach(dev, qp, gid, port, qp_attach()
4019 return mlx4_qp_attach_common(dev, qp, gid, qp_attach()
4026 static int qp_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, qp_detach() argument
4034 return mlx4_qp_detach_common(dev, qp, gid, prot, type); qp_detach()
4065 struct mlx4_qp qp; /* dummy for calling attach/detach */ mlx4_QP_ATTACH_wrapper() local
4082 qp.qpn = qpn; mlx4_QP_ATTACH_wrapper()
4084 err = qp_attach(dev, slave, &qp, gid, block_loopback, prot, mlx4_QP_ATTACH_wrapper()
4087 pr_err("Fail to attach rule to qp 0x%x\n", qpn); mlx4_QP_ATTACH_wrapper()
4102 err = qp_detach(dev, &qp, gid, prot, type, reg_id); mlx4_QP_ATTACH_wrapper()
4104 pr_err("Fail to detach rule from qp 0x%x reg_id = 0x%llx\n", mlx4_QP_ATTACH_wrapper()
4111 qp_detach(dev, &qp, gid, prot, type, reg_id); mlx4_QP_ATTACH_wrapper()
4443 struct mlx4_qp qp; /* dummy for calling attach/detach */ detach_qp() local
4451 qp.qpn = rqp->local_qpn; detach_qp()
4452 (void) mlx4_qp_detach_common(dev, &qp, rgid->gid, detach_qp()
4523 struct res_qp *qp; rem_slave_qps() local
4536 list_for_each_entry_safe(qp, tmp, qp_list, com.list) { list_for_each_entry_safe()
4538 if (qp->com.owner == slave) { list_for_each_entry_safe()
4539 qpn = qp->com.res_id; list_for_each_entry_safe()
4540 detach_qp(dev, slave, qp); list_for_each_entry_safe()
4541 state = qp->com.from_state; list_for_each_entry_safe()
4546 rb_erase(&qp->com.node, list_for_each_entry_safe()
4548 list_del(&qp->com.list); list_for_each_entry_safe()
4555 kfree(qp); list_for_each_entry_safe()
4566 qp->local_qpn, 2, list_for_each_entry_safe()
4572 slave, qp->local_qpn); list_for_each_entry_safe()
4573 atomic_dec(&qp->rcq->ref_count); list_for_each_entry_safe()
4574 atomic_dec(&qp->scq->ref_count); list_for_each_entry_safe()
4575 atomic_dec(&qp->mtt->ref_count); list_for_each_entry_safe()
4576 if (qp->srq) list_for_each_entry_safe()
4577 atomic_dec(&qp->srq->ref_count); list_for_each_entry_safe()
5053 struct res_qp *qp; mlx4_vf_immed_vlan_work_handler() local
5076 mlx4_warn(dev, "Trying to update-qp in slave %d\n", mlx4_vf_immed_vlan_work_handler()
5103 list_for_each_entry_safe(qp, tmp, qp_list, com.list) { list_for_each_entry_safe()
5105 if (qp->com.owner == work->slave) { list_for_each_entry_safe()
5106 if (qp->com.from_state != RES_QP_HW || list_for_each_entry_safe()
5107 !qp->sched_queue || /* no INIT2RTR trans yet */ list_for_each_entry_safe()
5108 mlx4_is_qp_reserved(dev, qp->local_qpn) || list_for_each_entry_safe()
5109 qp->qpc_flags & (1 << MLX4_RSS_QPC_FLAG_OFFSET)) { list_for_each_entry_safe()
5113 port = (qp->sched_queue >> 6 & 1) + 1; list_for_each_entry_safe()
5118 if (MLX4_QP_ST_RC == ((qp->qpc_flags >> 16) & 0xff)) list_for_each_entry_safe()
5124 upd_context->qp_context.param3 = qp->param3; list_for_each_entry_safe()
5125 upd_context->qp_context.pri_path.vlan_control = qp->vlan_control; list_for_each_entry_safe()
5126 upd_context->qp_context.pri_path.fvl_rx = qp->fvl_rx; list_for_each_entry_safe()
5127 upd_context->qp_context.pri_path.vlan_index = qp->vlan_index; list_for_each_entry_safe()
5128 upd_context->qp_context.pri_path.fl = qp->pri_path_fl; list_for_each_entry_safe()
5129 upd_context->qp_context.pri_path.feup = qp->feup; list_for_each_entry_safe()
5131 qp->sched_queue; list_for_each_entry_safe()
5133 upd_context->qp_context.param3 = qp->param3 & ~cpu_to_be32(MLX4_STRIP_VLAN); list_for_each_entry_safe()
5137 qp->fvl_rx | MLX4_FVL_RX_FORCE_ETH_VLAN; list_for_each_entry_safe()
5139 qp->pri_path_fl | MLX4_FL_CV | MLX4_FL_ETH_HIDE_CQE_VLAN; list_for_each_entry_safe()
5141 qp->feup | MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN; list_for_each_entry_safe()
5143 qp->sched_queue & 0xC7; list_for_each_entry_safe()
5154 qp->local_qpn & 0xffffff, list_for_each_entry_safe()
5159 work->slave, port, qp->local_qpn, err); list_for_each_entry_safe()
1427 qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn, enum res_qp_states state, struct res_qp **qp, int alloc) qp_res_start_move_to() argument
H A Den_rx.c37 #include <linux/mlx4/qp.h>
1125 struct mlx4_qp *qp) mlx4_en_config_rss_qp()
1135 err = mlx4_qp_alloc(mdev->dev, qpn, qp, GFP_KERNEL); mlx4_en_config_rss_qp()
1137 en_err(priv, "Failed to allocate qp #%x\n", qpn); mlx4_en_config_rss_qp()
1140 qp->event = mlx4_en_sqp_event; mlx4_en_config_rss_qp()
1157 err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, context, qp, state); mlx4_en_config_rss_qp()
1159 mlx4_qp_remove(mdev->dev, qp); mlx4_en_config_rss_qp()
1160 mlx4_qp_free(mdev->dev, qp); mlx4_en_config_rss_qp()
1181 en_err(priv, "Failed allocating drop qp\n"); mlx4_en_create_drop_qp()
1199 /* Allocate rx qp's and configure them according to rss map */ mlx4_en_config_rss_steer()
1234 /* Configure RSS indirection qp */ mlx4_en_config_rss_steer()
1122 mlx4_en_config_rss_qp(struct mlx4_en_priv *priv, int qpn, struct mlx4_en_rx_ring *ring, enum mlx4_qp_state *state, struct mlx4_qp *qp) mlx4_en_config_rss_qp() argument
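The en_rx.c hits above follow a recurring mlx4 bring-up pattern: allocate the QP for an already-reserved QPN, fill a context, drive it to ready in one call, and unwind with remove/free on failure. A minimal sketch of that pattern, assuming the caller has already reserved `qpn`, built the ring's MTT, and has the mlx4_en driver headers in scope; the helper name and the empty context fill are illustrative, while mlx4_qp_alloc(), mlx4_qp_to_ready(), mlx4_qp_remove(), mlx4_qp_free() and mlx4_en_sqp_event() are the calls shown in the hits.

    #include <linux/mlx4/qp.h>

    /* Sketch only: mirrors mlx4_en_config_rss_qp() above. */
    static int example_bring_up_rx_qp(struct mlx4_dev *dev, int qpn,
                                      struct mlx4_mtt *mtt, struct mlx4_qp *qp,
                                      enum mlx4_qp_state *state)
    {
            struct mlx4_qp_context context;
            int err;

            err = mlx4_qp_alloc(dev, qpn, qp, GFP_KERNEL);
            if (err)
                    return err;

            qp->event = mlx4_en_sqp_event;          /* async event callback */

            memset(&context, 0, sizeof(context));
            /* ... fill MTU, CQ number, doorbell record, etc. here ... */

            /* walks the QP through RST -> INIT -> RTR -> RTS */
            err = mlx4_qp_to_ready(dev, mtt, &context, qp, state);
            if (err) {
                    mlx4_qp_remove(dev, qp);        /* unhash from the device */
                    mlx4_qp_free(dev, qp);          /* release the resources */
            }
            return err;
    }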
H A Den_tx.c37 #include <linux/mlx4/qp.h>
119 en_err(priv, "failed reserving qp for TX ring\n"); mlx4_en_create_tx_ring()
123 err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->qp, GFP_KERNEL); mlx4_en_create_tx_ring()
125 en_err(priv, "Failed allocating qp %d\n", ring->qpn); mlx4_en_create_tx_ring()
128 ring->qp.event = mlx4_en_sqp_event; mlx4_en_create_tx_ring()
182 mlx4_qp_remove(mdev->dev, &ring->qp); mlx4_en_destroy_tx_ring()
183 mlx4_qp_free(mdev->dev, &ring->qp); mlx4_en_destroy_tx_ring()
210 ring->doorbell_qpn = cpu_to_be32(ring->qp.qpn << 8); mlx4_en_activate_tx_ring()
219 &ring->qp, &ring->qp_state); mlx4_en_activate_tx_ring()
233 MLX4_QP_STATE_RST, NULL, 0, 0, &ring->qp); mlx4_en_deactivate_tx_ring()
H A Den_netdev.c502 struct mlx4_qp qp; mlx4_en_uc_steer_add() local
505 qp.qpn = *qpn; mlx4_en_uc_steer_add()
509 err = mlx4_unicast_attach(dev, &qp, gid, 0, MLX4_PROT_ETH); mlx4_en_uc_steer_add()
553 struct mlx4_qp qp; mlx4_en_uc_steer_release() local
556 qp.qpn = qpn; mlx4_en_uc_steer_release()
560 mlx4_unicast_detach(dev, &qp, gid, MLX4_PROT_ETH); mlx4_en_uc_steer_release()
598 en_dbg(DRV, priv, "Reserved qp %d\n", *qpn); mlx4_en_get_qp()
600 en_err(priv, "Failed to reserve qp for mac registration\n"); mlx4_en_get_qp()
620 en_dbg(DRV, priv, "Releasing qp: port %d, qpn %d\n", mlx4_en_put_qp()
833 /* Add the default qp number as multicast mlx4_en_set_promisc_mode()
927 /* Add the default qp number as multicast promisc */ mlx4_en_do_multicast()
1599 /* Set qp number */ mlx4_en_start_port()
1600 en_dbg(DRV, priv, "Getting qp number for port %d\n", priv->port); mlx4_en_start_port()
1603 en_err(priv, "Failed getting eth qp\n"); mlx4_en_start_port()
1671 /* Set default qp number */ mlx4_en_start_port()
1674 en_err(priv, "Failed setting default qp numbers\n"); mlx4_en_start_port()
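mlx4_en_uc_steer_add()/mlx4_en_uc_steer_release() above rely on a small idiom: a throw-away struct mlx4_qp whose only job is to carry the QP number into the steering attach/detach calls. A hedged sketch of that idiom, with an illustrative wrapper name and parameters; the mlx4_unicast_attach()/mlx4_unicast_detach() signatures are the ones shown in the hits.

    #include <linux/mlx4/device.h>
    #include <linux/mlx4/qp.h>

    /* Sketch: attach or detach a unicast steering rule for a given QPN. */
    static int example_uc_steer(struct mlx4_dev *dev, u32 qpn, u8 gid[16], bool add)
    {
            struct mlx4_qp qp;      /* dummy, only ->qpn is consumed */

            qp.qpn = qpn;
            if (add)
                    return mlx4_unicast_attach(dev, &qp, gid,
                                               0 /* no mcast loopback block */,
                                               MLX4_PROT_ETH);
            return mlx4_unicast_detach(dev, &qp, gid, MLX4_PROT_ETH);
    }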
H A Dmlx4_en.h51 #include <linux/mlx4/qp.h>
278 struct mlx4_qp qp; member in struct:mlx4_en_tx_ring
798 void mlx4_en_sqp_event(struct mlx4_qp *qp, enum mlx4_event event);
801 int mlx4_en_change_mcast_lb(struct mlx4_en_priv *priv, struct mlx4_qp *qp,
H A Den_cq.c35 #include <linux/mlx4/qp.h>
H A Dmlx4.h618 __be32 qp[MLX4_MAX_QP_PER_MGM]; member in struct:mlx4_mgm
1293 int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
1295 int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
1298 int mlx4_trans_to_dmfs_attach(struct mlx4_dev *dev, struct mlx4_qp *qp,
/linux-4.4.14/drivers/net/ethernet/ibm/ehea/
H A Dehea_qmr.c377 static int ehea_qp_alloc_register(struct ehea_qp *qp, struct hw_queue *hw_queue, ehea_qp_alloc_register() argument
398 qp->fw_handle, rpage, 1); ehea_qp_alloc_register()
422 struct ehea_qp *qp; ehea_create_qp() local
427 qp = kzalloc(sizeof(*qp), GFP_KERNEL); ehea_create_qp()
428 if (!qp) ehea_create_qp()
431 qp->adapter = adapter; ehea_create_qp()
434 &qp->fw_handle, &qp->epas); ehea_create_qp()
445 ret = ehea_qp_alloc_register(qp, &qp->hw_squeue, init_attr->nr_sq_pages, ehea_create_qp()
454 ret = ehea_qp_alloc_register(qp, &qp->hw_rqueue1, ehea_create_qp()
465 ret = ehea_qp_alloc_register(qp, &qp->hw_rqueue2, ehea_create_qp()
477 ret = ehea_qp_alloc_register(qp, &qp->hw_rqueue3, ehea_create_qp()
488 qp->init_attr = *init_attr; ehea_create_qp()
490 return qp; ehea_create_qp()
493 hw_queue_dtor(&qp->hw_rqueue2); ehea_create_qp()
496 hw_queue_dtor(&qp->hw_rqueue1); ehea_create_qp()
499 hw_queue_dtor(&qp->hw_squeue); ehea_create_qp()
502 ehea_h_disable_and_get_hea(adapter->handle, qp->fw_handle); ehea_create_qp()
503 ehea_h_free_resource(adapter->handle, qp->fw_handle, FORCE_FREE); ehea_create_qp()
506 kfree(qp); ehea_create_qp()
510 static u64 ehea_destroy_qp_res(struct ehea_qp *qp, u64 force) ehea_destroy_qp_res() argument
513 struct ehea_qp_init_attr *qp_attr = &qp->init_attr; ehea_destroy_qp_res()
516 ehea_h_disable_and_get_hea(qp->adapter->handle, qp->fw_handle); ehea_destroy_qp_res()
517 hret = ehea_h_free_resource(qp->adapter->handle, qp->fw_handle, force); ehea_destroy_qp_res()
521 hw_queue_dtor(&qp->hw_squeue); ehea_destroy_qp_res()
522 hw_queue_dtor(&qp->hw_rqueue1); ehea_destroy_qp_res()
525 hw_queue_dtor(&qp->hw_rqueue2); ehea_destroy_qp_res()
527 hw_queue_dtor(&qp->hw_rqueue3); ehea_destroy_qp_res()
528 kfree(qp); ehea_destroy_qp_res()
533 int ehea_destroy_qp(struct ehea_qp *qp) ehea_destroy_qp() argument
536 if (!qp) ehea_destroy_qp()
539 hcp_epas_dtor(&qp->epas); ehea_destroy_qp()
541 hret = ehea_destroy_qp_res(qp, NORMAL_FREE); ehea_destroy_qp()
543 ehea_error_data(qp->adapter, qp->fw_handle, &aer, &aerr); ehea_destroy_qp()
544 hret = ehea_destroy_qp_res(qp, FORCE_FREE); ehea_destroy_qp()
H A Dehea_hw.h218 static inline void ehea_update_sqa(struct ehea_qp *qp, u16 nr_wqes) ehea_update_sqa() argument
220 struct h_epa epa = qp->epas.kernel; ehea_update_sqa()
225 static inline void ehea_update_rq3a(struct ehea_qp *qp, u16 nr_wqes) ehea_update_rq3a() argument
227 struct h_epa epa = qp->epas.kernel; ehea_update_rq3a()
232 static inline void ehea_update_rq2a(struct ehea_qp *qp, u16 nr_wqes) ehea_update_rq2a() argument
234 struct h_epa epa = qp->epas.kernel; ehea_update_rq2a()
239 static inline void ehea_update_rq1a(struct ehea_qp *qp, u16 nr_wqes) ehea_update_rq1a() argument
241 struct h_epa epa = qp->epas.kernel; ehea_update_rq1a()
H A Dehea_qmr.h306 static inline struct ehea_rwqe *ehea_get_next_rwqe(struct ehea_qp *qp, ehea_get_next_rwqe() argument
312 queue = &qp->hw_rqueue1; ehea_get_next_rwqe()
314 queue = &qp->hw_rqueue2; ehea_get_next_rwqe()
316 queue = &qp->hw_rqueue3; ehea_get_next_rwqe()
339 static inline struct ehea_cqe *ehea_poll_rq1(struct ehea_qp *qp, int *wqe_index) ehea_poll_rq1() argument
341 struct hw_queue *queue = &qp->hw_rqueue1; ehea_poll_rq1()
352 static inline void ehea_inc_rq1(struct ehea_qp *qp) ehea_inc_rq1() argument
354 hw_qeit_inc(&qp->hw_rqueue1); ehea_inc_rq1()
386 int ehea_destroy_qp(struct ehea_qp *qp);
H A Dehea_main.c207 arr[i++].fwh = pr->qp->fw_handle; ehea_update_firmware_handles()
430 ehea_update_rq1a(pr->qp, adder); ehea_refill_rq1()
450 ehea_update_rq1a(pr->qp, i - 1); ehea_init_fill_rq1()
458 struct ehea_qp *qp = pr->qp; ehea_refill_rq_def() local
500 rwqe = ehea_get_next_rwqe(qp, rq_nr); ehea_refill_rq_def()
520 ehea_update_rq2a(pr->qp, adder); ehea_refill_rq_def()
522 ehea_update_rq3a(pr->qp, adder); ehea_refill_rq_def()
654 pr->qp->init_attr.qp_nr); ehea_treat_poll_error()
669 struct ehea_qp *qp = pr->qp; ehea_proc_rwqes() local
685 cqe = ehea_poll_rq1(qp, &wqe_index); ehea_proc_rwqes()
687 ehea_inc_rq1(qp); ehea_proc_rwqes()
752 cqe = ehea_poll_rq1(qp, &wqe_index); ehea_proc_rwqes()
788 swqe = ehea_get_swqe(pr->qp, &swqe_index); check_sqs()
798 ehea_post_swqe(pr->qp, swqe); check_sqs()
909 cqe = ehea_poll_rq1(pr->qp, &wqe_index); ehea_poll()
949 struct ehea_qp *qp; ehea_qp_aff_irq_handler() local
961 qp = port->port_res[qp_token].qp; ehea_qp_aff_irq_handler()
963 resource_type = ehea_error_data(port->adapter, qp->fw_handle, ehea_qp_aff_irq_handler()
1278 struct ehea_qp_init_attr *init_attr = &pr->qp->init_attr; ehea_fill_port_res()
1392 port->port_res[i].qp->init_attr.qp_nr; ehea_configure_port()
1395 port->port_res[0].qp->init_attr.qp_nr; ehea_configure_port()
1540 pr->qp = ehea_create_qp(adapter, adapter->pd, init_attr); ehea_init_port_res()
1541 if (!pr->qp) { ehea_init_port_res()
1585 ehea_destroy_qp(pr->qp); ehea_init_port_res()
1597 if (pr->qp) ehea_clean_portres()
1600 ret = ehea_destroy_qp(pr->qp); ehea_clean_portres()
1829 cb7->def_uc_qpn = enable == 1 ? port->port_res[0].qp->fw_handle : 0; ehea_promiscuous()
2063 swqe = ehea_get_swqe(pr->qp, &swqe_index); ehea_start_xmit()
2106 "post swqe on QP %d\n", pr->qp->init_attr.qp_nr); ehea_start_xmit()
2115 ehea_post_swqe(pr->qp, swqe); ehea_start_xmit()
2201 static int ehea_activate_qp(struct ehea_adapter *adapter, struct ehea_qp *qp) ehea_activate_qp() argument
2215 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle, ehea_activate_qp()
2223 hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle, ehea_activate_qp()
2231 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle, ehea_activate_qp()
2239 hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle, ehea_activate_qp()
2247 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle, ehea_activate_qp()
2255 hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle, ehea_activate_qp()
2263 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle, ehea_activate_qp()
2384 ret = ehea_activate_qp(port->adapter, port->port_res[i].qp); ehea_up()
2511 struct ehea_qp qp = *orig_qp; ehea_purge_sq() local
2512 struct ehea_qp_init_attr *init_attr = &qp.init_attr; ehea_purge_sq()
2518 swqe = ehea_get_swqe(&qp, &wqe_index); ehea_purge_sq()
2563 struct ehea_qp *qp = pr->qp; ehea_stop_qps() local
2566 ehea_purge_sq(qp); ehea_stop_qps()
2569 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle, ehea_stop_qps()
2580 hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle, ehea_stop_qps()
2589 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle, ehea_stop_qps()
2614 struct ehea_qp qp = *orig_qp; ehea_update_rqs() local
2615 struct ehea_qp_init_attr *init_attr = &qp.init_attr; ehea_update_rqs()
2627 rwqe = ehea_get_next_rwqe(&qp, 2); ehea_update_rqs()
2636 rwqe = ehea_get_next_rwqe(&qp, 3); ehea_update_rqs()
2665 struct ehea_qp *qp = pr->qp; ehea_restart_qps() local
2673 ehea_update_rqs(qp, pr); ehea_restart_qps()
2676 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle, ehea_restart_qps()
2687 hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle, ehea_restart_qps()
2696 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle, ehea_restart_qps()
/linux-4.4.14/drivers/infiniband/ulp/ipoib/
H A Dipoib_verbs.c60 ret = ib_modify_qp(priv->qp, qp_attr, IB_QP_QKEY); ipoib_mcast_attach()
68 ret = ib_attach_mcast(priv->qp, mgid, mlid); ipoib_mcast_attach()
96 ret = ib_modify_qp(priv->qp, &qp_attr, attr_mask); ipoib_init_qp()
105 ret = ib_modify_qp(priv->qp, &qp_attr, attr_mask); ipoib_init_qp()
115 ret = ib_modify_qp(priv->qp, &qp_attr, attr_mask); ipoib_init_qp()
125 if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE)) ipoib_init_qp()
211 priv->qp = ib_create_qp(priv->pd, &init_attr); ipoib_transport_dev_init()
212 if (IS_ERR(priv->qp)) { ipoib_transport_dev_init()
217 priv->dev->dev_addr[1] = (priv->qp->qp_num >> 16) & 0xff; ipoib_transport_dev_init()
218 priv->dev->dev_addr[2] = (priv->qp->qp_num >> 8) & 0xff; ipoib_transport_dev_init()
219 priv->dev->dev_addr[3] = (priv->qp->qp_num ) & 0xff; ipoib_transport_dev_init()
261 if (priv->qp) { ipoib_transport_dev_cleanup()
262 if (ib_destroy_qp(priv->qp)) ipoib_transport_dev_cleanup()
265 priv->qp = NULL; ipoib_transport_dev_cleanup()
H A Dipoib_cm.c128 ret = ib_post_recv(rx->qp, wr, &bad_wr); ipoib_cm_post_receive_nonsrq()
226 if (ib_post_send(p->qp, &ipoib_cm_rx_drain_wr, &bad_wr)) ipoib_cm_start_rx_drain()
273 struct ib_cm_id *cm_id, struct ib_qp *qp, ipoib_cm_modify_rx_qp()
286 ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask); ipoib_cm_modify_rx_qp()
298 ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask); ipoib_cm_modify_rx_qp()
318 ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask); ipoib_cm_modify_rx_qp()
420 struct ib_qp *qp, struct ib_cm_req_event_param *req, ipoib_cm_send_rep()
427 data.qpn = cpu_to_be32(priv->qp->qp_num); ipoib_cm_send_rep()
435 rep.qp_num = qp->qp_num; ipoib_cm_send_rep()
459 p->qp = ipoib_cm_create_rx_qp(dev, p); ipoib_cm_req_handler()
460 if (IS_ERR(p->qp)) { ipoib_cm_req_handler()
461 ret = PTR_ERR(p->qp); ipoib_cm_req_handler()
466 ret = ipoib_cm_modify_rx_qp(dev, cm_id, p->qp, psn); ipoib_cm_req_handler()
486 ret = ipoib_cm_send_rep(dev, cm_id, p->qp, &event->param.req_rcvd, psn); ipoib_cm_req_handler()
489 if (ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE)) ipoib_cm_req_handler()
490 ipoib_warn(priv, "unable to move qp to error state\n"); ipoib_cm_req_handler()
495 ib_destroy_qp(p->qp); ipoib_cm_req_handler()
517 if (ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE)) ipoib_cm_rx_handler()
518 ipoib_warn(priv, "unable to move qp to error state\n"); ipoib_cm_rx_handler()
587 p = wc->qp->qp_context; ipoib_cm_handle_rx_wc()
705 return ib_post_send(tx->qp, &priv->tx_wr.wr, &bad_wr); post_send()
724 tx->tx_head, skb->len, tx->qp->qp_num); ipoib_cm_send()
757 tx->qp->qp_num); ipoib_cm_send()
772 struct ipoib_cm_tx *tx = wc->qp->qp_context; ipoib_cm_handle_tx_wc()
850 ret = ib_cm_listen(priv->cm.id, cpu_to_be64(IPOIB_CM_IETF_ID | priv->qp->qp_num), ipoib_cm_dev_open()
854 IPOIB_CM_IETF_ID | priv->qp->qp_num); ipoib_cm_dev_open()
879 ib_destroy_qp(rx->qp); ipoib_cm_free_rx_reap_list()
909 ret = ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE); ipoib_cm_dev_stop()
911 ipoib_warn(priv, "unable to move qp to error state: %d\n", ret); ipoib_cm_dev_stop()
974 ret = ib_modify_qp(p->qp, &qp_attr, qp_attr_mask); ipoib_cm_rep_handler()
986 ret = ib_modify_qp(p->qp, &qp_attr, qp_attr_mask); ipoib_cm_rep_handler()
1047 struct ib_cm_id *id, struct ib_qp *qp, ipoib_cm_send_req()
1055 data.qpn = cpu_to_be32(priv->qp->qp_num); ipoib_cm_send_req()
1061 req.qp_num = qp->qp_num; ipoib_cm_send_req()
1062 req.qp_type = qp->qp_type; ipoib_cm_send_req()
1084 struct ib_cm_id *cm_id, struct ib_qp *qp) ipoib_cm_modify_tx_init()
1100 ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask); ipoib_cm_modify_tx_init()
1123 p->qp = ipoib_cm_create_tx_qp(p->dev, p); ipoib_cm_tx_init()
1124 if (IS_ERR(p->qp)) { ipoib_cm_tx_init()
1125 ret = PTR_ERR(p->qp); ipoib_cm_tx_init()
1126 ipoib_warn(priv, "failed to allocate tx qp: %d\n", ret); ipoib_cm_tx_init()
1137 ret = ipoib_cm_modify_tx_init(p->dev, p->id, p->qp); ipoib_cm_tx_init()
1139 ipoib_warn(priv, "failed to modify tx qp to rtr: %d\n", ret); ipoib_cm_tx_init()
1143 ret = ipoib_cm_send_req(p->dev, p->id, p->qp, qpn, pathrec); ipoib_cm_tx_init()
1150 p->qp->qp_num, pathrec->dgid.raw, qpn); ipoib_cm_tx_init()
1159 ib_destroy_qp(p->qp); ipoib_cm_tx_init()
1161 p->qp = NULL; ipoib_cm_tx_init()
1174 p->qp ? p->qp->qp_num : 0, p->tx_head, p->tx_tail); ipoib_cm_tx_destroy()
1208 if (p->qp) ipoib_cm_tx_destroy()
1209 ib_destroy_qp(p->qp); ipoib_cm_tx_destroy()
1442 ret = ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE); ipoib_cm_stale_task()
1444 ipoib_warn(priv, "unable to move qp to error state: %d\n", ret); ipoib_cm_stale_task()
272 ipoib_cm_modify_rx_qp(struct net_device *dev, struct ib_cm_id *cm_id, struct ib_qp *qp, unsigned psn) ipoib_cm_modify_rx_qp() argument
419 ipoib_cm_send_rep(struct net_device *dev, struct ib_cm_id *cm_id, struct ib_qp *qp, struct ib_cm_req_event_param *req, unsigned psn) ipoib_cm_send_rep() argument
1046 ipoib_cm_send_req(struct net_device *dev, struct ib_cm_id *id, struct ib_qp *qp, u32 qpn, struct ib_sa_path_rec *pathrec) ipoib_cm_send_req() argument
1083 ipoib_cm_modify_tx_init(struct net_device *dev, struct ib_cm_id *cm_id, struct ib_qp *qp) ipoib_cm_modify_tx_init() argument
H A Dipoib_ib.c113 ret = ib_post_recv(priv->qp, &priv->rx_wr, &bad_wr); ipoib_ib_post_receive()
210 if (wc->slid == priv->local_lid && wc->src_qp == priv->qp->qp_num) ipoib_ib_handle_rx_wc()
345 ret = ib_query_qp(priv->qp, &qp_attr, IB_QP_STATE, &query_init_attr); ipoib_qp_state_validate_work()
352 __func__, priv->qp->qp_num, qp_attr.qp_state); ipoib_qp_state_validate_work()
358 ret = ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE); ipoib_qp_state_validate_work()
361 ret, priv->qp->qp_num); ipoib_qp_state_validate_work()
365 __func__, priv->qp->qp_num); ipoib_qp_state_validate_work()
368 priv->qp->qp_num, qp_attr.qp_state); ipoib_qp_state_validate_work()
413 ipoib_warn(priv, "%s Failed alloc ipoib_qp_state_validate for qp: 0x%x\n", ipoib_ib_handle_tx_wc()
414 __func__, priv->qp->qp_num); ipoib_ib_handle_tx_wc()
533 return ib_post_send(priv->qp, &priv->tx_wr.wr, &bad_wr); post_send()
839 if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE)) ipoib_ib_dev_stop()
888 if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE)) ipoib_ib_dev_stop()
904 priv->qp = NULL; ipoib_ib_dev_init()
H A Dipoib.h226 struct ib_qp *qp; member in struct:ipoib_cm_rx
237 struct ib_qp *qp; member in struct:ipoib_cm_tx
347 struct ib_qp *qp; member in struct:ipoib_dev_priv
/linux-4.4.14/arch/ia64/kernel/
H A Dunwind_decoder.c54 * UNW_DEC_RESTORE_P(fmt,qp,t,abreg,arg)
58 * UNW_DEC_SPILL_PSPREL_P(fmt,qp,t,abreg,pspoff,arg)
60 * UNW_DEC_SPILL_REG_P(fmt,qp,t,abreg,x,ytreg,arg)
62 * UNW_DEC_SPILL_SPREL_P(fmt,qp,t,abreg,pspoff,arg)
122 unsigned char byte1, byte2, abreg, qp; unw_decode_x3() local
129 qp = (byte1 & 0x3f); unw_decode_x3()
133 UNW_DEC_SPILL_SPREL_P(X3, qp, t, abreg, off, arg); unw_decode_x3()
135 UNW_DEC_SPILL_PSPREL_P(X3, qp, t, abreg, off, arg); unw_decode_x3()
142 unsigned char byte1, byte2, byte3, qp, abreg, x, ytreg; unw_decode_x4() local
148 qp = (byte1 & 0x3f); unw_decode_x4()
154 UNW_DEC_RESTORE_P(X4, qp, t, abreg, arg); unw_decode_x4()
156 UNW_DEC_SPILL_REG_P(X4, qp, t, abreg, x, ytreg, arg); unw_decode_x4()
H A Dkprobes.c91 /* brl.cond.sptk.many.clr rel<<4 (qp=0) */ set_brl_inst()
149 * (qp) cmpx.crel.ctype p1,p2=r2,r3
186 * Returns qp value if supported
194 int qp; unsupported_inst() local
196 qp = kprobe_inst & 0x3f; unsupported_inst()
198 if (slot == 1 && qp) { unsupported_inst()
205 qp = 0; unsupported_inst()
237 if (slot == 1 && qp) { unsupported_inst()
243 qp = 0; unsupported_inst()
272 if (slot == 1 && qp) { unsupported_inst()
279 qp = 0; unsupported_inst()
284 if (slot == 1 && qp) { unsupported_inst()
290 qp = 0; unsupported_inst()
293 return qp; unsupported_inst()
304 int qp) prepare_break_inst()
310 * Copy the original kprobe_inst qualifying predicate(qp) prepare_break_inst()
313 break_inst |= qp; prepare_break_inst()
589 int qp; arch_prepare_kprobe() local
604 qp = unsupported_inst(template, slot, major_opcode, kprobe_inst, addr); arch_prepare_kprobe()
605 if (qp < 0) arch_prepare_kprobe()
614 prepare_break_inst(template, slot, major_opcode, kprobe_inst, p, qp); arch_prepare_kprobe()
300 prepare_break_inst(uint template, uint slot, uint major_opcode, unsigned long kprobe_inst, struct kprobe *p, int qp) prepare_break_inst() argument
H A Dbrl_emu.c58 unsigned long opcode, btype, qp, offset, cpl; ia64_emulate_brl() local
83 qp = ((bundle[1] >> 23) & 0x3f); ia64_emulate_brl()
89 tmp_taken = regs->pr & (1L << qp); ia64_emulate_brl()
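The brl_emu.c and kprobes.c hits above both treat qp as an ia64 qualifying predicate: a 6-bit index into the 64 predicate registers that gates whether the instruction takes effect (predicate register 0 is architecturally hard-wired to 1, so unpredicated instructions use qp = 0). A tiny standalone illustration of that test, not kernel code:

    #include <stdint.h>
    #include <stdbool.h>

    /* True if predicate register `qp` is set in the pr bitmap. */
    static bool example_predicate_true(uint64_t pr, unsigned int qp)
    {
            return (pr >> (qp & 0x3f)) & 1;
    }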
H A Dunwind.c1073 desc_is_active (unsigned char qp, unw_word t, struct unw_state_record *sr) desc_is_active()
1077 if (qp > 0) { desc_is_active()
1078 if ((sr->pr_val & (1UL << qp)) == 0) desc_is_active()
1080 sr->pr_mask |= (1UL << qp); desc_is_active()
1086 desc_restore_p (unsigned char qp, unw_word t, unsigned char abreg, struct unw_state_record *sr) desc_restore_p()
1090 if (!desc_is_active(qp, t, sr)) desc_restore_p()
1100 desc_spill_reg_p (unsigned char qp, unw_word t, unsigned char abreg, unsigned char x, desc_spill_reg_p()
1106 if (!desc_is_active(qp, t, sr)) desc_spill_reg_p()
1121 desc_spill_psprel_p (unsigned char qp, unw_word t, unsigned char abreg, unw_word pspoff, desc_spill_psprel_p()
1126 if (!desc_is_active(qp, t, sr)) desc_spill_psprel_p()
1136 desc_spill_sprel_p (unsigned char qp, unw_word t, unsigned char abreg, unw_word spoff, desc_spill_sprel_p()
1141 if (!desc_is_active(qp, t, sr)) desc_spill_sprel_p()
1071 desc_is_active(unsigned char qp, unw_word t, struct unw_state_record *sr) desc_is_active() argument
1084 desc_restore_p(unsigned char qp, unw_word t, unsigned char abreg, struct unw_state_record *sr) desc_restore_p() argument
1098 desc_spill_reg_p(unsigned char qp, unw_word t, unsigned char abreg, unsigned char x, unsigned char ytreg, struct unw_state_record *sr) desc_spill_reg_p() argument
1119 desc_spill_psprel_p(unsigned char qp, unw_word t, unsigned char abreg, unw_word pspoff, struct unw_state_record *sr) desc_spill_psprel_p() argument
1134 desc_spill_sprel_p(unsigned char qp, unw_word t, unsigned char abreg, unw_word spoff, struct unw_state_record *sr) desc_spill_sprel_p() argument
/linux-4.4.14/drivers/misc/vmw_vmci/
H A Dvmci_queue_pair.c221 struct qp_entry qp; member in struct:qp_broker_entry
239 struct qp_entry qp; member in struct:qp_guest_endpoint
914 struct qp_entry *qp = qp_list_find(&qp_guest_endpoints, handle); qp_guest_handle_to_entry() local
916 entry = qp ? container_of( qp_guest_handle_to_entry()
917 qp, struct qp_guest_endpoint, qp) : NULL; qp_guest_handle_to_entry()
928 struct qp_entry *qp = qp_list_find(&qp_broker_list, handle); qp_broker_handle_to_entry() local
930 entry = qp ? container_of( qp_broker_handle_to_entry()
931 qp, struct qp_broker_entry, qp) : NULL; qp_broker_handle_to_entry()
986 entry->qp.peer = peer; qp_guest_endpoint_create()
987 entry->qp.flags = flags; qp_guest_endpoint_create()
988 entry->qp.produce_size = produce_size; qp_guest_endpoint_create()
989 entry->qp.consume_size = consume_size; qp_guest_endpoint_create()
990 entry->qp.ref_count = 0; qp_guest_endpoint_create()
994 INIT_LIST_HEAD(&entry->qp.list_item); qp_guest_endpoint_create()
1000 entry->qp.handle = vmci_resource_handle(&entry->resource); qp_guest_endpoint_create()
1002 qp_list_find(&qp_guest_endpoints, entry->qp.handle)) { qp_guest_endpoint_create()
1019 qp_free_queue(entry->produce_q, entry->qp.produce_size); qp_guest_endpoint_destroy()
1020 qp_free_queue(entry->consume_q, entry->qp.consume_size); qp_guest_endpoint_destroy()
1050 alloc_msg->handle = entry->qp.handle; qp_alloc_hypercall()
1051 alloc_msg->peer = entry->qp.peer; qp_alloc_hypercall()
1052 alloc_msg->flags = entry->qp.flags; qp_alloc_hypercall()
1053 alloc_msg->produce_size = entry->qp.produce_size; qp_alloc_hypercall()
1054 alloc_msg->consume_size = entry->qp.consume_size; qp_alloc_hypercall()
1121 if (entry->qp.flags & VMCI_QPFLAG_LOCAL) { qp_detatch_guest_work()
1124 if (entry->qp.ref_count > 1) { qp_detatch_guest_work()
1156 entry->qp.ref_count--; qp_detatch_guest_work()
1157 if (entry->qp.ref_count == 0) qp_detatch_guest_work()
1158 qp_list_remove_entry(&qp_guest_endpoints, &entry->qp); qp_detatch_guest_work()
1162 ref_count = entry->qp.ref_count; qp_detatch_guest_work()
1202 if (queue_pair_entry->qp.flags & VMCI_QPFLAG_LOCAL) { qp_alloc_guest_work()
1204 if (queue_pair_entry->qp.ref_count > 1) { qp_alloc_guest_work()
1210 if (queue_pair_entry->qp.produce_size != consume_size || qp_alloc_guest_work()
1211 queue_pair_entry->qp.consume_size != qp_alloc_guest_work()
1213 queue_pair_entry->qp.flags != qp_alloc_guest_work()
1273 if (queue_pair_entry->qp.flags & VMCI_QPFLAG_LOCAL) { qp_alloc_guest_work()
1286 if (queue_pair_entry->qp.handle.context != context_id || qp_alloc_guest_work()
1287 (queue_pair_entry->qp.peer != VMCI_INVALID_ID && qp_alloc_guest_work()
1288 queue_pair_entry->qp.peer != context_id)) { qp_alloc_guest_work()
1293 if (queue_pair_entry->qp.flags & VMCI_QPFLAG_ATTACH_ONLY) { qp_alloc_guest_work()
1308 qp_list_add_entry(&qp_guest_endpoints, &queue_pair_entry->qp); qp_alloc_guest_work()
1311 queue_pair_entry->qp.ref_count++; qp_alloc_guest_work()
1312 *handle = queue_pair_entry->qp.handle; qp_alloc_guest_work()
1321 if ((queue_pair_entry->qp.flags & VMCI_QPFLAG_LOCAL) && qp_alloc_guest_work()
1322 queue_pair_entry->qp.ref_count == 1) { qp_alloc_guest_work()
1425 entry->qp.handle = handle; qp_broker_create()
1426 entry->qp.peer = peer; qp_broker_create()
1427 entry->qp.flags = flags; qp_broker_create()
1428 entry->qp.produce_size = guest_produce_size; qp_broker_create()
1429 entry->qp.consume_size = guest_consume_size; qp_broker_create()
1430 entry->qp.ref_count = 1; qp_broker_create()
1454 INIT_LIST_HEAD(&entry->qp.list_item); qp_broker_create()
1459 entry->local_mem = kcalloc(QPE_NUM_PAGES(entry->qp), qp_broker_create()
1468 (DIV_ROUND_UP(entry->qp.produce_size, PAGE_SIZE) + 1); qp_broker_create()
1493 qp_list_add_entry(&qp_broker_list, &entry->qp); qp_broker_create()
1507 entry->qp.handle = vmci_resource_handle(&entry->resource); qp_broker_create()
1510 entry->qp.handle); qp_broker_create()
1512 entry->qp.handle); qp_broker_create()
1515 vmci_ctx_qp_create(context, entry->qp.handle); qp_broker_create()
1615 if (!(entry->qp.flags & VMCI_QPFLAG_LOCAL) || qp_broker_attach()
1648 if (entry->qp.peer != VMCI_INVALID_ID && entry->qp.peer != context_id) qp_broker_attach()
1677 if ((entry->qp.flags & ~VMCI_QP_ASYMM) != (flags & ~VMCI_QP_ASYMM_PEER)) qp_broker_attach()
1687 if (entry->qp.produce_size != produce_size || qp_broker_attach()
1688 entry->qp.consume_size != consume_size) { qp_broker_attach()
1691 } else if (entry->qp.produce_size != consume_size || qp_broker_attach()
1692 entry->qp.consume_size != produce_size) { qp_broker_attach()
1747 qp_notify_peer(true, entry->qp.handle, context_id, qp_broker_attach()
1751 entry->create_id, entry->qp.handle.context, qp_broker_attach()
1752 entry->qp.handle.resource); qp_broker_attach()
1756 entry->qp.ref_count++; qp_broker_attach()
1767 vmci_ctx_qp_create(context, entry->qp.handle); qp_broker_attach()
2122 entry->create_id, entry->qp.handle.context, vmci_qp_broker_set_page_store()
2123 entry->qp.handle.resource); vmci_qp_broker_set_page_store()
2204 entry->qp.ref_count--; vmci_qp_broker_detach()
2206 is_local = entry->qp.flags & VMCI_QPFLAG_LOCAL; vmci_qp_broker_detach()
2258 if (entry->qp.ref_count == 0) { vmci_qp_broker_detach()
2259 qp_list_remove_entry(&qp_broker_list, &entry->qp); vmci_qp_broker_detach()
2265 qp_host_free_queue(entry->produce_q, entry->qp.produce_size); vmci_qp_broker_detach()
2266 qp_host_free_queue(entry->consume_q, entry->qp.consume_size); vmci_qp_broker_detach()
2333 is_local = entry->qp.flags & VMCI_QPFLAG_LOCAL; vmci_qp_broker_map()
2340 page_store.len = QPE_NUM_PAGES(entry->qp); vmci_qp_broker_map()
2444 is_local = entry->qp.flags & VMCI_QPFLAG_LOCAL; vmci_qp_broker_unmap()
/linux-4.4.14/drivers/scsi/lpfc/
H A Dlpfc_debugfs.c2002 struct lpfc_queue *qp = NULL; lpfc_idiag_queinfo_read() local
2024 qp = phba->sli4_hba.hba_eq[x]; lpfc_idiag_queinfo_read()
2025 if (!qp) lpfc_idiag_queinfo_read()
2033 qp->q_cnt_1, qp->q_cnt_2, lpfc_idiag_queinfo_read()
2034 qp->q_cnt_3, (unsigned long long)qp->q_cnt_4); lpfc_idiag_queinfo_read()
2041 qp->queue_id, lpfc_idiag_queinfo_read()
2042 qp->entry_count, lpfc_idiag_queinfo_read()
2043 qp->entry_size, lpfc_idiag_queinfo_read()
2044 qp->host_index, lpfc_idiag_queinfo_read()
2045 qp->hba_index); lpfc_idiag_queinfo_read()
2049 qp->EQ_max_eqe = 0; lpfc_idiag_queinfo_read()
2057 qp = phba->sli4_hba.fcp_cq[x]; lpfc_idiag_queinfo_read()
2066 qp->assoc_qid, lpfc_idiag_queinfo_read()
2067 qp->q_cnt_1, qp->q_cnt_2, lpfc_idiag_queinfo_read()
2068 qp->q_cnt_3, (unsigned long long)qp->q_cnt_4); lpfc_idiag_queinfo_read()
2074 qp->queue_id, qp->entry_count, lpfc_idiag_queinfo_read()
2075 qp->entry_size, qp->host_index, lpfc_idiag_queinfo_read()
2076 qp->hba_index); lpfc_idiag_queinfo_read()
2080 qp->CQ_max_cqe = 0; lpfc_idiag_queinfo_read()
2088 qp = phba->sli4_hba.fcp_wq[x]; lpfc_idiag_queinfo_read()
2097 qp->assoc_qid, lpfc_idiag_queinfo_read()
2098 qp->q_cnt_1, (unsigned long long)qp->q_cnt_4); lpfc_idiag_queinfo_read()
2104 qp->queue_id, lpfc_idiag_queinfo_read()
2105 qp->entry_count, lpfc_idiag_queinfo_read()
2106 qp->entry_size, lpfc_idiag_queinfo_read()
2107 qp->host_index, lpfc_idiag_queinfo_read()
2108 qp->hba_index); lpfc_idiag_queinfo_read()
2121 qp = phba->sli4_hba.mbx_cq; lpfc_idiag_queinfo_read()
2122 if (qp) { lpfc_idiag_queinfo_read()
2131 qp->assoc_qid, lpfc_idiag_queinfo_read()
2132 qp->q_cnt_1, qp->q_cnt_2, lpfc_idiag_queinfo_read()
2133 qp->q_cnt_3, lpfc_idiag_queinfo_read()
2134 (unsigned long long)qp->q_cnt_4); lpfc_idiag_queinfo_read()
2140 qp->queue_id, qp->entry_count, lpfc_idiag_queinfo_read()
2141 qp->entry_size, qp->host_index, lpfc_idiag_queinfo_read()
2142 qp->hba_index); lpfc_idiag_queinfo_read()
2151 qp = phba->sli4_hba.mbx_wq; lpfc_idiag_queinfo_read()
2152 if (qp) { lpfc_idiag_queinfo_read()
2165 qp->queue_id, qp->entry_count, lpfc_idiag_queinfo_read()
2166 qp->entry_size, qp->host_index, lpfc_idiag_queinfo_read()
2167 qp->hba_index); lpfc_idiag_queinfo_read()
2176 qp = phba->sli4_hba.els_cq; lpfc_idiag_queinfo_read()
2177 if (qp) { lpfc_idiag_queinfo_read()
2186 qp->assoc_qid, lpfc_idiag_queinfo_read()
2187 qp->q_cnt_1, qp->q_cnt_2, lpfc_idiag_queinfo_read()
2188 qp->q_cnt_3, lpfc_idiag_queinfo_read()
2189 (unsigned long long)qp->q_cnt_4); lpfc_idiag_queinfo_read()
2195 qp->queue_id, qp->entry_count, lpfc_idiag_queinfo_read()
2196 qp->entry_size, qp->host_index, lpfc_idiag_queinfo_read()
2197 qp->hba_index); lpfc_idiag_queinfo_read()
2200 qp->CQ_max_cqe = 0; lpfc_idiag_queinfo_read()
2209 qp = phba->sli4_hba.els_wq; lpfc_idiag_queinfo_read()
2210 if (qp) { lpfc_idiag_queinfo_read()
2219 qp->assoc_qid, lpfc_idiag_queinfo_read()
2220 qp->q_cnt_1, lpfc_idiag_queinfo_read()
2221 (unsigned long long)qp->q_cnt_4); lpfc_idiag_queinfo_read()
2227 qp->queue_id, qp->entry_count, lpfc_idiag_queinfo_read()
2228 qp->entry_size, qp->host_index, lpfc_idiag_queinfo_read()
2229 qp->hba_index); lpfc_idiag_queinfo_read()
2239 qp = phba->sli4_hba.hdr_rq; lpfc_idiag_queinfo_read()
2249 qp->assoc_qid, lpfc_idiag_queinfo_read()
2250 qp->q_cnt_1, qp->q_cnt_2, lpfc_idiag_queinfo_read()
2251 qp->q_cnt_3, lpfc_idiag_queinfo_read()
2252 (unsigned long long)qp->q_cnt_4); lpfc_idiag_queinfo_read()
2258 qp->queue_id, lpfc_idiag_queinfo_read()
2259 qp->entry_count, lpfc_idiag_queinfo_read()
2260 qp->entry_size, lpfc_idiag_queinfo_read()
2261 qp->host_index, lpfc_idiag_queinfo_read()
2262 qp->hba_index); lpfc_idiag_queinfo_read()
2265 qp = phba->sli4_hba.dat_rq; lpfc_idiag_queinfo_read()
2271 qp->queue_id, lpfc_idiag_queinfo_read()
2272 qp->entry_count, lpfc_idiag_queinfo_read()
2273 qp->entry_size, lpfc_idiag_queinfo_read()
2274 qp->host_index, lpfc_idiag_queinfo_read()
2275 qp->hba_index); lpfc_idiag_queinfo_read()
2285 qp = phba->sli4_hba.fof_eq; lpfc_idiag_queinfo_read()
2286 if (!qp) lpfc_idiag_queinfo_read()
2294 qp->q_cnt_1, qp->q_cnt_2, lpfc_idiag_queinfo_read()
2295 qp->q_cnt_3, (unsigned long long)qp->q_cnt_4); lpfc_idiag_queinfo_read()
2302 qp->queue_id, lpfc_idiag_queinfo_read()
2303 qp->entry_count, lpfc_idiag_queinfo_read()
2304 qp->entry_size, lpfc_idiag_queinfo_read()
2305 qp->host_index, lpfc_idiag_queinfo_read()
2306 qp->hba_index); lpfc_idiag_queinfo_read()
2309 qp->EQ_max_eqe = 0; lpfc_idiag_queinfo_read()
2320 qp = phba->sli4_hba.oas_cq; lpfc_idiag_queinfo_read()
2321 if (qp) { lpfc_idiag_queinfo_read()
2330 qp->assoc_qid, lpfc_idiag_queinfo_read()
2331 qp->q_cnt_1, qp->q_cnt_2, lpfc_idiag_queinfo_read()
2332 qp->q_cnt_3, (unsigned long long)qp->q_cnt_4); lpfc_idiag_queinfo_read()
2338 qp->queue_id, qp->entry_count, lpfc_idiag_queinfo_read()
2339 qp->entry_size, qp->host_index, lpfc_idiag_queinfo_read()
2340 qp->hba_index); lpfc_idiag_queinfo_read()
2343 qp->CQ_max_cqe = 0; lpfc_idiag_queinfo_read()
2352 qp = phba->sli4_hba.oas_wq; lpfc_idiag_queinfo_read()
2353 if (qp) { lpfc_idiag_queinfo_read()
2361 qp->assoc_qid, lpfc_idiag_queinfo_read()
2362 qp->q_cnt_1, (unsigned long long)qp->q_cnt_4); lpfc_idiag_queinfo_read()
2368 qp->queue_id, lpfc_idiag_queinfo_read()
2369 qp->entry_count, lpfc_idiag_queinfo_read()
2370 qp->entry_size, lpfc_idiag_queinfo_read()
2371 qp->host_index, lpfc_idiag_queinfo_read()
2372 qp->hba_index); lpfc_idiag_queinfo_read()
/linux-4.4.14/lib/mpi/
H A Dmpih-div.c58 mpihelp_divrem(mpi_ptr_t qp, mpi_size_t qextra_limbs, mpihelp_divrem() argument
87 qp += qextra_limbs; mpihelp_divrem()
89 udiv_qrnnd(qp[i], n1, n1, np[i], d); mpihelp_divrem()
90 qp -= qextra_limbs; mpihelp_divrem()
93 udiv_qrnnd(qp[i], n1, n1, 0, d); mpihelp_divrem()
135 qp[i] = q; mpihelp_divrem()
156 qp[i] = q; mpihelp_divrem()
229 qp[i] = q; mpihelp_divrem()
H A Dmpi-internal.h224 mpi_limb_t mpihelp_divrem(mpi_ptr_t qp, mpi_size_t qextra_limbs,
/linux-4.4.14/drivers/scsi/sym53c8xx_2/
H A Dsym_misc.h159 #define FOR_EACH_QUEUED_ELEMENT(head, qp) \
160 for (qp = (head)->flink; qp != (head); qp = qp->flink)
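FOR_EACH_QUEUED_ELEMENT above walks a circular SYM_QUEHEAD list, and the sym_hipd.c hits pair it with sym_que_entry(), a container_of-style macro, to recover the enclosing CCB. A hedged usage sketch; busy_ccbq and link_ccbq are the real list head and member seen in the hits, while the lookup predicate and the `tag` field are invented purely for illustration.

    /* Sketch: scan the busy CCB queue for a CCB matching some condition. */
    static struct sym_ccb *example_find_busy_ccb(struct sym_hcb *np,
                                                 unsigned int wanted_tag)
    {
            SYM_QUEHEAD *qp;
            struct sym_ccb *cp;

            FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) {
                    cp = sym_que_entry(qp, struct sym_ccb, link_ccbq);
                    if (cp->tag == wanted_tag)      /* hypothetical field */
                            return cp;
            }
            return NULL;
    }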
H A Dsym_hipd.c1542 SYM_QUEHEAD *qp; sym_start_next_ccbs() local
1556 qp = sym_remque_head(&lp->waiting_ccbq); sym_start_next_ccbs()
1557 if (!qp) sym_start_next_ccbs()
1559 cp = sym_que_entry(qp, struct sym_ccb, link2_ccbq); sym_start_next_ccbs()
1563 sym_insque_head(qp, &lp->waiting_ccbq); sym_start_next_ccbs()
1572 sym_insque_head(qp, &lp->waiting_ccbq); sym_start_next_ccbs()
1581 sym_insque_tail(qp, &lp->started_ccbq); sym_start_next_ccbs()
1643 SYM_QUEHEAD *qp; sym_flush_comp_queue() local
1646 while ((qp = sym_remque_head(&np->comp_ccbq)) != NULL) { sym_flush_comp_queue()
1648 cp = sym_que_entry(qp, struct sym_ccb, link_ccbq); sym_flush_comp_queue()
1947 SYM_QUEHEAD *qp; sym_settrans() local
2035 FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) { sym_settrans()
2037 cp = sym_que_entry(qp, struct sym_ccb, link_ccbq); sym_settrans()
3200 SYM_QUEHEAD qtmp, *qp; sym_clear_tasks() local
3216 while ((qp = sym_remque_head(&qtmp)) != NULL) { sym_clear_tasks()
3218 cp = sym_que_entry(qp, struct sym_ccb, link_ccbq); sym_clear_tasks()
3283 SYM_QUEHEAD *qp; sym_sir_task_recovery() local
3324 FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) { sym_sir_task_recovery()
3325 cp = sym_que_entry(qp,struct sym_ccb,link_ccbq); sym_sir_task_recovery()
3356 FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) { sym_sir_task_recovery()
3357 cp = sym_que_entry(qp, struct sym_ccb, link_ccbq); sym_sir_task_recovery()
3472 FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) { sym_sir_task_recovery()
3473 cp = sym_que_entry(qp, struct sym_ccb, link_ccbq); sym_sir_task_recovery()
4657 SYM_QUEHEAD *qp; sym_get_ccb() local
4665 qp = sym_remque_head(&np->free_ccbq); sym_get_ccb()
4666 if (!qp) sym_get_ccb()
4668 cp = sym_que_entry(qp, struct sym_ccb, link_ccbq); sym_get_ccb()
5333 SYM_QUEHEAD *qp; sym_abort_scsiio() local
5339 FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) { sym_abort_scsiio()
5340 struct sym_ccb *cp2 = sym_que_entry(qp, struct sym_ccb, link_ccbq); sym_abort_scsiio()
5805 SYM_QUEHEAD *qp; sym_hcb_free() local
5822 while ((qp = sym_remque_head(&np->free_ccbq)) != NULL) { sym_hcb_free()
5823 cp = sym_que_entry(qp, struct sym_ccb, link_ccbq); sym_hcb_free()
/linux-4.4.14/drivers/crypto/
H A Dn2_core.c233 static inline bool n2_should_run_async(struct spu_queue *qp, int this_len)
236 qp->head != qp->tail)
477 static unsigned long wait_for_tail(struct spu_queue *qp) wait_for_tail() argument
482 hv_ret = sun4v_ncs_gethead(qp->qhandle, &head); wait_for_tail()
487 if (head == qp->tail) { wait_for_tail()
488 qp->head = head; wait_for_tail()
495 static unsigned long submit_and_wait_for_tail(struct spu_queue *qp, submit_and_wait_for_tail() argument
498 unsigned long hv_ret = spu_queue_submit(qp, ent); submit_and_wait_for_tail()
501 hv_ret = wait_for_tail(qp); submit_and_wait_for_tail()
514 struct spu_queue *qp; n2_do_async_digest() local
539 qp = cpu_to_cwq[cpu]; n2_do_async_digest()
540 if (!qp) n2_do_async_digest()
543 spin_lock_irqsave(&qp->lock, flags); n2_do_async_digest()
548 ent = qp->q + qp->tail; n2_do_async_digest()
565 ent = spu_queue_next(qp, ent); n2_do_async_digest()
580 if (submit_and_wait_for_tail(qp, ent) != HV_EOK) n2_do_async_digest()
585 spin_unlock_irqrestore(&qp->lock, flags); n2_do_async_digest()
827 struct spu_queue *qp, bool encrypt) __n2_crypt_chunk()
834 ent = spu_queue_alloc(qp, cp->arr_len); __n2_crypt_chunk()
857 ent = spu_queue_next(qp, ent); __n2_crypt_chunk()
870 return (spu_queue_submit(qp, ent) != HV_EOK) ? -EINVAL : 0; __n2_crypt_chunk()
978 struct spu_queue *qp; n2_do_ecb() local
983 qp = cpu_to_cwq[get_cpu()]; n2_do_ecb()
985 if (!qp) n2_do_ecb()
988 spin_lock_irqsave(&qp->lock, flags); n2_do_ecb()
991 err = __n2_crypt_chunk(tfm, c, qp, encrypt); n2_do_ecb()
999 hv_ret = wait_for_tail(qp); n2_do_ecb()
1004 spin_unlock_irqrestore(&qp->lock, flags); n2_do_ecb()
1030 struct spu_queue *qp; n2_do_chaining() local
1038 qp = cpu_to_cwq[get_cpu()]; n2_do_chaining()
1040 if (!qp) n2_do_chaining()
1043 spin_lock_irqsave(&qp->lock, flags); n2_do_chaining()
1050 err = __n2_crypt_chunk(tfm, c, qp, true); n2_do_chaining()
1080 err = __n2_crypt_chunk(tfm, c, qp, false); n2_do_chaining()
1089 hv_ret = wait_for_tail(qp); n2_do_chaining()
1094 spin_unlock_irqrestore(&qp->lock, flags); n2_do_chaining()
826 __n2_crypt_chunk(struct crypto_tfm *tfm, struct n2_crypto_chunk *cp, struct spu_queue *qp, bool encrypt) __n2_crypt_chunk() argument
/linux-4.4.14/drivers/net/ethernet/sun/
H A Dsunhme.c2139 struct quattro *qp = (struct quattro *) cookie; quattro_sbus_interrupt() local
2143 struct net_device *dev = qp->happy_meals[i]; quattro_sbus_interrupt()
2558 struct quattro *qp; quattro_sbus_find() local
2561 qp = platform_get_drvdata(op); quattro_sbus_find()
2562 if (qp) quattro_sbus_find()
2563 return qp; quattro_sbus_find()
2565 qp = kmalloc(sizeof(struct quattro), GFP_KERNEL); quattro_sbus_find()
2566 if (qp != NULL) { quattro_sbus_find()
2570 qp->happy_meals[i] = NULL; quattro_sbus_find()
2572 qp->quattro_dev = child; quattro_sbus_find()
2573 qp->next = qfe_sbus_list; quattro_sbus_find()
2574 qfe_sbus_list = qp; quattro_sbus_find()
2576 platform_set_drvdata(op, qp); quattro_sbus_find()
2578 return qp; quattro_sbus_find()
2587 struct quattro *qp; quattro_sbus_register_irqs() local
2589 for (qp = qfe_sbus_list; qp != NULL; qp = qp->next) { quattro_sbus_register_irqs()
2590 struct platform_device *op = qp->quattro_dev; quattro_sbus_register_irqs()
2594 if (!qp->happy_meals[qfe_slot]) quattro_sbus_register_irqs()
2603 qp); quattro_sbus_register_irqs()
2616 struct quattro *qp; quattro_sbus_free_irqs() local
2618 for (qp = qfe_sbus_list; qp != NULL; qp = qp->next) { quattro_sbus_free_irqs()
2619 struct platform_device *op = qp->quattro_dev; quattro_sbus_free_irqs()
2623 if (!qp->happy_meals[qfe_slot]) quattro_sbus_free_irqs()
2629 free_irq(op->archdata.irqs[0], qp); quattro_sbus_free_irqs()
2638 struct quattro *qp; quattro_pci_find() local
2641 for (qp = qfe_pci_list; qp != NULL; qp = qp->next) { quattro_pci_find()
2642 struct pci_dev *qpdev = qp->quattro_dev; quattro_pci_find()
2645 return qp; quattro_pci_find()
2647 qp = kmalloc(sizeof(struct quattro), GFP_KERNEL); quattro_pci_find()
2648 if (qp != NULL) { quattro_pci_find()
2652 qp->happy_meals[i] = NULL; quattro_pci_find()
2654 qp->quattro_dev = bdev; quattro_pci_find()
2655 qp->next = qfe_pci_list; quattro_pci_find()
2656 qfe_pci_list = qp; quattro_pci_find()
2659 qp->nranges = 0; quattro_pci_find()
2661 return qp; quattro_pci_find()
2681 struct quattro *qp = NULL; happy_meal_sbus_probe_one() local
2694 qp = quattro_sbus_find(op); happy_meal_sbus_probe_one()
2695 if (qp == NULL) happy_meal_sbus_probe_one()
2698 if (qp->happy_meals[qfe_slot] == NULL) happy_meal_sbus_probe_one()
2744 if (qp != NULL) { happy_meal_sbus_probe_one()
2745 hp->qfe_parent = qp; happy_meal_sbus_probe_one()
2747 qp->happy_meals[qfe_slot] = dev; happy_meal_sbus_probe_one()
2795 if (qp != NULL) happy_meal_sbus_probe_one()
2884 if (qp) happy_meal_sbus_probe_one()
2885 qp->happy_meals[qfe_slot] = NULL; happy_meal_sbus_probe_one()
2983 struct quattro *qp = NULL; happy_meal_pci_probe() local
3013 qp = quattro_pci_find(pdev); happy_meal_pci_probe()
3014 if (qp == NULL) happy_meal_pci_probe()
3017 if (qp->happy_meals[qfe_slot] == NULL) happy_meal_pci_probe()
3039 if (qp != NULL) { happy_meal_pci_probe()
3040 hp->qfe_parent = qp; happy_meal_pci_probe()
3042 qp->happy_meals[qfe_slot] = dev; happy_meal_pci_probe()
3110 if (qp != NULL) happy_meal_pci_probe()
3169 struct pci_dev *qpdev = qp->quattro_dev; happy_meal_pci_probe()
3203 if (qp != NULL) happy_meal_pci_probe()
3204 qp->happy_meals[qfe_slot] = NULL; happy_meal_pci_probe()
H A Dsunqe.h298 #define TX_BUFFS_AVAIL(qp) \
299 (((qp)->tx_old <= (qp)->tx_new) ? \
300 (qp)->tx_old + (TX_RING_SIZE - 1) - (qp)->tx_new : \
301 (qp)->tx_old - (qp)->tx_new - 1)
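TX_BUFFS_AVAIL() above is the classic ring accounting that keeps one slot unused so that tx_old == tx_new can unambiguously mean "empty". The sketch below restates it as a plain function with a worked example in the comment; it is equivalent arithmetic, not the driver macro.

    /*
     * With a 16-entry ring: tx_old = 3,  tx_new = 10 ->  3 + 15 - 10 = 8 free;
     * wrapped case          tx_old = 10, tx_new = 3  -> 10 -  3 -  1 = 6 free;
     * and tx_old == tx_new always yields ring_size - 1 (the ring is empty).
     */
    static inline int example_tx_buffs_avail(int tx_old, int tx_new, int ring_size)
    {
            if (tx_old <= tx_new)
                    return tx_old + (ring_size - 1) - tx_new;
            return tx_old - tx_new - 1;
    }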
H A Dsunqe.c937 struct sunqe *qp = platform_get_drvdata(op); qec_sbus_remove() local
938 struct net_device *net_dev = qp->dev; qec_sbus_remove()
942 of_iounmap(&op->resource[0], qp->qcregs, CREG_REG_SIZE); qec_sbus_remove()
943 of_iounmap(&op->resource[1], qp->mregs, MREGS_REG_SIZE); qec_sbus_remove()
945 qp->qe_block, qp->qblock_dvma); qec_sbus_remove()
947 qp->buffers, qp->buffers_dvma); qec_sbus_remove()
/linux-4.4.14/drivers/infiniband/hw/cxgb3/
H A Diwch_cm.c681 ep->com.qp = NULL; close_complete_upcall()
713 ep->com.qp = NULL; peer_abort_upcall()
742 ep->com.qp = NULL; connect_reply_upcall()
925 err = iwch_modify_qp(ep->com.qp->rhp, process_mpa_reply()
926 ep->com.qp, mask, &attrs, 1); process_mpa_reply()
930 if (peer2peer && iwch_rqes_posted(ep->com.qp) == 0) { process_mpa_reply()
1484 iwch_modify_qp(ep->com.qp->rhp, ep->com.qp, peer_close()
1497 if (ep->com.cm_id && ep->com.qp) { peer_close()
1499 iwch_modify_qp(ep->com.qp->rhp, ep->com.qp, peer_close()
1592 if (ep->com.cm_id && ep->com.qp) { peer_abort()
1594 ret = iwch_modify_qp(ep->com.qp->rhp, peer_abort()
1595 ep->com.qp, IWCH_QP_ATTR_NEXT_STATE, peer_abort()
1599 "%s - qp <- error failed!\n", peer_abort()
1659 if ((ep->com.cm_id) && (ep->com.qp)) { close_con_rpl()
1661 iwch_modify_qp(ep->com.qp->rhp, close_con_rpl()
1662 ep->com.qp, close_con_rpl()
1689 * For (1), we save the message in the qp for later consumer consumption.
1705 skb_copy_from_linear_data(skb, ep->com.qp->attr.terminate_buffer, terminate()
1707 ep->com.qp->attr.terminate_msg_len = skb->len; terminate()
1708 ep->com.qp->attr.is_terminate_local = 0; terminate()
1726 iwch_modify_qp(ep->com.qp->rhp, ec_status()
1727 ep->com.qp, IWCH_QP_ATTR_NEXT_STATE, ec_status()
1754 if (ep->com.cm_id && ep->com.qp) { ep_timeout()
1756 iwch_modify_qp(ep->com.qp->rhp, ep_timeout()
1757 ep->com.qp, IWCH_QP_ATTR_NEXT_STATE, ep_timeout()
1801 struct iwch_qp *qp = get_qhp(h, conn_param->qpn); iwch_accept_cr() local
1810 BUG_ON(!qp); iwch_accept_cr()
1812 if ((conn_param->ord > qp->rhp->attr.max_rdma_read_qp_depth) || iwch_accept_cr()
1813 (conn_param->ird > qp->rhp->attr.max_rdma_reads_per_qp)) { iwch_accept_cr()
1821 ep->com.qp = qp; iwch_accept_cr()
1845 err = iwch_modify_qp(ep->com.qp->rhp, iwch_accept_cr()
1846 ep->com.qp, mask, &attrs, 1); iwch_accept_cr()
1851 if (iwch_rqes_posted(qp)) { iwch_accept_cr()
1870 ep->com.qp = NULL; iwch_accept_cr()
1929 ep->com.qp = get_qhp(h, conn_param->qpn); iwch_connect()
1930 BUG_ON(!ep->com.qp); iwch_connect()
1931 PDBG("%s qpn 0x%x qp %p cm_id %p\n", __func__, conn_param->qpn, iwch_connect()
1932 ep->com.qp, cm_id); iwch_connect()
H A Diwch_provider.h184 void iwch_qp_add_ref(struct ib_qp *qp);
185 void iwch_qp_rem_ref(struct ib_qp *qp);
333 int iwch_bind_mw(struct ib_qp *qp,
H A Diwch_ev.c65 "qp state %d qpid 0x%x status 0x%x\n", __func__, post_qp_event()
93 event.element.qp = &qhp->ibqp; post_qp_event()
H A Diwch_cm.h156 struct iwch_qp *qp; member in struct:iwch_ep_common
H A Diwch_provider.c1114 void iwch_qp_add_ref(struct ib_qp *qp) iwch_qp_add_ref() argument
1116 PDBG("%s ib_qp %p\n", __func__, qp); iwch_qp_add_ref()
1117 atomic_inc(&(to_iwch_qp(qp)->refcnt)); iwch_qp_add_ref()
1120 void iwch_qp_rem_ref(struct ib_qp *qp) iwch_qp_rem_ref() argument
1122 PDBG("%s ib_qp %p\n", __func__, qp); iwch_qp_rem_ref()
1123 if (atomic_dec_and_test(&(to_iwch_qp(qp)->refcnt))) iwch_qp_rem_ref()
1124 wake_up(&(to_iwch_qp(qp)->wait)); iwch_qp_rem_ref()
H A Diwch_qp.c529 int iwch_bind_mw(struct ib_qp *qp, iwch_bind_mw() argument
547 qhp = to_iwch_qp(qp); iwch_bind_mw()
769 return iwch_cxgb3_ofld_send(ep->com.qp->rhp->rdev.t3cdev_p, skb); iwch_post_zb_read()
819 /* locking hierarchy: cq lock first, then qp lock. */ __flush_qp()
833 /* locking hierarchy: cq lock first, then qp lock. */ __flush_qp()
H A Diwch_cq.c82 wc->qp = &qhp->ibqp; iwch_poll_cq_one()
/linux-4.4.14/drivers/infiniband/hw/usnic/
H A Dusnic_ib_verbs.h48 int usnic_ib_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
62 int usnic_ib_destroy_qp(struct ib_qp *qp);
H A Dusnic_ib_verbs.c200 usnic_info("No free qp grp found on %s\n", find_free_vf_and_create_qp_grp()
370 int usnic_ib_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr, usnic_ib_query_qp() argument
383 qp_grp = to_uqp_grp(qp); usnic_ib_query_qp()
543 int usnic_ib_destroy_qp(struct ib_qp *qp) usnic_ib_destroy_qp() argument
550 qp_grp = to_uqp_grp(qp); usnic_ib_destroy_qp()
554 usnic_err("Failed to move qp grp %u to reset\n", usnic_ib_destroy_qp()
H A Dusnic_ib_qp_grp.h46 * The qp group struct represents all the hw resources needed to present a ib_qp
H A Dusnic_ib_qp_grp.c101 * The QP res chunk, used to derive qp indices, get_qp_res_chunk()
121 usnic_err("Unable to get qp res with err %ld\n", enable_qp_grp()
131 usnic_err("Failed to enable qp %d of %s:%d\n with err %d\n", enable_qp_grp()
162 usnic_err("Unable to get qp res with err %ld\n", disable_qp_grp()
493 ib_event.element.qp = &qp_grp->ibqp; usnic_ib_qp_grp_modify()
/linux-4.4.14/drivers/scsi/bnx2fc/
H A Dbnx2fc_debug.c3 * session resources such as connection id and qp resources.
H A Dbnx2fc_debug.h3 * session resources such as connection id and qp resources.
H A Dbnx2fc_tgt.c3 * session resources such as connection id and qp resources.
660 *bnx2fc_alloc_session_resc - Allocate qp resources for the session
830 * bnx2i_free_session_resc - free qp resources for the session
H A Dbnx2fc_constants.h3 * session resources such as connection id and qp resources.
/linux-4.4.14/drivers/infiniband/ulp/iser/
H A Diser_verbs.c60 iser_err("qp event %s (%d)\n", iser_qp_event_callback()
523 ib_conn->qp = ib_conn->cma_id->qp; iser_create_ib_conn_res()
524 iser_info("setting conn %p cma_id %p qp %p\n", iser_create_ib_conn_res()
526 ib_conn->cma_id->qp); iser_create_ib_conn_res()
640 iser_info("freeing conn %p cma_id %p qp %p\n", iser_free_ib_conn_res()
641 iser_conn, ib_conn->cma_id, ib_conn->qp); iser_free_ib_conn_res()
643 if (ib_conn->qp != NULL) { iser_free_ib_conn_res()
646 ib_conn->qp = NULL; iser_free_ib_conn_res()
727 err = ib_post_send(ib_conn->qp, &ib_conn->beacon, &bad_wr); iser_conn_terminate()
877 (void)ib_query_qp(cma_id->qp, &attr, ~0, &init_attr); iser_connected_handler()
878 iser_info("remote qpn:%x my qpn:%x\n", attr.dest_qp_num, cma_id->qp->qp_num); iser_connected_handler()
1063 ib_ret = ib_post_recv(ib_conn->qp, &rx_wr, &rx_wr_failed); iser_post_recvl()
1092 ib_ret = ib_post_recv(ib_conn->qp, ib_conn->rx_wr, &rx_wr_failed); iser_post_recvm()
1124 ib_ret = ib_post_send(ib_conn->qp, &tx_desc->wrs[0].send, &bad_wr); iser_post_send()
1205 ib_conn = wc->qp->qp_context; iser_handle_wc()
H A Discsi_iser.h472 * @qp: Connection Queue-pair
485 struct ib_qp *qp; member in struct:ib_conn
/linux-4.4.14/include/linux/mlx4/
H A Dqp.h474 int sqd_event, struct mlx4_qp *qp);
476 int mlx4_qp_query(struct mlx4_dev *dev, struct mlx4_qp *qp,
481 struct mlx4_qp *qp, enum mlx4_qp_state *qp_state);
488 void mlx4_qp_remove(struct mlx4_dev *dev, struct mlx4_qp *qp);
H A Ddevice.h86 /* base qkey for use in sriov tunnel-qp/proxy-qp communication.
234 * bits 6 and 7 set in their qp number.
811 int qp; member in struct:mlx4_quotas
884 } __packed qp; member in union:mlx4_eqe::__anon12691
1102 int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp,
1104 void mlx4_qp_free(struct mlx4_dev *dev, struct mlx4_qp *qp);
1115 int mlx4_unicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
1117 int mlx4_unicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
1119 int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
1122 int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
/linux-4.4.14/include/rdma/
H A Dib_verbs.h485 struct ib_qp *qp; member in union:ib_event::__anon13646
766 struct ib_qp *qp; member in struct:ib_wc
850 /* Reserve a range for qp types internal to the low level driver.
851 * These qp types will not be visible at the IB core layer, so the
1501 struct ib_qp *qp; member in struct:ib_flow
1694 int (*modify_qp)(struct ib_qp *qp,
1698 int (*query_qp)(struct ib_qp *qp,
1702 int (*destroy_qp)(struct ib_qp *qp);
1703 int (*post_send)(struct ib_qp *qp,
1706 int (*post_recv)(struct ib_qp *qp,
1762 int (*bind_mw)(struct ib_qp *qp,
1774 int (*attach_mcast)(struct ib_qp *qp,
1777 int (*detach_mcast)(struct ib_qp *qp,
1794 struct ib_flow * (*create_flow)(struct ib_qp *qp,
2371 * @qp: The QP to modify.
2377 int ib_modify_qp(struct ib_qp *qp,
2384 * @qp: The QP to query.
2392 int ib_query_qp(struct ib_qp *qp,
2399 * @qp: The QP to destroy.
2401 int ib_destroy_qp(struct ib_qp *qp);
2415 * @qp: The QP handle to release
2420 int ib_close_qp(struct ib_qp *qp);
2425 * @qp: The QP to post the work request on.
2435 static inline int ib_post_send(struct ib_qp *qp, ib_post_send() argument
2439 return qp->device->post_send(qp, send_wr, bad_send_wr); ib_post_send()
2445 * @qp: The QP to post the work request on.
2450 static inline int ib_post_recv(struct ib_qp *qp, ib_post_recv() argument
2454 return qp->device->post_recv(qp, recv_wr, bad_recv_wr); ib_post_recv()
2895 * @qp: QP to post the bind work request on.
2904 static inline int ib_bind_mw(struct ib_qp *qp, ib_bind_mw() argument
2910 mw->device->bind_mw(qp, mw, mw_bind) : ib_bind_mw()
2961 * @qp: QP to attach to the multicast group. The QP must be type
2971 int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
2975 * @qp: QP to detach from the multicast group.
2979 int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
2993 struct ib_flow *ib_create_flow(struct ib_qp *qp,
H A Diw_cm.h106 void (*add_ref)(struct ib_qp *qp);
108 void (*rem_ref)(struct ib_qp *qp);
153 * @qp: The QP
160 void iw_cm_unbind_qp(struct iw_cm_id *cm_id, struct ib_qp *qp);
H A Drdma_cm.h152 struct ib_qp *qp; member in struct:rdma_cm_id
299 * state of the qp associated with the id is modified to error, such that any
H A Dib_mad.h455 * @qp: Reference to QP used for sending and receiving MADs.
473 struct ib_qp *qp; member in struct:ib_mad_agent
677 * @qp: Reference to a QP that requires MAD services.
691 struct ib_mad_agent *ib_redirect_mad_qp(struct ib_qp *qp,
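Note on the include/rdma/ib_verbs.h hits in this group: ib_post_send() and ib_post_recv() are thin inline wrappers that dispatch through the QP's device verb table (qp->device->post_send / qp->device->post_recv). As a hedged illustration of how a consumer drives that wrapper, the sketch below posts a single receive buffer; the function name and the dma_addr/len/lkey parameters are assumptions for the example, not taken from any driver listed here.

#include <rdma/ib_verbs.h>

/* Illustrative only: post one receive buffer on an existing QP.
 * dma_addr, len and lkey are assumed to describe a buffer that is
 * already DMA-mapped and registered against the device's PD. */
static int example_post_one_recv(struct ib_qp *qp, u64 dma_addr,
				 u32 len, u32 lkey)
{
	struct ib_sge sge = {
		.addr   = dma_addr,
		.length = len,
		.lkey   = lkey,
	};
	struct ib_recv_wr wr = {
		.wr_id   = dma_addr,	/* cookie handed back in the completion */
		.sg_list = &sge,
		.num_sge = 1,
	};
	struct ib_recv_wr *bad_wr;

	/* Expands to qp->device->post_recv(qp, &wr, &bad_wr), exactly as
	 * the inline wrapper shown in the ib_verbs.h hits above does. */
	return ib_post_recv(qp, &wr, &bad_wr);
}

The ULP drivers in these results (iser, srp, srpt, isert, rds, xprtrdma) all follow this general shape: build an ib_send_wr or ib_recv_wr chain, post it on their QP, and treat a posting error as a reason to tear the connection down.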
/linux-4.4.14/net/9p/
H A Dtrans_rdma.c68 * @qp: Queue Pair pointer
95 struct ib_qp *qp; member in struct:p9_trans_rdma
383 if (rdma->qp && !IS_ERR(rdma->qp)) rdma_destroy_trans()
384 ib_destroy_qp(rdma->qp); rdma_destroy_trans()
420 return ib_post_recv(rdma->qp, &wr, &bad_wr); post_recv()
524 err = ib_post_send(rdma->qp, &wr, &bad_wr); rdma_request()
726 rdma->qp = rdma->cm_id->qp; rdma_create_trans()
/linux-4.4.14/include/linux/mlx5/
H A Dqp.h620 struct mlx5_core_qp *qp,
626 struct mlx5_core_qp *qp);
628 struct mlx5_core_qp *qp);
629 int mlx5_core_qp_query(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
636 int mlx5_debug_qp_add(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp);
637 void mlx5_debug_qp_remove(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp);
H A Ddriver.h455 /* start: qp staff */
461 /* end: qp staff */
/linux-4.4.14/arch/sparc/kernel/
H A Dds.c995 struct ds_queue_entry *qp, *tmp; process_ds_work() local
1003 list_for_each_entry_safe(qp, tmp, &todo, list) { process_ds_work()
1004 struct ds_data *dpkt = (struct ds_data *) qp->req; process_ds_work()
1005 struct ds_info *dp = qp->dp; process_ds_work()
1007 int req_len = qp->req_len; process_ds_work()
1021 list_del(&qp->list); process_ds_work()
1022 kfree(qp); process_ds_work()
1048 struct ds_queue_entry *qp; ds_data() local
1050 qp = kmalloc(sizeof(struct ds_queue_entry) + len, GFP_ATOMIC); ds_data()
1051 if (!qp) { ds_data()
1054 qp->dp = dp; ds_data()
1055 memcpy(&qp->req, pkt, len); ds_data()
1056 list_add_tail(&qp->list, &ds_work_list); ds_data()
/linux-4.4.14/drivers/media/pci/solo6x10/
H A Dsolo6x10-enc.c182 unsigned int qp) solo_s_jpeg_qp()
187 if ((ch > 31) || (qp > 3)) solo_s_jpeg_qp()
206 solo_dev->jpeg_qp[idx] |= (qp & 3) << ch; solo_s_jpeg_qp()
181 solo_s_jpeg_qp(struct solo_dev *solo_dev, unsigned int ch, unsigned int qp) solo_s_jpeg_qp() argument
H A Dsolo6x10.h157 u8 mode, gop, qp, interlaced, interval; member in struct:solo_enc_dev
378 unsigned int qp);
H A Dsolo6x10-v4l2-enc.c261 solo_reg_write(solo_dev, SOLO_VE_CH_QP(ch), solo_enc->qp); solo_enc_on()
266 solo_reg_write(solo_dev, SOLO_VE_CH_QP_E(ch), solo_enc->qp); solo_enc_on()
1103 solo_enc->qp = ctrl->val; solo_s_ctrl()
1104 solo_reg_write(solo_dev, SOLO_VE_CH_QP(solo_enc->ch), solo_enc->qp); solo_s_ctrl()
1105 solo_reg_write(solo_dev, SOLO_VE_CH_QP_E(solo_enc->ch), solo_enc->qp); solo_s_ctrl()
1289 solo_enc->qp = SOLO_DEFAULT_QP; solo_enc_alloc()
/linux-4.4.14/drivers/scsi/
H A Dqlogicpti.h504 #define for_each_qlogicpti(qp) \
505 for((qp) = qptichain; (qp); (qp) = (qp)->next)
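for_each_qlogicpti() above is simply a walk over the driver-global qptichain singly linked list. A minimal, hedged usage fragment (it would live inside the qlogicpti driver, where the chain and struct qlogicpti are visible; the loop body is illustrative):

/* Iterate over every registered qlogicpti adapter; qptichain is the
 * driver-global list head the macro expands to. */
struct qlogicpti *qp;

for_each_qlogicpti(qp) {
	/* hypothetical per-adapter action */
	printk(KERN_DEBUG "qlogicpti: adapter at %p\n", qp);
}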
/linux-4.4.14/drivers/infiniband/ulp/srp/
H A Dib_srp.c259 struct ib_qp *qp) srp_init_qp()
280 ret = ib_modify_qp(qp, attr, srp_init_qp()
467 ret = ib_modify_qp(ch->qp, &attr, IB_QP_STATE); srp_destroy_qp()
473 ret = ib_post_recv(ch->qp, &wr, &bad_wr); srp_destroy_qp()
479 ib_destroy_qp(ch->qp); srp_destroy_qp()
488 struct ib_qp *qp; srp_create_ch_ib() local
530 qp = ib_create_qp(dev->pd, init_attr); srp_create_ch_ib()
531 if (IS_ERR(qp)) { srp_create_ch_ib()
532 ret = PTR_ERR(qp); srp_create_ch_ib()
536 ret = srp_init_qp(target, qp); srp_create_ch_ib()
558 if (ch->qp) srp_create_ch_ib()
565 ch->qp = qp; srp_create_ch_ib()
583 ib_destroy_qp(qp); srp_create_ch_ib()
615 if (!ch->qp) srp_free_ch_ib()
637 ch->qp = NULL; srp_free_ch_ib()
722 req->param.qp_num = ch->qp->qp_num; srp_send_req()
723 req->param.qp_type = ch->qp->qp_type; srp_send_req()
1011 * back, or SRP_DLID_REDIRECT if we get a lid/qp srp_connect_ch()
1056 return ib_post_send(ch->qp, &wr, &bad_wr); srp_inv_rkey()
1369 err = ib_post_send(ch->qp, &wr.wr, &bad_wr); srp_map_finish_fr()
1759 return ib_post_send(ch->qp, &wr, &bad_wr); srp_post_send()
1777 return ib_post_recv(ch->qp, &wr, &bad_wr); srp_post_recv()
1805 rsp->tag, ch - target->ch, ch->qp->qp_num); srp_process_rsp()
2280 ret = ib_modify_qp(ch->qp, qp_attr, attr_mask); srp_cm_rep_handler()
2299 ret = ib_modify_qp(ch->qp, qp_attr, attr_mask); srp_cm_rep_handler()
258 srp_init_qp(struct srp_target_port *target, struct ib_qp *qp) srp_init_qp() argument
H A Dib_srp.h147 struct ib_qp *qp; member in struct:srp_rdma_ch
/linux-4.4.14/drivers/infiniband/ulp/srpt/
H A Dib_srpt.c466 ah = ib_create_ah_from_wc(mad_agent->qp->pd, mad_wc->wc, srpt_mad_recv_handler()
829 ret = ib_post_send(ch->qp, &wr, &bad_wr); srpt_post_send()
947 * Initialized the attributes of queue pair 'qp' by allowing local write,
948 * remote read and remote write. Also transitions 'qp' to state IB_QPS_INIT.
950 static int srpt_init_ch_qp(struct srpt_rdma_ch *ch, struct ib_qp *qp) srpt_init_ch_qp() argument
965 ret = ib_modify_qp(qp, attr, srpt_init_ch_qp()
976 * @qp: queue pair to change the state of.
984 static int srpt_ch_qp_rtr(struct srpt_rdma_ch *ch, struct ib_qp *qp) srpt_ch_qp_rtr() argument
997 ret = ib_modify_qp(qp, &qp_attr, attr_mask); srpt_ch_qp_rtr()
1006 * @qp: queue pair to change the state of.
1014 static int srpt_ch_qp_rts(struct srpt_rdma_ch *ch, struct ib_qp *qp) srpt_ch_qp_rts() argument
1027 ret = ib_modify_qp(qp, &qp_attr, attr_mask); srpt_ch_qp_rts()
1041 return ib_modify_qp(ch->qp, &qp_attr, IB_QP_STATE); srpt_ch_qp_err()
2053 ch->qp = ib_create_qp(sdev->pd, qp_init); srpt_create_ch_ib()
2054 if (IS_ERR(ch->qp)) { srpt_create_ch_ib()
2055 ret = PTR_ERR(ch->qp); srpt_create_ch_ib()
2073 ret = srpt_init_ch_qp(ch, ch->qp); srpt_create_ch_ib()
2094 ib_destroy_qp(ch->qp); srpt_create_ch_ib()
2105 ib_destroy_qp(ch->qp); srpt_destroy_ch_ib()
2504 ret = srpt_ch_qp_rtr(ch, ch->qp); srpt_cm_req_recv()
2555 rep_param->qp_num = ch->qp->qp_num; srpt_cm_req_recv()
2636 ret = srpt_ch_qp_rts(ch, ch->qp); srpt_cm_rtu_recv()
2817 ret = ib_post_send(ch->qp, &wr.wr, &bad_wr); srpt_perform_rdmas()
2830 ib_post_send(ch->qp, &wr.wr, &bad_wr) != 0) { srpt_perform_rdmas()
H A Dib_srpt.h274 * @qp: IB queue pair used for communicating over this channel.
305 struct ib_qp *qp; member in struct:srpt_rdma_ch
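The srpt hits around srpt_init_ch_qp() describe moving a freshly created QP into IB_QPS_INIT with local write and remote read/write access, before the RTR/RTS transitions handled by srpt_ch_qp_rtr() and srpt_ch_qp_rts(). A hedged sketch of that INIT transition follows; the helper name is illustrative and the P_Key index of 0 is an assumption.

#include <rdma/ib_verbs.h>
#include <linux/slab.h>

/* Sketch only: grant local write plus remote read/write and move the
 * QP to INIT. Port number and P_Key index are illustrative. */
static int example_qp_to_init(struct ib_qp *qp, u8 port)
{
	struct ib_qp_attr *attr;
	int ret;

	attr = kzalloc(sizeof(*attr), GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	attr->qp_state = IB_QPS_INIT;
	attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE |
				IB_ACCESS_REMOTE_READ |
				IB_ACCESS_REMOTE_WRITE;
	attr->port_num = port;
	attr->pkey_index = 0;

	ret = ib_modify_qp(qp, attr,
			   IB_QP_STATE | IB_QP_ACCESS_FLAGS |
			   IB_QP_PORT | IB_QP_PKEY_INDEX);
	kfree(attr);
	return ret;
}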
/linux-4.4.14/arch/ia64/include/asm/
H A Dkprobes.h41 unsigned long long qp : 6; member in struct:cmp_inst::__anon1477
/linux-4.4.14/drivers/atm/
H A Dfirestream.c631 static int qp; variable
651 pq[qp].cmd = cmd; submit_queue()
652 pq[qp].p0 = p1; submit_queue()
653 pq[qp].p1 = p2; submit_queue()
654 pq[qp].p2 = p3; submit_queue()
655 qp++; submit_queue()
656 if (qp >= 60) qp = 0; submit_queue()
1950 i, pq[qp].cmd, pq[qp].p0, pq[qp].p1, pq[qp].p2); firestream_remove_one()
1951 qp++; firestream_remove_one()
1952 if (qp >= 60) qp = 0; firestream_remove_one()
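The firestream.c lines show a fixed-size command log: submit_queue() writes each command into pq[qp] and wraps the index at 60, and firestream_remove_one() later dumps the entries in that same wrapping order. The standalone sketch below mirrors the wrap-around pattern with made-up entry fields and names; it is not the driver code itself.

#include <stdio.h>

#define LOG_ENTRIES 60			/* capacity, as implied by the wrap at 60 */

struct log_entry {
	unsigned cmd, p0, p1, p2;	/* hypothetical fields */
};

static struct log_entry pq[LOG_ENTRIES];
static int qp;				/* next slot to overwrite */

static void log_cmd(unsigned cmd, unsigned p0, unsigned p1, unsigned p2)
{
	pq[qp] = (struct log_entry){ cmd, p0, p1, p2 };
	if (++qp >= LOG_ENTRIES)
		qp = 0;			/* wrap: oldest entry is overwritten */
}

static void dump_log(void)
{
	int i, idx = qp;		/* start at qp so output is oldest-first */

	for (i = 0; i < LOG_ENTRIES; i++) {
		printf("%2d: cmd %x %x %x %x\n", i,
		       pq[idx].cmd, pq[idx].p0, pq[idx].p1, pq[idx].p2);
		if (++idx >= LOG_ENTRIES)
			idx = 0;
	}
}

int main(void)
{
	unsigned i;

	for (i = 0; i < 100; i++)	/* more than 60 calls exercise the wrap */
		log_cmd(i, i, i * 2, i * 3);
	dump_log();
	return 0;
}

Once more than 60 commands have been logged, the oldest entries are silently overwritten, which is the behaviour the wrap at qp >= 60 gives the driver's debug dump.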
/linux-4.4.14/drivers/infiniband/ulp/isert/
H A Dib_isert.c176 return cma_id->qp; isert_create_qp()
186 isert_conn->qp = isert_create_qp(isert_conn, comp, cma_id); isert_conn_setup_qp()
187 if (IS_ERR(isert_conn->qp)) { isert_conn_setup_qp()
188 ret = PTR_ERR(isert_conn->qp); isert_conn_setup_qp()
765 if (isert_conn->qp) { isert_connect_release()
766 struct isert_comp *comp = isert_conn->qp->recv_cq->cq_context; isert_connect_release()
769 ib_destroy_qp(isert_conn->qp); isert_connect_release()
783 struct isert_conn *isert_conn = cma_id->qp->qp_context; isert_connected_handler()
901 struct isert_conn *isert_conn = cma_id->qp->qp_context; isert_disconnected_handler()
928 struct isert_conn *isert_conn = cma_id->qp->qp_context; isert_connect_error()
995 ret = ib_post_recv(isert_conn->qp, isert_conn->rx_wr, isert_post_recvm()
1014 ret = ib_post_recv(isert_conn->qp, &rx_wr, &rx_wr_failed); isert_post_recv()
1038 ret = ib_post_send(isert_conn->qp, &send_wr, &send_wr_failed); isert_post_send()
1129 ret = ib_post_recv(isert_conn->qp, &rx_wr, &rx_wr_fail); isert_rdma_post_recvl()
2075 isert_conn = wc->qp->qp_context; isert_handle_wc()
2140 ret = ib_post_send(isert_conn->qp, &isert_cmd->tx_desc.send_wr, isert_post_response()
2574 ret = ib_post_send(isert_conn->qp, wr, &bad_wr); isert_fast_reg_mr()
2687 ret = ib_post_send(isert_conn->qp, wr, &bad_wr); isert_reg_sig_mr()
2897 rc = ib_post_send(isert_conn->qp, &wr->rdma_wr->wr, &wr_failed); isert_put_datain()
2931 rc = ib_post_send(isert_conn->qp, &wr->rdma_wr->wr, &wr_failed); isert_get_dataout()
3301 if (ib_post_recv(isert_conn->qp, &isert_conn->beacon, &bad_wr)) { isert_wait4flush()
H A Dib_isert.h166 struct ib_qp *qp; member in struct:isert_conn
/linux-4.4.14/net/rds/
H A Diw_cm.c584 rdsdebug("cm %p pd %p cq %p %p qp %p\n", ic->i_cm_id, rds_iw_conn_shutdown()
586 ic->i_cm_id ? ic->i_cm_id->qp : NULL); rds_iw_conn_shutdown()
601 if (ic->i_cm_id->qp) { rds_iw_conn_shutdown()
603 ib_modify_qp(ic->i_cm_id->qp, &qp_attr, IB_QP_STATE); rds_iw_conn_shutdown()
633 if (ic->i_cm_id->qp) rds_iw_conn_shutdown()
H A Dib_cm.c86 ret = ib_modify_qp(ic->i_cm_id->qp, attr, IB_QP_MIN_RNR_TIMER); rds_ib_tune_rnr()
144 err = ib_modify_qp(ic->i_cm_id->qp, &qp_attr, IB_QP_STATE); rds_ib_cm_connect_complete()
714 rdsdebug("cm %p pd %p cq %p %p qp %p\n", ic->i_cm_id, rds_ib_conn_shutdown()
716 ic->i_cm_id ? ic->i_cm_id->qp : NULL); rds_ib_conn_shutdown()
747 if (ic->i_cm_id->qp) rds_ib_conn_shutdown()
H A Diw_rdma.c693 ret = ib_post_send(ibmr->cm_id->qp, &reg_wr.wr, &failed_wr); rds_iw_rdma_reg_mr()
706 if (!ibmr->cm_id->qp || !ibmr->mr) rds_iw_rdma_fastreg_inv()
716 ret = ib_post_send(ibmr->cm_id->qp, &s_wr, &failed_wr); rds_iw_rdma_fastreg_inv()
H A Dib_send.c302 /* We expect errors as the qp is drained during shutdown */ rds_ib_send_cqe_handler()
725 ret = ib_post_send(ic->i_cm_id->qp, &first->s_wr, &failed_wr); rds_ib_xmit()
822 ret = ib_post_send(ic->i_cm_id->qp, &send->s_atomic_wr.wr, &failed_wr); rds_ib_xmit_atomic()
962 ret = ib_post_send(ic->i_cm_id->qp, &first->s_rdma_wr.wr, &failed_wr); rds_ib_xmit_rdma()
/linux-4.4.14/net/sched/
H A Dsch_api.c142 struct Qdisc_ops *q, **qp; register_qdisc() local
146 for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next) register_qdisc()
172 *qp = qops; register_qdisc()
186 struct Qdisc_ops *q, **qp; unregister_qdisc() local
190 for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next) unregister_qdisc()
194 *qp = q->next; unregister_qdisc()
/linux-4.4.14/net/sunrpc/xprtrdma/
H A Dverbs.c308 ib_query_qp(ia->ri_id->qp, attr, rpcrdma_conn_upcall()
551 if (ia->ri_id->qp) rpcrdma_ia_close()
713 if (ia->ri_id->qp) rpcrdma_ep_destroy()
719 if (ia->ri_id->qp) { rpcrdma_ep_destroy()
721 ia->ri_id->qp = NULL; rpcrdma_ep_destroy()
1289 rc = ib_post_send(ia->ri_id->qp, &send_wr, &send_wr_fail); rpcrdma_ep_post()
1318 rc = ib_post_recv(ia->ri_id->qp, &recv_wr, &recv_wr_fail); rpcrdma_ep_post_recv()
H A Dfrwr_ops.c396 rc = ib_post_send(ia->ri_id->qp, &reg_wr.wr, &bad_wr); frwr_op_map()
442 rc = ib_post_send(ia->ri_id->qp, &invalidate_wr, &bad_wr); frwr_op_unmap()
/linux-4.4.14/arch/powerpc/platforms/cell/spufs/
H A Dfile.c2157 struct mfc_cq_sr *qp, *spuqp; __spufs_dma_info_read() local
2166 qp = &info.dma_info_command_data[i]; __spufs_dma_info_read()
2169 qp->mfc_cq_data0_RW = spuqp->mfc_cq_data0_RW; __spufs_dma_info_read()
2170 qp->mfc_cq_data1_RW = spuqp->mfc_cq_data1_RW; __spufs_dma_info_read()
2171 qp->mfc_cq_data2_RW = spuqp->mfc_cq_data2_RW; __spufs_dma_info_read()
2172 qp->mfc_cq_data3_RW = spuqp->mfc_cq_data3_RW; __spufs_dma_info_read()
2209 struct mfc_cq_sr *qp, *puqp; __spufs_proxydma_info_read() local
2223 qp = &info.proxydma_info_command_data[i]; __spufs_proxydma_info_read()
2226 qp->mfc_cq_data0_RW = puqp->mfc_cq_data0_RW; __spufs_proxydma_info_read()
2227 qp->mfc_cq_data1_RW = puqp->mfc_cq_data1_RW; __spufs_proxydma_info_read()
2228 qp->mfc_cq_data2_RW = puqp->mfc_cq_data2_RW; __spufs_proxydma_info_read()
2229 qp->mfc_cq_data3_RW = puqp->mfc_cq_data3_RW; __spufs_proxydma_info_read()
/linux-4.4.14/mm/
H A Dmempolicy.c490 struct queue_pages *qp = walk->private; queue_pages_pte_range() local
491 unsigned long flags = qp->flags; queue_pages_pte_range()
514 if (node_isset(nid, *qp->nmask) == !!(flags & MPOL_MF_INVERT)) queue_pages_pte_range()
518 migrate_page_add(page, qp->pagelist, flags); queue_pages_pte_range()
530 struct queue_pages *qp = walk->private; queue_pages_hugetlb() local
531 unsigned long flags = qp->flags; queue_pages_hugetlb()
543 if (node_isset(nid, *qp->nmask) == !!(flags & MPOL_MF_INVERT)) queue_pages_hugetlb()
548 isolate_huge_page(page, qp->pagelist); queue_pages_hugetlb()
590 struct queue_pages *qp = walk->private; queue_pages_test_walk() local
592 unsigned long flags = qp->flags; queue_pages_test_walk()
605 if (qp->prev && qp->prev->vm_end < vma->vm_start) queue_pages_test_walk()
609 qp->prev = vma; queue_pages_test_walk()
638 struct queue_pages qp = { queue_pages_range() local
649 .private = &qp, queue_pages_range()
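The mempolicy.c results illustrate the page-table walker convention of threading per-walk state through walk->private: queue_pages_range() points .private at an on-stack struct queue_pages, and every callback casts it back out. A minimal, hedged sketch of that pattern follows; the state fields and the counting callback are illustrative, not the real mempolicy logic, and the caller is assumed to hold mm->mmap_sem.

#include <linux/mm.h>

/* Illustrative per-walk state, analogous to struct queue_pages above. */
struct example_walk_state {
	unsigned long flags;
	unsigned long pages_seen;
};

static int example_pmd_entry(pmd_t *pmd, unsigned long addr,
			     unsigned long next, struct mm_walk *walk)
{
	struct example_walk_state *st = walk->private;

	/* The real callback maps PTEs here; this sketch only counts the
	 * address range it is shown, in pages. */
	st->pages_seen += (next - addr) >> PAGE_SHIFT;
	return 0;	/* 0 keeps walking; a non-zero return aborts the walk */
}

static unsigned long example_count_pages(struct mm_struct *mm,
					 unsigned long start,
					 unsigned long end)
{
	struct example_walk_state st = { .flags = 0, .pages_seen = 0 };
	struct mm_walk walk = {
		.pmd_entry = example_pmd_entry,
		.mm        = mm,
		.private   = &st,	/* handed to every callback as walk->private */
	};

	walk_page_range(start, end, &walk);	/* caller holds mmap_sem */
	return st.pages_seen;
}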
/linux-4.4.14/drivers/scsi/pm8001/
H A Dpm8001_sas.c216 uint32_t *qp = (uint32_t *)(((char *) pm8001_phy_control() local
220 phy->invalid_dword_count = qp[0]; pm8001_phy_control()
221 phy->running_disparity_error_count = qp[1]; pm8001_phy_control()
222 phy->loss_of_dword_sync_count = qp[3]; pm8001_phy_control()
223 phy->phy_reset_problem_count = qp[4]; pm8001_phy_control()
/linux-4.4.14/net/openvswitch/
H A Dflow.c311 struct qtag_prefix *qp; parse_vlan() local
320 qp = (struct qtag_prefix *) skb->data; parse_vlan()
321 key->eth.tci = qp->tci | htons(VLAN_TAG_PRESENT); parse_vlan()
