Lines matching refs:qp — cross-reference hits for the identifier qp in the qib InfiniBand driver's QP management code (each hit shows the source line number, the matching line, and the enclosing function).

223 static void insert_qp(struct qib_ibdev *dev, struct qib_qp *qp)  in insert_qp()  argument
225 struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num); in insert_qp()
227 unsigned n = qpn_hash(dev, qp->ibqp.qp_num); in insert_qp()
229 atomic_inc(&qp->refcount); in insert_qp()
232 if (qp->ibqp.qp_num == 0) in insert_qp()
233 rcu_assign_pointer(ibp->qp0, qp); in insert_qp()
234 else if (qp->ibqp.qp_num == 1) in insert_qp()
235 rcu_assign_pointer(ibp->qp1, qp); in insert_qp()
237 qp->next = dev->qp_table[n]; in insert_qp()
238 rcu_assign_pointer(dev->qp_table[n], qp); in insert_qp()
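The insert_qp() hits above follow the standard RCU publish pattern: take a reference on behalf of the table, give QP0/QP1 their dedicated per-port slots, and splice every other QP onto the head of its hash bucket with rcu_assign_pointer() so lockless readers never observe a half-initialized entry. Below is a minimal sketch of that pattern using hypothetical node/table types (not the driver's structs), assuming writers serialize on a spinlock as the qpt_lock lines further down suggest:

```c
#include <linux/atomic.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct node {                            /* hypothetical stand-in for struct qib_qp */
	u32 key;
	atomic_t refcount;
	struct node __rcu *next;
};

struct table {                           /* hypothetical stand-in for dev->qp_table */
	spinlock_t lock;                 /* writers serialize on this lock */
	struct node __rcu *bucket[256];
};

static void publish(struct table *t, struct node *n, unsigned int hash)
{
	unsigned long flags;

	atomic_inc(&n->refcount);        /* the table now holds a reference */

	spin_lock_irqsave(&t->lock, flags);
	/* Not yet visible to readers, so a plain initialization is enough. */
	RCU_INIT_POINTER(n->next,
			 rcu_dereference_protected(t->bucket[hash],
						   lockdep_is_held(&t->lock)));
	/* Publish: orders all prior stores before the pointer becomes visible. */
	rcu_assign_pointer(t->bucket[hash], n);
	spin_unlock_irqrestore(&t->lock, flags);
}
```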
248 static void remove_qp(struct qib_ibdev *dev, struct qib_qp *qp) in remove_qp() argument
250 struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num); in remove_qp()
251 unsigned n = qpn_hash(dev, qp->ibqp.qp_num); in remove_qp()
258 lockdep_is_held(&dev->qpt_lock)) == qp) { in remove_qp()
261 lockdep_is_held(&dev->qpt_lock)) == qp) { in remove_qp()
272 if (q == qp) { in remove_qp()
274 rcu_dereference_protected(qp->next, in remove_qp()
284 atomic_dec(&qp->refcount); in remove_qp()
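The remove_qp() hits show the matching unpublish side: the bucket is walked with rcu_dereference_protected() (legal because the table lock is held), the entry is unlinked, and only the table's reference is dropped; the memory is not freed until the refcount drains (see the wait_event() calls in the modify/destroy paths below). A sketch of that unlink step, continuing the hypothetical types from the previous sketch:

```c
/* Continues the hypothetical node/table types from the publish sketch. */
static void unpublish(struct table *t, struct node *n, unsigned int hash)
{
	struct node __rcu **pp;
	struct node *cur;
	unsigned long flags;

	spin_lock_irqsave(&t->lock, flags);
	pp = &t->bucket[hash];
	while ((cur = rcu_dereference_protected(*pp,
				lockdep_is_held(&t->lock))) != NULL) {
		if (cur == n) {
			/* Unlink; readers already past this node are unaffected. */
			rcu_assign_pointer(*pp,
				rcu_dereference_protected(n->next,
					lockdep_is_held(&t->lock)));
			/* Drop the table's reference.  The node is not freed
			 * here; callers wait for the refcount to reach zero
			 * before reclaiming it. */
			atomic_dec(&n->refcount);
			break;
		}
		pp = &cur->next;
	}
	spin_unlock_irqrestore(&t->lock, flags);
}
```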
299 struct qib_qp *qp; in qib_free_all_qps() local
317 qp = rcu_dereference_protected(dev->qp_table[n], in qib_free_all_qps()
321 for (; qp; qp = rcu_dereference_protected(qp->next, in qib_free_all_qps()
341 struct qib_qp *qp = NULL; in qib_lookup_qpn() local
346 qp = rcu_dereference(ibp->qp0); in qib_lookup_qpn()
348 qp = rcu_dereference(ibp->qp1); in qib_lookup_qpn()
349 if (qp) in qib_lookup_qpn()
350 atomic_inc(&qp->refcount); in qib_lookup_qpn()
355 for (qp = rcu_dereference(dev->qp_table[n]); qp; in qib_lookup_qpn()
356 qp = rcu_dereference(qp->next)) in qib_lookup_qpn()
357 if (qp->ibqp.qp_num == qpn) { in qib_lookup_qpn()
358 atomic_inc(&qp->refcount); in qib_lookup_qpn()
363 return qp; in qib_lookup_qpn()
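qib_lookup_qpn() is the lockless reader side: under rcu_read_lock() it chases rcu_dereference()'d pointers down the bucket (or picks up the dedicated qp0/qp1 slot) and, on a match, takes a reference before returning so the QP cannot be torn down underneath the caller. A sketch of the same read path for the hypothetical table above:

```c
/* Reader side for the hypothetical table.  The caller must later drop the
 * reference it gets here (and wake anyone waiting for refcount == 0). */
static struct node *lookup(struct table *t, u32 key, unsigned int hash)
{
	struct node *n;

	rcu_read_lock();
	for (n = rcu_dereference(t->bucket[hash]); n;
	     n = rcu_dereference(n->next)) {
		if (n->key == key) {
			atomic_inc(&n->refcount);   /* pin before leaving RCU */
			break;
		}
	}
	rcu_read_unlock();

	return n;               /* NULL if no entry with this key was found */
}
```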
371 static void qib_reset_qp(struct qib_qp *qp, enum ib_qp_type type) in qib_reset_qp() argument
373 qp->remote_qpn = 0; in qib_reset_qp()
374 qp->qkey = 0; in qib_reset_qp()
375 qp->qp_access_flags = 0; in qib_reset_qp()
376 atomic_set(&qp->s_dma_busy, 0); in qib_reset_qp()
377 qp->s_flags &= QIB_S_SIGNAL_REQ_WR; in qib_reset_qp()
378 qp->s_hdrwords = 0; in qib_reset_qp()
379 qp->s_wqe = NULL; in qib_reset_qp()
380 qp->s_draining = 0; in qib_reset_qp()
381 qp->s_next_psn = 0; in qib_reset_qp()
382 qp->s_last_psn = 0; in qib_reset_qp()
383 qp->s_sending_psn = 0; in qib_reset_qp()
384 qp->s_sending_hpsn = 0; in qib_reset_qp()
385 qp->s_psn = 0; in qib_reset_qp()
386 qp->r_psn = 0; in qib_reset_qp()
387 qp->r_msn = 0; in qib_reset_qp()
389 qp->s_state = IB_OPCODE_RC_SEND_LAST; in qib_reset_qp()
390 qp->r_state = IB_OPCODE_RC_SEND_LAST; in qib_reset_qp()
392 qp->s_state = IB_OPCODE_UC_SEND_LAST; in qib_reset_qp()
393 qp->r_state = IB_OPCODE_UC_SEND_LAST; in qib_reset_qp()
395 qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE; in qib_reset_qp()
396 qp->r_nak_state = 0; in qib_reset_qp()
397 qp->r_aflags = 0; in qib_reset_qp()
398 qp->r_flags = 0; in qib_reset_qp()
399 qp->s_head = 0; in qib_reset_qp()
400 qp->s_tail = 0; in qib_reset_qp()
401 qp->s_cur = 0; in qib_reset_qp()
402 qp->s_acked = 0; in qib_reset_qp()
403 qp->s_last = 0; in qib_reset_qp()
404 qp->s_ssn = 1; in qib_reset_qp()
405 qp->s_lsn = 0; in qib_reset_qp()
406 qp->s_mig_state = IB_MIG_MIGRATED; in qib_reset_qp()
407 memset(qp->s_ack_queue, 0, sizeof(qp->s_ack_queue)); in qib_reset_qp()
408 qp->r_head_ack_queue = 0; in qib_reset_qp()
409 qp->s_tail_ack_queue = 0; in qib_reset_qp()
410 qp->s_num_rd_atomic = 0; in qib_reset_qp()
411 if (qp->r_rq.wq) { in qib_reset_qp()
412 qp->r_rq.wq->head = 0; in qib_reset_qp()
413 qp->r_rq.wq->tail = 0; in qib_reset_qp()
415 qp->r_sge.num_sge = 0; in qib_reset_qp()
418 static void clear_mr_refs(struct qib_qp *qp, int clr_sends) in clear_mr_refs() argument
422 if (test_and_clear_bit(QIB_R_REWIND_SGE, &qp->r_aflags)) in clear_mr_refs()
423 qib_put_ss(&qp->s_rdma_read_sge); in clear_mr_refs()
425 qib_put_ss(&qp->r_sge); in clear_mr_refs()
428 while (qp->s_last != qp->s_head) { in clear_mr_refs()
429 struct qib_swqe *wqe = get_swqe_ptr(qp, qp->s_last); in clear_mr_refs()
437 if (qp->ibqp.qp_type == IB_QPT_UD || in clear_mr_refs()
438 qp->ibqp.qp_type == IB_QPT_SMI || in clear_mr_refs()
439 qp->ibqp.qp_type == IB_QPT_GSI) in clear_mr_refs()
441 if (++qp->s_last >= qp->s_size) in clear_mr_refs()
442 qp->s_last = 0; in clear_mr_refs()
444 if (qp->s_rdma_mr) { in clear_mr_refs()
445 qib_put_mr(qp->s_rdma_mr); in clear_mr_refs()
446 qp->s_rdma_mr = NULL; in clear_mr_refs()
450 if (qp->ibqp.qp_type != IB_QPT_RC) in clear_mr_refs()
453 for (n = 0; n < ARRAY_SIZE(qp->s_ack_queue); n++) { in clear_mr_refs()
454 struct qib_ack_entry *e = &qp->s_ack_queue[n]; in clear_mr_refs()
474 int qib_error_qp(struct qib_qp *qp, enum ib_wc_status err) in qib_error_qp() argument
476 struct qib_ibdev *dev = to_idev(qp->ibqp.device); in qib_error_qp()
480 if (qp->state == IB_QPS_ERR || qp->state == IB_QPS_RESET) in qib_error_qp()
483 qp->state = IB_QPS_ERR; in qib_error_qp()
485 if (qp->s_flags & (QIB_S_TIMER | QIB_S_WAIT_RNR)) { in qib_error_qp()
486 qp->s_flags &= ~(QIB_S_TIMER | QIB_S_WAIT_RNR); in qib_error_qp()
487 del_timer(&qp->s_timer); in qib_error_qp()
490 if (qp->s_flags & QIB_S_ANY_WAIT_SEND) in qib_error_qp()
491 qp->s_flags &= ~QIB_S_ANY_WAIT_SEND; in qib_error_qp()
494 if (!list_empty(&qp->iowait) && !(qp->s_flags & QIB_S_BUSY)) { in qib_error_qp()
495 qp->s_flags &= ~QIB_S_ANY_WAIT_IO; in qib_error_qp()
496 list_del_init(&qp->iowait); in qib_error_qp()
500 if (!(qp->s_flags & QIB_S_BUSY)) { in qib_error_qp()
501 qp->s_hdrwords = 0; in qib_error_qp()
502 if (qp->s_rdma_mr) { in qib_error_qp()
503 qib_put_mr(qp->s_rdma_mr); in qib_error_qp()
504 qp->s_rdma_mr = NULL; in qib_error_qp()
506 if (qp->s_tx) { in qib_error_qp()
507 qib_put_txreq(qp->s_tx); in qib_error_qp()
508 qp->s_tx = NULL; in qib_error_qp()
513 if (qp->s_last != qp->s_head) in qib_error_qp()
514 qib_schedule_send(qp); in qib_error_qp()
516 clear_mr_refs(qp, 0); in qib_error_qp()
519 wc.qp = &qp->ibqp; in qib_error_qp()
522 if (test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags)) { in qib_error_qp()
523 wc.wr_id = qp->r_wr_id; in qib_error_qp()
525 qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1); in qib_error_qp()
529 if (qp->r_rq.wq) { in qib_error_qp()
534 spin_lock(&qp->r_rq.lock); in qib_error_qp()
537 wq = qp->r_rq.wq; in qib_error_qp()
539 if (head >= qp->r_rq.size) in qib_error_qp()
542 if (tail >= qp->r_rq.size) in qib_error_qp()
545 wc.wr_id = get_rwqe_ptr(&qp->r_rq, tail)->wr_id; in qib_error_qp()
546 if (++tail >= qp->r_rq.size) in qib_error_qp()
548 qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1); in qib_error_qp()
552 spin_unlock(&qp->r_rq.lock); in qib_error_qp()
553 } else if (qp->ibqp.event_handler) in qib_error_qp()
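The qib_error_qp() hits around lines 529-552 flush every receive WQE still sitting between tail and head of the ring into the CQ as flush-error completions, clamping both indices because the queue lives in memory shared with userspace. A compact sketch of that wraparound flush with hypothetical ring/completion types (deliver() stands in for qib_cq_enter()):

```c
#include <stdint.h>

struct rwqe { uint64_t wr_id; };              /* hypothetical receive WQE */

struct rq {                                   /* hypothetical shared receive ring */
	uint32_t size;                        /* number of slots */
	uint32_t head;                        /* next slot the consumer posts to */
	uint32_t tail;                        /* next slot the driver consumes */
	struct rwqe *wqe;                     /* array of 'size' entries */
};

/* Flush every posted-but-unconsumed receive WQE as a completion.  head and
 * tail are clamped first because the ring is shared with untrusted userspace
 * and could hold out-of-range values. */
static void flush_rq(struct rq *rq, void (*deliver)(uint64_t wr_id))
{
	uint32_t head = rq->head;
	uint32_t tail = rq->tail;

	if (head >= rq->size)
		head = 0;
	if (tail >= rq->size)
		tail = 0;

	while (tail != head) {
		deliver(rq->wqe[tail].wr_id);
		if (++tail >= rq->size)
			tail = 0;
	}
	rq->tail = tail;
}
```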
573 struct qib_qp *qp = to_iqp(ibqp); in qib_modify_qp() local
581 spin_lock_irq(&qp->r_lock); in qib_modify_qp()
582 spin_lock(&qp->s_lock); in qib_modify_qp()
585 attr->cur_qp_state : qp->state; in qib_modify_qp()
595 if (qib_check_ah(qp->ibqp.device, &attr->ah_attr)) in qib_modify_qp()
602 if (qib_check_ah(qp->ibqp.device, &attr->alt_ah_attr)) in qib_modify_qp()
617 if (qp->ibqp.qp_type == IB_QPT_SMI || in qib_modify_qp()
618 qp->ibqp.qp_type == IB_QPT_GSI || in qib_modify_qp()
645 int mtu, pidx = qp->port_num - 1; in qib_modify_qp()
676 if (qp->s_mig_state == IB_MIG_ARMED) in qib_modify_qp()
681 if (qp->s_mig_state == IB_MIG_REARM) in qib_modify_qp()
685 if (qp->s_mig_state == IB_MIG_ARMED) in qib_modify_qp()
697 if (qp->state != IB_QPS_RESET) { in qib_modify_qp()
698 qp->state = IB_QPS_RESET; in qib_modify_qp()
700 if (!list_empty(&qp->iowait)) in qib_modify_qp()
701 list_del_init(&qp->iowait); in qib_modify_qp()
703 qp->s_flags &= ~(QIB_S_TIMER | QIB_S_ANY_WAIT); in qib_modify_qp()
704 spin_unlock(&qp->s_lock); in qib_modify_qp()
705 spin_unlock_irq(&qp->r_lock); in qib_modify_qp()
707 cancel_work_sync(&qp->s_work); in qib_modify_qp()
708 del_timer_sync(&qp->s_timer); in qib_modify_qp()
709 wait_event(qp->wait_dma, !atomic_read(&qp->s_dma_busy)); in qib_modify_qp()
710 if (qp->s_tx) { in qib_modify_qp()
711 qib_put_txreq(qp->s_tx); in qib_modify_qp()
712 qp->s_tx = NULL; in qib_modify_qp()
714 remove_qp(dev, qp); in qib_modify_qp()
715 wait_event(qp->wait, !atomic_read(&qp->refcount)); in qib_modify_qp()
716 spin_lock_irq(&qp->r_lock); in qib_modify_qp()
717 spin_lock(&qp->s_lock); in qib_modify_qp()
718 clear_mr_refs(qp, 1); in qib_modify_qp()
719 qib_reset_qp(qp, ibqp->qp_type); in qib_modify_qp()
725 qp->r_flags &= ~QIB_R_COMM_EST; in qib_modify_qp()
726 qp->state = new_state; in qib_modify_qp()
730 qp->s_draining = qp->s_last != qp->s_cur; in qib_modify_qp()
731 qp->state = new_state; in qib_modify_qp()
735 if (qp->ibqp.qp_type == IB_QPT_RC) in qib_modify_qp()
737 qp->state = new_state; in qib_modify_qp()
741 lastwqe = qib_error_qp(qp, IB_WC_WR_FLUSH_ERR); in qib_modify_qp()
745 qp->state = new_state; in qib_modify_qp()
750 qp->s_pkey_index = attr->pkey_index; in qib_modify_qp()
753 qp->port_num = attr->port_num; in qib_modify_qp()
756 qp->remote_qpn = attr->dest_qp_num; in qib_modify_qp()
759 qp->s_next_psn = attr->sq_psn & QIB_PSN_MASK; in qib_modify_qp()
760 qp->s_psn = qp->s_next_psn; in qib_modify_qp()
761 qp->s_sending_psn = qp->s_next_psn; in qib_modify_qp()
762 qp->s_last_psn = qp->s_next_psn - 1; in qib_modify_qp()
763 qp->s_sending_hpsn = qp->s_last_psn; in qib_modify_qp()
767 qp->r_psn = attr->rq_psn & QIB_PSN_MASK; in qib_modify_qp()
770 qp->qp_access_flags = attr->qp_access_flags; in qib_modify_qp()
773 qp->remote_ah_attr = attr->ah_attr; in qib_modify_qp()
774 qp->s_srate = attr->ah_attr.static_rate; in qib_modify_qp()
778 qp->alt_ah_attr = attr->alt_ah_attr; in qib_modify_qp()
779 qp->s_alt_pkey_index = attr->alt_pkey_index; in qib_modify_qp()
783 qp->s_mig_state = attr->path_mig_state; in qib_modify_qp()
785 qp->remote_ah_attr = qp->alt_ah_attr; in qib_modify_qp()
786 qp->port_num = qp->alt_ah_attr.port_num; in qib_modify_qp()
787 qp->s_pkey_index = qp->s_alt_pkey_index; in qib_modify_qp()
792 qp->path_mtu = pmtu; in qib_modify_qp()
793 qp->pmtu = ib_mtu_enum_to_int(pmtu); in qib_modify_qp()
797 qp->s_retry_cnt = attr->retry_cnt; in qib_modify_qp()
798 qp->s_retry = attr->retry_cnt; in qib_modify_qp()
802 qp->s_rnr_retry_cnt = attr->rnr_retry; in qib_modify_qp()
803 qp->s_rnr_retry = attr->rnr_retry; in qib_modify_qp()
807 qp->r_min_rnr_timer = attr->min_rnr_timer; in qib_modify_qp()
810 qp->timeout = attr->timeout; in qib_modify_qp()
811 qp->timeout_jiffies = in qib_modify_qp()
812 usecs_to_jiffies((4096UL * (1UL << qp->timeout)) / in qib_modify_qp()
817 qp->qkey = attr->qkey; in qib_modify_qp()
820 qp->r_max_rd_atomic = attr->max_dest_rd_atomic; in qib_modify_qp()
823 qp->s_max_rd_atomic = attr->max_rd_atomic; in qib_modify_qp()
825 spin_unlock(&qp->s_lock); in qib_modify_qp()
826 spin_unlock_irq(&qp->r_lock); in qib_modify_qp()
829 insert_qp(dev, qp); in qib_modify_qp()
832 ev.device = qp->ibqp.device; in qib_modify_qp()
833 ev.element.qp = &qp->ibqp; in qib_modify_qp()
835 qp->ibqp.event_handler(&ev, qp->ibqp.qp_context); in qib_modify_qp()
838 ev.device = qp->ibqp.device; in qib_modify_qp()
839 ev.element.qp = &qp->ibqp; in qib_modify_qp()
841 qp->ibqp.event_handler(&ev, qp->ibqp.qp_context); in qib_modify_qp()
847 spin_unlock(&qp->s_lock); in qib_modify_qp()
848 spin_unlock_irq(&qp->r_lock); in qib_modify_qp()
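The timeout handling around lines 810-812 encodes the InfiniBand local ACK timeout: the 5-bit attr->timeout value t stands for 4.096 µs × 2^t, which the driver converts to jiffies once so the retry timer can be armed cheaply later. A small sketch of that conversion, assuming the same integer arithmetic the listing shows (4096 ns × 2^t, divided by 1000 to get microseconds):

```c
#include <linux/jiffies.h>
#include <linux/types.h>

/* IB local ACK timeout: a 5-bit exponent t meaning 4.096 us * 2^t.
 * (t == 0 is "infinite" per the spec; this sketch only shows the arithmetic.) */
static unsigned long ack_timeout_to_jiffies(u8 timeout)
{
	/* 4096 << timeout is the timeout in nanoseconds; dividing by 1000
	 * yields microseconds for usecs_to_jiffies(). */
	return usecs_to_jiffies((4096UL * (1UL << timeout)) / 1000UL);
}
```

For example, timeout = 14 corresponds to 4.096 µs × 2^14 ≈ 67 ms.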
858 struct qib_qp *qp = to_iqp(ibqp); in qib_query_qp() local
860 attr->qp_state = qp->state; in qib_query_qp()
862 attr->path_mtu = qp->path_mtu; in qib_query_qp()
863 attr->path_mig_state = qp->s_mig_state; in qib_query_qp()
864 attr->qkey = qp->qkey; in qib_query_qp()
865 attr->rq_psn = qp->r_psn & QIB_PSN_MASK; in qib_query_qp()
866 attr->sq_psn = qp->s_next_psn & QIB_PSN_MASK; in qib_query_qp()
867 attr->dest_qp_num = qp->remote_qpn; in qib_query_qp()
868 attr->qp_access_flags = qp->qp_access_flags; in qib_query_qp()
869 attr->cap.max_send_wr = qp->s_size - 1; in qib_query_qp()
870 attr->cap.max_recv_wr = qp->ibqp.srq ? 0 : qp->r_rq.size - 1; in qib_query_qp()
871 attr->cap.max_send_sge = qp->s_max_sge; in qib_query_qp()
872 attr->cap.max_recv_sge = qp->r_rq.max_sge; in qib_query_qp()
874 attr->ah_attr = qp->remote_ah_attr; in qib_query_qp()
875 attr->alt_ah_attr = qp->alt_ah_attr; in qib_query_qp()
876 attr->pkey_index = qp->s_pkey_index; in qib_query_qp()
877 attr->alt_pkey_index = qp->s_alt_pkey_index; in qib_query_qp()
879 attr->sq_draining = qp->s_draining; in qib_query_qp()
880 attr->max_rd_atomic = qp->s_max_rd_atomic; in qib_query_qp()
881 attr->max_dest_rd_atomic = qp->r_max_rd_atomic; in qib_query_qp()
882 attr->min_rnr_timer = qp->r_min_rnr_timer; in qib_query_qp()
883 attr->port_num = qp->port_num; in qib_query_qp()
884 attr->timeout = qp->timeout; in qib_query_qp()
885 attr->retry_cnt = qp->s_retry_cnt; in qib_query_qp()
886 attr->rnr_retry = qp->s_rnr_retry_cnt; in qib_query_qp()
887 attr->alt_port_num = qp->alt_ah_attr.port_num; in qib_query_qp()
888 attr->alt_timeout = qp->alt_timeout; in qib_query_qp()
890 init_attr->event_handler = qp->ibqp.event_handler; in qib_query_qp()
891 init_attr->qp_context = qp->ibqp.qp_context; in qib_query_qp()
892 init_attr->send_cq = qp->ibqp.send_cq; in qib_query_qp()
893 init_attr->recv_cq = qp->ibqp.recv_cq; in qib_query_qp()
894 init_attr->srq = qp->ibqp.srq; in qib_query_qp()
896 if (qp->s_flags & QIB_S_SIGNAL_REQ_WR) in qib_query_qp()
900 init_attr->qp_type = qp->ibqp.qp_type; in qib_query_qp()
901 init_attr->port_num = qp->port_num; in qib_query_qp()
911 __be32 qib_compute_aeth(struct qib_qp *qp) in qib_compute_aeth() argument
913 u32 aeth = qp->r_msn & QIB_MSN_MASK; in qib_compute_aeth()
915 if (qp->ibqp.srq) { in qib_compute_aeth()
924 struct qib_rwq *wq = qp->r_rq.wq; in qib_compute_aeth()
930 if (head >= qp->r_rq.size) in qib_compute_aeth()
933 if (tail >= qp->r_rq.size) in qib_compute_aeth()
942 credits += qp->r_rq.size; in qib_compute_aeth()
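qib_compute_aeth() starts from the 24-bit MSN and, when the QP owns its own receive queue (no SRQ), derives the credit count to advertise from the ring occupancy: credits = head - tail, with the queue size added back on wraparound (line 942). A sketch of that occupancy computation, reusing the hypothetical struct rq from the flush sketch earlier:

```c
/* Number of posted-but-unconsumed receive WQEs, i.e. the credits a responder
 * can advertise to the remote sender.  head/tail are clamped for the same
 * shared-memory reason as in flush_rq(). */
static uint32_t rq_credits(const struct rq *rq)
{
	uint32_t head = rq->head;
	uint32_t tail = rq->tail;
	uint32_t credits;

	if (head >= rq->size)
		head = 0;
	if (tail >= rq->size)
		tail = 0;

	credits = head - tail;
	if ((int32_t)credits < 0)     /* head has wrapped around the ring */
		credits += rq->size;

	return credits;
}
```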
979 struct qib_qp *qp; in qib_create_qp() local
1039 sz = sizeof(*qp); in qib_create_qp()
1045 sg_list_sz = sizeof(*qp->r_sg_list) * in qib_create_qp()
1048 sg_list_sz = sizeof(*qp->r_sg_list) * in qib_create_qp()
1050 qp = kzalloc(sz + sg_list_sz, gfp); in qib_create_qp()
1051 if (!qp) { in qib_create_qp()
1055 RCU_INIT_POINTER(qp->next, NULL); in qib_create_qp()
1056 qp->s_hdr = kzalloc(sizeof(*qp->s_hdr), gfp); in qib_create_qp()
1057 if (!qp->s_hdr) { in qib_create_qp()
1061 qp->timeout_jiffies = in qib_create_qp()
1062 usecs_to_jiffies((4096UL * (1UL << qp->timeout)) / in qib_create_qp()
1067 qp->r_rq.size = init_attr->cap.max_recv_wr + 1; in qib_create_qp()
1068 qp->r_rq.max_sge = init_attr->cap.max_recv_sge; in qib_create_qp()
1069 sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) + in qib_create_qp()
1072 qp->r_rq.wq = vmalloc_user( in qib_create_qp()
1074 qp->r_rq.size * sz); in qib_create_qp()
1076 qp->r_rq.wq = __vmalloc( in qib_create_qp()
1078 qp->r_rq.size * sz, in qib_create_qp()
1081 if (!qp->r_rq.wq) { in qib_create_qp()
1091 spin_lock_init(&qp->r_lock); in qib_create_qp()
1092 spin_lock_init(&qp->s_lock); in qib_create_qp()
1093 spin_lock_init(&qp->r_rq.lock); in qib_create_qp()
1094 atomic_set(&qp->refcount, 0); in qib_create_qp()
1095 init_waitqueue_head(&qp->wait); in qib_create_qp()
1096 init_waitqueue_head(&qp->wait_dma); in qib_create_qp()
1097 init_timer(&qp->s_timer); in qib_create_qp()
1098 qp->s_timer.data = (unsigned long)qp; in qib_create_qp()
1099 INIT_WORK(&qp->s_work, qib_do_send); in qib_create_qp()
1100 INIT_LIST_HEAD(&qp->iowait); in qib_create_qp()
1101 INIT_LIST_HEAD(&qp->rspwait); in qib_create_qp()
1102 qp->state = IB_QPS_RESET; in qib_create_qp()
1103 qp->s_wq = swq; in qib_create_qp()
1104 qp->s_size = init_attr->cap.max_send_wr + 1; in qib_create_qp()
1105 qp->s_max_sge = init_attr->cap.max_send_sge; in qib_create_qp()
1107 qp->s_flags = QIB_S_SIGNAL_REQ_WR; in qib_create_qp()
1114 vfree(qp->r_rq.wq); in qib_create_qp()
1117 qp->ibqp.qp_num = err; in qib_create_qp()
1118 qp->port_num = init_attr->port_num; in qib_create_qp()
1119 qib_reset_qp(qp, init_attr->qp_type); in qib_create_qp()
1135 if (!qp->r_rq.wq) { in qib_create_qp()
1145 u32 s = sizeof(struct qib_rwq) + qp->r_rq.size * sz; in qib_create_qp()
1147 qp->ip = qib_create_mmap_info(dev, s, in qib_create_qp()
1149 qp->r_rq.wq); in qib_create_qp()
1150 if (!qp->ip) { in qib_create_qp()
1155 err = ib_copy_to_udata(udata, &(qp->ip->offset), in qib_create_qp()
1156 sizeof(qp->ip->offset)); in qib_create_qp()
1174 if (qp->ip) { in qib_create_qp()
1176 list_add(&qp->ip->pending_mmaps, &dev->pending_mmaps); in qib_create_qp()
1180 ret = &qp->ibqp; in qib_create_qp()
1184 if (qp->ip) in qib_create_qp()
1185 kref_put(&qp->ip->ref, qib_release_mmap_info); in qib_create_qp()
1187 vfree(qp->r_rq.wq); in qib_create_qp()
1188 free_qpn(&dev->qpn_table, qp->ibqp.qp_num); in qib_create_qp()
1190 kfree(qp->s_hdr); in qib_create_qp()
1191 kfree(qp); in qib_create_qp()
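The qib_create_qp() hits around lines 1067-1081 size the receive ring as a header plus r_rq.size per-WQE entries and allocate it with vmalloc_user() when userspace will later mmap() it, falling back to a plain zeroed vmalloc otherwise. A hedged sketch of that allocation choice (a hypothetical helper, with the kernel-only branch simplified to vzalloc()):

```c
#include <linux/types.h>
#include <linux/vmalloc.h>

/* Allocate a ring of 'nentries' slots of 'entry_sz' bytes plus a 'hdr_sz'
 * header.  vmalloc_user() returns zeroed pages suitable for mapping into
 * userspace later (e.g. via remap_vmalloc_range()); vzalloc() is enough for
 * a kernel-only ring. */
static void *alloc_ring(size_t hdr_sz, size_t entry_sz, u32 nentries,
			bool user_mappable)
{
	size_t bytes = hdr_sz + (size_t)nentries * entry_sz;

	if (user_mappable)
		return vmalloc_user(bytes);
	return vzalloc(bytes);
}
```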
1209 struct qib_qp *qp = to_iqp(ibqp); in qib_destroy_qp() local
1213 spin_lock_irq(&qp->s_lock); in qib_destroy_qp()
1214 if (qp->state != IB_QPS_RESET) { in qib_destroy_qp()
1215 qp->state = IB_QPS_RESET; in qib_destroy_qp()
1217 if (!list_empty(&qp->iowait)) in qib_destroy_qp()
1218 list_del_init(&qp->iowait); in qib_destroy_qp()
1220 qp->s_flags &= ~(QIB_S_TIMER | QIB_S_ANY_WAIT); in qib_destroy_qp()
1221 spin_unlock_irq(&qp->s_lock); in qib_destroy_qp()
1222 cancel_work_sync(&qp->s_work); in qib_destroy_qp()
1223 del_timer_sync(&qp->s_timer); in qib_destroy_qp()
1224 wait_event(qp->wait_dma, !atomic_read(&qp->s_dma_busy)); in qib_destroy_qp()
1225 if (qp->s_tx) { in qib_destroy_qp()
1226 qib_put_txreq(qp->s_tx); in qib_destroy_qp()
1227 qp->s_tx = NULL; in qib_destroy_qp()
1229 remove_qp(dev, qp); in qib_destroy_qp()
1230 wait_event(qp->wait, !atomic_read(&qp->refcount)); in qib_destroy_qp()
1231 clear_mr_refs(qp, 1); in qib_destroy_qp()
1233 spin_unlock_irq(&qp->s_lock); in qib_destroy_qp()
1236 free_qpn(&dev->qpn_table, qp->ibqp.qp_num); in qib_destroy_qp()
1241 if (qp->ip) in qib_destroy_qp()
1242 kref_put(&qp->ip->ref, qib_release_mmap_info); in qib_destroy_qp()
1244 vfree(qp->r_rq.wq); in qib_destroy_qp()
1245 vfree(qp->s_wq); in qib_destroy_qp()
1246 kfree(qp->s_hdr); in qib_destroy_qp()
1247 kfree(qp); in qib_destroy_qp()
1283 void qib_get_credit(struct qib_qp *qp, u32 aeth) in qib_get_credit() argument
1293 if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT)) { in qib_get_credit()
1294 qp->s_flags |= QIB_S_UNLIMITED_CREDIT; in qib_get_credit()
1295 if (qp->s_flags & QIB_S_WAIT_SSN_CREDIT) { in qib_get_credit()
1296 qp->s_flags &= ~QIB_S_WAIT_SSN_CREDIT; in qib_get_credit()
1297 qib_schedule_send(qp); in qib_get_credit()
1300 } else if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT)) { in qib_get_credit()
1303 if (qib_cmp24(credit, qp->s_lsn) > 0) { in qib_get_credit()
1304 qp->s_lsn = credit; in qib_get_credit()
1305 if (qp->s_flags & QIB_S_WAIT_SSN_CREDIT) { in qib_get_credit()
1306 qp->s_flags &= ~QIB_S_WAIT_SSN_CREDIT; in qib_get_credit()
1307 qib_schedule_send(qp); in qib_get_credit()
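qib_get_credit() only advances the credit limit s_lsn when the newly granted credit is ahead of the old one, using qib_cmp24() to compare serial numbers that wrap modulo 2^24. One common way to implement such a wrapping comparison (a sketch, not necessarily the driver's exact qib_cmp24(); inputs are assumed already masked to 24 bits):

```c
#include <stdint.h>

/* Compare two 24-bit sequence numbers that wrap modulo 2^24.  Returns <0, 0,
 * or >0; values within half the sequence space of each other compare in the
 * intuitive circular order. */
static int cmp24(uint32_t a, uint32_t b)
{
	/* Shift the 24-bit difference into the top of a 32-bit word so the
	 * sign bit tells which value is "ahead" modulo 2^24. */
	return (int32_t)((a - b) << 8);
}
```

Used as in lines 1303-1304: if cmp24(credit, s_lsn) > 0, the new credit supersedes the old limit and any sender blocked on QIB_S_WAIT_SSN_CREDIT can be rescheduled.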
1317 struct qib_qp *qp; member
1343 struct qib_qp *pqp = iter->qp; in qib_qp_iter_next()
1344 struct qib_qp *qp; in qib_qp_iter_next() local
1348 qp = rcu_dereference(pqp->next); in qib_qp_iter_next()
1350 qp = rcu_dereference(dev->qp_table[n]); in qib_qp_iter_next()
1351 pqp = qp; in qib_qp_iter_next()
1352 if (qp) { in qib_qp_iter_next()
1353 iter->qp = qp; in qib_qp_iter_next()
1368 struct qib_qp *qp = iter->qp; in qib_qp_iter_print() local
1370 wqe = get_swqe_ptr(qp, qp->s_last); in qib_qp_iter_print()
1374 qp->ibqp.qp_num, in qib_qp_iter_print()
1375 qp_type_str[qp->ibqp.qp_type], in qib_qp_iter_print()
1376 qp->state, in qib_qp_iter_print()
1378 qp->s_hdrwords, in qib_qp_iter_print()
1379 qp->s_flags, in qib_qp_iter_print()
1380 atomic_read(&qp->s_dma_busy), in qib_qp_iter_print()
1381 !list_empty(&qp->iowait), in qib_qp_iter_print()
1382 qp->timeout, in qib_qp_iter_print()
1384 qp->s_lsn, in qib_qp_iter_print()
1385 qp->s_last_psn, in qib_qp_iter_print()
1386 qp->s_psn, qp->s_next_psn, in qib_qp_iter_print()
1387 qp->s_sending_psn, qp->s_sending_hpsn, in qib_qp_iter_print()
1388 qp->s_last, qp->s_acked, qp->s_cur, in qib_qp_iter_print()
1389 qp->s_tail, qp->s_head, qp->s_size, in qib_qp_iter_print()
1390 qp->remote_qpn, in qib_qp_iter_print()
1391 qp->remote_ah_attr.dlid); in qib_qp_iter_print()