/*
 * Copyright (c) 2012, 2013 Intel Corporation.  All rights reserved.
 * Copyright (c) 2006 - 2012 QLogic Corporation.  * All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/err.h>
#include <linux/vmalloc.h>
#include <linux/jhash.h>
#ifdef CONFIG_DEBUG_FS
#include <linux/seq_file.h>
#endif

#include "qib.h"

#define BITS_PER_PAGE           (PAGE_SIZE*BITS_PER_BYTE)
#define BITS_PER_PAGE_MASK      (BITS_PER_PAGE-1)

static inline unsigned mk_qpn(struct qib_qpn_table *qpt,
			      struct qpn_map *map, unsigned off)
{
	return (map - qpt->map) * BITS_PER_PAGE + off;
}

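/*
 * Find the next candidate bit offset within a QPN bitmap page.
 *
 * When qpt->mask is non-zero, the masked QPN bits (ignoring bit 0)
 * select which kernel receive context services the QP, so the search
 * jumps ahead whenever those bits would index a context >= n (the
 * number of kernel receive queues).  Otherwise this is a plain scan
 * for the next zero bit in the page.
 */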
static inline unsigned find_next_offset(struct qib_qpn_table *qpt,
					struct qpn_map *map, unsigned off,
					unsigned n)
{
	if (qpt->mask) {
		off++;
		if (((off & qpt->mask) >> 1) >= n)
			off = (off | qpt->mask) + 2;
	} else
		off = find_next_zero_bit(map->page, BITS_PER_PAGE, off);
	return off;
}

/*
 * Convert the AETH credit code into the number of credits.
 */
static u32 credit_table[31] = {
	0,                      /* 0 */
	1,                      /* 1 */
	2,                      /* 2 */
	3,                      /* 3 */
	4,                      /* 4 */
	6,                      /* 5 */
	8,                      /* 6 */
	12,                     /* 7 */
	16,                     /* 8 */
	24,                     /* 9 */
	32,                     /* A */
	48,                     /* B */
	64,                     /* C */
	96,                     /* D */
	128,                    /* E */
	192,                    /* F */
	256,                    /* 10 */
	384,                    /* 11 */
	512,                    /* 12 */
	768,                    /* 13 */
	1024,                   /* 14 */
	1536,                   /* 15 */
	2048,                   /* 16 */
	3072,                   /* 17 */
	4096,                   /* 18 */
	6144,                   /* 19 */
	8192,                   /* 1A */
	12288,                  /* 1B */
	16384,                  /* 1C */
	24576,                  /* 1D */
	32768                   /* 1E */
};
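/*
 * The 5-bit AETH credit code is roughly a floating-point encoding:
 * codes 0-4 map directly to 0-4 credits, and each later pair of codes
 * doubles the previous pair (6/8, 12/16, 24/32, ...), so 31 codes span
 * 0 to 32768 RWQEs.  qib_compute_aeth() below binary searches this
 * table for the largest code whose value does not exceed the credits
 * actually available.
 */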

static void get_map_page(struct qib_qpn_table *qpt, struct qpn_map *map,
			 gfp_t gfp)
{
	unsigned long page = get_zeroed_page(gfp);

	/*
	 * Free the page if someone raced with us installing it.
	 */

	spin_lock(&qpt->lock);
	if (map->page)
		free_page(page);
	else
		map->page = (void *)page;
	spin_unlock(&qpt->lock);
}

/*
 * Allocate the next available QPN or
 * zero/one for QP type IB_QPT_SMI/IB_QPT_GSI.
 */
static int alloc_qpn(struct qib_devdata *dd, struct qib_qpn_table *qpt,
		     enum ib_qp_type type, u8 port, gfp_t gfp)
{
	u32 i, offset, max_scan, qpn;
	struct qpn_map *map;
	u32 ret;

	if (type == IB_QPT_SMI || type == IB_QPT_GSI) {
		unsigned n;

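		/*
		 * QPN 0 (SMI) and QPN 1 (GSI) are not taken from the bitmap;
		 * qpt->flags tracks them with two bits per port so the same
		 * special QPN cannot be handed out twice.
		 */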
		ret = type == IB_QPT_GSI;
		n = 1 << (ret + 2 * (port - 1));
		spin_lock(&qpt->lock);
		if (qpt->flags & n)
			ret = -EINVAL;
		else
			qpt->flags |= n;
		spin_unlock(&qpt->lock);
		goto bail;
	}

	qpn = qpt->last + 2;
	if (qpn >= QPN_MAX)
		qpn = 2;
	if (qpt->mask && ((qpn & qpt->mask) >> 1) >= dd->n_krcv_queues)
		qpn = (qpn | qpt->mask) + 2;
	offset = qpn & BITS_PER_PAGE_MASK;
	map = &qpt->map[qpn / BITS_PER_PAGE];
	max_scan = qpt->nmaps - !offset;
	for (i = 0;;) {
		if (unlikely(!map->page)) {
			get_map_page(qpt, map, gfp);
			if (unlikely(!map->page))
				break;
		}
		do {
			if (!test_and_set_bit(offset, map->page)) {
				qpt->last = qpn;
				ret = qpn;
				goto bail;
			}
			offset = find_next_offset(qpt, map, offset,
				dd->n_krcv_queues);
			qpn = mk_qpn(qpt, map, offset);
			/*
			 * This test differs from alloc_pidmap().
			 * If find_next_offset() does find a zero
			 * bit, we don't need to check for QPN
			 * wrapping around past our starting QPN.
			 * We just need to be sure we don't loop
			 * forever.
			 */
		} while (offset < BITS_PER_PAGE && qpn < QPN_MAX);
		/*
		 * In order to keep the number of pages allocated to a
		 * minimum, we scan all the existing pages before increasing
		 * the size of the bitmap table.
		 */
		if (++i > max_scan) {
			if (qpt->nmaps == QPNMAP_ENTRIES)
				break;
			map = &qpt->map[qpt->nmaps++];
			offset = 0;
		} else if (map < &qpt->map[qpt->nmaps]) {
			++map;
			offset = 0;
		} else {
			map = &qpt->map[0];
			offset = 2;
		}
		qpn = mk_qpn(qpt, map, offset);
	}

	ret = -ENOMEM;

bail:
	return ret;
}

static void free_qpn(struct qib_qpn_table *qpt, u32 qpn)
{
	struct qpn_map *map;

	map = qpt->map + qpn / BITS_PER_PAGE;
	if (map->page)
		clear_bit(qpn & BITS_PER_PAGE_MASK, map->page);
}

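/*
 * Hash a QPN into an index in the device's QP lookup table.  The
 * per-device random seed (dev->qp_rnd) varies bucket placement from
 * device to device, and qp_table_size is a power of two so the jhash
 * result can simply be masked down to an index.
 */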
static inline unsigned qpn_hash(struct qib_ibdev *dev, u32 qpn)
{
	return jhash_1word(qpn, dev->qp_rnd) &
		(dev->qp_table_size - 1);
}

/*
 * Put the QP into the hash table.
 * The hash table holds a reference to the QP.
 */
static void insert_qp(struct qib_ibdev *dev, struct qib_qp *qp)
{
	struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	unsigned long flags;
	unsigned n = qpn_hash(dev, qp->ibqp.qp_num);

	atomic_inc(&qp->refcount);
	spin_lock_irqsave(&dev->qpt_lock, flags);

	if (qp->ibqp.qp_num == 0)
		rcu_assign_pointer(ibp->qp0, qp);
	else if (qp->ibqp.qp_num == 1)
		rcu_assign_pointer(ibp->qp1, qp);
	else {
		qp->next = dev->qp_table[n];
		rcu_assign_pointer(dev->qp_table[n], qp);
	}

	spin_unlock_irqrestore(&dev->qpt_lock, flags);
}

/*
 * Remove the QP from the table so it can't be found asynchronously by
 * the receive interrupt routine.
 */
static void remove_qp(struct qib_ibdev *dev, struct qib_qp *qp)
{
	struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	unsigned n = qpn_hash(dev, qp->ibqp.qp_num);
	unsigned long flags;
	int removed = 1;

	spin_lock_irqsave(&dev->qpt_lock, flags);

	if (rcu_dereference_protected(ibp->qp0,
			lockdep_is_held(&dev->qpt_lock)) == qp) {
		RCU_INIT_POINTER(ibp->qp0, NULL);
	} else if (rcu_dereference_protected(ibp->qp1,
			lockdep_is_held(&dev->qpt_lock)) == qp) {
		RCU_INIT_POINTER(ibp->qp1, NULL);
	} else {
		struct qib_qp *q;
		struct qib_qp __rcu **qpp;

		removed = 0;
		qpp = &dev->qp_table[n];
		for (; (q = rcu_dereference_protected(*qpp,
				lockdep_is_held(&dev->qpt_lock))) != NULL;
				qpp = &q->next)
			if (q == qp) {
				RCU_INIT_POINTER(*qpp,
					rcu_dereference_protected(qp->next,
					 lockdep_is_held(&dev->qpt_lock)));
				removed = 1;
				break;
			}
	}

	spin_unlock_irqrestore(&dev->qpt_lock, flags);
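	/*
	 * If the QP was unlinked above, wait for any readers that may have
	 * found it under rcu_read_lock() before dropping the reference the
	 * table held on it.
	 */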
	if (removed) {
		synchronize_rcu();
		atomic_dec(&qp->refcount);
	}
}

/**
 * qib_free_all_qps - check for QPs still in use
 * @dd: the qlogic_ib device
 *
 * Returns the number of QPs (including QP0/QP1 and multicast groups)
 * still in use; there should not be any.  The QP table entries are
 * cleared as a side effect.
 */
unsigned qib_free_all_qps(struct qib_devdata *dd)
{
	struct qib_ibdev *dev = &dd->verbs_dev;
	unsigned long flags;
	struct qib_qp *qp;
	unsigned n, qp_inuse = 0;

	for (n = 0; n < dd->num_pports; n++) {
		struct qib_ibport *ibp = &dd->pport[n].ibport_data;

		if (!qib_mcast_tree_empty(ibp))
			qp_inuse++;
		rcu_read_lock();
		if (rcu_dereference(ibp->qp0))
			qp_inuse++;
		if (rcu_dereference(ibp->qp1))
			qp_inuse++;
		rcu_read_unlock();
	}

	spin_lock_irqsave(&dev->qpt_lock, flags);
	for (n = 0; n < dev->qp_table_size; n++) {
		qp = rcu_dereference_protected(dev->qp_table[n],
			lockdep_is_held(&dev->qpt_lock));
		RCU_INIT_POINTER(dev->qp_table[n], NULL);

		for (; qp; qp = rcu_dereference_protected(qp->next,
					lockdep_is_held(&dev->qpt_lock)))
			qp_inuse++;
	}
	spin_unlock_irqrestore(&dev->qpt_lock, flags);
	synchronize_rcu();

	return qp_inuse;
}

/**
 * qib_lookup_qpn - return the QP with the given QPN
 * @ibp: the IB port on which the QPN is to be looked up
 * @qpn: the QP number to look up
 *
 * The caller is responsible for decrementing the QP reference count
 * when done.
 */
struct qib_qp *qib_lookup_qpn(struct qib_ibport *ibp, u32 qpn)
{
	struct qib_qp *qp = NULL;

	rcu_read_lock();
	if (unlikely(qpn <= 1)) {
		if (qpn == 0)
			qp = rcu_dereference(ibp->qp0);
		else
			qp = rcu_dereference(ibp->qp1);
		if (qp)
			atomic_inc(&qp->refcount);
	} else {
		struct qib_ibdev *dev = &ppd_from_ibp(ibp)->dd->verbs_dev;
		unsigned n = qpn_hash(dev, qpn);

		for (qp = rcu_dereference(dev->qp_table[n]); qp;
			qp = rcu_dereference(qp->next))
			if (qp->ibqp.qp_num == qpn) {
				atomic_inc(&qp->refcount);
				break;
			}
	}
	rcu_read_unlock();
	return qp;
}

/**
 * qib_reset_qp - initialize the QP state to the reset state
 * @qp: the QP to reset
 * @type: the QP type
 */
static void qib_reset_qp(struct qib_qp *qp, enum ib_qp_type type)
{
	qp->remote_qpn = 0;
	qp->qkey = 0;
	qp->qp_access_flags = 0;
	atomic_set(&qp->s_dma_busy, 0);
	qp->s_flags &= QIB_S_SIGNAL_REQ_WR;
	qp->s_hdrwords = 0;
	qp->s_wqe = NULL;
	qp->s_draining = 0;
	qp->s_next_psn = 0;
	qp->s_last_psn = 0;
	qp->s_sending_psn = 0;
	qp->s_sending_hpsn = 0;
	qp->s_psn = 0;
	qp->r_psn = 0;
	qp->r_msn = 0;
	if (type == IB_QPT_RC) {
		qp->s_state = IB_OPCODE_RC_SEND_LAST;
		qp->r_state = IB_OPCODE_RC_SEND_LAST;
	} else {
		qp->s_state = IB_OPCODE_UC_SEND_LAST;
		qp->r_state = IB_OPCODE_UC_SEND_LAST;
	}
	qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
	qp->r_nak_state = 0;
	qp->r_aflags = 0;
	qp->r_flags = 0;
	qp->s_head = 0;
	qp->s_tail = 0;
	qp->s_cur = 0;
	qp->s_acked = 0;
	qp->s_last = 0;
	qp->s_ssn = 1;
	qp->s_lsn = 0;
	qp->s_mig_state = IB_MIG_MIGRATED;
	memset(qp->s_ack_queue, 0, sizeof(qp->s_ack_queue));
	qp->r_head_ack_queue = 0;
	qp->s_tail_ack_queue = 0;
	qp->s_num_rd_atomic = 0;
	if (qp->r_rq.wq) {
		qp->r_rq.wq->head = 0;
		qp->r_rq.wq->tail = 0;
	}
	qp->r_sge.num_sge = 0;
}

static void clear_mr_refs(struct qib_qp *qp, int clr_sends)
{
	unsigned n;

	if (test_and_clear_bit(QIB_R_REWIND_SGE, &qp->r_aflags))
		qib_put_ss(&qp->s_rdma_read_sge);

	qib_put_ss(&qp->r_sge);

	if (clr_sends) {
		while (qp->s_last != qp->s_head) {
			struct qib_swqe *wqe = get_swqe_ptr(qp, qp->s_last);
			unsigned i;

			for (i = 0; i < wqe->wr.num_sge; i++) {
				struct qib_sge *sge = &wqe->sg_list[i];

				qib_put_mr(sge->mr);
			}
			if (qp->ibqp.qp_type == IB_QPT_UD ||
			    qp->ibqp.qp_type == IB_QPT_SMI ||
			    qp->ibqp.qp_type == IB_QPT_GSI)
				atomic_dec(&to_iah(wqe->ud_wr.ah)->refcount);
			if (++qp->s_last >= qp->s_size)
				qp->s_last = 0;
		}
		if (qp->s_rdma_mr) {
			qib_put_mr(qp->s_rdma_mr);
			qp->s_rdma_mr = NULL;
		}
	}

	if (qp->ibqp.qp_type != IB_QPT_RC)
		return;

	for (n = 0; n < ARRAY_SIZE(qp->s_ack_queue); n++) {
		struct qib_ack_entry *e = &qp->s_ack_queue[n];

		if (e->opcode == IB_OPCODE_RC_RDMA_READ_REQUEST &&
		    e->rdma_sge.mr) {
			qib_put_mr(e->rdma_sge.mr);
			e->rdma_sge.mr = NULL;
		}
	}
}

/**
 * qib_error_qp - put a QP into the error state
 * @qp: the QP to put into the error state
 * @err: the receive completion error to signal if a RWQE is active
 *
 * Flushes both send and receive work queues.
 * Returns true if last WQE event should be generated.
 * The QP r_lock and s_lock should be held and interrupts disabled.
 * If we are already in error state, just return.
 */
int qib_error_qp(struct qib_qp *qp, enum ib_wc_status err)
{
	struct qib_ibdev *dev = to_idev(qp->ibqp.device);
	struct ib_wc wc;
	int ret = 0;

	if (qp->state == IB_QPS_ERR || qp->state == IB_QPS_RESET)
		goto bail;

	qp->state = IB_QPS_ERR;

	if (qp->s_flags & (QIB_S_TIMER | QIB_S_WAIT_RNR)) {
		qp->s_flags &= ~(QIB_S_TIMER | QIB_S_WAIT_RNR);
		del_timer(&qp->s_timer);
	}

	if (qp->s_flags & QIB_S_ANY_WAIT_SEND)
		qp->s_flags &= ~QIB_S_ANY_WAIT_SEND;

	spin_lock(&dev->pending_lock);
	if (!list_empty(&qp->iowait) && !(qp->s_flags & QIB_S_BUSY)) {
		qp->s_flags &= ~QIB_S_ANY_WAIT_IO;
		list_del_init(&qp->iowait);
	}
	spin_unlock(&dev->pending_lock);

	if (!(qp->s_flags & QIB_S_BUSY)) {
		qp->s_hdrwords = 0;
		if (qp->s_rdma_mr) {
			qib_put_mr(qp->s_rdma_mr);
			qp->s_rdma_mr = NULL;
		}
		if (qp->s_tx) {
			qib_put_txreq(qp->s_tx);
			qp->s_tx = NULL;
		}
	}

	/* Schedule the sending tasklet to drain the send work queue. */
	if (qp->s_last != qp->s_head)
		qib_schedule_send(qp);

	clear_mr_refs(qp, 0);

	memset(&wc, 0, sizeof(wc));
	wc.qp = &qp->ibqp;
	wc.opcode = IB_WC_RECV;

	if (test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags)) {
		wc.wr_id = qp->r_wr_id;
		wc.status = err;
		qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
	}
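	/* The flush status is used for any receive requests drained below. */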
	wc.status = IB_WC_WR_FLUSH_ERR;

	if (qp->r_rq.wq) {
		struct qib_rwq *wq;
		u32 head;
		u32 tail;

		spin_lock(&qp->r_rq.lock);

		/* sanity check pointers before trusting them */
		wq = qp->r_rq.wq;
		head = wq->head;
		if (head >= qp->r_rq.size)
			head = 0;
		tail = wq->tail;
		if (tail >= qp->r_rq.size)
			tail = 0;
		while (tail != head) {
			wc.wr_id = get_rwqe_ptr(&qp->r_rq, tail)->wr_id;
			if (++tail >= qp->r_rq.size)
				tail = 0;
			qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
		}
		wq->tail = tail;

		spin_unlock(&qp->r_rq.lock);
	} else if (qp->ibqp.event_handler)
		ret = 1;

bail:
	return ret;
}

/**
 * qib_modify_qp - modify the attributes of a queue pair
 * @ibqp: the queue pair whose attributes we're modifying
 * @attr: the new attributes
 * @attr_mask: the mask of attributes to modify
 * @udata: user data for libibverbs.so
 *
 * Returns 0 on success, otherwise returns an errno.
 */
int qib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		  int attr_mask, struct ib_udata *udata)
{
	struct qib_ibdev *dev = to_idev(ibqp->device);
	struct qib_qp *qp = to_iqp(ibqp);
	enum ib_qp_state cur_state, new_state;
	struct ib_event ev;
	int lastwqe = 0;
	int mig = 0;
	int ret;
	u32 pmtu = 0; /* for gcc warning only */

	spin_lock_irq(&qp->r_lock);
	spin_lock(&qp->s_lock);

	cur_state = attr_mask & IB_QP_CUR_STATE ?
		attr->cur_qp_state : qp->state;
	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;

	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
				attr_mask, IB_LINK_LAYER_UNSPECIFIED))
		goto inval;

	if (attr_mask & IB_QP_AV) {
		if (attr->ah_attr.dlid >= QIB_MULTICAST_LID_BASE)
			goto inval;
		if (qib_check_ah(qp->ibqp.device, &attr->ah_attr))
			goto inval;
	}

	if (attr_mask & IB_QP_ALT_PATH) {
		if (attr->alt_ah_attr.dlid >= QIB_MULTICAST_LID_BASE)
			goto inval;
		if (qib_check_ah(qp->ibqp.device, &attr->alt_ah_attr))
			goto inval;
		if (attr->alt_pkey_index >= qib_get_npkeys(dd_from_dev(dev)))
			goto inval;
	}

	if (attr_mask & IB_QP_PKEY_INDEX)
		if (attr->pkey_index >= qib_get_npkeys(dd_from_dev(dev)))
			goto inval;

	if (attr_mask & IB_QP_MIN_RNR_TIMER)
		if (attr->min_rnr_timer > 31)
			goto inval;

	if (attr_mask & IB_QP_PORT)
		if (qp->ibqp.qp_type == IB_QPT_SMI ||
		    qp->ibqp.qp_type == IB_QPT_GSI ||
		    attr->port_num == 0 ||
		    attr->port_num > ibqp->device->phys_port_cnt)
			goto inval;

	if (attr_mask & IB_QP_DEST_QPN)
		if (attr->dest_qp_num > QIB_QPN_MASK)
			goto inval;

	if (attr_mask & IB_QP_RETRY_CNT)
		if (attr->retry_cnt > 7)
			goto inval;

	if (attr_mask & IB_QP_RNR_RETRY)
		if (attr->rnr_retry > 7)
			goto inval;

	/*
	 * Don't allow invalid path_mtu values.  It is OK to set the MTU
	 * greater than the active MTU (or even the max_cap, if we have
	 * tuned that to a small MTU).  We'll set qp->path_mtu to the
	 * lesser of the requested attribute MTU and the active MTU, for
	 * packetizing messages.
	 * Note that the QP port has to be set in INIT and the MTU in RTR.
	 */
	if (attr_mask & IB_QP_PATH_MTU) {
		struct qib_devdata *dd = dd_from_dev(dev);
		int mtu, pidx = qp->port_num - 1;

		mtu = ib_mtu_enum_to_int(attr->path_mtu);
		if (mtu == -1)
			goto inval;
		if (mtu > dd->pport[pidx].ibmtu) {
			switch (dd->pport[pidx].ibmtu) {
			case 4096:
				pmtu = IB_MTU_4096;
				break;
			case 2048:
				pmtu = IB_MTU_2048;
				break;
			case 1024:
				pmtu = IB_MTU_1024;
				break;
			case 512:
				pmtu = IB_MTU_512;
				break;
			case 256:
				pmtu = IB_MTU_256;
				break;
			default:
				pmtu = IB_MTU_2048;
			}
		} else
			pmtu = attr->path_mtu;
	}

	if (attr_mask & IB_QP_PATH_MIG_STATE) {
		if (attr->path_mig_state == IB_MIG_REARM) {
			if (qp->s_mig_state == IB_MIG_ARMED)
				goto inval;
			if (new_state != IB_QPS_RTS)
				goto inval;
		} else if (attr->path_mig_state == IB_MIG_MIGRATED) {
			if (qp->s_mig_state == IB_MIG_REARM)
				goto inval;
			if (new_state != IB_QPS_RTS && new_state != IB_QPS_SQD)
				goto inval;
			if (qp->s_mig_state == IB_MIG_ARMED)
				mig = 1;
		} else
			goto inval;
	}

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		if (attr->max_dest_rd_atomic > QIB_MAX_RDMA_ATOMIC)
			goto inval;

	switch (new_state) {
	case IB_QPS_RESET:
		if (qp->state != IB_QPS_RESET) {
			qp->state = IB_QPS_RESET;
			spin_lock(&dev->pending_lock);
			if (!list_empty(&qp->iowait))
				list_del_init(&qp->iowait);
			spin_unlock(&dev->pending_lock);
			qp->s_flags &= ~(QIB_S_TIMER | QIB_S_ANY_WAIT);
			spin_unlock(&qp->s_lock);
			spin_unlock_irq(&qp->r_lock);
			/* Stop the sending work queue and retry timer */
			cancel_work_sync(&qp->s_work);
			del_timer_sync(&qp->s_timer);
			wait_event(qp->wait_dma, !atomic_read(&qp->s_dma_busy));
			if (qp->s_tx) {
				qib_put_txreq(qp->s_tx);
				qp->s_tx = NULL;
			}
			remove_qp(dev, qp);
			wait_event(qp->wait, !atomic_read(&qp->refcount));
			spin_lock_irq(&qp->r_lock);
			spin_lock(&qp->s_lock);
			clear_mr_refs(qp, 1);
			qib_reset_qp(qp, ibqp->qp_type);
		}
		break;

	case IB_QPS_RTR:
		/* Allow event to retrigger if QP set to RTR more than once */
		qp->r_flags &= ~QIB_R_COMM_EST;
		qp->state = new_state;
		break;

	case IB_QPS_SQD:
		qp->s_draining = qp->s_last != qp->s_cur;
		qp->state = new_state;
		break;

	case IB_QPS_SQE:
		if (qp->ibqp.qp_type == IB_QPT_RC)
			goto inval;
		qp->state = new_state;
		break;

	case IB_QPS_ERR:
		lastwqe = qib_error_qp(qp, IB_WC_WR_FLUSH_ERR);
		break;

	default:
		qp->state = new_state;
		break;
	}

	if (attr_mask & IB_QP_PKEY_INDEX)
		qp->s_pkey_index = attr->pkey_index;

	if (attr_mask & IB_QP_PORT)
		qp->port_num = attr->port_num;

	if (attr_mask & IB_QP_DEST_QPN)
		qp->remote_qpn = attr->dest_qp_num;

	if (attr_mask & IB_QP_SQ_PSN) {
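		/*
		 * s_last_psn is set one behind the new PSN so that nothing
		 * appears outstanding to the send engine.
		 */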
		qp->s_next_psn = attr->sq_psn & QIB_PSN_MASK;
		qp->s_psn = qp->s_next_psn;
		qp->s_sending_psn = qp->s_next_psn;
		qp->s_last_psn = qp->s_next_psn - 1;
		qp->s_sending_hpsn = qp->s_last_psn;
	}

	if (attr_mask & IB_QP_RQ_PSN)
		qp->r_psn = attr->rq_psn & QIB_PSN_MASK;

	if (attr_mask & IB_QP_ACCESS_FLAGS)
		qp->qp_access_flags = attr->qp_access_flags;

	if (attr_mask & IB_QP_AV) {
		qp->remote_ah_attr = attr->ah_attr;
		qp->s_srate = attr->ah_attr.static_rate;
	}

	if (attr_mask & IB_QP_ALT_PATH) {
		qp->alt_ah_attr = attr->alt_ah_attr;
		qp->s_alt_pkey_index = attr->alt_pkey_index;
	}

	if (attr_mask & IB_QP_PATH_MIG_STATE) {
		qp->s_mig_state = attr->path_mig_state;
		if (mig) {
			qp->remote_ah_attr = qp->alt_ah_attr;
			qp->port_num = qp->alt_ah_attr.port_num;
			qp->s_pkey_index = qp->s_alt_pkey_index;
		}
	}

	if (attr_mask & IB_QP_PATH_MTU) {
		qp->path_mtu = pmtu;
		qp->pmtu = ib_mtu_enum_to_int(pmtu);
	}

	if (attr_mask & IB_QP_RETRY_CNT) {
		qp->s_retry_cnt = attr->retry_cnt;
		qp->s_retry = attr->retry_cnt;
	}

	if (attr_mask & IB_QP_RNR_RETRY) {
		qp->s_rnr_retry_cnt = attr->rnr_retry;
		qp->s_rnr_retry = attr->rnr_retry;
	}

	if (attr_mask & IB_QP_MIN_RNR_TIMER)
		qp->r_min_rnr_timer = attr->min_rnr_timer;

	if (attr_mask & IB_QP_TIMEOUT) {
		qp->timeout = attr->timeout;
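		/*
		 * IBTA local ACK timeout is 4.096 usec * 2^timeout, so
		 * e.g. timeout = 14 works out to roughly 67 msec.
		 */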
		qp->timeout_jiffies =
			usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
				1000UL);
	}

	if (attr_mask & IB_QP_QKEY)
		qp->qkey = attr->qkey;

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		qp->r_max_rd_atomic = attr->max_dest_rd_atomic;

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC)
		qp->s_max_rd_atomic = attr->max_rd_atomic;

	spin_unlock(&qp->s_lock);
	spin_unlock_irq(&qp->r_lock);

	if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
		insert_qp(dev, qp);

	if (lastwqe) {
		ev.device = qp->ibqp.device;
		ev.element.qp = &qp->ibqp;
		ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
	}
	if (mig) {
		ev.device = qp->ibqp.device;
		ev.element.qp = &qp->ibqp;
		ev.event = IB_EVENT_PATH_MIG;
		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
	}
	ret = 0;
	goto bail;

inval:
	spin_unlock(&qp->s_lock);
	spin_unlock_irq(&qp->r_lock);
	ret = -EINVAL;

bail:
	return ret;
}

int qib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		 int attr_mask, struct ib_qp_init_attr *init_attr)
{
	struct qib_qp *qp = to_iqp(ibqp);

	attr->qp_state = qp->state;
	attr->cur_qp_state = attr->qp_state;
	attr->path_mtu = qp->path_mtu;
	attr->path_mig_state = qp->s_mig_state;
	attr->qkey = qp->qkey;
	attr->rq_psn = qp->r_psn & QIB_PSN_MASK;
	attr->sq_psn = qp->s_next_psn & QIB_PSN_MASK;
	attr->dest_qp_num = qp->remote_qpn;
	attr->qp_access_flags = qp->qp_access_flags;
	attr->cap.max_send_wr = qp->s_size - 1;
	attr->cap.max_recv_wr = qp->ibqp.srq ? 0 : qp->r_rq.size - 1;
	attr->cap.max_send_sge = qp->s_max_sge;
	attr->cap.max_recv_sge = qp->r_rq.max_sge;
	attr->cap.max_inline_data = 0;
	attr->ah_attr = qp->remote_ah_attr;
	attr->alt_ah_attr = qp->alt_ah_attr;
	attr->pkey_index = qp->s_pkey_index;
	attr->alt_pkey_index = qp->s_alt_pkey_index;
	attr->en_sqd_async_notify = 0;
	attr->sq_draining = qp->s_draining;
	attr->max_rd_atomic = qp->s_max_rd_atomic;
	attr->max_dest_rd_atomic = qp->r_max_rd_atomic;
	attr->min_rnr_timer = qp->r_min_rnr_timer;
	attr->port_num = qp->port_num;
	attr->timeout = qp->timeout;
	attr->retry_cnt = qp->s_retry_cnt;
	attr->rnr_retry = qp->s_rnr_retry_cnt;
	attr->alt_port_num = qp->alt_ah_attr.port_num;
	attr->alt_timeout = qp->alt_timeout;

	init_attr->event_handler = qp->ibqp.event_handler;
	init_attr->qp_context = qp->ibqp.qp_context;
	init_attr->send_cq = qp->ibqp.send_cq;
	init_attr->recv_cq = qp->ibqp.recv_cq;
	init_attr->srq = qp->ibqp.srq;
	init_attr->cap = attr->cap;
	if (qp->s_flags & QIB_S_SIGNAL_REQ_WR)
		init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
	else
		init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
	init_attr->qp_type = qp->ibqp.qp_type;
	init_attr->port_num = qp->port_num;
	return 0;
}

/**
 * qib_compute_aeth - compute the AETH (syndrome + MSN)
 * @qp: the queue pair to compute the AETH for
 *
 * Returns the AETH.
 */
__be32 qib_compute_aeth(struct qib_qp *qp)
{
	u32 aeth = qp->r_msn & QIB_MSN_MASK;

	if (qp->ibqp.srq) {
		/*
		 * Shared receive queues don't generate credits.
		 * Set the credit field to the invalid value.
		 */
		aeth |= QIB_AETH_CREDIT_INVAL << QIB_AETH_CREDIT_SHIFT;
	} else {
		u32 min, max, x;
		u32 credits;
		struct qib_rwq *wq = qp->r_rq.wq;
		u32 head;
		u32 tail;

		/* sanity check pointers before trusting them */
		head = wq->head;
		if (head >= qp->r_rq.size)
			head = 0;
		tail = wq->tail;
		if (tail >= qp->r_rq.size)
			tail = 0;
		/*
		 * Compute the number of credits available (RWQEs).
		 * XXX Not holding the r_rq.lock here so there is a small
		 * chance that the pair of reads are not atomic.
		 */
		credits = head - tail;
		if ((int)credits < 0)
			credits += qp->r_rq.size;
		/*
		 * Binary search the credit table to find the code to
		 * use.
		 */
		min = 0;
		max = 31;
		for (;;) {
			x = (min + max) / 2;
			if (credit_table[x] == credits)
				break;
			if (credit_table[x] > credits)
				max = x;
			else if (min == x)
				break;
			else
				min = x;
		}
		aeth |= x << QIB_AETH_CREDIT_SHIFT;
	}
	return cpu_to_be32(aeth);
}

/**
 * qib_create_qp - create a queue pair for a device
 * @ibpd: the protection domain whose device we create the queue pair for
 * @init_attr: the attributes of the queue pair
 * @udata: user data for libibverbs.so
 *
 * Returns the queue pair on success, otherwise returns an errno.
 *
 * Called by the ib_create_qp() core verbs function.
 */
struct ib_qp *qib_create_qp(struct ib_pd *ibpd,
			    struct ib_qp_init_attr *init_attr,
			    struct ib_udata *udata)
{
	struct qib_qp *qp;
	int err;
	struct qib_swqe *swq = NULL;
	struct qib_ibdev *dev;
	struct qib_devdata *dd;
	size_t sz;
	size_t sg_list_sz;
	struct ib_qp *ret;
	gfp_t gfp;

	if (init_attr->cap.max_send_sge > ib_qib_max_sges ||
	    init_attr->cap.max_send_wr > ib_qib_max_qp_wrs ||
	    init_attr->create_flags & ~(IB_QP_CREATE_USE_GFP_NOIO))
		return ERR_PTR(-EINVAL);

	/* GFP_NOIO is applicable in RC QPs only */
	if (init_attr->create_flags & IB_QP_CREATE_USE_GFP_NOIO &&
	    init_attr->qp_type != IB_QPT_RC)
		return ERR_PTR(-EINVAL);

	gfp = init_attr->create_flags & IB_QP_CREATE_USE_GFP_NOIO ?
			GFP_NOIO : GFP_KERNEL;

	/* Check receive queue parameters if no SRQ is specified. */
	if (!init_attr->srq) {
		if (init_attr->cap.max_recv_sge > ib_qib_max_sges ||
		    init_attr->cap.max_recv_wr > ib_qib_max_qp_wrs) {
			ret = ERR_PTR(-EINVAL);
			goto bail;
		}
		if (init_attr->cap.max_send_sge +
		    init_attr->cap.max_send_wr +
		    init_attr->cap.max_recv_sge +
		    init_attr->cap.max_recv_wr == 0) {
			ret = ERR_PTR(-EINVAL);
			goto bail;
		}
	}

	switch (init_attr->qp_type) {
	case IB_QPT_SMI:
	case IB_QPT_GSI:
		if (init_attr->port_num == 0 ||
		    init_attr->port_num > ibpd->device->phys_port_cnt) {
			ret = ERR_PTR(-EINVAL);
			goto bail;
		}
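		/* FALLTHROUGH -- SMI/GSI share the allocation path below */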
	case IB_QPT_UC:
	case IB_QPT_RC:
	case IB_QPT_UD:
		sz = sizeof(struct qib_sge) *
			init_attr->cap.max_send_sge +
			sizeof(struct qib_swqe);
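		/*
		 * Each send WQE is a struct qib_swqe followed by room for
		 * max_send_sge SGEs; one extra slot lets the circular send
		 * queue distinguish full from empty.
		 */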
		swq = __vmalloc((init_attr->cap.max_send_wr + 1) * sz,
				gfp, PAGE_KERNEL);
		if (swq == NULL) {
			ret = ERR_PTR(-ENOMEM);
			goto bail;
		}
		sz = sizeof(*qp);
		sg_list_sz = 0;
		if (init_attr->srq) {
			struct qib_srq *srq = to_isrq(init_attr->srq);

			if (srq->rq.max_sge > 1)
				sg_list_sz = sizeof(*qp->r_sg_list) *
					(srq->rq.max_sge - 1);
		} else if (init_attr->cap.max_recv_sge > 1)
			sg_list_sz = sizeof(*qp->r_sg_list) *
				(init_attr->cap.max_recv_sge - 1);
		qp = kzalloc(sz + sg_list_sz, gfp);
		if (!qp) {
			ret = ERR_PTR(-ENOMEM);
			goto bail_swq;
		}
		RCU_INIT_POINTER(qp->next, NULL);
		qp->s_hdr = kzalloc(sizeof(*qp->s_hdr), gfp);
		if (!qp->s_hdr) {
			ret = ERR_PTR(-ENOMEM);
			goto bail_qp;
		}
		qp->timeout_jiffies =
			usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
				1000UL);
		if (init_attr->srq)
			sz = 0;
		else {
			qp->r_rq.size = init_attr->cap.max_recv_wr + 1;
			qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
			sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) +
				sizeof(struct qib_rwqe);
			if (gfp != GFP_NOIO)
				qp->r_rq.wq = vmalloc_user(
						sizeof(struct qib_rwq) +
						qp->r_rq.size * sz);
			else
				qp->r_rq.wq = __vmalloc(
						sizeof(struct qib_rwq) +
						qp->r_rq.size * sz,
						gfp, PAGE_KERNEL);

			if (!qp->r_rq.wq) {
				ret = ERR_PTR(-ENOMEM);
				goto bail_qp;
			}
		}

		/*
		 * ib_create_qp() will initialize qp->ibqp
		 * except for qp->ibqp.qp_num.
		 */
		spin_lock_init(&qp->r_lock);
		spin_lock_init(&qp->s_lock);
		spin_lock_init(&qp->r_rq.lock);
		atomic_set(&qp->refcount, 0);
		init_waitqueue_head(&qp->wait);
		init_waitqueue_head(&qp->wait_dma);
		init_timer(&qp->s_timer);
		qp->s_timer.data = (unsigned long)qp;
		INIT_WORK(&qp->s_work, qib_do_send);
		INIT_LIST_HEAD(&qp->iowait);
		INIT_LIST_HEAD(&qp->rspwait);
		qp->state = IB_QPS_RESET;
		qp->s_wq = swq;
		qp->s_size = init_attr->cap.max_send_wr + 1;
		qp->s_max_sge = init_attr->cap.max_send_sge;
		if (init_attr->sq_sig_type == IB_SIGNAL_REQ_WR)
			qp->s_flags = QIB_S_SIGNAL_REQ_WR;
		dev = to_idev(ibpd->device);
		dd = dd_from_dev(dev);
		err = alloc_qpn(dd, &dev->qpn_table, init_attr->qp_type,
				init_attr->port_num, gfp);
		if (err < 0) {
			ret = ERR_PTR(err);
			vfree(qp->r_rq.wq);
			goto bail_qp;
		}
		qp->ibqp.qp_num = err;
		qp->port_num = init_attr->port_num;
		qib_reset_qp(qp, init_attr->qp_type);
		break;

	default:
		/* Don't support raw QPs */
		ret = ERR_PTR(-ENOSYS);
		goto bail;
	}

	init_attr->cap.max_inline_data = 0;

	/*
	 * Return the address of the RWQ as the offset to mmap.
	 * See qib_mmap() for details.
	 */
	if (udata && udata->outlen >= sizeof(__u64)) {
		if (!qp->r_rq.wq) {
			__u64 offset = 0;

			err = ib_copy_to_udata(udata, &offset,
					       sizeof(offset));
			if (err) {
				ret = ERR_PTR(err);
				goto bail_ip;
			}
		} else {
			u32 s = sizeof(struct qib_rwq) + qp->r_rq.size * sz;

			qp->ip = qib_create_mmap_info(dev, s,
						      ibpd->uobject->context,
						      qp->r_rq.wq);
			if (!qp->ip) {
				ret = ERR_PTR(-ENOMEM);
				goto bail_ip;
			}

			err = ib_copy_to_udata(udata, &(qp->ip->offset),
					       sizeof(qp->ip->offset));
			if (err) {
				ret = ERR_PTR(err);
				goto bail_ip;
			}
		}
	}

	spin_lock(&dev->n_qps_lock);
	if (dev->n_qps_allocated == ib_qib_max_qps) {
		spin_unlock(&dev->n_qps_lock);
		ret = ERR_PTR(-ENOMEM);
		goto bail_ip;
	}

	dev->n_qps_allocated++;
	spin_unlock(&dev->n_qps_lock);

	if (qp->ip) {
		spin_lock_irq(&dev->pending_lock);
		list_add(&qp->ip->pending_mmaps, &dev->pending_mmaps);
		spin_unlock_irq(&dev->pending_lock);
	}

	ret = &qp->ibqp;
	goto bail;

bail_ip:
	if (qp->ip)
		kref_put(&qp->ip->ref, qib_release_mmap_info);
	else
		vfree(qp->r_rq.wq);
	free_qpn(&dev->qpn_table, qp->ibqp.qp_num);
bail_qp:
	kfree(qp->s_hdr);
	kfree(qp);
bail_swq:
	vfree(swq);
bail:
	return ret;
}

/**
 * qib_destroy_qp - destroy a queue pair
 * @ibqp: the queue pair to destroy
 *
 * Returns 0 on success.
 *
 * Note that this can be called while the QP is actively sending or
 * receiving!
 */
int qib_destroy_qp(struct ib_qp *ibqp)
{
	struct qib_qp *qp = to_iqp(ibqp);
	struct qib_ibdev *dev = to_idev(ibqp->device);

	/* Make sure HW and driver activity is stopped. */
	spin_lock_irq(&qp->s_lock);
	if (qp->state != IB_QPS_RESET) {
		qp->state = IB_QPS_RESET;
		spin_lock(&dev->pending_lock);
		if (!list_empty(&qp->iowait))
			list_del_init(&qp->iowait);
		spin_unlock(&dev->pending_lock);
		qp->s_flags &= ~(QIB_S_TIMER | QIB_S_ANY_WAIT);
		spin_unlock_irq(&qp->s_lock);
		cancel_work_sync(&qp->s_work);
		del_timer_sync(&qp->s_timer);
		wait_event(qp->wait_dma, !atomic_read(&qp->s_dma_busy));
		if (qp->s_tx) {
			qib_put_txreq(qp->s_tx);
			qp->s_tx = NULL;
		}
		remove_qp(dev, qp);
		wait_event(qp->wait, !atomic_read(&qp->refcount));
		clear_mr_refs(qp, 1);
	} else
		spin_unlock_irq(&qp->s_lock);

	/* all users are cleaned up, mark the QPN available again */
	free_qpn(&dev->qpn_table, qp->ibqp.qp_num);
	spin_lock(&dev->n_qps_lock);
	dev->n_qps_allocated--;
	spin_unlock(&dev->n_qps_lock);

	if (qp->ip)
		kref_put(&qp->ip->ref, qib_release_mmap_info);
	else
		vfree(qp->r_rq.wq);
	vfree(qp->s_wq);
	kfree(qp->s_hdr);
	kfree(qp);
	return 0;
}

/**
 * qib_init_qpn_table - initialize the QP number table for a device
 * @dd: the qlogic_ib device
 * @qpt: the QPN table
 */
void qib_init_qpn_table(struct qib_devdata *dd, struct qib_qpn_table *qpt)
{
	spin_lock_init(&qpt->lock);
	qpt->last = 1;          /* start with QPN 2 */
	qpt->nmaps = 1;
	qpt->mask = dd->qpn_mask;
}

/**
 * qib_free_qpn_table - free the QP number table for a device
 * @qpt: the QPN table
 */
void qib_free_qpn_table(struct qib_qpn_table *qpt)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(qpt->map); i++)
		if (qpt->map[i].page)
			free_page((unsigned long) qpt->map[i].page);
}

/**
 * qib_get_credit - update the QP's send credits from a received AETH
 * @qp: the QP the AETH was received for
 * @aeth: the Acknowledge Extended Transport Header
 *
 * The QP s_lock should be held.
 */
void qib_get_credit(struct qib_qp *qp, u32 aeth)
{
	u32 credit = (aeth >> QIB_AETH_CREDIT_SHIFT) & QIB_AETH_CREDIT_MASK;

	/*
	 * If the credit is invalid, we can send
	 * as many packets as we like.  Otherwise, we have to
	 * honor the credit field.
	 */
	if (credit == QIB_AETH_CREDIT_INVAL) {
		if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT)) {
			qp->s_flags |= QIB_S_UNLIMITED_CREDIT;
			if (qp->s_flags & QIB_S_WAIT_SSN_CREDIT) {
				qp->s_flags &= ~QIB_S_WAIT_SSN_CREDIT;
				qib_schedule_send(qp);
			}
		}
	} else if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT)) {
		/* Compute new LSN (i.e., MSN + credit) */
		credit = (aeth + credit_table[credit]) & QIB_MSN_MASK;
		if (qib_cmp24(credit, qp->s_lsn) > 0) {
			qp->s_lsn = credit;
			if (qp->s_flags & QIB_S_WAIT_SSN_CREDIT) {
				qp->s_flags &= ~QIB_S_WAIT_SSN_CREDIT;
				qib_schedule_send(qp);
			}
		}
	}
}

#ifdef CONFIG_DEBUG_FS

struct qib_qp_iter {
	struct qib_ibdev *dev;
	struct qib_qp *qp;
	int n;
};

struct qib_qp_iter *qib_qp_iter_init(struct qib_ibdev *dev)
{
	struct qib_qp_iter *iter;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return NULL;

	iter->dev = dev;
	if (qib_qp_iter_next(iter)) {
		kfree(iter);
		return NULL;
	}

	return iter;
}

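/*
 * Advance the iterator to the next QP in the device's hash table.
 * Returns 0 if a QP was found and 1 when the table is exhausted.
 * The walk uses rcu_dereference(), so the caller (the debugfs
 * seq_file code) is expected to hold rcu_read_lock() across it.
 */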
int qib_qp_iter_next(struct qib_qp_iter *iter)
{
	struct qib_ibdev *dev = iter->dev;
	int n = iter->n;
	int ret = 1;
	struct qib_qp *pqp = iter->qp;
	struct qib_qp *qp;

	for (; n < dev->qp_table_size; n++) {
		if (pqp)
			qp = rcu_dereference(pqp->next);
		else
			qp = rcu_dereference(dev->qp_table[n]);
		pqp = qp;
		if (qp) {
			iter->qp = qp;
			iter->n = n;
			return 0;
		}
	}
	return ret;
}

static const char * const qp_type_str[] = {
	"SMI", "GSI", "RC", "UC", "UD",
};

void qib_qp_iter_print(struct seq_file *s, struct qib_qp_iter *iter)
{
	struct qib_swqe *wqe;
	struct qib_qp *qp = iter->qp;

	wqe = get_swqe_ptr(qp, qp->s_last);
	seq_printf(s,
		   "N %d QP%u %s %u %u %u f=%x %u %u %u %u %u PSN %x %x %x %x %x (%u %u %u %u %u %u) QP%u LID %x\n",
		   iter->n,
		   qp->ibqp.qp_num,
		   qp_type_str[qp->ibqp.qp_type],
		   qp->state,
		   wqe->wr.opcode,
		   qp->s_hdrwords,
		   qp->s_flags,
		   atomic_read(&qp->s_dma_busy),
		   !list_empty(&qp->iowait),
		   qp->timeout,
		   wqe->ssn,
		   qp->s_lsn,
		   qp->s_last_psn,
		   qp->s_psn, qp->s_next_psn,
		   qp->s_sending_psn, qp->s_sending_hpsn,
		   qp->s_last, qp->s_acked, qp->s_cur,
		   qp->s_tail, qp->s_head, qp->s_size,
		   qp->remote_qpn,
		   qp->remote_ah_attr.dlid);
}

#endif