/*
 * Copyright (c) 2004 Mellanox Technologies Ltd.  All rights reserved.
 * Copyright (c) 2004 Infinicon Corporation.  All rights reserved.
 * Copyright (c) 2004 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/string.h>
#include <linux/slab.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_addr.h>

#include "core_priv.h"

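/*
 * Convert an IB static rate enumeration to a multiple of the base
 * 2.5 Gbit/sec signalling rate, or -1 if the rate is not recognized.
 */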
__attribute_const__ int ib_rate_to_mult(enum ib_rate rate)
{
	switch (rate) {
	case IB_RATE_2_5_GBPS: return  1;
	case IB_RATE_5_GBPS:   return  2;
	case IB_RATE_10_GBPS:  return  4;
	case IB_RATE_20_GBPS:  return  8;
	case IB_RATE_30_GBPS:  return 12;
	case IB_RATE_40_GBPS:  return 16;
	case IB_RATE_60_GBPS:  return 24;
	case IB_RATE_80_GBPS:  return 32;
	case IB_RATE_120_GBPS: return 48;
	default:	       return -1;
	}
}
EXPORT_SYMBOL(ib_rate_to_mult);

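/*
 * Convert a multiple of the 2.5 Gbit/sec base rate back to the
 * corresponding IB rate enumeration; unknown multiples map to
 * IB_RATE_PORT_CURRENT.
 */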
__attribute_const__ enum ib_rate mult_to_ib_rate(int mult)
{
	switch (mult) {
	case 1:  return IB_RATE_2_5_GBPS;
	case 2:  return IB_RATE_5_GBPS;
	case 4:  return IB_RATE_10_GBPS;
	case 8:  return IB_RATE_20_GBPS;
	case 12: return IB_RATE_30_GBPS;
	case 16: return IB_RATE_40_GBPS;
	case 24: return IB_RATE_60_GBPS;
	case 32: return IB_RATE_80_GBPS;
	case 48: return IB_RATE_120_GBPS;
	default: return IB_RATE_PORT_CURRENT;
	}
}
EXPORT_SYMBOL(mult_to_ib_rate);

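/*
 * Convert an IB static rate enumeration to the equivalent data rate in
 * Mbit/sec, or -1 if the rate is not recognized.
 */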
__attribute_const__ int ib_rate_to_mbps(enum ib_rate rate)
{
	switch (rate) {
	case IB_RATE_2_5_GBPS: return 2500;
	case IB_RATE_5_GBPS:   return 5000;
	case IB_RATE_10_GBPS:  return 10000;
	case IB_RATE_20_GBPS:  return 20000;
	case IB_RATE_30_GBPS:  return 30000;
	case IB_RATE_40_GBPS:  return 40000;
	case IB_RATE_60_GBPS:  return 60000;
	case IB_RATE_80_GBPS:  return 80000;
	case IB_RATE_120_GBPS: return 120000;
	case IB_RATE_14_GBPS:  return 14062;
	case IB_RATE_56_GBPS:  return 56250;
	case IB_RATE_112_GBPS: return 112500;
	case IB_RATE_168_GBPS: return 168750;
	case IB_RATE_25_GBPS:  return 25781;
	case IB_RATE_100_GBPS: return 103125;
	case IB_RATE_200_GBPS: return 206250;
	case IB_RATE_300_GBPS: return 309375;
	default:	       return -1;
	}
}
EXPORT_SYMBOL(ib_rate_to_mbps);

__attribute_const__ enum rdma_transport_type
rdma_node_get_transport(enum rdma_node_type node_type)
{
	switch (node_type) {
	case RDMA_NODE_IB_CA:
	case RDMA_NODE_IB_SWITCH:
	case RDMA_NODE_IB_ROUTER:
		return RDMA_TRANSPORT_IB;
	case RDMA_NODE_RNIC:
		return RDMA_TRANSPORT_IWARP;
	case RDMA_NODE_USNIC:
		return RDMA_TRANSPORT_USNIC;
	case RDMA_NODE_USNIC_UDP:
		return RDMA_TRANSPORT_USNIC_UDP;
	default:
		BUG();
		return 0;
	}
}
EXPORT_SYMBOL(rdma_node_get_transport);

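/*
 * Report the link layer of a port: use the driver's get_link_layer
 * callback when provided, otherwise infer it from the node's transport
 * type.
 */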
enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device, u8 port_num)
{
	if (device->get_link_layer)
		return device->get_link_layer(device, port_num);

	switch (rdma_node_get_transport(device->node_type)) {
	case RDMA_TRANSPORT_IB:
		return IB_LINK_LAYER_INFINIBAND;
	case RDMA_TRANSPORT_IWARP:
	case RDMA_TRANSPORT_USNIC:
	case RDMA_TRANSPORT_USNIC_UDP:
		return IB_LINK_LAYER_ETHERNET;
	default:
		return IB_LINK_LAYER_UNSPECIFIED;
	}
}
EXPORT_SYMBOL(rdma_port_get_link_layer);

/* Protection domains */

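/*
 * Allocate a protection domain for in-kernel use (no user context or
 * user data) and initialize its reference count.
 */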
struct ib_pd *ib_alloc_pd(struct ib_device *device)
{
	struct ib_pd *pd;

	pd = device->alloc_pd(device, NULL, NULL);

	if (!IS_ERR(pd)) {
		pd->device  = device;
		pd->uobject = NULL;
		atomic_set(&pd->usecnt, 0);
	}

	return pd;
}
EXPORT_SYMBOL(ib_alloc_pd);

int ib_dealloc_pd(struct ib_pd *pd)
{
	if (atomic_read(&pd->usecnt))
		return -EBUSY;

	return pd->device->dealloc_pd(pd);
}
EXPORT_SYMBOL(ib_dealloc_pd);

/* Address handles */

struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
{
	struct ib_ah *ah;

	ah = pd->device->create_ah(pd, ah_attr);

	if (!IS_ERR(ah)) {
		ah->device  = pd->device;
		ah->pd      = pd;
		ah->uobject = NULL;
		atomic_inc(&pd->usecnt);
	}

	return ah;
}
EXPORT_SYMBOL(ib_create_ah);

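/*
 * Initialize address handle attributes from a received work completion
 * so that a reply can be sent to the originator.  On Ethernet (RoCE)
 * ports the destination MAC and VLAN are taken from the completion if
 * reported there, or resolved from the GRH otherwise.
 */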
int ib_init_ah_from_wc(struct ib_device *device, u8 port_num, struct ib_wc *wc,
		       struct ib_grh *grh, struct ib_ah_attr *ah_attr)
{
	u32 flow_class;
	u16 gid_index;
	int ret;
	int is_eth = (rdma_port_get_link_layer(device, port_num) ==
			IB_LINK_LAYER_ETHERNET);

	memset(ah_attr, 0, sizeof *ah_attr);
	if (is_eth) {
		if (!(wc->wc_flags & IB_WC_GRH))
			return -EPROTOTYPE;

		if (wc->wc_flags & IB_WC_WITH_SMAC &&
		    wc->wc_flags & IB_WC_WITH_VLAN) {
			memcpy(ah_attr->dmac, wc->smac, ETH_ALEN);
			ah_attr->vlan_id = wc->vlan_id;
		} else {
			ret = rdma_addr_find_dmac_by_grh(&grh->dgid, &grh->sgid,
					ah_attr->dmac, &ah_attr->vlan_id);
			if (ret)
				return ret;
		}
	} else {
		ah_attr->vlan_id = 0xffff;
	}

	ah_attr->dlid = wc->slid;
	ah_attr->sl = wc->sl;
	ah_attr->src_path_bits = wc->dlid_path_bits;
	ah_attr->port_num = port_num;

	if (wc->wc_flags & IB_WC_GRH) {
		ah_attr->ah_flags = IB_AH_GRH;
		ah_attr->grh.dgid = grh->sgid;

		ret = ib_find_cached_gid(device, &grh->dgid, &port_num,
					 &gid_index);
		if (ret)
			return ret;

		ah_attr->grh.sgid_index = (u8) gid_index;
		flow_class = be32_to_cpu(grh->version_tclass_flow);
		ah_attr->grh.flow_label = flow_class & 0xFFFFF;
		ah_attr->grh.hop_limit = 0xFF;
		ah_attr->grh.traffic_class = (flow_class >> 20) & 0xFF;
	}
	return 0;
}
EXPORT_SYMBOL(ib_init_ah_from_wc);

struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, struct ib_wc *wc,
				   struct ib_grh *grh, u8 port_num)
{
	struct ib_ah_attr ah_attr;
	int ret;

	ret = ib_init_ah_from_wc(pd->device, port_num, wc, grh, &ah_attr);
	if (ret)
		return ERR_PTR(ret);

	return ib_create_ah(pd, &ah_attr);
}
EXPORT_SYMBOL(ib_create_ah_from_wc);

int ib_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
{
	return ah->device->modify_ah ?
		ah->device->modify_ah(ah, ah_attr) :
		-ENOSYS;
}
EXPORT_SYMBOL(ib_modify_ah);

int ib_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
{
	return ah->device->query_ah ?
		ah->device->query_ah(ah, ah_attr) :
		-ENOSYS;
}
EXPORT_SYMBOL(ib_query_ah);

int ib_destroy_ah(struct ib_ah *ah)
{
	struct ib_pd *pd;
	int ret;

	pd = ah->pd;
	ret = ah->device->destroy_ah(ah);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(ib_destroy_ah);

/* Shared receive queues */

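/*
 * Create a shared receive queue on a protection domain.  XRC SRQs also
 * take a reference on their XRCD and completion queue.
 */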
struct ib_srq *ib_create_srq(struct ib_pd *pd,
			     struct ib_srq_init_attr *srq_init_attr)
{
	struct ib_srq *srq;

	if (!pd->device->create_srq)
		return ERR_PTR(-ENOSYS);

	srq = pd->device->create_srq(pd, srq_init_attr, NULL);

	if (!IS_ERR(srq)) {
		srq->device        = pd->device;
		srq->pd            = pd;
		srq->uobject       = NULL;
		srq->event_handler = srq_init_attr->event_handler;
		srq->srq_context   = srq_init_attr->srq_context;
		srq->srq_type      = srq_init_attr->srq_type;
		if (srq->srq_type == IB_SRQT_XRC) {
			srq->ext.xrc.xrcd = srq_init_attr->ext.xrc.xrcd;
			srq->ext.xrc.cq   = srq_init_attr->ext.xrc.cq;
			atomic_inc(&srq->ext.xrc.xrcd->usecnt);
			atomic_inc(&srq->ext.xrc.cq->usecnt);
		}
		atomic_inc(&pd->usecnt);
		atomic_set(&srq->usecnt, 0);
	}

	return srq;
}
EXPORT_SYMBOL(ib_create_srq);

int ib_modify_srq(struct ib_srq *srq,
		  struct ib_srq_attr *srq_attr,
		  enum ib_srq_attr_mask srq_attr_mask)
{
	return srq->device->modify_srq ?
		srq->device->modify_srq(srq, srq_attr, srq_attr_mask, NULL) :
		-ENOSYS;
}
EXPORT_SYMBOL(ib_modify_srq);

int ib_query_srq(struct ib_srq *srq,
		 struct ib_srq_attr *srq_attr)
{
	return srq->device->query_srq ?
		srq->device->query_srq(srq, srq_attr) : -ENOSYS;
}
EXPORT_SYMBOL(ib_query_srq);

int ib_destroy_srq(struct ib_srq *srq)
{
	struct ib_pd *pd;
	enum ib_srq_type srq_type;
	struct ib_xrcd *uninitialized_var(xrcd);
	struct ib_cq *uninitialized_var(cq);
	int ret;

	if (atomic_read(&srq->usecnt))
		return -EBUSY;

	pd = srq->pd;
	srq_type = srq->srq_type;
	if (srq_type == IB_SRQT_XRC) {
		xrcd = srq->ext.xrc.xrcd;
		cq = srq->ext.xrc.cq;
	}

	ret = srq->device->destroy_srq(srq);
	if (!ret) {
		atomic_dec(&pd->usecnt);
		if (srq_type == IB_SRQT_XRC) {
			atomic_dec(&xrcd->usecnt);
			atomic_dec(&cq->usecnt);
		}
	}

	return ret;
}
EXPORT_SYMBOL(ib_destroy_srq);

/* Queue pairs */

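/*
 * Fan out an asynchronous event reported on a shared (XRC target) QP to
 * the event handler of every QP handle currently opened on it.
 */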
static void __ib_shared_qp_event_handler(struct ib_event *event, void *context)
{
	struct ib_qp *qp = context;
	unsigned long flags;

	spin_lock_irqsave(&qp->device->event_handler_lock, flags);
	list_for_each_entry(event->element.qp, &qp->open_list, open_list)
		if (event->element.qp->event_handler)
			event->element.qp->event_handler(event, event->element.qp->qp_context);
	spin_unlock_irqrestore(&qp->device->event_handler_lock, flags);
}

static void __ib_insert_xrcd_qp(struct ib_xrcd *xrcd, struct ib_qp *qp)
{
	mutex_lock(&xrcd->tgt_qp_mutex);
	list_add(&qp->xrcd_list, &xrcd->tgt_qp_list);
	mutex_unlock(&xrcd->tgt_qp_mutex);
}

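/*
 * Allocate a lightweight QP handle that shares an existing "real" QP
 * and add it to the real QP's list of open handles.
 */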
static struct ib_qp *__ib_open_qp(struct ib_qp *real_qp,
				  void (*event_handler)(struct ib_event *, void *),
				  void *qp_context)
{
	struct ib_qp *qp;
	unsigned long flags;

	qp = kzalloc(sizeof *qp, GFP_KERNEL);
	if (!qp)
		return ERR_PTR(-ENOMEM);

	qp->real_qp = real_qp;
	atomic_inc(&real_qp->usecnt);
	qp->device = real_qp->device;
	qp->event_handler = event_handler;
	qp->qp_context = qp_context;
	qp->qp_num = real_qp->qp_num;
	qp->qp_type = real_qp->qp_type;

	spin_lock_irqsave(&real_qp->device->event_handler_lock, flags);
	list_add(&qp->open_list, &real_qp->open_list);
	spin_unlock_irqrestore(&real_qp->device->event_handler_lock, flags);

	return qp;
}

struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
			 struct ib_qp_open_attr *qp_open_attr)
{
	struct ib_qp *qp, *real_qp;

	if (qp_open_attr->qp_type != IB_QPT_XRC_TGT)
		return ERR_PTR(-EINVAL);

	qp = ERR_PTR(-EINVAL);
	mutex_lock(&xrcd->tgt_qp_mutex);
	list_for_each_entry(real_qp, &xrcd->tgt_qp_list, xrcd_list) {
		if (real_qp->qp_num == qp_open_attr->qp_num) {
			qp = __ib_open_qp(real_qp, qp_open_attr->event_handler,
					  qp_open_attr->qp_context);
			break;
		}
	}
	mutex_unlock(&xrcd->tgt_qp_mutex);
	return qp;
}
EXPORT_SYMBOL(ib_open_qp);

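/*
 * Create a queue pair.  For XRC target QPs the value returned to the
 * caller is an opened handle on the newly created real QP, which is
 * also inserted into the XRCD's target QP list.
 */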
struct ib_qp *ib_create_qp(struct ib_pd *pd,
			   struct ib_qp_init_attr *qp_init_attr)
{
	struct ib_qp *qp, *real_qp;
	struct ib_device *device;

	device = pd ? pd->device : qp_init_attr->xrcd->device;
	qp = device->create_qp(pd, qp_init_attr, NULL);

	if (!IS_ERR(qp)) {
		qp->device     = device;
		qp->real_qp    = qp;
		qp->uobject    = NULL;
		qp->qp_type    = qp_init_attr->qp_type;

		atomic_set(&qp->usecnt, 0);
		if (qp_init_attr->qp_type == IB_QPT_XRC_TGT) {
			qp->event_handler = __ib_shared_qp_event_handler;
			qp->qp_context = qp;
			qp->pd = NULL;
			qp->send_cq = qp->recv_cq = NULL;
			qp->srq = NULL;
			qp->xrcd = qp_init_attr->xrcd;
			atomic_inc(&qp_init_attr->xrcd->usecnt);
			INIT_LIST_HEAD(&qp->open_list);

			real_qp = qp;
			qp = __ib_open_qp(real_qp, qp_init_attr->event_handler,
					  qp_init_attr->qp_context);
			if (!IS_ERR(qp))
				__ib_insert_xrcd_qp(qp_init_attr->xrcd, real_qp);
			else
				real_qp->device->destroy_qp(real_qp);
		} else {
			qp->event_handler = qp_init_attr->event_handler;
			qp->qp_context = qp_init_attr->qp_context;
			if (qp_init_attr->qp_type == IB_QPT_XRC_INI) {
				qp->recv_cq = NULL;
				qp->srq = NULL;
			} else {
				qp->recv_cq = qp_init_attr->recv_cq;
				atomic_inc(&qp_init_attr->recv_cq->usecnt);
				qp->srq = qp_init_attr->srq;
				if (qp->srq)
					atomic_inc(&qp_init_attr->srq->usecnt);
			}

			qp->pd	    = pd;
			qp->send_cq = qp_init_attr->send_cq;
			qp->xrcd    = NULL;

			atomic_inc(&pd->usecnt);
			atomic_inc(&qp_init_attr->send_cq->usecnt);
		}
	}

	return qp;
}
EXPORT_SYMBOL(ib_create_qp);

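/*
 * Table of legal QP state transitions.  For each (current, next) state
 * pair it lists, per QP type, the attributes that must and may
 * accompany the transition; the *_add_eth masks apply additionally when
 * the port's link layer is Ethernet (RoCE).
 */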
static const struct {
	int			valid;
	enum ib_qp_attr_mask	req_param[IB_QPT_MAX];
	enum ib_qp_attr_mask	req_param_add_eth[IB_QPT_MAX];
	enum ib_qp_attr_mask	opt_param[IB_QPT_MAX];
	enum ib_qp_attr_mask	opt_param_add_eth[IB_QPT_MAX];
} qp_state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = {
	[IB_QPS_RESET] = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_INIT]  = {
			.valid = 1,
			.req_param = {
				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX		|
						IB_QP_PORT			|
						IB_QP_QKEY),
				[IB_QPT_RAW_PACKET] = IB_QP_PORT,
				[IB_QPT_UC]  = (IB_QP_PKEY_INDEX		|
						IB_QP_PORT			|
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_RC]  = (IB_QP_PKEY_INDEX		|
						IB_QP_PORT			|
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_XRC_INI] = (IB_QP_PKEY_INDEX		|
						IB_QP_PORT			|
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_XRC_TGT] = (IB_QP_PKEY_INDEX		|
						IB_QP_PORT			|
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX		|
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX		|
						IB_QP_QKEY),
			}
		},
	},
	[IB_QPS_INIT]  = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR] =   { .valid = 1 },
		[IB_QPS_INIT]  = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX		|
						IB_QP_PORT			|
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_PKEY_INDEX		|
						IB_QP_PORT			|
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_RC]  = (IB_QP_PKEY_INDEX		|
						IB_QP_PORT			|
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_XRC_INI] = (IB_QP_PKEY_INDEX		|
						IB_QP_PORT			|
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_XRC_TGT] = (IB_QP_PKEY_INDEX		|
						IB_QP_PORT			|
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX		|
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX		|
						IB_QP_QKEY),
			}
		},
		[IB_QPS_RTR]   = {
			.valid = 1,
			.req_param = {
				[IB_QPT_UC]  = (IB_QP_AV			|
						IB_QP_PATH_MTU			|
						IB_QP_DEST_QPN			|
						IB_QP_RQ_PSN),
				[IB_QPT_RC]  = (IB_QP_AV			|
						IB_QP_PATH_MTU			|
						IB_QP_DEST_QPN			|
						IB_QP_RQ_PSN			|
						IB_QP_MAX_DEST_RD_ATOMIC	|
						IB_QP_MIN_RNR_TIMER),
				[IB_QPT_XRC_INI] = (IB_QP_AV			|
						IB_QP_PATH_MTU			|
						IB_QP_DEST_QPN			|
						IB_QP_RQ_PSN),
				[IB_QPT_XRC_TGT] = (IB_QP_AV			|
						IB_QP_PATH_MTU			|
						IB_QP_DEST_QPN			|
						IB_QP_RQ_PSN			|
						IB_QP_MAX_DEST_RD_ATOMIC	|
						IB_QP_MIN_RNR_TIMER),
			},
			.req_param_add_eth = {
				[IB_QPT_RC]  = (IB_QP_SMAC),
				[IB_QPT_UC]  = (IB_QP_SMAC),
				[IB_QPT_XRC_INI]  = (IB_QP_SMAC),
				[IB_QPT_XRC_TGT]  = (IB_QP_SMAC)
			},
			.opt_param = {
				 [IB_QPT_UD]  = (IB_QP_PKEY_INDEX		|
						 IB_QP_QKEY),
				 [IB_QPT_UC]  = (IB_QP_ALT_PATH			|
						 IB_QP_ACCESS_FLAGS		|
						 IB_QP_PKEY_INDEX),
				 [IB_QPT_RC]  = (IB_QP_ALT_PATH			|
						 IB_QP_ACCESS_FLAGS		|
						 IB_QP_PKEY_INDEX),
				 [IB_QPT_XRC_INI] = (IB_QP_ALT_PATH		|
						 IB_QP_ACCESS_FLAGS		|
						 IB_QP_PKEY_INDEX),
				 [IB_QPT_XRC_TGT] = (IB_QP_ALT_PATH		|
						 IB_QP_ACCESS_FLAGS		|
						 IB_QP_PKEY_INDEX),
				 [IB_QPT_SMI] = (IB_QP_PKEY_INDEX		|
						 IB_QP_QKEY),
				 [IB_QPT_GSI] = (IB_QP_PKEY_INDEX		|
						 IB_QP_QKEY),
			 },
			.opt_param_add_eth = {
				[IB_QPT_RC]  = (IB_QP_ALT_SMAC			|
						IB_QP_VID			|
						IB_QP_ALT_VID),
				[IB_QPT_UC]  = (IB_QP_ALT_SMAC			|
						IB_QP_VID			|
						IB_QP_ALT_VID),
				[IB_QPT_XRC_INI]  = (IB_QP_ALT_SMAC		|
						IB_QP_VID			|
						IB_QP_ALT_VID),
				[IB_QPT_XRC_TGT]  = (IB_QP_ALT_SMAC		|
						IB_QP_VID			|
						IB_QP_ALT_VID)
			}
		}
	},
	[IB_QPS_RTR]   = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR] =   { .valid = 1 },
		[IB_QPS_RTS]   = {
			.valid = 1,
			.req_param = {
				[IB_QPT_UD]  = IB_QP_SQ_PSN,
				[IB_QPT_UC]  = IB_QP_SQ_PSN,
				[IB_QPT_RC]  = (IB_QP_TIMEOUT			|
						IB_QP_RETRY_CNT			|
						IB_QP_RNR_RETRY			|
						IB_QP_SQ_PSN			|
						IB_QP_MAX_QP_RD_ATOMIC),
				[IB_QPT_XRC_INI] = (IB_QP_TIMEOUT		|
						IB_QP_RETRY_CNT			|
						IB_QP_RNR_RETRY			|
						IB_QP_SQ_PSN			|
						IB_QP_MAX_QP_RD_ATOMIC),
				[IB_QPT_XRC_TGT] = (IB_QP_TIMEOUT		|
						IB_QP_SQ_PSN),
				[IB_QPT_SMI] = IB_QP_SQ_PSN,
				[IB_QPT_GSI] = IB_QP_SQ_PSN,
			},
			.opt_param = {
				 [IB_QPT_UD]  = (IB_QP_CUR_STATE		|
						 IB_QP_QKEY),
				 [IB_QPT_UC]  = (IB_QP_CUR_STATE		|
						 IB_QP_ALT_PATH			|
						 IB_QP_ACCESS_FLAGS		|
						 IB_QP_PATH_MIG_STATE),
				 [IB_QPT_RC]  = (IB_QP_CUR_STATE		|
						 IB_QP_ALT_PATH			|
						 IB_QP_ACCESS_FLAGS		|
						 IB_QP_MIN_RNR_TIMER		|
						 IB_QP_PATH_MIG_STATE),
				 [IB_QPT_XRC_INI] = (IB_QP_CUR_STATE		|
						 IB_QP_ALT_PATH			|
						 IB_QP_ACCESS_FLAGS		|
						 IB_QP_PATH_MIG_STATE),
				 [IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE		|
						 IB_QP_ALT_PATH			|
						 IB_QP_ACCESS_FLAGS		|
						 IB_QP_MIN_RNR_TIMER		|
						 IB_QP_PATH_MIG_STATE),
				 [IB_QPT_SMI] = (IB_QP_CUR_STATE		|
						 IB_QP_QKEY),
				 [IB_QPT_GSI] = (IB_QP_CUR_STATE		|
						 IB_QP_QKEY),
			 }
		}
	},
	[IB_QPS_RTS]   = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR] =   { .valid = 1 },
		[IB_QPS_RTS]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_CUR_STATE			|
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_CUR_STATE			|
						IB_QP_ACCESS_FLAGS		|
						IB_QP_ALT_PATH			|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_RC]  = (IB_QP_CUR_STATE			|
						IB_QP_ACCESS_FLAGS		|
						IB_QP_ALT_PATH			|
						IB_QP_PATH_MIG_STATE		|
						IB_QP_MIN_RNR_TIMER),
				[IB_QPT_XRC_INI] = (IB_QP_CUR_STATE		|
						IB_QP_ACCESS_FLAGS		|
						IB_QP_ALT_PATH			|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE		|
						IB_QP_ACCESS_FLAGS		|
						IB_QP_ALT_PATH			|
						IB_QP_PATH_MIG_STATE		|
						IB_QP_MIN_RNR_TIMER),
				[IB_QPT_SMI] = (IB_QP_CUR_STATE			|
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_CUR_STATE			|
						IB_QP_QKEY),
			}
		},
		[IB_QPS_SQD]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_UC]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_RC]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_XRC_INI] = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_XRC_TGT] = IB_QP_EN_SQD_ASYNC_NOTIFY, /* ??? */
				[IB_QPT_SMI] = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_GSI] = IB_QP_EN_SQD_ASYNC_NOTIFY
			}
		},
	},
	[IB_QPS_SQD]   = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR] =   { .valid = 1 },
		[IB_QPS_RTS]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_CUR_STATE			|
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_CUR_STATE			|
						IB_QP_ALT_PATH			|
						IB_QP_ACCESS_FLAGS		|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_RC]  = (IB_QP_CUR_STATE			|
						IB_QP_ALT_PATH			|
						IB_QP_ACCESS_FLAGS		|
						IB_QP_MIN_RNR_TIMER		|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_INI] = (IB_QP_CUR_STATE		|
						IB_QP_ALT_PATH			|
						IB_QP_ACCESS_FLAGS		|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE		|
						IB_QP_ALT_PATH			|
						IB_QP_ACCESS_FLAGS		|
						IB_QP_MIN_RNR_TIMER		|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_SMI] = (IB_QP_CUR_STATE			|
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_CUR_STATE			|
						IB_QP_QKEY),
			}
		},
		[IB_QPS_SQD]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX		|
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_AV			|
						IB_QP_ALT_PATH			|
						IB_QP_ACCESS_FLAGS		|
						IB_QP_PKEY_INDEX		|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_RC]  = (IB_QP_PORT			|
						IB_QP_AV			|
						IB_QP_TIMEOUT			|
						IB_QP_RETRY_CNT			|
						IB_QP_RNR_RETRY			|
						IB_QP_MAX_QP_RD_ATOMIC		|
						IB_QP_MAX_DEST_RD_ATOMIC	|
						IB_QP_ALT_PATH			|
						IB_QP_ACCESS_FLAGS		|
						IB_QP_PKEY_INDEX		|
						IB_QP_MIN_RNR_TIMER		|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_INI] = (IB_QP_PORT			|
						IB_QP_AV			|
						IB_QP_TIMEOUT			|
						IB_QP_RETRY_CNT			|
						IB_QP_RNR_RETRY			|
						IB_QP_MAX_QP_RD_ATOMIC		|
						IB_QP_ALT_PATH			|
						IB_QP_ACCESS_FLAGS		|
						IB_QP_PKEY_INDEX		|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_TGT] = (IB_QP_PORT			|
						IB_QP_AV			|
						IB_QP_TIMEOUT			|
						IB_QP_MAX_DEST_RD_ATOMIC	|
						IB_QP_ALT_PATH			|
						IB_QP_ACCESS_FLAGS		|
						IB_QP_PKEY_INDEX		|
						IB_QP_MIN_RNR_TIMER		|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX		|
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX		|
						IB_QP_QKEY),
			}
		}
	},
	[IB_QPS_SQE]   = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR] =   { .valid = 1 },
		[IB_QPS_RTS]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_CUR_STATE			|
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_CUR_STATE			|
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_SMI] = (IB_QP_CUR_STATE			|
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_CUR_STATE			|
						IB_QP_QKEY),
			}
		}
	},
	[IB_QPS_ERR] = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR] =   { .valid = 1 }
	}
};

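/*
 * Check that a QP state transition is legal and that the attribute mask
 * supplied with it contains all required attributes and no attributes
 * that are not allowed for that transition.
 */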
int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
		       enum ib_qp_type type, enum ib_qp_attr_mask mask,
		       enum rdma_link_layer ll)
{
	enum ib_qp_attr_mask req_param, opt_param;

	if (cur_state  < 0 || cur_state  > IB_QPS_ERR ||
	    next_state < 0 || next_state > IB_QPS_ERR)
		return 0;

	if (mask & IB_QP_CUR_STATE  &&
	    cur_state != IB_QPS_RTR && cur_state != IB_QPS_RTS &&
	    cur_state != IB_QPS_SQD && cur_state != IB_QPS_SQE)
		return 0;

	if (!qp_state_table[cur_state][next_state].valid)
		return 0;

	req_param = qp_state_table[cur_state][next_state].req_param[type];
	opt_param = qp_state_table[cur_state][next_state].opt_param[type];

	if (ll == IB_LINK_LAYER_ETHERNET) {
		req_param |= qp_state_table[cur_state][next_state].
			req_param_add_eth[type];
		opt_param |= qp_state_table[cur_state][next_state].
			opt_param_add_eth[type];
	}

	if ((mask & req_param) != req_param)
		return 0;

	if (mask & ~(req_param | opt_param | IB_QP_STATE))
		return 0;

	return 1;
}
EXPORT_SYMBOL(ib_modify_qp_is_ok);

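/*
 * Resolve the Ethernet L2 attributes (source/destination MAC and VLAN)
 * needed when a new address vector is being set on a QP whose port uses
 * the Ethernet (RoCE) link layer.
 */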
int ib_resolve_eth_l2_attrs(struct ib_qp *qp,
			    struct ib_qp_attr *qp_attr, int *qp_attr_mask)
{
	int           ret = 0;
	union ib_gid  sgid;

	if ((*qp_attr_mask & IB_QP_AV)  &&
	    (rdma_port_get_link_layer(qp->device, qp_attr->ah_attr.port_num) == IB_LINK_LAYER_ETHERNET)) {
		ret = ib_query_gid(qp->device, qp_attr->ah_attr.port_num,
				   qp_attr->ah_attr.grh.sgid_index, &sgid);
		if (ret)
			goto out;
		if (rdma_link_local_addr((struct in6_addr *)qp_attr->ah_attr.grh.dgid.raw)) {
			rdma_get_ll_mac((struct in6_addr *)qp_attr->ah_attr.grh.dgid.raw, qp_attr->ah_attr.dmac);
			rdma_get_ll_mac((struct in6_addr *)sgid.raw, qp_attr->smac);
			if (!(*qp_attr_mask & IB_QP_VID))
				qp_attr->vlan_id = rdma_get_vlan_id(&sgid);
		} else {
			ret = rdma_addr_find_dmac_by_grh(&sgid, &qp_attr->ah_attr.grh.dgid,
					qp_attr->ah_attr.dmac, &qp_attr->vlan_id);
			if (ret)
				goto out;
			ret = rdma_addr_find_smac_by_sgid(&sgid, qp_attr->smac, NULL);
			if (ret)
				goto out;
		}
		*qp_attr_mask |= IB_QP_SMAC;
		if (qp_attr->vlan_id < 0xFFFF)
			*qp_attr_mask |= IB_QP_VID;
	}
out:
	return ret;
}
EXPORT_SYMBOL(ib_resolve_eth_l2_attrs);

int ib_modify_qp(struct ib_qp *qp,
		 struct ib_qp_attr *qp_attr,
		 int qp_attr_mask)
{
	int ret;

	ret = ib_resolve_eth_l2_attrs(qp, qp_attr, &qp_attr_mask);
	if (ret)
		return ret;

	return qp->device->modify_qp(qp->real_qp, qp_attr, qp_attr_mask, NULL);
}
EXPORT_SYMBOL(ib_modify_qp);

int ib_query_qp(struct ib_qp *qp,
		struct ib_qp_attr *qp_attr,
		int qp_attr_mask,
		struct ib_qp_init_attr *qp_init_attr)
{
	return qp->device->query_qp ?
		qp->device->query_qp(qp->real_qp, qp_attr, qp_attr_mask, qp_init_attr) :
		-ENOSYS;
}
EXPORT_SYMBOL(ib_query_qp);

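/*
 * Close a QP handle obtained from ib_open_qp() and drop its reference
 * on the underlying real QP.  Real QPs themselves are destroyed with
 * ib_destroy_qp().
 */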
int ib_close_qp(struct ib_qp *qp)
{
	struct ib_qp *real_qp;
	unsigned long flags;

	real_qp = qp->real_qp;
	if (real_qp == qp)
		return -EINVAL;

	spin_lock_irqsave(&real_qp->device->event_handler_lock, flags);
	list_del(&qp->open_list);
	spin_unlock_irqrestore(&real_qp->device->event_handler_lock, flags);

	atomic_dec(&real_qp->usecnt);
	kfree(qp);

	return 0;
}
EXPORT_SYMBOL(ib_close_qp);

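/*
 * Destroy a shared QP handle.  The underlying real QP is destroyed only
 * once the last handle opened on it has been closed.
 */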
static int __ib_destroy_shared_qp(struct ib_qp *qp)
{
	struct ib_xrcd *xrcd;
	struct ib_qp *real_qp;
	int ret;

	real_qp = qp->real_qp;
	xrcd = real_qp->xrcd;

	mutex_lock(&xrcd->tgt_qp_mutex);
	ib_close_qp(qp);
	if (atomic_read(&real_qp->usecnt) == 0)
		list_del(&real_qp->xrcd_list);
	else
		real_qp = NULL;
	mutex_unlock(&xrcd->tgt_qp_mutex);

	if (real_qp) {
		ret = ib_destroy_qp(real_qp);
		if (!ret)
			atomic_dec(&xrcd->usecnt);
		else
			__ib_insert_xrcd_qp(xrcd, real_qp);
	}

	return 0;
}

int ib_destroy_qp(struct ib_qp *qp)
{
	struct ib_pd *pd;
	struct ib_cq *scq, *rcq;
	struct ib_srq *srq;
	int ret;

	if (atomic_read(&qp->usecnt))
		return -EBUSY;

	if (qp->real_qp != qp)
		return __ib_destroy_shared_qp(qp);

	pd   = qp->pd;
	scq  = qp->send_cq;
	rcq  = qp->recv_cq;
	srq  = qp->srq;

	ret = qp->device->destroy_qp(qp);
	if (!ret) {
		if (pd)
			atomic_dec(&pd->usecnt);
		if (scq)
			atomic_dec(&scq->usecnt);
		if (rcq)
			atomic_dec(&rcq->usecnt);
		if (srq)
			atomic_dec(&srq->usecnt);
	}

	return ret;
}
EXPORT_SYMBOL(ib_destroy_qp);

/* Completion queues */

struct ib_cq *ib_create_cq(struct ib_device *device,
			   ib_comp_handler comp_handler,
			   void (*event_handler)(struct ib_event *, void *),
			   void *cq_context, int cqe, int comp_vector)
{
	struct ib_cq *cq;

	cq = device->create_cq(device, cqe, comp_vector, NULL, NULL);

	if (!IS_ERR(cq)) {
		cq->device        = device;
		cq->uobject       = NULL;
		cq->comp_handler  = comp_handler;
		cq->event_handler = event_handler;
		cq->cq_context    = cq_context;
		atomic_set(&cq->usecnt, 0);
	}

	return cq;
}
EXPORT_SYMBOL(ib_create_cq);

int ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
{
	return cq->device->modify_cq ?
		cq->device->modify_cq(cq, cq_count, cq_period) : -ENOSYS;
}
EXPORT_SYMBOL(ib_modify_cq);

int ib_destroy_cq(struct ib_cq *cq)
{
	if (atomic_read(&cq->usecnt))
		return -EBUSY;

	return cq->device->destroy_cq(cq);
}
EXPORT_SYMBOL(ib_destroy_cq);

int ib_resize_cq(struct ib_cq *cq, int cqe)
{
	return cq->device->resize_cq ?
		cq->device->resize_cq(cq, cqe, NULL) : -ENOSYS;
}
EXPORT_SYMBOL(ib_resize_cq);

/* Memory regions */

struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags)
{
	struct ib_mr *mr;
	int err;

	err = ib_check_mr_access(mr_access_flags);
	if (err)
		return ERR_PTR(err);

	mr = pd->device->get_dma_mr(pd, mr_access_flags);

	if (!IS_ERR(mr)) {
		mr->device  = pd->device;
		mr->pd      = pd;
		mr->uobject = NULL;
		atomic_inc(&pd->usecnt);
		atomic_set(&mr->usecnt, 0);
	}

	return mr;
}
EXPORT_SYMBOL(ib_get_dma_mr);

struct ib_mr *ib_reg_phys_mr(struct ib_pd *pd,
			     struct ib_phys_buf *phys_buf_array,
			     int num_phys_buf,
			     int mr_access_flags,
			     u64 *iova_start)
{
	struct ib_mr *mr;
	int err;

	err = ib_check_mr_access(mr_access_flags);
	if (err)
		return ERR_PTR(err);

	if (!pd->device->reg_phys_mr)
		return ERR_PTR(-ENOSYS);

	mr = pd->device->reg_phys_mr(pd, phys_buf_array, num_phys_buf,
				     mr_access_flags, iova_start);

	if (!IS_ERR(mr)) {
		mr->device  = pd->device;
		mr->pd      = pd;
		mr->uobject = NULL;
		atomic_inc(&pd->usecnt);
		atomic_set(&mr->usecnt, 0);
	}

	return mr;
}
EXPORT_SYMBOL(ib_reg_phys_mr);

int ib_rereg_phys_mr(struct ib_mr *mr,
		     int mr_rereg_mask,
		     struct ib_pd *pd,
		     struct ib_phys_buf *phys_buf_array,
		     int num_phys_buf,
		     int mr_access_flags,
		     u64 *iova_start)
{
	struct ib_pd *old_pd;
	int ret;

	ret = ib_check_mr_access(mr_access_flags);
	if (ret)
		return ret;

	if (!mr->device->rereg_phys_mr)
		return -ENOSYS;

	if (atomic_read(&mr->usecnt))
		return -EBUSY;

	old_pd = mr->pd;

	ret = mr->device->rereg_phys_mr(mr, mr_rereg_mask, pd,
					phys_buf_array, num_phys_buf,
					mr_access_flags, iova_start);

	if (!ret && (mr_rereg_mask & IB_MR_REREG_PD)) {
		atomic_dec(&old_pd->usecnt);
		atomic_inc(&pd->usecnt);
	}

	return ret;
}
EXPORT_SYMBOL(ib_rereg_phys_mr);

int ib_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr)
{
	return mr->device->query_mr ?
		mr->device->query_mr(mr, mr_attr) : -ENOSYS;
}
EXPORT_SYMBOL(ib_query_mr);

int ib_dereg_mr(struct ib_mr *mr)
{
	struct ib_pd *pd;
	int ret;

	if (atomic_read(&mr->usecnt))
		return -EBUSY;

	pd = mr->pd;
	ret = mr->device->dereg_mr(mr);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(ib_dereg_mr);

struct ib_mr *ib_create_mr(struct ib_pd *pd,
			   struct ib_mr_init_attr *mr_init_attr)
{
	struct ib_mr *mr;

	if (!pd->device->create_mr)
		return ERR_PTR(-ENOSYS);

	mr = pd->device->create_mr(pd, mr_init_attr);

	if (!IS_ERR(mr)) {
		mr->device  = pd->device;
		mr->pd      = pd;
		mr->uobject = NULL;
		atomic_inc(&pd->usecnt);
		atomic_set(&mr->usecnt, 0);
	}

	return mr;
}
EXPORT_SYMBOL(ib_create_mr);

int ib_destroy_mr(struct ib_mr *mr)
{
	struct ib_pd *pd;
	int ret;

	if (atomic_read(&mr->usecnt))
		return -EBUSY;

	pd = mr->pd;
	ret = mr->device->destroy_mr(mr);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(ib_destroy_mr);

struct ib_mr *ib_alloc_fast_reg_mr(struct ib_pd *pd, int max_page_list_len)
{
	struct ib_mr *mr;

	if (!pd->device->alloc_fast_reg_mr)
		return ERR_PTR(-ENOSYS);

	mr = pd->device->alloc_fast_reg_mr(pd, max_page_list_len);

	if (!IS_ERR(mr)) {
		mr->device  = pd->device;
		mr->pd      = pd;
		mr->uobject = NULL;
		atomic_inc(&pd->usecnt);
		atomic_set(&mr->usecnt, 0);
	}

	return mr;
}
EXPORT_SYMBOL(ib_alloc_fast_reg_mr);

struct ib_fast_reg_page_list *ib_alloc_fast_reg_page_list(struct ib_device *device,
							  int max_page_list_len)
{
	struct ib_fast_reg_page_list *page_list;

	if (!device->alloc_fast_reg_page_list)
		return ERR_PTR(-ENOSYS);

	page_list = device->alloc_fast_reg_page_list(device, max_page_list_len);

	if (!IS_ERR(page_list)) {
		page_list->device = device;
		page_list->max_page_list_len = max_page_list_len;
	}

	return page_list;
}
EXPORT_SYMBOL(ib_alloc_fast_reg_page_list);

void ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list)
{
	page_list->device->free_fast_reg_page_list(page_list);
}
EXPORT_SYMBOL(ib_free_fast_reg_page_list);

/* Memory windows */

struct ib_mw *ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type)
{
	struct ib_mw *mw;

	if (!pd->device->alloc_mw)
		return ERR_PTR(-ENOSYS);

	mw = pd->device->alloc_mw(pd, type);
	if (!IS_ERR(mw)) {
		mw->device  = pd->device;
		mw->pd      = pd;
		mw->uobject = NULL;
		mw->type    = type;
		atomic_inc(&pd->usecnt);
	}

	return mw;
}
EXPORT_SYMBOL(ib_alloc_mw);

int ib_dealloc_mw(struct ib_mw *mw)
{
	struct ib_pd *pd;
	int ret;

	pd = mw->pd;
	ret = mw->device->dealloc_mw(mw);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(ib_dealloc_mw);

/* "Fast" memory regions */

struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
			    int mr_access_flags,
			    struct ib_fmr_attr *fmr_attr)
{
	struct ib_fmr *fmr;

	if (!pd->device->alloc_fmr)
		return ERR_PTR(-ENOSYS);

	fmr = pd->device->alloc_fmr(pd, mr_access_flags, fmr_attr);
	if (!IS_ERR(fmr)) {
		fmr->device = pd->device;
		fmr->pd     = pd;
		atomic_inc(&pd->usecnt);
	}

	return fmr;
}
EXPORT_SYMBOL(ib_alloc_fmr);

int ib_unmap_fmr(struct list_head *fmr_list)
{
	struct ib_fmr *fmr;

	if (list_empty(fmr_list))
		return 0;

	fmr = list_entry(fmr_list->next, struct ib_fmr, list);
	return fmr->device->unmap_fmr(fmr_list);
}
EXPORT_SYMBOL(ib_unmap_fmr);

int ib_dealloc_fmr(struct ib_fmr *fmr)
{
	struct ib_pd *pd;
	int ret;

	pd = fmr->pd;
	ret = fmr->device->dealloc_fmr(fmr);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(ib_dealloc_fmr);

/* Multicast groups */

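/*
 * Attach a UD QP to a multicast group.  The GID must be a multicast GID
 * (first byte 0xff) and the QP must be of type IB_QPT_UD.
 */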
int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
{
	int ret;

	if (!qp->device->attach_mcast)
		return -ENOSYS;
	if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UD)
		return -EINVAL;

	ret = qp->device->attach_mcast(qp, gid, lid);
	if (!ret)
		atomic_inc(&qp->usecnt);
	return ret;
}
EXPORT_SYMBOL(ib_attach_mcast);

int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
{
	int ret;

	if (!qp->device->detach_mcast)
		return -ENOSYS;
	if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UD)
		return -EINVAL;

	ret = qp->device->detach_mcast(qp, gid, lid);
	if (!ret)
		atomic_dec(&qp->usecnt);
	return ret;
}
EXPORT_SYMBOL(ib_detach_mcast);

struct ib_xrcd *ib_alloc_xrcd(struct ib_device *device)
{
	struct ib_xrcd *xrcd;

	if (!device->alloc_xrcd)
		return ERR_PTR(-ENOSYS);

	xrcd = device->alloc_xrcd(device, NULL, NULL);
	if (!IS_ERR(xrcd)) {
		xrcd->device = device;
		xrcd->inode = NULL;
		atomic_set(&xrcd->usecnt, 0);
		mutex_init(&xrcd->tgt_qp_mutex);
		INIT_LIST_HEAD(&xrcd->tgt_qp_list);
	}

	return xrcd;
}
EXPORT_SYMBOL(ib_alloc_xrcd);

int ib_dealloc_xrcd(struct ib_xrcd *xrcd)
{
	struct ib_qp *qp;
	int ret;

	if (atomic_read(&xrcd->usecnt))
		return -EBUSY;

	while (!list_empty(&xrcd->tgt_qp_list)) {
		qp = list_entry(xrcd->tgt_qp_list.next, struct ib_qp, xrcd_list);
		ret = ib_destroy_qp(qp);
		if (ret)
			return ret;
	}

	return xrcd->device->dealloc_xrcd(xrcd);
}
EXPORT_SYMBOL(ib_dealloc_xrcd);

struct ib_flow *ib_create_flow(struct ib_qp *qp,
			       struct ib_flow_attr *flow_attr,
			       int domain)
{
	struct ib_flow *flow_id;

	if (!qp->device->create_flow)
		return ERR_PTR(-ENOSYS);

	flow_id = qp->device->create_flow(qp, flow_attr, domain);
	if (!IS_ERR(flow_id))
		atomic_inc(&qp->usecnt);
	return flow_id;
}
EXPORT_SYMBOL(ib_create_flow);

int ib_destroy_flow(struct ib_flow *flow_id)
{
	int err;
	struct ib_qp *qp = flow_id->qp;

	err = qp->device->destroy_flow(flow_id);
	if (!err)
		atomic_dec(&qp->usecnt);
	return err;
}
EXPORT_SYMBOL(ib_destroy_flow);

int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
		       struct ib_mr_status *mr_status)
{
	return mr->device->check_mr_status ?
		mr->device->check_mr_status(mr, check_mask, mr_status) : -ENOSYS;
}
EXPORT_SYMBOL(ib_check_mr_status);