1/*
2 * Copyright (c) 2005 Ammasso, Inc.  All rights reserved.
3 * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses.  You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 *     Redistribution and use in source and binary forms, with or
12 *     without modification, are permitted provided that the following
13 *     conditions are met:
14 *
15 *      - Redistributions of source code must retain the above
16 *        copyright notice, this list of conditions and the following
17 *        disclaimer.
18 *
19 *      - Redistributions in binary form must reproduce the above
20 *        copyright notice, this list of conditions and the following
21 *        disclaimer in the documentation and/or other materials
22 *        provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 *
33 */
34#include <linux/slab.h>
35
36#include "c2.h"
37#include "c2_wr.h"
38#include "c2_vq.h"
39#include <rdma/iw_cm.h>
40
/*
 * c2_llp_connect - initiate an active-side iWARP connection.
 *
 * Posts a CCWR_QP_CONNECT work request to the adapter for the QP named
 * by @iw_param->qpn, carrying the remote IPv4 address/port from @cm_id
 * and any caller-supplied private data.  There is no synchronous reply
 * from the adapter for this WR; connection establishment is reported
 * later via the event path.
 *
 * Returns 0 on success or a negative errno.  On failure the QP <--> CM_ID
 * association made here is undone and the cm_id reference is dropped.
 */
int c2_llp_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
{
	struct c2_dev *c2dev = to_c2dev(cm_id->device);
	struct ib_qp *ibqp;
	struct c2_qp *qp;
	struct c2wr_qp_connect_req *wr;	/* variable size needs a malloc. */
	struct c2_vq_req *vq_req;
	int err;
	struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_id->remote_addr;

	/* The adapter only speaks IPv4. */
	if (cm_id->remote_addr.ss_family != AF_INET)
		return -ENOSYS;

	/* Look up the QP the caller wants to connect. */
	ibqp = c2_get_qp(cm_id->device, iw_param->qpn);
	if (!ibqp)
		return -EINVAL;
	qp = to_c2qp(ibqp);

	/*
	 * Associate QP <--> CM_ID.  add_ref() here is balanced by
	 * rem_ref() in bail0: every failure path below goes through
	 * bail0, which runs after this point.
	 */
	cm_id->provider_data = qp;
	cm_id->add_ref(cm_id);
	qp->cm_id = cm_id;

	/*
	 * only support the max private_data length
	 */
	if (iw_param->private_data_len > C2_MAX_PRIVATE_DATA_SIZE) {
		err = -EINVAL;
		goto bail0;
	}
	/*
	 * Set the rdma read limits
	 */
	err = c2_qp_set_read_limits(c2dev, qp, iw_param->ord, iw_param->ird);
	if (err)
		goto bail0;

	/*
	 * Create and send a WR_QP_CONNECT...
	 * The WR is variable-sized (trailing private data), hence kmalloc
	 * of the full verbs-queue message size rather than a stack struct.
	 */
	wr = kmalloc(c2dev->req_vq.msg_size, GFP_KERNEL);
	if (!wr) {
		err = -ENOMEM;
		goto bail0;
	}

	/*
	 * NOTE(review): vq_req is allocated and freed but never attached to
	 * the WR (hdr.context is 0 below) and vq_send_wr() does not take it.
	 * It appears unused here — verify whether it can be removed.
	 */
	vq_req = vq_req_alloc(c2dev);
	if (!vq_req) {
		err = -ENOMEM;
		goto bail1;
	}

	c2_wr_set_id(wr, CCWR_QP_CONNECT);
	wr->hdr.context = 0;	/* no reply expected, so no request context */
	wr->rnic_handle = c2dev->adapter_handle;
	wr->qp_handle = qp->adapter_handle;

	/* sin_addr/sin_port are already in network byte order. */
	wr->remote_addr = raddr->sin_addr.s_addr;
	wr->remote_port = raddr->sin_port;

	/*
	 * Move any private data from the callers's buf into
	 * the WR.
	 */
	if (iw_param->private_data) {
		wr->private_data_length =
			cpu_to_be32(iw_param->private_data_len);
		memcpy(&wr->private_data[0], iw_param->private_data,
		       iw_param->private_data_len);
	} else
		wr->private_data_length = 0;

	/*
	 * Send WR to adapter.  NOTE: There is no synch reply from
	 * the adapter.
	 */
	err = vq_send_wr(c2dev, (union c2wr *) wr);
	vq_req_free(c2dev, vq_req);

 bail1:
	kfree(wr);
 bail0:
	if (err) {
		/*
		 * If we fail, release reference on QP and
		 * disassociate QP from CM_ID
		 */
		cm_id->provider_data = NULL;
		qp->cm_id = NULL;
		cm_id->rem_ref(cm_id);
	}
	return err;
}
134
135int c2_llp_service_create(struct iw_cm_id *cm_id, int backlog)
136{
137	struct c2_dev *c2dev;
138	struct c2wr_ep_listen_create_req wr;
139	struct c2wr_ep_listen_create_rep *reply;
140	struct c2_vq_req *vq_req;
141	int err;
142	struct sockaddr_in *laddr = (struct sockaddr_in *)&cm_id->local_addr;
143
144	if (cm_id->local_addr.ss_family != AF_INET)
145		return -ENOSYS;
146
147	c2dev = to_c2dev(cm_id->device);
148	if (c2dev == NULL)
149		return -EINVAL;
150
151	/*
152	 * Allocate verbs request.
153	 */
154	vq_req = vq_req_alloc(c2dev);
155	if (!vq_req)
156		return -ENOMEM;
157
158	/*
159	 * Build the WR
160	 */
161	c2_wr_set_id(&wr, CCWR_EP_LISTEN_CREATE);
162	wr.hdr.context = (u64) (unsigned long) vq_req;
163	wr.rnic_handle = c2dev->adapter_handle;
164	wr.local_addr = laddr->sin_addr.s_addr;
165	wr.local_port = laddr->sin_port;
166	wr.backlog = cpu_to_be32(backlog);
167	wr.user_context = (u64) (unsigned long) cm_id;
168
169	/*
170	 * Reference the request struct.  Dereferenced in the int handler.
171	 */
172	vq_req_get(c2dev, vq_req);
173
174	/*
175	 * Send WR to adapter
176	 */
177	err = vq_send_wr(c2dev, (union c2wr *) & wr);
178	if (err) {
179		vq_req_put(c2dev, vq_req);
180		goto bail0;
181	}
182
183	/*
184	 * Wait for reply from adapter
185	 */
186	err = vq_wait_for_reply(c2dev, vq_req);
187	if (err)
188		goto bail0;
189
190	/*
191	 * Process reply
192	 */
193	reply =
194	    (struct c2wr_ep_listen_create_rep *) (unsigned long) vq_req->reply_msg;
195	if (!reply) {
196		err = -ENOMEM;
197		goto bail1;
198	}
199
200	if ((err = c2_errno(reply)) != 0)
201		goto bail1;
202
203	/*
204	 * Keep the adapter handle. Used in subsequent destroy
205	 */
206	cm_id->provider_data = (void*)(unsigned long) reply->ep_handle;
207
208	/*
209	 * free vq stuff
210	 */
211	vq_repbuf_free(c2dev, reply);
212	vq_req_free(c2dev, vq_req);
213
214	return 0;
215
216 bail1:
217	vq_repbuf_free(c2dev, reply);
218 bail0:
219	vq_req_free(c2dev, vq_req);
220	return err;
221}
222
223
224int c2_llp_service_destroy(struct iw_cm_id *cm_id)
225{
226
227	struct c2_dev *c2dev;
228	struct c2wr_ep_listen_destroy_req wr;
229	struct c2wr_ep_listen_destroy_rep *reply;
230	struct c2_vq_req *vq_req;
231	int err;
232
233	c2dev = to_c2dev(cm_id->device);
234	if (c2dev == NULL)
235		return -EINVAL;
236
237	/*
238	 * Allocate verbs request.
239	 */
240	vq_req = vq_req_alloc(c2dev);
241	if (!vq_req)
242		return -ENOMEM;
243
244	/*
245	 * Build the WR
246	 */
247	c2_wr_set_id(&wr, CCWR_EP_LISTEN_DESTROY);
248	wr.hdr.context = (unsigned long) vq_req;
249	wr.rnic_handle = c2dev->adapter_handle;
250	wr.ep_handle = (u32)(unsigned long)cm_id->provider_data;
251
252	/*
253	 * reference the request struct.  dereferenced in the int handler.
254	 */
255	vq_req_get(c2dev, vq_req);
256
257	/*
258	 * Send WR to adapter
259	 */
260	err = vq_send_wr(c2dev, (union c2wr *) & wr);
261	if (err) {
262		vq_req_put(c2dev, vq_req);
263		goto bail0;
264	}
265
266	/*
267	 * Wait for reply from adapter
268	 */
269	err = vq_wait_for_reply(c2dev, vq_req);
270	if (err)
271		goto bail0;
272
273	/*
274	 * Process reply
275	 */
276	reply=(struct c2wr_ep_listen_destroy_rep *)(unsigned long)vq_req->reply_msg;
277	if (!reply) {
278		err = -ENOMEM;
279		goto bail0;
280	}
281	if ((err = c2_errno(reply)) != 0)
282		goto bail1;
283
284 bail1:
285	vq_repbuf_free(c2dev, reply);
286 bail0:
287	vq_req_free(c2dev, vq_req);
288	return err;
289}
290
291int c2_llp_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
292{
293	struct c2_dev *c2dev = to_c2dev(cm_id->device);
294	struct c2_qp *qp;
295	struct ib_qp *ibqp;
296	struct c2wr_cr_accept_req *wr;	/* variable length WR */
297	struct c2_vq_req *vq_req;
298	struct c2wr_cr_accept_rep *reply;	/* VQ Reply msg ptr. */
299	int err;
300
301	ibqp = c2_get_qp(cm_id->device, iw_param->qpn);
302	if (!ibqp)
303		return -EINVAL;
304	qp = to_c2qp(ibqp);
305
306	/* Set the RDMA read limits */
307	err = c2_qp_set_read_limits(c2dev, qp, iw_param->ord, iw_param->ird);
308	if (err)
309		goto bail0;
310
311	/* Allocate verbs request. */
312	vq_req = vq_req_alloc(c2dev);
313	if (!vq_req) {
314		err = -ENOMEM;
315		goto bail0;
316	}
317	vq_req->qp = qp;
318	vq_req->cm_id = cm_id;
319	vq_req->event = IW_CM_EVENT_ESTABLISHED;
320
321	wr = kmalloc(c2dev->req_vq.msg_size, GFP_KERNEL);
322	if (!wr) {
323		err = -ENOMEM;
324		goto bail1;
325	}
326
327	/* Build the WR */
328	c2_wr_set_id(wr, CCWR_CR_ACCEPT);
329	wr->hdr.context = (unsigned long) vq_req;
330	wr->rnic_handle = c2dev->adapter_handle;
331	wr->ep_handle = (u32) (unsigned long) cm_id->provider_data;
332	wr->qp_handle = qp->adapter_handle;
333
334	/* Replace the cr_handle with the QP after accept */
335	cm_id->provider_data = qp;
336	cm_id->add_ref(cm_id);
337	qp->cm_id = cm_id;
338
339	cm_id->provider_data = qp;
340
341	/* Validate private_data length */
342	if (iw_param->private_data_len > C2_MAX_PRIVATE_DATA_SIZE) {
343		err = -EINVAL;
344		goto bail1;
345	}
346
347	if (iw_param->private_data) {
348		wr->private_data_length = cpu_to_be32(iw_param->private_data_len);
349		memcpy(&wr->private_data[0],
350		       iw_param->private_data, iw_param->private_data_len);
351	} else
352		wr->private_data_length = 0;
353
354	/* Reference the request struct.  Dereferenced in the int handler. */
355	vq_req_get(c2dev, vq_req);
356
357	/* Send WR to adapter */
358	err = vq_send_wr(c2dev, (union c2wr *) wr);
359	if (err) {
360		vq_req_put(c2dev, vq_req);
361		goto bail1;
362	}
363
364	/* Wait for reply from adapter */
365	err = vq_wait_for_reply(c2dev, vq_req);
366	if (err)
367		goto bail1;
368
369	/* Check that reply is present */
370	reply = (struct c2wr_cr_accept_rep *) (unsigned long) vq_req->reply_msg;
371	if (!reply) {
372		err = -ENOMEM;
373		goto bail1;
374	}
375
376	err = c2_errno(reply);
377	vq_repbuf_free(c2dev, reply);
378
379	if (!err)
380		c2_set_qp_state(qp, C2_QP_STATE_RTS);
381 bail1:
382	kfree(wr);
383	vq_req_free(c2dev, vq_req);
384 bail0:
385	if (err) {
386		/*
387		 * If we fail, release reference on QP and
388		 * disassociate QP from CM_ID
389		 */
390		cm_id->provider_data = NULL;
391		qp->cm_id = NULL;
392		cm_id->rem_ref(cm_id);
393	}
394	return err;
395}
396
397int c2_llp_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
398{
399	struct c2_dev *c2dev;
400	struct c2wr_cr_reject_req wr;
401	struct c2_vq_req *vq_req;
402	struct c2wr_cr_reject_rep *reply;
403	int err;
404
405	c2dev = to_c2dev(cm_id->device);
406
407	/*
408	 * Allocate verbs request.
409	 */
410	vq_req = vq_req_alloc(c2dev);
411	if (!vq_req)
412		return -ENOMEM;
413
414	/*
415	 * Build the WR
416	 */
417	c2_wr_set_id(&wr, CCWR_CR_REJECT);
418	wr.hdr.context = (unsigned long) vq_req;
419	wr.rnic_handle = c2dev->adapter_handle;
420	wr.ep_handle = (u32) (unsigned long) cm_id->provider_data;
421
422	/*
423	 * reference the request struct.  dereferenced in the int handler.
424	 */
425	vq_req_get(c2dev, vq_req);
426
427	/*
428	 * Send WR to adapter
429	 */
430	err = vq_send_wr(c2dev, (union c2wr *) & wr);
431	if (err) {
432		vq_req_put(c2dev, vq_req);
433		goto bail0;
434	}
435
436	/*
437	 * Wait for reply from adapter
438	 */
439	err = vq_wait_for_reply(c2dev, vq_req);
440	if (err)
441		goto bail0;
442
443	/*
444	 * Process reply
445	 */
446	reply = (struct c2wr_cr_reject_rep *) (unsigned long)
447		vq_req->reply_msg;
448	if (!reply) {
449		err = -ENOMEM;
450		goto bail0;
451	}
452	err = c2_errno(reply);
453	/*
454	 * free vq stuff
455	 */
456	vq_repbuf_free(c2dev, reply);
457
458 bail0:
459	vq_req_free(c2dev, vq_req);
460	return err;
461}
462