1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
19  *
20  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21  * CA 95054 USA or visit www.sun.com if you need additional information or
22  * have any questions.
23  *
24  * GPL HEADER END
25  */
26 /*
27  * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
28  * Use is subject to license terms.
29  *
30  * Copyright (c) 2011, 2012, Intel Corporation.
31  */
32 /*
33  * This file is part of Lustre, http://www.lustre.org/
34  * Lustre is a trademark of Sun Microsystems, Inc.
35  *
36  * lnet/klnds/o2iblnd/o2iblnd.c
37  *
38  * Author: Eric Barton <eric@bartonsoftware.com>
39  */
40 
41 #include "o2iblnd.h"
42 #include <asm/div64.h>
43 
44 static lnd_t the_o2iblnd = {
45 	.lnd_type       = O2IBLND,
46 	.lnd_startup    = kiblnd_startup,
47 	.lnd_shutdown   = kiblnd_shutdown,
48 	.lnd_ctl	= kiblnd_ctl,
49 	.lnd_query      = kiblnd_query,
50 	.lnd_send       = kiblnd_send,
51 	.lnd_recv       = kiblnd_recv,
52 };
53 
54 kib_data_t	      kiblnd_data;
55 
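/* Simple rotate-and-add checksum over 'nob' bytes.  A result of zero is
 * mapped to 1, since 0 on the wire means "no checksum". */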
56 static __u32 kiblnd_cksum(void *ptr, int nob)
57 {
58 	char  *c  = ptr;
59 	__u32  sum = 0;
60 
61 	while (nob-- > 0)
62 		sum = ((sum << 1) | (sum >> 31)) + *c++;
63 
64 	/* ensure I don't return 0 (== no checksum) */
65 	return (sum == 0) ? 1 : sum;
66 }
67 
68 static char *kiblnd_msgtype2str(int type)
69 {
70 	switch (type) {
71 	case IBLND_MSG_CONNREQ:
72 		return "CONNREQ";
73 
74 	case IBLND_MSG_CONNACK:
75 		return "CONNACK";
76 
77 	case IBLND_MSG_NOOP:
78 		return "NOOP";
79 
80 	case IBLND_MSG_IMMEDIATE:
81 		return "IMMEDIATE";
82 
83 	case IBLND_MSG_PUT_REQ:
84 		return "PUT_REQ";
85 
86 	case IBLND_MSG_PUT_NAK:
87 		return "PUT_NAK";
88 
89 	case IBLND_MSG_PUT_ACK:
90 		return "PUT_ACK";
91 
92 	case IBLND_MSG_PUT_DONE:
93 		return "PUT_DONE";
94 
95 	case IBLND_MSG_GET_REQ:
96 		return "GET_REQ";
97 
98 	case IBLND_MSG_GET_DONE:
99 		return "GET_DONE";
100 
101 	default:
102 		return "???";
103 	}
104 }
105 
106 static int kiblnd_msgtype2size(int type)
107 {
108 	const int hdr_size = offsetof(kib_msg_t, ibm_u);
109 
110 	switch (type) {
111 	case IBLND_MSG_CONNREQ:
112 	case IBLND_MSG_CONNACK:
113 		return hdr_size + sizeof(kib_connparams_t);
114 
115 	case IBLND_MSG_NOOP:
116 		return hdr_size;
117 
118 	case IBLND_MSG_IMMEDIATE:
119 		return offsetof(kib_msg_t, ibm_u.immediate.ibim_payload[0]);
120 
121 	case IBLND_MSG_PUT_REQ:
122 		return hdr_size + sizeof(kib_putreq_msg_t);
123 
124 	case IBLND_MSG_PUT_ACK:
125 		return hdr_size + sizeof(kib_putack_msg_t);
126 
127 	case IBLND_MSG_GET_REQ:
128 		return hdr_size + sizeof(kib_get_msg_t);
129 
130 	case IBLND_MSG_PUT_NAK:
131 	case IBLND_MSG_PUT_DONE:
132 	case IBLND_MSG_GET_DONE:
133 		return hdr_size + sizeof(kib_completion_msg_t);
134 	default:
135 		return -1;
136 	}
137 }
138 
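/* Validate (and byte-swap, if 'flip' is set) the RDMA descriptor carried by
 * GET_REQ and PUT_ACK messages.  Returns non-zero if the descriptor is
 * malformed: bad fragment count, or message too short to hold it. */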
139 static int kiblnd_unpack_rd(kib_msg_t *msg, int flip)
140 {
141 	kib_rdma_desc_t   *rd;
142 	int		nob;
143 	int		n;
144 	int		i;
145 
146 	LASSERT(msg->ibm_type == IBLND_MSG_GET_REQ ||
147 		 msg->ibm_type == IBLND_MSG_PUT_ACK);
148 
149 	rd = msg->ibm_type == IBLND_MSG_GET_REQ ?
150 			      &msg->ibm_u.get.ibgm_rd :
151 			      &msg->ibm_u.putack.ibpam_rd;
152 
153 	if (flip) {
154 		__swab32s(&rd->rd_key);
155 		__swab32s(&rd->rd_nfrags);
156 	}
157 
158 	n = rd->rd_nfrags;
159 
160 	if (n <= 0 || n > IBLND_MAX_RDMA_FRAGS) {
161 		CERROR("Bad nfrags: %d, should be 0 < n <= %d\n",
162 		       n, IBLND_MAX_RDMA_FRAGS);
163 		return 1;
164 	}
165 
166 	nob = offsetof(kib_msg_t, ibm_u) +
167 	      kiblnd_rd_msg_size(rd, msg->ibm_type, n);
168 
169 	if (msg->ibm_nob < nob) {
170 		CERROR("Short %s: %d(%d)\n",
171 		       kiblnd_msgtype2str(msg->ibm_type), msg->ibm_nob, nob);
172 		return 1;
173 	}
174 
175 	if (!flip)
176 		return 0;
177 
178 	for (i = 0; i < n; i++) {
179 		__swab32s(&rd->rd_frags[i].rf_nob);
180 		__swab64s(&rd->rd_frags[i].rf_addr);
181 	}
182 
183 	return 0;
184 }
185 
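/* Fill in the common wire header of an outgoing message.  ibm_type and
 * ibm_nob must already have been set by the caller; the checksum, covering
 * the whole message, is computed last and only if *kiblnd_tunables.kib_cksum
 * is set. */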
186 void kiblnd_pack_msg(lnet_ni_t *ni, kib_msg_t *msg, int version,
187 		     int credits, lnet_nid_t dstnid, __u64 dststamp)
188 {
189 	kib_net_t *net = ni->ni_data;
190 
191 	/* CAVEAT EMPTOR! all message fields not set here should have been
192 	 * initialised previously. */
193 	msg->ibm_magic    = IBLND_MSG_MAGIC;
194 	msg->ibm_version  = version;
195 	/*   ibm_type */
196 	msg->ibm_credits  = credits;
197 	/*   ibm_nob */
198 	msg->ibm_cksum    = 0;
199 	msg->ibm_srcnid   = ni->ni_nid;
200 	msg->ibm_srcstamp = net->ibn_incarnation;
201 	msg->ibm_dstnid   = dstnid;
202 	msg->ibm_dststamp = dststamp;
203 
204 	if (*kiblnd_tunables.kib_cksum) {
205 		/* NB ibm_cksum zero while computing cksum */
206 		msg->ibm_cksum = kiblnd_cksum(msg, msg->ibm_nob);
207 	}
208 }
209 
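/* Validate an incoming message of 'nob' received bytes: check the magic and
 * version (a byte-swapped magic identifies a peer of opposite endianness),
 * verify the checksum before anything is flipped, then convert the header
 * and the type-specific payload to host byte order. */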
210 int kiblnd_unpack_msg(kib_msg_t *msg, int nob)
211 {
212 	const int hdr_size = offsetof(kib_msg_t, ibm_u);
213 	__u32     msg_cksum;
214 	__u16     version;
215 	int       msg_nob;
216 	int       flip;
217 
218 	/* 6 bytes are enough to have received magic + version */
219 	if (nob < 6) {
220 		CERROR("Short message: %d\n", nob);
221 		return -EPROTO;
222 	}
223 
224 	if (msg->ibm_magic == IBLND_MSG_MAGIC) {
225 		flip = 0;
226 	} else if (msg->ibm_magic == __swab32(IBLND_MSG_MAGIC)) {
227 		flip = 1;
228 	} else {
229 		CERROR("Bad magic: %08x\n", msg->ibm_magic);
230 		return -EPROTO;
231 	}
232 
233 	version = flip ? __swab16(msg->ibm_version) : msg->ibm_version;
234 	if (version != IBLND_MSG_VERSION &&
235 	    version != IBLND_MSG_VERSION_1) {
236 		CERROR("Bad version: %x\n", version);
237 		return -EPROTO;
238 	}
239 
240 	if (nob < hdr_size) {
241 		CERROR("Short message: %d\n", nob);
242 		return -EPROTO;
243 	}
244 
245 	msg_nob = flip ? __swab32(msg->ibm_nob) : msg->ibm_nob;
246 	if (msg_nob > nob) {
247 		CERROR("Short message: got %d, wanted %d\n", nob, msg_nob);
248 		return -EPROTO;
249 	}
250 
251 	/* checksum must be computed with ibm_cksum zero and BEFORE anything
252 	 * gets flipped */
253 	msg_cksum = flip ? __swab32(msg->ibm_cksum) : msg->ibm_cksum;
254 	msg->ibm_cksum = 0;
255 	if (msg_cksum != 0 &&
256 	    msg_cksum != kiblnd_cksum(msg, msg_nob)) {
257 		CERROR("Bad checksum\n");
258 		return -EPROTO;
259 	}
260 
261 	msg->ibm_cksum = msg_cksum;
262 
263 	if (flip) {
264 		/* leave magic unflipped as a clue to peer endianness */
265 		msg->ibm_version = version;
266 		CLASSERT(sizeof(msg->ibm_type) == 1);
267 		CLASSERT(sizeof(msg->ibm_credits) == 1);
268 		msg->ibm_nob     = msg_nob;
269 		__swab64s(&msg->ibm_srcnid);
270 		__swab64s(&msg->ibm_srcstamp);
271 		__swab64s(&msg->ibm_dstnid);
272 		__swab64s(&msg->ibm_dststamp);
273 	}
274 
275 	if (msg->ibm_srcnid == LNET_NID_ANY) {
276 		CERROR("Bad src nid: %s\n", libcfs_nid2str(msg->ibm_srcnid));
277 		return -EPROTO;
278 	}
279 
280 	if (msg_nob < kiblnd_msgtype2size(msg->ibm_type)) {
281 		CERROR("Short %s: %d(%d)\n", kiblnd_msgtype2str(msg->ibm_type),
282 		       msg_nob, kiblnd_msgtype2size(msg->ibm_type));
283 		return -EPROTO;
284 	}
285 
286 	switch (msg->ibm_type) {
287 	default:
288 		CERROR("Unknown message type %x\n", msg->ibm_type);
289 		return -EPROTO;
290 
291 	case IBLND_MSG_NOOP:
292 	case IBLND_MSG_IMMEDIATE:
293 	case IBLND_MSG_PUT_REQ:
294 		break;
295 
296 	case IBLND_MSG_PUT_ACK:
297 	case IBLND_MSG_GET_REQ:
298 		if (kiblnd_unpack_rd(msg, flip))
299 			return -EPROTO;
300 		break;
301 
302 	case IBLND_MSG_PUT_NAK:
303 	case IBLND_MSG_PUT_DONE:
304 	case IBLND_MSG_GET_DONE:
305 		if (flip)
306 			__swab32s(&msg->ibm_u.completion.ibcm_status);
307 		break;
308 
309 	case IBLND_MSG_CONNREQ:
310 	case IBLND_MSG_CONNACK:
311 		if (flip) {
312 			__swab16s(&msg->ibm_u.connparams.ibcp_queue_depth);
313 			__swab16s(&msg->ibm_u.connparams.ibcp_max_frags);
314 			__swab32s(&msg->ibm_u.connparams.ibcp_max_msg_size);
315 		}
316 		break;
317 	}
318 	return 0;
319 }
320 
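/* Allocate and initialise a peer structure on the CPT its NID hashes to.
 * The peer is not added to the peer table here; the caller holds the single
 * initial reference. */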
321 int kiblnd_create_peer(lnet_ni_t *ni, kib_peer_t **peerp, lnet_nid_t nid)
322 {
323 	kib_peer_t	*peer;
324 	kib_net_t	*net = ni->ni_data;
325 	int		cpt = lnet_cpt_of_nid(nid);
326 	unsigned long   flags;
327 
328 	LASSERT(net != NULL);
329 	LASSERT(nid != LNET_NID_ANY);
330 
331 	LIBCFS_CPT_ALLOC(peer, lnet_cpt_table(), cpt, sizeof(*peer));
332 	if (peer == NULL) {
333 		CERROR("Cannot allocate peer\n");
334 		return -ENOMEM;
335 	}
336 
337 	memset(peer, 0, sizeof(*peer));	 /* zero flags etc */
338 
339 	peer->ibp_ni = ni;
340 	peer->ibp_nid = nid;
341 	peer->ibp_error = 0;
342 	peer->ibp_last_alive = 0;
343 	atomic_set(&peer->ibp_refcount, 1);  /* 1 ref for caller */
344 
345 	INIT_LIST_HEAD(&peer->ibp_list);     /* not in the peer table yet */
346 	INIT_LIST_HEAD(&peer->ibp_conns);
347 	INIT_LIST_HEAD(&peer->ibp_tx_queue);
348 
349 	write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
350 
351 	/* always called with a ref on ni, which prevents ni being shutdown */
352 	LASSERT(net->ibn_shutdown == 0);
353 
354 	/* npeers only grows with the global lock held */
355 	atomic_inc(&net->ibn_npeers);
356 
357 	write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
358 
359 	*peerp = peer;
360 	return 0;
361 }
362 
363 void kiblnd_destroy_peer(kib_peer_t *peer)
364 {
365 	kib_net_t *net = peer->ibp_ni->ni_data;
366 
367 	LASSERT(net != NULL);
368 	LASSERT(atomic_read(&peer->ibp_refcount) == 0);
369 	LASSERT(!kiblnd_peer_active(peer));
370 	LASSERT(peer->ibp_connecting == 0);
371 	LASSERT(peer->ibp_accepting == 0);
372 	LASSERT(list_empty(&peer->ibp_conns));
373 	LASSERT(list_empty(&peer->ibp_tx_queue));
374 
375 	LIBCFS_FREE(peer, sizeof(*peer));
376 
377 	/* NB a peer's connections keep a reference on their peer until
378 	 * they are destroyed, so we can be assured that _all_ state to do
379 	 * with this peer has been cleaned up when its refcount drops to
380 	 * zero. */
381 	atomic_dec(&net->ibn_npeers);
382 }
383 
384 kib_peer_t *kiblnd_find_peer_locked(lnet_nid_t nid)
385 {
386 	/* the caller is responsible for accounting the additional reference
387 	 * that this creates */
388 	struct list_head       *peer_list = kiblnd_nid2peerlist(nid);
389 	struct list_head       *tmp;
390 	kib_peer_t       *peer;
391 
392 	list_for_each(tmp, peer_list) {
393 
394 		peer = list_entry(tmp, kib_peer_t, ibp_list);
395 
396 		LASSERT(peer->ibp_connecting > 0 || /* creating conns */
397 			 peer->ibp_accepting > 0 ||
398 			 !list_empty(&peer->ibp_conns));  /* active conn */
399 
400 		if (peer->ibp_nid != nid)
401 			continue;
402 
403 		CDEBUG(D_NET, "got peer [%p] -> %s (%d) version: %x\n",
404 		       peer, libcfs_nid2str(nid),
405 		       atomic_read(&peer->ibp_refcount),
406 		       peer->ibp_version);
407 		return peer;
408 	}
409 	return NULL;
410 }
411 
412 void kiblnd_unlink_peer_locked(kib_peer_t *peer)
413 {
414 	LASSERT(list_empty(&peer->ibp_conns));
415 
416 	LASSERT(kiblnd_peer_active(peer));
417 	list_del_init(&peer->ibp_list);
418 	/* lose peerlist's ref */
419 	kiblnd_peer_decref(peer);
420 }
421 
422 static int kiblnd_get_peer_info(lnet_ni_t *ni, int index,
423 				lnet_nid_t *nidp, int *count)
424 {
425 	kib_peer_t	    *peer;
426 	struct list_head	    *ptmp;
427 	int		    i;
428 	unsigned long	  flags;
429 
430 	read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
431 
432 	for (i = 0; i < kiblnd_data.kib_peer_hash_size; i++) {
433 
434 		list_for_each(ptmp, &kiblnd_data.kib_peers[i]) {
435 
436 			peer = list_entry(ptmp, kib_peer_t, ibp_list);
437 			LASSERT(peer->ibp_connecting > 0 ||
438 				 peer->ibp_accepting > 0 ||
439 				 !list_empty(&peer->ibp_conns));
440 
441 			if (peer->ibp_ni != ni)
442 				continue;
443 
444 			if (index-- > 0)
445 				continue;
446 
447 			*nidp = peer->ibp_nid;
448 			*count = atomic_read(&peer->ibp_refcount);
449 
450 			read_unlock_irqrestore(&kiblnd_data.kib_global_lock,
451 					       flags);
452 			return 0;
453 		}
454 	}
455 
456 	read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
457 	return -ENOENT;
458 }
459 
460 static void kiblnd_del_peer_locked(kib_peer_t *peer)
461 {
462 	struct list_head	   *ctmp;
463 	struct list_head	   *cnxt;
464 	kib_conn_t	   *conn;
465 
466 	if (list_empty(&peer->ibp_conns)) {
467 		kiblnd_unlink_peer_locked(peer);
468 	} else {
469 		list_for_each_safe(ctmp, cnxt, &peer->ibp_conns) {
470 			conn = list_entry(ctmp, kib_conn_t, ibc_list);
471 
472 			kiblnd_close_conn_locked(conn, 0);
473 		}
474 		/* NB closing peer's last conn unlinked it. */
475 	}
476 	/* NB peer now unlinked; might even be freed if the peer table had the
477 	 * last ref on it. */
478 }
479 
480 static int kiblnd_del_peer(lnet_ni_t *ni, lnet_nid_t nid)
481 {
482 	LIST_HEAD(zombies);
483 	struct list_head	    *ptmp;
484 	struct list_head	    *pnxt;
485 	kib_peer_t	    *peer;
486 	int		    lo;
487 	int		    hi;
488 	int		    i;
489 	unsigned long	  flags;
490 	int		    rc = -ENOENT;
491 
492 	write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
493 
494 	if (nid != LNET_NID_ANY) {
495 		lo = hi = kiblnd_nid2peerlist(nid) - kiblnd_data.kib_peers;
496 	} else {
497 		lo = 0;
498 		hi = kiblnd_data.kib_peer_hash_size - 1;
499 	}
500 
501 	for (i = lo; i <= hi; i++) {
502 		list_for_each_safe(ptmp, pnxt, &kiblnd_data.kib_peers[i]) {
503 			peer = list_entry(ptmp, kib_peer_t, ibp_list);
504 			LASSERT(peer->ibp_connecting > 0 ||
505 				 peer->ibp_accepting > 0 ||
506 				 !list_empty(&peer->ibp_conns));
507 
508 			if (peer->ibp_ni != ni)
509 				continue;
510 
511 			if (!(nid == LNET_NID_ANY || peer->ibp_nid == nid))
512 				continue;
513 
514 			if (!list_empty(&peer->ibp_tx_queue)) {
515 				LASSERT(list_empty(&peer->ibp_conns));
516 
517 				list_splice_init(&peer->ibp_tx_queue,
518 						     &zombies);
519 			}
520 
521 			kiblnd_del_peer_locked(peer);
522 			rc = 0;	 /* matched something */
523 		}
524 	}
525 
526 	write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
527 
528 	kiblnd_txlist_done(ni, &zombies, -EIO);
529 
530 	return rc;
531 }
532 
533 static kib_conn_t *kiblnd_get_conn_by_idx(lnet_ni_t *ni, int index)
534 {
535 	kib_peer_t	    *peer;
536 	struct list_head	    *ptmp;
537 	kib_conn_t	    *conn;
538 	struct list_head	    *ctmp;
539 	int		    i;
540 	unsigned long	  flags;
541 
542 	read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
543 
544 	for (i = 0; i < kiblnd_data.kib_peer_hash_size; i++) {
545 		list_for_each(ptmp, &kiblnd_data.kib_peers[i]) {
546 
547 			peer = list_entry(ptmp, kib_peer_t, ibp_list);
548 			LASSERT(peer->ibp_connecting > 0 ||
549 				 peer->ibp_accepting > 0 ||
550 				 !list_empty(&peer->ibp_conns));
551 
552 			if (peer->ibp_ni != ni)
553 				continue;
554 
555 			list_for_each(ctmp, &peer->ibp_conns) {
556 				if (index-- > 0)
557 					continue;
558 
559 				conn = list_entry(ctmp, kib_conn_t,
560 						      ibc_list);
561 				kiblnd_conn_addref(conn);
562 				read_unlock_irqrestore(
563 					&kiblnd_data.kib_global_lock,
564 					flags);
565 				return conn;
566 			}
567 		}
568 	}
569 
570 	read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
571 	return NULL;
572 }
573 
574 int kiblnd_translate_mtu(int value)
575 {
576 	switch (value) {
577 	default:
578 		return -1;
579 	case 0:
580 		return 0;
581 	case 256:
582 		return IB_MTU_256;
583 	case 512:
584 		return IB_MTU_512;
585 	case 1024:
586 		return IB_MTU_1024;
587 	case 2048:
588 		return IB_MTU_2048;
589 	case 4096:
590 		return IB_MTU_4096;
591 	}
592 }
593 
594 static void kiblnd_setup_mtu_locked(struct rdma_cm_id *cmid)
595 {
596 	int	   mtu;
597 
598 	/* XXX There is no path record for iWARP, set by netdev->change_mtu? */
599 	if (cmid->route.path_rec == NULL)
600 		return;
601 
602 	mtu = kiblnd_translate_mtu(*kiblnd_tunables.kib_ib_mtu);
603 	LASSERT(mtu >= 0);
604 	if (mtu != 0)
605 		cmid->route.path_rec->mtu = mtu;
606 }
607 
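/* Pick a CQ completion vector for this connection by hashing the peer NID
 * onto one of the CPUs in the connection's CPT, spreading completions
 * across the device's available vectors. */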
608 static int kiblnd_get_completion_vector(kib_conn_t *conn, int cpt)
609 {
610 	cpumask_t	*mask;
611 	int		vectors;
612 	int		off;
613 	int		i;
614 	lnet_nid_t	nid = conn->ibc_peer->ibp_nid;
615 
616 	vectors = conn->ibc_cmid->device->num_comp_vectors;
617 	if (vectors <= 1)
618 		return 0;
619 
620 	mask = cfs_cpt_cpumask(lnet_cpt_table(), cpt);
621 	if (mask == NULL)
622 		return 0;
623 
624 	/* hash NID to CPU id in this partition... */
625 	off = do_div(nid, cpumask_weight(mask));
626 	for_each_cpu(i, mask) {
627 		if (off-- == 0)
628 			return i % vectors;
629 	}
630 
631 	LBUG();
632 	return 1;
633 }
634 
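/* Create a connection to 'peer': allocate and DMA-map the receive buffers,
 * create the CQ and the RC QP on 'cmid', and post the initial receives.
 * Returns NULL on failure.  See the CAVEAT EMPTOR below for the ownership
 * rules on 'peer' and 'cmid'. */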
635 kib_conn_t *kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid,
636 				int state, int version)
637 {
638 	/* CAVEAT EMPTOR:
639 	 * If the new conn is created successfully it takes over the caller's
640 	 * ref on 'peer'.  It also "owns" 'cmid' and destroys it when it itself
641 	 * is destroyed.  On failure, the caller's ref on 'peer' remains and
642 	 * she must dispose of 'cmid'.  (Actually I'd block forever if I tried
643 	 * to destroy 'cmid' here since I'm called from the CM which still has
644 	 * its ref on 'cmid'). */
645 	rwlock_t		*glock = &kiblnd_data.kib_global_lock;
646 	kib_net_t	      *net = peer->ibp_ni->ni_data;
647 	kib_dev_t	      *dev;
648 	struct ib_qp_init_attr *init_qp_attr;
649 	struct kib_sched_info	*sched;
650 	kib_conn_t		*conn;
651 	struct ib_cq		*cq;
652 	unsigned long		flags;
653 	int			cpt;
654 	int			rc;
655 	int			i;
656 
657 	LASSERT(net != NULL);
658 	LASSERT(!in_interrupt());
659 
660 	dev = net->ibn_dev;
661 
662 	cpt = lnet_cpt_of_nid(peer->ibp_nid);
663 	sched = kiblnd_data.kib_scheds[cpt];
664 
665 	LASSERT(sched->ibs_nthreads > 0);
666 
667 	LIBCFS_CPT_ALLOC(init_qp_attr, lnet_cpt_table(), cpt,
668 			 sizeof(*init_qp_attr));
669 	if (init_qp_attr == NULL) {
670 		CERROR("Can't allocate qp_attr for %s\n",
671 		       libcfs_nid2str(peer->ibp_nid));
672 		goto failed_0;
673 	}
674 
675 	LIBCFS_CPT_ALLOC(conn, lnet_cpt_table(), cpt, sizeof(*conn));
676 	if (conn == NULL) {
677 		CERROR("Can't allocate connection for %s\n",
678 		       libcfs_nid2str(peer->ibp_nid));
679 		goto failed_1;
680 	}
681 
682 	conn->ibc_state = IBLND_CONN_INIT;
683 	conn->ibc_version = version;
684 	conn->ibc_peer = peer;		  /* I take the caller's ref */
685 	cmid->context = conn;		   /* for future CM callbacks */
686 	conn->ibc_cmid = cmid;
687 
688 	INIT_LIST_HEAD(&conn->ibc_early_rxs);
689 	INIT_LIST_HEAD(&conn->ibc_tx_noops);
690 	INIT_LIST_HEAD(&conn->ibc_tx_queue);
691 	INIT_LIST_HEAD(&conn->ibc_tx_queue_rsrvd);
692 	INIT_LIST_HEAD(&conn->ibc_tx_queue_nocred);
693 	INIT_LIST_HEAD(&conn->ibc_active_txs);
694 	spin_lock_init(&conn->ibc_lock);
695 
696 	LIBCFS_CPT_ALLOC(conn->ibc_connvars, lnet_cpt_table(), cpt,
697 			 sizeof(*conn->ibc_connvars));
698 	if (conn->ibc_connvars == NULL) {
699 		CERROR("Can't allocate in-progress connection state\n");
700 		goto failed_2;
701 	}
702 
703 	write_lock_irqsave(glock, flags);
704 	if (dev->ibd_failover) {
705 		write_unlock_irqrestore(glock, flags);
706 		CERROR("%s: failover in progress\n", dev->ibd_ifname);
707 		goto failed_2;
708 	}
709 
710 	if (dev->ibd_hdev->ibh_ibdev != cmid->device) {
711 		/* wakeup failover thread and teardown connection */
712 		if (kiblnd_dev_can_failover(dev)) {
713 			list_add_tail(&dev->ibd_fail_list,
714 				      &kiblnd_data.kib_failed_devs);
715 			wake_up(&kiblnd_data.kib_failover_waitq);
716 		}
717 
718 		write_unlock_irqrestore(glock, flags);
719 		CERROR("cmid HCA(%s), kib_dev(%s) need failover\n",
720 		       cmid->device->name, dev->ibd_ifname);
721 		goto failed_2;
722 	}
723 
724 	kiblnd_hdev_addref_locked(dev->ibd_hdev);
725 	conn->ibc_hdev = dev->ibd_hdev;
726 
727 	kiblnd_setup_mtu_locked(cmid);
728 
729 	write_unlock_irqrestore(glock, flags);
730 
731 	LIBCFS_CPT_ALLOC(conn->ibc_rxs, lnet_cpt_table(), cpt,
732 			 IBLND_RX_MSGS(version) * sizeof(kib_rx_t));
733 	if (conn->ibc_rxs == NULL) {
734 		CERROR("Cannot allocate RX buffers\n");
735 		goto failed_2;
736 	}
737 
738 	rc = kiblnd_alloc_pages(&conn->ibc_rx_pages, cpt,
739 				IBLND_RX_MSG_PAGES(version));
740 	if (rc != 0)
741 		goto failed_2;
742 
743 	kiblnd_map_rx_descs(conn);
744 
745 	cq = ib_create_cq(cmid->device,
746 			  kiblnd_cq_completion, kiblnd_cq_event, conn,
747 			  IBLND_CQ_ENTRIES(version),
748 			  kiblnd_get_completion_vector(conn, cpt));
749 	if (IS_ERR(cq)) {
750 		CERROR("Can't create CQ: %ld, cqe: %d\n",
751 		       PTR_ERR(cq), IBLND_CQ_ENTRIES(version));
752 		goto failed_2;
753 	}
754 
755 	conn->ibc_cq = cq;
756 
757 	rc = ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
758 	if (rc != 0) {
759 		CERROR("Can't request completion notification: %d\n", rc);
760 		goto failed_2;
761 	}
762 
763 	init_qp_attr->event_handler = kiblnd_qp_event;
764 	init_qp_attr->qp_context = conn;
765 	init_qp_attr->cap.max_send_wr = IBLND_SEND_WRS(version);
766 	init_qp_attr->cap.max_recv_wr = IBLND_RECV_WRS(version);
767 	init_qp_attr->cap.max_send_sge = 1;
768 	init_qp_attr->cap.max_recv_sge = 1;
769 	init_qp_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
770 	init_qp_attr->qp_type = IB_QPT_RC;
771 	init_qp_attr->send_cq = cq;
772 	init_qp_attr->recv_cq = cq;
773 
774 	conn->ibc_sched = sched;
775 
776 	rc = rdma_create_qp(cmid, conn->ibc_hdev->ibh_pd, init_qp_attr);
777 	if (rc != 0) {
778 		CERROR("Can't create QP: %d, send_wr: %d, recv_wr: %d\n",
779 		       rc, init_qp_attr->cap.max_send_wr,
780 		       init_qp_attr->cap.max_recv_wr);
781 		goto failed_2;
782 	}
783 
784 	LIBCFS_FREE(init_qp_attr, sizeof(*init_qp_attr));
785 
786 	/* 1 ref for caller and each rxmsg */
787 	atomic_set(&conn->ibc_refcount, 1 + IBLND_RX_MSGS(version));
788 	conn->ibc_nrx = IBLND_RX_MSGS(version);
789 
790 	/* post receives */
791 	for (i = 0; i < IBLND_RX_MSGS(version); i++) {
792 		rc = kiblnd_post_rx(&conn->ibc_rxs[i],
793 				    IBLND_POSTRX_NO_CREDIT);
794 		if (rc != 0) {
795 			CERROR("Can't post rxmsg: %d\n", rc);
796 
797 			/* Make posted receives complete */
798 			kiblnd_abort_receives(conn);
799 
800 			/* correct # of posted buffers
801 			 * NB locking needed now I'm racing with completion */
802 			spin_lock_irqsave(&sched->ibs_lock, flags);
803 			conn->ibc_nrx -= IBLND_RX_MSGS(version) - i;
804 			spin_unlock_irqrestore(&sched->ibs_lock, flags);
805 
806 			/* cmid will be destroyed by the CM (OFED) after cm_callback
807 			 * returns, so we can't refer to it anymore
808 			 * (e.g. in kiblnd_connd()->kiblnd_destroy_conn) */
809 			rdma_destroy_qp(conn->ibc_cmid);
810 			conn->ibc_cmid = NULL;
811 
812 			/* Drop my own and unused rxbuffer refcounts */
813 			while (i++ <= IBLND_RX_MSGS(version))
814 				kiblnd_conn_decref(conn);
815 
816 			return NULL;
817 		}
818 	}
819 
820 	/* Init successful! */
821 	LASSERT(state == IBLND_CONN_ACTIVE_CONNECT ||
822 		 state == IBLND_CONN_PASSIVE_WAIT);
823 	conn->ibc_state = state;
824 
825 	/* 1 more conn */
826 	atomic_inc(&net->ibn_nconns);
827 	return conn;
828 
829  failed_2:
830 	kiblnd_destroy_conn(conn);
831  failed_1:
832 	LIBCFS_FREE(init_qp_attr, sizeof(*init_qp_attr));
833  failed_0:
834 	return NULL;
835 }
836 
837 void kiblnd_destroy_conn(kib_conn_t *conn)
838 {
839 	struct rdma_cm_id *cmid = conn->ibc_cmid;
840 	kib_peer_t	*peer = conn->ibc_peer;
841 	int		rc;
842 
843 	LASSERT(!in_interrupt());
844 	LASSERT(atomic_read(&conn->ibc_refcount) == 0);
845 	LASSERT(list_empty(&conn->ibc_early_rxs));
846 	LASSERT(list_empty(&conn->ibc_tx_noops));
847 	LASSERT(list_empty(&conn->ibc_tx_queue));
848 	LASSERT(list_empty(&conn->ibc_tx_queue_rsrvd));
849 	LASSERT(list_empty(&conn->ibc_tx_queue_nocred));
850 	LASSERT(list_empty(&conn->ibc_active_txs));
851 	LASSERT(conn->ibc_noops_posted == 0);
852 	LASSERT(conn->ibc_nsends_posted == 0);
853 
854 	switch (conn->ibc_state) {
855 	default:
856 		/* conn must be completely disengaged from the network */
857 		LBUG();
858 
859 	case IBLND_CONN_DISCONNECTED:
860 		/* connvars should have been freed already */
861 		LASSERT(conn->ibc_connvars == NULL);
862 		break;
863 
864 	case IBLND_CONN_INIT:
865 		break;
866 	}
867 
868 	/* conn->ibc_cmid might be destroyed by CM already */
869 	if (cmid != NULL && cmid->qp != NULL)
870 		rdma_destroy_qp(cmid);
871 
872 	if (conn->ibc_cq != NULL) {
873 		rc = ib_destroy_cq(conn->ibc_cq);
874 		if (rc != 0)
875 			CWARN("Error destroying CQ: %d\n", rc);
876 	}
877 
878 	if (conn->ibc_rx_pages != NULL)
879 		kiblnd_unmap_rx_descs(conn);
880 
881 	if (conn->ibc_rxs != NULL) {
882 		LIBCFS_FREE(conn->ibc_rxs,
883 			    IBLND_RX_MSGS(conn->ibc_version)
884 			      * sizeof(kib_rx_t));
885 	}
886 
887 	if (conn->ibc_connvars != NULL)
888 		LIBCFS_FREE(conn->ibc_connvars, sizeof(*conn->ibc_connvars));
889 
890 	if (conn->ibc_hdev != NULL)
891 		kiblnd_hdev_decref(conn->ibc_hdev);
892 
893 	/* See CAVEAT EMPTOR above in kiblnd_create_conn */
894 	if (conn->ibc_state != IBLND_CONN_INIT) {
895 		kib_net_t *net = peer->ibp_ni->ni_data;
896 
897 		kiblnd_peer_decref(peer);
898 		rdma_destroy_id(cmid);
899 		atomic_dec(&net->ibn_nconns);
900 	}
901 
902 	LIBCFS_FREE(conn, sizeof(*conn));
903 }
904 
905 int kiblnd_close_peer_conns_locked(kib_peer_t *peer, int why)
906 {
907 	kib_conn_t	     *conn;
908 	struct list_head	     *ctmp;
909 	struct list_head	     *cnxt;
910 	int		     count = 0;
911 
912 	list_for_each_safe(ctmp, cnxt, &peer->ibp_conns) {
913 		conn = list_entry(ctmp, kib_conn_t, ibc_list);
914 
915 		CDEBUG(D_NET, "Closing conn -> %s, version: %x, reason: %d\n",
916 		       libcfs_nid2str(peer->ibp_nid),
917 		       conn->ibc_version, why);
918 
919 		kiblnd_close_conn_locked(conn, why);
920 		count++;
921 	}
922 
923 	return count;
924 }
925 
926 int kiblnd_close_stale_conns_locked(kib_peer_t *peer,
927 				     int version, __u64 incarnation)
928 {
929 	kib_conn_t	     *conn;
930 	struct list_head	     *ctmp;
931 	struct list_head	     *cnxt;
932 	int		     count = 0;
933 
934 	list_for_each_safe(ctmp, cnxt, &peer->ibp_conns) {
935 		conn = list_entry(ctmp, kib_conn_t, ibc_list);
936 
937 		if (conn->ibc_version     == version &&
938 		    conn->ibc_incarnation == incarnation)
939 			continue;
940 
941 		CDEBUG(D_NET,
942 		       "Closing stale conn -> %s version: %x, incarnation:%#llx(%x, %#llx)\n",
943 		       libcfs_nid2str(peer->ibp_nid),
944 		       conn->ibc_version, conn->ibc_incarnation,
945 		       version, incarnation);
946 
947 		kiblnd_close_conn_locked(conn, -ESTALE);
948 		count++;
949 	}
950 
951 	return count;
952 }
953 
954 static int kiblnd_close_matching_conns(lnet_ni_t *ni, lnet_nid_t nid)
955 {
956 	kib_peer_t	     *peer;
957 	struct list_head	     *ptmp;
958 	struct list_head	     *pnxt;
959 	int		     lo;
960 	int		     hi;
961 	int		     i;
962 	unsigned long	   flags;
963 	int		     count = 0;
964 
965 	write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
966 
967 	if (nid != LNET_NID_ANY)
968 		lo = hi = kiblnd_nid2peerlist(nid) - kiblnd_data.kib_peers;
969 	else {
970 		lo = 0;
971 		hi = kiblnd_data.kib_peer_hash_size - 1;
972 	}
973 
974 	for (i = lo; i <= hi; i++) {
975 		list_for_each_safe(ptmp, pnxt, &kiblnd_data.kib_peers[i]) {
976 
977 			peer = list_entry(ptmp, kib_peer_t, ibp_list);
978 			LASSERT(peer->ibp_connecting > 0 ||
979 				 peer->ibp_accepting > 0 ||
980 				 !list_empty(&peer->ibp_conns));
981 
982 			if (peer->ibp_ni != ni)
983 				continue;
984 
985 			if (!(nid == LNET_NID_ANY || nid == peer->ibp_nid))
986 				continue;
987 
988 			count += kiblnd_close_peer_conns_locked(peer, 0);
989 		}
990 	}
991 
992 	write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
993 
994 	/* wildcards always succeed */
995 	if (nid == LNET_NID_ANY)
996 		return 0;
997 
998 	return (count == 0) ? -ENOENT : 0;
999 }
1000 
1001 int kiblnd_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg)
1002 {
1003 	struct libcfs_ioctl_data *data = arg;
1004 	int		       rc = -EINVAL;
1005 
1006 	switch (cmd) {
1007 	case IOC_LIBCFS_GET_PEER: {
1008 		lnet_nid_t   nid = 0;
1009 		int	  count = 0;
1010 
1011 		rc = kiblnd_get_peer_info(ni, data->ioc_count,
1012 					  &nid, &count);
1013 		data->ioc_nid    = nid;
1014 		data->ioc_count  = count;
1015 		break;
1016 	}
1017 
1018 	case IOC_LIBCFS_DEL_PEER: {
1019 		rc = kiblnd_del_peer(ni, data->ioc_nid);
1020 		break;
1021 	}
1022 	case IOC_LIBCFS_GET_CONN: {
1023 		kib_conn_t *conn;
1024 
1025 		rc = 0;
1026 		conn = kiblnd_get_conn_by_idx(ni, data->ioc_count);
1027 		if (conn == NULL) {
1028 			rc = -ENOENT;
1029 			break;
1030 		}
1031 
1032 		LASSERT(conn->ibc_cmid != NULL);
1033 		data->ioc_nid = conn->ibc_peer->ibp_nid;
1034 		if (conn->ibc_cmid->route.path_rec == NULL)
1035 			data->ioc_u32[0] = 0; /* iWarp has no path MTU */
1036 		else
1037 			data->ioc_u32[0] =
1038 			ib_mtu_enum_to_int(conn->ibc_cmid->route.path_rec->mtu);
1039 		kiblnd_conn_decref(conn);
1040 		break;
1041 	}
1042 	case IOC_LIBCFS_CLOSE_CONNECTION: {
1043 		rc = kiblnd_close_matching_conns(ni, data->ioc_nid);
1044 		break;
1045 	}
1046 
1047 	default:
1048 		break;
1049 	}
1050 
1051 	return rc;
1052 }
1053 
1054 void kiblnd_query(lnet_ni_t *ni, lnet_nid_t nid, unsigned long *when)
1055 {
1056 	unsigned long	last_alive = 0;
1057 	unsigned long	now = cfs_time_current();
1058 	rwlock_t	*glock = &kiblnd_data.kib_global_lock;
1059 	kib_peer_t	*peer;
1060 	unsigned long	flags;
1061 
1062 	read_lock_irqsave(glock, flags);
1063 
1064 	peer = kiblnd_find_peer_locked(nid);
1065 	if (peer != NULL) {
1066 		LASSERT(peer->ibp_connecting > 0 || /* creating conns */
1067 			 peer->ibp_accepting > 0 ||
1068 			 !list_empty(&peer->ibp_conns));  /* active conn */
1069 		last_alive = peer->ibp_last_alive;
1070 	}
1071 
1072 	read_unlock_irqrestore(glock, flags);
1073 
1074 	if (last_alive != 0)
1075 		*when = last_alive;
1076 
1077 	/* peer is not persistent in hash, trigger peer creation
1078 	 * and connection establishment with a NULL tx */
1079 	if (peer == NULL)
1080 		kiblnd_launch_tx(ni, NULL, nid);
1081 
1082 	CDEBUG(D_NET, "Peer %s %p, alive %ld secs ago\n",
1083 	       libcfs_nid2str(nid), peer,
1084 	       last_alive ? cfs_duration_sec(now - last_alive) : -1);
1085 }
1086 
1087 void kiblnd_free_pages(kib_pages_t *p)
1088 {
1089 	int	npages = p->ibp_npages;
1090 	int	i;
1091 
1092 	for (i = 0; i < npages; i++) {
1093 		if (p->ibp_pages[i] != NULL)
1094 			__free_page(p->ibp_pages[i]);
1095 	}
1096 
1097 	LIBCFS_FREE(p, offsetof(kib_pages_t, ibp_pages[npages]));
1098 }
1099 
1100 int kiblnd_alloc_pages(kib_pages_t **pp, int cpt, int npages)
1101 {
1102 	kib_pages_t	*p;
1103 	int		i;
1104 
1105 	LIBCFS_CPT_ALLOC(p, lnet_cpt_table(), cpt,
1106 			 offsetof(kib_pages_t, ibp_pages[npages]));
1107 	if (p == NULL) {
1108 		CERROR("Can't allocate descriptor for %d pages\n", npages);
1109 		return -ENOMEM;
1110 	}
1111 
1112 	memset(p, 0, offsetof(kib_pages_t, ibp_pages[npages]));
1113 	p->ibp_npages = npages;
1114 
1115 	for (i = 0; i < npages; i++) {
1116 		p->ibp_pages[i] = alloc_pages_node(
1117 				    cfs_cpt_spread_node(lnet_cpt_table(), cpt),
1118 				    GFP_NOFS, 0);
1119 		if (p->ibp_pages[i] == NULL) {
1120 			CERROR("Can't allocate page %d of %d\n", i, npages);
1121 			kiblnd_free_pages(p);
1122 			return -ENOMEM;
1123 		}
1124 	}
1125 
1126 	*pp = p;
1127 	return 0;
1128 }
1129 
1130 void kiblnd_unmap_rx_descs(kib_conn_t *conn)
1131 {
1132 	kib_rx_t *rx;
1133 	int       i;
1134 
1135 	LASSERT(conn->ibc_rxs != NULL);
1136 	LASSERT(conn->ibc_hdev != NULL);
1137 
1138 	for (i = 0; i < IBLND_RX_MSGS(conn->ibc_version); i++) {
1139 		rx = &conn->ibc_rxs[i];
1140 
1141 		LASSERT(rx->rx_nob >= 0); /* not posted */
1142 
1143 		kiblnd_dma_unmap_single(conn->ibc_hdev->ibh_ibdev,
1144 					KIBLND_UNMAP_ADDR(rx, rx_msgunmap,
1145 							  rx->rx_msgaddr),
1146 					IBLND_MSG_SIZE, DMA_FROM_DEVICE);
1147 	}
1148 
1149 	kiblnd_free_pages(conn->ibc_rx_pages);
1150 
1151 	conn->ibc_rx_pages = NULL;
1152 }
1153 
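/* DMA-map the connection's receive buffers: IBLND_MSG_SIZE message slots
 * are packed back to back into the pages allocated by kiblnd_alloc_pages(). */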
1154 void kiblnd_map_rx_descs(kib_conn_t *conn)
1155 {
1156 	kib_rx_t       *rx;
1157 	struct page    *pg;
1158 	int	     pg_off;
1159 	int	     ipg;
1160 	int	     i;
1161 
1162 	for (pg_off = ipg = i = 0;
1163 	     i < IBLND_RX_MSGS(conn->ibc_version); i++) {
1164 		pg = conn->ibc_rx_pages->ibp_pages[ipg];
1165 		rx = &conn->ibc_rxs[i];
1166 
1167 		rx->rx_conn = conn;
1168 		rx->rx_msg = (kib_msg_t *)(((char *)page_address(pg)) + pg_off);
1169 
1170 		rx->rx_msgaddr = kiblnd_dma_map_single(conn->ibc_hdev->ibh_ibdev,
1171 						       rx->rx_msg,
1172 						       IBLND_MSG_SIZE,
1173 						       DMA_FROM_DEVICE);
1174 		LASSERT(!kiblnd_dma_mapping_error(conn->ibc_hdev->ibh_ibdev,
1175 						   rx->rx_msgaddr));
1176 		KIBLND_UNMAP_ADDR_SET(rx, rx_msgunmap, rx->rx_msgaddr);
1177 
1178 		CDEBUG(D_NET, "rx %d: %p %#llx(%#llx)\n",
1179 		       i, rx->rx_msg, rx->rx_msgaddr,
1180 		       lnet_page2phys(pg) + pg_off);
1181 
1182 		pg_off += IBLND_MSG_SIZE;
1183 		LASSERT(pg_off <= PAGE_SIZE);
1184 
1185 		if (pg_off == PAGE_SIZE) {
1186 			pg_off = 0;
1187 			ipg++;
1188 			LASSERT(ipg <= IBLND_RX_MSG_PAGES(conn->ibc_version));
1189 		}
1190 	}
1191 }
1192 
1193 static void kiblnd_unmap_tx_pool(kib_tx_pool_t *tpo)
1194 {
1195 	kib_hca_dev_t  *hdev = tpo->tpo_hdev;
1196 	kib_tx_t       *tx;
1197 	int	     i;
1198 
1199 	LASSERT(tpo->tpo_pool.po_allocated == 0);
1200 
1201 	if (hdev == NULL)
1202 		return;
1203 
1204 	for (i = 0; i < tpo->tpo_pool.po_size; i++) {
1205 		tx = &tpo->tpo_tx_descs[i];
1206 		kiblnd_dma_unmap_single(hdev->ibh_ibdev,
1207 					KIBLND_UNMAP_ADDR(tx, tx_msgunmap,
1208 							  tx->tx_msgaddr),
1209 					IBLND_MSG_SIZE, DMA_TO_DEVICE);
1210 	}
1211 
1212 	kiblnd_hdev_decref(hdev);
1213 	tpo->tpo_hdev = NULL;
1214 }
1215 
1216 static kib_hca_dev_t *kiblnd_current_hdev(kib_dev_t *dev)
1217 {
1218 	kib_hca_dev_t *hdev;
1219 	unsigned long  flags;
1220 	int	    i = 0;
1221 
1222 	read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
1223 	while (dev->ibd_failover) {
1224 		read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
1225 		if (i++ % 50 == 0)
1226 			CDEBUG(D_NET, "%s: Wait for failover\n",
1227 			       dev->ibd_ifname);
1228 		schedule_timeout(cfs_time_seconds(1) / 100);
1229 
1230 		read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
1231 	}
1232 
1233 	kiblnd_hdev_addref_locked(dev->ibd_hdev);
1234 	hdev = dev->ibd_hdev;
1235 
1236 	read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
1237 
1238 	return hdev;
1239 }
1240 
1241 static void kiblnd_map_tx_pool(kib_tx_pool_t *tpo)
1242 {
1243 	kib_pages_t    *txpgs = tpo->tpo_tx_pages;
1244 	kib_pool_t     *pool  = &tpo->tpo_pool;
1245 	kib_net_t      *net   = pool->po_owner->ps_net;
1246 	kib_dev_t      *dev;
1247 	struct page    *page;
1248 	kib_tx_t       *tx;
1249 	int	     page_offset;
1250 	int	     ipage;
1251 	int	     i;
1252 
1253 	LASSERT(net != NULL);
1254 
1255 	dev = net->ibn_dev;
1256 
1257 	/* pre-mapped messages are not bigger than 1 page */
1258 	CLASSERT(IBLND_MSG_SIZE <= PAGE_SIZE);
1259 
1260 	/* No fancy arithmetic when we do the buffer calculations */
1261 	CLASSERT(PAGE_SIZE % IBLND_MSG_SIZE == 0);
1262 
1263 	tpo->tpo_hdev = kiblnd_current_hdev(dev);
1264 
1265 	for (ipage = page_offset = i = 0; i < pool->po_size; i++) {
1266 		page = txpgs->ibp_pages[ipage];
1267 		tx = &tpo->tpo_tx_descs[i];
1268 
1269 		tx->tx_msg = (kib_msg_t *)(((char *)page_address(page)) +
1270 					   page_offset);
1271 
1272 		tx->tx_msgaddr = kiblnd_dma_map_single(
1273 			tpo->tpo_hdev->ibh_ibdev, tx->tx_msg,
1274 			IBLND_MSG_SIZE, DMA_TO_DEVICE);
1275 		LASSERT(!kiblnd_dma_mapping_error(tpo->tpo_hdev->ibh_ibdev,
1276 						   tx->tx_msgaddr));
1277 		KIBLND_UNMAP_ADDR_SET(tx, tx_msgunmap, tx->tx_msgaddr);
1278 
1279 		list_add(&tx->tx_list, &pool->po_free_list);
1280 
1281 		page_offset += IBLND_MSG_SIZE;
1282 		LASSERT(page_offset <= PAGE_SIZE);
1283 
1284 		if (page_offset == PAGE_SIZE) {
1285 			page_offset = 0;
1286 			ipage++;
1287 			LASSERT(ipage <= txpgs->ibp_npages);
1288 		}
1289 	}
1290 }
1291 
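/* Return the pre-registered DMA MR covering [addr, addr + size), or NULL if
 * the range is not covered by a single MR. */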
1292 struct ib_mr *kiblnd_find_dma_mr(kib_hca_dev_t *hdev, __u64 addr, __u64 size)
1293 {
1294 	__u64   index;
1295 
1296 	LASSERT(hdev->ibh_mrs[0] != NULL);
1297 
1298 	if (hdev->ibh_nmrs == 1)
1299 		return hdev->ibh_mrs[0];
1300 
1301 	index = addr >> hdev->ibh_mr_shift;
1302 
1303 	if (index <  hdev->ibh_nmrs &&
1304 	    index == ((addr + size - 1) >> hdev->ibh_mr_shift))
1305 		return hdev->ibh_mrs[index];
1306 
1307 	return NULL;
1308 }
1309 
1310 struct ib_mr *kiblnd_find_rd_dma_mr(kib_hca_dev_t *hdev, kib_rdma_desc_t *rd)
1311 {
1312 	struct ib_mr *prev_mr;
1313 	struct ib_mr *mr;
1314 	int	   i;
1315 
1316 	LASSERT(hdev->ibh_mrs[0] != NULL);
1317 
1318 	if (*kiblnd_tunables.kib_map_on_demand > 0 &&
1319 	    *kiblnd_tunables.kib_map_on_demand <= rd->rd_nfrags)
1320 		return NULL;
1321 
1322 	if (hdev->ibh_nmrs == 1)
1323 		return hdev->ibh_mrs[0];
1324 
1325 	for (i = 0, mr = prev_mr = NULL;
1326 	     i < rd->rd_nfrags; i++) {
1327 		mr = kiblnd_find_dma_mr(hdev,
1328 					rd->rd_frags[i].rf_addr,
1329 					rd->rd_frags[i].rf_nob);
1330 		if (prev_mr == NULL)
1331 			prev_mr = mr;
1332 
1333 		if (mr == NULL || prev_mr != mr) {
1334 			/* Can't be covered by a single MR */
1335 			mr = NULL;
1336 			break;
1337 		}
1338 	}
1339 
1340 	return mr;
1341 }
1342 
1343 static void kiblnd_destroy_fmr_pool(kib_fmr_pool_t *pool)
1344 {
1345 	LASSERT(pool->fpo_map_count == 0);
1346 
1347 	if (pool->fpo_fmr_pool != NULL)
1348 		ib_destroy_fmr_pool(pool->fpo_fmr_pool);
1349 
1350 	if (pool->fpo_hdev != NULL)
1351 		kiblnd_hdev_decref(pool->fpo_hdev);
1352 
1353 	LIBCFS_FREE(pool, sizeof(kib_fmr_pool_t));
1354 }
1355 
1356 static void kiblnd_destroy_fmr_pool_list(struct list_head *head)
1357 {
1358 	kib_fmr_pool_t *pool;
1359 
1360 	while (!list_empty(head)) {
1361 		pool = list_entry(head->next, kib_fmr_pool_t, fpo_list);
1362 		list_del(&pool->fpo_list);
1363 		kiblnd_destroy_fmr_pool(pool);
1364 	}
1365 }
1366 
1367 static int kiblnd_fmr_pool_size(int ncpts)
1368 {
1369 	int size = *kiblnd_tunables.kib_fmr_pool_size / ncpts;
1370 
1371 	return max(IBLND_FMR_POOL, size);
1372 }
1373 
1374 static int kiblnd_fmr_flush_trigger(int ncpts)
1375 {
1376 	int size = *kiblnd_tunables.kib_fmr_flush_trigger / ncpts;
1377 
1378 	return max(IBLND_FMR_POOL_FLUSH, size);
1379 }
1380 
1381 static int kiblnd_create_fmr_pool(kib_fmr_poolset_t *fps,
1382 				  kib_fmr_pool_t **pp_fpo)
1383 {
1384 	/* FMR pool for RDMA */
1385 	kib_dev_t	       *dev = fps->fps_net->ibn_dev;
1386 	kib_fmr_pool_t	  *fpo;
1387 	struct ib_fmr_pool_param param = {
1388 		.max_pages_per_fmr = LNET_MAX_PAYLOAD/PAGE_SIZE,
1389 		.page_shift	= PAGE_SHIFT,
1390 		.access	    = (IB_ACCESS_LOCAL_WRITE |
1391 				      IB_ACCESS_REMOTE_WRITE),
1392 		.pool_size	   = fps->fps_pool_size,
1393 		.dirty_watermark   = fps->fps_flush_trigger,
1394 		.flush_function    = NULL,
1395 		.flush_arg	 = NULL,
1396 		.cache	     = !!*kiblnd_tunables.kib_fmr_cache};
1397 	int rc;
1398 
1399 	LIBCFS_CPT_ALLOC(fpo, lnet_cpt_table(), fps->fps_cpt, sizeof(*fpo));
1400 	if (fpo == NULL)
1401 		return -ENOMEM;
1402 
1403 	fpo->fpo_hdev = kiblnd_current_hdev(dev);
1404 
1405 	fpo->fpo_fmr_pool = ib_create_fmr_pool(fpo->fpo_hdev->ibh_pd, &param);
1406 	if (IS_ERR(fpo->fpo_fmr_pool)) {
1407 		rc = PTR_ERR(fpo->fpo_fmr_pool);
1408 		CERROR("Failed to create FMR pool: %d\n", rc);
1409 
1410 		kiblnd_hdev_decref(fpo->fpo_hdev);
1411 		LIBCFS_FREE(fpo, sizeof(kib_fmr_pool_t));
1412 		return rc;
1413 	}
1414 
1415 	fpo->fpo_deadline = cfs_time_shift(IBLND_POOL_DEADLINE);
1416 	fpo->fpo_owner    = fps;
1417 	*pp_fpo = fpo;
1418 
1419 	return 0;
1420 }
1421 
1422 static void kiblnd_fail_fmr_poolset(kib_fmr_poolset_t *fps,
1423 				    struct list_head *zombies)
1424 {
1425 	if (fps->fps_net == NULL) /* initialized? */
1426 		return;
1427 
1428 	spin_lock(&fps->fps_lock);
1429 
1430 	while (!list_empty(&fps->fps_pool_list)) {
1431 		kib_fmr_pool_t *fpo = list_entry(fps->fps_pool_list.next,
1432 						 kib_fmr_pool_t, fpo_list);
1433 		fpo->fpo_failed = 1;
1434 		list_del(&fpo->fpo_list);
1435 		if (fpo->fpo_map_count == 0)
1436 			list_add(&fpo->fpo_list, zombies);
1437 		else
1438 			list_add(&fpo->fpo_list, &fps->fps_failed_pool_list);
1439 	}
1440 
1441 	spin_unlock(&fps->fps_lock);
1442 }
1443 
1444 static void kiblnd_fini_fmr_poolset(kib_fmr_poolset_t *fps)
1445 {
1446 	if (fps->fps_net != NULL) { /* initialized? */
1447 		kiblnd_destroy_fmr_pool_list(&fps->fps_failed_pool_list);
1448 		kiblnd_destroy_fmr_pool_list(&fps->fps_pool_list);
1449 	}
1450 }
1451 
1452 static int kiblnd_init_fmr_poolset(kib_fmr_poolset_t *fps, int cpt,
1453 				   kib_net_t *net, int pool_size,
1454 				   int flush_trigger)
1455 {
1456 	kib_fmr_pool_t *fpo;
1457 	int	     rc;
1458 
1459 	memset(fps, 0, sizeof(kib_fmr_poolset_t));
1460 
1461 	fps->fps_net = net;
1462 	fps->fps_cpt = cpt;
1463 	fps->fps_pool_size = pool_size;
1464 	fps->fps_flush_trigger = flush_trigger;
1465 	spin_lock_init(&fps->fps_lock);
1466 	INIT_LIST_HEAD(&fps->fps_pool_list);
1467 	INIT_LIST_HEAD(&fps->fps_failed_pool_list);
1468 
1469 	rc = kiblnd_create_fmr_pool(fps, &fpo);
1470 	if (rc == 0)
1471 		list_add_tail(&fpo->fpo_list, &fps->fps_pool_list);
1472 
1473 	return rc;
1474 }
1475 
1476 static int kiblnd_fmr_pool_is_idle(kib_fmr_pool_t *fpo, unsigned long now)
1477 {
1478 	if (fpo->fpo_map_count != 0) /* still in use */
1479 		return 0;
1480 	if (fpo->fpo_failed)
1481 		return 1;
1482 	return cfs_time_aftereq(now, fpo->fpo_deadline);
1483 }
1484 
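/* Release an FMR mapping back to its pool.  On error ('status' != 0) the
 * whole FMR pool is flushed.  Any pool, other than the first (persistent)
 * one, that has become idle is destroyed. */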
1485 void kiblnd_fmr_pool_unmap(kib_fmr_t *fmr, int status)
1486 {
1487 	LIST_HEAD(zombies);
1488 	kib_fmr_pool_t    *fpo = fmr->fmr_pool;
1489 	kib_fmr_poolset_t *fps = fpo->fpo_owner;
1490 	unsigned long	 now = cfs_time_current();
1491 	kib_fmr_pool_t    *tmp;
1492 	int		rc;
1493 
1494 	rc = ib_fmr_pool_unmap(fmr->fmr_pfmr);
1495 	LASSERT(rc == 0);
1496 
1497 	if (status != 0) {
1498 		rc = ib_flush_fmr_pool(fpo->fpo_fmr_pool);
1499 		LASSERT(rc == 0);
1500 	}
1501 
1502 	fmr->fmr_pool = NULL;
1503 	fmr->fmr_pfmr = NULL;
1504 
1505 	spin_lock(&fps->fps_lock);
1506 	fpo->fpo_map_count--;  /* decref the pool */
1507 
1508 	list_for_each_entry_safe(fpo, tmp, &fps->fps_pool_list, fpo_list) {
1509 		/* the first pool is persistent */
1510 		if (fps->fps_pool_list.next == &fpo->fpo_list)
1511 			continue;
1512 
1513 		if (kiblnd_fmr_pool_is_idle(fpo, now)) {
1514 			list_move(&fpo->fpo_list, &zombies);
1515 			fps->fps_version++;
1516 		}
1517 	}
1518 	spin_unlock(&fps->fps_lock);
1519 
1520 	if (!list_empty(&zombies))
1521 		kiblnd_destroy_fmr_pool_list(&zombies);
1522 }
1523 
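/* Map 'npages' physical pages through one of the poolset's FMR pools.  If
 * every pool is busy the poolset is grown: only one thread allocates a new
 * pool at a time, and a recent allocation failure makes this return -EAGAIN
 * until fps_next_retry. */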
1524 int kiblnd_fmr_pool_map(kib_fmr_poolset_t *fps, __u64 *pages, int npages,
1525 			__u64 iov, kib_fmr_t *fmr)
1526 {
1527 	struct ib_pool_fmr *pfmr;
1528 	kib_fmr_pool_t     *fpo;
1529 	__u64	       version;
1530 	int		 rc;
1531 
1532  again:
1533 	spin_lock(&fps->fps_lock);
1534 	version = fps->fps_version;
1535 	list_for_each_entry(fpo, &fps->fps_pool_list, fpo_list) {
1536 		fpo->fpo_deadline = cfs_time_shift(IBLND_POOL_DEADLINE);
1537 		fpo->fpo_map_count++;
1538 		spin_unlock(&fps->fps_lock);
1539 
1540 		pfmr = ib_fmr_pool_map_phys(fpo->fpo_fmr_pool,
1541 					    pages, npages, iov);
1542 		if (likely(!IS_ERR(pfmr))) {
1543 			fmr->fmr_pool = fpo;
1544 			fmr->fmr_pfmr = pfmr;
1545 			return 0;
1546 		}
1547 
1548 		spin_lock(&fps->fps_lock);
1549 		fpo->fpo_map_count--;
1550 		if (PTR_ERR(pfmr) != -EAGAIN) {
1551 			spin_unlock(&fps->fps_lock);
1552 			return PTR_ERR(pfmr);
1553 		}
1554 
1555 		/* EAGAIN and ... */
1556 		if (version != fps->fps_version) {
1557 			spin_unlock(&fps->fps_lock);
1558 			goto again;
1559 		}
1560 	}
1561 
1562 	if (fps->fps_increasing) {
1563 		spin_unlock(&fps->fps_lock);
1564 		CDEBUG(D_NET,
1565 			"Another thread is allocating new FMR pool, waiting for her to complete\n");
1566 		schedule();
1567 		goto again;
1568 
1569 	}
1570 
1571 	if (time_before(cfs_time_current(), fps->fps_next_retry)) {
1572 		/* someone failed recently */
1573 		spin_unlock(&fps->fps_lock);
1574 		return -EAGAIN;
1575 	}
1576 
1577 	fps->fps_increasing = 1;
1578 	spin_unlock(&fps->fps_lock);
1579 
1580 	CDEBUG(D_NET, "Allocate new FMR pool\n");
1581 	rc = kiblnd_create_fmr_pool(fps, &fpo);
1582 	spin_lock(&fps->fps_lock);
1583 	fps->fps_increasing = 0;
1584 	if (rc == 0) {
1585 		fps->fps_version++;
1586 		list_add_tail(&fpo->fpo_list, &fps->fps_pool_list);
1587 	} else {
1588 		fps->fps_next_retry = cfs_time_shift(IBLND_POOL_RETRY);
1589 	}
1590 	spin_unlock(&fps->fps_lock);
1591 
1592 	goto again;
1593 }
1594 
1595 static void kiblnd_fini_pool(kib_pool_t *pool)
1596 {
1597 	LASSERT(list_empty(&pool->po_free_list));
1598 	LASSERT(pool->po_allocated == 0);
1599 
1600 	CDEBUG(D_NET, "Finalize %s pool\n", pool->po_owner->ps_name);
1601 }
1602 
1603 static void kiblnd_init_pool(kib_poolset_t *ps, kib_pool_t *pool, int size)
1604 {
1605 	CDEBUG(D_NET, "Initialize %s pool\n", ps->ps_name);
1606 
1607 	memset(pool, 0, sizeof(kib_pool_t));
1608 	INIT_LIST_HEAD(&pool->po_free_list);
1609 	pool->po_deadline = cfs_time_shift(IBLND_POOL_DEADLINE);
1610 	pool->po_owner    = ps;
1611 	pool->po_size     = size;
1612 }
1613 
1614 static void kiblnd_destroy_pool_list(struct list_head *head)
1615 {
1616 	kib_pool_t *pool;
1617 
1618 	while (!list_empty(head)) {
1619 		pool = list_entry(head->next, kib_pool_t, po_list);
1620 		list_del(&pool->po_list);
1621 
1622 		LASSERT(pool->po_owner != NULL);
1623 		pool->po_owner->ps_pool_destroy(pool);
1624 	}
1625 }
1626 
1627 static void kiblnd_fail_poolset(kib_poolset_t *ps, struct list_head *zombies)
1628 {
1629 	if (ps->ps_net == NULL) /* initialized? */
1630 		return;
1631 
1632 	spin_lock(&ps->ps_lock);
1633 	while (!list_empty(&ps->ps_pool_list)) {
1634 		kib_pool_t *po = list_entry(ps->ps_pool_list.next,
1635 					    kib_pool_t, po_list);
1636 		po->po_failed = 1;
1637 		list_del(&po->po_list);
1638 		if (po->po_allocated == 0)
1639 			list_add(&po->po_list, zombies);
1640 		else
1641 			list_add(&po->po_list, &ps->ps_failed_pool_list);
1642 	}
1643 	spin_unlock(&ps->ps_lock);
1644 }
1645 
1646 static void kiblnd_fini_poolset(kib_poolset_t *ps)
1647 {
1648 	if (ps->ps_net != NULL) { /* initialized? */
1649 		kiblnd_destroy_pool_list(&ps->ps_failed_pool_list);
1650 		kiblnd_destroy_pool_list(&ps->ps_pool_list);
1651 	}
1652 }
1653 
1654 static int kiblnd_init_poolset(kib_poolset_t *ps, int cpt,
1655 			       kib_net_t *net, char *name, int size,
1656 			       kib_ps_pool_create_t po_create,
1657 			       kib_ps_pool_destroy_t po_destroy,
1658 			       kib_ps_node_init_t nd_init,
1659 			       kib_ps_node_fini_t nd_fini)
1660 {
1661 	kib_pool_t	*pool;
1662 	int		rc;
1663 
1664 	memset(ps, 0, sizeof(kib_poolset_t));
1665 
1666 	ps->ps_cpt	    = cpt;
1667 	ps->ps_net	  = net;
1668 	ps->ps_pool_create  = po_create;
1669 	ps->ps_pool_destroy = po_destroy;
1670 	ps->ps_node_init    = nd_init;
1671 	ps->ps_node_fini    = nd_fini;
1672 	ps->ps_pool_size    = size;
1673 	if (strlcpy(ps->ps_name, name, sizeof(ps->ps_name))
1674 	    >= sizeof(ps->ps_name))
1675 		return -E2BIG;
1676 	spin_lock_init(&ps->ps_lock);
1677 	INIT_LIST_HEAD(&ps->ps_pool_list);
1678 	INIT_LIST_HEAD(&ps->ps_failed_pool_list);
1679 
1680 	rc = ps->ps_pool_create(ps, size, &pool);
1681 	if (rc == 0)
1682 		list_add(&pool->po_list, &ps->ps_pool_list);
1683 	else
1684 		CERROR("Failed to create the first pool for %s\n", ps->ps_name);
1685 
1686 	return rc;
1687 }
1688 
1689 static int kiblnd_pool_is_idle(kib_pool_t *pool, unsigned long now)
1690 {
1691 	if (pool->po_allocated != 0) /* still in use */
1692 		return 0;
1693 	if (pool->po_failed)
1694 		return 1;
1695 	return cfs_time_aftereq(now, pool->po_deadline);
1696 }
1697 
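/* Return 'node' to its pool and destroy any non-persistent pools in the
 * poolset that have been idle past their deadline. */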
1698 void kiblnd_pool_free_node(kib_pool_t *pool, struct list_head *node)
1699 {
1700 	LIST_HEAD(zombies);
1701 	kib_poolset_t  *ps = pool->po_owner;
1702 	kib_pool_t     *tmp;
1703 	unsigned long      now = cfs_time_current();
1704 
1705 	spin_lock(&ps->ps_lock);
1706 
1707 	if (ps->ps_node_fini != NULL)
1708 		ps->ps_node_fini(pool, node);
1709 
1710 	LASSERT(pool->po_allocated > 0);
1711 	list_add(node, &pool->po_free_list);
1712 	pool->po_allocated--;
1713 
1714 	list_for_each_entry_safe(pool, tmp, &ps->ps_pool_list, po_list) {
1715 		/* the first pool is persistent */
1716 		if (ps->ps_pool_list.next == &pool->po_list)
1717 			continue;
1718 
1719 		if (kiblnd_pool_is_idle(pool, now))
1720 			list_move(&pool->po_list, &zombies);
1721 	}
1722 	spin_unlock(&ps->ps_lock);
1723 
1724 	if (!list_empty(&zombies))
1725 		kiblnd_destroy_pool_list(&zombies);
1726 }
1727 
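/* Take a free node from the first pool that has one.  If every pool is
 * empty, grow the poolset with ps_pool_create() (serialised by
 * ps_increasing) and retry; a recent allocation failure makes this return
 * NULL until ps_next_retry. */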
1728 struct list_head *kiblnd_pool_alloc_node(kib_poolset_t *ps)
1729 {
1730 	struct list_head	    *node;
1731 	kib_pool_t	    *pool;
1732 	int		    rc;
1733 
1734  again:
1735 	spin_lock(&ps->ps_lock);
1736 	list_for_each_entry(pool, &ps->ps_pool_list, po_list) {
1737 		if (list_empty(&pool->po_free_list))
1738 			continue;
1739 
1740 		pool->po_allocated++;
1741 		pool->po_deadline = cfs_time_shift(IBLND_POOL_DEADLINE);
1742 		node = pool->po_free_list.next;
1743 		list_del(node);
1744 
1745 		if (ps->ps_node_init != NULL) {
1746 			/* still hold the lock */
1747 			ps->ps_node_init(pool, node);
1748 		}
1749 		spin_unlock(&ps->ps_lock);
1750 		return node;
1751 	}
1752 
1753 	/* no available tx pool and ... */
1754 	if (ps->ps_increasing) {
1755 		/* another thread is allocating a new pool */
1756 		spin_unlock(&ps->ps_lock);
1757 		CDEBUG(D_NET, "Another thread is allocating new %s pool, waiting for her to complete\n",
1758 		       ps->ps_name);
1759 		schedule();
1760 		goto again;
1761 	}
1762 
1763 	if (time_before(cfs_time_current(), ps->ps_next_retry)) {
1764 		/* someone failed recently */
1765 		spin_unlock(&ps->ps_lock);
1766 		return NULL;
1767 	}
1768 
1769 	ps->ps_increasing = 1;
1770 	spin_unlock(&ps->ps_lock);
1771 
1772 	CDEBUG(D_NET, "%s pool exhausted, allocate new pool\n", ps->ps_name);
1773 
1774 	rc = ps->ps_pool_create(ps, ps->ps_pool_size, &pool);
1775 
1776 	spin_lock(&ps->ps_lock);
1777 	ps->ps_increasing = 0;
1778 	if (rc == 0) {
1779 		list_add_tail(&pool->po_list, &ps->ps_pool_list);
1780 	} else {
1781 		ps->ps_next_retry = cfs_time_shift(IBLND_POOL_RETRY);
1782 		CERROR("Can't allocate new %s pool because out of memory\n",
1783 		       ps->ps_name);
1784 	}
1785 	spin_unlock(&ps->ps_lock);
1786 
1787 	goto again;
1788 }
1789 
1790 void kiblnd_pmr_pool_unmap(kib_phys_mr_t *pmr)
1791 {
1792 	kib_pmr_pool_t      *ppo = pmr->pmr_pool;
1793 	struct ib_mr	*mr  = pmr->pmr_mr;
1794 
1795 	pmr->pmr_mr = NULL;
1796 	kiblnd_pool_free_node(&ppo->ppo_pool, &pmr->pmr_list);
1797 	if (mr != NULL)
1798 		ib_dereg_mr(mr);
1799 }
1800 
1801 int kiblnd_pmr_pool_map(kib_pmr_poolset_t *pps, kib_hca_dev_t *hdev,
1802 		    kib_rdma_desc_t *rd, __u64 *iova, kib_phys_mr_t **pp_pmr)
1803 {
1804 	kib_phys_mr_t *pmr;
1805 	struct list_head    *node;
1806 	int	    rc;
1807 	int	    i;
1808 
1809 	node = kiblnd_pool_alloc_node(&pps->pps_poolset);
1810 	if (node == NULL) {
1811 		CERROR("Failed to allocate PMR descriptor\n");
1812 		return -ENOMEM;
1813 	}
1814 
1815 	pmr = container_of(node, kib_phys_mr_t, pmr_list);
1816 	if (pmr->pmr_pool->ppo_hdev != hdev) {
1817 		kiblnd_pool_free_node(&pmr->pmr_pool->ppo_pool, node);
1818 		return -EAGAIN;
1819 	}
1820 
1821 	for (i = 0; i < rd->rd_nfrags; i++) {
1822 		pmr->pmr_ipb[i].addr = rd->rd_frags[i].rf_addr;
1823 		pmr->pmr_ipb[i].size = rd->rd_frags[i].rf_nob;
1824 	}
1825 
1826 	pmr->pmr_mr = ib_reg_phys_mr(hdev->ibh_pd,
1827 				     pmr->pmr_ipb, rd->rd_nfrags,
1828 				     IB_ACCESS_LOCAL_WRITE |
1829 				     IB_ACCESS_REMOTE_WRITE,
1830 				     iova);
1831 	if (!IS_ERR(pmr->pmr_mr)) {
1832 		pmr->pmr_iova = *iova;
1833 		*pp_pmr = pmr;
1834 		return 0;
1835 	}
1836 
1837 	rc = PTR_ERR(pmr->pmr_mr);
1838 	CERROR("Failed ib_reg_phys_mr: %d\n", rc);
1839 
1840 	pmr->pmr_mr = NULL;
1841 	kiblnd_pool_free_node(&pmr->pmr_pool->ppo_pool, node);
1842 
1843 	return rc;
1844 }
1845 
1846 static void kiblnd_destroy_pmr_pool(kib_pool_t *pool)
1847 {
1848 	kib_pmr_pool_t *ppo = container_of(pool, kib_pmr_pool_t, ppo_pool);
1849 	kib_phys_mr_t  *pmr;
1850 	kib_phys_mr_t *tmp;
1851 
1852 	LASSERT(pool->po_allocated == 0);
1853 
1854 	list_for_each_entry_safe(pmr, tmp, &pool->po_free_list, pmr_list) {
1855 		LASSERT(pmr->pmr_mr == NULL);
1856 		list_del(&pmr->pmr_list);
1857 
1858 		if (pmr->pmr_ipb != NULL) {
1859 			LIBCFS_FREE(pmr->pmr_ipb,
1860 				    IBLND_MAX_RDMA_FRAGS *
1861 				    sizeof(struct ib_phys_buf));
1862 		}
1863 
1864 		LIBCFS_FREE(pmr, sizeof(kib_phys_mr_t));
1865 	}
1866 
1867 	kiblnd_fini_pool(pool);
1868 	if (ppo->ppo_hdev != NULL)
1869 		kiblnd_hdev_decref(ppo->ppo_hdev);
1870 
1871 	LIBCFS_FREE(ppo, sizeof(kib_pmr_pool_t));
1872 }
1873 
1874 static inline int kiblnd_pmr_pool_size(int ncpts)
1875 {
1876 	int size = *kiblnd_tunables.kib_pmr_pool_size / ncpts;
1877 
1878 	return max(IBLND_PMR_POOL, size);
1879 }
1880 
1881 static int kiblnd_create_pmr_pool(kib_poolset_t *ps, int size,
1882 				  kib_pool_t **pp_po)
1883 {
1884 	struct kib_pmr_pool	*ppo;
1885 	struct kib_pool		*pool;
1886 	kib_phys_mr_t		*pmr;
1887 	int			i;
1888 
1889 	LIBCFS_CPT_ALLOC(ppo, lnet_cpt_table(),
1890 			 ps->ps_cpt, sizeof(kib_pmr_pool_t));
1891 	if (ppo == NULL) {
1892 		CERROR("Failed to allocate PMR pool\n");
1893 		return -ENOMEM;
1894 	}
1895 
1896 	pool = &ppo->ppo_pool;
1897 	kiblnd_init_pool(ps, pool, size);
1898 
1899 	for (i = 0; i < size; i++) {
1900 		LIBCFS_CPT_ALLOC(pmr, lnet_cpt_table(),
1901 				 ps->ps_cpt, sizeof(kib_phys_mr_t));
1902 		if (pmr == NULL)
1903 			break;
1904 
1905 		pmr->pmr_pool = ppo;
1906 		LIBCFS_CPT_ALLOC(pmr->pmr_ipb, lnet_cpt_table(), ps->ps_cpt,
1907 				 IBLND_MAX_RDMA_FRAGS * sizeof(*pmr->pmr_ipb));
1908 		if (pmr->pmr_ipb == NULL) {
1909 			/* pmr is not yet on po_free_list, so free it here or
			 * it would leak when the half-built pool is destroyed */
			LIBCFS_FREE(pmr, sizeof(kib_phys_mr_t));
			break;
		}
1910 
1911 		list_add(&pmr->pmr_list, &pool->po_free_list);
1912 	}
1913 
1914 	if (i < size) {
1915 		ps->ps_pool_destroy(pool);
1916 		return -ENOMEM;
1917 	}
1918 
1919 	ppo->ppo_hdev = kiblnd_current_hdev(ps->ps_net->ibn_dev);
1920 	*pp_po = pool;
1921 	return 0;
1922 }
1923 
1924 static void kiblnd_destroy_tx_pool(kib_pool_t *pool)
1925 {
1926 	kib_tx_pool_t  *tpo = container_of(pool, kib_tx_pool_t, tpo_pool);
1927 	int	     i;
1928 
1929 	LASSERT(pool->po_allocated == 0);
1930 
1931 	if (tpo->tpo_tx_pages != NULL) {
1932 		kiblnd_unmap_tx_pool(tpo);
1933 		kiblnd_free_pages(tpo->tpo_tx_pages);
1934 	}
1935 
1936 	if (tpo->tpo_tx_descs == NULL)
1937 		goto out;
1938 
1939 	for (i = 0; i < pool->po_size; i++) {
1940 		kib_tx_t *tx = &tpo->tpo_tx_descs[i];
1941 
1942 		list_del(&tx->tx_list);
1943 		if (tx->tx_pages != NULL)
1944 			LIBCFS_FREE(tx->tx_pages,
1945 				    LNET_MAX_IOV *
1946 				    sizeof(*tx->tx_pages));
1947 		if (tx->tx_frags != NULL)
1948 			LIBCFS_FREE(tx->tx_frags,
1949 				    IBLND_MAX_RDMA_FRAGS *
1950 					    sizeof(*tx->tx_frags));
1951 		if (tx->tx_wrq != NULL)
1952 			LIBCFS_FREE(tx->tx_wrq,
1953 				    (1 + IBLND_MAX_RDMA_FRAGS) *
1954 				    sizeof(*tx->tx_wrq));
1955 		if (tx->tx_sge != NULL)
1956 			LIBCFS_FREE(tx->tx_sge,
1957 				    (1 + IBLND_MAX_RDMA_FRAGS) *
1958 				    sizeof(*tx->tx_sge));
1959 		if (tx->tx_rd != NULL)
1960 			LIBCFS_FREE(tx->tx_rd,
1961 				    offsetof(kib_rdma_desc_t,
1962 					     rd_frags[IBLND_MAX_RDMA_FRAGS]));
1963 	}
1964 
1965 	LIBCFS_FREE(tpo->tpo_tx_descs,
1966 		    pool->po_size * sizeof(kib_tx_t));
1967 out:
1968 	kiblnd_fini_pool(pool);
1969 	LIBCFS_FREE(tpo, sizeof(kib_tx_pool_t));
1970 }
1971 
1972 static int kiblnd_tx_pool_size(int ncpts)
1973 {
1974 	int ntx = *kiblnd_tunables.kib_ntx / ncpts;
1975 
1976 	return max(IBLND_TX_POOL, ntx);
1977 }
1978 
1979 static int kiblnd_create_tx_pool(kib_poolset_t *ps, int size,
1980 				 kib_pool_t **pp_po)
1981 {
1982 	int	    i;
1983 	int	    npg;
1984 	kib_pool_t    *pool;
1985 	kib_tx_pool_t *tpo;
1986 
1987 	LIBCFS_CPT_ALLOC(tpo, lnet_cpt_table(), ps->ps_cpt, sizeof(*tpo));
1988 	if (tpo == NULL) {
1989 		CERROR("Failed to allocate TX pool\n");
1990 		return -ENOMEM;
1991 	}
1992 
1993 	pool = &tpo->tpo_pool;
1994 	kiblnd_init_pool(ps, pool, size);
1995 	tpo->tpo_tx_descs = NULL;
1996 	tpo->tpo_tx_pages = NULL;
1997 
1998 	npg = (size * IBLND_MSG_SIZE + PAGE_SIZE - 1) / PAGE_SIZE;
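	/* npg is ceil(size * IBLND_MSG_SIZE / PAGE_SIZE): enough whole pages
	 * to hold one pre-mapped message buffer per tx descriptor */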
1999 	if (kiblnd_alloc_pages(&tpo->tpo_tx_pages, ps->ps_cpt, npg) != 0) {
2000 		CERROR("Can't allocate tx pages: %d\n", npg);
2001 		LIBCFS_FREE(tpo, sizeof(kib_tx_pool_t));
2002 		return -ENOMEM;
2003 	}
2004 
2005 	LIBCFS_CPT_ALLOC(tpo->tpo_tx_descs, lnet_cpt_table(), ps->ps_cpt,
2006 			 size * sizeof(kib_tx_t));
2007 	if (tpo->tpo_tx_descs == NULL) {
2008 		CERROR("Can't allocate %d tx descriptors\n", size);
2009 		ps->ps_pool_destroy(pool);
2010 		return -ENOMEM;
2011 	}
2012 
2013 	memset(tpo->tpo_tx_descs, 0, size * sizeof(kib_tx_t));
2014 
2015 	for (i = 0; i < size; i++) {
2016 		kib_tx_t *tx = &tpo->tpo_tx_descs[i];
2017 
2018 		tx->tx_pool = tpo;
2019 		if (ps->ps_net->ibn_fmr_ps != NULL) {
2020 			LIBCFS_CPT_ALLOC(tx->tx_pages,
2021 					 lnet_cpt_table(), ps->ps_cpt,
2022 					 LNET_MAX_IOV * sizeof(*tx->tx_pages));
2023 			if (tx->tx_pages == NULL)
2024 				break;
2025 		}
2026 
2027 		LIBCFS_CPT_ALLOC(tx->tx_frags, lnet_cpt_table(), ps->ps_cpt,
2028 				 IBLND_MAX_RDMA_FRAGS * sizeof(*tx->tx_frags));
2029 		if (tx->tx_frags == NULL)
2030 			break;
2031 
2032 		sg_init_table(tx->tx_frags, IBLND_MAX_RDMA_FRAGS);
2033 
2034 		LIBCFS_CPT_ALLOC(tx->tx_wrq, lnet_cpt_table(), ps->ps_cpt,
2035 				 (1 + IBLND_MAX_RDMA_FRAGS) *
2036 				 sizeof(*tx->tx_wrq));
2037 		if (tx->tx_wrq == NULL)
2038 			break;
2039 
2040 		LIBCFS_CPT_ALLOC(tx->tx_sge, lnet_cpt_table(), ps->ps_cpt,
2041 				 (1 + IBLND_MAX_RDMA_FRAGS) *
2042 				 sizeof(*tx->tx_sge));
2043 		if (tx->tx_sge == NULL)
2044 			break;
2045 
2046 		LIBCFS_CPT_ALLOC(tx->tx_rd, lnet_cpt_table(), ps->ps_cpt,
2047 				 offsetof(kib_rdma_desc_t,
2048 					  rd_frags[IBLND_MAX_RDMA_FRAGS]));
2049 		if (tx->tx_rd == NULL)
2050 			break;
2051 	}
2052 
2053 	if (i == size) {
2054 		kiblnd_map_tx_pool(tpo);
2055 		*pp_po = pool;
2056 		return 0;
2057 	}
2058 
2059 	ps->ps_pool_destroy(pool);
2060 	return -ENOMEM;
2061 }
2062 
2063 static void kiblnd_tx_init(kib_pool_t *pool, struct list_head *node)
2064 {
2065 	kib_tx_poolset_t *tps = container_of(pool->po_owner, kib_tx_poolset_t,
2066 					     tps_poolset);
2067 	kib_tx_t	 *tx  = list_entry(node, kib_tx_t, tx_list);
2068 
2069 	tx->tx_cookie = tps->tps_next_tx_cookie++;
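	/* the cookie is a simple per-poolset counter, so every tx handed out
	 * by this poolset carries a distinct identifier */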
2070 }
2071 
2072 static void kiblnd_net_fini_pools(kib_net_t *net)
2073 {
2074 	int	i;
2075 
2076 	cfs_cpt_for_each(i, lnet_cpt_table()) {
2077 		kib_tx_poolset_t	*tps;
2078 		kib_fmr_poolset_t	*fps;
2079 		kib_pmr_poolset_t	*pps;
2080 
2081 		if (net->ibn_tx_ps != NULL) {
2082 			tps = net->ibn_tx_ps[i];
2083 			kiblnd_fini_poolset(&tps->tps_poolset);
2084 		}
2085 
2086 		if (net->ibn_fmr_ps != NULL) {
2087 			fps = net->ibn_fmr_ps[i];
2088 			kiblnd_fini_fmr_poolset(fps);
2089 		}
2090 
2091 		if (net->ibn_pmr_ps != NULL) {
2092 			pps = net->ibn_pmr_ps[i];
2093 			kiblnd_fini_poolset(&pps->pps_poolset);
2094 		}
2095 	}
2096 
2097 	if (net->ibn_tx_ps != NULL) {
2098 		cfs_percpt_free(net->ibn_tx_ps);
2099 		net->ibn_tx_ps = NULL;
2100 	}
2101 
2102 	if (net->ibn_fmr_ps != NULL) {
2103 		cfs_percpt_free(net->ibn_fmr_ps);
2104 		net->ibn_fmr_ps = NULL;
2105 	}
2106 
2107 	if (net->ibn_pmr_ps != NULL) {
2108 		cfs_percpt_free(net->ibn_pmr_ps);
2109 		net->ibn_pmr_ps = NULL;
2110 	}
2111 }
2112 
2113 static int kiblnd_net_init_pools(kib_net_t *net, __u32 *cpts, int ncpts)
2114 {
2115 	unsigned long	flags;
2116 	int		cpt;
2117 	int		rc;
2118 	int		i;
2119 
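	/* if map-on-demand is disabled and a single MR already covers all
	 * memory, RDMA buffers never need FMR/PMR mapping, so skip straight
	 * to creating the TX pools */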
2120 	read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
2121 	if (*kiblnd_tunables.kib_map_on_demand == 0 &&
2122 	    net->ibn_dev->ibd_hdev->ibh_nmrs == 1) {
2123 		read_unlock_irqrestore(&kiblnd_data.kib_global_lock,
2124 					   flags);
2125 		goto create_tx_pool;
2126 	}
2127 
2128 	read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
2129 
2130 	if (*kiblnd_tunables.kib_fmr_pool_size <
2131 	    *kiblnd_tunables.kib_ntx / 4) {
2132 		CERROR("Can't set fmr pool size (%d) < ntx / 4(%d)\n",
2133 		       *kiblnd_tunables.kib_fmr_pool_size,
2134 		       *kiblnd_tunables.kib_ntx / 4);
2135 		rc = -EINVAL;
2136 		goto failed;
2137 	}
2138 
2139 	/* The TX pool must be created after the FMR/PMR pools; see LU-2268
2140 	 * for details */
2141 	LASSERT(net->ibn_tx_ps == NULL);
2142 
2143 	/* premapping can fail if ibd_nmr > 1, so always create an FMR/PMR
2144 	 * pool and fall back to map-on-demand when premapping fails */
2145 
2146 	net->ibn_fmr_ps = cfs_percpt_alloc(lnet_cpt_table(),
2147 					   sizeof(kib_fmr_poolset_t));
2148 	if (net->ibn_fmr_ps == NULL) {
2149 		CERROR("Failed to allocate FMR pool array\n");
2150 		rc = -ENOMEM;
2151 		goto failed;
2152 	}
2153 
2154 	for (i = 0; i < ncpts; i++) {
2155 		cpt = (cpts == NULL) ? i : cpts[i];
2156 		rc = kiblnd_init_fmr_poolset(net->ibn_fmr_ps[cpt], cpt, net,
2157 					     kiblnd_fmr_pool_size(ncpts),
2158 					     kiblnd_fmr_flush_trigger(ncpts));
2159 		if (rc == -ENOSYS && i == 0) /* no FMR */
2160 			break; /* create PMR pool */
2161 
2162 		if (rc != 0) { /* a real error */
2163 			CERROR("Can't initialize FMR pool for CPT %d: %d\n",
2164 			       cpt, rc);
2165 			goto failed;
2166 		}
2167 	}
2168 
2169 	if (i > 0) {
2170 		LASSERT(i == ncpts);
2171 		goto create_tx_pool;
2172 	}
2173 
2174 	cfs_percpt_free(net->ibn_fmr_ps);
2175 	net->ibn_fmr_ps = NULL;
2176 
2177 	CWARN("Device does not support FMR, falling back to PMR\n");
2178 
2179 	if (*kiblnd_tunables.kib_pmr_pool_size <
2180 	    *kiblnd_tunables.kib_ntx / 4) {
2181 		CERROR("Can't set pmr pool size (%d) < ntx / 4(%d)\n",
2182 		       *kiblnd_tunables.kib_pmr_pool_size,
2183 		       *kiblnd_tunables.kib_ntx / 4);
2184 		rc = -EINVAL;
2185 		goto failed;
2186 	}
2187 
2188 	net->ibn_pmr_ps = cfs_percpt_alloc(lnet_cpt_table(),
2189 					   sizeof(kib_pmr_poolset_t));
2190 	if (net->ibn_pmr_ps == NULL) {
2191 		CERROR("Failed to allocate PMR pool array\n");
2192 		rc = -ENOMEM;
2193 		goto failed;
2194 	}
2195 
2196 	for (i = 0; i < ncpts; i++) {
2197 		cpt = (cpts == NULL) ? i : cpts[i];
2198 		rc = kiblnd_init_poolset(&net->ibn_pmr_ps[cpt]->pps_poolset,
2199 					 cpt, net, "PMR",
2200 					 kiblnd_pmr_pool_size(ncpts),
2201 					 kiblnd_create_pmr_pool,
2202 					 kiblnd_destroy_pmr_pool, NULL, NULL);
2203 		if (rc != 0) {
2204 			CERROR("Can't initialize PMR pool for CPT %d: %d\n",
2205 			       cpt, rc);
2206 			goto failed;
2207 		}
2208 	}
2209 
2210  create_tx_pool:
2211 	net->ibn_tx_ps = cfs_percpt_alloc(lnet_cpt_table(),
2212 					  sizeof(kib_tx_poolset_t));
2213 	if (net->ibn_tx_ps == NULL) {
2214 		CERROR("Failed to allocate tx pool array\n");
2215 		rc = -ENOMEM;
2216 		goto failed;
2217 	}
2218 
2219 	for (i = 0; i < ncpts; i++) {
2220 		cpt = (cpts == NULL) ? i : cpts[i];
2221 		rc = kiblnd_init_poolset(&net->ibn_tx_ps[cpt]->tps_poolset,
2222 					 cpt, net, "TX",
2223 					 kiblnd_tx_pool_size(ncpts),
2224 					 kiblnd_create_tx_pool,
2225 					 kiblnd_destroy_tx_pool,
2226 					 kiblnd_tx_init, NULL);
2227 		if (rc != 0) {
2228 			CERROR("Can't initialize TX pool for CPT %d: %d\n",
2229 			       cpt, rc);
2230 			goto failed;
2231 		}
2232 	}
2233 
2234 	return 0;
2235  failed:
2236 	kiblnd_net_fini_pools(net);
2237 	LASSERT(rc != 0);
2238 	return rc;
2239 }
2240 
2241 static int kiblnd_hdev_get_attr(kib_hca_dev_t *hdev)
2242 {
2243 	struct ib_device_attr *attr;
2244 	int		    rc;
2245 
2246 	/* It's safe to assume an HCA can handle a page size
2247 	 * matching that of the native system */
2248 	hdev->ibh_page_shift = PAGE_SHIFT;
2249 	hdev->ibh_page_size  = 1 << PAGE_SHIFT;
2250 	hdev->ibh_page_mask  = ~((__u64)hdev->ibh_page_size - 1);
2251 
2252 	LIBCFS_ALLOC(attr, sizeof(*attr));
2253 	if (attr == NULL) {
2254 		CERROR("Out of memory\n");
2255 		return -ENOMEM;
2256 	}
2257 
2258 	rc = ib_query_device(hdev->ibh_ibdev, attr);
2259 	if (rc == 0)
2260 		hdev->ibh_mr_size = attr->max_mr_size;
2261 
2262 	LIBCFS_FREE(attr, sizeof(*attr));
2263 
2264 	if (rc != 0) {
2265 		CERROR("Failed to query IB device: %d\n", rc);
2266 		return rc;
2267 	}
2268 
2269 	if (hdev->ibh_mr_size == ~0ULL) {
2270 		hdev->ibh_mr_shift = 64;
2271 		return 0;
2272 	}
2273 
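	/* otherwise find the shift n for which max_mr_size is 2^n or 2^n - 1;
	 * devices may report the limit in either form */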
2274 	for (hdev->ibh_mr_shift = 0;
2275 	     hdev->ibh_mr_shift < 64; hdev->ibh_mr_shift++) {
2276 		if (hdev->ibh_mr_size == (1ULL << hdev->ibh_mr_shift) ||
2277 		    hdev->ibh_mr_size == (1ULL << hdev->ibh_mr_shift) - 1)
2278 			return 0;
2279 	}
2280 
2281 	CERROR("Invalid mr size: %#llx\n", hdev->ibh_mr_size);
2282 	return -EINVAL;
2283 }
2284 
2285 static void kiblnd_hdev_cleanup_mrs(kib_hca_dev_t *hdev)
2286 {
2287 	int     i;
2288 
2289 	if (hdev->ibh_nmrs == 0 || hdev->ibh_mrs == NULL)
2290 		return;
2291 
2292 	for (i = 0; i < hdev->ibh_nmrs; i++) {
2293 		if (hdev->ibh_mrs[i] == NULL)
2294 			break;
2295 
2296 		ib_dereg_mr(hdev->ibh_mrs[i]);
2297 	}
2298 
2299 	LIBCFS_FREE(hdev->ibh_mrs, sizeof(*hdev->ibh_mrs) * hdev->ibh_nmrs);
2300 	hdev->ibh_mrs  = NULL;
2301 	hdev->ibh_nmrs = 0;
2302 }
2303 
2304 void kiblnd_hdev_destroy(kib_hca_dev_t *hdev)
2305 {
2306 	kiblnd_hdev_cleanup_mrs(hdev);
2307 
2308 	if (hdev->ibh_pd != NULL)
2309 		ib_dealloc_pd(hdev->ibh_pd);
2310 
2311 	if (hdev->ibh_cmid != NULL)
2312 		rdma_destroy_id(hdev->ibh_cmid);
2313 
2314 	LIBCFS_FREE(hdev, sizeof(*hdev));
2315 }
2316 
2317 static int kiblnd_hdev_setup_mrs(kib_hca_dev_t *hdev)
2318 {
2319 	struct ib_mr *mr;
2320 	int	   i;
2321 	int	   rc;
2322 	__u64	 mm_size;
2323 	__u64	 mr_size;
2324 	int	   acflags = IB_ACCESS_LOCAL_WRITE |
2325 				IB_ACCESS_REMOTE_WRITE;
2326 
2327 	rc = kiblnd_hdev_get_attr(hdev);
2328 	if (rc != 0)
2329 		return rc;
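	/* two strategies below: if one MR can span the whole 64-bit space,
	 * register a single DMA MR; otherwise register an array of physical
	 * MRs, each mr_size bytes, that together cover all of kernel memory */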
2330 
2331 	if (hdev->ibh_mr_shift == 64) {
2332 		LIBCFS_ALLOC(hdev->ibh_mrs, 1 * sizeof(*hdev->ibh_mrs));
2333 		if (hdev->ibh_mrs == NULL) {
2334 			CERROR("Failed to allocate MRs table\n");
2335 			return -ENOMEM;
2336 		}
2337 
2338 		hdev->ibh_mrs[0] = NULL;
2339 		hdev->ibh_nmrs   = 1;
2340 
2341 		mr = ib_get_dma_mr(hdev->ibh_pd, acflags);
2342 		if (IS_ERR(mr)) {
2343 			CERROR("Failed ib_get_dma_mr : %ld\n", PTR_ERR(mr));
2344 			kiblnd_hdev_cleanup_mrs(hdev);
2345 			return PTR_ERR(mr);
2346 		}
2347 
2348 		hdev->ibh_mrs[0] = mr;
2349 
2350 		goto out;
2351 	}
2352 
2353 	mr_size = 1ULL << hdev->ibh_mr_shift;
2354 	mm_size = (unsigned long)high_memory - PAGE_OFFSET;
2355 
2356 	hdev->ibh_nmrs = (int)((mm_size + mr_size - 1) >> hdev->ibh_mr_shift);
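	/* i.e. ceil(mm_size / mr_size) regions are needed to cover memory */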
2357 
2358 	if (hdev->ibh_mr_shift < 32 || hdev->ibh_nmrs > 1024) {
2359 		/* an MR smaller than 4GB, or more than 1024 MRs (i.e. over
		 * 4TB of memory), is not supported; assume this code will be
		 * reworked before that matters */
2360 		CERROR("Can't support memory size %#llx with MR size %#llx\n",
2361 		       mm_size, mr_size);
2362 		return -EINVAL;
2363 	}
2364 
2365 	/* create an array of MRs to cover all memory */
2366 	LIBCFS_ALLOC(hdev->ibh_mrs, sizeof(*hdev->ibh_mrs) * hdev->ibh_nmrs);
2367 	if (hdev->ibh_mrs == NULL) {
2368 		CERROR("Failed to allocate MRs' table\n");
2369 		return -ENOMEM;
2370 	}
2371 
2372 	for (i = 0; i < hdev->ibh_nmrs; i++) {
2373 		struct ib_phys_buf ipb;
2374 		__u64	      iova;
2375 
2376 		ipb.size = hdev->ibh_mr_size;
2377 		ipb.addr = i * mr_size;
2378 		iova     = ipb.addr;
2379 
2380 		mr = ib_reg_phys_mr(hdev->ibh_pd, &ipb, 1, acflags, &iova);
2381 		if (IS_ERR(mr)) {
2382 			CERROR("Failed ib_reg_phys_mr addr %#llx size %#llx : %ld\n",
2383 			       ipb.addr, ipb.size, PTR_ERR(mr));
2384 			kiblnd_hdev_cleanup_mrs(hdev);
2385 			return PTR_ERR(mr);
2386 		}
2387 
2388 		LASSERT(iova == ipb.addr);
2389 
2390 		hdev->ibh_mrs[i] = mr;
2391 	}
2392 
2393 out:
2394 	if (hdev->ibh_mr_size != ~0ULL || hdev->ibh_nmrs != 1)
2395 		LCONSOLE_INFO("Register global MR array, MR size: %#llx, array size: %d\n",
2396 			      hdev->ibh_mr_size, hdev->ibh_nmrs);
2397 	return 0;
2398 }
2399 
2400 /* DUMMY */
2401 static int kiblnd_dummy_callback(struct rdma_cm_id *cmid,
2402 				 struct rdma_cm_event *event)
2403 {
2404 	return 0;
2405 }
2406 
2407 static int kiblnd_dev_need_failover(kib_dev_t *dev)
2408 {
2409 	struct rdma_cm_id  *cmid;
2410 	struct sockaddr_in  srcaddr;
2411 	struct sockaddr_in  dstaddr;
2412 	int		 rc;
2413 
2414 	if (dev->ibd_hdev == NULL || /* initializing */
2415 	    dev->ibd_hdev->ibh_cmid == NULL || /* listener is dead */
2416 	    *kiblnd_tunables.kib_dev_failover > 1) /* debugging */
2417 		return 1;
2418 
2419 	/* XXX: it's UGLY, but I don't have a better way to detect
2420 	 * ib-bonding HCA failover because:
2421 	 *
2422 	 * a. no reliable CM event for HCA failover...
2423 	 * b. no OFED API to get the ib_device for the current net_device...
2424 	 *
2425 	 * We have only two choices at this point:
2426 	 *
2427 	 * a. rdma_bind_addr(), which would conflict with the listener cmid
2428 	 * b. rdma_resolve_addr() to a zero address */
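	/* option (b) is used below: resolving a zero destination binds the
	 * probe cmid to whichever ib_device currently backs dev->ibd_ifip,
	 * without clashing with the listener's bound address */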
2429 	cmid = kiblnd_rdma_create_id(kiblnd_dummy_callback, dev, RDMA_PS_TCP,
2430 				     IB_QPT_RC);
2431 	if (IS_ERR(cmid)) {
2432 		rc = PTR_ERR(cmid);
2433 		CERROR("Failed to create cmid for failover: %d\n", rc);
2434 		return rc;
2435 	}
2436 
2437 	memset(&srcaddr, 0, sizeof(srcaddr));
2438 	srcaddr.sin_family      = AF_INET;
2439 	srcaddr.sin_addr.s_addr = (__force u32)htonl(dev->ibd_ifip);
2440 
2441 	memset(&dstaddr, 0, sizeof(dstaddr));
2442 	dstaddr.sin_family = AF_INET;
2443 	rc = rdma_resolve_addr(cmid, (struct sockaddr *)&srcaddr,
2444 			       (struct sockaddr *)&dstaddr, 1);
2445 	if (rc != 0 || cmid->device == NULL) {
2446 		CERROR("Failed to bind %s:%pI4h to device(%p): %d\n",
2447 		       dev->ibd_ifname, &dev->ibd_ifip,
2448 		       cmid->device, rc);
2449 		rdma_destroy_id(cmid);
2450 		return rc;
2451 	}
2452 
2453 	if (dev->ibd_hdev->ibh_ibdev == cmid->device) {
2454 		/* don't need device failover */
2455 		rdma_destroy_id(cmid);
2456 		return 0;
2457 	}
2458 
2459 	/* the interface is now backed by a different ib_device; destroy the
	 * probe cmid and report that failover is needed */
	rdma_destroy_id(cmid);
	return 1;
2460 }
2461 
2462 int kiblnd_dev_failover(kib_dev_t *dev)
2463 {
2464 	LIST_HEAD(zombie_tpo);
2465 	LIST_HEAD(zombie_ppo);
2466 	LIST_HEAD(zombie_fpo);
2467 	struct rdma_cm_id  *cmid  = NULL;
2468 	kib_hca_dev_t      *hdev  = NULL;
2469 	kib_hca_dev_t      *old;
2470 	struct ib_pd       *pd;
2471 	kib_net_t	  *net;
2472 	struct sockaddr_in  addr;
2473 	unsigned long       flags;
2474 	int		 rc = 0;
2475 	int		    i;
2476 
2477 	LASSERT(*kiblnd_tunables.kib_dev_failover > 1 ||
2478 		 dev->ibd_can_failover ||
2479 		 dev->ibd_hdev == NULL);
2480 
2481 	rc = kiblnd_dev_need_failover(dev);
2482 	if (rc <= 0)
2483 		goto out;
2484 
2485 	if (dev->ibd_hdev != NULL &&
2486 	    dev->ibd_hdev->ibh_cmid != NULL) {
2487 		/* XXX it's not a good idea to close the old listener here,
2488 		 * because we may fail to create a new one. But we have to
2489 		 * close it now, otherwise rdma_bind_addr() will return
2490 		 * EADDRINUSE... */
2491 		write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
2492 
2493 		cmid = dev->ibd_hdev->ibh_cmid;
2494 		/* make the next call to kiblnd_dev_need_failover()
2495 		 * return 1 */
2496 		dev->ibd_hdev->ibh_cmid  = NULL;
2497 		write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
2498 
2499 		rdma_destroy_id(cmid);
2500 	}
2501 
2502 	cmid = kiblnd_rdma_create_id(kiblnd_cm_callback, dev, RDMA_PS_TCP,
2503 				     IB_QPT_RC);
2504 	if (IS_ERR(cmid)) {
2505 		rc = PTR_ERR(cmid);
2506 		CERROR("Failed to create cmid for failover: %d\n", rc);
2507 		goto out;
2508 	}
2509 
2510 	memset(&addr, 0, sizeof(addr));
2511 	addr.sin_family      = AF_INET;
2512 	addr.sin_addr.s_addr = (__force u32)htonl(dev->ibd_ifip);
2513 	addr.sin_port	= htons(*kiblnd_tunables.kib_service);
2514 
2515 	/* Bind to failover device or port */
2516 	rc = rdma_bind_addr(cmid, (struct sockaddr *)&addr);
2517 	if (rc != 0 || cmid->device == NULL) {
2518 		CERROR("Failed to bind %s:%pI4h to device(%p): %d\n",
2519 		       dev->ibd_ifname, &dev->ibd_ifip,
2520 		       cmid->device, rc);
2521 		rdma_destroy_id(cmid);
2522 		goto out;
2523 	}
2524 
2525 	LIBCFS_ALLOC(hdev, sizeof(*hdev));
2526 	if (hdev == NULL) {
2527 		CERROR("Failed to allocate kib_hca_dev\n");
2528 		rdma_destroy_id(cmid);
2529 		rc = -ENOMEM;
2530 		goto out;
2531 	}
2532 
2533 	atomic_set(&hdev->ibh_ref, 1);
2534 	hdev->ibh_dev   = dev;
2535 	hdev->ibh_cmid  = cmid;
2536 	hdev->ibh_ibdev = cmid->device;
2537 
2538 	pd = ib_alloc_pd(cmid->device);
2539 	if (IS_ERR(pd)) {
2540 		rc = PTR_ERR(pd);
2541 		CERROR("Can't allocate PD: %d\n", rc);
2542 		goto out;
2543 	}
2544 
2545 	hdev->ibh_pd = pd;
2546 
2547 	rc = rdma_listen(cmid, 0);
2548 	if (rc != 0) {
2549 		CERROR("Can't start new listener: %d\n", rc);
2550 		goto out;
2551 	}
2552 
2553 	rc = kiblnd_hdev_setup_mrs(hdev);
2554 	if (rc != 0) {
2555 		CERROR("Can't setup device: %d\n", rc);
2556 		goto out;
2557 	}
2558 
2559 	write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
2560 
2561 	old = dev->ibd_hdev;
2562 	dev->ibd_hdev = hdev; /* take over the refcount */
2563 	hdev = old;
2564 
2565 	list_for_each_entry(net, &dev->ibd_nets, ibn_list) {
2566 		cfs_cpt_for_each(i, lnet_cpt_table()) {
2567 			kiblnd_fail_poolset(&net->ibn_tx_ps[i]->tps_poolset,
2568 					    &zombie_tpo);
2569 
2570 			if (net->ibn_fmr_ps != NULL) {
2571 				kiblnd_fail_fmr_poolset(net->ibn_fmr_ps[i],
2572 							&zombie_fpo);
2573 
2574 			} else if (net->ibn_pmr_ps != NULL) {
2575 				kiblnd_fail_poolset(&net->ibn_pmr_ps[i]->
2576 						    pps_poolset, &zombie_ppo);
2577 			}
2578 		}
2579 	}
2580 
2581 	write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
2582  out:
2583 	if (!list_empty(&zombie_tpo))
2584 		kiblnd_destroy_pool_list(&zombie_tpo);
2585 	if (!list_empty(&zombie_ppo))
2586 		kiblnd_destroy_pool_list(&zombie_ppo);
2587 	if (!list_empty(&zombie_fpo))
2588 		kiblnd_destroy_fmr_pool_list(&zombie_fpo);
2589 	if (hdev != NULL)
2590 		kiblnd_hdev_decref(hdev);
2591 
2592 	if (rc != 0)
2593 		dev->ibd_failed_failover++;
2594 	else
2595 		dev->ibd_failed_failover = 0;
2596 
2597 	return rc;
2598 }
2599 
2600 void kiblnd_destroy_dev(kib_dev_t *dev)
2601 {
2602 	LASSERT(dev->ibd_nnets == 0);
2603 	LASSERT(list_empty(&dev->ibd_nets));
2604 
2605 	list_del(&dev->ibd_fail_list);
2606 	list_del(&dev->ibd_list);
2607 
2608 	if (dev->ibd_hdev != NULL)
2609 		kiblnd_hdev_decref(dev->ibd_hdev);
2610 
2611 	LIBCFS_FREE(dev, sizeof(*dev));
2612 }
2613 
2614 static kib_dev_t *kiblnd_create_dev(char *ifname)
2615 {
2616 	struct net_device *netdev;
2617 	kib_dev_t	 *dev;
2618 	__u32	      netmask;
2619 	__u32	      ip;
2620 	int		up;
2621 	int		rc;
2622 
2623 	rc = libcfs_ipif_query(ifname, &up, &ip, &netmask);
2624 	if (rc != 0) {
2625 		CERROR("Can't query IPoIB interface %s: %d\n",
2626 		       ifname, rc);
2627 		return NULL;
2628 	}
2629 
2630 	if (!up) {
2631 		CERROR("Can't query IPoIB interface %s: it's down\n", ifname);
2632 		return NULL;
2633 	}
2634 
2635 	LIBCFS_ALLOC(dev, sizeof(*dev));
2636 	if (dev == NULL)
2637 		return NULL;
2638 
2639 	netdev = dev_get_by_name(&init_net, ifname);
2640 	if (netdev == NULL) {
2641 		dev->ibd_can_failover = 0;
2642 	} else {
2643 		dev->ibd_can_failover = !!(netdev->flags & IFF_MASTER);
2644 		dev_put(netdev);
2645 	}
2646 
2647 	INIT_LIST_HEAD(&dev->ibd_nets);
2648 	INIT_LIST_HEAD(&dev->ibd_list); /* not yet in kib_devs */
2649 	INIT_LIST_HEAD(&dev->ibd_fail_list);
2650 	dev->ibd_ifip = ip;
2651 	strcpy(&dev->ibd_ifname[0], ifname);
2652 
2653 	/* initialize the device */
2654 	rc = kiblnd_dev_failover(dev);
2655 	if (rc != 0) {
2656 		CERROR("Can't initialize device: %d\n", rc);
2657 		LIBCFS_FREE(dev, sizeof(*dev));
2658 		return NULL;
2659 	}
2660 
2661 	list_add_tail(&dev->ibd_list,
2662 			  &kiblnd_data.kib_devs);
2663 	return dev;
2664 }
2665 
2666 static void kiblnd_base_shutdown(void)
2667 {
2668 	struct kib_sched_info	*sched;
2669 	int			i;
2670 
2671 	LASSERT(list_empty(&kiblnd_data.kib_devs));
2672 
2673 	CDEBUG(D_MALLOC, "before LND base cleanup: kmem %d\n",
2674 	       atomic_read(&libcfs_kmemory));
2675 
2676 	switch (kiblnd_data.kib_init) {
2677 	default:
2678 		LBUG();
2679 
2680 	case IBLND_INIT_ALL:
2681 	case IBLND_INIT_DATA:
2682 		LASSERT(kiblnd_data.kib_peers != NULL);
2683 		for (i = 0; i < kiblnd_data.kib_peer_hash_size; i++)
2684 			LASSERT(list_empty(&kiblnd_data.kib_peers[i]));
2685 		LASSERT(list_empty(&kiblnd_data.kib_connd_zombies));
2686 		LASSERT(list_empty(&kiblnd_data.kib_connd_conns));
2687 
2688 		/* flag threads to terminate; wake and wait for them to die */
2689 		kiblnd_data.kib_shutdown = 1;
2690 
2691 		/* NB: we really want to stop scheduler threads net by net
2692 		 * rather than for the whole module; this should be improved
2693 		 * once LNet supports dynamic configuration */
2694 		cfs_percpt_for_each(sched, i, kiblnd_data.kib_scheds)
2695 			wake_up_all(&sched->ibs_waitq);
2696 
2697 		wake_up_all(&kiblnd_data.kib_connd_waitq);
2698 		wake_up_all(&kiblnd_data.kib_failover_waitq);
2699 
2700 		i = 2;
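		/* (i & -i) == i only when i is a power of two, so the wait
		 * message below is promoted to a console warning at i = 4, 8,
		 * 16, ... and logged quietly at D_NET otherwise */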
2701 		while (atomic_read(&kiblnd_data.kib_nthreads) != 0) {
2702 			i++;
2703 			/* power of 2 ? */
2704 			CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET,
2705 			       "Waiting for %d threads to terminate\n",
2706 			       atomic_read(&kiblnd_data.kib_nthreads));
2707 			set_current_state(TASK_UNINTERRUPTIBLE);
2708 			schedule_timeout(cfs_time_seconds(1));
2709 		}
2710 
2711 		/* fall through */
2712 
2713 	case IBLND_INIT_NOTHING:
2714 		break;
2715 	}
2716 
2717 	if (kiblnd_data.kib_peers != NULL) {
2718 		LIBCFS_FREE(kiblnd_data.kib_peers,
2719 			    sizeof(struct list_head) *
2720 			    kiblnd_data.kib_peer_hash_size);
2721 	}
2722 
2723 	if (kiblnd_data.kib_scheds != NULL)
2724 		cfs_percpt_free(kiblnd_data.kib_scheds);
2725 
2726 	CDEBUG(D_MALLOC, "after LND base cleanup: kmem %d\n",
2727 	       atomic_read(&libcfs_kmemory));
2728 
2729 	kiblnd_data.kib_init = IBLND_INIT_NOTHING;
2730 	module_put(THIS_MODULE);
2731 }
2732 
2733 void kiblnd_shutdown(lnet_ni_t *ni)
2734 {
2735 	kib_net_t	*net = ni->ni_data;
2736 	rwlock_t     *g_lock = &kiblnd_data.kib_global_lock;
2737 	int	       i;
2738 	unsigned long     flags;
2739 
2740 	LASSERT(kiblnd_data.kib_init == IBLND_INIT_ALL);
2741 
2742 	if (net == NULL)
2743 		goto out;
2744 
2745 	CDEBUG(D_MALLOC, "before LND net cleanup: kmem %d\n",
2746 	       atomic_read(&libcfs_kmemory));
2747 
2748 	write_lock_irqsave(g_lock, flags);
2749 	net->ibn_shutdown = 1;
2750 	write_unlock_irqrestore(g_lock, flags);
2751 
2752 	switch (net->ibn_init) {
2753 	default:
2754 		LBUG();
2755 
2756 	case IBLND_INIT_ALL:
2757 		/* nuke all existing peers within this net */
2758 		kiblnd_del_peer(ni, LNET_NID_ANY);
2759 
2760 		/* Wait for all peer state to clean up */
2761 		i = 2;
2762 		while (atomic_read(&net->ibn_npeers) != 0) {
2763 			i++;
2764 			CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* 2**n? */
2765 			       "%s: waiting for %d peers to disconnect\n",
2766 			       libcfs_nid2str(ni->ni_nid),
2767 			       atomic_read(&net->ibn_npeers));
2768 			set_current_state(TASK_UNINTERRUPTIBLE);
2769 			schedule_timeout(cfs_time_seconds(1));
2770 		}
2771 
2772 		kiblnd_net_fini_pools(net);
2773 
2774 		write_lock_irqsave(g_lock, flags);
2775 		LASSERT(net->ibn_dev->ibd_nnets > 0);
2776 		net->ibn_dev->ibd_nnets--;
2777 		list_del(&net->ibn_list);
2778 		write_unlock_irqrestore(g_lock, flags);
2779 
2780 		/* fall through */
2781 
2782 	case IBLND_INIT_NOTHING:
2783 		LASSERT(atomic_read(&net->ibn_nconns) == 0);
2784 
2785 		if (net->ibn_dev != NULL &&
2786 		    net->ibn_dev->ibd_nnets == 0)
2787 			kiblnd_destroy_dev(net->ibn_dev);
2788 
2789 		break;
2790 	}
2791 
2792 	CDEBUG(D_MALLOC, "after LND net cleanup: kmem %d\n",
2793 	       atomic_read(&libcfs_kmemory));
2794 
2795 	net->ibn_init = IBLND_INIT_NOTHING;
2796 	ni->ni_data = NULL;
2797 
2798 	LIBCFS_FREE(net, sizeof(*net));
2799 
2800 out:
2801 	if (list_empty(&kiblnd_data.kib_devs))
2802 		kiblnd_base_shutdown();
2803 }
2804 
2805 static int kiblnd_base_startup(void)
2806 {
2807 	struct kib_sched_info	*sched;
2808 	int			rc;
2809 	int			i;
2810 
2811 	LASSERT(kiblnd_data.kib_init == IBLND_INIT_NOTHING);
2812 
2813 	try_module_get(THIS_MODULE);
2814 	/* zero pointers, flags etc */
2815 	memset(&kiblnd_data, 0, sizeof(kiblnd_data));
2816 
2817 	rwlock_init(&kiblnd_data.kib_global_lock);
2818 
2819 	INIT_LIST_HEAD(&kiblnd_data.kib_devs);
2820 	INIT_LIST_HEAD(&kiblnd_data.kib_failed_devs);
2821 
2822 	kiblnd_data.kib_peer_hash_size = IBLND_PEER_HASH_SIZE;
2823 	LIBCFS_ALLOC(kiblnd_data.kib_peers,
2824 		     sizeof(struct list_head) *
2825 			    kiblnd_data.kib_peer_hash_size);
2826 	if (kiblnd_data.kib_peers == NULL)
2827 		goto failed;
2828 	for (i = 0; i < kiblnd_data.kib_peer_hash_size; i++)
2829 		INIT_LIST_HEAD(&kiblnd_data.kib_peers[i]);
2830 
2831 	spin_lock_init(&kiblnd_data.kib_connd_lock);
2832 	INIT_LIST_HEAD(&kiblnd_data.kib_connd_conns);
2833 	INIT_LIST_HEAD(&kiblnd_data.kib_connd_zombies);
2834 	init_waitqueue_head(&kiblnd_data.kib_connd_waitq);
2835 	init_waitqueue_head(&kiblnd_data.kib_failover_waitq);
2836 
2837 	kiblnd_data.kib_scheds = cfs_percpt_alloc(lnet_cpt_table(),
2838 						  sizeof(*sched));
2839 	if (kiblnd_data.kib_scheds == NULL)
2840 		goto failed;
2841 
2842 	cfs_percpt_for_each(sched, i, kiblnd_data.kib_scheds) {
2843 		int	nthrs;
2844 
2845 		spin_lock_init(&sched->ibs_lock);
2846 		INIT_LIST_HEAD(&sched->ibs_conns);
2847 		init_waitqueue_head(&sched->ibs_waitq);
2848 
2849 		nthrs = cfs_cpt_weight(lnet_cpt_table(), i);
2850 		if (*kiblnd_tunables.kib_nscheds > 0) {
2851 			nthrs = min(nthrs, *kiblnd_tunables.kib_nscheds);
2852 		} else {
2853 			/* cap at half of the CPUs; the other half is
2854 			 * reserved for upper-layer modules */
2855 			nthrs = min(max(IBLND_N_SCHED, nthrs >> 1), nthrs);
2856 		}
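		/* e.g. a CPT with 8 CPUs and kib_nscheds unset is capped at
		 * min(max(IBLND_N_SCHED, 4), 8) scheduler threads */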
2857 
2858 		sched->ibs_nthreads_max = nthrs;
2859 		sched->ibs_cpt = i;
2860 	}
2861 
2862 	kiblnd_data.kib_error_qpa.qp_state = IB_QPS_ERR;
2863 
2864 	/* lists/ptrs/locks initialised */
2865 	kiblnd_data.kib_init = IBLND_INIT_DATA;
2866 	/*****************************************************/
2867 
2868 	rc = kiblnd_thread_start(kiblnd_connd, NULL, "kiblnd_connd");
2869 	if (rc != 0) {
2870 		CERROR("Can't spawn o2iblnd connd: %d\n", rc);
2871 		goto failed;
2872 	}
2873 
2874 	if (*kiblnd_tunables.kib_dev_failover != 0)
2875 		rc = kiblnd_thread_start(kiblnd_failover_thread, NULL,
2876 					 "kiblnd_failover");
2877 
2878 	if (rc != 0) {
2879 		CERROR("Can't spawn o2iblnd failover thread: %d\n", rc);
2880 		goto failed;
2881 	}
2882 
2883 	/* flag everything initialised */
2884 	kiblnd_data.kib_init = IBLND_INIT_ALL;
2885 	/*****************************************************/
2886 
2887 	return 0;
2888 
2889  failed:
2890 	kiblnd_base_shutdown();
2891 	return -ENETDOWN;
2892 }
2893 
2894 static int kiblnd_start_schedulers(struct kib_sched_info *sched)
2895 {
2896 	int	rc = 0;
2897 	int	nthrs;
2898 	int	i;
2899 
2900 	if (sched->ibs_nthreads == 0) {
2901 		if (*kiblnd_tunables.kib_nscheds > 0) {
2902 			nthrs = sched->ibs_nthreads_max;
2903 		} else {
2904 			nthrs = cfs_cpt_weight(lnet_cpt_table(),
2905 					       sched->ibs_cpt);
2906 			nthrs = min(max(IBLND_N_SCHED, nthrs >> 1), nthrs);
2907 			nthrs = min(IBLND_N_SCHED_HIGH, nthrs);
2908 		}
2909 	} else {
2910 		LASSERT(sched->ibs_nthreads <= sched->ibs_nthreads_max);
2911 		/* add one more thread if a new interface showed up */
2912 		nthrs = sched->ibs_nthreads < sched->ibs_nthreads_max;
2913 	}
2914 
2915 	for (i = 0; i < nthrs; i++) {
2916 		long	id;
2917 		char	name[20];
2918 
2919 		id = KIB_THREAD_ID(sched->ibs_cpt, sched->ibs_nthreads + i);
2920 		snprintf(name, sizeof(name), "kiblnd_sd_%02ld_%02ld",
2921 			 KIB_THREAD_CPT(id), KIB_THREAD_TID(id));
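		/* names come out as e.g. "kiblnd_sd_00_01" for the second
		 * scheduler thread on CPT 0 */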
2922 		rc = kiblnd_thread_start(kiblnd_scheduler, (void *)id, name);
2923 		if (rc == 0)
2924 			continue;
2925 
2926 		CERROR("Can't spawn thread %d for scheduler[%d]: %d\n",
2927 		       sched->ibs_cpt, sched->ibs_nthreads + i, rc);
2928 		break;
2929 	}
2930 
2931 	sched->ibs_nthreads += i;
2932 	return rc;
2933 }
2934 
2935 static int kiblnd_dev_start_threads(kib_dev_t *dev, int newdev, __u32 *cpts,
2936 				    int ncpts)
2937 {
2938 	int	cpt;
2939 	int	rc;
2940 	int	i;
2941 
2942 	for (i = 0; i < ncpts; i++) {
2943 		struct kib_sched_info *sched;
2944 
2945 		cpt = (cpts == NULL) ? i : cpts[i];
2946 		sched = kiblnd_data.kib_scheds[cpt];
2947 
2948 		if (!newdev && sched->ibs_nthreads > 0)
2949 			continue;
2950 
2951 		rc = kiblnd_start_schedulers(kiblnd_data.kib_scheds[cpt]);
2952 		if (rc != 0) {
2953 			CERROR("Failed to start scheduler threads for %s\n",
2954 			       dev->ibd_ifname);
2955 			return rc;
2956 		}
2957 	}
2958 	return 0;
2959 }
2960 
2961 static kib_dev_t *kiblnd_dev_search(char *ifname)
2962 {
2963 	kib_dev_t	*alias = NULL;
2964 	kib_dev_t	*dev;
2965 	char		*colon;
2966 	char		*colon2;
2967 
2968 	colon = strchr(ifname, ':');
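	/* an IPoIB alias such as "ib0:1" matches its base interface "ib0";
	 * the colons are temporarily NUL-terminated below so the base names
	 * can be compared, then restored */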
2969 	list_for_each_entry(dev, &kiblnd_data.kib_devs, ibd_list) {
2970 		if (strcmp(&dev->ibd_ifname[0], ifname) == 0)
2971 			return dev;
2972 
2973 		if (alias != NULL)
2974 			continue;
2975 
2976 		colon2 = strchr(dev->ibd_ifname, ':');
2977 		if (colon != NULL)
2978 			*colon = 0;
2979 		if (colon2 != NULL)
2980 			*colon2 = 0;
2981 
2982 		if (strcmp(&dev->ibd_ifname[0], ifname) == 0)
2983 			alias = dev;
2984 
2985 		if (colon != NULL)
2986 			*colon = ':';
2987 		if (colon2 != NULL)
2988 			*colon2 = ':';
2989 	}
2990 	return alias;
2991 }
2992 
2993 int kiblnd_startup(lnet_ni_t *ni)
2994 {
2995 	char		     *ifname;
2996 	kib_dev_t		*ibdev = NULL;
2997 	kib_net_t		*net;
2998 	struct timeval	    tv;
2999 	unsigned long	     flags;
3000 	int		       rc;
3001 	int			  newdev;
3002 
3003 	LASSERT(ni->ni_lnd == &the_o2iblnd);
3004 
3005 	if (kiblnd_data.kib_init == IBLND_INIT_NOTHING) {
3006 		rc = kiblnd_base_startup();
3007 		if (rc != 0)
3008 			return rc;
3009 	}
3010 
3011 	LIBCFS_ALLOC(net, sizeof(*net));
3012 	ni->ni_data = net;
3013 	if (net == NULL)
3014 		goto net_failed;
3015 
3016 	do_gettimeofday(&tv);
3017 	net->ibn_incarnation = (((__u64)tv.tv_sec) * 1000000) + tv.tv_usec;
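	/* incarnation is the wall-clock time in microseconds, so every
	 * (re)start of this net gets a fresh value */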
3018 
3019 	ni->ni_peertimeout    = *kiblnd_tunables.kib_peertimeout;
3020 	ni->ni_maxtxcredits   = *kiblnd_tunables.kib_credits;
3021 	ni->ni_peertxcredits  = *kiblnd_tunables.kib_peertxcredits;
3022 	ni->ni_peerrtrcredits = *kiblnd_tunables.kib_peerrtrcredits;
3023 
3024 	if (ni->ni_interfaces[0] != NULL) {
3025 		/* Use the IPoIB interface specified in 'networks=' */
3026 
3027 		CLASSERT(LNET_MAX_INTERFACES > 1);
3028 		if (ni->ni_interfaces[1] != NULL) {
3029 			CERROR("Multiple interfaces not supported\n");
3030 			goto failed;
3031 		}
3032 
3033 		ifname = ni->ni_interfaces[0];
3034 	} else {
3035 		ifname = *kiblnd_tunables.kib_default_ipif;
3036 	}
3037 
3038 	if (strlen(ifname) >= sizeof(ibdev->ibd_ifname)) {
3039 		CERROR("IPoIB interface name too long: %s\n", ifname);
3040 		goto failed;
3041 	}
3042 
3043 	ibdev = kiblnd_dev_search(ifname);
3044 
3045 	newdev = ibdev == NULL;
3046 	/* hmm...create kib_dev even for alias */
3047 	if (ibdev == NULL || strcmp(&ibdev->ibd_ifname[0], ifname) != 0)
3048 		ibdev = kiblnd_create_dev(ifname);
3049 
3050 	if (ibdev == NULL)
3051 		goto failed;
3052 
3053 	net->ibn_dev = ibdev;
3054 	ni->ni_nid = LNET_MKNID(LNET_NIDNET(ni->ni_nid), ibdev->ibd_ifip);
3055 
3056 	rc = kiblnd_dev_start_threads(ibdev, newdev,
3057 				      ni->ni_cpts, ni->ni_ncpts);
3058 	if (rc != 0)
3059 		goto failed;
3060 
3061 	rc = kiblnd_net_init_pools(net, ni->ni_cpts, ni->ni_ncpts);
3062 	if (rc != 0) {
3063 		CERROR("Failed to initialize NI pools: %d\n", rc);
3064 		goto failed;
3065 	}
3066 
3067 	write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
3068 	ibdev->ibd_nnets++;
3069 	list_add_tail(&net->ibn_list, &ibdev->ibd_nets);
3070 	write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
3071 
3072 	net->ibn_init = IBLND_INIT_ALL;
3073 
3074 	return 0;
3075 
3076 failed:
3077 	if (net->ibn_dev == NULL && ibdev != NULL)
3078 		kiblnd_destroy_dev(ibdev);
3079 
3080 net_failed:
3081 	kiblnd_shutdown(ni);
3082 
3083 	CDEBUG(D_NET, "kiblnd_startup failed\n");
3084 	return -ENETDOWN;
3085 }
3086 
3087 static void __exit kiblnd_module_fini(void)
3088 {
3089 	lnet_unregister_lnd(&the_o2iblnd);
3090 }
3091 
3092 static int __init kiblnd_module_init(void)
3093 {
3094 	int    rc;
3095 
3096 	CLASSERT(sizeof(kib_msg_t) <= IBLND_MSG_SIZE);
3097 	CLASSERT(offsetof(kib_msg_t,
3098 		ibm_u.get.ibgm_rd.rd_frags[IBLND_MAX_RDMA_FRAGS])
3099 		<= IBLND_MSG_SIZE);
3100 	CLASSERT(offsetof(kib_msg_t,
3101 		ibm_u.putack.ibpam_rd.rd_frags[IBLND_MAX_RDMA_FRAGS])
3102 		<= IBLND_MSG_SIZE);
3103 
3104 	rc = kiblnd_tunables_init();
3105 	if (rc != 0)
3106 		return rc;
3107 
3108 	lnet_register_lnd(&the_o2iblnd);
3109 
3110 	return 0;
3111 }
3112 
3113 MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
3114 MODULE_DESCRIPTION("Kernel OpenIB gen2 LND v2.00");
3115 MODULE_LICENSE("GPL");
3116 
3117 module_init(kiblnd_module_init);
3118 module_exit(kiblnd_module_fini);
3119