1 /*
2 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
3 *
4 * Copyright (c) 2011, 2012, Intel Corporation.
5 *
6 * Author: Zach Brown <zab@zabbo.net>
7 * Author: Peter J. Braam <braam@clusterfs.com>
8 * Author: Phil Schwan <phil@clusterfs.com>
9 * Author: Eric Barton <eric@bartonsoftware.com>
10 *
11 * This file is part of Portals, http://www.sf.net/projects/sandiaportals/
12 *
13 * Portals is free software; you can redistribute it and/or
14 * modify it under the terms of version 2 of the GNU General Public
15 * License as published by the Free Software Foundation.
16 *
17 * Portals is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with Portals; if not, write to the Free Software
24 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
25 */
26
27 #include "socklnd.h"
28
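/* Allocate a tx descriptor of 'size' bytes.  NOOP txs are recycled from
 * the ksnd_idle_noop_txs free list when possible; otherwise fall back to
 * LIBCFS_ALLOC().  Returns NULL if allocation fails. */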
29 ksock_tx_t *
30 ksocknal_alloc_tx(int type, int size)
31 {
32 ksock_tx_t *tx = NULL;
33
34 if (type == KSOCK_MSG_NOOP) {
35 LASSERT(size == KSOCK_NOOP_TX_SIZE);
36
37 /* searching for a noop tx in free list */
38 spin_lock(&ksocknal_data.ksnd_tx_lock);
39
40 if (!list_empty(&ksocknal_data.ksnd_idle_noop_txs)) {
41 tx = list_entry(ksocknal_data.ksnd_idle_noop_txs.next,
42 ksock_tx_t, tx_list);
43 LASSERT(tx->tx_desc_size == size);
44 list_del(&tx->tx_list);
45 }
46
47 spin_unlock(&ksocknal_data.ksnd_tx_lock);
48 }
49
50 if (tx == NULL)
51 LIBCFS_ALLOC(tx, size);
52
53 if (tx == NULL)
54 return NULL;
55
56 atomic_set(&tx->tx_refcount, 1);
57 tx->tx_zc_aborted = 0;
58 tx->tx_zc_capable = 0;
59 tx->tx_zc_checked = 0;
60 tx->tx_desc_size = size;
61
62 atomic_inc(&ksocknal_data.ksnd_nactive_txs);
63
64 return tx;
65 }
66
67 ksock_tx_t *
68 ksocknal_alloc_tx_noop(__u64 cookie, int nonblk)
69 {
70 ksock_tx_t *tx;
71
72 tx = ksocknal_alloc_tx(KSOCK_MSG_NOOP, KSOCK_NOOP_TX_SIZE);
73 if (tx == NULL) {
74 CERROR("Can't allocate noop tx desc\n");
75 return NULL;
76 }
77
78 tx->tx_conn = NULL;
79 tx->tx_lnetmsg = NULL;
80 tx->tx_kiov = NULL;
81 tx->tx_nkiov = 0;
82 tx->tx_iov = tx->tx_frags.virt.iov;
83 tx->tx_niov = 1;
84 tx->tx_nonblk = nonblk;
85
86 socklnd_init_msg(&tx->tx_msg, KSOCK_MSG_NOOP);
87 tx->tx_msg.ksm_zc_cookies[1] = cookie;
88
89 return tx;
90 }
91
92
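/* Release a tx descriptor: NOOP txs are parked back on the free list,
 * everything else is freed outright. */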
93 void
94 ksocknal_free_tx (ksock_tx_t *tx)
95 {
96 atomic_dec(&ksocknal_data.ksnd_nactive_txs);
97
98 if (tx->tx_lnetmsg == NULL && tx->tx_desc_size == KSOCK_NOOP_TX_SIZE) {
99 /* it's a noop tx */
100 spin_lock(&ksocknal_data.ksnd_tx_lock);
101
102 list_add(&tx->tx_list, &ksocknal_data.ksnd_idle_noop_txs);
103
104 spin_unlock(&ksocknal_data.ksnd_tx_lock);
105 } else {
106 LIBCFS_FREE(tx, tx->tx_desc_size);
107 }
108 }
109
110 static int
111 ksocknal_send_iov (ksock_conn_t *conn, ksock_tx_t *tx)
112 {
113 struct kvec *iov = tx->tx_iov;
114 int nob;
115 int rc;
116
117 LASSERT (tx->tx_niov > 0);
118
119 /* Never touch tx->tx_iov inside ksocknal_lib_send_iov() */
120 rc = ksocknal_lib_send_iov(conn, tx);
121
122 if (rc <= 0) /* sent nothing? */
123 return rc;
124
125 nob = rc;
126 LASSERT (nob <= tx->tx_resid);
127 tx->tx_resid -= nob;
128
129 /* "consume" iov */
130 do {
131 LASSERT (tx->tx_niov > 0);
132
133 if (nob < (int) iov->iov_len) {
134 iov->iov_base = (void *)((char *)iov->iov_base + nob);
135 iov->iov_len -= nob;
136 return rc;
137 }
138
139 nob -= iov->iov_len;
140 tx->tx_iov = ++iov;
141 tx->tx_niov--;
142 } while (nob != 0);
143
144 return rc;
145 }
146
147 static int
148 ksocknal_send_kiov (ksock_conn_t *conn, ksock_tx_t *tx)
149 {
150 lnet_kiov_t *kiov = tx->tx_kiov;
151 int nob;
152 int rc;
153
154 LASSERT (tx->tx_niov == 0);
155 LASSERT (tx->tx_nkiov > 0);
156
157 /* Never touch tx->tx_kiov inside ksocknal_lib_send_kiov() */
158 rc = ksocknal_lib_send_kiov(conn, tx);
159
160 if (rc <= 0) /* sent nothing? */
161 return rc;
162
163 nob = rc;
164 LASSERT (nob <= tx->tx_resid);
165 tx->tx_resid -= nob;
166
167 /* "consume" kiov */
168 do {
169 LASSERT(tx->tx_nkiov > 0);
170
171 if (nob < (int)kiov->kiov_len) {
172 kiov->kiov_offset += nob;
173 kiov->kiov_len -= nob;
174 return rc;
175 }
176
177 nob -= (int)kiov->kiov_len;
178 tx->tx_kiov = ++kiov;
179 tx->tx_nkiov--;
180 } while (nob != 0);
181
182 return rc;
183 }
184
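/* Push 'tx' out on the socket, consuming iov/kiov fragments as they
 * complete.  Returns 0 once everything has been sent, -EAGAIN/-ENOMEM if
 * the socket backed up, or a fatal error. */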
185 static int
186 ksocknal_transmit (ksock_conn_t *conn, ksock_tx_t *tx)
187 {
188 int rc;
189 int bufnob;
190
191 if (ksocknal_data.ksnd_stall_tx != 0) {
192 set_current_state(TASK_UNINTERRUPTIBLE);
193 schedule_timeout(cfs_time_seconds(ksocknal_data.ksnd_stall_tx));
194 }
195
196 LASSERT (tx->tx_resid != 0);
197
198 rc = ksocknal_connsock_addref(conn);
199 if (rc != 0) {
200 LASSERT (conn->ksnc_closing);
201 return -ESHUTDOWN;
202 }
203
204 do {
205 if (ksocknal_data.ksnd_enomem_tx > 0) {
206 /* testing... */
207 ksocknal_data.ksnd_enomem_tx--;
208 rc = -EAGAIN;
209 } else if (tx->tx_niov != 0) {
210 rc = ksocknal_send_iov (conn, tx);
211 } else {
212 rc = ksocknal_send_kiov (conn, tx);
213 }
214
215 bufnob = conn->ksnc_sock->sk->sk_wmem_queued;
216 if (rc > 0) /* sent something? */
217 conn->ksnc_tx_bufnob += rc; /* account it */
218
219 if (bufnob < conn->ksnc_tx_bufnob) {
220 /* allocated send buffer bytes < computed; infer
221 * something got ACKed */
222 conn->ksnc_tx_deadline =
223 cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
224 conn->ksnc_peer->ksnp_last_alive = cfs_time_current();
225 conn->ksnc_tx_bufnob = bufnob;
226 mb();
227 }
228
229 if (rc <= 0) { /* Didn't write anything? */
230
231 if (rc == 0) /* some stacks return 0 instead of -EAGAIN */
232 rc = -EAGAIN;
233
234 /* Check if EAGAIN is due to memory pressure */
235 if (rc == -EAGAIN && ksocknal_lib_memory_pressure(conn))
236 rc = -ENOMEM;
237
238 break;
239 }
240
241 /* socket's wmem_queued now includes 'rc' bytes */
242 atomic_sub (rc, &conn->ksnc_tx_nob);
243 rc = 0;
244
245 } while (tx->tx_resid != 0);
246
247 ksocknal_connsock_decref(conn);
248 return rc;
249 }
250
251 static int
252 ksocknal_recv_iov (ksock_conn_t *conn)
253 {
254 struct kvec *iov = conn->ksnc_rx_iov;
255 int nob;
256 int rc;
257
258 LASSERT (conn->ksnc_rx_niov > 0);
259
260 /* Never touch conn->ksnc_rx_iov or change connection
261 * status inside ksocknal_lib_recv_iov */
262 rc = ksocknal_lib_recv_iov(conn);
263
264 if (rc <= 0)
265 return rc;
266
267 /* received something... */
268 nob = rc;
269
270 conn->ksnc_peer->ksnp_last_alive = cfs_time_current();
271 conn->ksnc_rx_deadline =
272 cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
273 mb(); /* order with setting rx_started */
274 conn->ksnc_rx_started = 1;
275
276 conn->ksnc_rx_nob_wanted -= nob;
277 conn->ksnc_rx_nob_left -= nob;
278
279 do {
280 LASSERT (conn->ksnc_rx_niov > 0);
281
282 if (nob < (int)iov->iov_len) {
283 iov->iov_len -= nob;
284 iov->iov_base += nob;
285 return -EAGAIN;
286 }
287
288 nob -= iov->iov_len;
289 conn->ksnc_rx_iov = ++iov;
290 conn->ksnc_rx_niov--;
291 } while (nob != 0);
292
293 return rc;
294 }
295
296 static int
297 ksocknal_recv_kiov (ksock_conn_t *conn)
298 {
299 lnet_kiov_t *kiov = conn->ksnc_rx_kiov;
300 int nob;
301 int rc;
302 LASSERT (conn->ksnc_rx_nkiov > 0);
303
304 /* Never touch conn->ksnc_rx_kiov or change connection
305 * status inside ksocknal_lib_recv_iov */
306 rc = ksocknal_lib_recv_kiov(conn);
307
308 if (rc <= 0)
309 return rc;
310
311 /* received something... */
312 nob = rc;
313
314 conn->ksnc_peer->ksnp_last_alive = cfs_time_current();
315 conn->ksnc_rx_deadline =
316 cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
317 mb(); /* order with setting rx_started */
318 conn->ksnc_rx_started = 1;
319
320 conn->ksnc_rx_nob_wanted -= nob;
321 conn->ksnc_rx_nob_left -= nob;
322
323 do {
324 LASSERT (conn->ksnc_rx_nkiov > 0);
325
326 if (nob < (int) kiov->kiov_len) {
327 kiov->kiov_offset += nob;
328 kiov->kiov_len -= nob;
329 return -EAGAIN;
330 }
331
332 nob -= kiov->kiov_len;
333 conn->ksnc_rx_kiov = ++kiov;
334 conn->ksnc_rx_nkiov--;
335 } while (nob != 0);
336
337 return 1;
338 }
339
340 static int
341 ksocknal_receive (ksock_conn_t *conn)
342 {
343 /* Return 1 on success, 0 on EOF, < 0 on error.
344 * Caller checks ksnc_rx_nob_wanted to determine
345 * progress/completion. */
346 int rc;
347
348 if (ksocknal_data.ksnd_stall_rx != 0) {
349 set_current_state(TASK_UNINTERRUPTIBLE);
350 schedule_timeout(cfs_time_seconds(ksocknal_data.ksnd_stall_rx));
351 }
352
353 rc = ksocknal_connsock_addref(conn);
354 if (rc != 0) {
355 LASSERT (conn->ksnc_closing);
356 return -ESHUTDOWN;
357 }
358
359 for (;;) {
360 if (conn->ksnc_rx_niov != 0)
361 rc = ksocknal_recv_iov (conn);
362 else
363 rc = ksocknal_recv_kiov (conn);
364
365 if (rc <= 0) {
366 /* error/EOF or partial receive */
367 if (rc == -EAGAIN) {
368 rc = 1;
369 } else if (rc == 0 && conn->ksnc_rx_started) {
370 /* EOF in the middle of a message */
371 rc = -EPROTO;
372 }
373 break;
374 }
375
376 /* Completed a fragment */
377
378 if (conn->ksnc_rx_nob_wanted == 0) {
379 rc = 1;
380 break;
381 }
382 }
383
384 ksocknal_connsock_decref(conn);
385 return rc;
386 }
387
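/* Finalize a tx: drop the conn ref it holds, recycle or free the
 * descriptor and complete the LNet message (if any) with 0 or -EIO. */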
388 void
389 ksocknal_tx_done (lnet_ni_t *ni, ksock_tx_t *tx)
390 {
391 lnet_msg_t *lnetmsg = tx->tx_lnetmsg;
392 int rc = (tx->tx_resid == 0 && !tx->tx_zc_aborted) ? 0 : -EIO;
393
394 LASSERT(ni != NULL || tx->tx_conn != NULL);
395
396 if (tx->tx_conn != NULL)
397 ksocknal_conn_decref(tx->tx_conn);
398
399 if (ni == NULL && tx->tx_conn != NULL)
400 ni = tx->tx_conn->ksnc_peer->ksnp_ni;
401
402 ksocknal_free_tx (tx);
403 if (lnetmsg != NULL) /* KSOCK_MSG_NOOP go without lnetmsg */
404 lnet_finalize (ni, lnetmsg, rc);
405 }
406
407 void
408 ksocknal_txlist_done (lnet_ni_t *ni, struct list_head *txlist, int error)
409 {
410 ksock_tx_t *tx;
411
412 while (!list_empty (txlist)) {
413 tx = list_entry (txlist->next, ksock_tx_t, tx_list);
414
415 if (error && tx->tx_lnetmsg != NULL) {
416 CNETERR("Deleting packet type %d len %d %s->%s\n",
417 le32_to_cpu (tx->tx_lnetmsg->msg_hdr.type),
418 le32_to_cpu (tx->tx_lnetmsg->msg_hdr.payload_length),
419 libcfs_nid2str(le64_to_cpu(tx->tx_lnetmsg->msg_hdr.src_nid)),
420 libcfs_nid2str(le64_to_cpu(tx->tx_lnetmsg->msg_hdr.dest_nid)));
421 } else if (error) {
422 CNETERR("Deleting noop packet\n");
423 }
424
425 list_del (&tx->tx_list);
426
427 LASSERT (atomic_read(&tx->tx_refcount) == 1);
428 ksocknal_tx_done (ni, tx);
429 }
430 }
431
432 static void
433 ksocknal_check_zc_req(ksock_tx_t *tx)
434 {
435 ksock_conn_t *conn = tx->tx_conn;
436 ksock_peer_t *peer = conn->ksnc_peer;
437
438 /* Set tx_msg.ksm_zc_cookies[0] to a unique non-zero cookie and add tx
439 * to ksnp_zc_req_list if some fragment of this message should be sent
440 * zero-copy. Our peer will send an ACK containing this cookie when
441 * she has received this message to tell us we can signal completion.
442 * tx_msg.ksm_zc_cookies[0] remains non-zero while tx is on
443 * ksnp_zc_req_list. */
444 LASSERT (tx->tx_msg.ksm_type != KSOCK_MSG_NOOP);
445 LASSERT (tx->tx_zc_capable);
446
447 tx->tx_zc_checked = 1;
448
449 if (conn->ksnc_proto == &ksocknal_protocol_v1x ||
450 !conn->ksnc_zc_capable)
451 return;
452
453 /* assign cookie and queue tx to pending list, it will be released when
454 * a matching ack is received. See ksocknal_handle_zcack() */
455
456 ksocknal_tx_addref(tx);
457
458 spin_lock(&peer->ksnp_lock);
459
460 /* ZC_REQ is going to be pinned to the peer */
461 tx->tx_deadline =
462 cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
463
464 LASSERT (tx->tx_msg.ksm_zc_cookies[0] == 0);
465
466 tx->tx_msg.ksm_zc_cookies[0] = peer->ksnp_zc_next_cookie++;
467
468 if (peer->ksnp_zc_next_cookie == 0)
469 peer->ksnp_zc_next_cookie = SOCKNAL_KEEPALIVE_PING + 1;
470
471 list_add_tail(&tx->tx_zc_list, &peer->ksnp_zc_req_list);
472
473 spin_unlock(&peer->ksnp_lock);
474 }
475
476 static void
477 ksocknal_uncheck_zc_req(ksock_tx_t *tx)
478 {
479 ksock_peer_t *peer = tx->tx_conn->ksnc_peer;
480
481 LASSERT(tx->tx_msg.ksm_type != KSOCK_MSG_NOOP);
482 LASSERT(tx->tx_zc_capable);
483
484 tx->tx_zc_checked = 0;
485
486 spin_lock(&peer->ksnp_lock);
487
488 if (tx->tx_msg.ksm_zc_cookies[0] == 0) {
489 /* Not waiting for an ACK */
490 spin_unlock(&peer->ksnp_lock);
491 return;
492 }
493
494 tx->tx_msg.ksm_zc_cookies[0] = 0;
495 list_del(&tx->tx_zc_list);
496
497 spin_unlock(&peer->ksnp_lock);
498
499 ksocknal_tx_decref(tx);
500 }
501
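/* Send 'tx' on 'conn' and classify the result: 0 = fully sent,
 * -EAGAIN = socket full, -ENOMEM = queue the conn for the reaper to
 * retry later, any other error closes the connection. */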
502 static int
503 ksocknal_process_transmit (ksock_conn_t *conn, ksock_tx_t *tx)
504 {
505 int rc;
506
507 if (tx->tx_zc_capable && !tx->tx_zc_checked)
508 ksocknal_check_zc_req(tx);
509
510 rc = ksocknal_transmit (conn, tx);
511
512 CDEBUG (D_NET, "send(%d) %d\n", tx->tx_resid, rc);
513
514 if (tx->tx_resid == 0) {
515 /* Sent everything OK */
516 LASSERT (rc == 0);
517
518 return 0;
519 }
520
521 if (rc == -EAGAIN)
522 return rc;
523
524 if (rc == -ENOMEM) {
525 static int counter;
526
527 counter++; /* exponential backoff warnings */
528 if ((counter & (-counter)) == counter)
529 CWARN("%u ENOMEM tx %p (%u allocated)\n",
530 counter, conn, atomic_read(&libcfs_kmemory));
531
532 /* Queue on ksnd_enomem_conns for retry after a timeout */
533 spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
534
535 /* enomem list takes over scheduler's ref... */
536 LASSERT (conn->ksnc_tx_scheduled);
537 list_add_tail(&conn->ksnc_tx_list,
538 &ksocknal_data.ksnd_enomem_conns);
539 if (!cfs_time_aftereq(cfs_time_add(cfs_time_current(),
540 SOCKNAL_ENOMEM_RETRY),
541 ksocknal_data.ksnd_reaper_waketime))
542 wake_up (&ksocknal_data.ksnd_reaper_waitq);
543
544 spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
545 return rc;
546 }
547
548 /* Actual error */
549 LASSERT (rc < 0);
550
551 if (!conn->ksnc_closing) {
552 switch (rc) {
553 case -ECONNRESET:
554 LCONSOLE_WARN("Host %pI4h reset our connection while we were sending data; it may have rebooted.\n",
555 &conn->ksnc_ipaddr);
556 break;
557 default:
558 LCONSOLE_WARN("There was an unexpected network error while writing to %pI4h: %d.\n",
559 &conn->ksnc_ipaddr, rc);
560 break;
561 }
562 CDEBUG(D_NET, "[%p] Error %d on write to %s ip %pI4h:%d\n",
563 conn, rc,
564 libcfs_id2str(conn->ksnc_peer->ksnp_id),
565 &conn->ksnc_ipaddr,
566 conn->ksnc_port);
567 }
568
569 if (tx->tx_zc_checked)
570 ksocknal_uncheck_zc_req(tx);
571
572 /* it's not an error if conn is being closed */
573 ksocknal_close_conn_and_siblings (conn,
574 (conn->ksnc_closing) ? 0 : rc);
575
576 return rc;
577 }
578
579 static void
580 ksocknal_launch_connection_locked (ksock_route_t *route)
581 {
582
583 /* called holding write lock on ksnd_global_lock */
584
585 LASSERT (!route->ksnr_scheduled);
586 LASSERT (!route->ksnr_connecting);
587 LASSERT ((ksocknal_route_mask() & ~route->ksnr_connected) != 0);
588
589 route->ksnr_scheduled = 1; /* scheduling conn for connd */
590 ksocknal_route_addref(route); /* extra ref for connd */
591
592 spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
593
594 list_add_tail(&route->ksnr_connd_list,
595 &ksocknal_data.ksnd_connd_routes);
596 wake_up(&ksocknal_data.ksnd_connd_waitq);
597
598 spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
599 }
600
601 void
602 ksocknal_launch_all_connections_locked (ksock_peer_t *peer)
603 {
604 ksock_route_t *route;
605
606 /* called holding write lock on ksnd_global_lock */
607 for (;;) {
608 /* launch any/all connections that need it */
609 route = ksocknal_find_connectable_route_locked(peer);
610 if (route == NULL)
611 return;
612
613 ksocknal_launch_connection_locked(route);
614 }
615 }
616
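/* Pick the best connection for 'tx': prefer a typed match over a "may"
 * fallback, and within each class take the connection with the least
 * data queued (round-robin on ties when enabled). */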
617 ksock_conn_t *
618 ksocknal_find_conn_locked(ksock_peer_t *peer, ksock_tx_t *tx, int nonblk)
619 {
620 struct list_head *tmp;
621 ksock_conn_t *conn;
622 ksock_conn_t *typed = NULL;
623 ksock_conn_t *fallback = NULL;
624 int tnob = 0;
625 int fnob = 0;
626
627 list_for_each (tmp, &peer->ksnp_conns) {
628 ksock_conn_t *c = list_entry(tmp, ksock_conn_t, ksnc_list);
629 int nob = atomic_read(&c->ksnc_tx_nob) +
630 c->ksnc_sock->sk->sk_wmem_queued;
631 int rc;
632
633 LASSERT (!c->ksnc_closing);
634 LASSERT (c->ksnc_proto != NULL &&
635 c->ksnc_proto->pro_match_tx != NULL);
636
637 rc = c->ksnc_proto->pro_match_tx(c, tx, nonblk);
638
639 switch (rc) {
640 default:
641 LBUG();
642 case SOCKNAL_MATCH_NO: /* protocol rejected the tx */
643 continue;
644
645 case SOCKNAL_MATCH_YES: /* typed connection */
646 if (typed == NULL || tnob > nob ||
647 (tnob == nob && *ksocknal_tunables.ksnd_round_robin &&
648 cfs_time_after(typed->ksnc_tx_last_post, c->ksnc_tx_last_post))) {
649 typed = c;
650 tnob = nob;
651 }
652 break;
653
654 case SOCKNAL_MATCH_MAY: /* fallback connection */
655 if (fallback == NULL || fnob > nob ||
656 (fnob == nob && *ksocknal_tunables.ksnd_round_robin &&
657 cfs_time_after(fallback->ksnc_tx_last_post, c->ksnc_tx_last_post))) {
658 fallback = c;
659 fnob = nob;
660 }
661 break;
662 }
663 }
664
665 /* prefer the typed selection */
666 conn = (typed != NULL) ? typed : fallback;
667
668 if (conn != NULL)
669 conn->ksnc_tx_last_post = cfs_time_current();
670
671 return conn;
672 }
673
674 void
675 ksocknal_tx_prep(ksock_conn_t *conn, ksock_tx_t *tx)
676 {
677 conn->ksnc_proto->pro_pack(tx);
678
679 atomic_add (tx->tx_nob, &conn->ksnc_tx_nob);
680 ksocknal_conn_addref(conn); /* +1 ref for tx */
681 tx->tx_conn = conn;
682 }
683
684 void
685 ksocknal_queue_tx_locked (ksock_tx_t *tx, ksock_conn_t *conn)
686 {
687 ksock_sched_t *sched = conn->ksnc_scheduler;
688 ksock_msg_t *msg = &tx->tx_msg;
689 ksock_tx_t *ztx = NULL;
690 int bufnob = 0;
691
692 /* called holding global lock (read or irq-write) and caller may
693 * not have dropped this lock between finding conn and calling me,
694 * so we don't need the {get,put}connsock dance to deref
695 * ksnc_sock... */
696 LASSERT(!conn->ksnc_closing);
697
698 CDEBUG(D_NET, "Sending to %s ip %pI4h:%d\n",
699 libcfs_id2str(conn->ksnc_peer->ksnp_id),
700 &conn->ksnc_ipaddr,
701 conn->ksnc_port);
702
703 ksocknal_tx_prep(conn, tx);
704
705 /* Ensure the frags we've been given EXACTLY match the number of
706 * bytes we want to send. Many TCP/IP stacks disregard any total
707 * size parameters passed to them and just look at the frags.
708 *
709 * We always expect at least 1 mapped fragment containing the
710 * complete ksocknal message header. */
711 LASSERT (lnet_iov_nob (tx->tx_niov, tx->tx_iov) +
712 lnet_kiov_nob(tx->tx_nkiov, tx->tx_kiov) ==
713 (unsigned int)tx->tx_nob);
714 LASSERT (tx->tx_niov >= 1);
715 LASSERT (tx->tx_resid == tx->tx_nob);
716
717 CDEBUG (D_NET, "Packet %p type %d, nob %d niov %d nkiov %d\n",
718 tx, (tx->tx_lnetmsg != NULL) ? tx->tx_lnetmsg->msg_hdr.type:
719 KSOCK_MSG_NOOP,
720 tx->tx_nob, tx->tx_niov, tx->tx_nkiov);
721
722 /*
723 * FIXME: SOCK_WMEM_QUEUED and SOCK_ERROR could block in __DARWIN8__
724 * but they're used inside spinlocks a lot.
725 */
726 bufnob = conn->ksnc_sock->sk->sk_wmem_queued;
727 spin_lock_bh(&sched->kss_lock);
728
729 if (list_empty(&conn->ksnc_tx_queue) && bufnob == 0) {
730 /* First packet starts the timeout */
731 conn->ksnc_tx_deadline =
732 cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
733 if (conn->ksnc_tx_bufnob > 0) /* something got ACKed */
734 conn->ksnc_peer->ksnp_last_alive = cfs_time_current();
735 conn->ksnc_tx_bufnob = 0;
736 mb(); /* order with adding to tx_queue */
737 }
738
739 if (msg->ksm_type == KSOCK_MSG_NOOP) {
740 /* The packet is a noop ZC ACK; try to piggyback the ack_cookie
741 * on a normal packet so I don't need to send it */
742 LASSERT (msg->ksm_zc_cookies[1] != 0);
743 LASSERT (conn->ksnc_proto->pro_queue_tx_zcack != NULL);
744
745 if (conn->ksnc_proto->pro_queue_tx_zcack(conn, tx, 0))
746 ztx = tx; /* ZC ACK piggybacked on ztx; release tx later */
747
748 } else {
749 /* It's a normal packet - can it piggyback a noop zc-ack that
750 * has been queued already? */
751 LASSERT (msg->ksm_zc_cookies[1] == 0);
752 LASSERT (conn->ksnc_proto->pro_queue_tx_msg != NULL);
753
754 ztx = conn->ksnc_proto->pro_queue_tx_msg(conn, tx);
755 /* ztx will be released later */
756 }
757
758 if (ztx != NULL) {
759 atomic_sub (ztx->tx_nob, &conn->ksnc_tx_nob);
760 list_add_tail(&ztx->tx_list, &sched->kss_zombie_noop_txs);
761 }
762
763 if (conn->ksnc_tx_ready && /* able to send */
764 !conn->ksnc_tx_scheduled) { /* not scheduled to send */
765 /* +1 ref for scheduler */
766 ksocknal_conn_addref(conn);
767 list_add_tail (&conn->ksnc_tx_list,
768 &sched->kss_tx_conns);
769 conn->ksnc_tx_scheduled = 1;
770 wake_up (&sched->kss_waitq);
771 }
772
773 spin_unlock_bh(&sched->kss_lock);
774 }
775
776
777 ksock_route_t *
778 ksocknal_find_connectable_route_locked (ksock_peer_t *peer)
779 {
780 unsigned long now = cfs_time_current();
781 struct list_head *tmp;
782 ksock_route_t *route;
783
784 list_for_each (tmp, &peer->ksnp_routes) {
785 route = list_entry (tmp, ksock_route_t, ksnr_list);
786
787 LASSERT (!route->ksnr_connecting || route->ksnr_scheduled);
788
789 if (route->ksnr_scheduled) /* connections being established */
790 continue;
791
792 /* all route types connected ? */
793 if ((ksocknal_route_mask() & ~route->ksnr_connected) == 0)
794 continue;
795
796 if (!(route->ksnr_retry_interval == 0 || /* first attempt */
797 cfs_time_aftereq(now, route->ksnr_timeout))) {
798 CDEBUG(D_NET,
799 "Too soon to retry route %pI4h (cnted %d, interval %ld, %ld secs later)\n",
800 &route->ksnr_ipaddr,
801 route->ksnr_connected,
802 route->ksnr_retry_interval,
803 cfs_duration_sec(route->ksnr_timeout - now));
804 continue;
805 }
806
807 return route;
808 }
809
810 return NULL;
811 }
812
813 ksock_route_t *
814 ksocknal_find_connecting_route_locked (ksock_peer_t *peer)
815 {
816 struct list_head *tmp;
817 ksock_route_t *route;
818
819 list_for_each (tmp, &peer->ksnp_routes) {
820 route = list_entry (tmp, ksock_route_t, ksnr_list);
821
822 LASSERT (!route->ksnr_connecting || route->ksnr_scheduled);
823
824 if (route->ksnr_scheduled)
825 return route;
826 }
827
828 return NULL;
829 }
830
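/* Find (or create) the peer for 'id', launch any connections it still
 * needs, then either queue 'tx' on a usable connection or park it on the
 * peer's tx queue until one is established.  Returns 0 on success or a
 * negative errno (e.g. -EHOSTUNREACH) on failure. */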
831 int
832 ksocknal_launch_packet (lnet_ni_t *ni, ksock_tx_t *tx, lnet_process_id_t id)
833 {
834 ksock_peer_t *peer;
835 ksock_conn_t *conn;
836 rwlock_t *g_lock;
837 int retry;
838 int rc;
839
840 LASSERT (tx->tx_conn == NULL);
841
842 g_lock = &ksocknal_data.ksnd_global_lock;
843
844 for (retry = 0;; retry = 1) {
845 read_lock(g_lock);
846 peer = ksocknal_find_peer_locked(ni, id);
847 if (peer != NULL) {
848 if (ksocknal_find_connectable_route_locked(peer) == NULL) {
849 conn = ksocknal_find_conn_locked(peer, tx, tx->tx_nonblk);
850 if (conn != NULL) {
851 /* I've got no routes that need to be
852 * connecting and I do have an actual
853 * connection... */
854 ksocknal_queue_tx_locked (tx, conn);
855 read_unlock(g_lock);
856 return 0;
857 }
858 }
859 }
860
861 /* I'll need a write lock... */
862 read_unlock(g_lock);
863
864 write_lock_bh(g_lock);
865
866 peer = ksocknal_find_peer_locked(ni, id);
867 if (peer != NULL)
868 break;
869
870 write_unlock_bh(g_lock);
871
872 if ((id.pid & LNET_PID_USERFLAG) != 0) {
873 CERROR("Refusing to create a connection to userspace process %s\n",
874 libcfs_id2str(id));
875 return -EHOSTUNREACH;
876 }
877
878 if (retry) {
879 CERROR("Can't find peer %s\n", libcfs_id2str(id));
880 return -EHOSTUNREACH;
881 }
882
883 rc = ksocknal_add_peer(ni, id,
884 LNET_NIDADDR(id.nid),
885 lnet_acceptor_port());
886 if (rc != 0) {
887 CERROR("Can't add peer %s: %d\n",
888 libcfs_id2str(id), rc);
889 return rc;
890 }
891 }
892
893 ksocknal_launch_all_connections_locked(peer);
894
895 conn = ksocknal_find_conn_locked(peer, tx, tx->tx_nonblk);
896 if (conn != NULL) {
897 /* Connection exists; queue message on it */
898 ksocknal_queue_tx_locked (tx, conn);
899 write_unlock_bh(g_lock);
900 return 0;
901 }
902
903 if (peer->ksnp_accepting > 0 ||
904 ksocknal_find_connecting_route_locked (peer) != NULL) {
905 /* the message is going to be pinned to the peer */
906 tx->tx_deadline =
907 cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
908
909 /* Queue the message until a connection is established */
910 list_add_tail (&tx->tx_list, &peer->ksnp_tx_queue);
911 write_unlock_bh(g_lock);
912 return 0;
913 }
914
915 write_unlock_bh(g_lock);
916
917 /* NB Routes may be ignored if connections to them failed recently */
918 CNETERR("No usable routes to %s\n", libcfs_id2str(id));
919 return -EHOSTUNREACH;
920 }
921
922 int
923 ksocknal_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
924 {
925 int mpflag = 1;
926 int type = lntmsg->msg_type;
927 lnet_process_id_t target = lntmsg->msg_target;
928 unsigned int payload_niov = lntmsg->msg_niov;
929 struct kvec *payload_iov = lntmsg->msg_iov;
930 lnet_kiov_t *payload_kiov = lntmsg->msg_kiov;
931 unsigned int payload_offset = lntmsg->msg_offset;
932 unsigned int payload_nob = lntmsg->msg_len;
933 ksock_tx_t *tx;
934 int desc_size;
935 int rc;
936
937 /* NB 'private' is different depending on what we're sending.
938 * Just ignore it... */
939
940 CDEBUG(D_NET, "sending %u bytes in %d frags to %s\n",
941 payload_nob, payload_niov, libcfs_id2str(target));
942
943 LASSERT (payload_nob == 0 || payload_niov > 0);
944 LASSERT (payload_niov <= LNET_MAX_IOV);
945 /* payload is either all vaddrs or all pages */
946 LASSERT (!(payload_kiov != NULL && payload_iov != NULL));
947 LASSERT (!in_interrupt ());
948
949 if (payload_iov != NULL)
950 desc_size = offsetof(ksock_tx_t,
951 tx_frags.virt.iov[1 + payload_niov]);
952 else
953 desc_size = offsetof(ksock_tx_t,
954 tx_frags.paged.kiov[payload_niov]);
955
956 if (lntmsg->msg_vmflush)
957 mpflag = cfs_memory_pressure_get_and_set();
958 tx = ksocknal_alloc_tx(KSOCK_MSG_LNET, desc_size);
959 if (tx == NULL) {
960 CERROR("Can't allocate tx desc type %d size %d\n",
961 type, desc_size);
962 if (lntmsg->msg_vmflush)
963 cfs_memory_pressure_restore(mpflag);
964 return -ENOMEM;
965 }
966
967 tx->tx_conn = NULL; /* set when assigned a conn */
968 tx->tx_lnetmsg = lntmsg;
969
970 if (payload_iov != NULL) {
971 tx->tx_kiov = NULL;
972 tx->tx_nkiov = 0;
973 tx->tx_iov = tx->tx_frags.virt.iov;
974 tx->tx_niov = 1 +
975 lnet_extract_iov(payload_niov, &tx->tx_iov[1],
976 payload_niov, payload_iov,
977 payload_offset, payload_nob);
978 } else {
979 tx->tx_niov = 1;
980 tx->tx_iov = &tx->tx_frags.paged.iov;
981 tx->tx_kiov = tx->tx_frags.paged.kiov;
982 tx->tx_nkiov = lnet_extract_kiov(payload_niov, tx->tx_kiov,
983 payload_niov, payload_kiov,
984 payload_offset, payload_nob);
985
986 if (payload_nob >= *ksocknal_tunables.ksnd_zc_min_payload)
987 tx->tx_zc_capable = 1;
988 }
989
990 socklnd_init_msg(&tx->tx_msg, KSOCK_MSG_LNET);
991
992 /* The first fragment will be set later in pro_pack */
993 rc = ksocknal_launch_packet(ni, tx, target);
994 if (!mpflag)
995 cfs_memory_pressure_restore(mpflag);
996
997 if (rc == 0)
998 return 0;
999
1000 ksocknal_free_tx(tx);
1001 return -EIO;
1002 }
1003
1004 int
1005 ksocknal_thread_start(int (*fn)(void *arg), void *arg, char *name)
1006 {
1007 struct task_struct *task = kthread_run(fn, arg, "%s", name);
1008
1009 if (IS_ERR(task))
1010 return PTR_ERR(task);
1011
1012 write_lock_bh(&ksocknal_data.ksnd_global_lock);
1013 ksocknal_data.ksnd_nthreads++;
1014 write_unlock_bh(&ksocknal_data.ksnd_global_lock);
1015 return 0;
1016 }
1017
1018 void
1019 ksocknal_thread_fini (void)
1020 {
1021 write_lock_bh(&ksocknal_data.ksnd_global_lock);
1022 ksocknal_data.ksnd_nthreads--;
1023 write_unlock_bh(&ksocknal_data.ksnd_global_lock);
1024 }
1025
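/* Prepare 'conn' for the next incoming packet header (nob_to_skip == 0,
 * returns 1), or set it up to discard 'nob_to_skip' bytes of slop into a
 * scratch buffer (returns 0). */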
1026 int
1027 ksocknal_new_packet (ksock_conn_t *conn, int nob_to_skip)
1028 {
1029 static char ksocknal_slop_buffer[4096];
1030
1031 int nob;
1032 unsigned int niov;
1033 int skipped;
1034
1035 LASSERT(conn->ksnc_proto != NULL);
1036
1037 if ((*ksocknal_tunables.ksnd_eager_ack & conn->ksnc_type) != 0) {
1038 /* Remind the socket to ack eagerly... */
1039 ksocknal_lib_eager_ack(conn);
1040 }
1041
1042 if (nob_to_skip == 0) { /* right at next packet boundary now */
1043 conn->ksnc_rx_started = 0;
1044 mb(); /* racing with timeout thread */
1045
1046 switch (conn->ksnc_proto->pro_version) {
1047 case KSOCK_PROTO_V2:
1048 case KSOCK_PROTO_V3:
1049 conn->ksnc_rx_state = SOCKNAL_RX_KSM_HEADER;
1050 conn->ksnc_rx_iov = (struct kvec *)&conn->ksnc_rx_iov_space;
1051 conn->ksnc_rx_iov[0].iov_base = &conn->ksnc_msg;
1052
1053 conn->ksnc_rx_nob_wanted = offsetof(ksock_msg_t, ksm_u);
1054 conn->ksnc_rx_nob_left = offsetof(ksock_msg_t, ksm_u);
1055 conn->ksnc_rx_iov[0].iov_len = offsetof(ksock_msg_t, ksm_u);
1056 break;
1057
1058 case KSOCK_PROTO_V1:
1059 /* Receiving bare lnet_hdr_t */
1060 conn->ksnc_rx_state = SOCKNAL_RX_LNET_HEADER;
1061 conn->ksnc_rx_nob_wanted = sizeof(lnet_hdr_t);
1062 conn->ksnc_rx_nob_left = sizeof(lnet_hdr_t);
1063
1064 conn->ksnc_rx_iov = (struct kvec *)&conn->ksnc_rx_iov_space;
1065 conn->ksnc_rx_iov[0].iov_base = &conn->ksnc_msg.ksm_u.lnetmsg;
1066 conn->ksnc_rx_iov[0].iov_len = sizeof (lnet_hdr_t);
1067 break;
1068
1069 default:
1070 LBUG ();
1071 }
1072 conn->ksnc_rx_niov = 1;
1073
1074 conn->ksnc_rx_kiov = NULL;
1075 conn->ksnc_rx_nkiov = 0;
1076 conn->ksnc_rx_csum = ~0;
1077 return 1;
1078 }
1079
1080 /* Set up to skip as much as possible now. If there's more left
1081 * (ran out of iov entries) we'll get called again */
1082
1083 conn->ksnc_rx_state = SOCKNAL_RX_SLOP;
1084 conn->ksnc_rx_nob_left = nob_to_skip;
1085 conn->ksnc_rx_iov = (struct kvec *)&conn->ksnc_rx_iov_space;
1086 skipped = 0;
1087 niov = 0;
1088
1089 do {
1090 nob = min_t(int, nob_to_skip, sizeof(ksocknal_slop_buffer));
1091
1092 conn->ksnc_rx_iov[niov].iov_base = ksocknal_slop_buffer;
1093 conn->ksnc_rx_iov[niov].iov_len = nob;
1094 niov++;
1095 skipped += nob;
1096 nob_to_skip -= nob;
1097
1098 } while (nob_to_skip != 0 && /* mustn't overflow conn's rx iov */
1099 niov < sizeof(conn->ksnc_rx_iov_space) / sizeof (struct iovec));
1100
1101 conn->ksnc_rx_niov = niov;
1102 conn->ksnc_rx_kiov = NULL;
1103 conn->ksnc_rx_nkiov = 0;
1104 conn->ksnc_rx_nob_wanted = skipped;
1105 return 0;
1106 }
1107
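/* Advance the receive state machine for 'conn': socklnd header, then
 * LNet header, then payload handed off to LNet, then any slop.  Returns
 * -EAGAIN on a short read, 0 when the current message has been handled,
 * or a fatal error after closing the connection. */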
1108 static int
1109 ksocknal_process_receive (ksock_conn_t *conn)
1110 {
1111 lnet_hdr_t *lhdr;
1112 lnet_process_id_t *id;
1113 int rc;
1114
1115 LASSERT (atomic_read(&conn->ksnc_conn_refcount) > 0);
1116
1117 /* NB: sched lock NOT held */
1118 /* SOCKNAL_RX_LNET_HEADER is here for backward compatibility */
1119 LASSERT (conn->ksnc_rx_state == SOCKNAL_RX_KSM_HEADER ||
1120 conn->ksnc_rx_state == SOCKNAL_RX_LNET_PAYLOAD ||
1121 conn->ksnc_rx_state == SOCKNAL_RX_LNET_HEADER ||
1122 conn->ksnc_rx_state == SOCKNAL_RX_SLOP);
1123 again:
1124 if (conn->ksnc_rx_nob_wanted != 0) {
1125 rc = ksocknal_receive(conn);
1126
1127 if (rc <= 0) {
1128 LASSERT (rc != -EAGAIN);
1129
1130 if (rc == 0)
1131 CDEBUG(D_NET, "[%p] EOF from %s ip %pI4h:%d\n",
1132 conn,
1133 libcfs_id2str(conn->ksnc_peer->ksnp_id),
1134 &conn->ksnc_ipaddr,
1135 conn->ksnc_port);
1136 else if (!conn->ksnc_closing)
1137 CERROR("[%p] Error %d on read from %s ip %pI4h:%d\n",
1138 conn, rc,
1139 libcfs_id2str(conn->ksnc_peer->ksnp_id),
1140 &conn->ksnc_ipaddr,
1141 conn->ksnc_port);
1142
1143 /* it's not an error if conn is being closed */
1144 ksocknal_close_conn_and_siblings (conn,
1145 (conn->ksnc_closing) ? 0 : rc);
1146 return (rc == 0 ? -ESHUTDOWN : rc);
1147 }
1148
1149 if (conn->ksnc_rx_nob_wanted != 0) {
1150 /* short read */
1151 return -EAGAIN;
1152 }
1153 }
1154 switch (conn->ksnc_rx_state) {
1155 case SOCKNAL_RX_KSM_HEADER:
1156 if (conn->ksnc_flip) {
1157 __swab32s(&conn->ksnc_msg.ksm_type);
1158 __swab32s(&conn->ksnc_msg.ksm_csum);
1159 __swab64s(&conn->ksnc_msg.ksm_zc_cookies[0]);
1160 __swab64s(&conn->ksnc_msg.ksm_zc_cookies[1]);
1161 }
1162
1163 if (conn->ksnc_msg.ksm_type != KSOCK_MSG_NOOP &&
1164 conn->ksnc_msg.ksm_type != KSOCK_MSG_LNET) {
1165 CERROR("%s: Unknown message type: %x\n",
1166 libcfs_id2str(conn->ksnc_peer->ksnp_id),
1167 conn->ksnc_msg.ksm_type);
1168 ksocknal_new_packet(conn, 0);
1169 ksocknal_close_conn_and_siblings(conn, -EPROTO);
1170 return -EPROTO;
1171 }
1172
1173 if (conn->ksnc_msg.ksm_type == KSOCK_MSG_NOOP &&
1174 conn->ksnc_msg.ksm_csum != 0 && /* has checksum */
1175 conn->ksnc_msg.ksm_csum != conn->ksnc_rx_csum) {
1176 /* NOOP Checksum error */
1177 CERROR("%s: Checksum error, wire:0x%08X data:0x%08X\n",
1178 libcfs_id2str(conn->ksnc_peer->ksnp_id),
1179 conn->ksnc_msg.ksm_csum, conn->ksnc_rx_csum);
1180 ksocknal_new_packet(conn, 0);
1181 ksocknal_close_conn_and_siblings(conn, -EPROTO);
1182 return -EIO;
1183 }
1184
1185 if (conn->ksnc_msg.ksm_zc_cookies[1] != 0) {
1186 __u64 cookie = 0;
1187
1188 LASSERT (conn->ksnc_proto != &ksocknal_protocol_v1x);
1189
1190 if (conn->ksnc_msg.ksm_type == KSOCK_MSG_NOOP)
1191 cookie = conn->ksnc_msg.ksm_zc_cookies[0];
1192
1193 rc = conn->ksnc_proto->pro_handle_zcack(conn, cookie,
1194 conn->ksnc_msg.ksm_zc_cookies[1]);
1195
1196 if (rc != 0) {
1197 CERROR("%s: Unknown ZC-ACK cookie: %llu, %llu\n",
1198 libcfs_id2str(conn->ksnc_peer->ksnp_id),
1199 cookie, conn->ksnc_msg.ksm_zc_cookies[1]);
1200 ksocknal_new_packet(conn, 0);
1201 ksocknal_close_conn_and_siblings(conn, -EPROTO);
1202 return rc;
1203 }
1204 }
1205
1206 if (conn->ksnc_msg.ksm_type == KSOCK_MSG_NOOP) {
1207 ksocknal_new_packet (conn, 0);
1208 return 0; /* NOOP is done and just return */
1209 }
1210
1211 conn->ksnc_rx_state = SOCKNAL_RX_LNET_HEADER;
1212 conn->ksnc_rx_nob_wanted = sizeof(ksock_lnet_msg_t);
1213 conn->ksnc_rx_nob_left = sizeof(ksock_lnet_msg_t);
1214
1215 conn->ksnc_rx_iov = (struct kvec *)&conn->ksnc_rx_iov_space;
1216 conn->ksnc_rx_iov[0].iov_base = &conn->ksnc_msg.ksm_u.lnetmsg;
1217 conn->ksnc_rx_iov[0].iov_len = sizeof(ksock_lnet_msg_t);
1218
1219 conn->ksnc_rx_niov = 1;
1220 conn->ksnc_rx_kiov = NULL;
1221 conn->ksnc_rx_nkiov = 0;
1222
1223 goto again; /* read lnet header now */
1224
1225 case SOCKNAL_RX_LNET_HEADER:
1226 /* unpack message header */
1227 conn->ksnc_proto->pro_unpack(&conn->ksnc_msg);
1228
1229 if ((conn->ksnc_peer->ksnp_id.pid & LNET_PID_USERFLAG) != 0) {
1230 /* Userspace peer */
1231 lhdr = &conn->ksnc_msg.ksm_u.lnetmsg.ksnm_hdr;
1232 id = &conn->ksnc_peer->ksnp_id;
1233
1234 /* Substitute process ID assigned at connection time */
1235 lhdr->src_pid = cpu_to_le32(id->pid);
1236 lhdr->src_nid = cpu_to_le64(id->nid);
1237 }
1238
1239 conn->ksnc_rx_state = SOCKNAL_RX_PARSE;
1240 ksocknal_conn_addref(conn); /* ++ref while parsing */
1241
1242 rc = lnet_parse(conn->ksnc_peer->ksnp_ni,
1243 &conn->ksnc_msg.ksm_u.lnetmsg.ksnm_hdr,
1244 conn->ksnc_peer->ksnp_id.nid, conn, 0);
1245 if (rc < 0) {
1246 /* I just received garbage: give up on this conn */
1247 ksocknal_new_packet(conn, 0);
1248 ksocknal_close_conn_and_siblings (conn, rc);
1249 ksocknal_conn_decref(conn);
1250 return -EPROTO;
1251 }
1252
1253 /* I'm racing with ksocknal_recv() */
1254 LASSERT (conn->ksnc_rx_state == SOCKNAL_RX_PARSE ||
1255 conn->ksnc_rx_state == SOCKNAL_RX_LNET_PAYLOAD);
1256
1257 if (conn->ksnc_rx_state != SOCKNAL_RX_LNET_PAYLOAD)
1258 return 0;
1259
1260 /* ksocknal_recv() got called */
1261 goto again;
1262
1263 case SOCKNAL_RX_LNET_PAYLOAD:
1264 /* payload all received */
1265 rc = 0;
1266
1267 if (conn->ksnc_rx_nob_left == 0 && /* not truncating */
1268 conn->ksnc_msg.ksm_csum != 0 && /* has checksum */
1269 conn->ksnc_msg.ksm_csum != conn->ksnc_rx_csum) {
1270 CERROR("%s: Checksum error, wire:0x%08X data:0x%08X\n",
1271 libcfs_id2str(conn->ksnc_peer->ksnp_id),
1272 conn->ksnc_msg.ksm_csum, conn->ksnc_rx_csum);
1273 rc = -EIO;
1274 }
1275
1276 if (rc == 0 && conn->ksnc_msg.ksm_zc_cookies[0] != 0) {
1277 LASSERT(conn->ksnc_proto != &ksocknal_protocol_v1x);
1278
1279 lhdr = &conn->ksnc_msg.ksm_u.lnetmsg.ksnm_hdr;
1280 id = &conn->ksnc_peer->ksnp_id;
1281
1282 rc = conn->ksnc_proto->pro_handle_zcreq(conn,
1283 conn->ksnc_msg.ksm_zc_cookies[0],
1284 *ksocknal_tunables.ksnd_nonblk_zcack ||
1285 le64_to_cpu(lhdr->src_nid) != id->nid);
1286 }
1287
1288 lnet_finalize(conn->ksnc_peer->ksnp_ni, conn->ksnc_cookie, rc);
1289
1290 if (rc != 0) {
1291 ksocknal_new_packet(conn, 0);
1292 ksocknal_close_conn_and_siblings (conn, rc);
1293 return -EPROTO;
1294 }
1295 /* Fall through */
1296
1297 case SOCKNAL_RX_SLOP:
1298 /* starting new packet? */
1299 if (ksocknal_new_packet (conn, conn->ksnc_rx_nob_left))
1300 return 0; /* come back later */
1301 goto again; /* try to finish reading slop now */
1302
1303 default:
1304 break;
1305 }
1306
1307 /* Not Reached */
1308 LBUG ();
1309 return -EINVAL; /* keep gcc happy */
1310 }
1311
1312 int
1313 ksocknal_recv (lnet_ni_t *ni, void *private, lnet_msg_t *msg, int delayed,
1314 unsigned int niov, struct kvec *iov, lnet_kiov_t *kiov,
1315 unsigned int offset, unsigned int mlen, unsigned int rlen)
1316 {
1317 ksock_conn_t *conn = (ksock_conn_t *)private;
1318 ksock_sched_t *sched = conn->ksnc_scheduler;
1319
1320 LASSERT (mlen <= rlen);
1321 LASSERT (niov <= LNET_MAX_IOV);
1322
1323 conn->ksnc_cookie = msg;
1324 conn->ksnc_rx_nob_wanted = mlen;
1325 conn->ksnc_rx_nob_left = rlen;
1326
1327 if (mlen == 0 || iov != NULL) {
1328 conn->ksnc_rx_nkiov = 0;
1329 conn->ksnc_rx_kiov = NULL;
1330 conn->ksnc_rx_iov = conn->ksnc_rx_iov_space.iov;
1331 conn->ksnc_rx_niov =
1332 lnet_extract_iov(LNET_MAX_IOV, conn->ksnc_rx_iov,
1333 niov, iov, offset, mlen);
1334 } else {
1335 conn->ksnc_rx_niov = 0;
1336 conn->ksnc_rx_iov = NULL;
1337 conn->ksnc_rx_kiov = conn->ksnc_rx_iov_space.kiov;
1338 conn->ksnc_rx_nkiov =
1339 lnet_extract_kiov(LNET_MAX_IOV, conn->ksnc_rx_kiov,
1340 niov, kiov, offset, mlen);
1341 }
1342
1343 LASSERT (mlen ==
1344 lnet_iov_nob (conn->ksnc_rx_niov, conn->ksnc_rx_iov) +
1345 lnet_kiov_nob (conn->ksnc_rx_nkiov, conn->ksnc_rx_kiov));
1346
1347 LASSERT (conn->ksnc_rx_scheduled);
1348
1349 spin_lock_bh(&sched->kss_lock);
1350
1351 switch (conn->ksnc_rx_state) {
1352 case SOCKNAL_RX_PARSE_WAIT:
1353 list_add_tail(&conn->ksnc_rx_list, &sched->kss_rx_conns);
1354 wake_up (&sched->kss_waitq);
1355 LASSERT (conn->ksnc_rx_ready);
1356 break;
1357
1358 case SOCKNAL_RX_PARSE:
1359 /* scheduler hasn't noticed I'm parsing yet */
1360 break;
1361 }
1362
1363 conn->ksnc_rx_state = SOCKNAL_RX_LNET_PAYLOAD;
1364
1365 spin_unlock_bh(&sched->kss_lock);
1366 ksocknal_conn_decref(conn);
1367 return 0;
1368 }
1369
1370 static inline int
1371 ksocknal_sched_cansleep(ksock_sched_t *sched)
1372 {
1373 int rc;
1374
1375 spin_lock_bh(&sched->kss_lock);
1376
1377 rc = !ksocknal_data.ksnd_shuttingdown &&
1378 list_empty(&sched->kss_rx_conns) &&
1379 list_empty(&sched->kss_tx_conns);
1380
1381 spin_unlock_bh(&sched->kss_lock);
1382 return rc;
1383 }
1384
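/* Per-CPT scheduler thread: alternately services connections with
 * pending receives (kss_rx_conns) and pending transmits (kss_tx_conns),
 * dropping the lock around the actual socket I/O and yielding every
 * SOCKNAL_RESCHED loops to stay fair. */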
1385 int ksocknal_scheduler(void *arg)
1386 {
1387 struct ksock_sched_info *info;
1388 ksock_sched_t *sched;
1389 ksock_conn_t *conn;
1390 ksock_tx_t *tx;
1391 int rc;
1392 int nloops = 0;
1393 long id = (long)arg;
1394
1395 info = ksocknal_data.ksnd_sched_info[KSOCK_THREAD_CPT(id)];
1396 sched = &info->ksi_scheds[KSOCK_THREAD_SID(id)];
1397
1398 cfs_block_allsigs();
1399
1400 rc = cfs_cpt_bind(lnet_cpt_table(), info->ksi_cpt);
1401 if (rc != 0) {
1402 CERROR("Can't set CPT affinity to %d: %d\n",
1403 info->ksi_cpt, rc);
1404 }
1405
1406 spin_lock_bh(&sched->kss_lock);
1407
1408 while (!ksocknal_data.ksnd_shuttingdown) {
1409 int did_something = 0;
1410
1411 /* Ensure I progress everything semi-fairly */
1412
1413 if (!list_empty (&sched->kss_rx_conns)) {
1414 conn = list_entry(sched->kss_rx_conns.next,
1415 ksock_conn_t, ksnc_rx_list);
1416 list_del(&conn->ksnc_rx_list);
1417
1418 LASSERT(conn->ksnc_rx_scheduled);
1419 LASSERT(conn->ksnc_rx_ready);
1420
1421 /* clear rx_ready in case receive isn't complete.
1422 * Do it BEFORE we call process_recv, since
1423 * data_ready can set it any time after we release
1424 * kss_lock. */
1425 conn->ksnc_rx_ready = 0;
1426 spin_unlock_bh(&sched->kss_lock);
1427
1428 rc = ksocknal_process_receive(conn);
1429
1430 spin_lock_bh(&sched->kss_lock);
1431
1432 /* I'm the only one that can clear this flag */
1433 LASSERT(conn->ksnc_rx_scheduled);
1434
1435 /* Did process_receive get everything it wanted? */
1436 if (rc == 0)
1437 conn->ksnc_rx_ready = 1;
1438
1439 if (conn->ksnc_rx_state == SOCKNAL_RX_PARSE) {
1440 /* Conn blocked waiting for ksocknal_recv()
1441 * I change its state (under lock) to signal
1442 * it can be rescheduled */
1443 conn->ksnc_rx_state = SOCKNAL_RX_PARSE_WAIT;
1444 } else if (conn->ksnc_rx_ready) {
1445 /* reschedule for rx */
1446 list_add_tail (&conn->ksnc_rx_list,
1447 &sched->kss_rx_conns);
1448 } else {
1449 conn->ksnc_rx_scheduled = 0;
1450 /* drop my ref */
1451 ksocknal_conn_decref(conn);
1452 }
1453
1454 did_something = 1;
1455 }
1456
1457 if (!list_empty (&sched->kss_tx_conns)) {
1458 LIST_HEAD (zlist);
1459
1460 if (!list_empty(&sched->kss_zombie_noop_txs)) {
1461 list_add(&zlist,
1462 &sched->kss_zombie_noop_txs);
1463 list_del_init(&sched->kss_zombie_noop_txs);
1464 }
1465
1466 conn = list_entry(sched->kss_tx_conns.next,
1467 ksock_conn_t, ksnc_tx_list);
1468 list_del (&conn->ksnc_tx_list);
1469
1470 LASSERT(conn->ksnc_tx_scheduled);
1471 LASSERT(conn->ksnc_tx_ready);
1472 LASSERT(!list_empty(&conn->ksnc_tx_queue));
1473
1474 tx = list_entry(conn->ksnc_tx_queue.next,
1475 ksock_tx_t, tx_list);
1476
1477 if (conn->ksnc_tx_carrier == tx)
1478 ksocknal_next_tx_carrier(conn);
1479
1480 /* dequeue now so empty list => more to send */
1481 list_del(&tx->tx_list);
1482
1483 /* Clear tx_ready in case send isn't complete. Do
1484 * it BEFORE we call process_transmit, since
1485 * write_space can set it any time after we release
1486 * kss_lock. */
1487 conn->ksnc_tx_ready = 0;
1488 spin_unlock_bh(&sched->kss_lock);
1489
1490 if (!list_empty(&zlist)) {
1491 /* free zombie noop txs, it's fast because
1492 * noop txs are just put in freelist */
1493 ksocknal_txlist_done(NULL, &zlist, 0);
1494 }
1495
1496 rc = ksocknal_process_transmit(conn, tx);
1497
1498 if (rc == -ENOMEM || rc == -EAGAIN) {
1499 /* Incomplete send: replace tx on HEAD of tx_queue */
1500 spin_lock_bh(&sched->kss_lock);
1501 list_add(&tx->tx_list,
1502 &conn->ksnc_tx_queue);
1503 } else {
1504 /* Complete send; tx -ref */
1505 ksocknal_tx_decref(tx);
1506
1507 spin_lock_bh(&sched->kss_lock);
1508 /* assume space for more */
1509 conn->ksnc_tx_ready = 1;
1510 }
1511
1512 if (rc == -ENOMEM) {
1513 /* Do nothing; after a short timeout, this
1514 * conn will be reposted on kss_tx_conns. */
1515 } else if (conn->ksnc_tx_ready &&
1516 !list_empty (&conn->ksnc_tx_queue)) {
1517 /* reschedule for tx */
1518 list_add_tail (&conn->ksnc_tx_list,
1519 &sched->kss_tx_conns);
1520 } else {
1521 conn->ksnc_tx_scheduled = 0;
1522 /* drop my ref */
1523 ksocknal_conn_decref(conn);
1524 }
1525
1526 did_something = 1;
1527 }
1528 if (!did_something || /* nothing to do */
1529 ++nloops == SOCKNAL_RESCHED) { /* hogging CPU? */
1530 spin_unlock_bh(&sched->kss_lock);
1531
1532 nloops = 0;
1533
1534 if (!did_something) { /* wait for something to do */
1535 rc = wait_event_interruptible_exclusive(
1536 sched->kss_waitq,
1537 !ksocknal_sched_cansleep(sched));
1538 LASSERT (rc == 0);
1539 } else {
1540 cond_resched();
1541 }
1542
1543 spin_lock_bh(&sched->kss_lock);
1544 }
1545 }
1546
1547 spin_unlock_bh(&sched->kss_lock);
1548 ksocknal_thread_fini();
1549 return 0;
1550 }
1551
1552 /*
1553 * Add connection to kss_rx_conns of scheduler
1554 * and wakeup the scheduler.
1555 */
1556 void ksocknal_read_callback (ksock_conn_t *conn)
1557 {
1558 ksock_sched_t *sched;
1559
1560 sched = conn->ksnc_scheduler;
1561
1562 spin_lock_bh(&sched->kss_lock);
1563
1564 conn->ksnc_rx_ready = 1;
1565
1566 if (!conn->ksnc_rx_scheduled) { /* not being progressed */
1567 list_add_tail(&conn->ksnc_rx_list,
1568 &sched->kss_rx_conns);
1569 conn->ksnc_rx_scheduled = 1;
1570 /* extra ref for scheduler */
1571 ksocknal_conn_addref(conn);
1572
1573 wake_up (&sched->kss_waitq);
1574 }
1575 spin_unlock_bh(&sched->kss_lock);
1576 }
1577
1578 /*
1579 * Add connection to kss_tx_conns of scheduler
1580 * and wakeup the scheduler.
1581 */
1582 void ksocknal_write_callback (ksock_conn_t *conn)
1583 {
1584 ksock_sched_t *sched;
1585
1586 sched = conn->ksnc_scheduler;
1587
1588 spin_lock_bh(&sched->kss_lock);
1589
1590 conn->ksnc_tx_ready = 1;
1591
1592 if (!conn->ksnc_tx_scheduled && /* not being progressed */
1593 !list_empty(&conn->ksnc_tx_queue)) { /* packets to send */
1594 list_add_tail (&conn->ksnc_tx_list,
1595 &sched->kss_tx_conns);
1596 conn->ksnc_tx_scheduled = 1;
1597 /* extra ref for scheduler */
1598 ksocknal_conn_addref(conn);
1599
1600 wake_up (&sched->kss_waitq);
1601 }
1602
1603 spin_unlock_bh(&sched->kss_lock);
1604 }
1605
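/* Map the magic/version of a received HELLO onto a protocol table
 * (v1x, v2x or v3x); returns NULL if the version is unknown or disabled
 * by the 'protocol' tunable. */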
1606 static ksock_proto_t *
1607 ksocknal_parse_proto_version (ksock_hello_msg_t *hello)
1608 {
1609 __u32 version = 0;
1610
1611 if (hello->kshm_magic == LNET_PROTO_MAGIC)
1612 version = hello->kshm_version;
1613 else if (hello->kshm_magic == __swab32(LNET_PROTO_MAGIC))
1614 version = __swab32(hello->kshm_version);
1615
1616 if (version != 0) {
1617 #if SOCKNAL_VERSION_DEBUG
1618 if (*ksocknal_tunables.ksnd_protocol == 1)
1619 return NULL;
1620
1621 if (*ksocknal_tunables.ksnd_protocol == 2 &&
1622 version == KSOCK_PROTO_V3)
1623 return NULL;
1624 #endif
1625 if (version == KSOCK_PROTO_V2)
1626 return &ksocknal_protocol_v2x;
1627
1628 if (version == KSOCK_PROTO_V3)
1629 return &ksocknal_protocol_v3x;
1630
1631 return NULL;
1632 }
1633
1634 if (hello->kshm_magic == le32_to_cpu(LNET_PROTO_TCP_MAGIC)) {
1635 lnet_magicversion_t *hmv = (lnet_magicversion_t *)hello;
1636
1637 CLASSERT (sizeof (lnet_magicversion_t) ==
1638 offsetof (ksock_hello_msg_t, kshm_src_nid));
1639
1640 if (hmv->version_major == cpu_to_le16 (KSOCK_PROTO_V1_MAJOR) &&
1641 hmv->version_minor == cpu_to_le16 (KSOCK_PROTO_V1_MINOR))
1642 return &ksocknal_protocol_v1x;
1643 }
1644
1645 return NULL;
1646 }
1647
1648 int
1649 ksocknal_send_hello (lnet_ni_t *ni, ksock_conn_t *conn,
1650 lnet_nid_t peer_nid, ksock_hello_msg_t *hello)
1651 {
1652 /* CAVEAT EMPTOR: this byte flips 'ipaddrs' */
1653 ksock_net_t *net = (ksock_net_t *)ni->ni_data;
1654
1655 LASSERT (hello->kshm_nips <= LNET_MAX_INTERFACES);
1656
1657 /* rely on caller to hold a ref on socket so it wouldn't disappear */
1658 LASSERT (conn->ksnc_proto != NULL);
1659
1660 hello->kshm_src_nid = ni->ni_nid;
1661 hello->kshm_dst_nid = peer_nid;
1662 hello->kshm_src_pid = the_lnet.ln_pid;
1663
1664 hello->kshm_src_incarnation = net->ksnn_incarnation;
1665 hello->kshm_ctype = conn->ksnc_type;
1666
1667 return conn->ksnc_proto->pro_send_hello(conn, hello);
1668 }
1669
1670 static int
1671 ksocknal_invert_type(int type)
1672 {
1673 switch (type) {
1674 case SOCKLND_CONN_ANY:
1675 case SOCKLND_CONN_CONTROL:
1676 return type;
1677 case SOCKLND_CONN_BULK_IN:
1678 return SOCKLND_CONN_BULK_OUT;
1679 case SOCKLND_CONN_BULK_OUT:
1680 return SOCKLND_CONN_BULK_IN;
1681 default:
1682 return SOCKLND_CONN_NONE;
1683 }
1684 }
1685
1686 int
1687 ksocknal_recv_hello (lnet_ni_t *ni, ksock_conn_t *conn,
1688 ksock_hello_msg_t *hello, lnet_process_id_t *peerid,
1689 __u64 *incarnation)
1690 {
1691 /* Return < 0 fatal error
1692 * 0 success
1693 * EALREADY lost connection race
1694 * EPROTO protocol version mismatch
1695 */
1696 struct socket *sock = conn->ksnc_sock;
1697 int active = (conn->ksnc_proto != NULL);
1698 int timeout;
1699 int proto_match;
1700 int rc;
1701 ksock_proto_t *proto;
1702 lnet_process_id_t recv_id;
1703
1704 /* socket type set on active connections - not set on passive */
1705 LASSERT (!active == !(conn->ksnc_type != SOCKLND_CONN_NONE));
1706
1707 timeout = active ? *ksocknal_tunables.ksnd_timeout :
1708 lnet_acceptor_timeout();
1709
1710 rc = libcfs_sock_read(sock, &hello->kshm_magic, sizeof (hello->kshm_magic), timeout);
1711 if (rc != 0) {
1712 CERROR("Error %d reading HELLO from %pI4h\n",
1713 rc, &conn->ksnc_ipaddr);
1714 LASSERT (rc < 0);
1715 return rc;
1716 }
1717
1718 if (hello->kshm_magic != LNET_PROTO_MAGIC &&
1719 hello->kshm_magic != __swab32(LNET_PROTO_MAGIC) &&
1720 hello->kshm_magic != le32_to_cpu (LNET_PROTO_TCP_MAGIC)) {
1721 /* Unexpected magic! */
1722 CERROR("Bad magic(1) %#08x (%#08x expected) from %pI4h\n",
1723 __cpu_to_le32 (hello->kshm_magic),
1724 LNET_PROTO_TCP_MAGIC,
1725 &conn->ksnc_ipaddr);
1726 return -EPROTO;
1727 }
1728
1729 rc = libcfs_sock_read(sock, &hello->kshm_version,
1730 sizeof(hello->kshm_version), timeout);
1731 if (rc != 0) {
1732 CERROR("Error %d reading HELLO from %pI4h\n",
1733 rc, &conn->ksnc_ipaddr);
1734 LASSERT (rc < 0);
1735 return rc;
1736 }
1737
1738 proto = ksocknal_parse_proto_version(hello);
1739 if (proto == NULL) {
1740 if (!active) {
1741 /* unknown protocol from peer, tell peer my protocol */
1742 conn->ksnc_proto = &ksocknal_protocol_v3x;
1743 #if SOCKNAL_VERSION_DEBUG
1744 if (*ksocknal_tunables.ksnd_protocol == 2)
1745 conn->ksnc_proto = &ksocknal_protocol_v2x;
1746 else if (*ksocknal_tunables.ksnd_protocol == 1)
1747 conn->ksnc_proto = &ksocknal_protocol_v1x;
1748 #endif
1749 hello->kshm_nips = 0;
1750 ksocknal_send_hello(ni, conn, ni->ni_nid, hello);
1751 }
1752
1753 CERROR("Unknown protocol version (%d.x expected) from %pI4h\n",
1754 conn->ksnc_proto->pro_version,
1755 &conn->ksnc_ipaddr);
1756
1757 return -EPROTO;
1758 }
1759
1760 proto_match = (conn->ksnc_proto == proto);
1761 conn->ksnc_proto = proto;
1762
1763 /* receive the rest of hello message anyway */
1764 rc = conn->ksnc_proto->pro_recv_hello(conn, hello, timeout);
1765 if (rc != 0) {
1766 CERROR("Error %d reading or checking hello from from %pI4h\n",
1767 rc, &conn->ksnc_ipaddr);
1768 LASSERT (rc < 0);
1769 return rc;
1770 }
1771
1772 *incarnation = hello->kshm_src_incarnation;
1773
1774 if (hello->kshm_src_nid == LNET_NID_ANY) {
1775 CERROR("Expecting a HELLO hdr with a NID, but got LNET_NID_ANY from %pI4h\n",
1776 &conn->ksnc_ipaddr);
1777 return -EPROTO;
1778 }
1779
1780 if (!active &&
1781 conn->ksnc_port > LNET_ACCEPTOR_MAX_RESERVED_PORT) {
1782 /* Userspace NAL assigns peer process ID from socket */
1783 recv_id.pid = conn->ksnc_port | LNET_PID_USERFLAG;
1784 recv_id.nid = LNET_MKNID(LNET_NIDNET(ni->ni_nid), conn->ksnc_ipaddr);
1785 } else {
1786 recv_id.nid = hello->kshm_src_nid;
1787 recv_id.pid = hello->kshm_src_pid;
1788 }
1789
1790 if (!active) {
1791 *peerid = recv_id;
1792
1793 /* peer determines type */
1794 conn->ksnc_type = ksocknal_invert_type(hello->kshm_ctype);
1795 if (conn->ksnc_type == SOCKLND_CONN_NONE) {
1796 CERROR("Unexpected type %d from %s ip %pI4h\n",
1797 hello->kshm_ctype, libcfs_id2str(*peerid),
1798 &conn->ksnc_ipaddr);
1799 return -EPROTO;
1800 }
1801
1802 return 0;
1803 }
1804
1805 if (peerid->pid != recv_id.pid ||
1806 peerid->nid != recv_id.nid) {
1807 LCONSOLE_ERROR_MSG(0x130, "Connected successfully to %s on host %pI4h, but they claimed they were %s; please check your Lustre configuration.\n",
1808 libcfs_id2str(*peerid),
1809 &conn->ksnc_ipaddr,
1810 libcfs_id2str(recv_id));
1811 return -EPROTO;
1812 }
1813
1814 if (hello->kshm_ctype == SOCKLND_CONN_NONE) {
1815 /* Possible protocol mismatch or I lost the connection race */
1816 return proto_match ? EALREADY : EPROTO;
1817 }
1818
1819 if (ksocknal_invert_type(hello->kshm_ctype) != conn->ksnc_type) {
1820 CERROR("Mismatched types: me %d, %s ip %pI4h %d\n",
1821 conn->ksnc_type, libcfs_id2str(*peerid),
1822 &conn->ksnc_ipaddr,
1823 hello->kshm_ctype);
1824 return -EPROTO;
1825 }
1826
1827 return 0;
1828 }
1829
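/* connd work item: establish every connection type this route still
 * needs.  Returns non-zero if the route must be rescheduled (e.g. lost a
 * connection race); on failure the retry interval is backed off and any
 * blocked txs are completed with an error. */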
1830 static int
1831 ksocknal_connect (ksock_route_t *route)
1832 {
1833 LIST_HEAD (zombies);
1834 ksock_peer_t *peer = route->ksnr_peer;
1835 int type;
1836 int wanted;
1837 struct socket *sock;
1838 unsigned long deadline;
1839 int retry_later = 0;
1840 int rc = 0;
1841
1842 deadline = cfs_time_add(cfs_time_current(),
1843 cfs_time_seconds(*ksocknal_tunables.ksnd_timeout));
1844
1845 write_lock_bh(&ksocknal_data.ksnd_global_lock);
1846
1847 LASSERT (route->ksnr_scheduled);
1848 LASSERT (!route->ksnr_connecting);
1849
1850 route->ksnr_connecting = 1;
1851
1852 for (;;) {
1853 wanted = ksocknal_route_mask() & ~route->ksnr_connected;
1854
1855 /* stop connecting if peer/route got closed under me, or
1856 * route got connected while queued */
1857 if (peer->ksnp_closing || route->ksnr_deleted ||
1858 wanted == 0) {
1859 retry_later = 0;
1860 break;
1861 }
1862
1863 /* reschedule if peer is connecting to me */
1864 if (peer->ksnp_accepting > 0) {
1865 CDEBUG(D_NET,
1866 "peer %s(%d) already connecting to me, retry later.\n",
1867 libcfs_nid2str(peer->ksnp_id.nid), peer->ksnp_accepting);
1868 retry_later = 1;
1869 }
1870
1871 if (retry_later) /* needs reschedule */
1872 break;
1873
1874 if ((wanted & (1 << SOCKLND_CONN_ANY)) != 0) {
1875 type = SOCKLND_CONN_ANY;
1876 } else if ((wanted & (1 << SOCKLND_CONN_CONTROL)) != 0) {
1877 type = SOCKLND_CONN_CONTROL;
1878 } else if ((wanted & (1 << SOCKLND_CONN_BULK_IN)) != 0) {
1879 type = SOCKLND_CONN_BULK_IN;
1880 } else {
1881 LASSERT ((wanted & (1 << SOCKLND_CONN_BULK_OUT)) != 0);
1882 type = SOCKLND_CONN_BULK_OUT;
1883 }
1884
1885 write_unlock_bh(&ksocknal_data.ksnd_global_lock);
1886
1887 if (cfs_time_aftereq(cfs_time_current(), deadline)) {
1888 rc = -ETIMEDOUT;
1889 lnet_connect_console_error(rc, peer->ksnp_id.nid,
1890 route->ksnr_ipaddr,
1891 route->ksnr_port);
1892 goto failed;
1893 }
1894
1895 rc = lnet_connect(&sock, peer->ksnp_id.nid,
1896 route->ksnr_myipaddr,
1897 route->ksnr_ipaddr, route->ksnr_port);
1898 if (rc != 0)
1899 goto failed;
1900
1901 rc = ksocknal_create_conn(peer->ksnp_ni, route, sock, type);
1902 if (rc < 0) {
1903 lnet_connect_console_error(rc, peer->ksnp_id.nid,
1904 route->ksnr_ipaddr,
1905 route->ksnr_port);
1906 goto failed;
1907 }
1908
1909 /* A +ve RC means I have to retry because I lost the connection
1910 * race or I have to renegotiate protocol version */
1911 retry_later = (rc != 0);
1912 if (retry_later)
1913 CDEBUG(D_NET, "peer %s: conn race, retry later.\n",
1914 libcfs_nid2str(peer->ksnp_id.nid));
1915
1916 write_lock_bh(&ksocknal_data.ksnd_global_lock);
1917 }
1918
1919 route->ksnr_scheduled = 0;
1920 route->ksnr_connecting = 0;
1921
1922 if (retry_later) {
1923 /* re-queue for attention; this frees me up to handle
1924 * the peer's incoming connection request */
1925
1926 if (rc == EALREADY ||
1927 (rc == 0 && peer->ksnp_accepting > 0)) {
1928 /* We want to introduce a delay before the next
1929 * attempt to connect if we lost the conn race,
1930 * but the race is usually resolved quickly,
1931 * so min_reconnectms should be a good heuristic */
1932 route->ksnr_retry_interval =
1933 cfs_time_seconds(*ksocknal_tunables.ksnd_min_reconnectms)/1000;
1934 route->ksnr_timeout = cfs_time_add(cfs_time_current(),
1935 route->ksnr_retry_interval);
1936 }
1937
1938 ksocknal_launch_connection_locked(route);
1939 }
1940
1941 write_unlock_bh(&ksocknal_data.ksnd_global_lock);
1942 return retry_later;
1943
1944 failed:
1945 write_lock_bh(&ksocknal_data.ksnd_global_lock);
1946
1947 route->ksnr_scheduled = 0;
1948 route->ksnr_connecting = 0;
1949
1950 /* This is a retry rather than a new connection */
1951 route->ksnr_retry_interval *= 2;
1952 route->ksnr_retry_interval =
1953 max(route->ksnr_retry_interval,
1954 cfs_time_seconds(*ksocknal_tunables.ksnd_min_reconnectms)/1000);
1955 route->ksnr_retry_interval =
1956 min(route->ksnr_retry_interval,
1957 cfs_time_seconds(*ksocknal_tunables.ksnd_max_reconnectms)/1000);
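/* Example (hypothetical tunable values): with ksnd_min_reconnectms =
 * 1000 and ksnd_max_reconnectms = 60000 the retry interval doubles on
 * every consecutive failure and is clamped to the range [1s, 60s]:
 * 1s -> 2s -> 4s -> 8s -> ... -> 60s (capped). */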
1958
1959 LASSERT (route->ksnr_retry_interval != 0);
1960 route->ksnr_timeout = cfs_time_add(cfs_time_current(),
1961 route->ksnr_retry_interval);
1962
1963 if (!list_empty(&peer->ksnp_tx_queue) &&
1964 peer->ksnp_accepting == 0 &&
1965 ksocknal_find_connecting_route_locked(peer) == NULL) {
1966 ksock_conn_t *conn;
1967
1968 /* ksnp_tx_queue is queued on a conn on successful
1969 * connection for V1.x and V2.x */
1970 if (!list_empty (&peer->ksnp_conns)) {
1971 conn = list_entry(peer->ksnp_conns.next,
1972 ksock_conn_t, ksnc_list);
1973 LASSERT (conn->ksnc_proto == &ksocknal_protocol_v3x);
1974 }
1975
1976 /* take all the blocked packets while I've got the lock and
1977 * complete below... */
1978 list_splice_init(&peer->ksnp_tx_queue, &zombies);
1979 }
1980
1981 #if 0 /* irrelevant with only eager routes */
1982 if (!route->ksnr_deleted) {
1983 /* make this route least-favourite for re-selection */
1984 list_del(&route->ksnr_list);
1985 list_add_tail(&route->ksnr_list, &peer->ksnp_routes);
1986 }
1987 #endif
1988 write_unlock_bh(&ksocknal_data.ksnd_global_lock);
1989
1990 ksocknal_peer_failed(peer);
1991 ksocknal_txlist_done(peer->ksnp_ni, &zombies, 1);
1992 return 0;
1993 }
1994
1995 /*
1996 * Check whether we need to create more connds.
1997 * It will try to create a new thread if necessary; @timeout can be
1998 * updated if thread creation fails, so the caller won't keep retrying
1999 * while we are running out of resources.
2000 */
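/* NB: a non-zero return value means ksnd_connd_lock was dropped and
 * re-acquired while (attempting) to start a new thread; the caller in
 * ksocknal_connd() treats this as having dropped the lock. */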
2001 static int
2002 ksocknal_connd_check_start(long sec, long *timeout)
2003 {
2004 char name[16];
2005 int rc;
2006 int total = ksocknal_data.ksnd_connd_starting +
2007 ksocknal_data.ksnd_connd_running;
2008
2009 if (unlikely(ksocknal_data.ksnd_init < SOCKNAL_INIT_ALL)) {
2010 /* still initializing */
2011 return 0;
2012 }
2013
2014 if (total >= *ksocknal_tunables.ksnd_nconnds_max ||
2015 total > ksocknal_data.ksnd_connd_connecting + SOCKNAL_CONND_RESV) {
2016 /* can't create more connds, or we still have enough
2017 * threads to handle the pending connection requests */
2018 return 0;
2019 }
2020
2021 if (list_empty(&ksocknal_data.ksnd_connd_routes)) {
2022 /* no pending connecting request */
2023 return 0;
2024 }
2025
2026 if (sec - ksocknal_data.ksnd_connd_failed_stamp <= 1) {
2027 /* we may be running out of resources, retry later */
2028 *timeout = cfs_time_seconds(1);
2029 return 0;
2030 }
2031
2032 if (ksocknal_data.ksnd_connd_starting > 0) {
2033 /* serialize starting to avoid flood */
2034 return 0;
2035 }
2036
2037 ksocknal_data.ksnd_connd_starting_stamp = sec;
2038 ksocknal_data.ksnd_connd_starting++;
2039 spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
2040
2041 /* NB: total is the next id */
2042 snprintf(name, sizeof(name), "socknal_cd%02d", total);
2043 rc = ksocknal_thread_start(ksocknal_connd, NULL, name);
2044
2045 spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
2046 if (rc == 0)
2047 return 1;
2048
2049 /* we tried ... */
2050 LASSERT(ksocknal_data.ksnd_connd_starting > 0);
2051 ksocknal_data.ksnd_connd_starting--;
2052 ksocknal_data.ksnd_connd_failed_stamp = get_seconds();
2053
2054 return 1;
2055 }
2056
2057 /*
2058 * Check whether the current thread can exit; it returns 1 if there are
2059 * too many threads and none has been created in the past 120 seconds.
2060 * Also, this function may update @timeout to make the caller come back
2061 * again to recheck these conditions.
2062 */
2063 static int
2064 ksocknal_connd_check_stop(long sec, long *timeout)
2065 {
2066 int val;
2067
2068 if (unlikely(ksocknal_data.ksnd_init < SOCKNAL_INIT_ALL)) {
2069 /* still initializing */
2070 return 0;
2071 }
2072
2073 if (ksocknal_data.ksnd_connd_starting > 0) {
2074 /* starting a new thread is still in progress */
2075 return 0;
2076 }
2077
2078 if (ksocknal_data.ksnd_connd_running <=
2079 *ksocknal_tunables.ksnd_nconnds) { /* can't shrink */
2080 return 0;
2081 }
2082
2083 /* created thread in past 120 seconds? */
2084 val = (int)(ksocknal_data.ksnd_connd_starting_stamp +
2085 SOCKNAL_CONND_TIMEOUT - sec);
2086
2087 *timeout = (val > 0) ? cfs_time_seconds(val) :
2088 cfs_time_seconds(SOCKNAL_CONND_TIMEOUT);
2089 if (val > 0)
2090 return 0;
2091
2092 /* no creating in past 120 seconds */
2093
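/* shrink only if the running threads exceed those actively
 * connecting plus the reserve kept for accepting connections
 * and starting new connds */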
2094 return ksocknal_data.ksnd_connd_running >
2095 ksocknal_data.ksnd_connd_connecting + SOCKNAL_CONND_RESV;
2096 }
2097
2098 /* Go through the connd_routes queue looking for a route that we can
2099 * process right now; @timeout_p can be updated if we need to come back later */
2100 static ksock_route_t *
2101 ksocknal_connd_get_route_locked(signed long *timeout_p)
2102 {
2103 ksock_route_t *route;
2104 unsigned long now;
2105
2106 now = cfs_time_current();
2107
2108 /* connd_routes can contain both pending and ordinary routes */
2109 list_for_each_entry (route, &ksocknal_data.ksnd_connd_routes,
2110 ksnr_connd_list) {
2111
2112 if (route->ksnr_retry_interval == 0 ||
2113 cfs_time_aftereq(now, route->ksnr_timeout))
2114 return route;
2115
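/* route isn't due yet: shrink the caller's sleep so the connd
 * wakes up when the earliest pending route becomes retryable */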
2116 if (*timeout_p == MAX_SCHEDULE_TIMEOUT ||
2117 (int)*timeout_p > (int)(route->ksnr_timeout - now))
2118 *timeout_p = (int)(route->ksnr_timeout - now);
2119 }
2120
2121 return NULL;
2122 }
2123
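/* A connd thread services the ksnd_connd queues: it handles incoming
 * connection requests accepted by the listener, establishes outgoing
 * connections for queued routes, and (via the checks above) grows or
 * shrinks the connd pool as load changes. */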
2124 int
2125 ksocknal_connd (void *arg)
2126 {
2127 spinlock_t *connd_lock = &ksocknal_data.ksnd_connd_lock;
2128 ksock_connreq_t *cr;
2129 wait_queue_t wait;
2130 int nloops = 0;
2131 int cons_retry = 0;
2132
2133 cfs_block_allsigs ();
2134
2135 init_waitqueue_entry(&wait, current);
2136
2137 spin_lock_bh(connd_lock);
2138
2139 LASSERT(ksocknal_data.ksnd_connd_starting > 0);
2140 ksocknal_data.ksnd_connd_starting--;
2141 ksocknal_data.ksnd_connd_running++;
2142
2143 while (!ksocknal_data.ksnd_shuttingdown) {
2144 ksock_route_t *route = NULL;
2145 long sec = get_seconds();
2146 long timeout = MAX_SCHEDULE_TIMEOUT;
2147 int dropped_lock = 0;
2148
2149 if (ksocknal_connd_check_stop(sec, &timeout)) {
2150 /* wake up another connd to check stop */
2151 wake_up(&ksocknal_data.ksnd_connd_waitq);
2152 break;
2153 }
2154
2155 if (ksocknal_connd_check_start(sec, &timeout)) {
2156 /* created new thread */
2157 dropped_lock = 1;
2158 }
2159
2160 if (!list_empty(&ksocknal_data.ksnd_connd_connreqs)) {
2161 /* Connection accepted by the listener */
2162 cr = list_entry(ksocknal_data.ksnd_connd_connreqs. \
2163 next, ksock_connreq_t, ksncr_list);
2164
2165 list_del(&cr->ksncr_list);
2166 spin_unlock_bh(connd_lock);
2167 dropped_lock = 1;
2168
2169 ksocknal_create_conn(cr->ksncr_ni, NULL,
2170 cr->ksncr_sock, SOCKLND_CONN_NONE);
2171 lnet_ni_decref(cr->ksncr_ni);
2172 LIBCFS_FREE(cr, sizeof(*cr));
2173
2174 spin_lock_bh(connd_lock);
2175 }
2176
2177 /* Only handle an outgoing connection request if there
2178 * is a thread left over to handle incoming connections
2179 * and to create new connds */
2180 if (ksocknal_data.ksnd_connd_connecting + SOCKNAL_CONND_RESV <
2181 ksocknal_data.ksnd_connd_running) {
2182 route = ksocknal_connd_get_route_locked(&timeout);
2183 }
2184 if (route != NULL) {
2185 list_del (&route->ksnr_connd_list);
2186 ksocknal_data.ksnd_connd_connecting++;
2187 spin_unlock_bh(connd_lock);
2188 dropped_lock = 1;
2189
2190 if (ksocknal_connect(route)) {
2191 /* consecutive retry */
2192 if (cons_retry++ > SOCKNAL_INSANITY_RECONN) {
2193 CWARN("massive consecutive re-connecting to %pI4h\n",
2194 &route->ksnr_ipaddr);
2195 cons_retry = 0;
2196 }
2197 } else {
2198 cons_retry = 0;
2199 }
2200
2201 ksocknal_route_decref(route);
2202
2203 spin_lock_bh(connd_lock);
2204 ksocknal_data.ksnd_connd_connecting--;
2205 }
2206
2207 if (dropped_lock) {
2208 if (++nloops < SOCKNAL_RESCHED)
2209 continue;
2210 spin_unlock_bh(connd_lock);
2211 nloops = 0;
2212 cond_resched();
2213 spin_lock_bh(connd_lock);
2214 continue;
2215 }
2216
2217 /* Nothing to do for 'timeout' */
2218 set_current_state(TASK_INTERRUPTIBLE);
2219 add_wait_queue_exclusive(&ksocknal_data.ksnd_connd_waitq, &wait);
2220 spin_unlock_bh(connd_lock);
2221
2222 nloops = 0;
2223 schedule_timeout(timeout);
2224
2225 remove_wait_queue(&ksocknal_data.ksnd_connd_waitq, &wait);
2226 spin_lock_bh(connd_lock);
2227 }
2228 ksocknal_data.ksnd_connd_running--;
2229 spin_unlock_bh(connd_lock);
2230
2231 ksocknal_thread_fini();
2232 return 0;
2233 }
2234
2235 static ksock_conn_t *
2236 ksocknal_find_timed_out_conn (ksock_peer_t *peer)
2237 {
2238 /* We're called with a shared lock on ksnd_global_lock */
2239 ksock_conn_t *conn;
2240 struct list_head *ctmp;
2241
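/* A connection counts as timed out if its socket reports an error, if
 * an incoming message has been left incomplete past ksnc_rx_deadline,
 * or if queued/buffered outgoing data is still unsent past
 * ksnc_tx_deadline.  The first such connection is returned with a
 * reference held. */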
2242 list_for_each (ctmp, &peer->ksnp_conns) {
2243 int error;
2244 conn = list_entry (ctmp, ksock_conn_t, ksnc_list);
2245
2246 /* Don't need the {get,put}connsock dance to deref ksnc_sock */
2247 LASSERT (!conn->ksnc_closing);
2248
2249 /* SOCK_ERROR will reset the error code of the socket on
2250 * some platforms (like Darwin 8.x) */
2251 error = conn->ksnc_sock->sk->sk_err;
2252 if (error != 0) {
2253 ksocknal_conn_addref(conn);
2254
2255 switch (error) {
2256 case ECONNRESET:
2257 CNETERR("A connection with %s (%pI4h:%d) was reset; it may have rebooted.\n",
2258 libcfs_id2str(peer->ksnp_id),
2259 &conn->ksnc_ipaddr,
2260 conn->ksnc_port);
2261 break;
2262 case ETIMEDOUT:
2263 CNETERR("A connection with %s (%pI4h:%d) timed out; the network or node may be down.\n",
2264 libcfs_id2str(peer->ksnp_id),
2265 &conn->ksnc_ipaddr,
2266 conn->ksnc_port);
2267 break;
2268 default:
2269 CNETERR("An unexpected network error %d occurred with %s (%pI4h:%d\n",
2270 error,
2271 libcfs_id2str(peer->ksnp_id),
2272 &conn->ksnc_ipaddr,
2273 conn->ksnc_port);
2274 break;
2275 }
2276
2277 return conn;
2278 }
2279
2280 if (conn->ksnc_rx_started &&
2281 cfs_time_aftereq(cfs_time_current(),
2282 conn->ksnc_rx_deadline)) {
2283 /* Timed out incomplete incoming message */
2284 ksocknal_conn_addref(conn);
2285 CNETERR("Timeout receiving from %s (%pI4h:%d), state %d wanted %d left %d\n",
2286 libcfs_id2str(peer->ksnp_id),
2287 &conn->ksnc_ipaddr,
2288 conn->ksnc_port,
2289 conn->ksnc_rx_state,
2290 conn->ksnc_rx_nob_wanted,
2291 conn->ksnc_rx_nob_left);
2292 return conn;
2293 }
2294
2295 if ((!list_empty(&conn->ksnc_tx_queue) ||
2296 conn->ksnc_sock->sk->sk_wmem_queued != 0) &&
2297 cfs_time_aftereq(cfs_time_current(),
2298 conn->ksnc_tx_deadline)) {
2299 /* Timed out messages queued for sending or
2300 * buffered in the socket's send buffer */
2301 ksocknal_conn_addref(conn);
2302 CNETERR("Timeout sending data to %s (%pI4h:%d) the network or that node may be down.\n",
2303 libcfs_id2str(peer->ksnp_id),
2304 &conn->ksnc_ipaddr,
2305 conn->ksnc_port);
2306 return conn;
2307 }
2308 }
2309
2310 return NULL;
2311 }
2312
2313 static inline void
2314 ksocknal_flush_stale_txs(ksock_peer_t *peer)
2315 {
2316 ksock_tx_t *tx;
2317 LIST_HEAD (stale_txs);
2318
2319 write_lock_bh(&ksocknal_data.ksnd_global_lock);
2320
2321 while (!list_empty (&peer->ksnp_tx_queue)) {
2322 tx = list_entry (peer->ksnp_tx_queue.next,
2323 ksock_tx_t, tx_list);
2324
2325 if (!cfs_time_aftereq(cfs_time_current(),
2326 tx->tx_deadline))
2327 break;
2328
2329 list_del (&tx->tx_list);
2330 list_add_tail (&tx->tx_list, &stale_txs);
2331 }
2332
2333 write_unlock_bh(&ksocknal_data.ksnd_global_lock);
2334
2335 ksocknal_txlist_done(peer->ksnp_ni, &stale_txs, 1);
2336 }
2337
2338 static int
2339 ksocknal_send_keepalive_locked(ksock_peer_t *peer)
2340 {
2341 ksock_sched_t *sched;
2342 ksock_conn_t *conn;
2343 ksock_tx_t *tx;
2344
2345 if (list_empty(&peer->ksnp_conns)) /* last_alive will be updated by create_conn */
2346 return 0;
2347
2348 if (peer->ksnp_proto != &ksocknal_protocol_v3x)
2349 return 0;
2350
2351 if (*ksocknal_tunables.ksnd_keepalive <= 0 ||
2352 time_before(cfs_time_current(),
2353 cfs_time_add(peer->ksnp_last_alive,
2354 cfs_time_seconds(*ksocknal_tunables.ksnd_keepalive))))
2355 return 0;
2356
2357 if (time_before(cfs_time_current(), peer->ksnp_send_keepalive))
2358 return 0;
2359
2360 /* retry 10 secs later, so we don't put pressure
2361 * on this peer if we fail to send a keepalive this time */
2362 peer->ksnp_send_keepalive = cfs_time_shift(10);
2363
2364 conn = ksocknal_find_conn_locked(peer, NULL, 1);
2365 if (conn != NULL) {
2366 sched = conn->ksnc_scheduler;
2367
2368 spin_lock_bh(&sched->kss_lock);
2369 if (!list_empty(&conn->ksnc_tx_queue)) {
2370 spin_unlock_bh(&sched->kss_lock);
2371 /* there is a queued ACK, no need for a keepalive */
2372 return 0;
2373 }
2374
2375 spin_unlock_bh(&sched->kss_lock);
2376 }
2377
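/* Drop the shared global lock to allocate and launch the keepalive;
 * a non-zero return tells ksocknal_check_peer_timeouts() that the
 * lock was dropped, so it must restart its scan. */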
2378 read_unlock(&ksocknal_data.ksnd_global_lock);
2379
2380 /* cookie = 1 is reserved for keepalive PING */
2381 tx = ksocknal_alloc_tx_noop(1, 1);
2382 if (tx == NULL) {
2383 read_lock(&ksocknal_data.ksnd_global_lock);
2384 return -ENOMEM;
2385 }
2386
2387 if (ksocknal_launch_packet(peer->ksnp_ni, tx, peer->ksnp_id) == 0) {
2388 read_lock(&ksocknal_data.ksnd_global_lock);
2389 return 1;
2390 }
2391
2392 ksocknal_free_tx(tx);
2393 read_lock(&ksocknal_data.ksnd_global_lock);
2394
2395 return -EIO;
2396 }
2397
2398
2399 static void
2400 ksocknal_check_peer_timeouts (int idx)
2401 {
2402 struct list_head *peers = &ksocknal_data.ksnd_peers[idx];
2403 ksock_peer_t *peer;
2404 ksock_conn_t *conn;
2405 ksock_tx_t *tx;
2406
2407 again:
2408 /* NB. We expect to have a look at all the peers and not find any
2409 * connections to time out, so we just use a shared lock while we
2410 * take a look... */
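/* Whenever the shared lock has to be dropped (keepalive sent, conn
 * timed out, stale txs flushed, stale ZC_REQs found) the scan restarts
 * from 'again', because the peer list may have changed meanwhile. */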
2411 read_lock(&ksocknal_data.ksnd_global_lock);
2412
2413 list_for_each_entry(peer, peers, ksnp_list) {
2414 unsigned long deadline = 0;
2415 int resid = 0;
2416 int n = 0;
2417
2418 if (ksocknal_send_keepalive_locked(peer) != 0) {
2419 read_unlock(&ksocknal_data.ksnd_global_lock);
2420 goto again;
2421 }
2422
2423 conn = ksocknal_find_timed_out_conn (peer);
2424
2425 if (conn != NULL) {
2426 read_unlock(&ksocknal_data.ksnd_global_lock);
2427
2428 ksocknal_close_conn_and_siblings (conn, -ETIMEDOUT);
2429
2430 /* NB we won't find this one again, but we can't
2431 * just proceed with the next peer, since we dropped
2432 * ksnd_global_lock and it might be dead already! */
2433 ksocknal_conn_decref(conn);
2434 goto again;
2435 }
2436
2437 /* we can't process stale txs right here because we're
2438 * holding only a shared lock */
2439 if (!list_empty (&peer->ksnp_tx_queue)) {
2440 ksock_tx_t *tx =
2441 list_entry (peer->ksnp_tx_queue.next,
2442 ksock_tx_t, tx_list);
2443
2444 if (cfs_time_aftereq(cfs_time_current(),
2445 tx->tx_deadline)) {
2446
2447 ksocknal_peer_addref(peer);
2448 read_unlock(&ksocknal_data.ksnd_global_lock);
2449
2450 ksocknal_flush_stale_txs(peer);
2451
2452 ksocknal_peer_decref(peer);
2453 goto again;
2454 }
2455 }
2456
2457 if (list_empty(&peer->ksnp_zc_req_list))
2458 continue;
2459
2460 spin_lock(&peer->ksnp_lock);
2461 list_for_each_entry(tx, &peer->ksnp_zc_req_list, tx_zc_list) {
2462 if (!cfs_time_aftereq(cfs_time_current(),
2463 tx->tx_deadline))
2464 break;
2465 /* ignore the TX if connection is being closed */
2466 if (tx->tx_conn->ksnc_closing)
2467 continue;
2468 n++;
2469 }
2470
2471 if (n == 0) {
2472 spin_unlock(&peer->ksnp_lock);
2473 continue;
2474 }
2475
2476 tx = list_entry(peer->ksnp_zc_req_list.next,
2477 ksock_tx_t, tx_zc_list);
2478 deadline = tx->tx_deadline;
2479 resid = tx->tx_resid;
2480 conn = tx->tx_conn;
2481 ksocknal_conn_addref(conn);
2482
2483 spin_unlock(&peer->ksnp_lock);
2484 read_unlock(&ksocknal_data.ksnd_global_lock);
2485
2486 CERROR("Total %d stale ZC_REQs for peer %s detected; the oldest(%p) timed out %ld secs ago, resid: %d, wmem: %d\n",
2487 n, libcfs_nid2str(peer->ksnp_id.nid), tx,
2488 cfs_duration_sec(cfs_time_current() - deadline),
2489 resid, conn->ksnc_sock->sk->sk_wmem_queued);
2490
2491 ksocknal_close_conn_and_siblings (conn, -ETIMEDOUT);
2492 ksocknal_conn_decref(conn);
2493 goto again;
2494 }
2495
2496 read_unlock(&ksocknal_data.ksnd_global_lock);
2497 }
2498
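/* The reaper thread terminates connections on the deathrow list,
 * destroys zombie connections, re-queues connections that stalled with
 * ENOMEM, and periodically walks a slice of the peer table to check
 * for timeouts (see ksocknal_check_peer_timeouts()). */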
2499 int
2500 ksocknal_reaper (void *arg)
2501 {
2502 wait_queue_t wait;
2503 ksock_conn_t *conn;
2504 ksock_sched_t *sched;
2505 struct list_head enomem_conns;
2506 int nenomem_conns;
2507 long timeout;
2508 int i;
2509 int peer_index = 0;
2510 unsigned long deadline = cfs_time_current();
2511
2512 cfs_block_allsigs ();
2513
2514 INIT_LIST_HEAD(&enomem_conns);
2515 init_waitqueue_entry(&wait, current);
2516
2517 spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
2518
2519 while (!ksocknal_data.ksnd_shuttingdown) {
2520
2521 if (!list_empty (&ksocknal_data.ksnd_deathrow_conns)) {
2522 conn = list_entry (ksocknal_data. \
2523 ksnd_deathrow_conns.next,
2524 ksock_conn_t, ksnc_list);
2525 list_del (&conn->ksnc_list);
2526
2527 spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
2528
2529 ksocknal_terminate_conn(conn);
2530 ksocknal_conn_decref(conn);
2531
2532 spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
2533 continue;
2534 }
2535
2536 if (!list_empty (&ksocknal_data.ksnd_zombie_conns)) {
2537 conn = list_entry (ksocknal_data.ksnd_zombie_conns.\
2538 next, ksock_conn_t, ksnc_list);
2539 list_del (&conn->ksnc_list);
2540
2541 spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
2542
2543 ksocknal_destroy_conn(conn);
2544
2545 spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
2546 continue;
2547 }
2548
2549 if (!list_empty (&ksocknal_data.ksnd_enomem_conns)) {
2550 list_add(&enomem_conns,
2551 &ksocknal_data.ksnd_enomem_conns);
2552 list_del_init(&ksocknal_data.ksnd_enomem_conns);
2553 }
2554
2555 spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
2556
2557 /* reschedule all the connections that stalled with ENOMEM... */
2558 nenomem_conns = 0;
2559 while (!list_empty (&enomem_conns)) {
2560 conn = list_entry (enomem_conns.next,
2561 ksock_conn_t, ksnc_tx_list);
2562 list_del (&conn->ksnc_tx_list);
2563
2564 sched = conn->ksnc_scheduler;
2565
2566 spin_lock_bh(&sched->kss_lock);
2567
2568 LASSERT(conn->ksnc_tx_scheduled);
2569 conn->ksnc_tx_ready = 1;
2570 list_add_tail(&conn->ksnc_tx_list,
2571 &sched->kss_tx_conns);
2572 wake_up(&sched->kss_waitq);
2573
2574 spin_unlock_bh(&sched->kss_lock);
2575 nenomem_conns++;
2576 }
2577
2578 /* careful with the jiffy wrap... */
2579 while ((timeout = cfs_time_sub(deadline,
2580 cfs_time_current())) <= 0) {
2581 const int n = 4;
2582 const int p = 1;
2583 int chunk = ksocknal_data.ksnd_peer_hash_size;
2584
2585 /* Time to check for timeouts on a few more peers: I do
2586 * checks every 'p' seconds on a proportion of the peer
2587 * table and I need to check every connection 'n' times
2588 * within a timeout interval, to ensure I detect a
2589 * timeout on any connection within (n+1)/n times the
2590 * timeout interval. */
2591
2592 if (*ksocknal_tunables.ksnd_timeout > n * p)
2593 chunk = (chunk * n * p) /
2594 *ksocknal_tunables.ksnd_timeout;
2595 if (chunk == 0)
2596 chunk = 1;
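/* Example (hypothetical values): with a peer hash size of 127,
 * ksnd_timeout = 50s, n = 4 and p = 1, chunk = 127 * 4 * 1 / 50 = 10,
 * so roughly 10 hash buckets are scanned per second and the whole
 * table is covered about 4 times per timeout interval. */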
2597
2598 for (i = 0; i < chunk; i++) {
2599 ksocknal_check_peer_timeouts (peer_index);
2600 peer_index = (peer_index + 1) %
2601 ksocknal_data.ksnd_peer_hash_size;
2602 }
2603
2604 deadline = cfs_time_add(deadline, cfs_time_seconds(p));
2605 }
2606
2607 if (nenomem_conns != 0) {
2608 /* Reduce my timeout if I rescheduled ENOMEM conns.
2609 * This also prevents me getting woken immediately
2610 * if any go back on my enomem list. */
2611 timeout = SOCKNAL_ENOMEM_RETRY;
2612 }
2613 ksocknal_data.ksnd_reaper_waketime =
2614 cfs_time_add(cfs_time_current(), timeout);
2615
2616 set_current_state (TASK_INTERRUPTIBLE);
2617 add_wait_queue (&ksocknal_data.ksnd_reaper_waitq, &wait);
2618
2619 if (!ksocknal_data.ksnd_shuttingdown &&
2620 list_empty (&ksocknal_data.ksnd_deathrow_conns) &&
2621 list_empty (&ksocknal_data.ksnd_zombie_conns))
2622 schedule_timeout(timeout);
2623
2624 set_current_state (TASK_RUNNING);
2625 remove_wait_queue (&ksocknal_data.ksnd_reaper_waitq, &wait);
2626
2627 spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
2628 }
2629
2630 spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
2631
2632 ksocknal_thread_fini();
2633 return 0;
2634 }
2635