root/drivers/crypto/chelsio/chtls/chtls_cm.c


DEFINITIONS

This source file includes the following definitions.
  1. chtls_sock_create
  2. chtls_sock_release
  3. chtls_ipv4_netdev
  4. assign_rxopt
  5. chtls_purge_receive_queue
  6. chtls_purge_write_queue
  7. chtls_purge_recv_queue
  8. abort_arp_failure
  9. alloc_ctrl_skb
  10. chtls_send_abort
  11. chtls_send_reset
  12. release_tcp_port
  13. tcp_uncork
  14. chtls_close_conn
  15. make_close_transition
  16. chtls_close
  17. wait_for_states
  18. chtls_disconnect
  19. chtls_shutdown
  20. chtls_destroy_sock
  21. reset_listen_child
  22. chtls_disconnect_acceptq
  23. listen_hashfn
  24. listen_hash_add
  25. listen_hash_find
  26. listen_hash_del
  27. cleanup_syn_rcv_conn
  28. chtls_reset_synq
  29. chtls_listen_start
  30. chtls_listen_stop
  31. chtls_pass_open_rpl
  32. chtls_close_listsrv_rpl
  33. chtls_purge_wr_queue
  34. chtls_release_resources
  35. chtls_conn_done
  36. do_abort_syn_rcv
  37. pass_open_abort
  38. bl_pass_open_abort
  39. chtls_pass_open_arp_failure
  40. chtls_accept_rpl_arp_failure
  41. chtls_select_mss
  42. select_rcv_wscale
  43. chtls_pass_accept_rpl
  44. inet_inherit_port
  45. chtls_backlog_rcv
  46. chtls_set_tcp_window
  47. chtls_recv_sock
  48. mk_tid_release
  49. chtls_get_module
  50. chtls_pass_accept_request
  51. chtls_pass_accept_req
  52. make_established
  53. chtls_abort_conn
  54. DECLARE_TASK_FUNC
  55. add_to_reap_list
  56. add_pass_open_to_parent
  57. bl_add_pass_open_to_parent
  58. chtls_pass_establish
  59. handle_urg_ptr
  60. check_sk_callbacks
  61. handle_excess_rx
  62. chtls_recv_data
  63. chtls_rx_data
  64. chtls_recv_pdu
  65. chtls_rx_pdu
  66. chtls_set_hdrlen
  67. chtls_rx_hdr
  68. chtls_rx_cmp
  69. chtls_timewait
  70. chtls_peer_close
  71. chtls_close_con_rpl
  72. get_cpl_skb
  73. set_abort_rpl_wr
  74. send_defer_abort_rpl
  75. send_abort_rpl
  76. t4_defer_reply
  77. chtls_send_abort_rpl
  78. bl_abort_syn_rcv
  79. abort_syn_rcv
  80. chtls_abort_req_rss
  81. chtls_abort_rpl_rss
  82. chtls_conn_cpl
  83. chtls_rx_ack
  84. chtls_wr_ack

   1 // SPDX-License-Identifier: GPL-2.0-only
   2 /*
   3  * Copyright (c) 2018 Chelsio Communications, Inc.
   4  *
   5  * Written by: Atul Gupta (atul.gupta@chelsio.com)
   6  */
   7 
   8 #include <linux/module.h>
   9 #include <linux/list.h>
  10 #include <linux/workqueue.h>
  11 #include <linux/skbuff.h>
  12 #include <linux/timer.h>
  13 #include <linux/notifier.h>
  14 #include <linux/inetdevice.h>
  15 #include <linux/ip.h>
  16 #include <linux/tcp.h>
  17 #include <linux/sched/signal.h>
  18 #include <linux/kallsyms.h>
  19 #include <linux/kprobes.h>
  20 #include <linux/if_vlan.h>
  21 #include <net/inet_common.h>
  22 #include <net/tcp.h>
  23 #include <net/dst.h>
  24 #include <net/tls.h>
  25 
  26 #include "chtls.h"
  27 #include "chtls_cm.h"
  28 
  29 /*
  30  * State transitions and actions for close.  Note that if we are in SYN_SENT
  31  * we remain in that state as we cannot control a connection while it's in
  32  * SYN_SENT; such connections are allowed to establish and are then aborted.
  33  */
  34 static unsigned char new_state[16] = {
  35         /* current state:     new state:      action: */
  36         /* (Invalid)       */ TCP_CLOSE,
  37         /* TCP_ESTABLISHED */ TCP_FIN_WAIT1 | TCP_ACTION_FIN,
  38         /* TCP_SYN_SENT    */ TCP_SYN_SENT,
  39         /* TCP_SYN_RECV    */ TCP_FIN_WAIT1 | TCP_ACTION_FIN,
  40         /* TCP_FIN_WAIT1   */ TCP_FIN_WAIT1,
  41         /* TCP_FIN_WAIT2   */ TCP_FIN_WAIT2,
  42         /* TCP_TIME_WAIT   */ TCP_CLOSE,
  43         /* TCP_CLOSE       */ TCP_CLOSE,
  44         /* TCP_CLOSE_WAIT  */ TCP_LAST_ACK | TCP_ACTION_FIN,
  45         /* TCP_LAST_ACK    */ TCP_LAST_ACK,
  46         /* TCP_LISTEN      */ TCP_CLOSE,
  47         /* TCP_CLOSING     */ TCP_CLOSING,
  48 };
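
      /*
       * Editorial sketch (not part of the upstream driver): each new_state[]
       * entry packs the next TCP state in its low bits and an optional
       * TCP_ACTION_FIN flag in the high bits, so a close from ESTABLISHED
       * decodes as
       *
       *	next = new_state[TCP_ESTABLISHED];
       *	next & TCP_STATE_MASK	-> TCP_FIN_WAIT1, the new socket state
       *	next & TCP_ACTION_FIN	-> non-zero, a FIN must be sent
       *
       * make_close_transition() below performs exactly this decode.
       */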
  49 
  50 static struct chtls_sock *chtls_sock_create(struct chtls_dev *cdev)
  51 {
  52         struct chtls_sock *csk = kzalloc(sizeof(*csk), GFP_ATOMIC);
  53 
  54         if (!csk)
  55                 return NULL;
  56 
  57         csk->txdata_skb_cache = alloc_skb(TXDATA_SKB_LEN, GFP_ATOMIC);
  58         if (!csk->txdata_skb_cache) {
  59                 kfree(csk);
  60                 return NULL;
  61         }
  62 
  63         kref_init(&csk->kref);
  64         csk->cdev = cdev;
  65         skb_queue_head_init(&csk->txq);
  66         csk->wr_skb_head = NULL;
  67         csk->wr_skb_tail = NULL;
  68         csk->mss = MAX_MSS;
  69         csk->tlshws.ofld = 1;
  70         csk->tlshws.txkey = -1;
  71         csk->tlshws.rxkey = -1;
  72         csk->tlshws.mfs = TLS_MFS;
  73         skb_queue_head_init(&csk->tlshws.sk_recv_queue);
  74         return csk;
  75 }
  76 
  77 static void chtls_sock_release(struct kref *ref)
  78 {
  79         struct chtls_sock *csk =
  80                 container_of(ref, struct chtls_sock, kref);
  81 
  82         kfree(csk);
  83 }
  84 
  85 static struct net_device *chtls_ipv4_netdev(struct chtls_dev *cdev,
  86                                             struct sock *sk)
  87 {
  88         struct net_device *ndev = cdev->ports[0];
  89 
  90         if (likely(!inet_sk(sk)->inet_rcv_saddr))
  91                 return ndev;
  92 
  93         ndev = ip_dev_find(&init_net, inet_sk(sk)->inet_rcv_saddr);
  94         if (!ndev)
  95                 return NULL;
  96 
  97         if (is_vlan_dev(ndev))
  98                 return vlan_dev_real_dev(ndev);
  99         return ndev;
 100 }
 101 
 102 static void assign_rxopt(struct sock *sk, unsigned int opt)
 103 {
 104         const struct chtls_dev *cdev;
 105         struct chtls_sock *csk;
 106         struct tcp_sock *tp;
 107 
 108         csk = rcu_dereference_sk_user_data(sk);
 109         tp = tcp_sk(sk);
 110 
 111         cdev = csk->cdev;
 112         tp->tcp_header_len           = sizeof(struct tcphdr);
 113         tp->rx_opt.mss_clamp         = cdev->mtus[TCPOPT_MSS_G(opt)] - 40;
 114         tp->mss_cache                = tp->rx_opt.mss_clamp;
 115         tp->rx_opt.tstamp_ok         = TCPOPT_TSTAMP_G(opt);
  116         tp->rx_opt.sack_ok           = TCPOPT_SACK_G(opt);
 117         tp->rx_opt.wscale_ok         = TCPOPT_WSCALE_OK_G(opt);
 118         SND_WSCALE(tp)               = TCPOPT_SND_WSCALE_G(opt);
 119         if (!tp->rx_opt.wscale_ok)
 120                 tp->rx_opt.rcv_wscale = 0;
 121         if (tp->rx_opt.tstamp_ok) {
 122                 tp->tcp_header_len += TCPOLEN_TSTAMP_ALIGNED;
 123                 tp->rx_opt.mss_clamp -= TCPOLEN_TSTAMP_ALIGNED;
 124         } else if (csk->opt2 & TSTAMPS_EN_F) {
 125                 csk->opt2 &= ~TSTAMPS_EN_F;
 126                 csk->mtu_idx = TCPOPT_MSS_G(opt);
 127         }
 128 }
 129 
 130 static void chtls_purge_receive_queue(struct sock *sk)
 131 {
 132         struct sk_buff *skb;
 133 
 134         while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
 135                 skb_dst_set(skb, (void *)NULL);
 136                 kfree_skb(skb);
 137         }
 138 }
 139 
 140 static void chtls_purge_write_queue(struct sock *sk)
 141 {
 142         struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
 143         struct sk_buff *skb;
 144 
 145         while ((skb = __skb_dequeue(&csk->txq))) {
 146                 sk->sk_wmem_queued -= skb->truesize;
 147                 __kfree_skb(skb);
 148         }
 149 }
 150 
 151 static void chtls_purge_recv_queue(struct sock *sk)
 152 {
 153         struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
 154         struct chtls_hws *tlsk = &csk->tlshws;
 155         struct sk_buff *skb;
 156 
 157         while ((skb = __skb_dequeue(&tlsk->sk_recv_queue)) != NULL) {
 158                 skb_dst_set(skb, NULL);
 159                 kfree_skb(skb);
 160         }
 161 }
 162 
 163 static void abort_arp_failure(void *handle, struct sk_buff *skb)
 164 {
 165         struct cpl_abort_req *req = cplhdr(skb);
 166         struct chtls_dev *cdev;
 167 
 168         cdev = (struct chtls_dev *)handle;
 169         req->cmd = CPL_ABORT_NO_RST;
 170         cxgb4_ofld_send(cdev->lldi->ports[0], skb);
 171 }
 172 
 173 static struct sk_buff *alloc_ctrl_skb(struct sk_buff *skb, int len)
 174 {
 175         if (likely(skb && !skb_shared(skb) && !skb_cloned(skb))) {
 176                 __skb_trim(skb, 0);
 177                 refcount_add(2, &skb->users);
 178         } else {
 179                 skb = alloc_skb(len, GFP_KERNEL | __GFP_NOFAIL);
 180         }
 181         return skb;
 182 }
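
      /*
       * Editorial note (an inference from the code above, not an upstream
       * comment): when the cached control skb is private, it is trimmed and
       * given extra references so it survives the frees done by the transmit
       * path and txdata_skb_cache can be reused instead of reallocated; the
       * exact accounting depends on how many times the send path releases it.
       * Typical use, as in chtls_send_abort() below:
       *
       *	skb = alloc_ctrl_skb(csk->txdata_skb_cache, sizeof(*req));
       */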
 183 
 184 static void chtls_send_abort(struct sock *sk, int mode, struct sk_buff *skb)
 185 {
 186         struct cpl_abort_req *req;
 187         struct chtls_sock *csk;
 188         struct tcp_sock *tp;
 189 
 190         csk = rcu_dereference_sk_user_data(sk);
 191         tp = tcp_sk(sk);
 192 
 193         if (!skb)
 194                 skb = alloc_ctrl_skb(csk->txdata_skb_cache, sizeof(*req));
 195 
 196         req = (struct cpl_abort_req *)skb_put(skb, sizeof(*req));
 197         INIT_TP_WR_CPL(req, CPL_ABORT_REQ, csk->tid);
 198         skb_set_queue_mapping(skb, (csk->txq_idx << 1) | CPL_PRIORITY_DATA);
 199         req->rsvd0 = htonl(tp->snd_nxt);
 200         req->rsvd1 = !csk_flag_nochk(csk, CSK_TX_DATA_SENT);
 201         req->cmd = mode;
 202         t4_set_arp_err_handler(skb, csk->cdev, abort_arp_failure);
 203         send_or_defer(sk, tp, skb, mode == CPL_ABORT_SEND_RST);
 204 }
 205 
 206 static void chtls_send_reset(struct sock *sk, int mode, struct sk_buff *skb)
 207 {
 208         struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
 209 
 210         if (unlikely(csk_flag_nochk(csk, CSK_ABORT_SHUTDOWN) ||
 211                      !csk->cdev)) {
 212                 if (sk->sk_state == TCP_SYN_RECV)
 213                         csk_set_flag(csk, CSK_RST_ABORTED);
 214                 goto out;
 215         }
 216 
 217         if (!csk_flag_nochk(csk, CSK_TX_DATA_SENT)) {
 218                 struct tcp_sock *tp = tcp_sk(sk);
 219 
 220                 if (send_tx_flowc_wr(sk, 0, tp->snd_nxt, tp->rcv_nxt) < 0)
 221                         WARN_ONCE(1, "send tx flowc error");
 222                 csk_set_flag(csk, CSK_TX_DATA_SENT);
 223         }
 224 
 225         csk_set_flag(csk, CSK_ABORT_RPL_PENDING);
 226         chtls_purge_write_queue(sk);
 227 
 228         csk_set_flag(csk, CSK_ABORT_SHUTDOWN);
 229         if (sk->sk_state != TCP_SYN_RECV)
 230                 chtls_send_abort(sk, mode, skb);
 231         else
 232                 goto out;
 233 
 234         return;
 235 out:
 236         kfree_skb(skb);
 237 }
 238 
 239 static void release_tcp_port(struct sock *sk)
 240 {
 241         if (inet_csk(sk)->icsk_bind_hash)
 242                 inet_put_port(sk);
 243 }
 244 
 245 static void tcp_uncork(struct sock *sk)
 246 {
 247         struct tcp_sock *tp = tcp_sk(sk);
 248 
 249         if (tp->nonagle & TCP_NAGLE_CORK) {
 250                 tp->nonagle &= ~TCP_NAGLE_CORK;
 251                 chtls_tcp_push(sk, 0);
 252         }
 253 }
 254 
 255 static void chtls_close_conn(struct sock *sk)
 256 {
 257         struct cpl_close_con_req *req;
 258         struct chtls_sock *csk;
 259         struct sk_buff *skb;
 260         unsigned int tid;
 261         unsigned int len;
 262 
 263         len = roundup(sizeof(struct cpl_close_con_req), 16);
 264         csk = rcu_dereference_sk_user_data(sk);
 265         tid = csk->tid;
 266 
 267         skb = alloc_skb(len, GFP_KERNEL | __GFP_NOFAIL);
 268         req = (struct cpl_close_con_req *)__skb_put(skb, len);
 269         memset(req, 0, len);
 270         req->wr.wr_hi = htonl(FW_WR_OP_V(FW_TP_WR) |
 271                               FW_WR_IMMDLEN_V(sizeof(*req) -
 272                                               sizeof(req->wr)));
 273         req->wr.wr_mid = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*req), 16)) |
 274                                FW_WR_FLOWID_V(tid));
 275 
 276         OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, tid));
 277 
 278         tcp_uncork(sk);
 279         skb_entail(sk, skb, ULPCB_FLAG_NO_HDR | ULPCB_FLAG_NO_APPEND);
 280         if (sk->sk_state != TCP_SYN_SENT)
 281                 chtls_push_frames(csk, 1);
 282 }
 283 
 284 /*
 285  * Perform a state transition during close and return the actions indicated
  286  * for the transition.  Do not make this function inline; the main reason
 287  * it exists at all is to avoid multiple inlining of tcp_set_state.
 288  */
 289 static int make_close_transition(struct sock *sk)
 290 {
 291         int next = (int)new_state[sk->sk_state];
 292 
 293         tcp_set_state(sk, next & TCP_STATE_MASK);
 294         return next & TCP_ACTION_FIN;
 295 }
 296 
 297 void chtls_close(struct sock *sk, long timeout)
 298 {
 299         int data_lost, prev_state;
 300         struct chtls_sock *csk;
 301 
 302         csk = rcu_dereference_sk_user_data(sk);
 303 
 304         lock_sock(sk);
 305         sk->sk_shutdown |= SHUTDOWN_MASK;
 306 
 307         data_lost = skb_queue_len(&sk->sk_receive_queue);
 308         data_lost |= skb_queue_len(&csk->tlshws.sk_recv_queue);
 309         chtls_purge_recv_queue(sk);
 310         chtls_purge_receive_queue(sk);
 311 
 312         if (sk->sk_state == TCP_CLOSE) {
 313                 goto wait;
 314         } else if (data_lost || sk->sk_state == TCP_SYN_SENT) {
 315                 chtls_send_reset(sk, CPL_ABORT_SEND_RST, NULL);
 316                 release_tcp_port(sk);
 317                 goto unlock;
 318         } else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
 319                 sk->sk_prot->disconnect(sk, 0);
 320         } else if (make_close_transition(sk)) {
 321                 chtls_close_conn(sk);
 322         }
 323 wait:
 324         if (timeout)
 325                 sk_stream_wait_close(sk, timeout);
 326 
 327 unlock:
 328         prev_state = sk->sk_state;
 329         sock_hold(sk);
 330         sock_orphan(sk);
 331 
 332         release_sock(sk);
 333 
 334         local_bh_disable();
 335         bh_lock_sock(sk);
 336 
 337         if (prev_state != TCP_CLOSE && sk->sk_state == TCP_CLOSE)
 338                 goto out;
 339 
 340         if (sk->sk_state == TCP_FIN_WAIT2 && tcp_sk(sk)->linger2 < 0 &&
 341             !csk_flag(sk, CSK_ABORT_SHUTDOWN)) {
 342                 struct sk_buff *skb;
 343 
 344                 skb = alloc_skb(sizeof(struct cpl_abort_req), GFP_ATOMIC);
 345                 if (skb)
 346                         chtls_send_reset(sk, CPL_ABORT_SEND_RST, skb);
 347         }
 348 
 349         if (sk->sk_state == TCP_CLOSE)
 350                 inet_csk_destroy_sock(sk);
 351 
 352 out:
 353         bh_unlock_sock(sk);
 354         local_bh_enable();
 355         sock_put(sk);
 356 }
 357 
 358 /*
  359  * Wait until a socket enters one of the given states.
 360  */
 361 static int wait_for_states(struct sock *sk, unsigned int states)
 362 {
 363         DECLARE_WAITQUEUE(wait, current);
 364         struct socket_wq _sk_wq;
 365         long current_timeo;
 366         int err = 0;
 367 
 368         current_timeo = 200;
 369 
 370         /*
 371          * We want this to work even when there's no associated struct socket.
 372          * In that case we provide a temporary wait_queue_head_t.
 373          */
 374         if (!sk->sk_wq) {
 375                 init_waitqueue_head(&_sk_wq.wait);
 376                 _sk_wq.fasync_list = NULL;
 377                 init_rcu_head_on_stack(&_sk_wq.rcu);
 378                 RCU_INIT_POINTER(sk->sk_wq, &_sk_wq);
 379         }
 380 
 381         add_wait_queue(sk_sleep(sk), &wait);
 382         while (!sk_in_state(sk, states)) {
 383                 if (!current_timeo) {
 384                         err = -EBUSY;
 385                         break;
 386                 }
 387                 if (signal_pending(current)) {
 388                         err = sock_intr_errno(current_timeo);
 389                         break;
 390                 }
 391                 set_current_state(TASK_UNINTERRUPTIBLE);
 392                 release_sock(sk);
 393                 if (!sk_in_state(sk, states))
 394                         current_timeo = schedule_timeout(current_timeo);
 395                 __set_current_state(TASK_RUNNING);
 396                 lock_sock(sk);
 397         }
 398         remove_wait_queue(sk_sleep(sk), &wait);
 399 
 400         if (rcu_dereference(sk->sk_wq) == &_sk_wq)
 401                 sk->sk_wq = NULL;
 402         return err;
 403 }
 404 
 405 int chtls_disconnect(struct sock *sk, int flags)
 406 {
 407         struct tcp_sock *tp;
 408         int err;
 409 
 410         tp = tcp_sk(sk);
 411         chtls_purge_recv_queue(sk);
 412         chtls_purge_receive_queue(sk);
 413         chtls_purge_write_queue(sk);
 414 
 415         if (sk->sk_state != TCP_CLOSE) {
 416                 sk->sk_err = ECONNRESET;
 417                 chtls_send_reset(sk, CPL_ABORT_SEND_RST, NULL);
 418                 err = wait_for_states(sk, TCPF_CLOSE);
 419                 if (err)
 420                         return err;
 421         }
 422         chtls_purge_recv_queue(sk);
 423         chtls_purge_receive_queue(sk);
 424         tp->max_window = 0xFFFF << (tp->rx_opt.snd_wscale);
 425         return tcp_disconnect(sk, flags);
 426 }
 427 
 428 #define SHUTDOWN_ELIGIBLE_STATE (TCPF_ESTABLISHED | \
 429                                  TCPF_SYN_RECV | TCPF_CLOSE_WAIT)
 430 void chtls_shutdown(struct sock *sk, int how)
 431 {
 432         if ((how & SEND_SHUTDOWN) &&
 433             sk_in_state(sk, SHUTDOWN_ELIGIBLE_STATE) &&
 434             make_close_transition(sk))
 435                 chtls_close_conn(sk);
 436 }
 437 
 438 void chtls_destroy_sock(struct sock *sk)
 439 {
 440         struct chtls_sock *csk;
 441 
 442         csk = rcu_dereference_sk_user_data(sk);
 443         chtls_purge_recv_queue(sk);
 444         csk->ulp_mode = ULP_MODE_NONE;
 445         chtls_purge_write_queue(sk);
 446         free_tls_keyid(sk);
 447         kref_put(&csk->kref, chtls_sock_release);
 448         sk->sk_prot = &tcp_prot;
 449         sk->sk_prot->destroy(sk);
 450 }
 451 
 452 static void reset_listen_child(struct sock *child)
 453 {
 454         struct chtls_sock *csk = rcu_dereference_sk_user_data(child);
 455         struct sk_buff *skb;
 456 
 457         skb = alloc_ctrl_skb(csk->txdata_skb_cache,
 458                              sizeof(struct cpl_abort_req));
 459 
 460         chtls_send_reset(child, CPL_ABORT_SEND_RST, skb);
 461         sock_orphan(child);
 462         INC_ORPHAN_COUNT(child);
 463         if (child->sk_state == TCP_CLOSE)
 464                 inet_csk_destroy_sock(child);
 465 }
 466 
 467 static void chtls_disconnect_acceptq(struct sock *listen_sk)
 468 {
 469         struct request_sock **pprev;
 470 
 471         pprev = ACCEPT_QUEUE(listen_sk);
 472         while (*pprev) {
 473                 struct request_sock *req = *pprev;
 474 
 475                 if (req->rsk_ops == &chtls_rsk_ops) {
 476                         struct sock *child = req->sk;
 477 
 478                         *pprev = req->dl_next;
 479                         sk_acceptq_removed(listen_sk);
 480                         reqsk_put(req);
 481                         sock_hold(child);
 482                         local_bh_disable();
 483                         bh_lock_sock(child);
 484                         release_tcp_port(child);
 485                         reset_listen_child(child);
 486                         bh_unlock_sock(child);
 487                         local_bh_enable();
 488                         sock_put(child);
 489                 } else {
 490                         pprev = &req->dl_next;
 491                 }
 492         }
 493 }
 494 
 495 static int listen_hashfn(const struct sock *sk)
 496 {
 497         return ((unsigned long)sk >> 10) & (LISTEN_INFO_HASH_SIZE - 1);
 498 }
 499 
 500 static struct listen_info *listen_hash_add(struct chtls_dev *cdev,
 501                                            struct sock *sk,
 502                                            unsigned int stid)
 503 {
 504         struct listen_info *p = kmalloc(sizeof(*p), GFP_KERNEL);
 505 
 506         if (p) {
 507                 int key = listen_hashfn(sk);
 508 
 509                 p->sk = sk;
 510                 p->stid = stid;
 511                 spin_lock(&cdev->listen_lock);
 512                 p->next = cdev->listen_hash_tab[key];
 513                 cdev->listen_hash_tab[key] = p;
 514                 spin_unlock(&cdev->listen_lock);
 515         }
 516         return p;
 517 }
 518 
 519 static int listen_hash_find(struct chtls_dev *cdev,
 520                             struct sock *sk)
 521 {
 522         struct listen_info *p;
 523         int stid = -1;
 524         int key;
 525 
 526         key = listen_hashfn(sk);
 527 
 528         spin_lock(&cdev->listen_lock);
 529         for (p = cdev->listen_hash_tab[key]; p; p = p->next)
 530                 if (p->sk == sk) {
 531                         stid = p->stid;
 532                         break;
 533                 }
 534         spin_unlock(&cdev->listen_lock);
 535         return stid;
 536 }
 537 
 538 static int listen_hash_del(struct chtls_dev *cdev,
 539                            struct sock *sk)
 540 {
 541         struct listen_info *p, **prev;
 542         int stid = -1;
 543         int key;
 544 
 545         key = listen_hashfn(sk);
 546         prev = &cdev->listen_hash_tab[key];
 547 
 548         spin_lock(&cdev->listen_lock);
 549         for (p = *prev; p; prev = &p->next, p = p->next)
 550                 if (p->sk == sk) {
 551                         stid = p->stid;
 552                         *prev = p->next;
 553                         kfree(p);
 554                         break;
 555                 }
 556         spin_unlock(&cdev->listen_lock);
 557         return stid;
 558 }
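
      /*
       * Editorial sketch of the listen-hash lifecycle (illustrative; it
       * mirrors chtls_listen_start()/chtls_listen_stop() below):
       *
       *	stid = cxgb4_alloc_stid(cdev->tids, sk->sk_family, ctx);
       *	listen_hash_add(cdev, sk, stid);	server being created
       *	...
       *	stid = listen_hash_del(cdev, sk);	server going away
       *	cxgb4_remove_server(...);
       *
       * The table only maps a listening struct sock to its hardware STID;
       * listen_hashfn() hashes the socket pointer itself, shifting right by
       * 10 presumably because the low bits carry little entropy given
       * allocator alignment and object size.
       */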
 559 
 560 static void cleanup_syn_rcv_conn(struct sock *child, struct sock *parent)
 561 {
 562         struct request_sock *req;
 563         struct chtls_sock *csk;
 564 
 565         csk = rcu_dereference_sk_user_data(child);
 566         req = csk->passive_reap_next;
 567 
 568         reqsk_queue_removed(&inet_csk(parent)->icsk_accept_queue, req);
 569         __skb_unlink((struct sk_buff *)&csk->synq, &csk->listen_ctx->synq);
 570         chtls_reqsk_free(req);
 571         csk->passive_reap_next = NULL;
 572 }
 573 
 574 static void chtls_reset_synq(struct listen_ctx *listen_ctx)
 575 {
 576         struct sock *listen_sk = listen_ctx->lsk;
 577 
 578         while (!skb_queue_empty(&listen_ctx->synq)) {
 579                 struct chtls_sock *csk =
 580                         container_of((struct synq *)__skb_dequeue
 581                                 (&listen_ctx->synq), struct chtls_sock, synq);
 582                 struct sock *child = csk->sk;
 583 
 584                 cleanup_syn_rcv_conn(child, listen_sk);
 585                 sock_hold(child);
 586                 local_bh_disable();
 587                 bh_lock_sock(child);
 588                 release_tcp_port(child);
 589                 reset_listen_child(child);
 590                 bh_unlock_sock(child);
 591                 local_bh_enable();
 592                 sock_put(child);
 593         }
 594 }
 595 
 596 int chtls_listen_start(struct chtls_dev *cdev, struct sock *sk)
 597 {
 598         struct net_device *ndev;
 599         struct listen_ctx *ctx;
 600         struct adapter *adap;
 601         struct port_info *pi;
 602         int stid;
 603         int ret;
 604 
 605         if (sk->sk_family != PF_INET)
 606                 return -EAGAIN;
 607 
 608         rcu_read_lock();
 609         ndev = chtls_ipv4_netdev(cdev, sk);
 610         rcu_read_unlock();
 611         if (!ndev)
 612                 return -EBADF;
 613 
 614         pi = netdev_priv(ndev);
 615         adap = pi->adapter;
 616         if (!(adap->flags & CXGB4_FULL_INIT_DONE))
 617                 return -EBADF;
 618 
 619         if (listen_hash_find(cdev, sk) >= 0)   /* already have it */
 620                 return -EADDRINUSE;
 621 
 622         ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
 623         if (!ctx)
 624                 return -ENOMEM;
 625 
 626         __module_get(THIS_MODULE);
 627         ctx->lsk = sk;
 628         ctx->cdev = cdev;
 629         ctx->state = T4_LISTEN_START_PENDING;
 630         skb_queue_head_init(&ctx->synq);
 631 
 632         stid = cxgb4_alloc_stid(cdev->tids, sk->sk_family, ctx);
 633         if (stid < 0)
 634                 goto free_ctx;
 635 
 636         sock_hold(sk);
 637         if (!listen_hash_add(cdev, sk, stid))
 638                 goto free_stid;
 639 
 640         ret = cxgb4_create_server(ndev, stid,
 641                                   inet_sk(sk)->inet_rcv_saddr,
 642                                   inet_sk(sk)->inet_sport, 0,
 643                                   cdev->lldi->rxq_ids[0]);
 644         if (ret > 0)
 645                 ret = net_xmit_errno(ret);
 646         if (ret)
 647                 goto del_hash;
 648         return 0;
 649 del_hash:
 650         listen_hash_del(cdev, sk);
 651 free_stid:
 652         cxgb4_free_stid(cdev->tids, stid, sk->sk_family);
 653         sock_put(sk);
 654 free_ctx:
 655         kfree(ctx);
 656         module_put(THIS_MODULE);
 657         return -EBADF;
 658 }
 659 
 660 void chtls_listen_stop(struct chtls_dev *cdev, struct sock *sk)
 661 {
 662         struct listen_ctx *listen_ctx;
 663         int stid;
 664 
 665         stid = listen_hash_del(cdev, sk);
 666         if (stid < 0)
 667                 return;
 668 
 669         listen_ctx = (struct listen_ctx *)lookup_stid(cdev->tids, stid);
 670         chtls_reset_synq(listen_ctx);
 671 
 672         cxgb4_remove_server(cdev->lldi->ports[0], stid,
 673                             cdev->lldi->rxq_ids[0], 0);
 674         chtls_disconnect_acceptq(sk);
 675 }
 676 
 677 static int chtls_pass_open_rpl(struct chtls_dev *cdev, struct sk_buff *skb)
 678 {
 679         struct cpl_pass_open_rpl *rpl = cplhdr(skb) + RSS_HDR;
 680         unsigned int stid = GET_TID(rpl);
 681         struct listen_ctx *listen_ctx;
 682 
 683         listen_ctx = (struct listen_ctx *)lookup_stid(cdev->tids, stid);
 684         if (!listen_ctx)
 685                 return CPL_RET_BUF_DONE;
 686 
 687         if (listen_ctx->state == T4_LISTEN_START_PENDING) {
 688                 listen_ctx->state = T4_LISTEN_STARTED;
 689                 return CPL_RET_BUF_DONE;
 690         }
 691 
 692         if (rpl->status != CPL_ERR_NONE) {
 693                 pr_info("Unexpected PASS_OPEN_RPL status %u for STID %u\n",
 694                         rpl->status, stid);
 695                 return CPL_RET_BUF_DONE;
 696         }
 697         cxgb4_free_stid(cdev->tids, stid, listen_ctx->lsk->sk_family);
 698         sock_put(listen_ctx->lsk);
 699         kfree(listen_ctx);
 700         module_put(THIS_MODULE);
 701 
 702         return 0;
 703 }
 704 
 705 static int chtls_close_listsrv_rpl(struct chtls_dev *cdev, struct sk_buff *skb)
 706 {
 707         struct cpl_close_listsvr_rpl *rpl = cplhdr(skb) + RSS_HDR;
 708         struct listen_ctx *listen_ctx;
 709         unsigned int stid;
 710         void *data;
 711 
 712         stid = GET_TID(rpl);
 713         data = lookup_stid(cdev->tids, stid);
 714         listen_ctx = (struct listen_ctx *)data;
 715 
 716         if (rpl->status != CPL_ERR_NONE) {
 717                 pr_info("Unexpected CLOSE_LISTSRV_RPL status %u for STID %u\n",
 718                         rpl->status, stid);
 719                 return CPL_RET_BUF_DONE;
 720         }
 721 
 722         cxgb4_free_stid(cdev->tids, stid, listen_ctx->lsk->sk_family);
 723         sock_put(listen_ctx->lsk);
 724         kfree(listen_ctx);
 725         module_put(THIS_MODULE);
 726 
 727         return 0;
 728 }
 729 
 730 static void chtls_purge_wr_queue(struct sock *sk)
 731 {
 732         struct sk_buff *skb;
 733 
 734         while ((skb = dequeue_wr(sk)) != NULL)
 735                 kfree_skb(skb);
 736 }
 737 
 738 static void chtls_release_resources(struct sock *sk)
 739 {
 740         struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
 741         struct chtls_dev *cdev = csk->cdev;
 742         unsigned int tid = csk->tid;
 743         struct tid_info *tids;
 744 
 745         if (!cdev)
 746                 return;
 747 
 748         tids = cdev->tids;
 749         kfree_skb(csk->txdata_skb_cache);
 750         csk->txdata_skb_cache = NULL;
 751 
 752         if (csk->wr_credits != csk->wr_max_credits) {
 753                 chtls_purge_wr_queue(sk);
 754                 chtls_reset_wr_list(csk);
 755         }
 756 
 757         if (csk->l2t_entry) {
 758                 cxgb4_l2t_release(csk->l2t_entry);
 759                 csk->l2t_entry = NULL;
 760         }
 761 
 762         cxgb4_remove_tid(tids, csk->port_id, tid, sk->sk_family);
 763         sock_put(sk);
 764 }
 765 
 766 static void chtls_conn_done(struct sock *sk)
 767 {
 768         if (sock_flag(sk, SOCK_DEAD))
 769                 chtls_purge_receive_queue(sk);
 770         sk_wakeup_sleepers(sk, 0);
 771         tcp_done(sk);
 772 }
 773 
 774 static void do_abort_syn_rcv(struct sock *child, struct sock *parent)
 775 {
 776         /*
 777          * If the server is still open we clean up the child connection,
 778          * otherwise the server already did the clean up as it was purging
 779          * its SYN queue and the skb was just sitting in its backlog.
 780          */
 781         if (likely(parent->sk_state == TCP_LISTEN)) {
 782                 cleanup_syn_rcv_conn(child, parent);
 783                 /* Without the below call to sock_orphan,
 784                  * we leak the socket resource with syn_flood test
 785                  * as inet_csk_destroy_sock will not be called
 786                  * in tcp_done since SOCK_DEAD flag is not set.
 787                  * Kernel handles this differently where new socket is
 788                  * created only after 3 way handshake is done.
 789                  */
 790                 sock_orphan(child);
 791                 percpu_counter_inc((child)->sk_prot->orphan_count);
 792                 chtls_release_resources(child);
 793                 chtls_conn_done(child);
 794         } else {
 795                 if (csk_flag(child, CSK_RST_ABORTED)) {
 796                         chtls_release_resources(child);
 797                         chtls_conn_done(child);
 798                 }
 799         }
 800 }
 801 
 802 static void pass_open_abort(struct sock *child, struct sock *parent,
 803                             struct sk_buff *skb)
 804 {
 805         do_abort_syn_rcv(child, parent);
 806         kfree_skb(skb);
 807 }
 808 
 809 static void bl_pass_open_abort(struct sock *lsk, struct sk_buff *skb)
 810 {
 811         pass_open_abort(skb->sk, lsk, skb);
 812 }
 813 
 814 static void chtls_pass_open_arp_failure(struct sock *sk,
 815                                         struct sk_buff *skb)
 816 {
 817         const struct request_sock *oreq;
 818         struct chtls_sock *csk;
 819         struct chtls_dev *cdev;
 820         struct sock *parent;
 821         void *data;
 822 
 823         csk = rcu_dereference_sk_user_data(sk);
 824         cdev = csk->cdev;
 825 
 826         /*
 827          * If the connection is being aborted due to the parent listening
 828          * socket going away there's nothing to do, the ABORT_REQ will close
 829          * the connection.
 830          */
 831         if (csk_flag(sk, CSK_ABORT_RPL_PENDING)) {
 832                 kfree_skb(skb);
 833                 return;
 834         }
 835 
 836         oreq = csk->passive_reap_next;
 837         data = lookup_stid(cdev->tids, oreq->ts_recent);
 838         parent = ((struct listen_ctx *)data)->lsk;
 839 
 840         bh_lock_sock(parent);
 841         if (!sock_owned_by_user(parent)) {
 842                 pass_open_abort(sk, parent, skb);
 843         } else {
 844                 BLOG_SKB_CB(skb)->backlog_rcv = bl_pass_open_abort;
 845                 __sk_add_backlog(parent, skb);
 846         }
 847         bh_unlock_sock(parent);
 848 }
 849 
 850 static void chtls_accept_rpl_arp_failure(void *handle,
 851                                          struct sk_buff *skb)
 852 {
 853         struct sock *sk = (struct sock *)handle;
 854 
 855         sock_hold(sk);
 856         process_cpl_msg(chtls_pass_open_arp_failure, sk, skb);
 857         sock_put(sk);
 858 }
 859 
 860 static unsigned int chtls_select_mss(const struct chtls_sock *csk,
 861                                      unsigned int pmtu,
 862                                      struct cpl_pass_accept_req *req)
 863 {
 864         struct chtls_dev *cdev;
 865         struct dst_entry *dst;
 866         unsigned int tcpoptsz;
 867         unsigned int iphdrsz;
 868         unsigned int mtu_idx;
 869         struct tcp_sock *tp;
 870         unsigned int mss;
 871         struct sock *sk;
 872 
 873         mss = ntohs(req->tcpopt.mss);
 874         sk = csk->sk;
 875         dst = __sk_dst_get(sk);
 876         cdev = csk->cdev;
 877         tp = tcp_sk(sk);
 878         tcpoptsz = 0;
 879 
 880         iphdrsz = sizeof(struct iphdr) + sizeof(struct tcphdr);
 881         if (req->tcpopt.tstamp)
 882                 tcpoptsz += round_up(TCPOLEN_TIMESTAMP, 4);
 883 
 884         tp->advmss = dst_metric_advmss(dst);
 885         if (USER_MSS(tp) && tp->advmss > USER_MSS(tp))
 886                 tp->advmss = USER_MSS(tp);
 887         if (tp->advmss > pmtu - iphdrsz)
 888                 tp->advmss = pmtu - iphdrsz;
 889         if (mss && tp->advmss > mss)
 890                 tp->advmss = mss;
 891 
 892         tp->advmss = cxgb4_best_aligned_mtu(cdev->lldi->mtus,
 893                                             iphdrsz + tcpoptsz,
 894                                             tp->advmss - tcpoptsz,
 895                                             8, &mtu_idx);
 896         tp->advmss -= iphdrsz;
 897 
 898         inet_csk(sk)->icsk_pmtu_cookie = pmtu;
 899         return mtu_idx;
 900 }
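
      /*
       * Worked example (illustrative numbers, not from the driver): with a
       * 1500-byte path MTU and timestamps negotiated, iphdrsz = 40 and
       * tcpoptsz = 12 (TCPOLEN_TIMESTAMP rounded up to 4).  tp->advmss is
       * clamped to 1500 - 40 = 1460, and cxgb4_best_aligned_mtu() then
       * searches the hardware MTU table for the largest entry whose TCP
       * payload (MTU - 52) fits in 1448 bytes with 8-byte alignment; 1500
       * qualifies (1448 = 181 * 8), so mtu_idx points at the 1500 entry and
       * the final advmss is 1460.
       */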
 901 
 902 static unsigned int select_rcv_wscale(int space, int wscale_ok, int win_clamp)
 903 {
 904         int wscale = 0;
 905 
 906         if (space > MAX_RCV_WND)
 907                 space = MAX_RCV_WND;
 908         if (win_clamp && win_clamp < space)
 909                 space = win_clamp;
 910 
 911         if (wscale_ok) {
 912                 while (wscale < 14 && (65535 << wscale) < space)
 913                         wscale++;
 914         }
 915         return wscale;
 916 }
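
      /*
       * Worked example (illustrative): with wscale_ok set, no clamp, and a
       * 1 MB receive space, the loop runs while 65535 << wscale < space:
       *
       *	65535 << 4 = 1048560 <  1048576  -> wscale becomes 5
       *	65535 << 5 = 2097120 >= 1048576  -> stop
       *
       * so a window scale of 5 is advertised, the smallest shift that lets
       * the 16-bit window field cover the whole buffer (capped at 14, the
       * RFC 7323 maximum).
       */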
 917 
 918 static void chtls_pass_accept_rpl(struct sk_buff *skb,
 919                                   struct cpl_pass_accept_req *req,
 920                                   unsigned int tid)
 921 
 922 {
 923         struct cpl_t5_pass_accept_rpl *rpl5;
 924         struct cxgb4_lld_info *lldi;
 925         const struct tcphdr *tcph;
 926         const struct tcp_sock *tp;
 927         struct chtls_sock *csk;
 928         unsigned int len;
 929         struct sock *sk;
 930         u32 opt2, hlen;
 931         u64 opt0;
 932 
 933         sk = skb->sk;
 934         tp = tcp_sk(sk);
 935         csk = sk->sk_user_data;
 936         csk->tid = tid;
 937         lldi = csk->cdev->lldi;
 938         len = roundup(sizeof(*rpl5), 16);
 939 
 940         rpl5 = __skb_put_zero(skb, len);
 941         INIT_TP_WR(rpl5, tid);
 942 
 943         OPCODE_TID(rpl5) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
 944                                                      csk->tid));
 945         csk->mtu_idx = chtls_select_mss(csk, dst_mtu(__sk_dst_get(sk)),
 946                                         req);
 947         opt0 = TCAM_BYPASS_F |
 948                WND_SCALE_V(RCV_WSCALE(tp)) |
 949                MSS_IDX_V(csk->mtu_idx) |
 950                L2T_IDX_V(csk->l2t_entry->idx) |
 951                NAGLE_V(!(tp->nonagle & TCP_NAGLE_OFF)) |
 952                TX_CHAN_V(csk->tx_chan) |
 953                SMAC_SEL_V(csk->smac_idx) |
 954                DSCP_V(csk->tos >> 2) |
 955                ULP_MODE_V(ULP_MODE_TLS) |
 956                RCV_BUFSIZ_V(min(tp->rcv_wnd >> 10, RCV_BUFSIZ_M));
 957 
 958         opt2 = RX_CHANNEL_V(0) |
 959                 RSS_QUEUE_VALID_F | RSS_QUEUE_V(csk->rss_qid);
 960 
 961         if (!is_t5(lldi->adapter_type))
 962                 opt2 |= RX_FC_DISABLE_F;
 963         if (req->tcpopt.tstamp)
 964                 opt2 |= TSTAMPS_EN_F;
 965         if (req->tcpopt.sack)
 966                 opt2 |= SACK_EN_F;
 967         hlen = ntohl(req->hdr_len);
 968 
 969         tcph = (struct tcphdr *)((u8 *)(req + 1) +
 970                         T6_ETH_HDR_LEN_G(hlen) + T6_IP_HDR_LEN_G(hlen));
 971         if (tcph->ece && tcph->cwr)
 972                 opt2 |= CCTRL_ECN_V(1);
 973         opt2 |= CONG_CNTRL_V(CONG_ALG_NEWRENO);
 974         opt2 |= T5_ISS_F;
 975         opt2 |= T5_OPT_2_VALID_F;
 976         rpl5->opt0 = cpu_to_be64(opt0);
 977         rpl5->opt2 = cpu_to_be32(opt2);
 978         rpl5->iss = cpu_to_be32((prandom_u32() & ~7UL) - 1);
 979         set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->port_id);
 980         t4_set_arp_err_handler(skb, sk, chtls_accept_rpl_arp_failure);
 981         cxgb4_l2t_send(csk->egress_dev, skb, csk->l2t_entry);
 982 }
 983 
 984 static void inet_inherit_port(struct inet_hashinfo *hash_info,
 985                               struct sock *lsk, struct sock *newsk)
 986 {
 987         local_bh_disable();
 988         __inet_inherit_port(lsk, newsk);
 989         local_bh_enable();
 990 }
 991 
 992 static int chtls_backlog_rcv(struct sock *sk, struct sk_buff *skb)
 993 {
 994         if (skb->protocol) {
 995                 kfree_skb(skb);
 996                 return 0;
 997         }
 998         BLOG_SKB_CB(skb)->backlog_rcv(sk, skb);
 999         return 0;
1000 }
1001 
1002 static void chtls_set_tcp_window(struct chtls_sock *csk)
1003 {
1004         struct net_device *ndev = csk->egress_dev;
1005         struct port_info *pi = netdev_priv(ndev);
1006         unsigned int linkspeed;
1007         u8 scale;
1008 
1009         linkspeed = pi->link_cfg.speed;
1010         scale = linkspeed / SPEED_10000;
1011 #define CHTLS_10G_RCVWIN (256 * 1024)
1012         csk->rcv_win = CHTLS_10G_RCVWIN;
1013         if (scale)
1014                 csk->rcv_win *= scale;
1015 #define CHTLS_10G_SNDWIN (256 * 1024)
1016         csk->snd_win = CHTLS_10G_SNDWIN;
1017         if (scale)
1018                 csk->snd_win *= scale;
1019 }
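
      /*
       * Worked example (illustrative): the windows scale linearly with link
       * speed in units of 10 Gb/s.  On a 100 Gb/s port,
       * scale = 100000 / 10000 = 10, so both rcv_win and snd_win grow from
       * the 256 KB base to 2.5 MB; on links slower than 10 Gb/s scale is 0
       * and the 256 KB defaults are kept.
       */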
1020 
1021 static struct sock *chtls_recv_sock(struct sock *lsk,
1022                                     struct request_sock *oreq,
1023                                     void *network_hdr,
1024                                     const struct cpl_pass_accept_req *req,
1025                                     struct chtls_dev *cdev)
1026 {
1027         struct inet_sock *newinet;
1028         const struct iphdr *iph;
1029         struct tls_context *ctx;
1030         struct net_device *ndev;
1031         struct chtls_sock *csk;
1032         struct dst_entry *dst;
1033         struct neighbour *n;
1034         struct tcp_sock *tp;
1035         struct sock *newsk;
1036         u16 port_id;
1037         int rxq_idx;
1038         int step;
1039 
1040         iph = (const struct iphdr *)network_hdr;
1041         newsk = tcp_create_openreq_child(lsk, oreq, cdev->askb);
1042         if (!newsk)
1043                 goto free_oreq;
1044 
1045         dst = inet_csk_route_child_sock(lsk, newsk, oreq);
1046         if (!dst)
1047                 goto free_sk;
1048 
1049         n = dst_neigh_lookup(dst, &iph->saddr);
1050         if (!n)
1051                 goto free_sk;
1052 
1053         ndev = n->dev;
1054         if (!ndev)
1055                 goto free_dst;
1056         port_id = cxgb4_port_idx(ndev);
1057 
1058         csk = chtls_sock_create(cdev);
1059         if (!csk)
1060                 goto free_dst;
1061 
1062         csk->l2t_entry = cxgb4_l2t_get(cdev->lldi->l2t, n, ndev, 0);
1063         if (!csk->l2t_entry)
1064                 goto free_csk;
1065 
1066         newsk->sk_user_data = csk;
1067         newsk->sk_backlog_rcv = chtls_backlog_rcv;
1068 
1069         tp = tcp_sk(newsk);
1070         newinet = inet_sk(newsk);
1071 
1072         newinet->inet_daddr = iph->saddr;
1073         newinet->inet_rcv_saddr = iph->daddr;
1074         newinet->inet_saddr = iph->daddr;
1075 
1076         oreq->ts_recent = PASS_OPEN_TID_G(ntohl(req->tos_stid));
1077         sk_setup_caps(newsk, dst);
1078         ctx = tls_get_ctx(lsk);
1079         newsk->sk_destruct = ctx->sk_destruct;
1080         csk->sk = newsk;
1081         csk->passive_reap_next = oreq;
1082         csk->tx_chan = cxgb4_port_chan(ndev);
1083         csk->port_id = port_id;
1084         csk->egress_dev = ndev;
1085         csk->tos = PASS_OPEN_TOS_G(ntohl(req->tos_stid));
1086         chtls_set_tcp_window(csk);
1087         tp->rcv_wnd = csk->rcv_win;
1088         csk->sndbuf = csk->snd_win;
1089         csk->ulp_mode = ULP_MODE_TLS;
1090         step = cdev->lldi->nrxq / cdev->lldi->nchan;
1091         csk->rss_qid = cdev->lldi->rxq_ids[port_id * step];
1092         rxq_idx = port_id * step;
1093         csk->txq_idx = (rxq_idx < cdev->lldi->ntxq) ? rxq_idx :
1094                         port_id * step;
1095         csk->sndbuf = newsk->sk_sndbuf;
1096         csk->smac_idx = ((struct port_info *)netdev_priv(ndev))->smt_idx;
1097         RCV_WSCALE(tp) = select_rcv_wscale(tcp_full_space(newsk),
1098                                            sock_net(newsk)->
1099                                                 ipv4.sysctl_tcp_window_scaling,
1100                                            tp->window_clamp);
1101         neigh_release(n);
1102         inet_inherit_port(&tcp_hashinfo, lsk, newsk);
1103         csk_set_flag(csk, CSK_CONN_INLINE);
1104         bh_unlock_sock(newsk); /* tcp_create_openreq_child ->sk_clone_lock */
1105 
1106         return newsk;
1107 free_csk:
1108         chtls_sock_release(&csk->kref);
1109 free_dst:
1110         dst_release(dst);
1111 free_sk:
1112         inet_csk_prepare_forced_close(newsk);
1113         tcp_done(newsk);
1114 free_oreq:
1115         chtls_reqsk_free(oreq);
1116         return NULL;
1117 }
1118 
1119 /*
 1120  * Populate a TID_RELEASE WR.  The skb must already be properly sized.
1121  */
 1122 static void mk_tid_release(struct sk_buff *skb,
 1123                            unsigned int chan, unsigned int tid)
1124 {
1125         struct cpl_tid_release *req;
1126         unsigned int len;
1127 
1128         len = roundup(sizeof(struct cpl_tid_release), 16);
1129         req = (struct cpl_tid_release *)__skb_put(skb, len);
1130         memset(req, 0, len);
1131         set_wr_txq(skb, CPL_PRIORITY_SETUP, chan);
1132         INIT_TP_WR_CPL(req, CPL_TID_RELEASE, tid);
1133 }
1134 
1135 static int chtls_get_module(struct sock *sk)
1136 {
1137         struct inet_connection_sock *icsk = inet_csk(sk);
1138 
1139         if (!try_module_get(icsk->icsk_ulp_ops->owner))
1140                 return -1;
1141 
1142         return 0;
1143 }
1144 
1145 static void chtls_pass_accept_request(struct sock *sk,
1146                                       struct sk_buff *skb)
1147 {
1148         struct cpl_t5_pass_accept_rpl *rpl;
1149         struct cpl_pass_accept_req *req;
1150         struct listen_ctx *listen_ctx;
1151         struct vlan_ethhdr *vlan_eh;
1152         struct request_sock *oreq;
1153         struct sk_buff *reply_skb;
1154         struct chtls_sock *csk;
1155         struct chtls_dev *cdev;
1156         struct tcphdr *tcph;
1157         struct sock *newsk;
1158         struct ethhdr *eh;
1159         struct iphdr *iph;
1160         void *network_hdr;
1161         unsigned int stid;
1162         unsigned int len;
1163         unsigned int tid;
1164         bool th_ecn, ect;
1165         __u8 ip_dsfield; /* IPv4 tos or IPv6 dsfield */
1166         u16 eth_hdr_len;
1167         bool ecn_ok;
1168 
1169         req = cplhdr(skb) + RSS_HDR;
1170         tid = GET_TID(req);
1171         cdev = BLOG_SKB_CB(skb)->cdev;
1172         newsk = lookup_tid(cdev->tids, tid);
1173         stid = PASS_OPEN_TID_G(ntohl(req->tos_stid));
1174         if (newsk) {
1175                 pr_info("tid (%d) already in use\n", tid);
1176                 return;
1177         }
1178 
1179         len = roundup(sizeof(*rpl), 16);
1180         reply_skb = alloc_skb(len, GFP_ATOMIC);
1181         if (!reply_skb) {
1182                 cxgb4_remove_tid(cdev->tids, 0, tid, sk->sk_family);
1183                 kfree_skb(skb);
1184                 return;
1185         }
1186 
1187         if (sk->sk_state != TCP_LISTEN)
1188                 goto reject;
1189 
1190         if (inet_csk_reqsk_queue_is_full(sk))
1191                 goto reject;
1192 
1193         if (sk_acceptq_is_full(sk))
1194                 goto reject;
1195 
1196         oreq = inet_reqsk_alloc(&chtls_rsk_ops, sk, true);
1197         if (!oreq)
1198                 goto reject;
1199 
1200         oreq->rsk_rcv_wnd = 0;
1201         oreq->rsk_window_clamp = 0;
1202         oreq->cookie_ts = 0;
1203         oreq->mss = 0;
1204         oreq->ts_recent = 0;
1205 
1206         eth_hdr_len = T6_ETH_HDR_LEN_G(ntohl(req->hdr_len));
1207         if (eth_hdr_len == ETH_HLEN) {
1208                 eh = (struct ethhdr *)(req + 1);
1209                 iph = (struct iphdr *)(eh + 1);
1210                 network_hdr = (void *)(eh + 1);
1211         } else {
1212                 vlan_eh = (struct vlan_ethhdr *)(req + 1);
1213                 iph = (struct iphdr *)(vlan_eh + 1);
1214                 network_hdr = (void *)(vlan_eh + 1);
1215         }
1216         if (iph->version != 0x4)
1217                 goto free_oreq;
1218 
1219         tcph = (struct tcphdr *)(iph + 1);
1220         skb_set_network_header(skb, (void *)iph - (void *)req);
1221 
1222         tcp_rsk(oreq)->tfo_listener = false;
1223         tcp_rsk(oreq)->rcv_isn = ntohl(tcph->seq);
1224         chtls_set_req_port(oreq, tcph->source, tcph->dest);
1225         chtls_set_req_addr(oreq, iph->daddr, iph->saddr);
1226         ip_dsfield = ipv4_get_dsfield(iph);
1227         if (req->tcpopt.wsf <= 14 &&
1228             sock_net(sk)->ipv4.sysctl_tcp_window_scaling) {
1229                 inet_rsk(oreq)->wscale_ok = 1;
1230                 inet_rsk(oreq)->snd_wscale = req->tcpopt.wsf;
1231         }
1232         inet_rsk(oreq)->ir_iif = sk->sk_bound_dev_if;
1233         th_ecn = tcph->ece && tcph->cwr;
1234         if (th_ecn) {
1235                 ect = !INET_ECN_is_not_ect(ip_dsfield);
1236                 ecn_ok = sock_net(sk)->ipv4.sysctl_tcp_ecn;
1237                 if ((!ect && ecn_ok) || tcp_ca_needs_ecn(sk))
1238                         inet_rsk(oreq)->ecn_ok = 1;
1239         }
1240 
1241         newsk = chtls_recv_sock(sk, oreq, network_hdr, req, cdev);
1242         if (!newsk)
1243                 goto reject;
1244 
1245         if (chtls_get_module(newsk))
1246                 goto reject;
1247         inet_csk_reqsk_queue_added(sk);
1248         reply_skb->sk = newsk;
1249         chtls_install_cpl_ops(newsk);
1250         cxgb4_insert_tid(cdev->tids, newsk, tid, newsk->sk_family);
1251         csk = rcu_dereference_sk_user_data(newsk);
1252         listen_ctx = (struct listen_ctx *)lookup_stid(cdev->tids, stid);
1253         csk->listen_ctx = listen_ctx;
1254         __skb_queue_tail(&listen_ctx->synq, (struct sk_buff *)&csk->synq);
1255         chtls_pass_accept_rpl(reply_skb, req, tid);
1256         kfree_skb(skb);
1257         return;
1258 
1259 free_oreq:
1260         chtls_reqsk_free(oreq);
1261 reject:
1262         mk_tid_release(reply_skb, 0, tid);
1263         cxgb4_ofld_send(cdev->lldi->ports[0], reply_skb);
1264         kfree_skb(skb);
1265 }
1266 
1267 /*
1268  * Handle a CPL_PASS_ACCEPT_REQ message.
1269  */
1270 static int chtls_pass_accept_req(struct chtls_dev *cdev, struct sk_buff *skb)
1271 {
1272         struct cpl_pass_accept_req *req = cplhdr(skb) + RSS_HDR;
1273         struct listen_ctx *ctx;
1274         unsigned int stid;
1275         unsigned int tid;
1276         struct sock *lsk;
1277         void *data;
1278 
1279         stid = PASS_OPEN_TID_G(ntohl(req->tos_stid));
1280         tid = GET_TID(req);
1281 
1282         data = lookup_stid(cdev->tids, stid);
1283         if (!data)
1284                 return 1;
1285 
1286         ctx = (struct listen_ctx *)data;
1287         lsk = ctx->lsk;
1288 
1289         if (unlikely(tid >= cdev->tids->ntids)) {
1290                 pr_info("passive open TID %u too large\n", tid);
1291                 return 1;
1292         }
1293 
1294         BLOG_SKB_CB(skb)->cdev = cdev;
1295         process_cpl_msg(chtls_pass_accept_request, lsk, skb);
1296         return 0;
1297 }
1298 
1299 /*
 1300  * Completes some final bits of initialization for just-established connections
1301  * and changes their state to TCP_ESTABLISHED.
1302  *
1303  * snd_isn here is the ISN after the SYN, i.e., the true ISN + 1.
1304  */
1305 static void make_established(struct sock *sk, u32 snd_isn, unsigned int opt)
1306 {
1307         struct tcp_sock *tp = tcp_sk(sk);
1308 
1309         tp->pushed_seq = snd_isn;
1310         tp->write_seq = snd_isn;
1311         tp->snd_nxt = snd_isn;
1312         tp->snd_una = snd_isn;
1313         inet_sk(sk)->inet_id = prandom_u32();
1314         assign_rxopt(sk, opt);
1315 
1316         if (tp->rcv_wnd > (RCV_BUFSIZ_M << 10))
1317                 tp->rcv_wup -= tp->rcv_wnd - (RCV_BUFSIZ_M << 10);
1318 
1319         smp_mb();
1320         tcp_set_state(sk, TCP_ESTABLISHED);
1321 }
1322 
1323 static void chtls_abort_conn(struct sock *sk, struct sk_buff *skb)
1324 {
1325         struct sk_buff *abort_skb;
1326 
1327         abort_skb = alloc_skb(sizeof(struct cpl_abort_req), GFP_ATOMIC);
1328         if (abort_skb)
1329                 chtls_send_reset(sk, CPL_ABORT_SEND_RST, abort_skb);
1330 }
1331 
1332 static struct sock *reap_list;
1333 static DEFINE_SPINLOCK(reap_list_lock);
1334 
1335 /*
1336  * Process the reap list.
1337  */
1338 DECLARE_TASK_FUNC(process_reap_list, task_param)
1339 {
1340         spin_lock_bh(&reap_list_lock);
1341         while (reap_list) {
1342                 struct sock *sk = reap_list;
1343                 struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
1344 
1345                 reap_list = csk->passive_reap_next;
1346                 csk->passive_reap_next = NULL;
1347                 spin_unlock(&reap_list_lock);
1348                 sock_hold(sk);
1349 
1350                 bh_lock_sock(sk);
1351                 chtls_abort_conn(sk, NULL);
1352                 sock_orphan(sk);
1353                 if (sk->sk_state == TCP_CLOSE)
1354                         inet_csk_destroy_sock(sk);
1355                 bh_unlock_sock(sk);
1356                 sock_put(sk);
1357                 spin_lock(&reap_list_lock);
1358         }
1359         spin_unlock_bh(&reap_list_lock);
1360 }
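
      /*
       * Editorial note on the locking above (an inference from the code):
       * the list is entered with spin_lock_bh() but dropped with plain
       * spin_unlock() inside the loop, leaving bottom halves disabled while
       * each socket is aborted under bh_lock_sock(); the matching
       * spin_unlock_bh() only runs once the list is drained.
       * add_to_reap_list() below relies on the same single-drain pattern:
       * it only schedules reap_task when it installs the first entry, since
       * one run of the worker empties the whole list.
       */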
1361 
1362 static DECLARE_WORK(reap_task, process_reap_list);
1363 
1364 static void add_to_reap_list(struct sock *sk)
1365 {
1366         struct chtls_sock *csk = sk->sk_user_data;
1367 
1368         local_bh_disable();
1369         bh_lock_sock(sk);
1370         release_tcp_port(sk); /* release the port immediately */
1371 
1372         spin_lock(&reap_list_lock);
1373         csk->passive_reap_next = reap_list;
1374         reap_list = sk;
1375         if (!csk->passive_reap_next)
1376                 schedule_work(&reap_task);
1377         spin_unlock(&reap_list_lock);
1378         bh_unlock_sock(sk);
1379         local_bh_enable();
1380 }
1381 
1382 static void add_pass_open_to_parent(struct sock *child, struct sock *lsk,
1383                                     struct chtls_dev *cdev)
1384 {
1385         struct request_sock *oreq;
1386         struct chtls_sock *csk;
1387 
1388         if (lsk->sk_state != TCP_LISTEN)
1389                 return;
1390 
1391         csk = child->sk_user_data;
1392         oreq = csk->passive_reap_next;
1393         csk->passive_reap_next = NULL;
1394 
1395         reqsk_queue_removed(&inet_csk(lsk)->icsk_accept_queue, oreq);
1396         __skb_unlink((struct sk_buff *)&csk->synq, &csk->listen_ctx->synq);
1397 
1398         if (sk_acceptq_is_full(lsk)) {
1399                 chtls_reqsk_free(oreq);
1400                 add_to_reap_list(child);
1401         } else {
1402                 refcount_set(&oreq->rsk_refcnt, 1);
1403                 inet_csk_reqsk_queue_add(lsk, oreq, child);
1404                 lsk->sk_data_ready(lsk);
1405         }
1406 }
1407 
1408 static void bl_add_pass_open_to_parent(struct sock *lsk, struct sk_buff *skb)
1409 {
1410         struct sock *child = skb->sk;
1411 
1412         skb->sk = NULL;
1413         add_pass_open_to_parent(child, lsk, BLOG_SKB_CB(skb)->cdev);
1414         kfree_skb(skb);
1415 }
1416 
1417 static int chtls_pass_establish(struct chtls_dev *cdev, struct sk_buff *skb)
1418 {
1419         struct cpl_pass_establish *req = cplhdr(skb) + RSS_HDR;
1420         struct chtls_sock *csk;
1421         struct sock *lsk, *sk;
1422         unsigned int hwtid;
1423 
1424         hwtid = GET_TID(req);
1425         sk = lookup_tid(cdev->tids, hwtid);
1426         if (!sk)
1427                 return (CPL_RET_UNKNOWN_TID | CPL_RET_BUF_DONE);
1428 
1429         bh_lock_sock(sk);
1430         if (unlikely(sock_owned_by_user(sk))) {
1431                 kfree_skb(skb);
1432         } else {
1433                 unsigned int stid;
1434                 void *data;
1435 
1436                 csk = sk->sk_user_data;
1437                 csk->wr_max_credits = 64;
1438                 csk->wr_credits = 64;
1439                 csk->wr_unacked = 0;
1440                 make_established(sk, ntohl(req->snd_isn), ntohs(req->tcp_opt));
1441                 stid = PASS_OPEN_TID_G(ntohl(req->tos_stid));
1442                 sk->sk_state_change(sk);
1443                 if (unlikely(sk->sk_socket))
1444                         sk_wake_async(sk, 0, POLL_OUT);
1445 
1446                 data = lookup_stid(cdev->tids, stid);
1447                 lsk = ((struct listen_ctx *)data)->lsk;
1448 
1449                 bh_lock_sock(lsk);
1450                 if (unlikely(skb_queue_empty(&csk->listen_ctx->synq))) {
1451                         /* removed from synq */
1452                         bh_unlock_sock(lsk);
1453                         kfree_skb(skb);
1454                         goto unlock;
1455                 }
1456 
1457                 if (likely(!sock_owned_by_user(lsk))) {
1458                         kfree_skb(skb);
1459                         add_pass_open_to_parent(sk, lsk, cdev);
1460                 } else {
1461                         skb->sk = sk;
1462                         BLOG_SKB_CB(skb)->cdev = cdev;
1463                         BLOG_SKB_CB(skb)->backlog_rcv =
1464                                 bl_add_pass_open_to_parent;
1465                         __sk_add_backlog(lsk, skb);
1466                 }
1467                 bh_unlock_sock(lsk);
1468         }
1469 unlock:
1470         bh_unlock_sock(sk);
1471         return 0;
1472 }
1473 
1474 /*
1475  * Handle receipt of an urgent pointer.
1476  */
1477 static void handle_urg_ptr(struct sock *sk, u32 urg_seq)
1478 {
1479         struct tcp_sock *tp = tcp_sk(sk);
1480 
1481         urg_seq--;
1482         if (tp->urg_data && !after(urg_seq, tp->urg_seq))
1483                 return; /* duplicate pointer */
1484 
1485         sk_send_sigurg(sk);
1486         if (tp->urg_seq == tp->copied_seq && tp->urg_data &&
1487             !sock_flag(sk, SOCK_URGINLINE) &&
1488             tp->copied_seq != tp->rcv_nxt) {
1489                 struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
1490 
1491                 tp->copied_seq++;
1492                 if (skb && tp->copied_seq - ULP_SKB_CB(skb)->seq >= skb->len)
1493                         chtls_free_skb(sk, skb);
1494         }
1495 
1496         tp->urg_data = TCP_URG_NOTYET;
1497         tp->urg_seq = urg_seq;
1498 }
1499 
1500 static void check_sk_callbacks(struct chtls_sock *csk)
1501 {
1502         struct sock *sk = csk->sk;
1503 
1504         if (unlikely(sk->sk_user_data &&
1505                      !csk_flag_nochk(csk, CSK_CALLBACKS_CHKD)))
1506                 csk_set_flag(csk, CSK_CALLBACKS_CHKD);
1507 }
1508 
/*
 * Handles Rx data that arrives in a state where the socket isn't accepting
 * new data.
 */
static void handle_excess_rx(struct sock *sk, struct sk_buff *skb)
{
	if (!csk_flag(sk, CSK_ABORT_SHUTDOWN))
		chtls_abort_conn(sk, skb);

	kfree_skb(skb);
}

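/*
 * Deliver a plain (non-TLS) CPL_RX_DATA payload: strip the CPL header,
 * track urgent data and the hardware's delayed-ACK (dack) mode, advance
 * rcv_nxt, and queue the skb on sk_receive_queue before waking the reader.
 */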
static void chtls_recv_data(struct sock *sk, struct sk_buff *skb)
{
	struct cpl_rx_data *hdr = cplhdr(skb) + RSS_HDR;
	struct chtls_sock *csk;
	struct tcp_sock *tp;

	csk = rcu_dereference_sk_user_data(sk);
	tp = tcp_sk(sk);

	if (unlikely(sk->sk_shutdown & RCV_SHUTDOWN)) {
		handle_excess_rx(sk, skb);
		return;
	}

	ULP_SKB_CB(skb)->seq = ntohl(hdr->seq);
	ULP_SKB_CB(skb)->psh = hdr->psh;
	skb_ulp_mode(skb) = ULP_MODE_NONE;

	skb_reset_transport_header(skb);
	__skb_pull(skb, sizeof(*hdr) + RSS_HDR);
	if (!skb->data_len)
		__skb_trim(skb, ntohs(hdr->len));

	if (unlikely(hdr->urg))
		handle_urg_ptr(sk, tp->rcv_nxt + ntohs(hdr->urg));
	if (unlikely(tp->urg_data == TCP_URG_NOTYET &&
		     tp->urg_seq - tp->rcv_nxt < skb->len))
		tp->urg_data = TCP_URG_VALID |
			       skb->data[tp->urg_seq - tp->rcv_nxt];

	if (unlikely(hdr->dack_mode != csk->delack_mode)) {
		csk->delack_mode = hdr->dack_mode;
		csk->delack_seq = tp->rcv_nxt;
	}

	tcp_hdr(skb)->fin = 0;
	tp->rcv_nxt += skb->len;

	__skb_queue_tail(&sk->sk_receive_queue, skb);

	if (!sock_flag(sk, SOCK_DEAD)) {
		check_sk_callbacks(csk);
		sk->sk_data_ready(sk);
	}
}

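/*
 * Top-half for CPL_RX_DATA: map the hardware tid back to its socket and
 * run chtls_recv_data() with socket-lock semantics via process_cpl_msg().
 * The chtls_rx_pdu(), chtls_rx_cmp() and chtls_wr_ack() dispatchers below
 * follow the same pattern.
 */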
static int chtls_rx_data(struct chtls_dev *cdev, struct sk_buff *skb)
{
	struct cpl_rx_data *req = cplhdr(skb) + RSS_HDR;
	unsigned int hwtid = GET_TID(req);
	struct sock *sk;

	sk = lookup_tid(cdev->tids, hwtid);
	if (unlikely(!sk)) {
		pr_err("can't find conn. for hwtid %u.\n", hwtid);
		return -EINVAL;
	}
	skb_dst_set(skb, NULL);
	process_cpl_msg(chtls_recv_data, sk, skb);
	return 0;
}

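/*
 * Receive a TLS record payload (CPL_TLS_DATA).  The payload is parked on
 * the per-connection tlshws.sk_recv_queue; it is only moved to
 * sk_receive_queue once the matching CPL_RX_TLS_CMP completion is
 * processed by chtls_rx_hdr().
 */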
static void chtls_recv_pdu(struct sock *sk, struct sk_buff *skb)
{
	struct cpl_tls_data *hdr = cplhdr(skb);
	struct chtls_sock *csk;
	struct chtls_hws *tlsk;
	struct tcp_sock *tp;

	csk = rcu_dereference_sk_user_data(sk);
	tlsk = &csk->tlshws;
	tp = tcp_sk(sk);

	if (unlikely(sk->sk_shutdown & RCV_SHUTDOWN)) {
		handle_excess_rx(sk, skb);
		return;
	}

	ULP_SKB_CB(skb)->seq = ntohl(hdr->seq);
	ULP_SKB_CB(skb)->flags = 0;
	skb_ulp_mode(skb) = ULP_MODE_TLS;

	skb_reset_transport_header(skb);
	__skb_pull(skb, sizeof(*hdr));
	if (!skb->data_len)
		__skb_trim(skb,
			   CPL_TLS_DATA_LENGTH_G(ntohl(hdr->length_pkd)));

	if (unlikely(tp->urg_data == TCP_URG_NOTYET &&
		     tp->urg_seq - tp->rcv_nxt < skb->len))
		tp->urg_data = TCP_URG_VALID |
			       skb->data[tp->urg_seq - tp->rcv_nxt];

	tcp_hdr(skb)->fin = 0;
	tlsk->pldlen = CPL_TLS_DATA_LENGTH_G(ntohl(hdr->length_pkd));
	__skb_queue_tail(&tlsk->sk_recv_queue, skb);
}

static int chtls_rx_pdu(struct chtls_dev *cdev, struct sk_buff *skb)
{
	struct cpl_tls_data *req = cplhdr(skb);
	unsigned int hwtid = GET_TID(req);
	struct sock *sk;

	sk = lookup_tid(cdev->tids, hwtid);
	if (unlikely(!sk)) {
		pr_err("can't find conn. for hwtid %u.\n", hwtid);
		return -EINVAL;
	}
	skb_dst_set(skb, NULL);
	process_cpl_msg(chtls_recv_pdu, sk, skb);
	return 0;
}

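/*
 * Stash the record length carried in the TLS completion header in
 * skb->hdr_len, then overwrite the header's length field with @nlen, the
 * payload length of the parked PDU.  (ntohs() is used for the store as
 * well; it performs the same 16-bit byte swap as htons().)
 */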
static void chtls_set_hdrlen(struct sk_buff *skb, unsigned int nlen)
{
	struct tlsrx_cmp_hdr *tls_cmp_hdr = cplhdr(skb);

	skb->hdr_len = ntohs((__force __be16)tls_cmp_hdr->length);
	tls_cmp_hdr->length = ntohs((__force __be16)nlen);
}

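/*
 * Handle a CPL_RX_TLS_CMP completion: flag decrypt errors in the inline
 * TLS header, advance rcv_nxt by the full PDU length, queue the header skb
 * (marked ULPCB_FLAG_TLS_HDR) ahead of any payload previously parked by
 * chtls_recv_pdu(), and wake the reader.
 */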
static void chtls_rx_hdr(struct sock *sk, struct sk_buff *skb)
{
	struct tlsrx_cmp_hdr *tls_hdr_pkt;
	struct cpl_rx_tls_cmp *cmp_cpl;
	struct sk_buff *skb_rec;
	struct chtls_sock *csk;
	struct chtls_hws *tlsk;
	struct tcp_sock *tp;

	cmp_cpl = cplhdr(skb);
	csk = rcu_dereference_sk_user_data(sk);
	tlsk = &csk->tlshws;
	tp = tcp_sk(sk);

	ULP_SKB_CB(skb)->seq = ntohl(cmp_cpl->seq);
	ULP_SKB_CB(skb)->flags = 0;

	skb_reset_transport_header(skb);
	__skb_pull(skb, sizeof(*cmp_cpl));
	tls_hdr_pkt = (struct tlsrx_cmp_hdr *)skb->data;
	if (tls_hdr_pkt->res_to_mac_error & TLSRX_HDR_PKT_ERROR_M)
		tls_hdr_pkt->type = CONTENT_TYPE_ERROR;
	if (!skb->data_len)
		__skb_trim(skb, TLS_HEADER_LENGTH);

	tp->rcv_nxt +=
		CPL_RX_TLS_CMP_PDULENGTH_G(ntohl(cmp_cpl->pdulength_length));

	ULP_SKB_CB(skb)->flags |= ULPCB_FLAG_TLS_HDR;
	skb_rec = __skb_dequeue(&tlsk->sk_recv_queue);
	if (!skb_rec) {
		__skb_queue_tail(&sk->sk_receive_queue, skb);
	} else {
		chtls_set_hdrlen(skb, tlsk->pldlen);
		tlsk->pldlen = 0;
		__skb_queue_tail(&sk->sk_receive_queue, skb);
		__skb_queue_tail(&sk->sk_receive_queue, skb_rec);
	}

	if (!sock_flag(sk, SOCK_DEAD)) {
		check_sk_callbacks(csk);
		sk->sk_data_ready(sk);
	}
}

static int chtls_rx_cmp(struct chtls_dev *cdev, struct sk_buff *skb)
{
	struct cpl_rx_tls_cmp *req = cplhdr(skb);
	unsigned int hwtid = GET_TID(req);
	struct sock *sk;

	sk = lookup_tid(cdev->tids, hwtid);
	if (unlikely(!sk)) {
		pr_err("can't find conn. for hwtid %u.\n", hwtid);
		return -EINVAL;
	}
	skb_dst_set(skb, NULL);
	process_cpl_msg(chtls_rx_hdr, sk, skb);

	return 0;
}

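/*
 * Enter TIME_WAIT: bump rcv_nxt to account for the peer's FIN, refresh the
 * timestamp bookkeeping, and hand the connection to the stack's TIME_WAIT
 * machinery.
 */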
static void chtls_timewait(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	tp->rcv_nxt++;
	tp->rx_opt.ts_recent_stamp = ktime_get_seconds();
	tp->srtt_us = 0;
	tcp_time_wait(sk, TCP_TIME_WAIT, 0);
}

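/*
 * The peer has sent a FIN (CPL_PEER_CLOSE): mark receive shutdown, step
 * the connection through the usual half-close state transitions, and wake
 * any waiters.  POLL_HUP is reserved for a full-duplex close.
 */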
static void chtls_peer_close(struct sock *sk, struct sk_buff *skb)
{
	struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);

	sk->sk_shutdown |= RCV_SHUTDOWN;
	sock_set_flag(sk, SOCK_DONE);

	switch (sk->sk_state) {
	case TCP_SYN_RECV:
	case TCP_ESTABLISHED:
		tcp_set_state(sk, TCP_CLOSE_WAIT);
		break;
	case TCP_FIN_WAIT1:
		tcp_set_state(sk, TCP_CLOSING);
		break;
	case TCP_FIN_WAIT2:
		chtls_release_resources(sk);
		if (csk_flag_nochk(csk, CSK_ABORT_RPL_PENDING))
			chtls_conn_done(sk);
		else
			chtls_timewait(sk);
		break;
	default:
		pr_info("cpl_peer_close in bad state %d\n", sk->sk_state);
	}

	if (!sock_flag(sk, SOCK_DEAD)) {
		sk->sk_state_change(sk);
		/* Do not send POLL_HUP for half duplex close. */

		if ((sk->sk_shutdown & SEND_SHUTDOWN) ||
		    sk->sk_state == TCP_CLOSE)
			sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_HUP);
		else
			sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
	}
	kfree_skb(skb);
}

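/*
 * The hardware has acknowledged our FIN (CPL_CLOSE_CON_RPL): record the
 * final snd_una and finish the active-close state transitions, aborting a
 * connection left in FIN_WAIT1 when the socket is dead and linger2 < 0.
 */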
static void chtls_close_con_rpl(struct sock *sk, struct sk_buff *skb)
{
	struct cpl_close_con_rpl *rpl = cplhdr(skb) + RSS_HDR;
	struct chtls_sock *csk;
	struct tcp_sock *tp;

	csk = rcu_dereference_sk_user_data(sk);
	tp = tcp_sk(sk);

	tp->snd_una = ntohl(rpl->snd_nxt) - 1;	/* exclude FIN */

	switch (sk->sk_state) {
	case TCP_CLOSING:
		chtls_release_resources(sk);
		if (csk_flag_nochk(csk, CSK_ABORT_RPL_PENDING))
			chtls_conn_done(sk);
		else
			chtls_timewait(sk);
		break;
	case TCP_LAST_ACK:
		chtls_release_resources(sk);
		chtls_conn_done(sk);
		break;
	case TCP_FIN_WAIT1:
		tcp_set_state(sk, TCP_FIN_WAIT2);
		sk->sk_shutdown |= SEND_SHUTDOWN;

		if (!sock_flag(sk, SOCK_DEAD))
			sk->sk_state_change(sk);
		else if (tcp_sk(sk)->linger2 < 0 &&
			 !csk_flag_nochk(csk, CSK_ABORT_SHUTDOWN))
			chtls_abort_conn(sk, skb);
		break;
	default:
		pr_info("close_con_rpl in bad state %d\n", sk->sk_state);
	}
	kfree_skb(skb);
}

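/*
 * Return an skb suitable for a CPL reply of @len bytes: reuse @skb when it
 * is linear and unshared (taking an extra reference so the caller's free
 * stays balanced), otherwise allocate a fresh one.
 */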
static struct sk_buff *get_cpl_skb(struct sk_buff *skb,
				   size_t len, gfp_t gfp)
{
	if (likely(!skb_is_nonlinear(skb) && !skb_cloned(skb))) {
		WARN_ONCE(skb->len < len, "skb shorter than CPL reply");
		__skb_trim(skb, len);
		skb_get(skb);
	} else {
		skb = alloc_skb(len, gfp);
		if (skb)
			__skb_put(skb, len);
	}
	return skb;
}

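/* Populate a CPL_ABORT_RPL work request for @tid. */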
static void set_abort_rpl_wr(struct sk_buff *skb, unsigned int tid,
			     int cmd)
{
	struct cpl_abort_rpl *rpl = cplhdr(skb);

	INIT_TP_WR_CPL(rpl, CPL_ABORT_RPL, tid);
	rpl->cmd = cmd;
}

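/*
 * Deferred ABORT_RPL transmit, run from process context where a
 * __GFP_NOFAIL allocation is permissible.  The caller encodes the target
 * txq in the upper bits of req->status and the CPL_ABORT_NO_RST flag in
 * bit 0.
 */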
static void send_defer_abort_rpl(struct chtls_dev *cdev, struct sk_buff *skb)
{
	struct cpl_abort_req_rss *req = cplhdr(skb);
	struct sk_buff *reply_skb;

	reply_skb = alloc_skb(sizeof(struct cpl_abort_rpl),
			      GFP_KERNEL | __GFP_NOFAIL);
	__skb_put(reply_skb, sizeof(struct cpl_abort_rpl));
	set_abort_rpl_wr(reply_skb, GET_TID(req),
			 (req->status & CPL_ABORT_NO_RST));
	set_wr_txq(reply_skb, CPL_PRIORITY_DATA, req->status >> 1);
	cxgb4_ofld_send(cdev->lldi->ports[0], reply_skb);
	kfree_skb(skb);
}

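/*
 * Send an ABORT_RPL for @sk.  If the skb allocation fails, the reply is
 * sent via the deferred path with the queue index stashed in req->status.
 * Replies for fully instantiated connections (inline, valid L2T entry,
 * past SYN_RECV) go out through the L2T entry; otherwise through the
 * offload queue.
 */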
static void send_abort_rpl(struct sock *sk, struct sk_buff *skb,
			   struct chtls_dev *cdev, int status, int queue)
{
	struct cpl_abort_req_rss *req = cplhdr(skb);
	struct sk_buff *reply_skb;
	struct chtls_sock *csk;

	csk = rcu_dereference_sk_user_data(sk);

	reply_skb = alloc_skb(sizeof(struct cpl_abort_rpl),
			      GFP_KERNEL);

	if (!reply_skb) {
		req->status = (queue << 1);
		send_defer_abort_rpl(cdev, skb);
		return;
	}

	set_abort_rpl_wr(reply_skb, GET_TID(req), status);
	kfree_skb(skb);

	set_wr_txq(reply_skb, CPL_PRIORITY_DATA, queue);
	if (csk_conn_inline(csk)) {
		struct l2t_entry *e = csk->l2t_entry;

		if (e && sk->sk_state != TCP_SYN_RECV) {
			cxgb4_l2t_send(csk->egress_dev, reply_skb, e);
			return;
		}
	}
	cxgb4_ofld_send(cdev->lldi->ports[0], reply_skb);
}

/*
 * Add an skb to the deferred skb queue for processing from process context.
 */
static void t4_defer_reply(struct sk_buff *skb, struct chtls_dev *cdev,
			   defer_handler_t handler)
{
	DEFERRED_SKB_CB(skb)->handler = handler;
	spin_lock_bh(&cdev->deferq.lock);
	__skb_queue_tail(&cdev->deferq, skb);
	if (skb_queue_len(&cdev->deferq) == 1)
		schedule_work(&cdev->deferq_task);
	spin_unlock_bh(&cdev->deferq.lock);
}

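/*
 * As send_abort_rpl(), but tries to reuse the incoming ABORT_REQ skb for
 * the reply (get_cpl_skb()) and may run in atomic context (gfp_any()); on
 * allocation failure the reply is punted to process context through
 * t4_defer_reply().
 */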
static void chtls_send_abort_rpl(struct sock *sk, struct sk_buff *skb,
				 struct chtls_dev *cdev,
				 int status, int queue)
{
	struct cpl_abort_req_rss *req = cplhdr(skb) + RSS_HDR;
	struct sk_buff *reply_skb;
	struct chtls_sock *csk;
	unsigned int tid;

	csk = rcu_dereference_sk_user_data(sk);
	tid = GET_TID(req);

	reply_skb = get_cpl_skb(skb, sizeof(struct cpl_abort_rpl), gfp_any());
	if (!reply_skb) {
		req->status = (queue << 1) | status;
		t4_defer_reply(skb, cdev, send_defer_abort_rpl);
		return;
	}

	set_abort_rpl_wr(reply_skb, tid, status);
	set_wr_txq(reply_skb, CPL_PRIORITY_DATA, queue);
	if (csk_conn_inline(csk)) {
		struct l2t_entry *e = csk->l2t_entry;

		if (e && sk->sk_state != TCP_SYN_RECV) {
			cxgb4_l2t_send(csk->egress_dev, reply_skb, e);
			return;
		}
	}
	cxgb4_ofld_send(cdev->lldi->ports[0], reply_skb);
	kfree_skb(skb);
}

/*
 * This is run from a listener's backlog to abort a child connection in
 * SYN_RCV state (i.e., one on the listener's SYN queue).
 */
static void bl_abort_syn_rcv(struct sock *lsk, struct sk_buff *skb)
{
	struct chtls_sock *csk;
	struct sock *child;
	int queue;

	child = skb->sk;
	csk = rcu_dereference_sk_user_data(child);
	queue = csk->txq_idx;

	skb->sk = NULL;
	do_abort_syn_rcv(child, lsk);
	send_abort_rpl(child, skb, BLOG_SKB_CB(skb)->cdev,
		       CPL_ABORT_NO_RST, queue);
}

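/*
 * Abort a connection still in SYN_RCV.  The parent listener is found via
 * the stid stashed in the request_sock's ts_recent field; teardown either
 * runs immediately under the listener's lock or, when the listener is
 * owned by user context, is punted to its backlog (bl_abort_syn_rcv).
 */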
static int abort_syn_rcv(struct sock *sk, struct sk_buff *skb)
{
	const struct request_sock *oreq;
	struct listen_ctx *listen_ctx;
	struct chtls_sock *csk;
	struct chtls_dev *cdev;
	struct sock *psk;
	void *ctx;

	csk = sk->sk_user_data;
	oreq = csk->passive_reap_next;
	cdev = csk->cdev;

	if (!oreq)
		return -1;

	ctx = lookup_stid(cdev->tids, oreq->ts_recent);
	if (!ctx)
		return -1;

	listen_ctx = (struct listen_ctx *)ctx;
	psk = listen_ctx->lsk;

	bh_lock_sock(psk);
	if (!sock_owned_by_user(psk)) {
		int queue = csk->txq_idx;

		do_abort_syn_rcv(sk, psk);
		send_abort_rpl(sk, skb, cdev, CPL_ABORT_NO_RST, queue);
	} else {
		skb->sk = sk;
		BLOG_SKB_CB(skb)->backlog_rcv = bl_abort_syn_rcv;
		__sk_add_backlog(psk, skb);
	}
	bh_unlock_sock(psk);
	return 0;
}

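/*
 * Process a peer/hardware abort (CPL_ABORT_REQ_RSS).  Negative advice is
 * ignored.  Otherwise make sure a flow-control WR has been sent, flag the
 * connection as being aborted, report ETIMEDOUT to the user, tear down
 * SYN_RCV children through their listener, and acknowledge the abort with
 * an ABORT_RPL.
 */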
static void chtls_abort_req_rss(struct sock *sk, struct sk_buff *skb)
{
	const struct cpl_abort_req_rss *req = cplhdr(skb) + RSS_HDR;
	struct chtls_sock *csk = sk->sk_user_data;
	int rst_status = CPL_ABORT_NO_RST;
	int queue = csk->txq_idx;

	if (is_neg_adv(req->status)) {
		if (sk->sk_state == TCP_SYN_RECV)
			chtls_set_tcb_tflag(sk, 0, 0);

		kfree_skb(skb);
		return;
	}

	csk_reset_flag(csk, CSK_ABORT_REQ_RCVD);

	if (!csk_flag_nochk(csk, CSK_ABORT_SHUTDOWN) &&
	    !csk_flag_nochk(csk, CSK_TX_DATA_SENT)) {
		struct tcp_sock *tp = tcp_sk(sk);

		if (send_tx_flowc_wr(sk, 0, tp->snd_nxt, tp->rcv_nxt) < 0)
			WARN_ONCE(1, "send_tx_flowc error");
		csk_set_flag(csk, CSK_TX_DATA_SENT);
	}

	csk_set_flag(csk, CSK_ABORT_SHUTDOWN);

	if (!csk_flag_nochk(csk, CSK_ABORT_RPL_PENDING)) {
		sk->sk_err = ETIMEDOUT;

		if (!sock_flag(sk, SOCK_DEAD))
			sk->sk_error_report(sk);

		if (sk->sk_state == TCP_SYN_RECV && !abort_syn_rcv(sk, skb))
			return;

		chtls_release_resources(sk);
		chtls_conn_done(sk);
	}

	chtls_send_abort_rpl(sk, skb, csk->cdev, rst_status, queue);
}

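/*
 * Handle the hardware's reply to an abort we requested
 * (CPL_ABORT_RPL_RSS): once no peer abort is racing with ours, release the
 * tid (for connections aborted in SYN_SENT) and the remaining connection
 * resources.
 */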
static void chtls_abort_rpl_rss(struct sock *sk, struct sk_buff *skb)
{
	struct cpl_abort_rpl_rss *rpl = cplhdr(skb) + RSS_HDR;
	struct chtls_sock *csk;
	struct chtls_dev *cdev;

	csk = rcu_dereference_sk_user_data(sk);
	cdev = csk->cdev;

	if (csk_flag_nochk(csk, CSK_ABORT_RPL_PENDING)) {
		csk_reset_flag(csk, CSK_ABORT_RPL_PENDING);
		if (!csk_flag_nochk(csk, CSK_ABORT_REQ_RCVD)) {
			if (sk->sk_state == TCP_SYN_SENT) {
				cxgb4_remove_tid(cdev->tids,
						 csk->port_id,
						 GET_TID(rpl),
						 sk->sk_family);
				sock_put(sk);
			}
			chtls_release_resources(sk);
			chtls_conn_done(sk);
		}
	}
	kfree_skb(skb);
}

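/*
 * Common top-half for the four connection-lifetime CPLs; pick the
 * bottom-half handler by opcode and run it under the socket lock:
 *
 *   CPL_PEER_CLOSE    -> chtls_peer_close
 *   CPL_CLOSE_CON_RPL -> chtls_close_con_rpl
 *   CPL_ABORT_REQ_RSS -> chtls_abort_req_rss
 *   CPL_ABORT_RPL_RSS -> chtls_abort_rpl_rss
 */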
static int chtls_conn_cpl(struct chtls_dev *cdev, struct sk_buff *skb)
{
	struct cpl_peer_close *req = cplhdr(skb) + RSS_HDR;
	void (*fn)(struct sock *sk, struct sk_buff *skb);
	unsigned int hwtid = GET_TID(req);
	struct sock *sk;
	u8 opcode;

	opcode = ((const struct rss_header *)cplhdr(skb))->opcode;

	sk = lookup_tid(cdev->tids, hwtid);
	if (!sk)
		goto rel_skb;

	switch (opcode) {
	case CPL_PEER_CLOSE:
		fn = chtls_peer_close;
		break;
	case CPL_CLOSE_CON_RPL:
		fn = chtls_close_con_rpl;
		break;
	case CPL_ABORT_REQ_RSS:
		fn = chtls_abort_req_rss;
		break;
	case CPL_ABORT_RPL_RSS:
		fn = chtls_abort_rpl_rss;
		break;
	default:
		goto rel_skb;
	}

	process_cpl_msg(fn, sk, skb);
	return 0;

rel_skb:
	kfree_skb(skb);
	return 0;
}

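/*
 * Handle a CPL_FW4_ACK returning Tx credits.  Each pending WR's credit
 * cost is kept in skb->csum on the wr_skb queue: WRs fully covered by the
 * returned credits are dequeued and freed, while a partially acked head
 * has its remaining cost reduced.  A valid sequence number updates
 * snd_una, and freed-up credits may let more queued frames be pushed.
 */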
static void chtls_rx_ack(struct sock *sk, struct sk_buff *skb)
{
	struct cpl_fw4_ack *hdr = cplhdr(skb) + RSS_HDR;
	struct chtls_sock *csk = sk->sk_user_data;
	struct tcp_sock *tp = tcp_sk(sk);
	u32 credits = hdr->credits;
	u32 snd_una;

	snd_una = ntohl(hdr->snd_una);
	csk->wr_credits += credits;

	if (csk->wr_unacked > csk->wr_max_credits - csk->wr_credits)
		csk->wr_unacked = csk->wr_max_credits - csk->wr_credits;

	while (credits) {
		struct sk_buff *pskb = csk->wr_skb_head;
		u32 csum;

		if (unlikely(!pskb)) {
			if (csk->wr_nondata)
				csk->wr_nondata -= credits;
			break;
		}
		csum = (__force u32)pskb->csum;
		if (unlikely(credits < csum)) {
			pskb->csum = (__force __wsum)(csum - credits);
			break;
		}
		dequeue_wr(sk);
		credits -= csum;
		kfree_skb(pskb);
	}
	if (hdr->seq_vld & CPL_FW4_ACK_FLAGS_SEQVAL) {
		if (unlikely(before(snd_una, tp->snd_una))) {
			kfree_skb(skb);
			return;
		}

		if (tp->snd_una != snd_una) {
			tp->snd_una = snd_una;
			tp->rcv_tstamp = tcp_time_stamp(tp);
			if (tp->snd_una == tp->snd_nxt &&
			    !csk_flag_nochk(csk, CSK_TX_FAILOVER))
				csk_reset_flag(csk, CSK_TX_WAIT_IDLE);
		}
	}

	if (hdr->seq_vld & CPL_FW4_ACK_FLAGS_CH) {
		unsigned int fclen16 = roundup(failover_flowc_wr_len, 16);

		csk->wr_credits -= fclen16;
		csk_reset_flag(csk, CSK_TX_WAIT_IDLE);
		csk_reset_flag(csk, CSK_TX_FAILOVER);
	}
	if (skb_queue_len(&csk->txq) && chtls_push_frames(csk, 0))
		sk->sk_write_space(sk);

	kfree_skb(skb);
}

static int chtls_wr_ack(struct chtls_dev *cdev, struct sk_buff *skb)
{
	struct cpl_fw4_ack *rpl = cplhdr(skb) + RSS_HDR;
	unsigned int hwtid = GET_TID(rpl);
	struct sock *sk;

	sk = lookup_tid(cdev->tids, hwtid);
	if (unlikely(!sk)) {
		pr_err("can't find conn. for hwtid %u.\n", hwtid);
		return -EINVAL;
	}
	process_cpl_msg(chtls_rx_ack, sk, skb);

	return 0;
}

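/*
 * Dispatch table, indexed by CPL opcode, used to steer ingress CPL
 * messages to the handlers above.
 */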
chtls_handler_func chtls_handlers[NUM_CPL_CMDS] = {
	[CPL_PASS_OPEN_RPL]     = chtls_pass_open_rpl,
	[CPL_CLOSE_LISTSRV_RPL] = chtls_close_listsrv_rpl,
	[CPL_PASS_ACCEPT_REQ]   = chtls_pass_accept_req,
	[CPL_PASS_ESTABLISH]    = chtls_pass_establish,
	[CPL_RX_DATA]           = chtls_rx_data,
	[CPL_TLS_DATA]          = chtls_rx_pdu,
	[CPL_RX_TLS_CMP]        = chtls_rx_cmp,
	[CPL_PEER_CLOSE]        = chtls_conn_cpl,
	[CPL_CLOSE_CON_RPL]     = chtls_conn_cpl,
	[CPL_ABORT_REQ_RSS]     = chtls_conn_cpl,
	[CPL_ABORT_RPL_RSS]     = chtls_conn_cpl,
	[CPL_FW4_ACK]           = chtls_wr_ack,
};
