Lines matching refs: csk

184 static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,  in send_act_open_req()  argument
187 struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev); in send_act_open_req()
189 int wscale = cxgbi_sock_compute_wscale(csk->mss_idx); in send_act_open_req()
192 unsigned int qid_atid = ((unsigned int)csk->atid) | in send_act_open_req()
193 (((unsigned int)csk->rss_qid) << 14); in send_act_open_req()
197 MSS_IDX_V(csk->mss_idx) | in send_act_open_req()
198 L2T_IDX_V(((struct l2t_entry *)csk->l2t)->idx) | in send_act_open_req()
199 TX_CHAN_V(csk->tx_chan) | in send_act_open_req()
200 SMAC_SEL_V(csk->smac_idx) | in send_act_open_req()
202 RCV_BUFSIZ_V(csk->rcv_win >> 10); in send_act_open_req()
206 RSS_QUEUE_V(csk->rss_qid); in send_act_open_req()
215 req->local_port = csk->saddr.sin_port; in send_act_open_req()
216 req->peer_port = csk->daddr.sin_port; in send_act_open_req()
217 req->local_ip = csk->saddr.sin_addr.s_addr; in send_act_open_req()
218 req->peer_ip = csk->daddr.sin_addr.s_addr; in send_act_open_req()
221 csk->cdev->ports[csk->port_id], in send_act_open_req()
222 csk->l2t)); in send_act_open_req()
228 csk, &req->local_ip, ntohs(req->local_port), in send_act_open_req()
230 csk->atid, csk->rss_qid); in send_act_open_req()
239 req->local_port = csk->saddr.sin_port; in send_act_open_req()
240 req->peer_port = csk->daddr.sin_port; in send_act_open_req()
241 req->local_ip = csk->saddr.sin_addr.s_addr; in send_act_open_req()
242 req->peer_ip = csk->daddr.sin_addr.s_addr; in send_act_open_req()
246 csk->cdev->ports[csk->port_id], in send_act_open_req()
247 csk->l2t))); in send_act_open_req()
256 csk, &req->local_ip, ntohs(req->local_port), in send_act_open_req()
258 csk->atid, csk->rss_qid); in send_act_open_req()
261 set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->port_id); in send_act_open_req()
264 (&csk->saddr), (&csk->daddr), t4 ? 4 : 5, csk, in send_act_open_req()
265 csk->state, csk->flags, csk->atid, csk->rss_qid); in send_act_open_req()
267 cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t); in send_act_open_req()
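
The lines above show the v4 active-open request being filled in: the atid and the RSS queue id are packed into one 32-bit field, and the receive window is advertised in 1KB units (rcv_win >> 10). Below is a standalone sketch of that arithmetic, not driver code; the 14-bit atid width is inferred only from the "<< 14" shift and should be treated as an assumption.

    /* Illustrative userspace sketch of the qid/atid packing and the
     * 1KB-granular receive-window encoding seen above. */
    #include <stdio.h>

    int main(void)
    {
        unsigned int atid = 0x123;          /* hypothetical active-open tid */
        unsigned int rss_qid = 0x2a;        /* hypothetical RSS queue id */
        unsigned int rcv_win = 256 * 1024;  /* hypothetical receive window */

        unsigned int qid_atid = atid | (rss_qid << 14);
        unsigned int rcv_bufsiz = rcv_win >> 10;   /* advertised in 1KB units */

        printf("qid_atid=0x%x (atid=0x%x, qid=0x%x), bufsiz=%u KB\n",
               qid_atid, qid_atid & 0x3fff, qid_atid >> 14, rcv_bufsiz);
        return 0;
    }
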
271 static void send_act_open_req6(struct cxgbi_sock *csk, struct sk_buff *skb, in send_act_open_req6() argument
274 struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev); in send_act_open_req6()
276 int wscale = cxgbi_sock_compute_wscale(csk->mss_idx); in send_act_open_req6()
279 unsigned int qid_atid = ((unsigned int)csk->atid) | in send_act_open_req6()
280 (((unsigned int)csk->rss_qid) << 14); in send_act_open_req6()
284 MSS_IDX_V(csk->mss_idx) | in send_act_open_req6()
285 L2T_IDX_V(((struct l2t_entry *)csk->l2t)->idx) | in send_act_open_req6()
286 TX_CHAN_V(csk->tx_chan) | in send_act_open_req6()
287 SMAC_SEL_V(csk->smac_idx) | in send_act_open_req6()
289 RCV_BUFSIZ_V(csk->rcv_win >> 10); in send_act_open_req6()
294 RSS_QUEUE_V(csk->rss_qid); in send_act_open_req6()
303 req->local_port = csk->saddr6.sin6_port; in send_act_open_req6()
304 req->peer_port = csk->daddr6.sin6_port; in send_act_open_req6()
306 req->local_ip_hi = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr); in send_act_open_req6()
307 req->local_ip_lo = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr + in send_act_open_req6()
309 req->peer_ip_hi = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr); in send_act_open_req6()
310 req->peer_ip_lo = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr + in send_act_open_req6()
319 csk->cdev->ports[csk->port_id], in send_act_open_req6()
320 csk->l2t)); in send_act_open_req6()
328 req->local_port = csk->saddr6.sin6_port; in send_act_open_req6()
329 req->peer_port = csk->daddr6.sin6_port; in send_act_open_req6()
330 req->local_ip_hi = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr); in send_act_open_req6()
331 req->local_ip_lo = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr + in send_act_open_req6()
333 req->peer_ip_hi = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr); in send_act_open_req6()
334 req->peer_ip_lo = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr + in send_act_open_req6()
342 csk->cdev->ports[csk->port_id], in send_act_open_req6()
343 csk->l2t))); in send_act_open_req6()
346 set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->port_id); in send_act_open_req6()
349 t4 ? 4 : 5, csk, csk->state, csk->flags, csk->atid, in send_act_open_req6()
350 &csk->saddr6.sin6_addr, ntohs(csk->saddr.sin_port), in send_act_open_req6()
351 &csk->daddr6.sin6_addr, ntohs(csk->daddr.sin_port), in send_act_open_req6()
352 csk->rss_qid); in send_act_open_req6()
354 cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t); in send_act_open_req6()
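
The IPv6 variant differs mainly in addressing: the 128-bit address is carried as two big-endian 64-bit halves read straight out of sin6_addr.s6_addr (already in network byte order), as the local_ip_hi/local_ip_lo lines show. A minimal sketch of that split, with the low half assumed to start at byte offset 8:

    /* Sketch only: split a 16-byte IPv6 address into hi/lo 64-bit halves,
     * mirroring the req->local_ip_hi / req->local_ip_lo assignments above. */
    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    int main(void)
    {
        unsigned char s6_addr[16];      /* stand-in for sin6_addr.s6_addr */
        uint64_t hi, lo;

        memset(s6_addr, 0, sizeof(s6_addr));
        s6_addr[15] = 1;                /* ::1 for illustration */

        memcpy(&hi, s6_addr, 8);        /* local_ip_hi: first 8 bytes */
        memcpy(&lo, s6_addr + 8, 8);    /* local_ip_lo: last 8 bytes */

        printf("hi=%016llx lo=%016llx (raw memory, network byte order)\n",
               (unsigned long long)hi, (unsigned long long)lo);
        return 0;
    }
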
358 static void send_close_req(struct cxgbi_sock *csk) in send_close_req() argument
360 struct sk_buff *skb = csk->cpl_close; in send_close_req()
362 unsigned int tid = csk->tid; in send_close_req()
366 csk, csk->state, csk->flags, csk->tid); in send_close_req()
367 csk->cpl_close = NULL; in send_close_req()
368 set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id); in send_close_req()
373 cxgbi_sock_skb_entail(csk, skb); in send_close_req()
374 if (csk->state >= CTP_ESTABLISHED) in send_close_req()
375 push_tx_frames(csk, 1); in send_close_req()
380 struct cxgbi_sock *csk = (struct cxgbi_sock *)handle; in abort_arp_failure() local
385 csk, csk->state, csk->flags, csk->tid); in abort_arp_failure()
388 cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb); in abort_arp_failure()
391 static void send_abort_req(struct cxgbi_sock *csk) in send_abort_req() argument
394 struct sk_buff *skb = csk->cpl_abort_req; in send_abort_req()
396 if (unlikely(csk->state == CTP_ABORTING) || !skb || !csk->cdev) in send_abort_req()
399 if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) { in send_abort_req()
400 send_tx_flowc_wr(csk); in send_abort_req()
401 cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT); in send_abort_req()
404 cxgbi_sock_set_state(csk, CTP_ABORTING); in send_abort_req()
405 cxgbi_sock_set_flag(csk, CTPF_ABORT_RPL_PENDING); in send_abort_req()
406 cxgbi_sock_purge_write_queue(csk); in send_abort_req()
408 csk->cpl_abort_req = NULL; in send_abort_req()
410 set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id); in send_abort_req()
412 t4_set_arp_err_handler(skb, csk, abort_arp_failure); in send_abort_req()
413 INIT_TP_WR(req, csk->tid); in send_abort_req()
414 OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, csk->tid)); in send_abort_req()
415 req->rsvd0 = htonl(csk->snd_nxt); in send_abort_req()
416 req->rsvd1 = !cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT); in send_abort_req()
420 csk, csk->state, csk->flags, csk->tid, csk->snd_nxt, in send_abort_req()
423 cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t); in send_abort_req()
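
A constraint worth noting in send_abort_req(): the firmware expects a FLOWC work request before any other WR on a connection, so if CTPF_TX_DATA_SENT is still clear the abort path emits the FLOWC first. A toy sketch of that ordering rule; the flag's bit value here is hypothetical:

    /* Sketch of the "FLOWC goes out first" rule enforced above. */
    #include <stdio.h>

    #define CTPF_TX_DATA_SENT  (1u << 0)   /* hypothetical bit position */

    static unsigned long flags;

    static void send_tx_flowc_wr(void) { puts("FLOWC WR"); }
    static void send_abort_wr(void)    { puts("ABORT_REQ WR"); }

    int main(void)
    {
        if (!(flags & CTPF_TX_DATA_SENT)) {
            send_tx_flowc_wr();            /* FLOWC must precede other WRs */
            flags |= CTPF_TX_DATA_SENT;
        }
        send_abort_wr();
        return 0;
    }
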
426 static void send_abort_rpl(struct cxgbi_sock *csk, int rst_status) in send_abort_rpl() argument
428 struct sk_buff *skb = csk->cpl_abort_rpl; in send_abort_rpl()
433 csk, csk->state, csk->flags, csk->tid, rst_status); in send_abort_rpl()
435 csk->cpl_abort_rpl = NULL; in send_abort_rpl()
436 set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id); in send_abort_rpl()
437 INIT_TP_WR(rpl, csk->tid); in send_abort_rpl()
438 OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, csk->tid)); in send_abort_rpl()
440 cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb); in send_abort_rpl()
448 static u32 send_rx_credits(struct cxgbi_sock *csk, u32 credits) in send_rx_credits() argument
455 csk, csk->state, csk->flags, csk->tid, credits); in send_rx_credits()
459 pr_info("csk 0x%p, credit %u, OOM.\n", csk, credits); in send_rx_credits()
464 set_wr_txq(skb, CPL_PRIORITY_ACK, csk->port_id); in send_rx_credits()
465 INIT_TP_WR(req, csk->tid); in send_rx_credits()
467 csk->tid)); in send_rx_credits()
470 cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb); in send_rx_credits()
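
send_rx_credits() implements the usual TOE credit-return pattern: the host reports how many bytes it has consumed so the adapter can reopen the receive window. A hedged sketch of the surrounding bookkeeping, assuming the common rcv_nxt/rcv_wup convention; the 16KB threshold is made up:

    /* Sketch: return RX credits once enough bytes have been consumed. */
    #include <stdio.h>

    static unsigned int rcv_nxt, rcv_wup;  /* next seq expected / last update */

    static unsigned int send_rx_credits(unsigned int credits)
    {
        printf("CPL_RX_DATA_ACK: %u credits\n", credits);
        return credits;                    /* credits actually granted */
    }

    static void maybe_return_credits(void)
    {
        unsigned int pending = rcv_nxt - rcv_wup;

        if (pending >= 16384)              /* hypothetical threshold */
            rcv_wup += send_rx_credits(pending);
    }

    int main(void)
    {
        rcv_nxt = 65536;                   /* pretend 64KB were consumed */
        maybe_return_credits();
        return 0;
    }
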
528 static inline int send_tx_flowc_wr(struct cxgbi_sock *csk) in send_tx_flowc_wr() argument
540 htonl(FW_WR_LEN16_V(flowclen16) | FW_WR_FLOWID_V(csk->tid)); in send_tx_flowc_wr()
542 flowc->mnemval[0].val = htonl(csk->cdev->pfvf); in send_tx_flowc_wr()
544 flowc->mnemval[1].val = htonl(csk->tx_chan); in send_tx_flowc_wr()
546 flowc->mnemval[2].val = htonl(csk->tx_chan); in send_tx_flowc_wr()
548 flowc->mnemval[3].val = htonl(csk->rss_qid); in send_tx_flowc_wr()
550 flowc->mnemval[4].val = htonl(csk->snd_nxt); in send_tx_flowc_wr()
552 flowc->mnemval[5].val = htonl(csk->rcv_nxt); in send_tx_flowc_wr()
554 flowc->mnemval[6].val = htonl(csk->snd_win); in send_tx_flowc_wr()
556 flowc->mnemval[7].val = htonl(csk->advmss); in send_tx_flowc_wr()
562 set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id); in send_tx_flowc_wr()
566 csk, csk->tid, 0, csk->tx_chan, csk->rss_qid, in send_tx_flowc_wr()
567 csk->snd_nxt, csk->rcv_nxt, csk->snd_win, in send_tx_flowc_wr()
568 csk->advmss); in send_tx_flowc_wr()
570 cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb); in send_tx_flowc_wr()
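
The FLOWC work request is a table of (mnemonic, value) pairs describing the flow; the listing shows slots 0 through 7 being filled with pfvf, tx_chan (twice), rss_qid, snd_nxt, rcv_nxt, snd_win and advmss. A sketch of that shape, with mnemonic names patterned on the cxgb4 FW_FLOWC_MNEM_* set; treat the exact names as an assumption:

    /* Sketch of the FLOWC payload shape implied by mnemval[0..7] above. */
    #include <stdio.h>

    struct mnemval {
        unsigned char mnemonic;
        unsigned int val;
    };

    enum { MNEM_PFNVFN, MNEM_CH, MNEM_PORT, MNEM_IQID,
           MNEM_SNDNXT, MNEM_RCVNXT, MNEM_SNDBUF, MNEM_MSS };

    int main(void)
    {
        struct mnemval mv[8] = {
            { MNEM_PFNVFN, 0 },            /* csk->cdev->pfvf */
            { MNEM_CH,     1 },            /* csk->tx_chan */
            { MNEM_PORT,   1 },            /* csk->tx_chan again */
            { MNEM_IQID,   42 },           /* csk->rss_qid */
            { MNEM_SNDNXT, 1000 },         /* csk->snd_nxt */
            { MNEM_RCVNXT, 2000 },         /* csk->rcv_nxt */
            { MNEM_SNDBUF, 131072 },       /* csk->snd_win */
            { MNEM_MSS,    1460 },         /* csk->advmss */
        };

        for (int i = 0; i < 8; i++)        /* all values hypothetical */
            printf("mnemval[%d] = %u\n", mv[i].mnemonic, mv[i].val);
        return 0;
    }
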
575 static inline void make_tx_data_wr(struct cxgbi_sock *csk, struct sk_buff *skb, in make_tx_data_wr() argument
589 req->flowid_len16 = htonl(FW_WR_FLOWID_V(csk->tid) | in make_tx_data_wr()
597 cpu_to_be32(FW_WR_FLOWID_V(csk->tid) | in make_tx_data_wr()
603 val = skb_peek(&csk->write_queue) ? 0 : 1; in make_tx_data_wr()
607 if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) in make_tx_data_wr()
608 cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT); in make_tx_data_wr()
616 static int push_tx_frames(struct cxgbi_sock *csk, int req_completion) in push_tx_frames() argument
621 if (unlikely(csk->state < CTP_ESTABLISHED || in push_tx_frames()
622 csk->state == CTP_CLOSE_WAIT_1 || csk->state >= CTP_ABORTING)) { in push_tx_frames()
626 csk, csk->state, csk->flags, csk->tid); in push_tx_frames()
630 while (csk->wr_cred && (skb = skb_peek(&csk->write_queue)) != NULL) { in push_tx_frames()
653 if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) { in push_tx_frames()
654 flowclen16 = send_tx_flowc_wr(csk); in push_tx_frames()
655 csk->wr_cred -= flowclen16; in push_tx_frames()
656 csk->wr_una_cred += flowclen16; in push_tx_frames()
657 cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT); in push_tx_frames()
660 if (csk->wr_cred < credits_needed) { in push_tx_frames()
663 csk, skb->len, skb->data_len, in push_tx_frames()
664 credits_needed, csk->wr_cred); in push_tx_frames()
667 __skb_unlink(skb, &csk->write_queue); in push_tx_frames()
668 set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id); in push_tx_frames()
670 csk->wr_cred -= credits_needed; in push_tx_frames()
671 csk->wr_una_cred += credits_needed; in push_tx_frames()
672 cxgbi_sock_enqueue_wr(csk, skb); in push_tx_frames()
676 csk, skb->len, skb->data_len, credits_needed, in push_tx_frames()
677 csk->wr_cred, csk->wr_una_cred); in push_tx_frames()
681 make_tx_data_wr(csk, skb, dlen, len, credits_needed, in push_tx_frames()
683 csk->snd_nxt += len; in push_tx_frames()
687 t4_set_arp_err_handler(skb, csk, arp_failure_skb_discard); in push_tx_frames()
691 csk, csk->state, csk->flags, csk->tid, skb, len); in push_tx_frames()
693 cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t); in push_tx_frames()
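
push_tx_frames() meters work requests against a credit budget: each WR debits wr_cred and credits wr_una_cred until the hardware returns credits via FW4_ACK (handled by do_fw4_ack() further down). A standalone sketch of that accounting; the sizes are hypothetical:

    /* Sketch of the WR credit accounting performed by the TX loop above. */
    #include <stdio.h>

    static unsigned int wr_cred = 64, wr_una_cred;  /* free / unacked credits */

    static int send_wr(unsigned int credits_needed)
    {
        if (wr_cred < credits_needed)
            return -1;                     /* stall until FW4_ACK refills */
        wr_cred -= credits_needed;
        wr_una_cred += credits_needed;
        return 0;
    }

    static void fw4_ack(unsigned int credits)   /* credits returned by hw */
    {
        wr_cred += credits;
        wr_una_cred -= credits;
    }

    int main(void)
    {
        send_wr(10);
        printf("after send: free=%u unacked=%u\n", wr_cred, wr_una_cred);
        fw4_ack(10);
        printf("after ack:  free=%u unacked=%u\n", wr_cred, wr_una_cred);
        return 0;
    }
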
698 static inline void free_atid(struct cxgbi_sock *csk) in free_atid() argument
700 struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev); in free_atid()
702 if (cxgbi_sock_flag(csk, CTPF_HAS_ATID)) { in free_atid()
703 cxgb4_free_atid(lldi->tids, csk->atid); in free_atid()
704 cxgbi_sock_clear_flag(csk, CTPF_HAS_ATID); in free_atid()
705 cxgbi_sock_put(csk); in free_atid()
711 struct cxgbi_sock *csk; in do_act_establish() local
720 csk = lookup_atid(t, atid); in do_act_establish()
721 if (unlikely(!csk)) { in do_act_establish()
726 if (csk->atid != atid) { in do_act_establish()
728 atid, csk, csk->state, csk->flags, csk->tid, csk->atid); in do_act_establish()
733 (&csk->saddr), (&csk->daddr), in do_act_establish()
734 atid, tid, csk, csk->state, csk->flags, rcv_isn); in do_act_establish()
738 cxgbi_sock_get(csk); in do_act_establish()
739 csk->tid = tid; in do_act_establish()
740 cxgb4_insert_tid(lldi->tids, csk, tid); in do_act_establish()
741 cxgbi_sock_set_flag(csk, CTPF_HAS_TID); in do_act_establish()
743 free_atid(csk); in do_act_establish()
745 spin_lock_bh(&csk->lock); in do_act_establish()
746 if (unlikely(csk->state != CTP_ACTIVE_OPEN)) in do_act_establish()
748 csk, csk->state, csk->flags, csk->tid); in do_act_establish()
750 if (csk->retry_timer.function) { in do_act_establish()
751 del_timer(&csk->retry_timer); in do_act_establish()
752 csk->retry_timer.function = NULL; in do_act_establish()
755 csk->copied_seq = csk->rcv_wup = csk->rcv_nxt = rcv_isn; in do_act_establish()
760 if (csk->rcv_win > (RCV_BUFSIZ_MASK << 10)) in do_act_establish()
761 csk->rcv_wup -= csk->rcv_win - (RCV_BUFSIZ_MASK << 10); in do_act_establish()
763 csk->advmss = lldi->mtus[TCPOPT_MSS_G(tcp_opt)] - 40; in do_act_establish()
765 csk->advmss -= 12; in do_act_establish()
766 if (csk->advmss < 128) in do_act_establish()
767 csk->advmss = 128; in do_act_establish()
771 csk, TCPOPT_MSS_G(tcp_opt), csk->advmss); in do_act_establish()
773 cxgbi_sock_established(csk, ntohl(req->snd_isn), ntohs(req->tcp_opt)); in do_act_establish()
775 if (unlikely(cxgbi_sock_flag(csk, CTPF_ACTIVE_CLOSE_NEEDED))) in do_act_establish()
776 send_abort_req(csk); in do_act_establish()
778 if (skb_queue_len(&csk->write_queue)) in do_act_establish()
779 push_tx_frames(csk, 0); in do_act_establish()
780 cxgbi_conn_tx_open(csk); in do_act_establish()
782 spin_unlock_bh(&csk->lock); in do_act_establish()
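
Two details in do_act_establish() deserve a note: rcv_wup is walked back when the real receive window exceeds what the RCV_BUFSIZ field could advertise, and advmss is derived from the adapter MTU table minus 40 bytes of IP+TCP header, minus 12 more for TCP timestamps, floored at 128. A sketch of the MSS part; the MTU table values are made up:

    /* Sketch of the advmss derivation shown above. */
    #include <stdio.h>

    int main(void)
    {
        unsigned short mtus[] = { 88, 576, 1500, 9000 };  /* hypothetical */
        int mss_idx = 2;                   /* index negotiated via tcp_opt */
        int timestamps = 1;
        int advmss = mtus[mss_idx] - 40;   /* strip IPv4 + TCP headers */

        if (timestamps)
            advmss -= 12;                  /* TCP timestamp option */
        if (advmss < 128)
            advmss = 128;                  /* sanity floor from the listing */

        printf("advmss = %d\n", advmss);
        return 0;
    }
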
809 struct cxgbi_sock *csk = (struct cxgbi_sock *)data; in csk_act_open_retry_timer() local
810 struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev); in csk_act_open_retry_timer()
817 csk, csk->state, csk->flags, csk->tid); in csk_act_open_retry_timer()
819 cxgbi_sock_get(csk); in csk_act_open_retry_timer()
820 spin_lock_bh(&csk->lock); in csk_act_open_retry_timer()
830 if (csk->csk_family == AF_INET) { in csk_act_open_retry_timer()
841 cxgbi_sock_fail_act_open(csk, -ENOMEM); in csk_act_open_retry_timer()
843 skb->sk = (struct sock *)csk; in csk_act_open_retry_timer()
844 t4_set_arp_err_handler(skb, csk, in csk_act_open_retry_timer()
846 send_act_open_func(csk, skb, csk->l2t); in csk_act_open_retry_timer()
849 spin_unlock_bh(&csk->lock); in csk_act_open_retry_timer()
850 cxgbi_sock_put(csk); in csk_act_open_retry_timer()
863 struct cxgbi_sock *csk; in do_act_open_rpl() local
872 csk = lookup_atid(t, atid); in do_act_open_rpl()
873 if (unlikely(!csk)) { in do_act_open_rpl()
879 "csk 0x%p,%u,0x%lx. ", (&csk->saddr), (&csk->daddr), in do_act_open_rpl()
880 atid, tid, status, csk, csk->state, csk->flags); in do_act_open_rpl()
890 cxgb4_remove_tid(lldi->tids, csk->port_id, GET_TID(rpl)); in do_act_open_rpl()
892 cxgbi_sock_get(csk); in do_act_open_rpl()
893 spin_lock_bh(&csk->lock); in do_act_open_rpl()
896 csk->retry_timer.function != csk_act_open_retry_timer) { in do_act_open_rpl()
897 csk->retry_timer.function = csk_act_open_retry_timer; in do_act_open_rpl()
898 mod_timer(&csk->retry_timer, jiffies + HZ / 2); in do_act_open_rpl()
900 cxgbi_sock_fail_act_open(csk, in do_act_open_rpl()
903 spin_unlock_bh(&csk->lock); in do_act_open_rpl()
904 cxgbi_sock_put(csk); in do_act_open_rpl()
911 struct cxgbi_sock *csk; in do_peer_close() local
917 csk = lookup_tid(t, tid); in do_peer_close()
918 if (unlikely(!csk)) { in do_peer_close()
923 (&csk->saddr), (&csk->daddr), in do_peer_close()
924 csk, csk->state, csk->flags, csk->tid); in do_peer_close()
925 cxgbi_sock_rcv_peer_close(csk); in do_peer_close()
932 struct cxgbi_sock *csk; in do_close_con_rpl() local
938 csk = lookup_tid(t, tid); in do_close_con_rpl()
939 if (unlikely(!csk)) { in do_close_con_rpl()
944 (&csk->saddr), (&csk->daddr), in do_close_con_rpl()
945 csk, csk->state, csk->flags, csk->tid); in do_close_con_rpl()
946 cxgbi_sock_rcv_close_conn_rpl(csk, ntohl(rpl->snd_nxt)); in do_close_con_rpl()
951 static int abort_status_to_errno(struct cxgbi_sock *csk, int abort_reason, in abort_status_to_errno() argument
957 return csk->state > CTP_ESTABLISHED ? in abort_status_to_errno()
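
Only the post-ESTABLISHED ternary of abort_status_to_errno() appears in the listing; the helper maps a CPL abort reason onto an errno. An illustrative reconstruction under that assumption, with hypothetical reason codes:

    /* Sketch: map an abort reason to an errno, keyed off whether the
     * connection had passed ESTABLISHED (the ternary visible above). */
    #include <errno.h>
    #include <stdio.h>

    enum { CONN_RESET = 1, XMIT_TIMEDOUT = 2 };     /* hypothetical codes */

    static int abort_status_to_errno(int past_established, int reason)
    {
        switch (reason) {
        case CONN_RESET:
            return past_established ? -EPIPE : -ECONNRESET;
        case XMIT_TIMEDOUT:
            return -ETIMEDOUT;
        default:
            return -EIO;
        }
    }

    int main(void)
    {
        printf("%d\n", abort_status_to_errno(0, CONN_RESET)); /* -ECONNRESET */
        return 0;
    }
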
971 struct cxgbi_sock *csk; in do_abort_req_rss() local
978 csk = lookup_tid(t, tid); in do_abort_req_rss()
979 if (unlikely(!csk)) { in do_abort_req_rss()
985 (&csk->saddr), (&csk->daddr), in do_abort_req_rss()
986 csk, csk->state, csk->flags, csk->tid, req->status); in do_abort_req_rss()
991 cxgbi_sock_get(csk); in do_abort_req_rss()
992 spin_lock_bh(&csk->lock); in do_abort_req_rss()
994 cxgbi_sock_clear_flag(csk, CTPF_ABORT_REQ_RCVD); in do_abort_req_rss()
996 if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) { in do_abort_req_rss()
997 send_tx_flowc_wr(csk); in do_abort_req_rss()
998 cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT); in do_abort_req_rss()
1001 cxgbi_sock_set_flag(csk, CTPF_ABORT_REQ_RCVD); in do_abort_req_rss()
1002 cxgbi_sock_set_state(csk, CTP_ABORTING); in do_abort_req_rss()
1004 send_abort_rpl(csk, rst_status); in do_abort_req_rss()
1006 if (!cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING)) { in do_abort_req_rss()
1007 csk->err = abort_status_to_errno(csk, req->status, &rst_status); in do_abort_req_rss()
1008 cxgbi_sock_closed(csk); in do_abort_req_rss()
1011 spin_unlock_bh(&csk->lock); in do_abort_req_rss()
1012 cxgbi_sock_put(csk); in do_abort_req_rss()
1019 struct cxgbi_sock *csk; in do_abort_rpl_rss() local
1025 csk = lookup_tid(t, tid); in do_abort_rpl_rss()
1026 if (!csk) in do_abort_rpl_rss()
1029 if (csk) in do_abort_rpl_rss()
1031 (&csk->saddr), (&csk->daddr), csk, in do_abort_rpl_rss()
1032 csk->state, csk->flags, csk->tid, rpl->status); in do_abort_rpl_rss()
1037 cxgbi_sock_rcv_abort_rpl(csk); in do_abort_rpl_rss()
1044 struct cxgbi_sock *csk; in do_rx_data() local
1050 csk = lookup_tid(t, tid); in do_rx_data()
1051 if (!csk) { in do_rx_data()
1055 pr_err("csk 0x%p, tid %u, rcv cpl_rx_data.\n", csk, tid); in do_rx_data()
1056 spin_lock_bh(&csk->lock); in do_rx_data()
1057 send_abort_req(csk); in do_rx_data()
1058 spin_unlock_bh(&csk->lock); in do_rx_data()
1065 struct cxgbi_sock *csk; in do_rx_iscsi_hdr() local
1072 csk = lookup_tid(t, tid); in do_rx_iscsi_hdr()
1073 if (unlikely(!csk)) { in do_rx_iscsi_hdr()
1080 csk, csk->state, csk->flags, csk->tid, skb, skb->len, in do_rx_iscsi_hdr()
1083 spin_lock_bh(&csk->lock); in do_rx_iscsi_hdr()
1085 if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) { in do_rx_iscsi_hdr()
1088 csk, csk->state, csk->flags, csk->tid); in do_rx_iscsi_hdr()
1089 if (csk->state != CTP_ABORTING) in do_rx_iscsi_hdr()
1102 if (!csk->skb_ulp_lhdr) { in do_rx_iscsi_hdr()
1108 csk, csk->state, csk->flags, csk->tid, skb); in do_rx_iscsi_hdr()
1109 csk->skb_ulp_lhdr = skb; in do_rx_iscsi_hdr()
1112 if (cxgbi_skcb_tcp_seq(skb) != csk->rcv_nxt) { in do_rx_iscsi_hdr()
1114 csk->tid, cxgbi_skcb_tcp_seq(skb), in do_rx_iscsi_hdr()
1115 csk->rcv_nxt); in do_rx_iscsi_hdr()
1130 csk->tid, plen, hlen, dlen, in do_rx_iscsi_hdr()
1137 cxgbi_skcb_rx_pdulen(skb) += csk->dcrc_len; in do_rx_iscsi_hdr()
1138 csk->rcv_nxt += cxgbi_skcb_rx_pdulen(skb); in do_rx_iscsi_hdr()
1142 csk, skb, *bhs, hlen, dlen, in do_rx_iscsi_hdr()
1147 struct sk_buff *lskb = csk->skb_ulp_lhdr; in do_rx_iscsi_hdr()
1152 csk, csk->state, csk->flags, skb, lskb); in do_rx_iscsi_hdr()
1155 __skb_queue_tail(&csk->receive_queue, skb); in do_rx_iscsi_hdr()
1156 spin_unlock_bh(&csk->lock); in do_rx_iscsi_hdr()
1160 send_abort_req(csk); in do_rx_iscsi_hdr()
1162 spin_unlock_bh(&csk->lock); in do_rx_iscsi_hdr()
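
do_rx_iscsi_hdr() tracks PDU boundaries in sequence space: the skb carrying a PDU's BHS becomes skb_ulp_lhdr, its TCP sequence must equal rcv_nxt, and rcv_nxt then advances by the full PDU length with the data digest folded in. A sketch of that bookkeeping, with hypothetical lengths:

    /* Sketch of the sequence-number accounting visible above. */
    #include <stdio.h>

    int main(void)
    {
        unsigned int rcv_nxt = 1000;
        unsigned int skb_tcp_seq = 1000;   /* seq carried in the CPL */
        unsigned int rx_pdulen = 48 + 512; /* BHS + data, hypothetical */
        unsigned int dcrc_len = 4;         /* data digest, if negotiated */

        if (skb_tcp_seq != rcv_nxt) {
            fprintf(stderr, "out of order: seq %u != rcv_nxt %u\n",
                    skb_tcp_seq, rcv_nxt);
            return 1;                      /* driver aborts the connection */
        }
        rcv_nxt += rx_pdulen + dcrc_len;   /* pdulen += dcrc_len, then add */
        printf("rcv_nxt now %u\n", rcv_nxt);
        return 0;
    }
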
1170 struct cxgbi_sock *csk; in do_rx_data_ddp() local
1178 csk = lookup_tid(t, tid); in do_rx_data_ddp()
1179 if (unlikely(!csk)) { in do_rx_data_ddp()
1186 csk, csk->state, csk->flags, skb, status, csk->skb_ulp_lhdr); in do_rx_data_ddp()
1188 spin_lock_bh(&csk->lock); in do_rx_data_ddp()
1190 if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) { in do_rx_data_ddp()
1193 csk, csk->state, csk->flags, csk->tid); in do_rx_data_ddp()
1194 if (csk->state != CTP_ABORTING) in do_rx_data_ddp()
1200 if (!csk->skb_ulp_lhdr) { in do_rx_data_ddp()
1201 pr_err("tid 0x%x, rcv RX_DATA_DDP w/o pdu bhs.\n", csk->tid); in do_rx_data_ddp()
1205 lskb = csk->skb_ulp_lhdr; in do_rx_data_ddp()
1206 csk->skb_ulp_lhdr = NULL; in do_rx_data_ddp()
1212 csk->tid, ntohs(rpl->len), cxgbi_skcb_rx_pdulen(lskb)); in do_rx_data_ddp()
1216 csk, lskb, status, cxgbi_skcb_flags(lskb)); in do_rx_data_ddp()
1221 csk, lskb, status, cxgbi_skcb_flags(lskb)); in do_rx_data_ddp()
1227 csk, lskb, status); in do_rx_data_ddp()
1234 csk, lskb, status); in do_rx_data_ddp()
1239 csk, lskb, cxgbi_skcb_flags(lskb)); in do_rx_data_ddp()
1242 cxgbi_conn_pdu_ready(csk); in do_rx_data_ddp()
1243 spin_unlock_bh(&csk->lock); in do_rx_data_ddp()
1247 send_abort_req(csk); in do_rx_data_ddp()
1249 spin_unlock_bh(&csk->lock); in do_rx_data_ddp()
1256 struct cxgbi_sock *csk; in do_fw4_ack() local
1262 csk = lookup_tid(t, tid); in do_fw4_ack()
1263 if (unlikely(!csk)) in do_fw4_ack()
1268 csk, csk->state, csk->flags, csk->tid); in do_fw4_ack()
1269 cxgbi_sock_rcv_wr_ack(csk, rpl->credits, ntohl(rpl->snd_una), in do_fw4_ack()
1281 struct cxgbi_sock *csk; in do_set_tcb_rpl() local
1283 csk = lookup_tid(t, tid); in do_set_tcb_rpl()
1284 if (!csk) in do_set_tcb_rpl()
1289 csk, csk->state, csk->flags, csk->tid, rpl->status); in do_set_tcb_rpl()
1293 csk, tid, rpl->status); in do_set_tcb_rpl()
1298 static int alloc_cpls(struct cxgbi_sock *csk) in alloc_cpls() argument
1300 csk->cpl_close = alloc_wr(sizeof(struct cpl_close_con_req), in alloc_cpls()
1302 if (!csk->cpl_close) in alloc_cpls()
1305 csk->cpl_abort_req = alloc_wr(sizeof(struct cpl_abort_req), in alloc_cpls()
1307 if (!csk->cpl_abort_req) in alloc_cpls()
1310 csk->cpl_abort_rpl = alloc_wr(sizeof(struct cpl_abort_rpl), in alloc_cpls()
1312 if (!csk->cpl_abort_rpl) in alloc_cpls()
1317 cxgbi_sock_free_cpl_skbs(csk); in alloc_cpls()
1321 static inline void l2t_put(struct cxgbi_sock *csk) in l2t_put() argument
1323 if (csk->l2t) { in l2t_put()
1324 cxgb4_l2t_release(csk->l2t); in l2t_put()
1325 csk->l2t = NULL; in l2t_put()
1326 cxgbi_sock_put(csk); in l2t_put()
1330 static void release_offload_resources(struct cxgbi_sock *csk) in release_offload_resources() argument
1334 struct net_device *ndev = csk->cdev->ports[csk->port_id]; in release_offload_resources()
1339 csk, csk->state, csk->flags, csk->tid); in release_offload_resources()
1341 cxgbi_sock_free_cpl_skbs(csk); in release_offload_resources()
1342 if (csk->wr_cred != csk->wr_max_cred) { in release_offload_resources()
1343 cxgbi_sock_purge_wr_queue(csk); in release_offload_resources()
1344 cxgbi_sock_reset_wr_list(csk); in release_offload_resources()
1347 l2t_put(csk); in release_offload_resources()
1349 if (csk->csk_family == AF_INET6) in release_offload_resources()
1351 (const u32 *)&csk->saddr6.sin6_addr, 1); in release_offload_resources()
1354 if (cxgbi_sock_flag(csk, CTPF_HAS_ATID)) in release_offload_resources()
1355 free_atid(csk); in release_offload_resources()
1356 else if (cxgbi_sock_flag(csk, CTPF_HAS_TID)) { in release_offload_resources()
1357 lldi = cxgbi_cdev_priv(csk->cdev); in release_offload_resources()
1358 cxgb4_remove_tid(lldi->tids, 0, csk->tid); in release_offload_resources()
1359 cxgbi_sock_clear_flag(csk, CTPF_HAS_TID); in release_offload_resources()
1360 cxgbi_sock_put(csk); in release_offload_resources()
1362 csk->dst = NULL; in release_offload_resources()
1363 csk->cdev = NULL; in release_offload_resources()
1366 static int init_act_open(struct cxgbi_sock *csk) in init_act_open() argument
1368 struct cxgbi_device *cdev = csk->cdev; in init_act_open()
1370 struct net_device *ndev = cdev->ports[csk->port_id]; in init_act_open()
1382 csk, csk->state, csk->flags, csk->tid); in init_act_open()
1384 if (csk->csk_family == AF_INET) in init_act_open()
1385 daddr = &csk->daddr.sin_addr.s_addr; in init_act_open()
1387 else if (csk->csk_family == AF_INET6) in init_act_open()
1388 daddr = &csk->daddr6.sin6_addr; in init_act_open()
1391 pr_err("address family 0x%x not supported\n", csk->csk_family); in init_act_open()
1395 n = dst_neigh_lookup(csk->dst, daddr); in init_act_open()
1402 csk->atid = cxgb4_alloc_atid(lldi->tids, csk); in init_act_open()
1403 if (csk->atid < 0) { in init_act_open()
1407 cxgbi_sock_set_flag(csk, CTPF_HAS_ATID); in init_act_open()
1408 cxgbi_sock_get(csk); in init_act_open()
1410 csk->l2t = cxgb4_l2t_get(lldi->l2t, n, ndev, 0); in init_act_open()
1411 if (!csk->l2t) { in init_act_open()
1415 cxgbi_sock_get(csk); in init_act_open()
1418 if (csk->csk_family == AF_INET6) in init_act_open()
1419 cxgb4_clip_get(ndev, (const u32 *)&csk->saddr6.sin6_addr, 1); in init_act_open()
1430 if (csk->csk_family == AF_INET) in init_act_open()
1439 skb->sk = (struct sock *)csk; in init_act_open()
1440 t4_set_arp_err_handler(skb, csk, cxgbi_sock_act_open_req_arp_failure); in init_act_open()
1442 if (!csk->mtu) in init_act_open()
1443 csk->mtu = dst_mtu(csk->dst); in init_act_open()
1444 cxgb4_best_mtu(lldi->mtus, csk->mtu, &csk->mss_idx); in init_act_open()
1445 csk->tx_chan = cxgb4_port_chan(ndev); in init_act_open()
1447 csk->smac_idx = ((cxgb4_port_viid(ndev) & 0x7F)) << 1; in init_act_open()
1449 csk->txq_idx = cxgb4_port_idx(ndev) * step; in init_act_open()
1451 csk->rss_qid = lldi->rxq_ids[cxgb4_port_idx(ndev) * step]; in init_act_open()
1453 csk->snd_win = cxgb4i_snd_win; in init_act_open()
1454 csk->rcv_win = cxgb4i_rcv_win; in init_act_open()
1456 csk->rcv_win = CXGB4I_DEFAULT_10G_RCV_WIN; in init_act_open()
1459 csk->rcv_win *= rcv_winf; in init_act_open()
1462 csk->snd_win = CXGB4I_DEFAULT_10G_SND_WIN; in init_act_open()
1465 csk->snd_win *= snd_winf; in init_act_open()
1467 csk->wr_cred = lldi->wr_cred - in init_act_open()
1469 csk->wr_max_cred = csk->wr_cred; in init_act_open()
1470 csk->wr_una_cred = 0; in init_act_open()
1471 cxgbi_sock_reset_wr_list(csk); in init_act_open()
1472 csk->err = 0; in init_act_open()
1475 (&csk->saddr), (&csk->daddr), csk, csk->state, in init_act_open()
1476 csk->flags, csk->tx_chan, csk->txq_idx, csk->rss_qid, in init_act_open()
1477 csk->mtu, csk->mss_idx, csk->smac_idx); in init_act_open()
1481 cxgbi_sock_set_state(csk, CTP_ACTIVE_OPEN); in init_act_open()
1482 if (csk->csk_family == AF_INET) in init_act_open()
1483 send_act_open_req(csk, skb, csk->l2t); in init_act_open()
1486 send_act_open_req6(csk, skb, csk->l2t); in init_act_open()
1494 if (csk->csk_family == AF_INET6) in init_act_open()
1496 (const u32 *)&csk->saddr6.sin6_addr, 1); in init_act_open()
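
init_act_open() pairs every hardware resource it acquires with a reference on the socket: one cxgbi_sock_get() after cxgb4_alloc_atid() succeeds, another after cxgb4_l2t_get(), each dropped again by free_atid() and l2t_put() on teardown. A toy counter showing the pairing:

    /* Sketch of the resource/refcount pairing in the lines above. */
    #include <stdio.h>

    static int refcnt = 1;                 /* socket starts with one ref */

    static void sock_get(void) { refcnt++; }
    static void sock_put(void) { refcnt--; }

    int main(void)
    {
        sock_get();                        /* cxgb4_alloc_atid() succeeded */
        sock_get();                        /* cxgb4_l2t_get() succeeded */
        printf("held: %d\n", refcnt);      /* 3 */

        sock_put();                        /* free_atid() */
        sock_put();                        /* l2t_put() */
        printf("held: %d\n", refcnt);      /* back to 1 */
        return 0;
    }
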
1614 static int ddp_set_map(struct cxgbi_sock *csk, struct cxgbi_pagepod_hdr *hdr, in ddp_set_map() argument
1625 err = ddp_ppod_write_idata(csk->cdev, csk->port_id, hdr, in ddp_set_map()
1650 static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, unsigned int tid, in ddp_setup_conn_pgidx() argument
1665 INIT_TP_WR(req, csk->tid); in ddp_setup_conn_pgidx()
1666 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid)); in ddp_setup_conn_pgidx()
1667 req->reply_ctrl = htons(NO_REPLY_V(reply) | QUEUENO_V(csk->rss_qid)); in ddp_setup_conn_pgidx()
1671 set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->port_id); in ddp_setup_conn_pgidx()
1674 "csk 0x%p, tid 0x%x, pg_idx %u.\n", csk, csk->tid, pg_idx); in ddp_setup_conn_pgidx()
1676 cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb); in ddp_setup_conn_pgidx()
1680 static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid, in ddp_setup_conn_digest() argument
1693 csk->hcrc_len = (hcrc ? 4 : 0); in ddp_setup_conn_digest()
1694 csk->dcrc_len = (dcrc ? 4 : 0); in ddp_setup_conn_digest()
1699 req->reply_ctrl = htons(NO_REPLY_V(reply) | QUEUENO_V(csk->rss_qid)); in ddp_setup_conn_digest()
1704 set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->port_id); in ddp_setup_conn_digest()
1707 "csk 0x%p, tid 0x%x, crc %d,%d.\n", csk, csk->tid, hcrc, dcrc); in ddp_setup_conn_digest()
1709 cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb); in ddp_setup_conn_digest()
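
ddp_setup_conn_digest() records the negotiated digest lengths: iSCSI header and data digests are CRC32C, 4 bytes each when enabled, and dcrc_len later feeds the RX PDU-length accounting in do_rx_iscsi_hdr(). A trivial sketch:

    /* Sketch of the digest-length bookkeeping above. */
    #include <stdio.h>

    int main(void)
    {
        int hcrc = 1, dcrc = 0;            /* negotiated at iSCSI login */
        int hcrc_len = hcrc ? 4 : 0;       /* CRC32C header digest */
        int dcrc_len = dcrc ? 4 : 0;       /* CRC32C data digest */

        printf("hcrc_len=%d dcrc_len=%d\n", hcrc_len, dcrc_len);
        return 0;
    }
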