Lines matching refs: skb
156 static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb, in send_act_open_req() argument
160 struct cpl_act_open_req *req = (struct cpl_act_open_req *)skb->head; in send_act_open_req()
162 skb->priority = CPL_PRIORITY_SETUP; in send_act_open_req()
184 l2t_send(csk->cdev->lldev, skb, csk->l2t); in send_act_open_req()
187 static inline void act_open_arp_failure(struct t3cdev *dev, struct sk_buff *skb) in act_open_arp_failure() argument
189 cxgbi_sock_act_open_req_arp_failure(NULL, skb); in act_open_arp_failure()
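The active-open lines above follow the standard cxgb3 offload send pattern: the CPL lives at skb->head, skb->priority selects the setup-priority queue, and an ARP-failure handler is attached before the skb is handed to l2t_send(). A minimal sketch of that shape, assuming the cxgb3 offload headers (t3_cpl.h, l2t.h, firmware_exports.h); send_act_open_sketch() is a hypothetical name and the address/window fields are elided:

static void send_act_open_sketch(struct t3cdev *tdev, struct l2t_entry *l2t,
                                 struct sk_buff *skb, unsigned int atid)
{
        /* the WR/CPL was sized at alloc time; it sits at skb->head */
        struct cpl_act_open_req *req = (struct cpl_act_open_req *)skb->head;

        skb->priority = CPL_PRIORITY_SETUP;     /* connection-setup queue */
        req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ACT_OPEN_REQ, atid));
        /* local/peer address, options and window fields elided */
        set_arp_failure_handler(skb, act_open_arp_failure);
        l2t_send(tdev, skb, l2t);               /* resolve L2, then send */
}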
200 struct sk_buff *skb = csk->cpl_close; in send_close_req() local
201 struct cpl_close_con_req *req = (struct cpl_close_con_req *)skb->head; in send_close_req()
214 cxgbi_sock_skb_entail(csk, skb); in send_close_req()
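Note that the close request is not handed to the hardware directly: it is entailed on the socket's write queue so the FIN goes out in order behind any pending TX data. A sketch under that reading, assuming the libcxgbi socket type and its pre-allocated csk->cpl_close skb; send_close_sketch() is a hypothetical name:

static void send_close_sketch(struct cxgbi_sock *csk)
{
        struct sk_buff *skb = csk->cpl_close;   /* pre-allocated at setup */
        struct cpl_close_con_req *req =
                (struct cpl_close_con_req *)skb->head;

        csk->cpl_close = NULL;                  /* consume the one-shot skb */
        req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_CLOSE_CON));
        req->wr.wr_lo = htonl(V_WR_TID(csk->tid));
        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, csk->tid));
        req->rsvd = htonl(csk->write_seq);      /* sequence of the FIN */

        cxgbi_sock_skb_entail(csk, skb);        /* queue behind pending data */
}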
226 static void abort_arp_failure(struct t3cdev *tdev, struct sk_buff *skb) in abort_arp_failure() argument
228 struct cpl_abort_req *req = cplhdr(skb); in abort_arp_failure()
232 tdev, GET_TID(req), skb); in abort_arp_failure()
234 cxgb3_ofld_send(tdev, skb); in abort_arp_failure()
239 struct sk_buff *skb = csk->cpl_abort_req; in send_abort_req() local
242 if (unlikely(csk->state == CTP_ABORTING || !skb)) in send_abort_req()
250 req = (struct cpl_abort_req *)skb->head; in send_abort_req()
251 skb->priority = CPL_PRIORITY_DATA; in send_abort_req()
252 set_arp_failure_handler(skb, abort_arp_failure); in send_abort_req()
265 l2t_send(csk->cdev->lldev, skb, csk->l2t); in send_abort_req()
275 struct sk_buff *skb = csk->cpl_abort_rpl; in send_abort_rpl() local
276 struct cpl_abort_rpl *rpl = (struct cpl_abort_rpl *)skb->head; in send_abort_rpl()
283 skb->priority = CPL_PRIORITY_DATA; in send_abort_rpl()
288 cxgb3_ofld_send(csk->cdev->lldev, skb); in send_abort_rpl()
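The abort pair above differs from the discard-on-failure TX path: an ABORT_REQ must reach the hardware even if L2 resolution fails, so abort_arp_failure() retransmits instead of dropping. A sketch close to the lines above (cplhdr() and CPL_ABORT_NO_RST come from the cxgb3 headers; the _sketch name is illustrative):

static void abort_arp_failure_sketch(struct t3cdev *tdev, struct sk_buff *skb)
{
        struct cpl_abort_req *req = cplhdr(skb);

        req->cmd = CPL_ABORT_NO_RST;    /* as in the lines above */
        cxgb3_ofld_send(tdev, skb);     /* retransmit rather than drop */
}

The reply side (send_abort_rpl) needs no such handler; it is fired once with cxgb3_ofld_send() at CPL_PRIORITY_DATA.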
298 struct sk_buff *skb; in send_rx_credits() local
306 skb = alloc_wr(sizeof(*req), 0, GFP_ATOMIC); in send_rx_credits()
307 if (!skb) { in send_rx_credits()
311 req = (struct cpl_rx_data_ack *)skb->head; in send_rx_credits()
316 skb->priority = CPL_PRIORITY_ACK; in send_rx_credits()
317 cxgb3_ofld_send(csk->cdev->lldev, skb); in send_rx_credits()
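send_rx_credits() shows the fire-and-forget control pattern: a fresh work-request skb is allocated with GFP_ATOMIC, the CPL is filled in place, and the skb goes straight out on the ACK-priority queue. A sketch, assuming the driver-local alloc_wr() helper and the RX_DATA_ACK field macros from t3_cpl.h:

static u32 send_rx_credits_sketch(struct cxgbi_sock *csk, u32 credits)
{
        struct cpl_rx_data_ack *req;
        struct sk_buff *skb = alloc_wr(sizeof(*req), 0, GFP_ATOMIC);

        if (!skb)
                return 0;       /* nothing returned; caller retries later */

        req = (struct cpl_rx_data_ack *)skb->head;
        req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RX_DATA_ACK, csk->tid));
        req->credit_dack = htonl(F_RX_DACK_CHANGE | V_RX_DACK_MODE(1) |
                                 V_RX_CREDITS(credits));
        skb->priority = CPL_PRIORITY_ACK;
        cxgb3_ofld_send(csk->cdev->lldev, skb);
        return credits;
}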
349 static inline void make_tx_data_wr(struct cxgbi_sock *csk, struct sk_buff *skb, in make_tx_data_wr() argument
355 skb_reset_transport_header(skb); in make_tx_data_wr()
356 req = (struct tx_data_wr *)__skb_push(skb, sizeof(*req)); in make_tx_data_wr()
363 req->flags = htonl(V_TX_ULP_SUBMODE(cxgbi_skcb_ulp_mode(skb)) | in make_tx_data_wr()
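make_tx_data_wr() prepends rather than appends: skb_reset_transport_header() marks where the payload starts, then __skb_push() opens room for the TX_DATA work request in the headroom. A trimmed sketch (the shove/port/completion fields of the real routine are elided; cxgbi_skcb_ulp_mode() is the libcxgbi per-skb control-block accessor):

static void make_tx_data_wr_sketch(struct cxgbi_sock *csk,
                                   struct sk_buff *skb, int len)
{
        struct tx_data_wr *req;

        skb_reset_transport_header(skb);        /* payload begins here */
        req = (struct tx_data_wr *)__skb_push(skb, sizeof(*req));
        req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA));
        req->wr_lo = htonl(V_WR_TID(csk->tid));
        req->len = htonl(len);          /* includes HW ULP additions */
        req->flags = htonl(V_TX_ULP_SUBMODE(cxgbi_skcb_ulp_mode(skb)));
        req->sndseq = htonl(csk->snd_nxt);
}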
388 static void arp_failure_skb_discard(struct t3cdev *dev, struct sk_buff *skb) in arp_failure_skb_discard() argument
390 kfree_skb(skb); in arp_failure_skb_discard()
396 struct sk_buff *skb; in push_tx_frames() local
406 while (csk->wr_cred && (skb = skb_peek(&csk->write_queue)) != NULL) { in push_tx_frames()
407 int len = skb->len; /* length before skb_push */ in push_tx_frames()
408 int frags = skb_shinfo(skb)->nr_frags + (len != skb->data_len); in push_tx_frames()
419 csk, skb->len, skb->data_len, frags, in push_tx_frames()
424 __skb_unlink(skb, &csk->write_queue); in push_tx_frames()
425 skb->priority = CPL_PRIORITY_DATA; in push_tx_frames()
426 skb->csum = wrs_needed; /* remember this until the WR_ACK */ in push_tx_frames()
429 cxgbi_sock_enqueue_wr(csk, skb); in push_tx_frames()
434 csk, skb->len, skb->data_len, frags, skb->csum, in push_tx_frames()
437 if (likely(cxgbi_skcb_test_flag(skb, SKCBF_TX_NEED_HDR))) { in push_tx_frames()
444 len += cxgbi_ulp_extra_len(cxgbi_skcb_ulp_mode(skb)); in push_tx_frames()
445 make_tx_data_wr(csk, skb, len, req_completion); in push_tx_frames()
447 cxgbi_skcb_clear_flag(skb, SKCBF_TX_NEED_HDR); in push_tx_frames()
449 total_size += skb->truesize; in push_tx_frames()
452 csk, csk->tid, skb); in push_tx_frames()
453 set_arp_failure_handler(skb, arp_failure_skb_discard); in push_tx_frames()
454 l2t_send(csk->cdev->lldev, skb, csk->l2t); in push_tx_frames()
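push_tx_frames() is the TX pump: it drains the write queue while work-request credits last, stashes each skb's credit cost in the otherwise unused skb->csum field until the WR_ACK repays it, and (unlike the abort path) discards on ARP failure via arp_failure_skb_discard(). A condensed sketch of the loop, assuming the libcxgbi accessors; the credit arithmetic of the real code is simplified:

static void push_tx_frames_sketch(struct cxgbi_sock *csk)
{
        struct sk_buff *skb;

        while (csk->wr_cred && (skb = skb_peek(&csk->write_queue)) != NULL) {
                /* simplified: one WR per fragment plus the header */
                int wrs_needed = skb_shinfo(skb)->nr_frags + 1;

                if (csk->wr_cred < wrs_needed)
                        break;                  /* out of credits; stop */

                __skb_unlink(skb, &csk->write_queue);
                skb->priority = CPL_PRIORITY_DATA;
                skb->csum = wrs_needed;         /* repaid in do_wr_ack() */
                csk->wr_cred -= wrs_needed;
                cxgbi_sock_enqueue_wr(csk, skb);        /* track until acked */

                if (cxgbi_skcb_test_flag(skb, SKCBF_TX_NEED_HDR)) {
                        make_tx_data_wr(csk, skb, skb->len, 0);
                        cxgbi_skcb_clear_flag(skb, SKCBF_TX_NEED_HDR);
                }
                set_arp_failure_handler(skb, arp_failure_skb_discard);
                l2t_send(csk->cdev->lldev, skb, csk->l2t);
        }
}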
474 static int do_act_establish(struct t3cdev *tdev, struct sk_buff *skb, void *ctx) in do_act_establish() argument
477 struct cpl_act_establish *req = cplhdr(skb); in do_act_establish()
493 csk->rss_qid = G_QNUM(ntohs(skb->csum)); in do_act_establish()
521 __kfree_skb(skb); in do_act_establish()
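do_act_establish() illustrates two cxgb3 RX-path conventions: the CPL is read through cplhdr() (i.e., at skb->data), and the RSS queue id arrives stashed in skb->csum by the offload RX code. A sketch of the core, with the ATID-to-TID transition and state checks elided; handlers own and free the skb:

static int do_act_establish_sketch(struct t3cdev *tdev, struct sk_buff *skb,
                                   void *ctx)
{
        struct cpl_act_establish *req = cplhdr(skb);
        struct cxgbi_sock *csk = ctx;           /* set at act-open time */

        csk->tid = GET_TID(req);
        csk->rss_qid = G_QNUM(ntohs(skb->csum));        /* qid, not csum */
        csk->rcv_nxt = ntohl(req->rcv_isn);     /* HW reports RCV_ISN + 1 */
        __kfree_skb(skb);                       /* handlers consume the skb */
        return 0;
}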
549 struct sk_buff *skb; in act_open_retry_timer() local
558 skb = alloc_wr(sizeof(struct cpl_act_open_req), 0, GFP_ATOMIC); in act_open_retry_timer()
559 if (!skb) in act_open_retry_timer()
562 skb->sk = (struct sock *)csk; in act_open_retry_timer()
563 set_arp_failure_handler(skb, act_open_arp_failure); in act_open_retry_timer()
564 send_act_open_req(csk, skb, csk->l2t); in act_open_retry_timer()
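act_open_retry_timer() rebuilds the whole work request rather than reusing the failed one, and, running in timer (atomic) context, must allocate with GFP_ATOMIC. A sketch; cxgbi_sock_fail_act_open() is the libcxgbi failure path:

static void act_open_retry_sketch(struct cxgbi_sock *csk)
{
        struct sk_buff *skb = alloc_wr(sizeof(struct cpl_act_open_req), 0,
                                       GFP_ATOMIC);     /* timer context */

        if (!skb) {
                cxgbi_sock_fail_act_open(csk, -ENOMEM);
                return;
        }
        skb->sk = (struct sock *)csk;   /* lets the ARP-failure cb find csk */
        set_arp_failure_handler(skb, act_open_arp_failure);
        send_act_open_req(csk, skb, csk->l2t);
}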
570 static int do_act_open_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx) in do_act_open_rpl() argument
573 struct cpl_act_open_rpl *rpl = cplhdr(skb); in do_act_open_rpl()
597 __kfree_skb(skb); in do_act_open_rpl()
605 static int do_peer_close(struct t3cdev *cdev, struct sk_buff *skb, void *ctx) in do_peer_close() argument
614 __kfree_skb(skb); in do_peer_close()
622 static int do_close_con_rpl(struct t3cdev *cdev, struct sk_buff *skb, in do_close_con_rpl() argument
626 struct cpl_close_con_rpl *rpl = cplhdr(skb); in do_close_con_rpl()
633 __kfree_skb(skb); in do_close_con_rpl()
660 static int do_abort_req(struct t3cdev *cdev, struct sk_buff *skb, void *ctx) in do_abort_req() argument
662 const struct cpl_abort_req_rss *req = cplhdr(skb); in do_abort_req()
696 __kfree_skb(skb); in do_abort_req()
707 static int do_abort_rpl(struct t3cdev *cdev, struct sk_buff *skb, void *ctx) in do_abort_rpl() argument
709 struct cpl_abort_rpl_rss *rpl = cplhdr(skb); in do_abort_rpl()
734 __kfree_skb(skb); in do_abort_rpl()
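The run of handlers from do_act_open_rpl() through do_abort_rpl() all share one shape, registered per-opcode with cxgb3_register_cpl_handler(): parse the CPL at skb->data, hand the event to libcxgbi against the connection passed in ctx, and consume the skb. do_close_con_rpl() is representative; a sketch close to its core:

static int do_close_con_rpl_sketch(struct t3cdev *cdev, struct sk_buff *skb,
                                   void *ctx)
{
        struct cxgbi_sock *csk = ctx;
        struct cpl_close_con_rpl *rpl = cplhdr(skb);

        csk->snd_una = ntohl(rpl->snd_nxt) - 1; /* exclude the FIN */
        cxgbi_sock_rcv_close_conn_rpl(csk, ntohl(rpl->snd_nxt));
        __kfree_skb(skb);
        return 0;
}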
743 static int do_iscsi_hdr(struct t3cdev *t3dev, struct sk_buff *skb, void *ctx) in do_iscsi_hdr() argument
746 struct cpl_iscsi_hdr *hdr_cpl = cplhdr(skb); in do_iscsi_hdr()
755 csk, csk->state, csk->flags, csk->tid, skb, skb->len); in do_iscsi_hdr()
769 cxgbi_skcb_tcp_seq(skb) = ntohl(hdr_cpl->seq); in do_iscsi_hdr()
770 cxgbi_skcb_flags(skb) = 0; in do_iscsi_hdr()
772 skb_reset_transport_header(skb); in do_iscsi_hdr()
773 __skb_pull(skb, sizeof(struct cpl_iscsi_hdr)); in do_iscsi_hdr()
777 if (skb->len <= hdr_len) { in do_iscsi_hdr()
780 skb->len, hdr_len); in do_iscsi_hdr()
783 cxgbi_skcb_set_flag(skb, SKCBF_RX_COALESCED); in do_iscsi_hdr()
785 err = skb_copy_bits(skb, skb->len - sizeof(ddp_cpl), &ddp_cpl, in do_iscsi_hdr()
790 skb->len, sizeof(ddp_cpl), err); in do_iscsi_hdr()
794 cxgbi_skcb_set_flag(skb, SKCBF_RX_STATUS); in do_iscsi_hdr()
795 cxgbi_skcb_rx_pdulen(skb) = ntohs(ddp_cpl.len); in do_iscsi_hdr()
796 cxgbi_skcb_rx_ddigest(skb) = ntohl(ddp_cpl.ulp_crc); in do_iscsi_hdr()
801 csk, skb, skb->len, cxgbi_skcb_rx_pdulen(skb), status); in do_iscsi_hdr()
804 cxgbi_skcb_set_flag(skb, SKCBF_RX_HCRC_ERR); in do_iscsi_hdr()
806 cxgbi_skcb_set_flag(skb, SKCBF_RX_DCRC_ERR); in do_iscsi_hdr()
808 cxgbi_skcb_set_flag(skb, SKCBF_RX_PAD_ERR); in do_iscsi_hdr()
810 if (skb->len > (hdr_len + sizeof(ddp_cpl))) { in do_iscsi_hdr()
811 err = skb_copy_bits(skb, hdr_len, &data_cpl, sizeof(data_cpl)); in do_iscsi_hdr()
815 csk->tid, sizeof(data_cpl), skb->len, err); in do_iscsi_hdr()
821 skb, data_len, cxgbi_skcb_rx_pdulen(skb), status); in do_iscsi_hdr()
824 cxgbi_skcb_set_flag(skb, SKCBF_RX_DATA_DDPD); in do_iscsi_hdr()
826 csk->rcv_nxt = ntohl(ddp_cpl.seq) + cxgbi_skcb_rx_pdulen(skb); in do_iscsi_hdr()
827 __pskb_trim(skb, len); in do_iscsi_hdr()
828 __skb_queue_tail(&csk->receive_queue, skb); in do_iscsi_hdr()
838 __kfree_skb(skb); in do_iscsi_hdr()
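do_iscsi_hdr() does the most skb surgery in the file: pull the leading CPL off the front, skb_copy_bits() the trailing RX_DATA_DDP status CPL out of the tail, record PDU length, digest, and DDP status in the skb control block, trim, and queue for the iSCSI layer. A condensed sketch (coalesced-data and error branches elided; the real code trims to the computed PDU length rather than simply dropping the trailer):

static int rx_iscsi_hdr_sketch(struct cxgbi_sock *csk, struct sk_buff *skb)
{
        struct cpl_rx_data_ddp_norss ddp_cpl;
        int err;

        __skb_pull(skb, sizeof(struct cpl_iscsi_hdr));  /* drop front CPL */
        err = skb_copy_bits(skb, skb->len - sizeof(ddp_cpl), &ddp_cpl,
                            sizeof(ddp_cpl));           /* status trailer */
        if (err < 0)
                return err;

        cxgbi_skcb_set_flag(skb, SKCBF_RX_STATUS);
        cxgbi_skcb_rx_pdulen(skb) = ntohs(ddp_cpl.len);
        cxgbi_skcb_rx_ddigest(skb) = ntohl(ddp_cpl.ulp_crc);
        csk->rcv_nxt = ntohl(ddp_cpl.seq) + cxgbi_skcb_rx_pdulen(skb);

        __pskb_trim(skb, skb->len - sizeof(ddp_cpl));   /* simplified trim */
        __skb_queue_tail(&csk->receive_queue, skb);
        return 0;
}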
847 static int do_wr_ack(struct t3cdev *cdev, struct sk_buff *skb, void *ctx) in do_wr_ack() argument
850 struct cpl_wr_ack *hdr = cplhdr(skb); in do_wr_ack()
857 __kfree_skb(skb); in do_wr_ack()
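do_wr_ack() closes the credit loop opened in push_tx_frames(): the ack returns credits, and libcxgbi uses the per-skb counts remembered in skb->csum to free acked WR skbs and advance snd_una. The handler is short enough to sketch nearly whole:

static int do_wr_ack_sketch(struct t3cdev *cdev, struct sk_buff *skb,
                            void *ctx)
{
        struct cxgbi_sock *csk = ctx;
        struct cpl_wr_ack *hdr = cplhdr(skb);

        /* credits freed, new snd_una; libcxgbi dequeues acked WR skbs */
        cxgbi_sock_rcv_wr_ack(csk, ntohs(hdr->credits),
                              ntohl(hdr->snd_una), 1);
        __kfree_skb(skb);
        return 0;
}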
960 struct sk_buff *skb = NULL; in init_act_open() local
986 skb = alloc_wr(sizeof(struct cpl_act_open_req), 0, GFP_KERNEL); in init_act_open()
987 if (!skb) in init_act_open()
989 skb->sk = (struct sock *)csk; in init_act_open()
990 set_arp_failure_handler(skb, act_open_arp_failure); in init_act_open()
1005 send_act_open_req(csk, skb, csk->l2t); in init_act_open()
1009 if (skb) in init_act_open()
1010 __kfree_skb(skb); in init_act_open()
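init_act_open() is the process-context twin of the retry timer: GFP_KERNEL is safe here, the csk pointer is stashed in skb->sk for the ARP-failure callback, and the error path frees whatever was allocated. A sketch of just that skeleton (the real routine also tears down tid/l2t state on failure):

static int init_act_open_sketch(struct cxgbi_sock *csk)
{
        struct sk_buff *skb;

        skb = alloc_wr(sizeof(struct cpl_act_open_req), 0, GFP_KERNEL);
        if (!skb)
                goto rel_resource;
        skb->sk = (struct sock *)csk;   /* recovered on ARP failure */
        set_arp_failure_handler(skb, act_open_arp_failure);
        send_act_open_req(csk, skb, csk->l2t);
        return 0;

rel_resource:
        /* later setup failures also land here with skb != NULL */
        if (skb)
                __kfree_skb(skb);
        return -EINVAL;
}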
1068 static inline void ulp_mem_io_set_hdr(struct sk_buff *skb, unsigned int addr) in ulp_mem_io_set_hdr() argument
1070 struct ulp_mem_io *req = (struct ulp_mem_io *)skb->head; in ulp_mem_io_set_hdr()
1095 struct sk_buff *skb = alloc_wr(sizeof(struct ulp_mem_io) + in ddp_set_map() local
1098 if (!skb) in ddp_set_map()
1101 ulp_mem_io_set_hdr(skb, pm_addr); in ddp_set_map()
1102 cxgbi_ddp_ppod_set((struct cxgbi_pagepod *)(skb->head + in ddp_set_map()
1105 skb->priority = CPL_PRIORITY_CONTROL; in ddp_set_map()
1106 cxgb3_ofld_send(cdev->lldev, skb); in ddp_set_map()
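ddp_set_map() and ddp_clear_map() reuse one header builder: pagepods are written into adapter memory with a ULP_MEM_WRITE bypass work request, addressed in 32-byte units with the payload length in flits, then sent at control priority. A sketch of that builder; treat the exact field encodings and the PPOD_SIZE constant as assumptions carried over from the cxgb3/libcxgbi headers:

static void ulp_mem_write_sketch(struct t3cdev *tdev, struct sk_buff *skb,
                                 unsigned int addr)
{
        struct ulp_mem_io *req = (struct ulp_mem_io *)skb->head;

        memset(req, 0, sizeof(*req));
        req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_BYPASS));
        req->cmd_lock_addr = htonl(V_ULP_MEMIO_ADDR(addr >> 5) |
                                   V_ULPTX_CMD(ULP_MEM_WRITE));
        req->len = htonl(V_ULP_MEMIO_DATA_LEN(PPOD_SIZE >> 5) |
                         V_ULPTX_NFLITS((PPOD_SIZE >> 3) + 1));
        skb->priority = CPL_PRIORITY_CONTROL;   /* control queue */
        cxgb3_ofld_send(tdev, skb);
}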
1124 struct sk_buff *skb = alloc_wr(sizeof(struct ulp_mem_io) + in ddp_clear_map() local
1127 if (!skb) { in ddp_clear_map()
1132 ulp_mem_io_set_hdr(skb, pm_addr); in ddp_clear_map()
1133 skb->priority = CPL_PRIORITY_CONTROL; in ddp_clear_map()
1134 cxgb3_ofld_send(cdev->lldev, skb); in ddp_clear_map()
1141 struct sk_buff *skb = alloc_wr(sizeof(struct cpl_set_tcb_field), 0, in ddp_setup_conn_pgidx() local
1148 if (!skb) in ddp_setup_conn_pgidx()
1152 req = (struct cpl_set_tcb_field *)skb->head; in ddp_setup_conn_pgidx()
1160 skb->priority = CPL_PRIORITY_CONTROL; in ddp_setup_conn_pgidx()
1162 cxgb3_ofld_send(csk->cdev->lldev, skb); in ddp_setup_conn_pgidx()
1178 struct sk_buff *skb = alloc_wr(sizeof(struct cpl_set_tcb_field), 0, in ddp_setup_conn_digest() local
1185 if (!skb) in ddp_setup_conn_digest()
1189 req = (struct cpl_set_tcb_field *)skb->head; in ddp_setup_conn_digest()
1197 skb->priority = CPL_PRIORITY_CONTROL; in ddp_setup_conn_digest()
1199 cxgb3_ofld_send(csk->cdev->lldev, skb); in ddp_setup_conn_digest()
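Both ddp_setup_conn_pgidx() and ddp_setup_conn_digest() end in the same CPL_SET_TCB_FIELD write: pick a TCB word, a mask, and a value, request no reply, and send at control priority. A sketch of the shared shape; the word/mask/value parameters here are placeholders, not the driver's real per-field encodings:

static int set_tcb_field_sketch(struct cxgbi_sock *csk, unsigned int tid,
                                u16 word, u64 mask, u64 val)
{
        struct cpl_set_tcb_field *req;
        struct sk_buff *skb = alloc_wr(sizeof(*req), 0, GFP_KERNEL);

        if (!skb)
                return -ENOMEM;

        req = (struct cpl_set_tcb_field *)skb->head;
        req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
        req->reply = V_NO_REPLY(1);     /* fire and forget */
        req->cpu_idx = 0;
        req->word = htons(word);
        req->mask = cpu_to_be64(mask);
        req->val = cpu_to_be64(val);
        skb->priority = CPL_PRIORITY_CONTROL;

        cxgb3_ofld_send(csk->cdev->lldev, skb);
        return 0;
}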