Lines Matching refs:buff
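
All of the hits below are references to the local sk_buff pointer buff in net/ipv4/tcp_output.c; the sk_gfp_atomic() and skb_mstamp_get() calls suggest a ~4.x-era kernel. The left-hand numbers are tcp_output.c source line numbers, and the references cluster in four functions: tcp_fragment(), tso_fragment(), tcp_connect() and tcp_send_ack(). A short annotated sketch follows each cluster.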

1150 	struct sk_buff *buff; in tcp_fragment() local
1166 buff = sk_stream_alloc_skb(sk, nsize, gfp); in tcp_fragment()
1167 if (!buff) in tcp_fragment()
1170 sk->sk_wmem_queued += buff->truesize; in tcp_fragment()
1171 sk_mem_charge(sk, buff->truesize); in tcp_fragment()
1173 buff->truesize += nlen; in tcp_fragment()
1177 TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len; in tcp_fragment()
1178 TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq; in tcp_fragment()
1179 TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq; in tcp_fragment()
1184 TCP_SKB_CB(buff)->tcp_flags = flags; in tcp_fragment()
1185 TCP_SKB_CB(buff)->sacked = TCP_SKB_CB(skb)->sacked; in tcp_fragment()
1189 buff->csum = csum_partial_copy_nocheck(skb->data + len, in tcp_fragment()
1190 skb_put(buff, nsize), in tcp_fragment()
1195 skb->csum = csum_block_sub(skb->csum, buff->csum, len); in tcp_fragment()
1198 skb_split(skb, buff, len); in tcp_fragment()
1201 buff->ip_summed = skb->ip_summed; in tcp_fragment()
1203 buff->tstamp = skb->tstamp; in tcp_fragment()
1204 tcp_fragment_tstamp(skb, buff); in tcp_fragment()
1210 tcp_set_skb_tso_segs(sk, buff, mss_now); in tcp_fragment()
1215 if (!before(tp->snd_nxt, TCP_SKB_CB(buff)->end_seq)) { in tcp_fragment()
1217 tcp_skb_pcount(buff); in tcp_fragment()
1224 __skb_header_release(buff); in tcp_fragment()
1225 tcp_insert_write_queue_after(skb, buff, sk); in tcp_fragment()
1717 struct sk_buff *buff; in tso_fragment() local
1725 buff = sk_stream_alloc_skb(sk, 0, gfp); in tso_fragment()
1726 if (unlikely(!buff)) in tso_fragment()
1729 sk->sk_wmem_queued += buff->truesize; in tso_fragment()
1730 sk_mem_charge(sk, buff->truesize); in tso_fragment()
1731 buff->truesize += nlen; in tso_fragment()
1735 TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len; in tso_fragment()
1736 TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq; in tso_fragment()
1737 TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq; in tso_fragment()
1742 TCP_SKB_CB(buff)->tcp_flags = flags; in tso_fragment()
1745 TCP_SKB_CB(buff)->sacked = 0; in tso_fragment()
1747 buff->ip_summed = skb->ip_summed = CHECKSUM_PARTIAL; in tso_fragment()
1748 skb_split(skb, buff, len); in tso_fragment()
1749 tcp_fragment_tstamp(skb, buff); in tso_fragment()
1753 tcp_set_skb_tso_segs(sk, buff, mss_now); in tso_fragment()
1756 __skb_header_release(buff); in tso_fragment()
1757 tcp_insert_write_queue_after(skb, buff, sk); in tso_fragment()
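
tso_fragment() repeats the same carve-and-insert on the transmit fast path, where the skb is known to keep its payload in page frags with checksum offload. Two deliberate differences show in the listing: the zero-size linear allocation at 1725, and sacked = 0 at 1745 because this tail has never been transmitted and so carries no SACK state. A sketch under the same assumptions (the _sketch name is again hypothetical):

#include <net/tcp.h>

static int tso_fragment_sketch(struct sock *sk, struct sk_buff *skb,
                               unsigned int len, unsigned int mss_now,
                               gfp_t gfp)
{
        int nlen = skb->len - len;      /* everything past len moves */
        struct sk_buff *buff;
        u8 flags;

        /* Zero-size linear area: a TSO skb keeps its data in frags (1725). */
        buff = sk_stream_alloc_skb(sk, 0, gfp);
        if (unlikely(!buff))
                return -ENOMEM;

        sk->sk_wmem_queued += buff->truesize;
        sk_mem_charge(sk, buff->truesize);
        buff->truesize += nlen;
        skb->truesize -= nlen;

        /* Identical sequence-space carve to tcp_fragment() (1735-1737). */
        TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len;
        TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq;
        TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;

        flags = TCP_SKB_CB(skb)->tcp_flags;
        TCP_SKB_CB(skb)->tcp_flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH);
        TCP_SKB_CB(buff)->tcp_flags = flags;

        /* This tail was never transmitted: no SACK state to copy (1745). */
        TCP_SKB_CB(buff)->sacked = 0;

        /* Both halves will be checksummed by the NIC (1747). */
        buff->ip_summed = skb->ip_summed = CHECKSUM_PARTIAL;
        skb_split(skb, buff, len);
        tcp_fragment_tstamp(skb, buff);

        tcp_set_skb_tso_segs(sk, skb, mss_now);
        tcp_set_skb_tso_segs(sk, buff, mss_now);

        __skb_header_release(buff);
        tcp_insert_write_queue_after(skb, buff, sk);
        return 0;
}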
3236 struct sk_buff *buff; in tcp_connect() local
3246 buff = sk_stream_alloc_skb(sk, 0, sk->sk_allocation); in tcp_connect()
3247 if (unlikely(!buff)) in tcp_connect()
3250 tcp_init_nondata_skb(buff, tp->write_seq++, TCPHDR_SYN); in tcp_connect()
3252 tcp_connect_queue_skb(sk, buff); in tcp_connect()
3253 tcp_ecn_send_syn(sk, buff); in tcp_connect()
3256 err = tp->fastopen_req ? tcp_send_syn_data(sk, buff) : in tcp_connect()
3257 tcp_transmit_skb(sk, buff, 1, sk->sk_allocation); in tcp_connect()
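
In tcp_connect(), buff is the SYN itself. It consumes one sequence number (write_seq++ at 3250), is queued so it can be retransmitted (3252), and leaves either as a TCP Fast Open SYN+data or as a plain SYN (3256-3257). A minimal sketch of just those steps, omitting the connection-init and retransmit-timer code that surrounds them:

#include <net/tcp.h>

static int tcp_connect_sketch(struct sock *sk)
{
        struct tcp_sock *tp = tcp_sk(sk);
        struct sk_buff *buff;
        int err;

        buff = sk_stream_alloc_skb(sk, 0, sk->sk_allocation);
        if (unlikely(!buff))
                return -ENOBUFS;

        /* The SYN occupies exactly one sequence number (3250). */
        tcp_init_nondata_skb(buff, tp->write_seq++, TCPHDR_SYN);

        tcp_connect_queue_skb(sk, buff);        /* keep for retransmit (3252) */
        tcp_ecn_send_syn(sk, buff);             /* negotiate ECN if on (3253) */

        /* Fast Open sends data on the SYN when a request is pending;
         * clone_it=1 because the queued copy must survive for retransmit. */
        err = tp->fastopen_req ? tcp_send_syn_data(sk, buff) :
              tcp_transmit_skb(sk, buff, 1, sk->sk_allocation);
        return err;
}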
3337 struct sk_buff *buff; in tcp_send_ack() local
3349 buff = alloc_skb(MAX_TCP_HEADER, sk_gfp_atomic(sk, GFP_ATOMIC)); in tcp_send_ack()
3350 if (!buff) { in tcp_send_ack()
3359 skb_reserve(buff, MAX_TCP_HEADER); in tcp_send_ack()
3360 tcp_init_nondata_skb(buff, tcp_acceptable_seq(sk), TCPHDR_ACK); in tcp_send_ack()
3368 skb_set_tcp_pure_ack(buff); in tcp_send_ack()
3371 skb_mstamp_get(&buff->skb_mstamp); in tcp_send_ack()
3372 tcp_transmit_skb(sk, buff, 0, sk_gfp_atomic(sk, GFP_ATOMIC)); in tcp_send_ack()
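
tcp_send_ack() is the odd one out: a pure ACK carries no data and never enters the write queue, so it is built with plain alloc_skb() under GFP_ATOMIC (it can fire from the delayed-ACK timer in softirq context) and is transmitted at once. Sketch with the same caveats:

#include <net/tcp.h>

static void tcp_send_ack_sketch(struct sock *sk)
{
        struct sk_buff *buff;

        /* Atomic allocation: may run from timer/softirq context (3349). */
        buff = alloc_skb(MAX_TCP_HEADER, sk_gfp_atomic(sk, GFP_ATOMIC));
        if (!buff)
                return;         /* real code re-arms the delayed-ACK timer */

        skb_reserve(buff, MAX_TCP_HEADER);      /* headroom for headers (3359) */

        /* An ACK consumes no sequence space: end_seq == seq (3360). */
        tcp_init_nondata_skb(buff, tcp_acceptable_seq(sk), TCPHDR_ACK);

        /* Flag it as a data-less pure ACK so send-queue accounting treats
         * it as nearly weightless (3368). */
        skb_set_tcp_pure_ack(buff);

        skb_mstamp_get(&buff->skb_mstamp);      /* send timestamp (3371) */
        /* clone_it=0: nothing is queued, nothing to retransmit (3372). */
        tcp_transmit_skb(sk, buff, 0, sk_gfp_atomic(sk, GFP_ATOMIC));
}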