Lines Matching refs:tp in net/ipv4/tcp_ipv4.c (each entry: source line number, source text, enclosing function; "local" marks a declaration of the tp local variable)

111 struct tcp_sock *tp = tcp_sk(sk); in tcp_twsk_unique() local
127 tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2; in tcp_twsk_unique()
128 if (tp->write_seq == 0) in tcp_twsk_unique()
129 tp->write_seq = 1; in tcp_twsk_unique()
130 tp->rx_opt.ts_recent = tcptw->tw_ts_recent; in tcp_twsk_unique()
131 tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp; in tcp_twsk_unique()
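
Note: lines 127-129 reuse a TIME-WAIT port by starting the new connection 65535 + 2 bytes past the old tw_snd_nxt; 0 is skipped because tcp_v4_connect() (line 238 below) treats write_seq == 0 as "no sequence chosen yet" and would pick a fresh ISN. A minimal stand-alone model of that guard, using a hypothetical next_write_seq() helper:

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical model of lines 127-129: u32 arithmetic wraps
     * modulo 2^32, and 0 must be avoided because it means "unset". */
    static uint32_t next_write_seq(uint32_t tw_snd_nxt)
    {
        uint32_t seq = tw_snd_nxt + 65535 + 2;
        return seq ? seq : 1;
    }

    int main(void)
    {
        printf("%u\n", next_write_seq(0xFFFEFFFFu)); /* wraps to 0, returns 1 */
        return 0;
    }
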
145 struct tcp_sock *tp = tcp_sk(sk); in tcp_v4_connect() local
194 if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) { in tcp_v4_connect()
196 tp->rx_opt.ts_recent = 0; in tcp_v4_connect()
197 tp->rx_opt.ts_recent_stamp = 0; in tcp_v4_connect()
198 if (likely(!tp->repair)) in tcp_v4_connect()
199 tp->write_seq = 0; in tcp_v4_connect()
203 !tp->rx_opt.ts_recent_stamp && fl4->daddr == daddr) in tcp_v4_connect()
213 tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT; in tcp_v4_connect()
238 if (!tp->write_seq && likely(!tp->repair)) in tcp_v4_connect()
239 tp->write_seq = secure_tcp_sequence_number(inet->inet_saddr, in tcp_v4_connect()
244 inet->inet_id = tp->write_seq ^ jiffies; in tcp_v4_connect()
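
Note: lines 194-199 clear the cached PAWS timestamp state when the socket is reconnected to a different destination, since those values belong to the old peer, and also zero write_seq (unless the socket is in repair mode) so that line 238 generates a fresh ISN. A sketch of that reset, with a hypothetical conn_state struct standing in for tcp_sock:

    #include <stdint.h>

    /* Hypothetical stand-in for the tcp_sock fields used above. */
    struct conn_state {
        uint32_t ts_recent;        /* cached peer timestamp (PAWS)   */
        uint32_t ts_recent_stamp;  /* when ts_recent was recorded    */
        uint32_t write_seq;        /* next send sequence; 0 == unset */
        uint32_t daddr;            /* current destination address    */
    };

    /* On reconnect to a new destination, stale per-peer state is
     * dropped; write_seq = 0 forces a fresh ISN to be picked later. */
    static void reset_on_new_daddr(struct conn_state *c, uint32_t daddr)
    {
        if (c->ts_recent_stamp && c->daddr != daddr) {
            c->ts_recent = 0;
            c->ts_recent_stamp = 0;
            c->write_seq = 0;
        }
        c->daddr = daddr;
    }
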
362 struct tcp_sock *tp; in tcp_v4_err() local
408 tp = tcp_sk(sk); in tcp_v4_err()
410 fastopen = tp->fastopen_rsk; in tcp_v4_err()
411 snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una; in tcp_v4_err()
413 !between(seq, snd_una, tp->snd_nxt)) { in tcp_v4_err()
440 tp->mtu_info = info; in tcp_v4_err()
444 if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &tp->tsq_flags)) in tcp_v4_err()
455 if (seq != tp->snd_una || !icsk->icsk_retransmits || in tcp_v4_err()
463 icsk->icsk_rto = tp->srtt_us ? __tcp_set_rto(tp) : in tcp_v4_err()
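
Note: lines 410-413 ignore ICMP errors whose echoed sequence number falls outside the unacknowledged window [snd_una, snd_nxt] (for a Fast Open socket still in SYN-RECV, the request's snt_isn stands in for snd_una); lines 440-444 defer PMTU processing via a tsq_flags bit when the socket is locked by the user, and line 463 recomputes the RTO from srtt_us, falling back to the initial timeout when no RTT sample exists yet. The window test relies on the kernel's wraparound-safe between() helper, which reads roughly as follows:

    #include <stdint.h>
    #include <stdbool.h>

    /* Wraparound-safe "is seq2 <= seq1 <= seq3?" over mod-2^32
     * sequence space, as in the between() helper in include/net/tcp.h. */
    static bool between(uint32_t seq1, uint32_t seq2, uint32_t seq3)
    {
        return seq3 - seq2 >= seq1 - seq2;
    }
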
875 const struct tcp_sock *tp = tcp_sk(sk); in tcp_md5_do_lookup() local
881 md5sig = rcu_dereference_check(tp->md5sig_info, in tcp_md5_do_lookup()
916 struct tcp_sock *tp = tcp_sk(sk); in tcp_md5_do_add() local
927 md5sig = rcu_dereference_protected(tp->md5sig_info, in tcp_md5_do_add()
937 rcu_assign_pointer(tp->md5sig_info, md5sig); in tcp_md5_do_add()
975 struct tcp_sock *tp = tcp_sk(sk); in tcp_clear_md5_list() local
980 md5sig = rcu_dereference_protected(tp->md5sig_info, 1); in tcp_clear_md5_list()
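
Note: the MD5 key list hangs off tp->md5sig_info and is RCU-protected: the lookup on the packet path uses rcu_dereference_check() (line 881), writers holding the socket lock use rcu_dereference_protected() (lines 927, 980) and publish the list head with rcu_assign_pointer() (line 937). tcp_clear_md5_list() passes 1 as the lockdep condition because it runs on the destroy path, where no concurrent access is possible.
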
1497 struct tcp_sock *tp = tcp_sk(sk); in tcp_prequeue() local
1499 if (sysctl_tcp_low_latency || !tp->ucopy.task) in tcp_prequeue()
1503 skb_queue_len(&tp->ucopy.prequeue) == 0) in tcp_prequeue()
1517 __skb_queue_tail(&tp->ucopy.prequeue, skb); in tcp_prequeue()
1518 tp->ucopy.memory += skb->truesize; in tcp_prequeue()
1519 if (tp->ucopy.memory > sk->sk_rcvbuf) { in tcp_prequeue()
1524 while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) { in tcp_prequeue()
1530 tp->ucopy.memory = 0; in tcp_prequeue()
1531 } else if (skb_queue_len(&tp->ucopy.prequeue) == 1) { in tcp_prequeue()
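
Note: prequeueing is skipped when sysctl_tcp_low_latency is set or no reader is blocked in recvmsg() (tp->ucopy.task, line 1499). Each queued skb's truesize is charged to tp->ucopy.memory (lines 1517-1518); if the total exceeds sk->sk_rcvbuf, the whole prequeue is drained and the accounting reset (lines 1524-1530), while the first packet queued wakes the reader (line 1531). A stand-alone sketch of that byte-budget accounting, with hypothetical names:

    #include <stddef.h>

    /* Hypothetical model of the prequeue byte budget: charge each
     * packet's true size; once over budget, drain everything and reset. */
    struct prequeue {
        size_t memory;   /* bytes currently queued (tp->ucopy.memory) */
        size_t rcvbuf;   /* budget (sk->sk_rcvbuf)                    */
    };

    /* Returns 1 if the caller must drain the queue now, 0 otherwise. */
    static int prequeue_charge(struct prequeue *q, size_t truesize)
    {
        q->memory += truesize;
        if (q->memory > q->rcvbuf) {
            /* caller processes all queued packets, then: */
            q->memory = 0;
            return 1;
        }
        return 0;
    }
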
1776 struct tcp_sock *tp = tcp_sk(sk); in tcp_v4_destroy_sock() local
1786 __skb_queue_purge(&tp->out_of_order_queue); in tcp_v4_destroy_sock()
1790 if (tp->md5sig_info) { in tcp_v4_destroy_sock()
1792 kfree_rcu(tp->md5sig_info, rcu); in tcp_v4_destroy_sock()
1793 tp->md5sig_info = NULL; in tcp_v4_destroy_sock()
1798 __skb_queue_purge(&tp->ucopy.prequeue); in tcp_v4_destroy_sock()
1804 BUG_ON(tp->fastopen_rsk); in tcp_v4_destroy_sock()
1807 tcp_free_fastopen_req(tp); in tcp_v4_destroy_sock()
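
Note: teardown order matters here: the out-of-order queue and the prequeue are purged (lines 1786, 1798), the MD5 key list is cleared and its control block freed via kfree_rcu() so that late RCU readers stay safe (lines 1790-1793), and BUG_ON(tp->fastopen_rsk) (line 1804) asserts that any TCP Fast Open request socket was detached before the socket reached destroy.
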
2191 const struct tcp_sock *tp = tcp_sk(sk); in get_tcp4_sock() local
2223 rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0); in get_tcp4_sock()
2228 tp->write_seq - tp->snd_una, in get_tcp4_sock()
2240 tp->snd_cwnd, in get_tcp4_sock()
2243 (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)); in get_tcp4_sock()
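
Note: these lines feed /proc/net/tcp: the receive-queue figure is rcv_nxt - copied_seq clamped at zero (line 2223), the transmit queue is write_seq - snd_una (line 2228), and the last field reports -1 instead of snd_ssthresh while the connection is still in initial slow start (line 2243). A sketch of the two queue-depth computations, assuming the same u32 sequence fields:

    #include <stdint.h>

    /* Hypothetical helpers mirroring the /proc/net/tcp queue fields:
     * received-but-uncopied bytes, and sent-but-unacked bytes. */
    static int rx_queue_bytes(uint32_t rcv_nxt, uint32_t copied_seq)
    {
        int rx = (int)(rcv_nxt - copied_seq);
        return rx > 0 ? rx : 0;   /* clamp, matching max_t(int, ..., 0) */
    }

    static uint32_t tx_queue_bytes(uint32_t write_seq, uint32_t snd_una)
    {
        return write_seq - snd_una;
    }
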