Lines Matching refs:sock
53 void tcp_time_wait(struct sock *sk, int state, int timeo);
293 static inline bool tcp_under_memory_pressure(const struct sock *sk) in tcp_under_memory_pressure()
317 static inline bool tcp_out_of_memory(struct sock *sk) in tcp_out_of_memory()
325 void sk_forced_mem_schedule(struct sock *sk, int size);
327 static inline bool tcp_too_many_orphans(struct sock *sk, int shift) in tcp_too_many_orphans()
340 bool tcp_check_oom(struct sock *sk, int shift);
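The memory-pressure and orphan checks above feed a single out-of-memory decision. A minimal sketch of that combination, assuming the same semantics as the in-tree tcp_check_oom() in net/ipv4/tcp.c (which additionally prints ratelimited warnings):

/* Sketch only: the oom verdict is the OR of the two predicates
 * declared above. Assumes kernel context with net/tcp.h included. */
static bool example_tcp_oom(struct sock *sk, int shift)
{
	bool too_many_orphans = tcp_too_many_orphans(sk, shift);
	bool out_of_memory = tcp_out_of_memory(sk);

	return too_many_orphans || out_of_memory;
}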
355 void tcp_shutdown(struct sock *sk, int how);
361 int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
362 int tcp_sendpage(struct sock *sk, struct page *page, int offset, size_t size,
364 void tcp_release_cb(struct sock *sk);
366 void tcp_write_timer_handler(struct sock *sk);
367 void tcp_delack_timer_handler(struct sock *sk);
368 int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg);
369 int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb);
370 void tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
372 void tcp_rcv_space_adjust(struct sock *sk);
373 int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp);
374 void tcp_twsk_destructor(struct sock *sk);
379 static inline void tcp_dec_quickack_mode(struct sock *sk, in tcp_dec_quickack_mode()
410 struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
412 int tcp_child_process(struct sock *parent, struct sock *child,
414 void tcp_enter_loss(struct sock *sk);
416 void tcp_update_metrics(struct sock *sk);
417 void tcp_init_metrics(struct sock *sk);
421 bool tcp_remember_stamp(struct sock *sk);
423 void tcp_fetch_timewait_stamp(struct sock *sk, struct dst_entry *dst);
425 void tcp_close(struct sock *sk, long timeout);
426 void tcp_init_sock(struct sock *sk);
427 unsigned int tcp_poll(struct file *file, struct socket *sock,
429 int tcp_getsockopt(struct sock *sk, int level, int optname,
431 int tcp_setsockopt(struct sock *sk, int level, int optname,
433 int compat_tcp_getsockopt(struct sock *sk, int level, int optname,
435 int compat_tcp_setsockopt(struct sock *sk, int level, int optname,
437 void tcp_set_keepalive(struct sock *sk, int val);
439 int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
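tcp_setsockopt(), tcp_getsockopt() and tcp_set_keepalive() above sit behind the ordinary socket-option system calls. A userspace sketch, assuming standard IPPROTO_TCP/SOL_SOCKET option names and eliding error handling:

#include <netinet/in.h>
#include <netinet/tcp.h>
#include <string.h>
#include <sys/socket.h>

static void tune_tcp_socket(int fd)
{
	int one = 1;
	char ca[16] = "cubic";          /* any registered congestion control */
	socklen_t len = sizeof(ca);

	/* IPPROTO_TCP options are handled by tcp_setsockopt()/tcp_getsockopt(). */
	setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, &one, sizeof(one));
	setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, ca, strlen(ca));
	getsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, ca, &len);

	/* SO_KEEPALIVE on a TCP socket ends up in tcp_set_keepalive(). */
	setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &one, sizeof(one));
}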
450 void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb);
451 void tcp_v4_mtu_reduced(struct sock *sk);
452 void tcp_req_err(struct sock *sk, u32 seq, bool abort);
453 int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb);
454 struct sock *tcp_create_openreq_child(const struct sock *sk,
457 void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst);
458 struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
463 int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb);
464 int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
465 int tcp_connect(struct sock *sk);
466 struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
470 int tcp_disconnect(struct sock *sk, int flags);
472 void tcp_finish_connect(struct sock *sk, struct sk_buff *skb);
473 int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size);
474 void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb);
477 struct sock *tcp_get_cookie_sock(struct sock *sk, struct sk_buff *skb,
482 struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb);
500 static inline void tcp_synq_overflow(const struct sock *sk) in tcp_synq_overflow()
510 static inline bool tcp_synq_no_recent_overflow(const struct sock *sk) in tcp_synq_no_recent_overflow()
536 struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb);
544 void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
546 bool tcp_may_send_now(struct sock *sk);
547 int __tcp_retransmit_skb(struct sock *, struct sk_buff *);
548 int tcp_retransmit_skb(struct sock *, struct sk_buff *);
549 void tcp_retransmit_timer(struct sock *sk);
550 void tcp_xmit_retransmit_queue(struct sock *);
551 void tcp_simple_retransmit(struct sock *);
552 int tcp_trim_head(struct sock *, struct sk_buff *, u32);
553 int tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int, gfp_t);
555 void tcp_send_probe0(struct sock *);
556 void tcp_send_partial(struct sock *);
557 int tcp_write_wakeup(struct sock *, int mib);
558 void tcp_send_fin(struct sock *sk);
559 void tcp_send_active_reset(struct sock *sk, gfp_t priority);
560 int tcp_send_synack(struct sock *);
561 void tcp_push_one(struct sock *, unsigned int mss_now);
562 void tcp_send_ack(struct sock *sk);
563 void tcp_send_delayed_ack(struct sock *sk);
564 void tcp_send_loss_probe(struct sock *sk);
565 bool tcp_schedule_loss_probe(struct sock *sk);
568 void tcp_resume_early_retransmit(struct sock *sk);
569 void tcp_rearm_rto(struct sock *sk);
570 void tcp_synack_rtt_meas(struct sock *sk, struct request_sock *req);
571 void tcp_reset(struct sock *sk);
575 void tcp_init_xmit_timers(struct sock *);
576 static inline void tcp_clear_xmit_timers(struct sock *sk) in tcp_clear_xmit_timers()
581 unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu);
582 unsigned int tcp_current_mss(struct sock *sk);
608 void tcp_get_info(struct sock *, struct tcp_info *);
613 int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
616 void tcp_initialize_rcv_mss(struct sock *sk);
618 int tcp_mtu_to_mss(struct sock *sk, int pmtu);
619 int tcp_mss_to_mtu(struct sock *sk, int mss);
620 void tcp_mtup_init(struct sock *sk);
621 void tcp_init_buffer_space(struct sock *sk);
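tcp_mtu_to_mss() and tcp_mss_to_mtu() above translate between the path MTU and the TCP MSS by subtracting or adding the network and transport headers (plus extension headers and TCP options). A back-of-the-envelope sketch for plain IPv4 with no IP or TCP options; the real helpers also account for icsk_ext_hdr_len and the current option length:

/* Rough sketch: 20-byte IPv4 header + 20-byte base TCP header.
 * A 1500-byte Ethernet MTU therefore yields a 1460-byte MSS. */
static int rough_mtu_to_mss(int pmtu)
{
	return pmtu - 20 - 20;
}

static int rough_mss_to_mtu(int mss)
{
	return mss + 20 + 20;
}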
623 static inline void tcp_bound_rto(const struct sock *sk) in tcp_bound_rto()
646 static inline void tcp_fast_path_check(struct sock *sk) in tcp_fast_path_check()
658 static inline u32 tcp_rto_min(struct sock *sk) in tcp_rto_min()
668 static inline u32 tcp_rto_min_us(struct sock *sk) in tcp_rto_min_us()
701 u32 __tcp_select_window(struct sock *sk);
703 void tcp_send_window_probe(struct sock *sk);
855 void (*init)(struct sock *sk);
857 void (*release)(struct sock *sk);
860 u32 (*ssthresh)(struct sock *sk);
862 void (*cong_avoid)(struct sock *sk, u32 ack, u32 acked);
864 void (*set_state)(struct sock *sk, u8 new_state);
866 void (*cwnd_event)(struct sock *sk, enum tcp_ca_event ev);
868 void (*in_ack_event)(struct sock *sk, u32 flags);
870 u32 (*undo_cwnd)(struct sock *sk);
872 void (*pkts_acked)(struct sock *sk, u32 num_acked, s32 rtt_us);
874 size_t (*get_info)(struct sock *sk, u32 ext, int *attr,
884 void tcp_assign_congestion_control(struct sock *sk);
885 void tcp_init_congestion_control(struct sock *sk);
886 void tcp_cleanup_congestion_control(struct sock *sk);
892 int tcp_set_congestion_control(struct sock *sk, const char *name);
896 u32 tcp_reno_ssthresh(struct sock *sk);
897 void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked);
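The function-pointer hooks above form struct tcp_congestion_ops, and the Reno helpers directly above can back a minimal implementation. A sketch of such a module, assuming tcp_register_congestion_control()/tcp_unregister_congestion_control() from net/tcp.h (they do not appear in this match list because they take no struct sock argument); note that newer kernels also require an .undo_cwnd hook:

#include <linux/module.h>
#include <net/tcp.h>

/* Minimal congestion control sketch: reuse the stock Reno behaviour
 * declared above. Registration helpers are assumed from net/tcp.h. */
static struct tcp_congestion_ops example_reno __read_mostly = {
	.ssthresh	= tcp_reno_ssthresh,
	.cong_avoid	= tcp_reno_cong_avoid,
	.name		= "example_reno",
	.owner		= THIS_MODULE,
};

static int __init example_reno_init(void)
{
	return tcp_register_congestion_control(&example_reno);
}

static void __exit example_reno_exit(void)
{
	tcp_unregister_congestion_control(&example_reno);
}

module_init(example_reno_init);
module_exit(example_reno_exit);
MODULE_LICENSE("GPL");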
911 static inline bool tcp_ca_needs_ecn(const struct sock *sk) in tcp_ca_needs_ecn()
918 static inline void tcp_set_ca_state(struct sock *sk, const u8 ca_state) in tcp_set_ca_state()
927 static inline void tcp_ca_event(struct sock *sk, const enum tcp_ca_event event) in tcp_ca_event()
1014 static inline bool tcp_in_cwnd_reduction(const struct sock *sk) in tcp_in_cwnd_reduction()
1024 static inline __u32 tcp_current_ssthresh(const struct sock *sk) in tcp_current_ssthresh()
1039 void tcp_enter_cwr(struct sock *sk);
1080 static inline bool tcp_is_cwnd_limited(const struct sock *sk) in tcp_is_cwnd_limited()
1097 static inline unsigned long tcp_probe0_base(const struct sock *sk) in tcp_probe0_base()
1103 static inline unsigned long tcp_probe0_when(const struct sock *sk, in tcp_probe0_when()
1111 static inline void tcp_check_probe_timer(struct sock *sk) in tcp_check_probe_timer()
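tcp_probe0_base() and tcp_probe0_when() above compute the zero-window probe timeout: the base is the current RTO (floored at TCP_RTO_MIN), shifted left by the connection's backoff count and capped at a caller-supplied maximum. A hedged arithmetic sketch of that policy; the exact clamping lives in the inline helpers themselves:

/* Sketch of the probe0 backoff arithmetic: with a 200 ms base and a
 * backoff count of 3, the next probe fires after
 * min(200 ms << 3, max_when) = min(1600 ms, max_when). */
static unsigned long rough_probe0_when(unsigned long base_ms,
				       unsigned int backoff,
				       unsigned long max_when_ms)
{
	unsigned long when = base_ms << backoff;

	return when < max_when_ms ? when : max_when_ms;
}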
1158 bool tcp_prequeue(struct sock *sk, struct sk_buff *skb);
1169 void tcp_set_state(struct sock *sk, int state);
1171 void tcp_done(struct sock *sk);
1180 void tcp_cwnd_restart(struct sock *sk, s32 delta);
1182 static inline void tcp_slow_start_after_idle_check(struct sock *sk) in tcp_slow_start_after_idle_check()
1207 static inline int tcp_space(const struct sock *sk) in tcp_space()
1213 static inline int tcp_full_space(const struct sock *sk) in tcp_full_space()
1219 const struct sock *sk_listener,
1222 void tcp_enter_memory_pressure(struct sock *sk);
1247 static inline int tcp_fin_time(const struct sock *sk) in tcp_fin_time()
1379 const struct sock *sk, const struct sk_buff *skb);
1380 int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
1382 int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr,
1384 struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
1385 const struct sock *addr_sk);
1388 struct tcp_md5sig_key *tcp_md5_do_lookup(const struct sock *sk,
1393 static inline struct tcp_md5sig_key *tcp_md5_do_lookup(const struct sock *sk, in tcp_md5_do_lookup()
1417 void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
1420 void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
1434 struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
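tcp_try_fastopen() and the fastopen cookie cache helpers above implement the kernel side of TCP Fast Open. A userspace sketch that exercises them, assuming the net.ipv4.tcp_fastopen sysctl enables the relevant client/server bits and eliding error handling:

#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

/* Server side: allow a small queue of pending Fast Open requests. */
static void enable_fastopen_listener(int listen_fd)
{
	int qlen = 16;

	setsockopt(listen_fd, IPPROTO_TCP, TCP_FASTOPEN, &qlen, sizeof(qlen));
}

/* Client side: carry data in the SYN via MSG_FASTOPEN. */
static ssize_t fastopen_send(int fd, const void *buf, size_t len,
			     const struct sockaddr *addr, socklen_t alen)
{
	return sendto(fd, buf, len, MSG_FASTOPEN, addr, alen);
}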
1449 static inline void tcp_write_queue_purge(struct sock *sk) in tcp_write_queue_purge()
1459 static inline struct sk_buff *tcp_write_queue_head(const struct sock *sk) in tcp_write_queue_head()
1464 static inline struct sk_buff *tcp_write_queue_tail(const struct sock *sk) in tcp_write_queue_tail()
1469 static inline struct sk_buff *tcp_write_queue_next(const struct sock *sk, in tcp_write_queue_next()
1475 static inline struct sk_buff *tcp_write_queue_prev(const struct sock *sk, in tcp_write_queue_prev()
1490 static inline struct sk_buff *tcp_send_head(const struct sock *sk) in tcp_send_head()
1495 static inline bool tcp_skb_is_last(const struct sock *sk, in tcp_skb_is_last()
1501 static inline void tcp_advance_send_head(struct sock *sk, const struct sk_buff *skb) in tcp_advance_send_head()
1509 static inline void tcp_check_send_head(struct sock *sk, struct sk_buff *skb_unlinked) in tcp_check_send_head()
1515 static inline void tcp_init_send_head(struct sock *sk) in tcp_init_send_head()
1520 static inline void __tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb) in __tcp_add_write_queue_tail()
1525 static inline void tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb) in tcp_add_write_queue_tail()
1538 static inline void __tcp_add_write_queue_head(struct sock *sk, struct sk_buff *skb) in __tcp_add_write_queue_head()
1546 struct sock *sk) in tcp_insert_write_queue_after()
1554 struct sock *sk) in tcp_insert_write_queue_before()
1562 static inline void tcp_unlink_write_queue(struct sk_buff *skb, struct sock *sk) in tcp_unlink_write_queue()
1567 static inline bool tcp_write_queue_empty(struct sock *sk) in tcp_write_queue_empty()
1572 static inline void tcp_push_pending_frames(struct sock *sk) in tcp_push_pending_frames()
1596 static inline void tcp_advance_highest_sack(struct sock *sk, struct sk_buff *skb) in tcp_advance_highest_sack()
1602 static inline struct sk_buff *tcp_highest_sack(struct sock *sk) in tcp_highest_sack()
1607 static inline void tcp_highest_sack_reset(struct sock *sk) in tcp_highest_sack_reset()
1613 static inline void tcp_highest_sack_combine(struct sock *sk, in tcp_highest_sack_combine()
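The write-queue accessors above (tcp_send_head(), tcp_write_queue_next(), tcp_skb_is_last(), ...) expose the send/retransmit queue hanging off the socket. A sketch that walks the not-yet-sent tail of the queue with those helpers; it assumes the socket lock is held, and in-tree code would normally reach for the tcp_for_write_queue*() iterators instead:

/* Sketch: count skbs queued but not yet sent, starting at the send head. */
static unsigned int count_unsent_skbs(struct sock *sk)
{
	struct sk_buff *skb = tcp_send_head(sk);
	unsigned int n = 0;

	while (skb) {
		n++;
		if (tcp_skb_is_last(sk, skb))
			break;
		skb = tcp_write_queue_next(sk, skb);
	}

	return n;
}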
1648 struct sock *syn_wait_sk;
1659 void tcp_v4_destroy_sock(struct sock *sk);
1673 static inline bool tcp_stream_memory_free(const struct sock *sk) in tcp_stream_memory_free()
1686 int tcp_rtx_synack(const struct sock *sk, struct request_sock *req);
1689 struct sock *sk, struct sk_buff *skb);
1694 struct tcp_md5sig_key *(*md5_lookup) (const struct sock *sk,
1695 const struct sock *addr_sk);
1698 const struct sock *sk,
1700 int (*md5_parse)(struct sock *sk,
1709 struct tcp_md5sig_key *(*req_md5_lookup)(const struct sock *sk,
1710 const struct sock *addr_sk);
1713 const struct sock *sk,
1717 const struct sock *sk_listener,
1723 struct dst_entry *(*route_req)(const struct sock *sk, struct flowi *fl,
1727 int (*send_synack)(const struct sock *sk, struct dst_entry *dst,
1735 const struct sock *sk, struct sk_buff *skb, in cookie_init_sequence()
1744 const struct sock *sk, struct sk_buff *skb, in cookie_init_sequence()
1764 extern int tcp_rack_mark_lost(struct sock *sk);