Lines matching refs:sk — identifier cross-reference hits for sk among the inet_connection_sock (inet_csk) declarations. Each entry gives the source line number, the matching line, and, for inline helpers, the containing function.
39 int (*queue_xmit)(struct sock *sk, struct sk_buff *skb, struct flowi *fl);
40 void (*send_check)(struct sock *sk, struct sk_buff *skb);
41 int (*rebuild_header)(struct sock *sk);
42 void (*sk_rx_dst_set)(struct sock *sk, const struct sk_buff *skb);
43 int (*conn_request)(struct sock *sk, struct sk_buff *skb);
44 struct sock *(*syn_recv_sock)(const struct sock *sk, struct sk_buff *skb,
52 int (*setsockopt)(struct sock *sk, int level, int optname,
54 int (*getsockopt)(struct sock *sk, int level, int optname,
57 int (*compat_setsockopt)(struct sock *sk,
60 int (*compat_getsockopt)(struct sock *sk,
64 void (*addr2sockaddr)(struct sock *sk, struct sockaddr *);
65 int (*bind_conflict)(const struct sock *sk,
67 void (*mtu_reduced)(struct sock *sk);
102 unsigned int (*icsk_sync_mss)(struct sock *sk, u32 pmtu);
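The callback slots above (queue_xmit through mtu_reduced) belong to struct inet_connection_sock_af_ops; icsk_sync_mss is a member of struct inet_connection_sock itself. As a rough illustration of how these slots get filled, here is an abridged sketch modeled on the ipv4_specific table that TCP/IPv4 installs in net/ipv4/tcp_ipv4.c (field list trimmed, kernel-internal context assumed; the exact member set varies by kernel version):

#include <net/inet_connection_sock.h>
#include <net/tcp.h>
#include <net/ip.h>

/* Abridged sketch of a per-address-family ops table (illustrative name). */
static const struct inet_connection_sock_af_ops demo_ipv4_af_ops = {
        .queue_xmit     = ip_queue_xmit,
        .send_check     = tcp_v4_send_check,
        .rebuild_header = inet_sk_rebuild_header,
        .sk_rx_dst_set  = inet_sk_rx_dst_set,
        .conn_request   = tcp_v4_conn_request,
        .syn_recv_sock  = tcp_v4_syn_recv_sock,
        .setsockopt     = ip_setsockopt,
        .getsockopt     = ip_getsockopt,
        .addr2sockaddr  = inet_csk_addr2sockaddr,
        .bind_conflict  = inet_csk_bind_conflict,
        .mtu_reduced    = tcp_v4_mtu_reduced,
};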
147 static inline struct inet_connection_sock *inet_csk(const struct sock *sk) in inet_csk() argument
149 return (struct inet_connection_sock *)sk; in inet_csk()
152 static inline void *inet_csk_ca(const struct sock *sk) in inet_csk_ca() argument
154 return (void *)inet_csk(sk)->icsk_ca_priv; in inet_csk_ca()
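inet_csk() is just a cast from struct sock to struct inet_connection_sock, and inet_csk_ca() hands back the icsk_ca_priv scratch area that congestion-control modules use for per-connection state. A minimal sketch of that pattern follows; the struct name and fields are made up for illustration:

#include <net/inet_connection_sock.h>
#include <net/tcp.h>

struct demo_ca {                        /* hypothetical per-connection state */
        u32 epochs;
        u32 last_max_cwnd;
};

static void demo_ca_init(struct sock *sk)
{
        struct demo_ca *ca = inet_csk_ca(sk);   /* points into icsk_ca_priv */

        /* The private area is fixed-size; real modules guard it like this. */
        BUILD_BUG_ON(sizeof(struct demo_ca) > ICSK_CA_PRIV_SIZE);

        ca->epochs = 0;
        ca->last_max_cwnd = 0;
}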
157 struct sock *inet_csk_clone_lock(const struct sock *sk,
168 void inet_csk_init_xmit_timers(struct sock *sk,
172 void inet_csk_clear_xmit_timers(struct sock *sk);
174 static inline void inet_csk_schedule_ack(struct sock *sk) in inet_csk_schedule_ack() argument
176 inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_SCHED; in inet_csk_schedule_ack()
179 static inline int inet_csk_ack_scheduled(const struct sock *sk) in inet_csk_ack_scheduled() argument
181 return inet_csk(sk)->icsk_ack.pending & ICSK_ACK_SCHED; in inet_csk_ack_scheduled()
184 static inline void inet_csk_delack_init(struct sock *sk) in inet_csk_delack_init() argument
186 memset(&inet_csk(sk)->icsk_ack, 0, sizeof(inet_csk(sk)->icsk_ack)); in inet_csk_delack_init()
189 void inet_csk_delete_keepalive_timer(struct sock *sk);
190 void inet_csk_reset_keepalive_timer(struct sock *sk, unsigned long timeout);
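inet_csk_schedule_ack()/inet_csk_ack_scheduled() only set and test the ICSK_ACK_SCHED bit; the delayed-ACK state and the keepalive timer are managed through the helpers above. A hedged sketch of the keepalive side, loosely following tcp_set_keepalive() in net/ipv4/tcp_timer.c (demo_ name is illustrative):

#include <net/tcp.h>
#include <net/inet_connection_sock.h>

static void demo_set_keepalive(struct sock *sk, int on)
{
        /* Keepalive only makes sense on a connected socket. */
        if ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))
                return;

        if (on && !sock_flag(sk, SOCK_KEEPOPEN))
                inet_csk_reset_keepalive_timer(sk,
                                               keepalive_time_when(tcp_sk(sk)));
        else if (!on)
                inet_csk_delete_keepalive_timer(sk);
}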
196 static inline void inet_csk_clear_xmit_timer(struct sock *sk, const int what) in inet_csk_clear_xmit_timer() argument
198 struct inet_connection_sock *icsk = inet_csk(sk); in inet_csk_clear_xmit_timer()
203 sk_stop_timer(sk, &icsk->icsk_retransmit_timer); in inet_csk_clear_xmit_timer()
208 sk_stop_timer(sk, &icsk->icsk_delack_timer); in inet_csk_clear_xmit_timer()
221 static inline void inet_csk_reset_xmit_timer(struct sock *sk, const int what, in inet_csk_reset_xmit_timer() argument
225 struct inet_connection_sock *icsk = inet_csk(sk); in inet_csk_reset_xmit_timer()
230 sk, what, when, current_text_addr()); in inet_csk_reset_xmit_timer()
239 sk_reset_timer(sk, &icsk->icsk_retransmit_timer, icsk->icsk_timeout); in inet_csk_reset_xmit_timer()
243 sk_reset_timer(sk, &icsk->icsk_delack_timer, icsk->icsk_ack.timeout); in inet_csk_reset_xmit_timer()
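inet_csk_clear_xmit_timer() and inet_csk_reset_xmit_timer() cancel or (re)arm the retransmit and delayed-ACK timers, selected by the what argument (ICSK_TIME_RETRANS, ICSK_TIME_DACK, ...). A simplified sketch in the spirit of tcp_rearm_rto() (error handling and pacing details omitted):

#include <net/tcp.h>
#include <net/inet_connection_sock.h>

static void demo_rearm_rto(struct sock *sk)
{
        struct tcp_sock *tp = tcp_sk(sk);

        if (!tp->packets_out) {
                /* Nothing in flight: no retransmit timer needed. */
                inet_csk_clear_xmit_timer(sk, ICSK_TIME_RETRANS);
        } else {
                /* Something in flight: (re)arm RTO, capped at TCP_RTO_MAX. */
                inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
                                          inet_csk(sk)->icsk_rto, TCP_RTO_MAX);
        }
}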
261 struct sock *inet_csk_accept(struct sock *sk, int flags, int *err);
263 int inet_csk_bind_conflict(const struct sock *sk,
265 int inet_csk_get_port(struct sock *sk, unsigned short snum);
267 struct dst_entry *inet_csk_route_req(const struct sock *sk, struct flowi4 *fl4,
269 struct dst_entry *inet_csk_route_child_sock(const struct sock *sk,
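inet_csk_get_port() is the connection-oriented get_port implementation: bind() reaches it through sk->sk_prot->get_port, with inet_csk_bind_conflict() as the default conflict test applied while scanning the bind hash bucket. A stripped-down sketch of the calling side, loosely following inet_bind() in net/ipv4/af_inet.c (demo_ name is illustrative, error handling omitted):

#include <net/sock.h>
#include <linux/errno.h>

static int demo_bind_port(struct sock *sk, unsigned short snum)
{
        /* For TCP, sk->sk_prot->get_port points at inet_csk_get_port(). */
        if (sk->sk_prot->get_port(sk, snum))
                return -EADDRINUSE;

        return 0;
}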
273 struct sock *inet_csk_reqsk_queue_add(struct sock *sk,
276 void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
278 struct sock *inet_csk_complete_hashdance(struct sock *sk, struct sock *child,
282 static inline void inet_csk_reqsk_queue_added(struct sock *sk) in inet_csk_reqsk_queue_added() argument
284 reqsk_queue_added(&inet_csk(sk)->icsk_accept_queue); in inet_csk_reqsk_queue_added()
287 static inline int inet_csk_reqsk_queue_len(const struct sock *sk) in inet_csk_reqsk_queue_len() argument
289 return reqsk_queue_len(&inet_csk(sk)->icsk_accept_queue); in inet_csk_reqsk_queue_len()
292 static inline int inet_csk_reqsk_queue_young(const struct sock *sk) in inet_csk_reqsk_queue_young() argument
294 return reqsk_queue_len_young(&inet_csk(sk)->icsk_accept_queue); in inet_csk_reqsk_queue_young()
297 static inline int inet_csk_reqsk_queue_is_full(const struct sock *sk) in inet_csk_reqsk_queue_is_full() argument
299 return inet_csk_reqsk_queue_len(sk) >= sk->sk_max_ack_backlog; in inet_csk_reqsk_queue_is_full()
302 void inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req);
303 void inet_csk_reqsk_queue_drop_and_put(struct sock *sk, struct request_sock *req);
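The request-queue helpers above track embryonic (SYN_RECV) connections on a listener: _added/_len/_young do the accounting, _is_full compares against sk_max_ack_backlog, and _drop/_drop_and_put remove a request. A hedged sketch of how a SYN handler typically consults them, simplified from the checks in tcp_conn_request() (SYN-cookie fallback and request allocation omitted):

#include <net/inet_connection_sock.h>
#include <net/sock.h>
#include <linux/errno.h>

static int demo_conn_request(struct sock *sk, struct sk_buff *skb)
{
        /* Too many embryonic requests: real code would consider SYN
         * cookies here; this sketch just refuses. */
        if (inet_csk_reqsk_queue_is_full(sk))
                return -ENOBUFS;

        /* Accept queue already full while young requests are still
         * pending: drop rather than queue more work. */
        if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
                return -ENOBUFS;

        /* ... allocate and fill a request_sock, then publish it:
         * inet_csk_reqsk_queue_hash_add(sk, req, timeout); ... */
        return 0;
}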
305 void inet_csk_destroy_sock(struct sock *sk);
306 void inet_csk_prepare_forced_close(struct sock *sk);
311 static inline unsigned int inet_csk_listen_poll(const struct sock *sk) in inet_csk_listen_poll() argument
313 return !reqsk_queue_empty(&inet_csk(sk)->icsk_accept_queue) ? in inet_csk_listen_poll()
317 int inet_csk_listen_start(struct sock *sk, int backlog);
318 void inet_csk_listen_stop(struct sock *sk);
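inet_csk_listen_start()/inet_csk_listen_stop() bracket the listening state, inet_csk_accept() dequeues an established child from the accept queue, and inet_csk_listen_poll() is what poll() uses to report readability on a listener. A rough sketch of the calling pattern, loosely following inet_listen() and inet_accept() in net/ipv4/af_inet.c (locking and error paths trimmed; demo_ names are illustrative):

#include <net/inet_connection_sock.h>
#include <net/tcp_states.h>
#include <linux/err.h>

static int demo_listen(struct sock *sk, int backlog)
{
        if (sk->sk_state != TCP_LISTEN) {
                int err = inet_csk_listen_start(sk, backlog);

                if (err)
                        return err;
        }
        sk->sk_max_ack_backlog = backlog;
        return 0;
}

static struct sock *demo_accept(struct sock *sk, int flags)
{
        int err;
        struct sock *child = inet_csk_accept(sk, flags, &err);

        return child ? child : ERR_PTR(err);
}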
320 void inet_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr);
322 int inet_csk_compat_getsockopt(struct sock *sk, int level, int optname,
324 int inet_csk_compat_setsockopt(struct sock *sk, int level, int optname,
327 struct dst_entry *inet_csk_update_pmtu(struct sock *sk, u32 mtu);
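inet_csk_update_pmtu() revalidates the cached route after an ICMP "fragmentation needed" and returns the (possibly refreshed) dst, or NULL if the route is gone. A minimal sketch of the usual call site, in the spirit of tcp_v4_mtu_reduced() (the MSS-clamping step is only indicated, not implemented):

#include <net/inet_connection_sock.h>
#include <net/dst.h>
#include <net/tcp.h>

static void demo_mtu_reduced(struct sock *sk, u32 new_mtu)
{
        struct dst_entry *dst = inet_csk_update_pmtu(sk, new_mtu);

        if (!dst)
                return;         /* route is gone; nothing to shrink against */

        /* With a valid dst, shrink the cached MSS if the path got smaller,
         * via the icsk_sync_mss hook shown at line 102. */
        if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst))
                inet_csk(sk)->icsk_sync_mss(sk, dst_mtu(dst));
}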