Lines matching 'sk' in net/dccp/output.c
Each entry gives the file line number, the matched source line, and the enclosing function; the trailing 'argument' tag marks lines where sk appears as a function parameter.
25 static inline void dccp_event_ack_sent(struct sock *sk) in dccp_event_ack_sent() argument
27 inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK); in dccp_event_ack_sent()
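
The two matches above are effectively the whole function: once any Ack-bearing packet has been transmitted, a separately scheduled delayed Ack is redundant. A sketch of the full body, reconstructed from the matched lines:

static inline void dccp_event_ack_sent(struct sock *sk)
{
        /* An Ack just went out, so a pending delayed Ack is no
         * longer needed; cancel its timer. */
        inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
}
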
31 static struct sk_buff *dccp_skb_entail(struct sock *sk, struct sk_buff *skb) in dccp_skb_entail() argument
33 skb_set_owner_w(skb, sk); in dccp_skb_entail()
34 WARN_ON(sk->sk_send_head); in dccp_skb_entail()
35 sk->sk_send_head = skb; in dccp_skb_entail()
36 return skb_clone(sk->sk_send_head, gfp_any()); in dccp_skb_entail()
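
Lines 33-36 are the complete body of dccp_skb_entail(), which implements DCCP's keep-one-packet retransmission scheme: the original skb is parked at sk->sk_send_head so it can be retransmitted later, and a clone is returned for the actual transmission. Reconstructed:

static struct sk_buff *dccp_skb_entail(struct sock *sk, struct sk_buff *skb)
{
        skb_set_owner_w(skb, sk);       /* charge the skb to this socket */
        WARN_ON(sk->sk_send_head);      /* only one packet may be outstanding */
        sk->sk_send_head = skb;         /* keep the original for retransmission */
        return skb_clone(sk->sk_send_head, gfp_any()); /* clone goes on the wire */
}
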
45 static int dccp_transmit_skb(struct sock *sk, struct sk_buff *skb) in dccp_transmit_skb() argument
48 struct inet_sock *inet = inet_sk(sk); in dccp_transmit_skb()
49 const struct inet_connection_sock *icsk = inet_csk(sk); in dccp_transmit_skb()
50 struct dccp_sock *dp = dccp_sk(sk); in dccp_transmit_skb()
91 WARN_ON(skb->sk); in dccp_transmit_skb()
92 skb_set_owner_w(skb, sk); in dccp_transmit_skb()
96 if (dccp_insert_options(sk, skb)) { in dccp_transmit_skb()
113 dccp_update_gss(sk, dcb->dccpd_seq); in dccp_transmit_skb()
134 icsk->icsk_af_ops->send_check(sk, skb); in dccp_transmit_skb()
137 dccp_event_ack_sent(sk); in dccp_transmit_skb()
141 err = icsk->icsk_af_ops->queue_xmit(sk, skb, &inet->cork.fl); in dccp_transmit_skb()
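
The matches in dccp_transmit_skb() trace the transmit path: take ownership of control packets, insert header options, record the sequence number as GSS, checksum, account for a sent Ack, and hand the packet to the IP layer. A condensed sketch of that flow; the header assembly between the matched lines is compressed into comments, the ownership and Ack steps are conditional in the full function, and details vary by kernel version:

static int dccp_transmit_skb(struct sock *sk, struct sk_buff *skb)
{
        struct inet_sock *inet = inet_sk(sk);
        const struct inet_connection_sock *icsk = inet_csk(sk);
        struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
        int err;

        /* Control packets (Request, Ack, Sync, ...) arrive unowned and are
         * adopted here; data packets were owned when queued, so in the full
         * function these two lines run only for non-data packet types. */
        WARN_ON(skb->sk);
        skb_set_owner_w(skb, sk);

        /* Append DCCP header options; failure discards the packet. */
        if (dccp_insert_options(sk, skb)) {
                kfree_skb(skb);
                return -EPROTO;
        }

        /* ... build the generic DCCP header (type, ports, doff, CsCov),
         * then record the sequence number just used as GSS: */
        dccp_update_gss(sk, dcb->dccpd_seq);

        /* Checksum via the address-family operations (IPv4 or IPv6). */
        icsk->icsk_af_ops->send_check(sk, skb);

        /* If this packet carries an Ack, the delayed-Ack timer is moot. */
        dccp_event_ack_sent(sk);

        /* Hand off to the IP layer. */
        err = icsk->icsk_af_ops->queue_xmit(sk, skb, &inet->cork.fl);
        return net_xmit_eval(err);
}
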
162 unsigned int dccp_sync_mss(struct sock *sk, u32 pmtu) in dccp_sync_mss() argument
164 struct inet_connection_sock *icsk = inet_csk(sk); in dccp_sync_mss()
165 struct dccp_sock *dp = dccp_sk(sk); in dccp_sync_mss()
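
dccp_sync_mss() recomputes the maximum packet size whenever the path MTU changes. A deliberately simplified sketch, assuming only header overhead is deducted; the real function also honours a CCID-imposed ceiling and reserves headroom for options:

unsigned int dccp_sync_mss(struct sock *sk, u32 pmtu)
{
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct dccp_sock *dp = dccp_sk(sk);
        u32 cur_mps = pmtu;

        /* Deduct network-layer and DCCP header overhead from the path MTU. */
        cur_mps -= icsk->icsk_af_ops->net_header_len + icsk->icsk_ext_hdr_len +
                   sizeof(struct dccp_hdr) + sizeof(struct dccp_hdr_ext);

        icsk->icsk_pmtu_cookie = pmtu;    /* remember which PMTU this reflects */
        dp->dccps_mss_cache    = cur_mps; /* cached maximum packet size */
        return cur_mps;
}
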
198 void dccp_write_space(struct sock *sk) in dccp_write_space() argument
203 wq = rcu_dereference(sk->sk_wq); in dccp_write_space()
207 if (sock_writeable(sk)) in dccp_write_space()
208 sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT); in dccp_write_space()
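
dccp_write_space() is DCCP's sk->sk_write_space callback: it wakes sleepers on the RCU-managed wait queue and, only if the socket is actually writeable, raises async (SIGIO-style) notification. Reconstructed; skwq_has_sleeper() is the helper name in kernels around v4.5 and later, older trees use wq_has_sleeper():

void dccp_write_space(struct sock *sk)
{
        struct socket_wq *wq;

        rcu_read_lock();
        wq = rcu_dereference(sk->sk_wq);        /* sk_wq is RCU-protected */
        if (skwq_has_sleeper(wq))
                wake_up_interruptible(&wq->wait);
        /* Should agree with poll(), otherwise some programs break. */
        if (sock_writeable(sk))
                sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
        rcu_read_unlock();
}
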
220 static int dccp_wait_for_ccid(struct sock *sk, unsigned long delay) in dccp_wait_for_ccid() argument
225 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); in dccp_wait_for_ccid()
226 sk->sk_write_pending++; in dccp_wait_for_ccid()
227 release_sock(sk); in dccp_wait_for_ccid()
231 lock_sock(sk); in dccp_wait_for_ccid()
232 sk->sk_write_pending--; in dccp_wait_for_ccid()
233 finish_wait(sk_sleep(sk), &wait); in dccp_wait_for_ccid()
235 if (signal_pending(current) || sk->sk_err) in dccp_wait_for_ccid()
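
dccp_wait_for_ccid() shows the standard pattern for sleeping while holding a socket lock: register on the wait queue, drop the lock so receive processing can deliver the feedback the CCID is waiting for, sleep, then reacquire. Reconstructed from the matched lines:

static int dccp_wait_for_ccid(struct sock *sk, unsigned long delay)
{
        DEFINE_WAIT(wait);
        long remaining;

        prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
        sk->sk_write_pending++;
        release_sock(sk);       /* let RX processing run while we sleep */

        remaining = schedule_timeout(delay);

        lock_sock(sk);
        sk->sk_write_pending--;
        finish_wait(sk_sleep(sk), &wait);

        if (signal_pending(current) || sk->sk_err)
                return -1;      /* interrupted, or a socket error occurred */
        return remaining;       /* unused delay budget, in jiffies */
}
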
244 static void dccp_xmit_packet(struct sock *sk) in dccp_xmit_packet() argument
247 struct dccp_sock *dp = dccp_sk(sk); in dccp_xmit_packet()
248 struct sk_buff *skb = dccp_qpolicy_pop(sk); in dccp_xmit_packet()
254 if (sk->sk_state == DCCP_PARTOPEN) { in dccp_xmit_packet()
265 dccp_send_ack(sk); in dccp_xmit_packet()
269 inet_csk_schedule_ack(sk); in dccp_xmit_packet()
270 inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK, in dccp_xmit_packet()
271 inet_csk(sk)->icsk_rto, in dccp_xmit_packet()
274 } else if (dccp_ack_pending(sk)) { in dccp_xmit_packet()
280 err = dccp_transmit_skb(sk, skb); in dccp_xmit_packet()
288 ccid_hc_tx_packet_sent(dp->dccps_hc_tx_ccid, sk, len); in dccp_xmit_packet()
297 dccp_send_sync(sk, dp->dccps_gsr, DCCP_PKT_SYNC); in dccp_xmit_packet()
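
dccp_xmit_packet() dequeues one packet, decides whether it should carry a piggybacked Ack, transmits it, and informs the CCID. A sketch; dccp_feat_fits() is a hypothetical stand-in for the real check that pending feature-negotiation options still fit beside the payload:

static void dccp_xmit_packet(struct sock *sk)
{
        int err, len;
        struct dccp_sock *dp = dccp_sk(sk);
        struct sk_buff *skb = dccp_qpolicy_pop(sk);

        if (unlikely(skb == NULL))
                return;
        len = skb->len;

        if (sk->sk_state == DCCP_PARTOPEN) {
                /* Handshake completion (RFC 4340, 8.1.5): Confirm options
                 * are resent until the peer reaches OPEN; if they would not
                 * fit next to this payload, ship them on a bare Ack first. */
                if (!dccp_feat_fits(dp, len))   /* hypothetical helper */
                        dccp_send_ack(sk);

                /* Keep the delayed-Ack timer armed until the handshake ends. */
                inet_csk_schedule_ack(sk);
                inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
                                          inet_csk(sk)->icsk_rto,
                                          DCCP_RTO_MAX);
                DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_DATAACK;
        } else if (dccp_ack_pending(sk)) {
                DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_DATAACK;
        } else {
                DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_DATA;
        }

        err = dccp_transmit_skb(sk, skb);
        if (err)
                dccp_pr_debug("transmit_skb() returned err=%d\n", err);

        /* Register the packet with the CCID even on error, so its window
         * bookkeeping stays in step with the sequence-number space. */
        ccid_hc_tx_packet_sent(dp->dccps_hc_tx_ccid, sk, len);

        /* A CCID may schedule a Sync to flush out-of-band header options. */
        if (dp->dccps_sync_scheduled)
                dccp_send_sync(sk, dp->dccps_gsr, DCCP_PKT_SYNC);
}
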
307 void dccp_flush_write_queue(struct sock *sk, long *time_budget) in dccp_flush_write_queue() argument
309 struct dccp_sock *dp = dccp_sk(sk); in dccp_flush_write_queue()
313 while (*time_budget > 0 && (skb = skb_peek(&sk->sk_write_queue))) { in dccp_flush_write_queue()
314 rc = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb); in dccp_flush_write_queue()
329 rc = dccp_wait_for_ccid(sk, delay); in dccp_flush_write_queue()
336 dccp_xmit_packet(sk); in dccp_flush_write_queue()
339 skb_dequeue(&sk->sk_write_queue); in dccp_flush_write_queue()
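
dccp_flush_write_queue() drains the write queue at connection close, but only within a caller-supplied time budget: for each packet the CCID verdict is either send now, wait (bounded by the budget, via dccp_wait_for_ccid()), or give up. Reconstructed, modulo version differences:

void dccp_flush_write_queue(struct sock *sk, long *time_budget)
{
        struct dccp_sock *dp = dccp_sk(sk);
        struct sk_buff *skb;
        long delay, rc;

        while (*time_budget > 0 && (skb = skb_peek(&sk->sk_write_queue))) {
                rc = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb);

                switch (ccid_packet_dequeue_eval(rc)) {
                case CCID_PACKET_WILL_DEQUEUE_LATER:
                        /* The CCID cannot say when it will send again. */
                        DCCP_WARN("CCID did not manage to send all packets\n");
                        goto out;
                case CCID_PACKET_DELAY:
                        delay = msecs_to_jiffies(rc);
                        if (delay > *time_budget)
                                goto out;
                        rc = dccp_wait_for_ccid(sk, delay);
                        if (rc < 0)
                                goto out;
                        *time_budget -= (delay - rc);
                        break;  /* re-evaluate this packet */
                case CCID_PACKET_SEND_AT_ONCE:
                        dccp_xmit_packet(sk);
                        break;
                case CCID_PACKET_ERR:
                        skb_dequeue(&sk->sk_write_queue);
                        kfree_skb(skb);
                        dccp_pr_debug("packet discarded due to err=%ld\n", rc);
                }
        }
out:
        return;
}
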
346 void dccp_write_xmit(struct sock *sk) in dccp_write_xmit() argument
348 struct dccp_sock *dp = dccp_sk(sk); in dccp_write_xmit()
351 while ((skb = dccp_qpolicy_top(sk))) { in dccp_write_xmit()
352 int rc = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb); in dccp_write_xmit()
358 sk_reset_timer(sk, &dp->dccps_xmit_timer, in dccp_write_xmit()
362 dccp_xmit_packet(sk); in dccp_write_xmit()
365 dccp_qpolicy_drop(sk, skb); in dccp_write_xmit()
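
dccp_write_xmit() is the normal transmit loop: peek at the head of the queue, ask the CCID, and either transmit immediately, arm dccps_xmit_timer to retry after the CCID-mandated delay, or drop the packet on error. Reconstructed:

void dccp_write_xmit(struct sock *sk)
{
        struct dccp_sock *dp = dccp_sk(sk);
        struct sk_buff *skb;

        while ((skb = dccp_qpolicy_top(sk))) {
                int rc = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb);

                switch (ccid_packet_dequeue_eval(rc)) {
                case CCID_PACKET_WILL_DEQUEUE_LATER:
                        return; /* the CCID will trigger us again */
                case CCID_PACKET_DELAY:
                        /* rc is the required delay in milliseconds */
                        sk_reset_timer(sk, &dp->dccps_xmit_timer,
                                       jiffies + msecs_to_jiffies(rc));
                        return;
                case CCID_PACKET_SEND_AT_ONCE:
                        dccp_xmit_packet(sk);
                        break;
                case CCID_PACKET_ERR:
                        dccp_qpolicy_drop(sk, skb);
                        dccp_pr_debug("packet discarded due to err=%d\n", rc);
                }
        }
}
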
380 int dccp_retransmit_skb(struct sock *sk) in dccp_retransmit_skb() argument
382 WARN_ON(sk->sk_send_head == NULL); in dccp_retransmit_skb()
384 if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk) != 0) in dccp_retransmit_skb()
388 inet_csk(sk)->icsk_retransmits++; in dccp_retransmit_skb()
390 return dccp_transmit_skb(sk, skb_clone(sk->sk_send_head, GFP_ATOMIC)); in dccp_retransmit_skb()
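
dccp_retransmit_skb() resends the single packet parked by dccp_skb_entail(): the route-dependent header is rebuilt first, then a fresh clone of sk_send_head is transmitted. Reconstructed from the matched lines:

int dccp_retransmit_skb(struct sock *sk)
{
        WARN_ON(sk->sk_send_head == NULL);

        if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk) != 0)
                return -EHOSTUNREACH;   /* routing failure or similar */

        /* This counter distinguishes original and retransmitted skbs. */
        inet_csk(sk)->icsk_retransmits++;

        return dccp_transmit_skb(sk, skb_clone(sk->sk_send_head, GFP_ATOMIC));
}
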
393 struct sk_buff *dccp_make_response(const struct sock *sk, struct dst_entry *dst, in dccp_make_response() argument
407 skb = sock_wmalloc((struct sock *)sk, MAX_DCCP_HEADER, 1, in dccp_make_response()
456 struct sk_buff *dccp_ctl_make_reset(struct sock *sk, struct sk_buff *rcv_skb) in dccp_ctl_make_reset() argument
466 skb = alloc_skb(sk->sk_prot->max_header, GFP_ATOMIC); in dccp_ctl_make_reset()
470 skb_reserve(skb, sk->sk_prot->max_header); in dccp_ctl_make_reset()
508 int dccp_send_reset(struct sock *sk, enum dccp_reset_codes code) in dccp_send_reset() argument
515 int err = inet_csk(sk)->icsk_af_ops->rebuild_header(sk); in dccp_send_reset()
520 skb = sock_wmalloc(sk, sk->sk_prot->max_header, 1, GFP_ATOMIC); in dccp_send_reset()
525 skb_reserve(skb, sk->sk_prot->max_header); in dccp_send_reset()
529 return dccp_transmit_skb(sk, skb); in dccp_send_reset()
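
dccp_send_reset() is the Reset path for a full socket (dccp_ctl_make_reset() above covers the unconnected control-socket case): rebuild the header, allocate with sock_wmalloc() with the force flag set, and stamp the reset code before transmitting. Reconstructed:

int dccp_send_reset(struct sock *sk, enum dccp_reset_codes code)
{
        struct sk_buff *skb;
        int err = inet_csk(sk)->icsk_af_ops->rebuild_header(sk);

        if (err != 0)
                return err;

        skb = sock_wmalloc(sk, sk->sk_prot->max_header, 1, GFP_ATOMIC);
        if (skb == NULL)
                return -ENOBUFS;

        /* Reserve space for headers and prepare control bits. */
        skb_reserve(skb, sk->sk_prot->max_header);
        DCCP_SKB_CB(skb)->dccpd_type       = DCCP_PKT_RESET;
        DCCP_SKB_CB(skb)->dccpd_reset_code = code;

        return dccp_transmit_skb(sk, skb);
}
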
535 int dccp_connect(struct sock *sk) in dccp_connect() argument
538 struct dccp_sock *dp = dccp_sk(sk); in dccp_connect()
539 struct dst_entry *dst = __sk_dst_get(sk); in dccp_connect()
540 struct inet_connection_sock *icsk = inet_csk(sk); in dccp_connect()
542 sk->sk_err = 0; in dccp_connect()
543 sock_reset_flag(sk, SOCK_DONE); in dccp_connect()
545 dccp_sync_mss(sk, dst_mtu(dst)); in dccp_connect()
548 if (dccp_feat_finalise_settings(dccp_sk(sk))) in dccp_connect()
554 skb = alloc_skb(sk->sk_prot->max_header, sk->sk_allocation); in dccp_connect()
559 skb_reserve(skb, sk->sk_prot->max_header); in dccp_connect()
563 dccp_transmit_skb(sk, dccp_skb_entail(sk, skb)); in dccp_connect()
568 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, in dccp_connect()
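
dccp_connect() performs the active open: reset error state, size the MPS from the route's MTU, finalise feature negotiation, then entail and transmit a DCCP-Request and arm the retransmit timer. Reconstructed, with statistics bookkeeping omitted:

int dccp_connect(struct sock *sk)
{
        struct sk_buff *skb;
        struct dccp_sock *dp = dccp_sk(sk);
        struct dst_entry *dst = __sk_dst_get(sk);
        struct inet_connection_sock *icsk = inet_csk(sk);

        sk->sk_err = 0;
        sock_reset_flag(sk, SOCK_DONE);

        dccp_sync_mss(sk, dst_mtu(dst));

        /* Do not connect if feature negotiation setup fails. */
        if (dccp_feat_finalise_settings(dccp_sk(sk)))
                return -EPROTO;

        /* Initialise GAR as per RFC 4340, 8.5 (AWL/AWH are set later,
         * in dccp_transmit_skb()). */
        dp->dccps_gar = dp->dccps_iss;

        skb = alloc_skb(sk->sk_prot->max_header, sk->sk_allocation);
        if (unlikely(skb == NULL))
                return -ENOBUFS;

        /* Reserve space for headers. */
        skb_reserve(skb, sk->sk_prot->max_header);

        DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_REQUEST;

        /* Park the Request for retransmission and send a clone of it. */
        dccp_transmit_skb(sk, dccp_skb_entail(sk, skb));

        /* Timer for repeating the Request until an answer arrives. */
        icsk->icsk_retransmits = 0;
        inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
                                  icsk->icsk_rto, DCCP_RTO_MAX);
        return 0;
}
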
575 void dccp_send_ack(struct sock *sk) in dccp_send_ack() argument
578 if (sk->sk_state != DCCP_CLOSED) { in dccp_send_ack()
579 struct sk_buff *skb = alloc_skb(sk->sk_prot->max_header, in dccp_send_ack()
583 inet_csk_schedule_ack(sk); in dccp_send_ack()
584 inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN; in dccp_send_ack()
585 inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK, in dccp_send_ack()
592 skb_reserve(skb, sk->sk_prot->max_header); in dccp_send_ack()
594 dccp_transmit_skb(sk, skb); in dccp_send_ack()
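
dccp_send_ack() sends a bare Ack; the interesting part is the fallback. If the atomic allocation fails, the delayed-Ack timer is armed so the Ack is retried from timer context rather than lost. Reconstructed:

void dccp_send_ack(struct sock *sk)
{
        /* If we have been reset, we may not send again. */
        if (sk->sk_state != DCCP_CLOSED) {
                struct sk_buff *skb = alloc_skb(sk->sk_prot->max_header,
                                                GFP_ATOMIC);

                if (skb == NULL) {
                        /* Out of memory: fall back to the delack timer. */
                        inet_csk_schedule_ack(sk);
                        inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN;
                        inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
                                                  TCP_DELACK_MAX,
                                                  DCCP_RTO_MAX);
                        return;
                }

                /* Reserve space for headers. */
                skb_reserve(skb, sk->sk_prot->max_header);
                DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_ACK;
                dccp_transmit_skb(sk, skb);
        }
}
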
602 void dccp_send_delayed_ack(struct sock *sk)
604 struct inet_connection_sock *icsk = inet_csk(sk);
620 dccp_send_ack(sk);
629 sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout);
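
The matches for dccp_send_delayed_ack() carry no enclosing-function annotation, most likely because the function sits in an #if 0 block in this file (it is marked in the source as currently unused by DCCP). A sketch of the timer-merging logic it implements, for reference; details are uncertain given the function's dead-code status:

void dccp_send_delayed_ack(struct sock *sk)
{
        struct inet_connection_sock *icsk = inet_csk(sk);
        unsigned long timeout = jiffies + 2 * HZ;

        if (icsk->icsk_ack.pending & ICSK_ACK_TIMER) {
                /* If the delack timer is about to expire, send the Ack now. */
                if (time_before_eq(icsk->icsk_ack.timeout, jiffies)) {
                        dccp_send_ack(sk);
                        return;
                }
                /* Never push an already-armed timer further into the future. */
                if (!time_before(timeout, icsk->icsk_ack.timeout))
                        timeout = icsk->icsk_ack.timeout;
        }
        icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER;
        icsk->icsk_ack.timeout = timeout;
        sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout);
}
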
633 void dccp_send_sync(struct sock *sk, const u64 ackno, in dccp_send_sync() argument
641 struct sk_buff *skb = alloc_skb(sk->sk_prot->max_header, GFP_ATOMIC); in dccp_send_sync()
650 skb_reserve(skb, sk->sk_prot->max_header); in dccp_send_sync()
658 dccp_sk(sk)->dccps_sync_scheduled = 0; in dccp_send_sync()
660 dccp_transmit_skb(sk, skb); in dccp_send_sync()
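
dccp_send_sync() builds a Sync or SyncAck acknowledging @ackno. Note the dccps_sync_scheduled handshake with dccp_xmit_packet() above: the flag is cleared here because the outgoing Sync will carry whatever out-of-band header options were pending. Reconstructed:

void dccp_send_sync(struct sock *sk, const u64 ackno,
                    const enum dccp_pkt_type pkt_type)
{
        /*
         * We are not putting this on the write queue, so
         * dccp_transmit_skb() will set the ownership to this sock.
         */
        struct sk_buff *skb = alloc_skb(sk->sk_prot->max_header, GFP_ATOMIC);

        if (skb == NULL) {
                /* FIXME: how to make sure the sync is sent? */
                DCCP_CRIT("could not send %s", dccp_packet_name(pkt_type));
                return;
        }

        /* Reserve space for headers and prepare control bits. */
        skb_reserve(skb, sk->sk_prot->max_header);
        DCCP_SKB_CB(skb)->dccpd_type    = pkt_type;
        DCCP_SKB_CB(skb)->dccpd_ack_seq = ackno;

        /* This Sync carries any pending out-of-band options: clear the flag. */
        dccp_sk(sk)->dccps_sync_scheduled = 0;

        dccp_transmit_skb(sk, skb);
}
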
670 void dccp_send_close(struct sock *sk, const int active) in dccp_send_close() argument
672 struct dccp_sock *dp = dccp_sk(sk); in dccp_send_close()
676 skb = alloc_skb(sk->sk_prot->max_header, prio); in dccp_send_close()
681 skb_reserve(skb, sk->sk_prot->max_header); in dccp_send_close()
688 skb = dccp_skb_entail(sk, skb); in dccp_send_close()
699 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, in dccp_send_close()
702 dccp_transmit_skb(sk, skb); in dccp_send_close()
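
dccp_send_close() emits the teardown packet: a server that does not hold TIMEWAIT state sends CloseReq, everyone else sends Close. On active close the skb is entailed (kept for retransmission) and the retransmit timer armed, mirroring the Request handling in dccp_connect(). Reconstructed:

void dccp_send_close(struct sock *sk, const int active)
{
        struct dccp_sock *dp = dccp_sk(sk);
        struct sk_buff *skb;
        const gfp_t prio = active ? GFP_KERNEL : GFP_ATOMIC;

        skb = alloc_skb(sk->sk_prot->max_header, prio);
        if (skb == NULL)
                return;

        /* Reserve space for headers and prepare control bits. */
        skb_reserve(skb, sk->sk_prot->max_header);
        if (dp->dccps_role == DCCP_ROLE_SERVER && !dp->dccps_server_timewait)
                DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_CLOSEREQ;
        else
                DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_CLOSE;

        if (active) {
                /* Keep the original for retransmission, send a clone. */
                skb = dccp_skb_entail(sk, skb);
                inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
                                          DCCP_TIMEOUT_INIT, DCCP_RTO_MAX);
        }
        dccp_transmit_skb(sk, skb);
}
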