Lines matching refs: sk
75 static int sctp_add_backlog(struct sock *sk, struct sk_buff *skb);
109 struct sock *sk; in sctp_rcv() local
181 sk = rcvr->sk; in sctp_rcv()
187 if (sk->sk_bound_dev_if && (sk->sk_bound_dev_if != af->skb_iif(skb))) { in sctp_rcv()
195 sk = net->sctp.ctl_sock; in sctp_rcv()
196 ep = sctp_sk(sk)->ep; in sctp_rcv()
216 if (!xfrm_policy_check(sk, XFRM_POLICY_IN, skb, family)) in sctp_rcv()
220 if (sk_filter(sk, skb)) in sctp_rcv()
224 chunk = sctp_chunkify(skb, asoc, sk); in sctp_rcv()
245 bh_lock_sock(sk); in sctp_rcv()
247 if (sk != rcvr->sk) { in sctp_rcv()
255 bh_unlock_sock(sk); in sctp_rcv()
256 sk = rcvr->sk; in sctp_rcv()
257 bh_lock_sock(sk); in sctp_rcv()
260 if (sock_owned_by_user(sk)) { in sctp_rcv()
261 if (sctp_add_backlog(sk, skb)) { in sctp_rcv()
262 bh_unlock_sock(sk); in sctp_rcv()
273 bh_unlock_sock(sk); in sctp_rcv()
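
The sctp_rcv() lines above show the usual bottom-half socket locking pattern: take bh_lock_sock(), and if a process context currently owns the socket, defer the packet to the backlog instead of handling it inline in softirq context. A minimal sketch of that pattern follows; process_inline() is a hypothetical stand-in for the real chunk processing, not part of SCTP.

#include <linux/skbuff.h>
#include <net/sock.h>

/* Hypothetical stand-in for the inline (softirq) processing path. */
static void process_inline(struct sock *sk, struct sk_buff *skb)
{
        consume_skb(skb);
}

static void rcv_dispatch(struct sock *sk, struct sk_buff *skb)
{
        bh_lock_sock(sk);
        if (sock_owned_by_user(sk)) {
                /* A process context holds the socket lock: queue the skb
                 * on the backlog, bounded by the receive buffer size. */
                if (sk_add_backlog(sk, skb, sk->sk_rcvbuf))
                        kfree_skb(skb);         /* backlog full: drop */
        } else {
                /* Socket not owned: process directly in softirq context. */
                process_inline(sk, skb);
        }
        bh_unlock_sock(sk);
}
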
303 int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb) in sctp_backlog_rcv() argument
321 if (unlikely(rcvr->sk != sk)) { in sctp_backlog_rcv()
333 sk = rcvr->sk; in sctp_backlog_rcv()
334 bh_lock_sock(sk); in sctp_backlog_rcv()
336 if (sock_owned_by_user(sk)) { in sctp_backlog_rcv()
337 if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) in sctp_backlog_rcv()
344 bh_unlock_sock(sk); in sctp_backlog_rcv()
365 static int sctp_add_backlog(struct sock *sk, struct sk_buff *skb) in sctp_add_backlog() argument
371 ret = sk_add_backlog(sk, skb, sk->sk_rcvbuf); in sctp_add_backlog()
389 void sctp_icmp_frag_needed(struct sock *sk, struct sctp_association *asoc, in sctp_icmp_frag_needed() argument
395 if (sock_owned_by_user(sk)) { in sctp_icmp_frag_needed()
403 sctp_transport_update_pmtu(sk, t, pmtu); in sctp_icmp_frag_needed()
406 sctp_assoc_sync_pmtu(sk, asoc); in sctp_icmp_frag_needed()
418 void sctp_icmp_redirect(struct sock *sk, struct sctp_transport *t, in sctp_icmp_redirect() argument
427 dst->ops->redirect(dst, sk, skb); in sctp_icmp_redirect()
441 void sctp_icmp_proto_unreachable(struct sock *sk, in sctp_icmp_proto_unreachable() argument
445 if (sock_owned_by_user(sk)) { in sctp_icmp_proto_unreachable()
454 struct net *net = sock_net(sk); in sctp_icmp_proto_unreachable()
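
Both ICMP helpers above test sock_owned_by_user() before touching association state: the error arrives in softirq context, so if a process holds the socket lock the handler can only record that work is pending. A rough sketch of that defer-or-apply shape, using an illustrative demo_transport rather than the real struct sctp_transport layout:

#include <linux/types.h>
#include <net/sock.h>

/* Illustrative transport state; not the real struct sctp_transport. */
struct demo_transport {
        __u32 pathmtu;
        int pmtu_pending;       /* update must wait for the lock owner */
};

static void demo_frag_needed(struct sock *sk, struct demo_transport *t,
                             __u32 pmtu)
{
        if (sock_owned_by_user(sk)) {
                /* A process owns the socket: just record that a PMTU
                 * update is pending and let the owner apply it later. */
                t->pmtu_pending = 1;
                return;
        }

        /* Lock is free: shrink the cached path MTU right away. */
        if (t->pathmtu > pmtu)
                t->pathmtu = pmtu;
}
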
478 struct sock *sk = NULL; in sctp_err_lookup() local
503 sk = asoc->base.sk; in sctp_err_lookup()
529 bh_lock_sock(sk); in sctp_err_lookup()
534 if (sock_owned_by_user(sk)) in sctp_err_lookup()
539 return sk; in sctp_err_lookup()
547 void sctp_err_finish(struct sock *sk, struct sctp_association *asoc) in sctp_err_finish() argument
549 bh_unlock_sock(sk); in sctp_err_finish()
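
sctp_err_lookup() returns the matching socket with its bottom-half lock already held (note the bh_lock_sock() just before the return), and sctp_err_finish() is the matching unlock. A simplified sketch of how a caller pairs the two, with the actual ICMP decoding omitted:

#include <net/sctp/sctp.h>

static void demo_icmp_err(struct net *net, struct sk_buff *skb)
{
        struct sctp_association *asoc;
        struct sctp_transport *transport;
        struct sock *sk;

        sk = sctp_err_lookup(net, AF_INET, skb, sctp_hdr(skb),
                             &asoc, &transport);
        if (!sk)
                return;         /* no matching association */

        /* ... act on the error while the socket is bh-locked ... */

        sctp_err_finish(sk, asoc);      /* releases bh_lock_sock(sk) */
}
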
574 struct sock *sk; in sctp_v4_err() local
587 sk = sctp_err_lookup(net, AF_INET, skb, sctp_hdr(skb), &asoc, &transport); in sctp_v4_err()
591 if (!sk) { in sctp_v4_err()
609 sctp_icmp_frag_needed(sk, asoc, transport, info); in sctp_v4_err()
613 sctp_icmp_proto_unreachable(sk, asoc, in sctp_v4_err()
630 sctp_icmp_redirect(sk, transport, skb); in sctp_v4_err()
636 inet = inet_sk(sk); in sctp_v4_err()
637 if (!sock_owned_by_user(sk) && inet->recverr) { in sctp_v4_err()
638 sk->sk_err = err; in sctp_v4_err()
639 sk->sk_error_report(sk); in sctp_v4_err()
641 sk->sk_err_soft = err; in sctp_v4_err()
645 sctp_err_finish(sk, asoc); in sctp_v4_err()
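
The tail of sctp_v4_err() shows the standard ICMP error delivery split: if the socket is unowned and IP_RECVERR is enabled, the error is reported to userspace immediately; otherwise it is only parked in sk_err_soft. A condensed sketch of that decision, where err stands for whatever the ICMP type/code was mapped to:

#include <net/inet_sock.h>
#include <net/sock.h>

static void demo_report_err(struct sock *sk, int err)
{
        struct inet_sock *inet = inet_sk(sk);

        if (!sock_owned_by_user(sk) && inet->recverr) {
                sk->sk_err = err;               /* hard error */
                sk->sk_error_report(sk);        /* wake up waiters */
        } else {
                sk->sk_err_soft = err;          /* soft error, fetched lazily */
        }
}
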
711 struct net *net = sock_net(ep->base.sk); in __sctp_hash_endpoint()
736 struct net *net = sock_net(ep->base.sk); in __sctp_unhash_endpoint()
788 struct net *net = sock_net(asoc->base.sk); in __sctp_hash_established()
819 struct net *net = sock_net(asoc->base.sk); in __sctp_unhash_established()
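
The hashing helpers all derive their struct net from the owning socket via sock_net(), so the SCTP hash tables are effectively keyed per network namespace. A hypothetical sketch of that idiom (demo_hash_key() and the omitted table-size mask are illustrative only):

#include <linux/types.h>
#include <net/net_namespace.h>
#include <net/netns/hash.h>
#include <net/sock.h>

/* Mix the owning socket's netns into the hash key, as the
 * __sctp_hash_*() helpers above do. */
static u32 demo_hash_key(const struct sock *sk, __u16 lport)
{
        struct net *net = sock_net(sk);

        return net_hash_mix(net) + lport;
}
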