Lines matching refs: sk in net/ipv4/udp.c (the Linux IPv4 UDP implementation; a mid-4.x era tree, judging by flush_stack() and the sk_nulls lists). Each entry below gives the line number within udp.c, the matching code, and the enclosing function; parameter and local-variable declarations are tagged "argument" and "local" respectively.
138 struct sock *sk, in udp_lib_lport_inuse() argument
145 kuid_t uid = sock_i_uid(sk); in udp_lib_lport_inuse()
149 sk2 != sk && in udp_lib_lport_inuse()
151 (!sk2->sk_reuse || !sk->sk_reuse) && in udp_lib_lport_inuse()
152 (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if || in udp_lib_lport_inuse()
153 sk2->sk_bound_dev_if == sk->sk_bound_dev_if) && in udp_lib_lport_inuse()
154 (!sk2->sk_reuseport || !sk->sk_reuseport || in udp_lib_lport_inuse()
156 saddr_comp(sk, sk2)) { in udp_lib_lport_inuse()
171 struct sock *sk, in udp_lib_lport_inuse2() argument
177 kuid_t uid = sock_i_uid(sk); in udp_lib_lport_inuse2()
183 sk2 != sk && in udp_lib_lport_inuse2()
185 (!sk2->sk_reuse || !sk->sk_reuse) && in udp_lib_lport_inuse2()
186 (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if || in udp_lib_lport_inuse2()
187 sk2->sk_bound_dev_if == sk->sk_bound_dev_if) && in udp_lib_lport_inuse2()
188 (!sk2->sk_reuseport || !sk->sk_reuseport || in udp_lib_lport_inuse2()
190 saddr_comp(sk, sk2)) { in udp_lib_lport_inuse2()
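The matches above form the bind-conflict predicate: a port is "in use" only when some other socket (sk2 != sk) holds it and none of the escape hatches apply, namely both sockets set SO_REUSEADDR (sk_reuse), or they are bound to different devices, or both use SO_REUSEPORT with the same owner, or saddr_comp() says their local addresses cannot overlap. A minimal user-space sketch of the common case, assuming Linux; port 47000 is an arbitrary test value:

    /* bind_conflict.c: two wildcard UDP binds to one port.  Without
     * SO_REUSEADDR on *both* sockets, the second bind() fails with
     * EADDRINUSE; the kernel-side check is udp_lib_lport_inuse(). */
    #include <arpa/inet.h>
    #include <netinet/in.h>
    #include <stdio.h>
    #include <sys/socket.h>
    #include <unistd.h>

    static int bound_udp_socket(int reuseaddr, unsigned short port)
    {
        int fd = socket(AF_INET, SOCK_DGRAM, 0);
        struct sockaddr_in a = { .sin_family = AF_INET,
                                 .sin_port = htons(port),
                                 .sin_addr.s_addr = htonl(INADDR_ANY) };
        setsockopt(fd, SOL_SOCKET, SO_REUSEADDR,
                   &reuseaddr, sizeof(reuseaddr));
        if (bind(fd, (struct sockaddr *)&a, sizeof(a)) < 0) {
            close(fd);
            return -1;
        }
        return fd;
    }

    int main(void)
    {
        int a = bound_udp_socket(0, 47000);  /* succeeds */
        int b = bound_udp_socket(0, 47000);  /* conflict: returns -1 */
        printf("first=%d second=%d\n", a, b);
        return 0;
    }

Passing 1 for reuseaddr on both sockets flips the (!sk2->sk_reuse || !sk->sk_reuse) test and lets the second bind succeed. The other sketches below build the same way with a plain cc invocation.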
208 int udp_lib_get_port(struct sock *sk, unsigned short snum, in udp_lib_get_port() argument
214 struct udp_table *udptable = sk->sk_prot->h.udp_table; in udp_lib_get_port()
216 struct net *net = sock_net(sk); in udp_lib_get_port()
238 udp_lib_lport_inuse(net, snum, hslot, bitmap, sk, in udp_lib_get_port()
262 unsigned int slot2 = udp_sk(sk)->udp_portaddr_hash ^ snum; in udp_lib_get_port()
272 sk, saddr_comp); in udp_lib_get_port()
276 sk, saddr_comp); in udp_lib_get_port()
284 if (udp_lib_lport_inuse(net, snum, hslot, NULL, sk, in udp_lib_get_port()
289 inet_sk(sk)->inet_num = snum; in udp_lib_get_port()
290 udp_sk(sk)->udp_port_hash = snum; in udp_lib_get_port()
291 udp_sk(sk)->udp_portaddr_hash ^= snum; in udp_lib_get_port()
292 if (sk_unhashed(sk)) { in udp_lib_get_port()
293 sk_nulls_add_node_rcu(sk, &hslot->head); in udp_lib_get_port()
295 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1); in udp_lib_get_port()
297 hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash); in udp_lib_get_port()
299 hlist_nulls_add_head_rcu(&udp_sk(sk)->udp_portaddr_node, in udp_lib_get_port()
327 int udp_v4_get_port(struct sock *sk, unsigned short snum) in udp_v4_get_port() argument
330 udp4_portaddr_hash(sock_net(sk), htonl(INADDR_ANY), snum); in udp_v4_get_port()
332 udp4_portaddr_hash(sock_net(sk), inet_sk(sk)->inet_rcv_saddr, 0); in udp_v4_get_port()
335 udp_sk(sk)->udp_portaddr_hash = hash2_partial; in udp_v4_get_port()
336 return udp_lib_get_port(sk, snum, ipv4_rcv_saddr_equal, hash2_nulladdr); in udp_v4_get_port()
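udp_v4_get_port() is the bind() entry point: it precomputes two secondary hashes (one over INADDR_ANY for wildcard lookups, one over the bound source address) and hands off to udp_lib_get_port(), which fills both the port-only table and the port+address table. The uid checks in the conflict loops (sock_i_uid() at lines 145 and 177) mean SO_REUSEPORT sockets may share a port only when they belong to the same effective user. A sketch, assuming a kernel with SO_REUSEPORT (3.9+); the port is arbitrary:

    /* reuseport.c: with SO_REUSEPORT on both sockets (same UID), two
     * binds to the identical addr/port both succeed, and the kernel
     * load-balances incoming datagrams across the group. */
    #include <arpa/inet.h>
    #include <netinet/in.h>
    #include <stdio.h>
    #include <sys/socket.h>
    #include <unistd.h>

    static int reuseport_socket(unsigned short port)
    {
        int fd = socket(AF_INET, SOCK_DGRAM, 0), one = 1;
        struct sockaddr_in a = { .sin_family = AF_INET,
                                 .sin_port = htons(port),
                                 .sin_addr.s_addr = htonl(INADDR_LOOPBACK) };
        setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one));
        return bind(fd, (struct sockaddr *)&a, sizeof(a)) == 0 ? fd : -1;
    }

    int main(void)
    {
        int a = reuseport_socket(47001);
        int b = reuseport_socket(47001);
        printf("a=%d b=%d (both >= 0: the port is shared)\n", a, b);
        return 0;
    }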
339 static inline int compute_score(struct sock *sk, struct net *net, in compute_score() argument
346 if (!net_eq(sock_net(sk), net) || in compute_score()
347 udp_sk(sk)->udp_port_hash != hnum || in compute_score()
348 ipv6_only_sock(sk)) in compute_score()
351 score = (sk->sk_family == PF_INET) ? 2 : 1; in compute_score()
352 inet = inet_sk(sk); in compute_score()
372 if (sk->sk_bound_dev_if) { in compute_score()
373 if (sk->sk_bound_dev_if != dif) in compute_score()
377 if (sk->sk_incoming_cpu == raw_smp_processor_id()) in compute_score()
385 static inline int compute_score2(struct sock *sk, struct net *net, in compute_score2() argument
392 if (!net_eq(sock_net(sk), net) || in compute_score2()
393 ipv6_only_sock(sk)) in compute_score2()
396 inet = inet_sk(sk); in compute_score2()
402 score = (sk->sk_family == PF_INET) ? 2 : 1; in compute_score2()
416 if (sk->sk_bound_dev_if) { in compute_score2()
417 if (sk->sk_bound_dev_if != dif) in compute_score2()
422 if (sk->sk_incoming_cpu == raw_smp_processor_id()) in compute_score2()
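compute_score() and compute_score2() rank candidate sockets for an incoming datagram: a socket is disqualified outright if any attribute it is bound to (local address, peer address, peer port, device) does not match the packet, each bound-and-matching attribute raises the score, and an exact address-family match plus a matching sk_incoming_cpu also count. The lookup loops below keep the highest-scoring socket. A simplified user-space model of that ranking; the struct fields and weights are illustrative, not the kernel's exact values:

    /* score_model.c: toy re-statement of UDP lookup scoring.  A zero
     * field means "wildcard": it neither helps nor hurts.  A set field
     * must match exactly (else disqualified) and earns extra points. */
    #include <stdint.h>
    #include <stdio.h>

    struct toy_sock {
        uint32_t rcv_saddr, daddr;   /* local/peer address, 0 = unbound */
        uint16_t dport, bound_dev;   /* peer port, device,  0 = unbound */
    };

    static int toy_score(const struct toy_sock *s, uint32_t daddr,
                         uint32_t saddr, uint16_t sport, uint16_t dif)
    {
        int score = 1;
        if (s->rcv_saddr) { if (s->rcv_saddr != daddr) return -1; score += 4; }
        if (s->daddr)     { if (s->daddr != saddr)     return -1; score += 4; }
        if (s->dport)     { if (s->dport != sport)     return -1; score += 4; }
        if (s->bound_dev) { if (s->bound_dev != dif)   return -1; score += 4; }
        return score;
    }

    int main(void)
    {
        struct toy_sock any  = { 0, 0, 0, 0 };
        struct toy_sock conn = { 0x7f000001, 0x7f000001, 4242, 0 };
        /* a connected socket outranks a wildcard one for its packets */
        printf("wildcard=%d connected=%d\n",
               toy_score(&any,  0x7f000001, 0x7f000001, 4242, 1),
               toy_score(&conn, 0x7f000001, 0x7f000001, 4242, 1));
        return 0;
    }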
446 struct sock *sk, *result; in udp4_lib_lookup2() local
454 udp_portaddr_for_each_entry_rcu(sk, node, &hslot2->head) { in udp4_lib_lookup2()
455 score = compute_score2(sk, net, saddr, sport, in udp4_lib_lookup2()
458 result = sk; in udp4_lib_lookup2()
460 reuseport = sk->sk_reuseport; in udp4_lib_lookup2()
469 result = sk; in udp4_lib_lookup2()
499 struct sock *sk, *result; in __udp4_lib_lookup() local
535 sk_nulls_for_each_rcu(sk, node, &hslot->head) { in __udp4_lib_lookup()
536 score = compute_score(sk, net, saddr, hnum, sport, in __udp4_lib_lookup()
539 result = sk; in __udp4_lib_lookup()
541 reuseport = sk->sk_reuseport; in __udp4_lib_lookup()
550 result = sk; in __udp4_lib_lookup()
594 static inline bool __udp_is_mcast_sock(struct net *net, struct sock *sk, in __udp_is_mcast_sock() argument
599 struct inet_sock *inet = inet_sk(sk); in __udp_is_mcast_sock()
601 if (!net_eq(sock_net(sk), net) || in __udp_is_mcast_sock()
602 udp_sk(sk)->udp_port_hash != hnum || in __udp_is_mcast_sock()
606 ipv6_only_sock(sk) || in __udp_is_mcast_sock()
607 (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif)) in __udp_is_mcast_sock()
609 if (!ip_mc_sf_allow(sk, loc_addr, rmt_addr, dif)) in __udp_is_mcast_sock()
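__udp_is_mcast_sock() applies the same namespace, port, device, and address checks as the unicast lookup, plus ip_mc_sf_allow(), which enforces per-socket multicast source filters. Membership is established from user space with IP_ADD_MEMBERSHIP; a sketch with an arbitrary group and port:

    /* mcast_join.c: join an IPv4 multicast group; delivery of group
     * traffic to this socket is then gated by __udp_is_mcast_sock(). */
    #include <arpa/inet.h>
    #include <netinet/in.h>
    #include <stdio.h>
    #include <sys/socket.h>
    #include <unistd.h>

    int main(void)
    {
        int fd = socket(AF_INET, SOCK_DGRAM, 0);
        struct sockaddr_in a = { .sin_family = AF_INET,
                                 .sin_port = htons(47002),
                                 .sin_addr.s_addr = htonl(INADDR_ANY) };
        struct ip_mreq mr;

        mr.imr_multiaddr.s_addr = inet_addr("239.1.2.3");
        mr.imr_interface.s_addr = htonl(INADDR_ANY);
        bind(fd, (struct sockaddr *)&a, sizeof(a));
        if (setsockopt(fd, IPPROTO_IP, IP_ADD_MEMBERSHIP,
                       &mr, sizeof(mr)) < 0)
            perror("IP_ADD_MEMBERSHIP");   /* needs a multicast route */
        else
            puts("joined 239.1.2.3:47002");
        close(fd);
        return 0;
    }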
632 struct sock *sk; in __udp4_lib_err() local
637 sk = __udp4_lib_lookup(net, iph->daddr, uh->dest, in __udp4_lib_err()
639 if (!sk) { in __udp4_lib_err()
646 inet = inet_sk(sk); in __udp4_lib_err()
661 ipv4_sk_update_pmtu(skb, sk, info); in __udp4_lib_err()
676 ipv4_sk_redirect(skb, sk); in __udp4_lib_err()
685 if (!harderr || sk->sk_state != TCP_ESTABLISHED) in __udp4_lib_err()
688 ip_icmp_error(sk, skb, err, uh->dest, info, (u8 *)(uh+1)); in __udp4_lib_err()
690 sk->sk_err = err; in __udp4_lib_err()
691 sk->sk_error_report(sk); in __udp4_lib_err()
693 sock_put(sk); in __udp4_lib_err()
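__udp4_lib_err() runs when an ICMP error arrives for a local UDP socket: it looks the socket up from the embedded headers, updates PMTU or redirect state, and then either queues the error with ip_icmp_error() (only if the application enabled IP_RECVERR) or sets sk_err for connected sockets on hard errors. A sketch that provokes and reads back such an error, assuming nothing listens on the chosen loopback port:

    /* recverr.c: send to a closed port, then fetch the queued ICMP
     * port-unreachable from the error queue with MSG_ERRQUEUE. */
    #include <arpa/inet.h>
    #include <netinet/in.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/socket.h>
    #include <unistd.h>

    int main(void)
    {
        int fd = socket(AF_INET, SOCK_DGRAM, 0), on = 1;
        struct sockaddr_in dst = { .sin_family = AF_INET,
                                   .sin_port = htons(9), /* likely closed */
                                   .sin_addr.s_addr = htonl(INADDR_LOOPBACK) };
        char buf[64], ctrl[512];
        struct iovec iov = { buf, sizeof(buf) };
        struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1,
                              .msg_control = ctrl,
                              .msg_controllen = sizeof(ctrl) };

        setsockopt(fd, IPPROTO_IP, IP_RECVERR, &on, sizeof(on));
        connect(fd, (struct sockaddr *)&dst, sizeof(dst));
        send(fd, "x", 1, 0);
        usleep(100 * 1000);              /* let the ICMP come back */

        if (recvmsg(fd, &msg, MSG_ERRQUEUE) < 0)
            perror("recvmsg(MSG_ERRQUEUE)");
        else
            puts("got queued ICMP error for the earlier send");
        close(fd);
        return 0;
    }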
704 void udp_flush_pending_frames(struct sock *sk) in udp_flush_pending_frames() argument
706 struct udp_sock *up = udp_sk(sk); in udp_flush_pending_frames()
711 ip_flush_pending_frames(sk); in udp_flush_pending_frames()
801 struct sock *sk = skb->sk; in udp_send_skb() local
802 struct inet_sock *inet = inet_sk(sk); in udp_send_skb()
805 int is_udplite = IS_UDPLITE(sk); in udp_send_skb()
822 else if (sk->sk_no_check_tx) { /* UDP csum disabled */ in udp_send_skb()
837 sk->sk_protocol, csum); in udp_send_skb()
842 err = ip_send_skb(sock_net(sk), skb); in udp_send_skb()
845 UDP_INC_STATS_USER(sock_net(sk), in udp_send_skb()
850 UDP_INC_STATS_USER(sock_net(sk), in udp_send_skb()
858 int udp_push_pending_frames(struct sock *sk) in udp_push_pending_frames() argument
860 struct udp_sock *up = udp_sk(sk); in udp_push_pending_frames()
861 struct inet_sock *inet = inet_sk(sk); in udp_push_pending_frames()
866 skb = ip_finish_skb(sk, fl4); in udp_push_pending_frames()
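udp_send_skb() finalizes one datagram: UDP-Lite coverage checksums, a zero checksum when sk->sk_no_check_tx is set, hardware offload when available, or a software checksum over sk->sk_protocol, before ip_send_skb() and the SNDBUFERRORS/OUTDATAGRAMS accounting. The sk_no_check_tx bit is the kernel side of the SO_NO_CHECK socket option; a sketch (the #define is a fallback in case libc headers omit the option):

    /* no_check.c: SO_NO_CHECK sets sk->sk_no_check_tx, so udp_send_skb()
     * emits a zero UDP checksum on IPv4 (allowed by RFC 768; UDP-Lite
     * always checksums its coverage). */
    #include <netinet/in.h>
    #include <stdio.h>
    #include <sys/socket.h>
    #include <unistd.h>

    #ifndef SO_NO_CHECK
    #define SO_NO_CHECK 11           /* asm-generic/socket.h */
    #endif

    int main(void)
    {
        int fd = socket(AF_INET, SOCK_DGRAM, 0), one = 1;
        if (setsockopt(fd, SOL_SOCKET, SO_NO_CHECK, &one, sizeof(one)) < 0)
            perror("SO_NO_CHECK");
        else
            puts("outgoing datagrams will carry checksum 0");
        close(fd);
        return 0;
    }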
879 int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) in udp_sendmsg() argument
881 struct inet_sock *inet = inet_sk(sk); in udp_sendmsg()
882 struct udp_sock *up = udp_sk(sk); in udp_sendmsg()
893 int err, is_udplite = IS_UDPLITE(sk); in udp_sendmsg()
922 lock_sock(sk); in udp_sendmsg()
925 release_sock(sk); in udp_sendmsg()
930 release_sock(sk); in udp_sendmsg()
951 if (sk->sk_state != TCP_ESTABLISHED) in udp_sendmsg()
962 ipc.oif = sk->sk_bound_dev_if; in udp_sendmsg()
964 sock_tx_timestamp(sk, &ipc.tx_flags); in udp_sendmsg()
967 err = ip_cmsg_send(sock_net(sk), msg, &ipc, in udp_sendmsg()
968 sk->sk_family == AF_INET6); in udp_sendmsg()
1000 if (sock_flag(sk, SOCK_LOCALROUTE) || in udp_sendmsg()
1017 rt = (struct rtable *)sk_dst_check(sk, 0); in udp_sendmsg()
1020 struct net *net = sock_net(sk); in udp_sendmsg()
1021 __u8 flow_flags = inet_sk_flowi_flags(sk); in udp_sendmsg()
1025 flowi4_init_output(fl4, ipc.oif, sk->sk_mark, tos, in udp_sendmsg()
1026 RT_SCOPE_UNIVERSE, sk->sk_protocol, in udp_sendmsg()
1036 security_sk_classify_flow(sk, flowi4_to_flowi(fl4)); in udp_sendmsg()
1037 rt = ip_route_output_flow(net, fl4, sk); in udp_sendmsg()
1048 !sock_flag(sk, SOCK_BROADCAST)) in udp_sendmsg()
1051 sk_dst_set(sk, dst_clone(&rt->dst)); in udp_sendmsg()
1064 skb = ip_make_skb(sk, fl4, getfrag, msg, ulen, in udp_sendmsg()
1073 lock_sock(sk); in udp_sendmsg()
1077 release_sock(sk); in udp_sendmsg()
1095 err = ip_append_data(sk, fl4, getfrag, msg, ulen, in udp_sendmsg()
1099 udp_flush_pending_frames(sk); in udp_sendmsg()
1101 err = udp_push_pending_frames(sk); in udp_sendmsg()
1102 else if (unlikely(skb_queue_empty(&sk->sk_write_queue))) in udp_sendmsg()
1104 release_sock(sk); in udp_sendmsg()
1119 if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) { in udp_sendmsg()
1120 UDP_INC_STATS_USER(sock_net(sk), in udp_sendmsg()
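udp_sendmsg() is the top of the transmit path: it takes the destination from msg_name (or the connected address), merges ancillary data through ip_cmsg_send() (line 967), resolves a route (cached via sk_dst_check() where possible), and on the uncorked fast path builds and sends a single skb with ip_make_skb(); otherwise it appends to the corked queue under lock_sock(). One thing ip_cmsg_send() accepts is IP_PKTINFO, which selects the source address per datagram. A sketch with arbitrary loopback values; _GNU_SOURCE is for glibc's struct in_pktinfo:

    /* sendmsg_pktinfo.c: choose the source address of a single datagram
     * with an IP_PKTINFO control message. */
    #define _GNU_SOURCE
    #include <arpa/inet.h>
    #include <netinet/in.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/socket.h>
    #include <unistd.h>

    int main(void)
    {
        int fd = socket(AF_INET, SOCK_DGRAM, 0);
        struct sockaddr_in dst = { .sin_family = AF_INET,
                                   .sin_port = htons(47003),
                                   .sin_addr.s_addr = htonl(INADDR_LOOPBACK) };
        char payload[] = "hello";
        struct iovec iov = { payload, sizeof(payload) - 1 };
        char ctrl[CMSG_SPACE(sizeof(struct in_pktinfo))];
        struct msghdr msg = { .msg_name = &dst, .msg_namelen = sizeof(dst),
                              .msg_iov = &iov, .msg_iovlen = 1,
                              .msg_control = ctrl,
                              .msg_controllen = sizeof(ctrl) };
        struct cmsghdr *cm;
        struct in_pktinfo *pi;

        memset(ctrl, 0, sizeof(ctrl));
        cm = CMSG_FIRSTHDR(&msg);
        cm->cmsg_level = IPPROTO_IP;
        cm->cmsg_type = IP_PKTINFO;
        cm->cmsg_len = CMSG_LEN(sizeof(*pi));
        pi = (struct in_pktinfo *)CMSG_DATA(cm);
        pi->ipi_spec_dst.s_addr = htonl(INADDR_LOOPBACK); /* source */

        if (sendmsg(fd, &msg, 0) < 0)
            perror("sendmsg");
        close(fd);
        return 0;
    }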
1134 int udp_sendpage(struct sock *sk, struct page *page, int offset, in udp_sendpage() argument
1137 struct inet_sock *inet = inet_sk(sk); in udp_sendpage()
1138 struct udp_sock *up = udp_sk(sk); in udp_sendpage()
1151 ret = udp_sendmsg(sk, &msg, 0); in udp_sendpage()
1156 lock_sock(sk); in udp_sendpage()
1159 release_sock(sk); in udp_sendpage()
1165 ret = ip_append_page(sk, &inet->cork.fl.u.ip4, in udp_sendpage()
1168 release_sock(sk); in udp_sendpage()
1169 return sock_no_sendpage(sk->sk_socket, page, offset, in udp_sendpage()
1173 udp_flush_pending_frames(sk); in udp_sendpage()
1179 ret = udp_push_pending_frames(sk); in udp_sendpage()
1183 release_sock(sk); in udp_sendpage()
1194 static unsigned int first_packet_length(struct sock *sk) in first_packet_length() argument
1196 struct sk_buff_head list_kill, *rcvq = &sk->sk_receive_queue; in first_packet_length()
1205 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, in first_packet_length()
1206 IS_UDPLITE(sk)); in first_packet_length()
1207 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, in first_packet_length()
1208 IS_UDPLITE(sk)); in first_packet_length()
1209 atomic_inc(&sk->sk_drops); in first_packet_length()
1217 bool slow = lock_sock_fast(sk); in first_packet_length()
1220 sk_mem_reclaim_partial(sk); in first_packet_length()
1221 unlock_sock_fast(sk, slow); in first_packet_length()
1230 int udp_ioctl(struct sock *sk, int cmd, unsigned long arg) in udp_ioctl() argument
1235 int amount = sk_wmem_alloc_get(sk); in udp_ioctl()
1242 unsigned int amount = first_packet_length(sk); in udp_ioctl()
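first_packet_length() peeks at the receive queue, silently discarding any leading datagrams with bad checksums (counting CSUMERRORS/INERRORS and sk_drops), and returns the length of the first valid one; udp_ioctl() exposes it as SIOCINQ/FIONREAD, and SIOCOUTQ as sk_wmem_alloc_get(). Note that on UDP, FIONREAD reports the first datagram only, not the queue total:

    /* inq.c: FIONREAD on a UDP socket == size of the first datagram. */
    #include <arpa/inet.h>
    #include <netinet/in.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <unistd.h>

    int main(void)
    {
        int fd = socket(AF_INET, SOCK_DGRAM, 0), n = 0;
        struct sockaddr_in a = { .sin_family = AF_INET, .sin_port = 0,
                                 .sin_addr.s_addr = htonl(INADDR_LOOPBACK) };
        socklen_t alen = sizeof(a);

        bind(fd, (struct sockaddr *)&a, sizeof(a));    /* ephemeral port */
        getsockname(fd, (struct sockaddr *)&a, &alen);
        sendto(fd, "12345", 5, 0, (struct sockaddr *)&a, sizeof(a));
        sendto(fd, "1234567890", 10, 0, (struct sockaddr *)&a, sizeof(a));
        usleep(50 * 1000);

        ioctl(fd, FIONREAD, &n);
        printf("FIONREAD = %d (expect 5: first datagram only)\n", n);
        close(fd);
        return 0;
    }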
1268 int udp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock, in udp_recvmsg() argument
1271 struct inet_sock *inet = inet_sk(sk); in udp_recvmsg()
1277 int is_udplite = IS_UDPLITE(sk); in udp_recvmsg()
1281 return ip_recv_error(sk, msg, len, addr_len); in udp_recvmsg()
1284 skb = __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0), in udp_recvmsg()
1321 atomic_inc(&sk->sk_drops); in udp_recvmsg()
1322 UDP_INC_STATS_USER(sock_net(sk), in udp_recvmsg()
1329 UDP_INC_STATS_USER(sock_net(sk), in udp_recvmsg()
1332 sock_recv_ts_and_drops(msg, sk, skb); in udp_recvmsg()
1350 skb_free_datagram_locked(sk, skb); in udp_recvmsg()
1355 slow = lock_sock_fast(sk); in udp_recvmsg()
1356 if (!skb_kill_datagram(sk, skb, flags)) { in udp_recvmsg()
1357 UDP_INC_STATS_USER(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite); in udp_recvmsg()
1358 UDP_INC_STATS_USER(sock_net(sk), UDP_MIB_INERRORS, is_udplite); in udp_recvmsg()
1360 unlock_sock_fast(sk, slow); in udp_recvmsg()
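udp_recvmsg() dequeues one datagram (or a queued error when MSG_ERRQUEUE is set, line 1281), copies at most the requested length, fills in the source address, and on a checksum failure discards the datagram under lock_sock_fast() while bumping CSUMERRORS/INERRORS. When the buffer is shorter than the datagram, the remainder is discarded and MSG_TRUNC is raised in msg_flags:

    /* trunc.c: a short read of a longer datagram sets MSG_TRUNC. */
    #include <arpa/inet.h>
    #include <netinet/in.h>
    #include <stdio.h>
    #include <sys/socket.h>
    #include <unistd.h>

    int main(void)
    {
        int fd = socket(AF_INET, SOCK_DGRAM, 0);
        struct sockaddr_in a = { .sin_family = AF_INET, .sin_port = 0,
                                 .sin_addr.s_addr = htonl(INADDR_LOOPBACK) };
        socklen_t alen = sizeof(a);
        char small[4];
        struct iovec iov = { small, sizeof(small) };
        struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1 };

        bind(fd, (struct sockaddr *)&a, sizeof(a));
        getsockname(fd, (struct sockaddr *)&a, &alen);
        sendto(fd, "a long datagram", 15, 0,
               (struct sockaddr *)&a, sizeof(a));

        ssize_t n = recvmsg(fd, &msg, 0);
        printf("copied %zd bytes, truncated: %s\n",
               n, (msg.msg_flags & MSG_TRUNC) ? "yes" : "no");
        close(fd);
        return 0;
    }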
1368 int udp_disconnect(struct sock *sk, int flags) in udp_disconnect() argument
1370 struct inet_sock *inet = inet_sk(sk); in udp_disconnect()
1375 sk->sk_state = TCP_CLOSE; in udp_disconnect()
1378 sock_rps_reset_rxhash(sk); in udp_disconnect()
1379 sk->sk_bound_dev_if = 0; in udp_disconnect()
1380 if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK)) in udp_disconnect()
1381 inet_reset_saddr(sk); in udp_disconnect()
1383 if (!(sk->sk_userlocks & SOCK_BINDPORT_LOCK)) { in udp_disconnect()
1384 sk->sk_prot->unhash(sk); in udp_disconnect()
1387 sk_dst_reset(sk); in udp_disconnect()
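udp_disconnect() is reached through connect() with an AF_UNSPEC address: the socket drops back to TCP_CLOSE, the rxhash and cached route are reset, and the source address (and, unless the port was explicitly bound, the port) is released. From user space:

    /* dissolve.c: connect(AF_UNSPEC) returns a connected UDP socket to
     * the unconnected state via udp_disconnect(). */
    #include <arpa/inet.h>
    #include <netinet/in.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/socket.h>
    #include <unistd.h>

    int main(void)
    {
        int fd = socket(AF_INET, SOCK_DGRAM, 0);
        struct sockaddr_in dst = { .sin_family = AF_INET,
                                   .sin_port = htons(47004),
                                   .sin_addr.s_addr = htonl(INADDR_LOOPBACK) };
        struct sockaddr sa;

        connect(fd, (struct sockaddr *)&dst, sizeof(dst));
        memset(&sa, 0, sizeof(sa));
        sa.sa_family = AF_UNSPEC;            /* dissolve the association */
        if (connect(fd, &sa, sizeof(sa)) == 0)
            puts("unconnected again; sendto() needs an explicit address");
        close(fd);
        return 0;
    }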
1392 void udp_lib_unhash(struct sock *sk) in udp_lib_unhash() argument
1394 if (sk_hashed(sk)) { in udp_lib_unhash()
1395 struct udp_table *udptable = sk->sk_prot->h.udp_table; in udp_lib_unhash()
1398 hslot = udp_hashslot(udptable, sock_net(sk), in udp_lib_unhash()
1399 udp_sk(sk)->udp_port_hash); in udp_lib_unhash()
1400 hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash); in udp_lib_unhash()
1403 if (sk_nulls_del_node_init_rcu(sk)) { in udp_lib_unhash()
1405 inet_sk(sk)->inet_num = 0; in udp_lib_unhash()
1406 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1); in udp_lib_unhash()
1409 hlist_nulls_del_init_rcu(&udp_sk(sk)->udp_portaddr_node); in udp_lib_unhash()
1421 void udp_lib_rehash(struct sock *sk, u16 newhash) in udp_lib_rehash() argument
1423 if (sk_hashed(sk)) { in udp_lib_rehash()
1424 struct udp_table *udptable = sk->sk_prot->h.udp_table; in udp_lib_rehash()
1427 hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash); in udp_lib_rehash()
1429 udp_sk(sk)->udp_portaddr_hash = newhash; in udp_lib_rehash()
1431 hslot = udp_hashslot(udptable, sock_net(sk), in udp_lib_rehash()
1432 udp_sk(sk)->udp_port_hash); in udp_lib_rehash()
1437 hlist_nulls_del_init_rcu(&udp_sk(sk)->udp_portaddr_node); in udp_lib_rehash()
1442 hlist_nulls_add_head_rcu(&udp_sk(sk)->udp_portaddr_node, in udp_lib_rehash()
1453 static void udp_v4_rehash(struct sock *sk) in udp_v4_rehash() argument
1455 u16 new_hash = udp4_portaddr_hash(sock_net(sk), in udp_v4_rehash()
1456 inet_sk(sk)->inet_rcv_saddr, in udp_v4_rehash()
1457 inet_sk(sk)->inet_num); in udp_v4_rehash()
1458 udp_lib_rehash(sk, new_hash); in udp_v4_rehash()
1461 static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) in __udp_queue_rcv_skb() argument
1465 if (inet_sk(sk)->inet_daddr) { in __udp_queue_rcv_skb()
1466 sock_rps_save_rxhash(sk, skb); in __udp_queue_rcv_skb()
1467 sk_mark_napi_id(sk, skb); in __udp_queue_rcv_skb()
1468 sk_incoming_cpu_update(sk); in __udp_queue_rcv_skb()
1471 rc = sock_queue_rcv_skb(sk, skb); in __udp_queue_rcv_skb()
1473 int is_udplite = IS_UDPLITE(sk); in __udp_queue_rcv_skb()
1477 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS, in __udp_queue_rcv_skb()
1479 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite); in __udp_queue_rcv_skb()
1481 trace_udp_fail_queue_rcv_skb(rc, sk); in __udp_queue_rcv_skb()
1505 int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) in udp_queue_rcv_skb() argument
1507 struct udp_sock *up = udp_sk(sk); in udp_queue_rcv_skb()
1509 int is_udplite = IS_UDPLITE(sk); in udp_queue_rcv_skb()
1514 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) in udp_queue_rcv_skb()
1519 int (*encap_rcv)(struct sock *sk, struct sk_buff *skb); in udp_queue_rcv_skb()
1541 ret = encap_rcv(sk, skb); in udp_queue_rcv_skb()
1543 UDP_INC_STATS_BH(sock_net(sk), in udp_queue_rcv_skb()
1587 if (rcu_access_pointer(sk->sk_filter) && in udp_queue_rcv_skb()
1591 if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) { in udp_queue_rcv_skb()
1592 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS, in udp_queue_rcv_skb()
1599 ipv4_pktinfo_prepare(sk, skb); in udp_queue_rcv_skb()
1600 bh_lock_sock(sk); in udp_queue_rcv_skb()
1601 if (!sock_owned_by_user(sk)) in udp_queue_rcv_skb()
1602 rc = __udp_queue_rcv_skb(sk, skb); in udp_queue_rcv_skb()
1603 else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) { in udp_queue_rcv_skb()
1604 bh_unlock_sock(sk); in udp_queue_rcv_skb()
1607 bh_unlock_sock(sk); in udp_queue_rcv_skb()
1612 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite); in udp_queue_rcv_skb()
1614 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite); in udp_queue_rcv_skb()
1615 atomic_inc(&sk->sk_drops); in udp_queue_rcv_skb()
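udp_queue_rcv_skb() is the per-socket receive gate: XFRM policy check, the encap_rcv hook for encapsulating sockets (line 1519), UDP-Lite coverage validation, the socket filter, and finally queueing, either directly or onto the backlog when the socket is user-locked. A datagram arriving while the queue is over sk_rcvbuf is dropped and counted as RCVBUFERRORS. A sketch that makes such drops visible, assuming the kernel clamps SO_RCVBUF to its usual small minimum:

    /* rcvbuf.c: shrink SO_RCVBUF and overfill the queue; the excess is
     * dropped in udp_queue_rcv_skb() (RcvbufErrors in /proc/net/snmp). */
    #include <arpa/inet.h>
    #include <netinet/in.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/socket.h>
    #include <unistd.h>

    int main(void)
    {
        int fd = socket(AF_INET, SOCK_DGRAM, 0), rcv = 2048, got = 0;
        struct sockaddr_in a = { .sin_family = AF_INET, .sin_port = 0,
                                 .sin_addr.s_addr = htonl(INADDR_LOOPBACK) };
        socklen_t alen = sizeof(a);
        char pkt[512] = { 0 };

        setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &rcv, sizeof(rcv));
        bind(fd, (struct sockaddr *)&a, sizeof(a));
        getsockname(fd, (struct sockaddr *)&a, &alen);
        for (int i = 0; i < 64; i++)     /* far more than the queue holds */
            sendto(fd, pkt, sizeof(pkt), 0,
                   (struct sockaddr *)&a, sizeof(a));
        usleep(50 * 1000);

        while (recv(fd, pkt, sizeof(pkt), MSG_DONTWAIT) > 0)
            got++;
        printf("queued %d of 64 datagrams; the rest were dropped\n", got);
        close(fd);
        return 0;
    }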
1625 struct sock *sk; in flush_stack() local
1628 sk = stack[i]; in flush_stack()
1633 atomic_inc(&sk->sk_drops); in flush_stack()
1634 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS, in flush_stack()
1635 IS_UDPLITE(sk)); in flush_stack()
1636 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, in flush_stack()
1637 IS_UDPLITE(sk)); in flush_stack()
1640 if (skb1 && udp_queue_rcv_skb(sk, skb1) <= 0) in flush_stack()
1643 sock_put(sk); in flush_stack()
1652 static void udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst) in udp_sk_rx_dst_set() argument
1657 old = xchg(&sk->sk_rx_dst, dst); in udp_sk_rx_dst_set()
1672 struct sock *sk, *stack[256 / sizeof(struct sock *)]; in __udp4_lib_mcast_deliver() local
1677 unsigned int count = 0, offset = offsetof(typeof(*sk), sk_nulls_node); in __udp4_lib_mcast_deliver()
1687 offset = offsetof(typeof(*sk), __sk_common.skc_portaddr_node); in __udp4_lib_mcast_deliver()
1691 sk_nulls_for_each_entry_offset(sk, node, &hslot->head, offset) { in __udp4_lib_mcast_deliver()
1692 if (__udp_is_mcast_sock(net, sk, in __udp4_lib_mcast_deliver()
1701 stack[count++] = sk; in __udp4_lib_mcast_deliver()
1702 sock_hold(sk); in __udp4_lib_mcast_deliver()
1758 struct sock *sk; in __udp4_lib_rcv() local
1789 sk = skb_steal_sock(skb); in __udp4_lib_rcv()
1790 if (sk) { in __udp4_lib_rcv()
1794 if (unlikely(sk->sk_rx_dst != dst)) in __udp4_lib_rcv()
1795 udp_sk_rx_dst_set(sk, dst); in __udp4_lib_rcv()
1797 ret = udp_queue_rcv_skb(sk, skb); in __udp4_lib_rcv()
1798 sock_put(sk); in __udp4_lib_rcv()
1811 sk = __udp4_lib_lookup_skb(skb, uh->source, uh->dest, udptable); in __udp4_lib_rcv()
1812 if (sk) { in __udp4_lib_rcv()
1815 if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk)) in __udp4_lib_rcv()
1819 ret = udp_queue_rcv_skb(sk, skb); in __udp4_lib_rcv()
1820 sock_put(sk); in __udp4_lib_rcv()
1880 struct sock *sk, *result; in __udp4_lib_mcast_demux_lookup() local
1894 sk_nulls_for_each_rcu(sk, node, &hslot->head) { in __udp4_lib_mcast_demux_lookup()
1895 if (__udp_is_mcast_sock(net, sk, in __udp4_lib_mcast_demux_lookup()
1899 result = sk; in __udp4_lib_mcast_demux_lookup()
1936 struct sock *sk, *result; in __udp4_lib_demux_lookup() local
1947 udp_portaddr_for_each_entry_rcu(sk, node, &hslot2->head) { in __udp4_lib_demux_lookup()
1948 if (INET_MATCH(sk, net, acookie, in __udp4_lib_demux_lookup()
1950 result = sk; in __udp4_lib_demux_lookup()
1958 else if (unlikely(!INET_MATCH(sk, net, acookie, in __udp4_lib_demux_lookup()
1974 struct sock *sk; in udp_v4_early_demux() local
2001 sk = __udp4_lib_mcast_demux_lookup(net, uh->dest, iph->daddr, in udp_v4_early_demux()
2004 sk = __udp4_lib_demux_lookup(net, uh->dest, iph->daddr, in udp_v4_early_demux()
2010 if (!sk) in udp_v4_early_demux()
2013 skb->sk = sk; in udp_v4_early_demux()
2015 dst = READ_ONCE(sk->sk_rx_dst); in udp_v4_early_demux()
2035 void udp_destroy_sock(struct sock *sk) in udp_destroy_sock() argument
2037 struct udp_sock *up = udp_sk(sk); in udp_destroy_sock()
2038 bool slow = lock_sock_fast(sk); in udp_destroy_sock()
2039 udp_flush_pending_frames(sk); in udp_destroy_sock()
2040 unlock_sock_fast(sk, slow); in udp_destroy_sock()
2042 void (*encap_destroy)(struct sock *sk); in udp_destroy_sock()
2045 encap_destroy(sk); in udp_destroy_sock()
2052 int udp_lib_setsockopt(struct sock *sk, int level, int optname, in udp_lib_setsockopt() argument
2056 struct udp_sock *up = udp_sk(sk); in udp_lib_setsockopt()
2059 int is_udplite = IS_UDPLITE(sk); in udp_lib_setsockopt()
2075 lock_sock(sk); in udp_lib_setsockopt()
2076 push_pending_frames(sk); in udp_lib_setsockopt()
2077 release_sock(sk); in udp_lib_setsockopt()
2145 int udp_setsockopt(struct sock *sk, int level, int optname, in udp_setsockopt() argument
2149 return udp_lib_setsockopt(sk, level, optname, optval, optlen, in udp_setsockopt()
2151 return ip_setsockopt(sk, level, optname, optval, optlen); in udp_setsockopt()
2155 int compat_udp_setsockopt(struct sock *sk, int level, int optname, in compat_udp_setsockopt() argument
2159 return udp_lib_setsockopt(sk, level, optname, optval, optlen, in compat_udp_setsockopt()
2161 return compat_ip_setsockopt(sk, level, optname, optval, optlen); in compat_udp_setsockopt()
2165 int udp_lib_getsockopt(struct sock *sk, int level, int optname, in udp_lib_getsockopt() argument
2168 struct udp_sock *up = udp_sk(sk); in udp_lib_getsockopt()
2218 int udp_getsockopt(struct sock *sk, int level, int optname, in udp_getsockopt() argument
2222 return udp_lib_getsockopt(sk, level, optname, optval, optlen); in udp_getsockopt()
2223 return ip_getsockopt(sk, level, optname, optval, optlen); in udp_getsockopt()
2227 int compat_udp_getsockopt(struct sock *sk, int level, int optname, in compat_udp_getsockopt() argument
2231 return udp_lib_getsockopt(sk, level, optname, optval, optlen); in compat_udp_getsockopt()
2232 return compat_ip_getsockopt(sk, level, optname, optval, optlen); in compat_udp_getsockopt()
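udp_lib_setsockopt() handles the SOL_UDP options shared with UDP-Lite: UDP_CORK (uncorking pushes the pending frames under lock_sock(), line 2076), UDP_ENCAP, and the UDP-Lite send/receive checksum-coverage knobs; everything else falls through to ip_setsockopt() or its compat variant. Corking from user space, with an arbitrary destination (the #define is a fallback for older libc headers):

    /* cork.c: UDP_CORK merges successive writes into one datagram. */
    #include <arpa/inet.h>
    #include <netinet/in.h>
    #include <netinet/udp.h>
    #include <stdio.h>
    #include <sys/socket.h>
    #include <unistd.h>

    #ifndef UDP_CORK
    #define UDP_CORK 1
    #endif

    int main(void)
    {
        int fd = socket(AF_INET, SOCK_DGRAM, 0), on = 1, off = 0;
        struct sockaddr_in dst = { .sin_family = AF_INET,
                                   .sin_port = htons(47005),
                                   .sin_addr.s_addr = htonl(INADDR_LOOPBACK) };
        connect(fd, (struct sockaddr *)&dst, sizeof(dst));

        setsockopt(fd, IPPROTO_UDP, UDP_CORK, &on, sizeof(on));
        send(fd, "hello ", 6, 0);        /* appended, not yet sent */
        send(fd, "world", 5, 0);         /* appended, not yet sent */
        setsockopt(fd, IPPROTO_UDP, UDP_CORK, &off, sizeof(off));
        puts("a single 11-byte datagram left the socket");
        close(fd);
        return 0;
    }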
2251 struct sock *sk = sock->sk; in udp_poll() local
2253 sock_rps_record_flow(sk); in udp_poll()
2257 !(sk->sk_shutdown & RCV_SHUTDOWN) && !first_packet_length(sk)) in udp_poll()
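udp_poll() defers to the generic datagram poll but only reports readability after first_packet_length() has skipped any leading corrupt datagrams, so a wakeup never points at a datagram that recvmsg() would have to throw away:

    /* poll_udp.c: POLLIN appears once a valid datagram heads the queue. */
    #include <arpa/inet.h>
    #include <netinet/in.h>
    #include <poll.h>
    #include <stdio.h>
    #include <sys/socket.h>
    #include <unistd.h>

    int main(void)
    {
        int fd = socket(AF_INET, SOCK_DGRAM, 0);
        struct sockaddr_in a = { .sin_family = AF_INET, .sin_port = 0,
                                 .sin_addr.s_addr = htonl(INADDR_LOOPBACK) };
        socklen_t alen = sizeof(a);
        struct pollfd pfd = { .fd = fd, .events = POLLIN };

        bind(fd, (struct sockaddr *)&a, sizeof(a));
        getsockname(fd, (struct sockaddr *)&a, &alen);
        sendto(fd, "ping", 4, 0, (struct sockaddr *)&a, sizeof(a));

        int n = poll(&pfd, 1, 1000 /* ms */);
        printf("poll=%d revents=%#x\n", n, n > 0 ? pfd.revents : 0);
        close(fd);
        return 0;
    }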
2304 struct sock *sk; in udp_get_first() local
2317 sk_nulls_for_each(sk, node, &hslot->head) { in udp_get_first()
2318 if (!net_eq(sock_net(sk), net)) in udp_get_first()
2320 if (sk->sk_family == state->family) in udp_get_first()
2325 sk = NULL; in udp_get_first()
2327 return sk; in udp_get_first()
2330 static struct sock *udp_get_next(struct seq_file *seq, struct sock *sk) in udp_get_next() argument
2336 sk = sk_nulls_next(sk); in udp_get_next()
2337 } while (sk && (!net_eq(sock_net(sk), net) || sk->sk_family != state->family)); in udp_get_next()
2339 if (!sk) { in udp_get_next()
2344 return sk; in udp_get_next()
2349 struct sock *sk = udp_get_first(seq, 0); in udp_get_idx() local
2351 if (sk) in udp_get_idx()
2352 while (pos && (sk = udp_get_next(seq, sk)) != NULL) in udp_get_idx()
2354 return pos ? NULL : sk; in udp_get_idx()
2367 struct sock *sk; in udp_seq_next() local
2370 sk = udp_get_idx(seq, 0); in udp_seq_next()
2372 sk = udp_get_next(seq, v); in udp_seq_next()
2375 return sk; in udp_seq_next()
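udp_get_first(), udp_get_next(), and udp_seq_next() implement the seq_file iterator behind /proc/net/udp (and /proc/net/udplite), walking every hash slot and filtering sockets by network namespace and address family. Reading the file shows one line per socket in the tables these matches index:

    /* proc_udp.c: dump the table the iterator above walks. */
    #include <stdio.h>

    int main(void)
    {
        FILE *f = fopen("/proc/net/udp", "r");
        char line[256];

        if (!f) {
            perror("/proc/net/udp");
            return 1;
        }
        while (fgets(line, sizeof(line), f))
            fputs(line, stdout);   /* sl, local/rem addr:port, ... drops */
        fclose(f);
        return 0;
    }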