Lines matching references to "sk" in net/packet/af_packet.c

169 static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
217 static void packet_flush_mclist(struct sock *sk);
246 static void __fanout_unlink(struct sock *sk, struct packet_sock *po);
247 static void __fanout_link(struct sock *sk, struct packet_sock *po);
340 static void register_prot_hook(struct sock *sk) in register_prot_hook() argument
342 struct packet_sock *po = pkt_sk(sk); in register_prot_hook()
346 __fanout_link(sk, po); in register_prot_hook()
350 sock_hold(sk); in register_prot_hook()
362 static void __unregister_prot_hook(struct sock *sk, bool sync) in __unregister_prot_hook() argument
364 struct packet_sock *po = pkt_sk(sk); in __unregister_prot_hook()
369 __fanout_unlink(sk, po); in __unregister_prot_hook()
373 __sock_put(sk); in __unregister_prot_hook()
382 static void unregister_prot_hook(struct sock *sk, bool sync) in unregister_prot_hook() argument
384 struct packet_sock *po = pkt_sk(sk); in unregister_prot_hook()
387 __unregister_prot_hook(sk, sync); in unregister_prot_hook()
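
register_prot_hook() and __unregister_prot_hook() attach and detach the socket's struct packet_type handler on the device layer. As a hedged aside, the sketch below shows the same hook mechanism used directly from a module via dev_add_pack()/dev_remove_pack(); it is not taken from af_packet.c, and every name in it is an illustrative assumption.

    /* Minimal sketch, not from af_packet.c: register a packet_type
     * handler with dev_add_pack(), the mechanism register_prot_hook()
     * relies on. All names here are illustrative. */
    #include <linux/module.h>
    #include <linux/netdevice.h>
    #include <linux/skbuff.h>
    #include <linux/if_ether.h>

    static int sketch_rcv(struct sk_buff *skb, struct net_device *dev,
                          struct packet_type *pt, struct net_device *orig_dev)
    {
        /* packet_rcv() plays this role for PF_PACKET sockets. */
        pr_info("sketch: %u bytes on %s\n", skb->len, dev->name);
        kfree_skb(skb);                   /* the handler owns this reference */
        return 0;
    }

    static struct packet_type sketch_pt = {
        .type = cpu_to_be16(ETH_P_ALL),   /* match every protocol */
        .func = sketch_rcv,
    };

    static int __init sketch_init(void)
    {
        dev_add_pack(&sketch_pt);         /* cf. register_prot_hook() */
        return 0;
    }

    static void __exit sketch_exit(void)
    {
        dev_remove_pack(&sketch_pt);      /* cf. __unregister_prot_hook() */
    }

    module_init(sketch_init);
    module_exit(sketch_exit);
    MODULE_LICENSE("GPL");
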
565 dev = __dev_get_by_index(sock_net(&po->sk), po->ifindex); in prb_calc_retire_blk_tmo()
678 spin_lock(&po->sk.sk_receive_queue.lock); in prb_retire_rx_blk_timer_expired()
741 spin_unlock(&po->sk.sk_receive_queue.lock); in prb_retire_rx_blk_timer_expired()
795 struct sock *sk = &po->sk; in prb_close_block() local
824 sk->sk_data_ready(sk); in prb_close_block()
1267 struct sock *sk = &po->sk; in __packet_rcv_has_room() local
1271 int avail = sk->sk_rcvbuf - atomic_read(&sk->sk_rmem_alloc) in __packet_rcv_has_room()
1273 if (avail > (sk->sk_rcvbuf >> ROOM_POW_OFF)) in __packet_rcv_has_room()
1301 spin_lock_bh(&po->sk.sk_receive_queue.lock); in packet_rcv_has_room()
1306 spin_unlock_bh(&po->sk.sk_receive_queue.lock); in packet_rcv_has_room()
1311 static void packet_sock_destruct(struct sock *sk) in packet_sock_destruct() argument
1313 skb_queue_purge(&sk->sk_error_queue); in packet_sock_destruct()
1315 WARN_ON(atomic_read(&sk->sk_rmem_alloc)); in packet_sock_destruct()
1316 WARN_ON(atomic_read(&sk->sk_wmem_alloc)); in packet_sock_destruct()
1318 if (!sock_flag(sk, SOCK_DEAD)) { in packet_sock_destruct()
1319 pr_err("Attempt to release alive packet socket: %p\n", sk); in packet_sock_destruct()
1323 sk_refcnt_debug_dec(sk); in packet_sock_destruct()
1493 static void __fanout_link(struct sock *sk, struct packet_sock *po) in __fanout_link() argument
1498 f->arr[f->num_members] = sk; in __fanout_link()
1504 static void __fanout_unlink(struct sock *sk, struct packet_sock *po) in __fanout_unlink() argument
1511 if (f->arr[i] == sk) in __fanout_unlink()
1520 static bool match_fanout_group(struct packet_type *ptype, struct sock *sk) in match_fanout_group() argument
1522 if (sk->sk_family != PF_PACKET) in match_fanout_group()
1525 return ptype->af_packet_priv == pkt_sk(sk)->fanout; in match_fanout_group()
1563 if (sock_flag(&po->sk, SOCK_FILTER_LOCKED)) in fanout_set_data_cbpf()
1584 if (sock_flag(&po->sk, SOCK_FILTER_LOCKED)) in fanout_set_data_ebpf()
1625 static int fanout_add(struct sock *sk, u16 id, u16 type_flags) in fanout_add() argument
1627 struct packet_sock *po = pkt_sk(sk); in fanout_add()
1669 read_pnet(&f->net) == sock_net(sk)) { in fanout_add()
1682 write_pnet(&match->net, sock_net(sk)); in fanout_add()
1707 __fanout_link(sk, po); in fanout_add()
1720 static void fanout_release(struct sock *sk) in fanout_release() argument
1722 struct packet_sock *po = pkt_sk(sk); in fanout_release()
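
fanout_add() and fanout_release() implement PACKET_FANOUT group membership. A minimal userspace sketch of joining a group follows, assuming a hash-mode group; the id/flags packing mirrors the val & 0xffff / val >> 16 split visible at line 3677 below. The function name and group id are illustrative assumptions.

    /* Userspace sketch: join a PACKET_FANOUT group (hash mode).
     * The group id and helper name are illustrative assumptions. */
    #include <stdio.h>
    #include <unistd.h>
    #include <sys/socket.h>
    #include <arpa/inet.h>
    #include <linux/if_packet.h>
    #include <linux/if_ether.h>

    static int join_fanout_group(int group_id)
    {
        int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
        if (fd < 0)
            return -1;

        /* Low 16 bits: group id; high 16 bits: fanout type flags. */
        int val = (group_id & 0xffff) | (PACKET_FANOUT_HASH << 16);
        if (setsockopt(fd, SOL_PACKET, PACKET_FANOUT, &val, sizeof(val)) < 0) {
            perror("PACKET_FANOUT");
            close(fd);
            return -1;
        }
        return fd;
    }
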
1765 struct sock *sk; in packet_rcv_spkt() local
1773 sk = pt->af_packet_priv; in packet_rcv_spkt()
1789 if (!net_eq(dev_net(dev), sock_net(sk))) in packet_rcv_spkt()
1819 if (sock_queue_rcv_skb(sk, skb) == 0) in packet_rcv_spkt()
1837 struct sock *sk = sock->sk; in packet_sendmsg_spkt() local
1864 dev = dev_get_by_name_rcu(sock_net(sk), saddr->spkt_device); in packet_sendmsg_spkt()
1878 if (unlikely(sock_flag(sk, SOCK_NOFCS))) { in packet_sendmsg_spkt()
1896 skb = sock_wmalloc(sk, len + reserved + tlen, 0, GFP_KERNEL); in packet_sendmsg_spkt()
1931 skb->priority = sk->sk_priority; in packet_sendmsg_spkt()
1932 skb->mark = sk->sk_mark; in packet_sendmsg_spkt()
1934 sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags); in packet_sendmsg_spkt()
1953 const struct sock *sk, in run_filter() argument
1959 filter = rcu_dereference(sk->sk_filter); in run_filter()
1982 struct sock *sk; in packet_rcv() local
1992 sk = pt->af_packet_priv; in packet_rcv()
1993 po = pkt_sk(sk); in packet_rcv()
1995 if (!net_eq(dev_net(dev), sock_net(sk))) in packet_rcv()
2008 if (sk->sk_type != SOCK_DGRAM) in packet_rcv()
2018 res = run_filter(skb, sk, snaplen); in packet_rcv()
2024 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) in packet_rcv()
2060 skb_set_owner_r(skb, sk); in packet_rcv()
2067 spin_lock(&sk->sk_receive_queue.lock); in packet_rcv()
2069 sock_skb_set_dropcount(sk, skb); in packet_rcv()
2070 __skb_queue_tail(&sk->sk_receive_queue, skb); in packet_rcv()
2071 spin_unlock(&sk->sk_receive_queue.lock); in packet_rcv()
2072 sk->sk_data_ready(sk); in packet_rcv()
2076 spin_lock(&sk->sk_receive_queue.lock); in packet_rcv()
2078 atomic_inc(&sk->sk_drops); in packet_rcv()
2079 spin_unlock(&sk->sk_receive_queue.lock); in packet_rcv()
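
run_filter() (called at line 2018 in packet_rcv()) consults sk->sk_filter and uses the returned value as the snap length; a result of zero drops the packet. A hedged userspace sketch of attaching such a filter with SO_ATTACH_FILTER follows; the one-instruction program is purely illustrative.

    /* Userspace sketch: attach a classic BPF filter to a packet socket.
     * "Return 96" accepts every frame, truncated to 96 bytes. */
    #include <sys/socket.h>
    #include <linux/filter.h>

    static int attach_snap_filter(int fd)
    {
        struct sock_filter code[] = {
            BPF_STMT(BPF_RET | BPF_K, 96),
        };
        struct sock_fprog prog = {
            .len    = sizeof(code) / sizeof(code[0]),
            .filter = code,
        };
        return setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER,
                          &prog, sizeof(prog));
    }
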
2094 struct sock *sk; in tpacket_rcv() local
2117 sk = pt->af_packet_priv; in tpacket_rcv()
2118 po = pkt_sk(sk); in tpacket_rcv()
2120 if (!net_eq(dev_net(dev), sock_net(sk))) in tpacket_rcv()
2124 if (sk->sk_type != SOCK_DGRAM) in tpacket_rcv()
2134 res = run_filter(skb, sk, snaplen); in tpacket_rcv()
2148 if (sk->sk_type == SOCK_DGRAM) { in tpacket_rcv()
2161 atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) { in tpacket_rcv()
2169 skb_set_owner_r(copy_skb, sk); in tpacket_rcv()
2188 spin_lock(&sk->sk_receive_queue.lock); in tpacket_rcv()
2207 __skb_queue_tail(&sk->sk_receive_queue, copy_skb); in tpacket_rcv()
2209 spin_unlock(&sk->sk_receive_queue.lock); in tpacket_rcv()
2292 sk->sk_data_ready(sk); in tpacket_rcv()
2308 spin_unlock(&sk->sk_receive_queue.lock); in tpacket_rcv()
2310 sk->sk_data_ready(sk); in tpacket_rcv()
2317 struct packet_sock *po = pkt_sk(skb->sk); in tpacket_destruct_skb()
2348 struct socket *sock = po->sk.sk_socket; in tpacket_fill_skb()
2357 skb->priority = po->sk.sk_priority; in tpacket_fill_skb()
2358 skb->mark = po->sk.sk_mark; in tpacket_fill_skb()
2359 sock_tx_timestamp(&po->sk, &skb_shinfo(skb)->tx_flags); in tpacket_fill_skb()
2437 atomic_add(to_write, &po->sk.sk_wmem_alloc); in tpacket_fill_skb()
2495 dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex); in tpacket_snd()
2505 if (po->sk.sk_socket->type == SOCK_RAW) in tpacket_snd()
2525 skb = sock_alloc_send_skb(&po->sk, in tpacket_snd()
2602 static struct sk_buff *packet_alloc_skb(struct sock *sk, size_t prepad, in packet_alloc_skb() argument
2613 skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock, in packet_alloc_skb()
2628 struct sock *sk = sock->sk; in packet_snd() local
2639 struct packet_sock *po = pkt_sk(sk); in packet_snd()
2661 dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex); in packet_snd()
2671 sockc.mark = sk->sk_mark; in packet_snd()
2673 err = sock_cmsg_send(sk, msg, &sockc); in packet_snd()
2730 if (unlikely(sock_flag(sk, SOCK_NOFCS))) { in packet_snd()
2745 skb = packet_alloc_skb(sk, hlen + tlen, hlen, len, in packet_snd()
2771 sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags); in packet_snd()
2781 skb->priority = sk->sk_priority; in packet_snd()
2831 struct sock *sk = sock->sk; in packet_sendmsg() local
2832 struct packet_sock *po = pkt_sk(sk); in packet_sendmsg()
2847 struct sock *sk = sock->sk; in packet_release() local
2852 if (!sk) in packet_release()
2855 net = sock_net(sk); in packet_release()
2856 po = pkt_sk(sk); in packet_release()
2859 sk_del_node_init_rcu(sk); in packet_release()
2863 sock_prot_inuse_add(net, sk->sk_prot, -1); in packet_release()
2867 unregister_prot_hook(sk, false); in packet_release()
2876 packet_flush_mclist(sk); in packet_release()
2880 packet_set_ring(sk, &req_u, 1, 0); in packet_release()
2885 packet_set_ring(sk, &req_u, 1, 1); in packet_release()
2888 fanout_release(sk); in packet_release()
2894 sock_orphan(sk); in packet_release()
2895 sock->sk = NULL; in packet_release()
2899 skb_queue_purge(&sk->sk_receive_queue); in packet_release()
2901 sk_refcnt_debug_release(sk); in packet_release()
2903 sock_put(sk); in packet_release()
2911 static int packet_do_bind(struct sock *sk, const char *name, int ifindex, in packet_do_bind() argument
2914 struct packet_sock *po = pkt_sk(sk); in packet_do_bind()
2925 lock_sock(sk); in packet_do_bind()
2930 dev = dev_get_by_name_rcu(sock_net(sk), name); in packet_do_bind()
2936 dev = dev_get_by_index_rcu(sock_net(sk), ifindex); in packet_do_bind()
2954 __unregister_prot_hook(sk, true); in packet_do_bind()
2958 unlisted = !dev_get_by_index_rcu(sock_net(sk), in packet_do_bind()
2983 register_prot_hook(sk); in packet_do_bind()
2985 sk->sk_err = ENETDOWN; in packet_do_bind()
2986 if (!sock_flag(sk, SOCK_DEAD)) in packet_do_bind()
2987 sk->sk_error_report(sk); in packet_do_bind()
2993 release_sock(sk); in packet_do_bind()
3004 struct sock *sk = sock->sk; in packet_bind_spkt() local
3015 return packet_do_bind(sk, name, 0, pkt_sk(sk)->num); in packet_bind_spkt()
3021 struct sock *sk = sock->sk; in packet_bind() local
3032 return packet_do_bind(sk, NULL, sll->sll_ifindex, in packet_bind()
3033 sll->sll_protocol ? : pkt_sk(sk)->num); in packet_bind()
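
packet_do_bind() is reached from userspace bind() with a struct sockaddr_ll (packet_bind) or sockaddr_pkt (packet_bind_spkt). A minimal sketch of the sockaddr_ll form; the helper name is an illustrative assumption.

    /* Userspace sketch: bind a packet socket to a single interface.
     * The helper name is an illustrative assumption. */
    #include <string.h>
    #include <sys/socket.h>
    #include <arpa/inet.h>
    #include <linux/if_packet.h>
    #include <linux/if_ether.h>
    #include <net/if.h>

    static int bind_to_iface(int fd, const char *ifname)
    {
        struct sockaddr_ll sll;

        memset(&sll, 0, sizeof(sll));
        sll.sll_family   = AF_PACKET;
        sll.sll_protocol = htons(ETH_P_ALL);  /* 0 would keep the current
                                                 protocol, cf. line 3033 */
        sll.sll_ifindex  = if_nametoindex(ifname);
        return bind(fd, (struct sockaddr *)&sll, sizeof(sll));
    }
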
3049 struct sock *sk; in packet_create() local
3063 sk = sk_alloc(net, PF_PACKET, GFP_KERNEL, &packet_proto, kern); in packet_create()
3064 if (sk == NULL) in packet_create()
3071 sock_init_data(sock, sk); in packet_create()
3073 po = pkt_sk(sk); in packet_create()
3074 sk->sk_family = PF_PACKET; in packet_create()
3084 sk->sk_destruct = packet_sock_destruct; in packet_create()
3085 sk_refcnt_debug_inc(sk); in packet_create()
3099 po->prot_hook.af_packet_priv = sk; in packet_create()
3103 register_prot_hook(sk); in packet_create()
3107 sk_add_node_rcu(sk, &net->packet.sklist); in packet_create()
3116 sk_free(sk); in packet_create()
3129 struct sock *sk = sock->sk; in packet_recvmsg() local
3141 if (pkt_sk(sk)->ifindex < 0) in packet_recvmsg()
3146 err = sock_recv_errqueue(sk, msg, len, in packet_recvmsg()
3160 skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err); in packet_recvmsg()
3171 if (pkt_sk(sk)->pressure) in packet_recvmsg()
3172 packet_rcv_has_room(pkt_sk(sk), NULL); in packet_recvmsg()
3174 if (pkt_sk(sk)->has_vnet_hdr) { in packet_recvmsg()
3245 sock_recv_ts_and_drops(msg, sk, skb); in packet_recvmsg()
3264 if (pkt_sk(sk)->auxdata) { in packet_recvmsg()
3297 skb_free_datagram(sk, skb); in packet_recvmsg()
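
When PACKET_AUXDATA is enabled (the pkt_sk(sk)->auxdata branch at line 3264), packet_recvmsg() appends a struct tpacket_auxdata control message to each received frame. A hedged userspace sketch of reading it; buffer sizes and names are illustrative.

    /* Userspace sketch: receive a frame plus its PACKET_AUXDATA cmsg.
     * Assumes PACKET_AUXDATA was enabled with setsockopt() beforehand. */
    #include <stdio.h>
    #include <sys/socket.h>
    #include <sys/uio.h>
    #include <linux/if_packet.h>

    static void recv_with_auxdata(int fd)
    {
        char frame[2048];
        char cbuf[CMSG_SPACE(sizeof(struct tpacket_auxdata))];
        struct iovec iov = { .iov_base = frame, .iov_len = sizeof(frame) };
        struct msghdr msg = {
            .msg_iov        = &iov,
            .msg_iovlen     = 1,
            .msg_control    = cbuf,
            .msg_controllen = sizeof(cbuf),
        };
        struct cmsghdr *cmsg;

        if (recvmsg(fd, &msg, 0) < 0)
            return;

        for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
            if (cmsg->cmsg_level == SOL_PACKET &&
                cmsg->cmsg_type == PACKET_AUXDATA) {
                struct tpacket_auxdata *aux = (void *)CMSG_DATA(cmsg);
                printf("snaplen %u, vlan tci %u\n",
                       aux->tp_snaplen, aux->tp_vlan_tci);
            }
        }
    }
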
3306 struct sock *sk = sock->sk; in packet_getname_spkt() local
3314 dev = dev_get_by_index_rcu(sock_net(sk), pkt_sk(sk)->ifindex); in packet_getname_spkt()
3327 struct sock *sk = sock->sk; in packet_getname() local
3328 struct packet_sock *po = pkt_sk(sk); in packet_getname()
3339 dev = dev_get_by_index_rcu(sock_net(sk), po->ifindex); in packet_getname()
3399 static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq) in packet_mc_add() argument
3401 struct packet_sock *po = pkt_sk(sk); in packet_mc_add()
3409 dev = __dev_get_by_index(sock_net(sk), mreq->mr_ifindex); in packet_mc_add()
3454 static int packet_mc_drop(struct sock *sk, struct packet_mreq_max *mreq) in packet_mc_drop() argument
3460 for (mlp = &pkt_sk(sk)->mclist; (ml = *mlp) != NULL; mlp = &ml->next) { in packet_mc_drop()
3468 dev = __dev_get_by_index(sock_net(sk), ml->ifindex); in packet_mc_drop()
3480 static void packet_flush_mclist(struct sock *sk) in packet_flush_mclist() argument
3482 struct packet_sock *po = pkt_sk(sk); in packet_flush_mclist()
3493 dev = __dev_get_by_index(sock_net(sk), ml->ifindex); in packet_flush_mclist()
3504 struct sock *sk = sock->sk; in packet_setsockopt() local
3505 struct packet_sock *po = pkt_sk(sk); in packet_setsockopt()
3527 ret = packet_mc_add(sk, &mreq); in packet_setsockopt()
3529 ret = packet_mc_drop(sk, &mreq); in packet_setsockopt()
3551 if (pkt_sk(sk)->has_vnet_hdr) in packet_setsockopt()
3555 return packet_set_ring(sk, &req_u, 0, in packet_setsockopt()
3567 pkt_sk(sk)->copy_thresh = val; in packet_setsockopt()
3677 return fanout_add(sk, val & 0xffff, val >> 16); in packet_setsockopt()
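
packet_setsockopt() dispatches PACKET_ADD_MEMBERSHIP to packet_mc_add() (line 3527). A minimal sketch of using that option to enable promiscuous mode; the interface lookup and helper name are illustrative.

    /* Userspace sketch: enable promiscuous mode on one interface via
     * PACKET_ADD_MEMBERSHIP, the path handled by packet_mc_add() above. */
    #include <string.h>
    #include <sys/socket.h>
    #include <linux/if_packet.h>
    #include <net/if.h>

    static int enable_promisc(int fd, const char *ifname)
    {
        struct packet_mreq mreq;

        memset(&mreq, 0, sizeof(mreq));
        mreq.mr_ifindex = if_nametoindex(ifname);
        mreq.mr_type    = PACKET_MR_PROMISC;
        return setsockopt(fd, SOL_PACKET, PACKET_ADD_MEMBERSHIP,
                          &mreq, sizeof(mreq));
    }
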
3721 struct sock *sk = sock->sk; in packet_getsockopt() local
3722 struct packet_sock *po = pkt_sk(sk); in packet_getsockopt()
3738 spin_lock_bh(&sk->sk_receive_queue.lock); in packet_getsockopt()
3741 spin_unlock_bh(&sk->sk_receive_queue.lock); in packet_getsockopt()
3833 struct sock *sk; in packet_notifier() local
3838 sk_for_each_rcu(sk, &net->packet.sklist) { in packet_notifier()
3839 struct packet_sock *po = pkt_sk(sk); in packet_notifier()
3851 __unregister_prot_hook(sk, false); in packet_notifier()
3852 sk->sk_err = ENETDOWN; in packet_notifier()
3853 if (!sock_flag(sk, SOCK_DEAD)) in packet_notifier()
3854 sk->sk_error_report(sk); in packet_notifier()
3870 register_prot_hook(sk); in packet_notifier()
3884 struct sock *sk = sock->sk; in packet_ioctl() local
3889 int amount = sk_wmem_alloc_get(sk); in packet_ioctl()
3898 spin_lock_bh(&sk->sk_receive_queue.lock); in packet_ioctl()
3899 skb = skb_peek(&sk->sk_receive_queue); in packet_ioctl()
3902 spin_unlock_bh(&sk->sk_receive_queue.lock); in packet_ioctl()
3906 return sock_get_timestamp(sk, (struct timeval __user *)arg); in packet_ioctl()
3908 return sock_get_timestampns(sk, (struct timespec __user *)arg); in packet_ioctl()
3937 struct sock *sk = sock->sk; in packet_poll() local
3938 struct packet_sock *po = pkt_sk(sk); in packet_poll()
3941 spin_lock_bh(&sk->sk_receive_queue.lock); in packet_poll()
3949 spin_unlock_bh(&sk->sk_receive_queue.lock); in packet_poll()
3950 spin_lock_bh(&sk->sk_write_queue.lock); in packet_poll()
3955 spin_unlock_bh(&sk->sk_write_queue.lock); in packet_poll()
3968 struct sock *sk = sock->sk; in packet_mm_open() local
3970 if (sk) in packet_mm_open()
3971 atomic_inc(&pkt_sk(sk)->mapped); in packet_mm_open()
3978 struct sock *sk = sock->sk; in packet_mm_close() local
3980 if (sk) in packet_mm_close()
3981 atomic_dec(&pkt_sk(sk)->mapped); in packet_mm_close()
4057 static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u, in packet_set_ring() argument
4061 struct packet_sock *po = pkt_sk(sk); in packet_set_ring()
4077 rb_queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue; in packet_set_ring()
4151 lock_sock(sk); in packet_set_ring()
4159 __unregister_prot_hook(sk, false); in packet_set_ring()
4192 register_prot_hook(sk); in packet_set_ring()
4200 release_sock(sk); in packet_set_ring()
4211 struct sock *sk = sock->sk; in packet_mmap() local
4212 struct packet_sock *po = pkt_sk(sk); in packet_mmap()
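
packet_set_ring() and packet_mmap() back the PACKET_RX_RING/PACKET_TX_RING options: the ring is allocated in the kernel and then mapped into the process with mmap(). A hedged userspace sketch of a TPACKET_V2 receive ring; the ring geometry is an illustrative assumption.

    /* Userspace sketch: create and map a TPACKET_V2 receive ring
     * (the packet_set_ring() / packet_mmap() path above).
     * Block/frame sizes are illustrative assumptions. */
    #include <stddef.h>
    #include <sys/mman.h>
    #include <sys/socket.h>
    #include <linux/if_packet.h>

    static void *map_rx_ring(int fd, struct tpacket_req *req)
    {
        int ver = TPACKET_V2;
        void *ring;

        if (setsockopt(fd, SOL_PACKET, PACKET_VERSION, &ver, sizeof(ver)) < 0)
            return NULL;

        req->tp_block_size = 4096;      /* one page per block */
        req->tp_frame_size = 2048;      /* two frames per block */
        req->tp_block_nr   = 64;
        req->tp_frame_nr   = (req->tp_block_size / req->tp_frame_size) *
                             req->tp_block_nr;
        if (setsockopt(fd, SOL_PACKET, PACKET_RX_RING, req, sizeof(*req)) < 0)
            return NULL;

        /* One contiguous mapping covering every block of the ring. */
        ring = mmap(NULL, (size_t)req->tp_block_size * req->tp_block_nr,
                    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        return ring == MAP_FAILED ? NULL : ring;
    }

Frames then become readable by polling the socket and checking each frame header's tp_status word, which is what the sk->sk_data_ready() wakeups in tpacket_rcv() above drive.
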