Lines matching refs: po (struct packet_sock *) in net/packet/af_packet.c

190 static int tpacket_snd(struct packet_sock *po, struct msghdr *msg);
194 static void *packet_previous_frame(struct packet_sock *po,
246 static void __fanout_unlink(struct sock *sk, struct packet_sock *po);
247 static void __fanout_link(struct sock *sk, struct packet_sock *po);
286 static struct net_device *packet_cached_dev_get(struct packet_sock *po) in packet_cached_dev_get() argument
291 dev = rcu_dereference(po->cached_dev); in packet_cached_dev_get()
299 static void packet_cached_dev_assign(struct packet_sock *po, in packet_cached_dev_assign() argument
302 rcu_assign_pointer(po->cached_dev, dev); in packet_cached_dev_assign()
305 static void packet_cached_dev_reset(struct packet_sock *po) in packet_cached_dev_reset() argument
307 RCU_INIT_POINTER(po->cached_dev, NULL); in packet_cached_dev_reset()
310 static bool packet_use_direct_xmit(const struct packet_sock *po) in packet_use_direct_xmit() argument
312 return po->xmit == packet_direct_xmit; in packet_use_direct_xmit()
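
po->xmit defaults to dev_queue_xmit and is swapped for packet_direct_xmit by the PACKET_QDISC_BYPASS socket option (the assignment is visible near the end of packet_setsockopt() below). A minimal userspace sketch of flipping it; the helper name is hypothetical and error handling is elided:

    #include <sys/socket.h>
    #include <linux/if_packet.h>

    /* After this, packet_use_direct_xmit() returns true for the socket
     * and TX skbs skip the qdisc layer. */
    static int enable_qdisc_bypass(int fd)
    {
        int one = 1;
        return setsockopt(fd, SOL_PACKET, PACKET_QDISC_BYPASS,
                          &one, sizeof(one));
    }
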
342 struct packet_sock *po = pkt_sk(sk); in register_prot_hook() local
344 if (!po->running) { in register_prot_hook()
345 if (po->fanout) in register_prot_hook()
346 __fanout_link(sk, po); in register_prot_hook()
348 dev_add_pack(&po->prot_hook); in register_prot_hook()
351 po->running = 1; in register_prot_hook()
364 struct packet_sock *po = pkt_sk(sk); in __unregister_prot_hook() local
366 po->running = 0; in __unregister_prot_hook()
368 if (po->fanout) in __unregister_prot_hook()
369 __fanout_unlink(sk, po); in __unregister_prot_hook()
371 __dev_remove_pack(&po->prot_hook); in __unregister_prot_hook()
376 spin_unlock(&po->bind_lock); in __unregister_prot_hook()
378 spin_lock(&po->bind_lock); in __unregister_prot_hook()
384 struct packet_sock *po = pkt_sk(sk); in unregister_prot_hook() local
386 if (po->running) in unregister_prot_hook()
397 static void __packet_set_status(struct packet_sock *po, void *frame, int status) in __packet_set_status() argument
402 switch (po->tp_version) { in __packet_set_status()
420 static int __packet_get_status(struct packet_sock *po, void *frame) in __packet_get_status() argument
427 switch (po->tp_version) { in __packet_get_status()
458 static __u32 __packet_set_timestamp(struct packet_sock *po, void *frame, in __packet_set_timestamp() argument
465 if (!(ts_status = tpacket_get_timestamp(skb, &ts, po->tp_tstamp))) in __packet_set_timestamp()
469 switch (po->tp_version) { in __packet_set_timestamp()
491 static void *packet_lookup_frame(struct packet_sock *po, in packet_lookup_frame() argument
505 if (status != __packet_get_status(po, h.raw)) in packet_lookup_frame()
511 static void *packet_current_frame(struct packet_sock *po, in packet_current_frame() argument
515 return packet_lookup_frame(po, rb, rb->head, status); in packet_current_frame()
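
__packet_set_status() and __packet_get_status() implement the kernel half of the ring ownership handshake: a slot whose tp_status is TP_STATUS_KERNEL belongs to the kernel, TP_STATUS_USER hands it to the consumer, and packet_lookup_frame() only returns a frame whose status matches what the caller asked for. A sketch of the matching userspace half for TPACKET_V2, assuming frame points into the mmap()ed ring; a production consumer also needs memory barriers around the status accesses:

    #include <linux/if_packet.h>

    static void consume_frame(void *frame)
    {
        struct tpacket2_hdr *hdr = frame;

        if (!(hdr->tp_status & TP_STATUS_USER))
            return;                        /* still owned by the kernel */
        /* ... read hdr->tp_len bytes at (char *)frame + hdr->tp_mac ... */
        hdr->tp_status = TP_STATUS_KERNEL; /* hand the slot back */
    }
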
523 static void prb_shutdown_retire_blk_timer(struct packet_sock *po, in prb_shutdown_retire_blk_timer() argument
528 pkc = GET_PBDQC_FROM_RB(&po->rx_ring); in prb_shutdown_retire_blk_timer()
537 static void prb_init_blk_timer(struct packet_sock *po, in prb_init_blk_timer() argument
542 pkc->retire_blk_timer.data = (long)po; in prb_init_blk_timer()
547 static void prb_setup_retire_blk_timer(struct packet_sock *po) in prb_setup_retire_blk_timer() argument
551 pkc = GET_PBDQC_FROM_RB(&po->rx_ring); in prb_setup_retire_blk_timer()
552 prb_init_blk_timer(po, pkc, prb_retire_rx_blk_timer_expired); in prb_setup_retire_blk_timer()
555 static int prb_calc_retire_blk_tmo(struct packet_sock *po, in prb_calc_retire_blk_tmo() argument
565 dev = __dev_get_by_index(sock_net(&po->sk), po->ifindex); in prb_calc_retire_blk_tmo()
604 static void init_prb_bdqc(struct packet_sock *po, in init_prb_bdqc() argument
620 p1->hdrlen = po->tp_hdrlen; in init_prb_bdqc()
621 p1->version = po->tp_version; in init_prb_bdqc()
623 po->stats.stats3.tp_freeze_q_cnt = 0; in init_prb_bdqc()
627 p1->retire_blk_tov = prb_calc_retire_blk_tmo(po, in init_prb_bdqc()
634 prb_setup_retire_blk_timer(po); in init_prb_bdqc()
673 struct packet_sock *po = (struct packet_sock *)data; in prb_retire_rx_blk_timer_expired() local
674 struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(&po->rx_ring); in prb_retire_rx_blk_timer_expired()
678 spin_lock(&po->sk.sk_receive_queue.lock); in prb_retire_rx_blk_timer_expired()
708 prb_retire_current_block(pkc, po, TP_STATUS_BLK_TMO); in prb_retire_rx_blk_timer_expired()
709 if (!prb_dispatch_next_block(pkc, po)) in prb_retire_rx_blk_timer_expired()
741 spin_unlock(&po->sk.sk_receive_queue.lock); in prb_retire_rx_blk_timer_expired()
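
The retire timer exists so a partially filled TPACKET_V3 block is still handed to userspace on a quiet link: when it fires, prb_retire_rx_blk_timer_expired() closes the current block and dispatches the next one. The timeout comes from userspace via struct tpacket_req3, with 0 meaning "let prb_calc_retire_blk_tmo() derive one from link speed". A sketch of the request; the sizes are illustrative only:

    #include <string.h>
    #include <linux/if_packet.h>

    static void fill_req3(struct tpacket_req3 *req)
    {
        memset(req, 0, sizeof(*req));
        req->tp_block_size = 1 << 22;            /* 4 MiB per block */
        req->tp_block_nr = 64;
        req->tp_frame_size = 2048;
        req->tp_frame_nr = (req->tp_block_size / req->tp_frame_size)
                           * req->tp_block_nr;   /* must stay consistent */
        req->tp_retire_blk_tov = 60;             /* block timeout, ms */
    }
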
789 struct packet_sock *po, unsigned int stat) in prb_close_block() argument
795 struct sock *sk = &po->sk; in prb_close_block()
797 if (po->stats.stats3.tp_drops) in prb_close_block()
902 struct packet_sock *po) in prb_freeze_queue() argument
905 po->stats.stats3.tp_freeze_q_cnt++; in prb_freeze_queue()
917 struct packet_sock *po) in prb_dispatch_next_block() argument
928 prb_freeze_queue(pkc, po); in prb_dispatch_next_block()
942 struct packet_sock *po, unsigned int status) in prb_retire_current_block() argument
963 prb_close_block(pkc, pbd, po, status); in prb_retire_current_block()
1041 static void *__packet_lookup_frame_in_block(struct packet_sock *po, in __packet_lookup_frame_in_block() argument
1051 pkc = GET_PBDQC_FROM_RB(&po->rx_ring); in __packet_lookup_frame_in_block()
1086 prb_retire_current_block(pkc, po, 0); in __packet_lookup_frame_in_block()
1089 curr = (char *)prb_dispatch_next_block(pkc, po); in __packet_lookup_frame_in_block()
1103 static void *packet_current_rx_frame(struct packet_sock *po, in packet_current_rx_frame() argument
1108 switch (po->tp_version) { in packet_current_rx_frame()
1111 curr = packet_lookup_frame(po, &po->rx_ring, in packet_current_rx_frame()
1112 po->rx_ring.head, status); in packet_current_rx_frame()
1115 return __packet_lookup_frame_in_block(po, skb, status, len); in packet_current_rx_frame()
1123 static void *prb_lookup_block(struct packet_sock *po, in prb_lookup_block() argument
1147 static void *__prb_previous_block(struct packet_sock *po, in __prb_previous_block() argument
1152 return prb_lookup_block(po, rb, previous, status); in __prb_previous_block()
1155 static void *packet_previous_rx_frame(struct packet_sock *po, in packet_previous_rx_frame() argument
1159 if (po->tp_version <= TPACKET_V2) in packet_previous_rx_frame()
1160 return packet_previous_frame(po, rb, status); in packet_previous_rx_frame()
1162 return __prb_previous_block(po, rb, status); in packet_previous_rx_frame()
1165 static void packet_increment_rx_head(struct packet_sock *po, in packet_increment_rx_head() argument
1168 switch (po->tp_version) { in packet_increment_rx_head()
1180 static void *packet_previous_frame(struct packet_sock *po, in packet_previous_frame() argument
1185 return packet_lookup_frame(po, rb, previous, status); in packet_previous_frame()
1218 static int packet_alloc_pending(struct packet_sock *po) in packet_alloc_pending() argument
1220 po->rx_ring.pending_refcnt = NULL; in packet_alloc_pending()
1222 po->tx_ring.pending_refcnt = alloc_percpu(unsigned int); in packet_alloc_pending()
1223 if (unlikely(po->tx_ring.pending_refcnt == NULL)) in packet_alloc_pending()
1229 static void packet_free_pending(struct packet_sock *po) in packet_free_pending() argument
1231 free_percpu(po->tx_ring.pending_refcnt); in packet_free_pending()
1239 static bool __tpacket_has_room(struct packet_sock *po, int pow_off) in __tpacket_has_room() argument
1243 len = po->rx_ring.frame_max + 1; in __tpacket_has_room()
1244 idx = po->rx_ring.head; in __tpacket_has_room()
1249 return packet_lookup_frame(po, &po->rx_ring, idx, TP_STATUS_KERNEL); in __tpacket_has_room()
1252 static bool __tpacket_v3_has_room(struct packet_sock *po, int pow_off) in __tpacket_v3_has_room() argument
1256 len = po->rx_ring.prb_bdqc.knum_blocks; in __tpacket_v3_has_room()
1257 idx = po->rx_ring.prb_bdqc.kactive_blk_num; in __tpacket_v3_has_room()
1262 return prb_lookup_block(po, &po->rx_ring, idx, TP_STATUS_KERNEL); in __tpacket_v3_has_room()
1265 static int __packet_rcv_has_room(struct packet_sock *po, struct sk_buff *skb) in __packet_rcv_has_room() argument
1267 struct sock *sk = &po->sk; in __packet_rcv_has_room()
1270 if (po->prot_hook.func != tpacket_rcv) { in __packet_rcv_has_room()
1281 if (po->tp_version == TPACKET_V3) { in __packet_rcv_has_room()
1282 if (__tpacket_v3_has_room(po, ROOM_POW_OFF)) in __packet_rcv_has_room()
1284 else if (__tpacket_v3_has_room(po, 0)) in __packet_rcv_has_room()
1287 if (__tpacket_has_room(po, ROOM_POW_OFF)) in __packet_rcv_has_room()
1289 else if (__tpacket_has_room(po, 0)) in __packet_rcv_has_room()
1296 static int packet_rcv_has_room(struct packet_sock *po, struct sk_buff *skb) in packet_rcv_has_room() argument
1301 spin_lock_bh(&po->sk.sk_receive_queue.lock); in packet_rcv_has_room()
1302 ret = __packet_rcv_has_room(po, skb); in packet_rcv_has_room()
1304 if (po->pressure == has_room) in packet_rcv_has_room()
1305 po->pressure = !has_room; in packet_rcv_has_room()
1306 spin_unlock_bh(&po->sk.sk_receive_queue.lock); in packet_rcv_has_room()
1326 static bool fanout_flow_is_huge(struct packet_sock *po, struct sk_buff *skb) in fanout_flow_is_huge() argument
1333 if (po->rollover->history[i] == rxhash) in fanout_flow_is_huge()
1336 po->rollover->history[prandom_u32() % ROLLOVER_HLEN] = rxhash; in fanout_flow_is_huge()
1375 struct packet_sock *po, *po_next, *po_skip = NULL; in fanout_demux_rollover() local
1378 po = pkt_sk(f->arr[idx]); in fanout_demux_rollover()
1381 room = packet_rcv_has_room(po, skb); in fanout_demux_rollover()
1383 (room == ROOM_LOW && !fanout_flow_is_huge(po, skb))) in fanout_demux_rollover()
1385 po_skip = po; in fanout_demux_rollover()
1388 i = j = min_t(int, po->rollover->sock, num - 1); in fanout_demux_rollover()
1394 po->rollover->sock = i; in fanout_demux_rollover()
1395 atomic_long_inc(&po->rollover->num); in fanout_demux_rollover()
1397 atomic_long_inc(&po->rollover->num_huge); in fanout_demux_rollover()
1405 atomic_long_inc(&po->rollover->num_failed); in fanout_demux_rollover()
1443 struct packet_sock *po; in packet_rcv_fanout() local
1485 po = pkt_sk(f->arr[idx]); in packet_rcv_fanout()
1486 return po->prot_hook.func(skb, dev, &po->prot_hook, orig_dev); in packet_rcv_fanout()
1493 static void __fanout_link(struct sock *sk, struct packet_sock *po) in __fanout_link() argument
1495 struct packet_fanout *f = po->fanout; in __fanout_link()
1504 static void __fanout_unlink(struct sock *sk, struct packet_sock *po) in __fanout_unlink() argument
1506 struct packet_fanout *f = po->fanout; in __fanout_unlink()
1556 static int fanout_set_data_cbpf(struct packet_sock *po, char __user *data, in fanout_set_data_cbpf() argument
1563 if (sock_flag(&po->sk, SOCK_FILTER_LOCKED)) in fanout_set_data_cbpf()
1574 __fanout_set_data_bpf(po->fanout, new); in fanout_set_data_cbpf()
1578 static int fanout_set_data_ebpf(struct packet_sock *po, char __user *data, in fanout_set_data_ebpf() argument
1584 if (sock_flag(&po->sk, SOCK_FILTER_LOCKED)) in fanout_set_data_ebpf()
1599 __fanout_set_data_bpf(po->fanout, new); in fanout_set_data_ebpf()
1603 static int fanout_set_data(struct packet_sock *po, char __user *data, in fanout_set_data() argument
1606 switch (po->fanout->type) { in fanout_set_data()
1608 return fanout_set_data_cbpf(po, data, len); in fanout_set_data()
1610 return fanout_set_data_ebpf(po, data, len); in fanout_set_data()
1627 struct packet_sock *po = pkt_sk(sk); in fanout_add() local
1649 if (!po->running) in fanout_add()
1652 if (po->fanout) in fanout_add()
1657 po->rollover = kzalloc(sizeof(*po->rollover), GFP_KERNEL); in fanout_add()
1658 if (!po->rollover) in fanout_add()
1660 atomic_long_set(&po->rollover->num, 0); in fanout_add()
1661 atomic_long_set(&po->rollover->num_huge, 0); in fanout_add()
1662 atomic_long_set(&po->rollover->num_failed, 0); in fanout_add()
1690 match->prot_hook.type = po->prot_hook.type; in fanout_add()
1691 match->prot_hook.dev = po->prot_hook.dev; in fanout_add()
1700 match->prot_hook.type == po->prot_hook.type && in fanout_add()
1701 match->prot_hook.dev == po->prot_hook.dev) { in fanout_add()
1704 __dev_remove_pack(&po->prot_hook); in fanout_add()
1705 po->fanout = match; in fanout_add()
1707 __fanout_link(sk, po); in fanout_add()
1714 kfree(po->rollover); in fanout_add()
1715 po->rollover = NULL; in fanout_add()
1722 struct packet_sock *po = pkt_sk(sk); in fanout_release() local
1725 f = po->fanout; in fanout_release()
1730 po->fanout = NULL; in fanout_release()
1740 if (po->rollover) in fanout_release()
1741 kfree_rcu(po->rollover, rcu); in fanout_release()
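
fanout_add() creates or joins the group identified by the low 16 bits of the option value and insists that every member share the group's protocol and device; __fanout_link() then makes the socket eligible for packet_rcv_fanout() demux. For PACKET_FANOUT_CBPF/EBPF groups the classifier itself is installed afterwards through PACKET_FANOUT_DATA (fanout_set_data() above). A sketch of joining a hash-demux group with rollover on overload; the group id 42 is arbitrary:

    #include <sys/socket.h>
    #include <linux/if_packet.h>

    static int join_fanout(int fd)
    {
        int arg = 42 | ((PACKET_FANOUT_HASH |
                         PACKET_FANOUT_FLAG_ROLLOVER) << 16);
        return setsockopt(fd, SOL_PACKET, PACKET_FANOUT,
                          &arg, sizeof(arg));
    }
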
1984 struct packet_sock *po; in packet_rcv() local
1993 po = pkt_sk(sk); in packet_rcv()
2045 if (unlikely(po->origdev)) in packet_rcv()
2068 po->stats.stats1.tp_packets++; in packet_rcv()
2077 po->stats.stats1.tp_drops++; in packet_rcv()
2095 struct packet_sock *po; in tpacket_rcv() local
2118 po = pkt_sk(sk); in tpacket_rcv()
2149 macoff = netoff = TPACKET_ALIGN(po->tp_hdrlen) + 16 + in tpacket_rcv()
2150 po->tp_reserve; in tpacket_rcv()
2153 netoff = TPACKET_ALIGN(po->tp_hdrlen + in tpacket_rcv()
2155 po->tp_reserve; in tpacket_rcv()
2158 if (po->tp_version <= TPACKET_V2) { in tpacket_rcv()
2159 if (macoff + snaplen > po->rx_ring.frame_size) { in tpacket_rcv()
2160 if (po->copy_thresh && in tpacket_rcv()
2171 snaplen = po->rx_ring.frame_size - macoff; in tpacket_rcv()
2176 GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len)) { in tpacket_rcv()
2179 nval = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len - macoff; in tpacket_rcv()
2185 macoff = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len; in tpacket_rcv()
2189 h.raw = packet_current_rx_frame(po, skb, in tpacket_rcv()
2193 if (po->tp_version <= TPACKET_V2) { in tpacket_rcv()
2194 packet_increment_rx_head(po, &po->rx_ring); in tpacket_rcv()
2201 if (po->stats.stats1.tp_drops) in tpacket_rcv()
2204 po->stats.stats1.tp_packets++; in tpacket_rcv()
2213 if (!(ts_status = tpacket_get_timestamp(skb, &ts, po->tp_tstamp))) in tpacket_rcv()
2218 switch (po->tp_version) { in tpacket_rcv()
2270 if (unlikely(po->origdev)) in tpacket_rcv()
2278 if (po->tp_version <= TPACKET_V2) { in tpacket_rcv()
2290 if (po->tp_version <= TPACKET_V2) { in tpacket_rcv()
2291 __packet_set_status(po, h.raw, status); in tpacket_rcv()
2294 prb_clear_blk_fill_status(&po->rx_ring); in tpacket_rcv()
2307 po->stats.stats1.tp_drops++; in tpacket_rcv()
2317 struct packet_sock *po = pkt_sk(skb->sk); in tpacket_destruct_skb() local
2319 if (likely(po->tx_ring.pg_vec)) { in tpacket_destruct_skb()
2324 packet_dec_pending(&po->tx_ring); in tpacket_destruct_skb()
2326 ts = __packet_set_timestamp(po, ph, skb); in tpacket_destruct_skb()
2327 __packet_set_status(po, ph, TP_STATUS_AVAILABLE | ts); in tpacket_destruct_skb()
2342 static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb, in tpacket_fill_skb() argument
2348 struct socket *sock = po->sk.sk_socket; in tpacket_fill_skb()
2357 skb->priority = po->sk.sk_priority; in tpacket_fill_skb()
2358 skb->mark = po->sk.sk_mark; in tpacket_fill_skb()
2359 sock_tx_timestamp(&po->sk, &skb_shinfo(skb)->tx_flags); in tpacket_fill_skb()
2362 switch (po->tp_version) { in tpacket_fill_skb()
2378 if (unlikely(po->tp_tx_has_off)) { in tpacket_fill_skb()
2380 off_min = po->tp_hdrlen - sizeof(struct sockaddr_ll); in tpacket_fill_skb()
2381 off_max = po->tx_ring.frame_size - tp_len; in tpacket_fill_skb()
2383 switch (po->tp_version) { in tpacket_fill_skb()
2392 switch (po->tp_version) { in tpacket_fill_skb()
2405 data = ph.raw + po->tp_hdrlen - sizeof(struct sockaddr_ll); in tpacket_fill_skb()
2437 atomic_add(to_write, &po->sk.sk_wmem_alloc); in tpacket_fill_skb()
2464 static int tpacket_snd(struct packet_sock *po, struct msghdr *msg) in tpacket_snd() argument
2479 mutex_lock(&po->pg_vec_lock); in tpacket_snd()
2482 dev = packet_cached_dev_get(po); in tpacket_snd()
2483 proto = po->num; in tpacket_snd()
2495 dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex); in tpacket_snd()
2505 if (po->sk.sk_socket->type == SOCK_RAW) in tpacket_snd()
2507 size_max = po->tx_ring.frame_size in tpacket_snd()
2508 - (po->tp_hdrlen - sizeof(struct sockaddr_ll)); in tpacket_snd()
2514 ph = packet_current_frame(po, &po->tx_ring, in tpacket_snd()
2525 skb = sock_alloc_send_skb(&po->sk, in tpacket_snd()
2535 tp_len = tpacket_fill_skb(po, skb, ph, dev, size_max, proto, in tpacket_snd()
2543 if (po->tp_loss) { in tpacket_snd()
2544 __packet_set_status(po, ph, in tpacket_snd()
2546 packet_increment_head(&po->tx_ring); in tpacket_snd()
2559 __packet_set_status(po, ph, TP_STATUS_SENDING); in tpacket_snd()
2560 packet_inc_pending(&po->tx_ring); in tpacket_snd()
2563 err = po->xmit(skb); in tpacket_snd()
2566 if (err && __packet_get_status(po, ph) == in tpacket_snd()
2578 packet_increment_head(&po->tx_ring); in tpacket_snd()
2587 (need_wait && packet_read_pending(&po->tx_ring)))); in tpacket_snd()
2593 __packet_set_status(po, ph, status); in tpacket_snd()
2598 mutex_unlock(&po->pg_vec_lock); in tpacket_snd()
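
tpacket_snd() walks the TX ring for frames marked TP_STATUS_SEND_REQUEST, turns each into an skb with tpacket_fill_skb(), flips it to TP_STATUS_SENDING, and tpacket_destruct_skb() later returns the slot to TP_STATUS_AVAILABLE. A sketch of the matching userspace producer for TPACKET_V2; slot is assumed to be an AVAILABLE ring slot on a bound socket:

    #include <string.h>
    #include <sys/socket.h>
    #include <linux/if_packet.h>

    static int tx_one(int fd, void *slot, const void *pkt, unsigned int len)
    {
        struct tpacket2_hdr *hdr = slot;
        /* frame data starts right after the aligned V2 header,
         * matching tpacket_fill_skb()'s data offset */
        char *data = (char *)slot + TPACKET_ALIGN(sizeof(*hdr));

        memcpy(data, pkt, len);
        hdr->tp_len = len;
        hdr->tp_status = TP_STATUS_SEND_REQUEST;
        return send(fd, NULL, 0, 0);       /* kick tpacket_snd() */
    }
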
2639 struct packet_sock *po = pkt_sk(sk); in packet_snd() local
2650 dev = packet_cached_dev_get(po); in packet_snd()
2651 proto = po->num; in packet_snd()
2680 if (po->has_vnet_hdr) { in packet_snd()
2786 if (po->has_vnet_hdr) { in packet_snd()
2812 err = po->xmit(skb); in packet_snd()
2832 struct packet_sock *po = pkt_sk(sk); in packet_sendmsg() local
2834 if (po->tx_ring.pg_vec) in packet_sendmsg()
2835 return tpacket_snd(po, msg); in packet_sendmsg()
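
Without a mapped TX ring, packet_sendmsg() falls through to packet_snd(), which builds one skb per call. A per-call transmit sketch; ifindex and the zeroed hardware address are placeholders, and for SOCK_RAW the buffer must already carry its link-layer header:

    #include <string.h>
    #include <sys/socket.h>
    #include <arpa/inet.h>
    #include <linux/if_packet.h>
    #include <linux/if_ether.h>

    static int send_frame(int fd, int ifindex, const void *frame, size_t len)
    {
        struct sockaddr_ll sll;

        memset(&sll, 0, sizeof(sll));
        sll.sll_family = AF_PACKET;
        sll.sll_protocol = htons(ETH_P_ALL);
        sll.sll_ifindex = ifindex;
        sll.sll_halen = ETH_ALEN;
        return sendto(fd, frame, len, 0,
                      (struct sockaddr *)&sll, sizeof(sll));
    }
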
2848 struct packet_sock *po; in packet_release() local
2856 po = pkt_sk(sk); in packet_release()
2866 spin_lock(&po->bind_lock); in packet_release()
2868 packet_cached_dev_reset(po); in packet_release()
2870 if (po->prot_hook.dev) { in packet_release()
2871 dev_put(po->prot_hook.dev); in packet_release()
2872 po->prot_hook.dev = NULL; in packet_release()
2874 spin_unlock(&po->bind_lock); in packet_release()
2878 if (po->rx_ring.pg_vec) { in packet_release()
2883 if (po->tx_ring.pg_vec) { in packet_release()
2900 packet_free_pending(po); in packet_release()
2914 struct packet_sock *po = pkt_sk(sk); in packet_do_bind() local
2922 if (po->fanout) in packet_do_bind()
2926 spin_lock(&po->bind_lock); in packet_do_bind()
2946 proto_curr = po->prot_hook.type; in packet_do_bind()
2947 dev_curr = po->prot_hook.dev; in packet_do_bind()
2952 if (po->running) { in packet_do_bind()
2956 dev_curr = po->prot_hook.dev; in packet_do_bind()
2962 po->num = proto; in packet_do_bind()
2963 po->prot_hook.type = proto; in packet_do_bind()
2967 po->prot_hook.dev = NULL; in packet_do_bind()
2968 po->ifindex = -1; in packet_do_bind()
2969 packet_cached_dev_reset(po); in packet_do_bind()
2971 po->prot_hook.dev = dev; in packet_do_bind()
2972 po->ifindex = dev ? dev->ifindex : 0; in packet_do_bind()
2973 packet_cached_dev_assign(po, dev); in packet_do_bind()
2992 spin_unlock(&po->bind_lock); in packet_do_bind()
3050 struct packet_sock *po; in packet_create() local
3073 po = pkt_sk(sk); in packet_create()
3075 po->num = proto; in packet_create()
3076 po->xmit = dev_queue_xmit; in packet_create()
3078 err = packet_alloc_pending(po); in packet_create()
3082 packet_cached_dev_reset(po); in packet_create()
3091 spin_lock_init(&po->bind_lock); in packet_create()
3092 mutex_init(&po->pg_vec_lock); in packet_create()
3093 po->rollover = NULL; in packet_create()
3094 po->prot_hook.func = packet_rcv; in packet_create()
3097 po->prot_hook.func = packet_rcv_spkt; in packet_create()
3099 po->prot_hook.af_packet_priv = sk; in packet_create()
3102 po->prot_hook.type = proto; in packet_create()
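
packet_create() installs packet_rcv (or packet_rcv_spkt for SOCK_PACKET) as the prot hook and defaults po->xmit to dev_queue_xmit; a subsequent bind() runs packet_do_bind(), which retargets the hook at one device and caches it. The userspace pair that drives both paths, sketched with if_nametoindex() for the device lookup:

    #include <string.h>
    #include <unistd.h>
    #include <sys/socket.h>
    #include <arpa/inet.h>
    #include <net/if.h>
    #include <linux/if_packet.h>
    #include <linux/if_ether.h>

    static int open_bound(const char *ifname)
    {
        struct sockaddr_ll sll;
        int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));

        if (fd < 0)
            return -1;
        memset(&sll, 0, sizeof(sll));
        sll.sll_family = AF_PACKET;
        sll.sll_protocol = htons(ETH_P_ALL);
        sll.sll_ifindex = if_nametoindex(ifname);
        if (bind(fd, (struct sockaddr *)&sll, sizeof(sll)) < 0) {
            close(fd);
            return -1;
        }
        return fd;
    }
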
3328 struct packet_sock *po = pkt_sk(sk); in packet_getname() local
3335 sll->sll_ifindex = po->ifindex; in packet_getname()
3336 sll->sll_protocol = po->num; in packet_getname()
3339 dev = dev_get_by_index_rcu(sock_net(sk), po->ifindex); in packet_getname()
3401 struct packet_sock *po = pkt_sk(sk); in packet_mc_add() local
3423 for (ml = po->mclist; ml; ml = ml->next) { in packet_mc_add()
3441 i->next = po->mclist; in packet_mc_add()
3442 po->mclist = i; in packet_mc_add()
3445 po->mclist = i->next; in packet_mc_add()
3482 struct packet_sock *po = pkt_sk(sk); in packet_flush_mclist() local
3485 if (!po->mclist) in packet_flush_mclist()
3489 while ((ml = po->mclist) != NULL) { in packet_flush_mclist()
3492 po->mclist = ml->next; in packet_flush_mclist()
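
packet_mc_add() records each membership on po->mclist so packet_flush_mclist() can drop them all at release time. The common use is promiscuous mode scoped to the socket's lifetime, sketched below:

    #include <string.h>
    #include <sys/socket.h>
    #include <linux/if_packet.h>

    static int add_promisc(int fd, int ifindex)
    {
        struct packet_mreq mr;

        memset(&mr, 0, sizeof(mr));
        mr.mr_ifindex = ifindex;
        mr.mr_type = PACKET_MR_PROMISC;
        return setsockopt(fd, SOL_PACKET, PACKET_ADD_MEMBERSHIP,
                          &mr, sizeof(mr));
    }
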
3505 struct packet_sock *po = pkt_sk(sk); in packet_setsockopt() local
3539 switch (po->tp_version) { in packet_setsockopt()
3576 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) in packet_setsockopt()
3584 po->tp_version = val; in packet_setsockopt()
3596 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) in packet_setsockopt()
3600 po->tp_reserve = val; in packet_setsockopt()
3609 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) in packet_setsockopt()
3613 po->tp_loss = !!val; in packet_setsockopt()
3625 po->auxdata = !!val; in packet_setsockopt()
3637 po->origdev = !!val; in packet_setsockopt()
3646 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) in packet_setsockopt()
3653 po->has_vnet_hdr = !!val; in packet_setsockopt()
3665 po->tp_tstamp = val; in packet_setsockopt()
3681 if (!po->fanout) in packet_setsockopt()
3684 return fanout_set_data(po, optval, optlen); in packet_setsockopt()
3692 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) in packet_setsockopt()
3696 po->tp_tx_has_off = !!val; in packet_setsockopt()
3708 po->xmit = val ? packet_direct_xmit : dev_queue_xmit; in packet_setsockopt()
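
Note the recurring `if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)` guard above: PACKET_VERSION, PACKET_RESERVE, PACKET_LOSS, PACKET_VNET_HDR and PACKET_TX_HAS_OFF all fail with EBUSY once a ring exists, so they must be set before the ring request. A sketch of the required ordering for a V3 RX ring:

    #include <sys/socket.h>
    #include <linux/if_packet.h>

    static int setup_v3_ring(int fd, const struct tpacket_req3 *req)
    {
        int ver = TPACKET_V3;

        /* version first: rejected with EBUSY after the ring exists */
        if (setsockopt(fd, SOL_PACKET, PACKET_VERSION,
                       &ver, sizeof(ver)) < 0)
            return -1;
        return setsockopt(fd, SOL_PACKET, PACKET_RX_RING,
                          req, sizeof(*req));
    }
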
3722 struct packet_sock *po = pkt_sk(sk); in packet_getsockopt() local
3739 memcpy(&st, &po->stats, sizeof(st)); in packet_getsockopt()
3740 memset(&po->stats, 0, sizeof(po->stats)); in packet_getsockopt()
3743 if (po->tp_version == TPACKET_V3) { in packet_getsockopt()
3755 val = po->auxdata; in packet_getsockopt()
3758 val = po->origdev; in packet_getsockopt()
3761 val = po->has_vnet_hdr; in packet_getsockopt()
3764 val = po->tp_version; in packet_getsockopt()
3786 val = po->tp_reserve; in packet_getsockopt()
3789 val = po->tp_loss; in packet_getsockopt()
3792 val = po->tp_tstamp; in packet_getsockopt()
3795 val = (po->fanout ? in packet_getsockopt()
3796 ((u32)po->fanout->id | in packet_getsockopt()
3797 ((u32)po->fanout->type << 16) | in packet_getsockopt()
3798 ((u32)po->fanout->flags << 24)) : in packet_getsockopt()
3802 if (!po->rollover) in packet_getsockopt()
3804 rstats.tp_all = atomic_long_read(&po->rollover->num); in packet_getsockopt()
3805 rstats.tp_huge = atomic_long_read(&po->rollover->num_huge); in packet_getsockopt()
3806 rstats.tp_failed = atomic_long_read(&po->rollover->num_failed); in packet_getsockopt()
3811 val = po->tp_tx_has_off; in packet_getsockopt()
3814 val = packet_use_direct_xmit(po); in packet_getsockopt()
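
The PACKET_STATISTICS branch copies po->stats and then zeroes it, so the counters are read-and-clear and only cover the interval since the previous query. A sketch for the V1/V2 layout; TPACKET_V3 sockets return the larger struct tpacket_stats_v3 instead, as the version check above shows:

    #include <sys/socket.h>
    #include <linux/if_packet.h>

    static unsigned int drops_since_last(int fd)
    {
        struct tpacket_stats st;
        socklen_t len = sizeof(st);

        if (getsockopt(fd, SOL_PACKET, PACKET_STATISTICS, &st, &len) < 0)
            return 0;
        return st.tp_drops;    /* reset to 0 by this read */
    }
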
3839 struct packet_sock *po = pkt_sk(sk); in packet_notifier() local
3843 if (po->mclist) in packet_notifier()
3844 packet_dev_mclist_delete(dev, &po->mclist); in packet_notifier()
3848 if (dev->ifindex == po->ifindex) { in packet_notifier()
3849 spin_lock(&po->bind_lock); in packet_notifier()
3850 if (po->running) { in packet_notifier()
3857 packet_cached_dev_reset(po); in packet_notifier()
3858 po->ifindex = -1; in packet_notifier()
3859 if (po->prot_hook.dev) in packet_notifier()
3860 dev_put(po->prot_hook.dev); in packet_notifier()
3861 po->prot_hook.dev = NULL; in packet_notifier()
3863 spin_unlock(&po->bind_lock); in packet_notifier()
3867 if (dev->ifindex == po->ifindex) { in packet_notifier()
3868 spin_lock(&po->bind_lock); in packet_notifier()
3869 if (po->num) in packet_notifier()
3871 spin_unlock(&po->bind_lock); in packet_notifier()
3938 struct packet_sock *po = pkt_sk(sk); in packet_poll() local
3942 if (po->rx_ring.pg_vec) { in packet_poll()
3943 if (!packet_previous_rx_frame(po, &po->rx_ring, in packet_poll()
3947 if (po->pressure && __packet_rcv_has_room(po, NULL) == ROOM_NORMAL) in packet_poll()
3948 po->pressure = 0; in packet_poll()
3951 if (po->tx_ring.pg_vec) { in packet_poll()
3952 if (packet_current_frame(po, &po->tx_ring, TP_STATUS_AVAILABLE)) in packet_poll()
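
packet_poll() reports readable when the previous RX slot is already user-owned and writable when a TX slot is TP_STATUS_AVAILABLE; it also clears po->pressure once __packet_rcv_has_room() sees ROOM_NORMAL again. The consumer side is an ordinary poll() loop:

    #include <poll.h>

    static short wait_ring(int fd)
    {
        struct pollfd pfd = { .fd = fd, .events = POLLIN | POLLOUT };

        return poll(&pfd, 1, -1) < 0 ? 0 : pfd.revents;
    }
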
4061 struct packet_sock *po = pkt_sk(sk); in packet_set_ring() local
4071 if (!closing && tx_ring && (po->tp_version > TPACKET_V2)) { in packet_set_ring()
4076 rb = tx_ring ? &po->tx_ring : &po->rx_ring; in packet_set_ring()
4081 if (atomic_read(&po->mapped)) in packet_set_ring()
4093 switch (po->tp_version) { in packet_set_ring()
4095 po->tp_hdrlen = TPACKET_HDRLEN; in packet_set_ring()
4098 po->tp_hdrlen = TPACKET2_HDRLEN; in packet_set_ring()
4101 po->tp_hdrlen = TPACKET3_HDRLEN; in packet_set_ring()
4110 if (po->tp_version >= TPACKET_V3 && in packet_set_ring()
4114 if (unlikely(req->tp_frame_size < po->tp_hdrlen + in packet_set_ring()
4115 po->tp_reserve)) in packet_set_ring()
4132 switch (po->tp_version) { in packet_set_ring()
4138 init_prb_bdqc(po, rb, pg_vec, req_u); in packet_set_ring()
4154 spin_lock(&po->bind_lock); in packet_set_ring()
4155 was_running = po->running; in packet_set_ring()
4156 num = po->num; in packet_set_ring()
4158 po->num = 0; in packet_set_ring()
4161 spin_unlock(&po->bind_lock); in packet_set_ring()
4166 mutex_lock(&po->pg_vec_lock); in packet_set_ring()
4167 if (closing || atomic_read(&po->mapped) == 0) { in packet_set_ring()
4180 po->prot_hook.func = (po->rx_ring.pg_vec) ? in packet_set_ring()
4183 if (atomic_read(&po->mapped)) in packet_set_ring()
4185 atomic_read(&po->mapped)); in packet_set_ring()
4187 mutex_unlock(&po->pg_vec_lock); in packet_set_ring()
4189 spin_lock(&po->bind_lock); in packet_set_ring()
4191 po->num = num; in packet_set_ring()
4194 spin_unlock(&po->bind_lock); in packet_set_ring()
4195 if (closing && (po->tp_version > TPACKET_V2)) { in packet_set_ring()
4198 prb_shutdown_retire_blk_timer(po, rb_queue); in packet_set_ring()
4212 struct packet_sock *po = pkt_sk(sk); in packet_mmap() local
4222 mutex_lock(&po->pg_vec_lock); in packet_mmap()
4225 for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) { in packet_mmap()
4241 for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) { in packet_mmap()
4261 atomic_inc(&po->mapped); in packet_mmap()
4266 mutex_unlock(&po->pg_vec_lock); in packet_mmap()
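
packet_mmap() requires one contiguous mapping covering the RX ring followed by the TX ring (the two loops over rb above) and bumps po->mapped, which in turn blocks later ring-geometry changes in packet_set_ring(). The userspace side, assuming total is the sum of tp_block_size * tp_block_nr over the configured rings:

    #include <sys/mman.h>

    static void *map_ring(int fd, size_t total)
    {
        return mmap(NULL, total, PROT_READ | PROT_WRITE,
                    MAP_SHARED, fd, 0);
    }
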
4351 const struct packet_sock *po = pkt_sk(s); in packet_seq_show() local
4358 ntohs(po->num), in packet_seq_show()
4359 po->ifindex, in packet_seq_show()
4360 po->running, in packet_seq_show()