gso_skb          3344 drivers/net/usb/lan78xx.c 			goto gso_skb;
gso_skb          3371 drivers/net/usb/lan78xx.c gso_skb:
gso_skb           100 include/net/sch_generic.h 	struct sk_buff_head	gso_skb ____cacheline_aligned_in_smp;
gso_skb          1066 include/net/sch_generic.h 	struct sk_buff *skb = skb_peek(&sch->gso_skb);
gso_skb          1073 include/net/sch_generic.h 			__skb_queue_head(&sch->gso_skb, skb);
gso_skb          1112 include/net/sch_generic.h 	struct sk_buff *skb = skb_peek(&sch->gso_skb);
gso_skb          1115 include/net/sch_generic.h 		skb = __skb_dequeue(&sch->gso_skb);
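
The include/net/sch_generic.h hits above show struct Qdisc carrying a gso_skb sk_buff_head together with peek, requeue-to-head and dequeue call sites. A minimal sketch of that peek-then-dequeue pattern follows; the function names are illustrative (not kernel symbols) and the qlen/backlog accounting the real helpers perform is left out.

#include <linux/skbuff.h>
#include <net/sch_generic.h>

/* Peek at the next packet, parking it on sch->gso_skb if it had to be
 * pulled from the underlying queue, so a later dequeue sees the same skb.
 */
static struct sk_buff *peek_stashed(struct Qdisc *sch)
{
        struct sk_buff *skb = skb_peek(&sch->gso_skb);

        if (!skb) {
                skb = sch->dequeue(sch);
                if (skb)
                        __skb_queue_head(&sch->gso_skb, skb);
        }
        return skb;
}

/* Dequeue for real: hand out the parked packet first, if any. */
static struct sk_buff *take_stashed(struct Qdisc *sch)
{
        struct sk_buff *skb = skb_peek(&sch->gso_skb);

        if (skb)
                skb = __skb_dequeue(&sch->gso_skb);
        else
                skb = sch->dequeue(sch);
        return skb;
}
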
gso_skb           173 include/net/udp.h struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb,
gso_skb            65 net/ipv4/tcp_offload.c 	struct sk_buff *gso_skb = skb;
gso_skb            93 net/ipv4/tcp_offload.c 	copy_destructor = gso_skb->destructor == tcp_wfree;
gso_skb            94 net/ipv4/tcp_offload.c 	ooo_okay = gso_skb->ooo_okay;
gso_skb           118 net/ipv4/tcp_offload.c 	if (unlikely(skb_shinfo(gso_skb)->tx_flags & SKBTX_SW_TSTAMP))
gso_skb           119 net/ipv4/tcp_offload.c 		tcp_gso_tstamp(segs, skb_shinfo(gso_skb)->tskey, seq, mss);
gso_skb           135 net/ipv4/tcp_offload.c 			skb->destructor = gso_skb->destructor;
gso_skb           136 net/ipv4/tcp_offload.c 			skb->sk = gso_skb->sk;
gso_skb           154 net/ipv4/tcp_offload.c 		swap(gso_skb->sk, skb->sk);
gso_skb           155 net/ipv4/tcp_offload.c 		swap(gso_skb->destructor, skb->destructor);
gso_skb           157 net/ipv4/tcp_offload.c 		delta = sum_truesize - gso_skb->truesize;
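
The net/ipv4/tcp_offload.c hits trace how the original packet, aliased as gso_skb, hands its socket reference and tcp_wfree destructor over to the segments produced by segmentation. The sketch below is one reading of that ownership-transfer pattern, assuming the segments are linked via skb->next; it is not the kernel function itself, and the sequence-number and header fixups done in the same loop are omitted.

#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/tcp.h>

static void transfer_ownership(struct sk_buff *gso_skb, struct sk_buff *segs)
{
        bool copy_destructor = gso_skb->destructor == tcp_wfree;
        unsigned int sum_truesize = 0;
        struct sk_buff *skb = segs;
        int delta;

        /* Every segment but the last borrows the original sk/destructor. */
        while (skb->next) {
                if (copy_destructor) {
                        skb->destructor = gso_skb->destructor;
                        skb->sk = gso_skb->sk;
                        sum_truesize += skb->truesize;
                }
                skb = skb->next;
        }

        if (!copy_destructor)
                return;

        /* The last segment takes over the original skb's reference ... */
        swap(gso_skb->sk, skb->sk);
        swap(gso_skb->destructor, skb->destructor);
        sum_truesize += skb->truesize;

        /* ... and the socket is charged (or refunded) the truesize delta so
         * write-memory accounting stays balanced.
         */
        delta = sum_truesize - gso_skb->truesize;
        if (delta >= 0)
                refcount_add(delta, &skb->sk->sk_wmem_alloc);
        else
                WARN_ON_ONCE(refcount_sub_and_test(-delta, &skb->sk->sk_wmem_alloc));
}
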
gso_skb           187 net/ipv4/udp_offload.c struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb,
gso_skb           190 net/ipv4/udp_offload.c 	struct sock *sk = gso_skb->sk;
gso_skb           199 net/ipv4/udp_offload.c 	mss = skb_shinfo(gso_skb)->gso_size;
gso_skb           200 net/ipv4/udp_offload.c 	if (gso_skb->len <= sizeof(*uh) + mss)
gso_skb           203 net/ipv4/udp_offload.c 	skb_pull(gso_skb, sizeof(*uh));
gso_skb           206 net/ipv4/udp_offload.c 	copy_dtor = gso_skb->destructor == sock_wfree;
gso_skb           208 net/ipv4/udp_offload.c 		gso_skb->destructor = NULL;
gso_skb           210 net/ipv4/udp_offload.c 	segs = skb_segment(gso_skb, features);
gso_skb           213 net/ipv4/udp_offload.c 			gso_skb->destructor = sock_wfree;
gso_skb           228 net/ipv4/udp_offload.c 	skb_shinfo(seg)->tskey = skb_shinfo(gso_skb)->tskey;
gso_skb           230 net/ipv4/udp_offload.c 			(skb_shinfo(gso_skb)->tx_flags & SKBTX_ANY_TSTAMP);
gso_skb           274 net/ipv4/udp_offload.c 		int delta = sum_truesize - gso_skb->truesize;
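
The net/ipv4/udp_offload.c hits belong to __udp_gso_segment(), prototyped in include/net/udp.h above. The recognisable pattern: the sock_wfree destructor is detached so skb_segment() operates on a bare skb, then either restored on failure or re-attached to every segment, with the summed truesize delta charged back to the socket. A simplified, hedged sketch of that detach/re-attach accounting follows; the UDP header pull, length/checksum rewriting and the tskey/tx_flags timestamp copy visible in the hits are omitted.

#include <linux/err.h>
#include <linux/skbuff.h>
#include <net/sock.h>

static struct sk_buff *segment_detached(struct sk_buff *gso_skb,
                                        netdev_features_t features)
{
        bool copy_dtor = gso_skb->destructor == sock_wfree;
        struct sock *sk = gso_skb->sk;
        unsigned int sum_truesize = 0;
        struct sk_buff *segs, *seg;
        int delta;

        /* Detach the accounting hook before segmenting. */
        if (copy_dtor)
                gso_skb->destructor = NULL;

        segs = skb_segment(gso_skb, features);
        if (IS_ERR_OR_NULL(segs)) {
                /* No segments produced: the original keeps its accounting. */
                if (copy_dtor)
                        gso_skb->destructor = sock_wfree;
                return segs;
        }

        if (!copy_dtor)
                return segs;

        /* Re-attach sk and destructor to each segment, totting up truesize. */
        for (seg = segs; seg; seg = seg->next) {
                seg->destructor = sock_wfree;
                seg->sk = sk;
                sum_truesize += seg->truesize;
        }

        /* Charge (or refund) the difference against the socket's write memory. */
        delta = sum_truesize - gso_skb->truesize;
        if (delta >= 0)
                refcount_add(delta, &sk->sk_wmem_alloc);
        else
                WARN_ON_ONCE(refcount_sub_and_test(-delta, &sk->sk_wmem_alloc));

        return segs;
}
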
gso_skb           756 net/netfilter/nfnetlink_queue.c 		struct sk_buff *gso_skb = entry->skb;
gso_skb           760 net/netfilter/nfnetlink_queue.c 			entry->skb = gso_skb;
gso_skb           132 net/sched/sch_generic.c 		__skb_queue_tail(&q->gso_skb, skb);
gso_skb           209 net/sched/sch_generic.c 	if (unlikely(!skb_queue_empty(&q->gso_skb))) {
gso_skb           217 net/sched/sch_generic.c 		skb = skb_peek(&q->gso_skb);
gso_skb           235 net/sched/sch_generic.c 			skb = __skb_dequeue(&q->gso_skb);
gso_skb           562 net/sched/sch_generic.c 	.gso_skb = {
gso_skb           563 net/sched/sch_generic.c 		.next = (struct sk_buff *)&noop_qdisc.gso_skb,
gso_skb           564 net/sched/sch_generic.c 		.prev = (struct sk_buff *)&noop_qdisc.gso_skb,
gso_skb           566 net/sched/sch_generic.c 		.lock = __SPIN_LOCK_UNLOCKED(noop_qdisc.gso_skb.lock),
gso_skb           835 net/sched/sch_generic.c 	__skb_queue_head_init(&sch->gso_skb);
gso_skb           917 net/sched/sch_generic.c 	skb_queue_walk_safe(&qdisc->gso_skb, skb, tmp) {
gso_skb           918 net/sched/sch_generic.c 		__skb_unlink(skb, &qdisc->gso_skb);
gso_skb           968 net/sched/sch_generic.c 	skb_queue_walk_safe(&qdisc->gso_skb, skb, tmp) {
gso_skb           969 net/sched/sch_generic.c 		__skb_unlink(skb, &qdisc->gso_skb);
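
Finally, the net/sched/sch_generic.c hits cover the life cycle of the per-qdisc gso_skb list: a self-pointing static initializer for noop_qdisc, __skb_queue_head_init() at allocation time, __skb_queue_tail() when a packet has to be requeued after a failed transmit, an emptiness check on the dequeue fast path, and a safe-walk drain on reset/destroy. The sketch below strings those queue operations together under illustrative names; qdisc locking, statistics and the rest of the reset path are left out.

#include <linux/skbuff.h>
#include <net/sch_generic.h>

/* Allocation time: empty list; the list lock is initialised elsewhere. */
static void gso_stash_init(struct Qdisc *sch)
{
        __skb_queue_head_init(&sch->gso_skb);
}

/* A transmit attempt failed: park the packet so the next dequeue retries it. */
static void gso_stash_requeue(struct Qdisc *sch, struct sk_buff *skb)
{
        __skb_queue_tail(&sch->gso_skb, skb);
}

/* Reset/destroy: unlink and free every parked packet. */
static void gso_stash_flush(struct Qdisc *sch)
{
        struct sk_buff *skb, *tmp;

        skb_queue_walk_safe(&sch->gso_skb, skb, tmp) {
                __skb_unlink(skb, &sch->gso_skb);
                kfree_skb(skb);
        }
}
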