skb1 641 drivers/atm/iphase.c struct sk_buff *skb = NULL, *skb1 = NULL;
skb1 666 drivers/atm/iphase.c skb1 = skb_dequeue(&iavcc->txing_skb);
skb1 667 drivers/atm/iphase.c while (skb1 && (skb1 != skb)) {
skb1 668 drivers/atm/iphase.c if (!(IA_SKB_STATE(skb1) & IA_TX_DONE)) {
skb1 672 drivers/atm/iphase.c if ((vcc->pop) && (skb1->len != 0))
skb1 674 drivers/atm/iphase.c vcc->pop(vcc, skb1);
skb1 676 drivers/atm/iphase.c (long)skb1);)
skb1 679 drivers/atm/iphase.c dev_kfree_skb_any(skb1);
skb1 680 drivers/atm/iphase.c skb1 = skb_dequeue(&iavcc->txing_skb);
skb1 682 drivers/atm/iphase.c if (!skb1) {
skb1 1098 drivers/net/ethernet/amd/ni65.c struct sk_buff *skb1 = p->recv_skb[p->rmdnum];
skb1 1102 drivers/net/ethernet/amd/ni65.c skb = skb1;
skb1 2046 drivers/net/ethernet/qlogic/qla3xxx.c struct sk_buff *skb1 = NULL, *skb2;
skb1 2060 drivers/net/ethernet/qlogic/qla3xxx.c skb1 = lrg_buf_cb1->skb;
skb1 2062 drivers/net/ethernet/qlogic/qla3xxx.c if (*((u16 *) skb1->data) != 0xFFFF)
skb1 2083 drivers/net/ethernet/qlogic/qla3xxx.c skb_copy_from_linear_data_offset(skb1, VLAN_ID_LEN,
skb1 2700 drivers/net/vxlan.c struct sk_buff *skb1;
skb1 2706 drivers/net/vxlan.c skb1 = skb_clone(skb, GFP_ATOMIC);
skb1 2707 drivers/net/vxlan.c if (skb1)
skb1 2708 drivers/net/vxlan.c vxlan_xmit_one(skb1, dev, vni, rdst, did_rsc);
skb1 1315 drivers/net/wireless/ath/ath6kl/txrx.c struct sk_buff *skb1 = NULL;
skb1 1572 drivers/net/wireless/ath/ath6kl/txrx.c skb1 = skb_copy(skb, GFP_ATOMIC);
skb1 1583 drivers/net/wireless/ath/ath6kl/txrx.c skb1 = skb;
skb1 1590 drivers/net/wireless/ath/ath6kl/txrx.c if (skb1)
skb1 1591 drivers/net/wireless/ath/ath6kl/txrx.c ath6kl_data_tx(skb1, vif->ndev);
skb1 1063 include/linux/skbuff.h struct sk_buff skb1;
skb1 1084 include/linux/skbuff.h fclones = container_of(skb, struct sk_buff_fclones, skb1);
skb1 3522 include/linux/skbuff.h void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len);
skb1 856 net/batman-adv/send.c struct sk_buff *skb1;
skb1 939 net/batman-adv/send.c skb1 = skb_clone(forw_packet->skb, GFP_ATOMIC);
skb1 940 net/batman-adv/send.c if (skb1)
skb1 941 net/batman-adv/send.c batadv_send_broadcast_skb(skb1, hard_iface);
skb1 244 net/core/skbuff.c fclones = container_of(skb, struct sk_buff_fclones, skb1);
skb1 627 net/core/skbuff.c fclones = container_of(skb, struct sk_buff_fclones, skb1);
skb1 1434 net/core/skbuff.c skb1);
skb1 3191 net/core/skbuff.c struct sk_buff* skb1,
skb1 3196 net/core/skbuff.c skb_copy_from_linear_data_offset(skb, len, skb_put(skb1, pos - len),
skb1 3200 net/core/skbuff.c skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i];
skb1 3202 net/core/skbuff.c skb_shinfo(skb1)->nr_frags = skb_shinfo(skb)->nr_frags;
skb1 3204 net/core/skbuff.c skb1->data_len = skb->data_len;
skb1 3205 net/core/skbuff.c skb1->len += skb1->data_len;
skb1 3212 net/core/skbuff.c struct sk_buff* skb1,
skb1 3219 net/core/skbuff.c skb1->len = skb1->data_len = skb->len - len;
skb1 3227 net/core/skbuff.c skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i];
skb1 3239 net/core/skbuff.c skb_frag_off_add(&skb_shinfo(skb1)->frags[0], len - pos);
skb1 3240 net/core/skbuff.c skb_frag_size_sub(&skb_shinfo(skb1)->frags[0], len - pos);
skb1 3249 net/core/skbuff.c skb_shinfo(skb1)->nr_frags = k;
skb1 3258 net/core/skbuff.c void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len)
skb1 3262 net/core/skbuff.c skb_shinfo(skb1)->tx_flags |= skb_shinfo(skb)->tx_flags &
skb1 3264 net/core/skbuff.c skb_zerocopy_clone(skb1, skb, 0);
skb1 3266 net/core/skbuff.c skb_split_inside_header(skb, skb1, len, pos);
skb1 3268 net/core/skbuff.c skb_split_no_header(skb, skb1, len, pos);
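The net/core/skbuff.c entries at 3191-3268 above are the body of skb_split(), declared at include/linux/skbuff.h:3522: it moves everything past the first len bytes of skb into a caller-supplied skb1, taking the inside-header path when the cut falls in the linear area and the no-header path otherwise. A minimal caller sketch (not taken from the index; split_at() and its allocation sizing are illustrative):

#include <linux/skbuff.h>

/* Hypothetical helper: cut 'skb' after 'len' bytes and return a new
 * buffer holding the tail.  Real callers (e.g. TSO segmentation paths)
 * also size the second buffer for headers they will push later. */
static struct sk_buff *split_at(struct sk_buff *skb, u32 len)
{
	unsigned int linear;
	struct sk_buff *skb1;

	if (len >= skb->len)
		return NULL;			/* nothing past the cut */

	/* If the cut falls inside the linear area, skb_split() will
	 * skb_put() the linear remainder into skb1 (see the
	 * skb_copy_from_linear_data_offset() call at line 3196), so
	 * give skb1 that much tailroom. */
	linear = skb_headlen(skb) > len ? skb_headlen(skb) - len : 0;

	skb1 = alloc_skb(skb_headroom(skb) + linear, GFP_ATOMIC);
	if (!skb1)
		return NULL;
	skb_reserve(skb1, skb_headroom(skb));

	skb_split(skb, skb1, len);		/* skb keeps bytes [0, len) */
	return skb1;
}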
skb1 4307 net/core/skbuff.c struct sk_buff *skb1, **skb_p;
skb1 4339 net/core/skbuff.c while ((skb1 = *skb_p) != NULL) {
skb1 4346 net/core/skbuff.c if (skb_shared(skb1))
skb1 4351 net/core/skbuff.c if (skb1->next == NULL && tailbits) {
skb1 4352 net/core/skbuff.c if (skb_shinfo(skb1)->nr_frags ||
skb1 4353 net/core/skbuff.c skb_has_frag_list(skb1) ||
skb1 4354 net/core/skbuff.c skb_tailroom(skb1) < tailbits)
skb1 4359 net/core/skbuff.c skb_cloned(skb1) ||
skb1 4361 net/core/skbuff.c skb_shinfo(skb1)->nr_frags ||
skb1 4362 net/core/skbuff.c skb_has_frag_list(skb1)) {
skb1 4367 net/core/skbuff.c skb2 = skb_copy(skb1, GFP_ATOMIC);
skb1 4369 net/core/skbuff.c skb2 = skb_copy_expand(skb1,
skb1 4370 net/core/skbuff.c skb_headroom(skb1),
skb1 4376 net/core/skbuff.c if (skb1->sk)
skb1 4377 net/core/skbuff.c skb_set_owner_w(skb2, skb1->sk);
skb1 4382 net/core/skbuff.c skb2->next = skb1->next;
skb1 4384 net/core/skbuff.c kfree_skb(skb1);
skb1 4385 net/core/skbuff.c skb1 = skb2;
skb1 4388 net/core/skbuff.c *trailer = skb1;
skb1 4389 net/core/skbuff.c skb_p = &skb1->next;
skb1 380 net/ipv4/icmp.c struct sk_buff *skb1;
skb1 382 net/ipv4/icmp.c skb_queue_walk(&sk->sk_write_queue, skb1) {
skb1 383 net/ipv4/icmp.c csum = csum_add(csum, skb1->csum);
skb1 4549 net/ipv4/tcp_input.c struct sk_buff *skb1;
skb1 4606 net/ipv4/tcp_input.c skb1 = rb_to_skb(parent);
skb1 4607 net/ipv4/tcp_input.c if (before(seq, TCP_SKB_CB(skb1)->seq)) {
skb1 4611 net/ipv4/tcp_input.c if (before(seq, TCP_SKB_CB(skb1)->end_seq)) {
skb1 4612 net/ipv4/tcp_input.c if (!after(end_seq, TCP_SKB_CB(skb1)->end_seq)) {
skb1 4621 net/ipv4/tcp_input.c if (after(seq, TCP_SKB_CB(skb1)->seq)) {
skb1 4623 net/ipv4/tcp_input.c tcp_dsack_set(sk, seq, TCP_SKB_CB(skb1)->end_seq);
skb1 4628 net/ipv4/tcp_input.c rb_replace_node(&skb1->rbnode, &skb->rbnode,
skb1 4631 net/ipv4/tcp_input.c TCP_SKB_CB(skb1)->seq,
skb1 4632 net/ipv4/tcp_input.c TCP_SKB_CB(skb1)->end_seq);
skb1 4635 net/ipv4/tcp_input.c tcp_drop(sk, skb1);
skb1 4638 net/ipv4/tcp_input.c } else if (tcp_ooo_try_coalesce(sk, skb1,
skb1 4651 net/ipv4/tcp_input.c while ((skb1 = skb_rb_next(skb)) != NULL) {
skb1 4652 net/ipv4/tcp_input.c if (!after(end_seq, TCP_SKB_CB(skb1)->seq))
skb1 4654 net/ipv4/tcp_input.c if (before(end_seq, TCP_SKB_CB(skb1)->end_seq)) {
skb1 4655 net/ipv4/tcp_input.c tcp_dsack_extend(sk, TCP_SKB_CB(skb1)->seq,
skb1 4659 net/ipv4/tcp_input.c rb_erase(&skb1->rbnode, &tp->out_of_order_queue);
skb1 4660 net/ipv4/tcp_input.c tcp_dsack_extend(sk, TCP_SKB_CB(skb1)->seq,
skb1 4661 net/ipv4/tcp_input.c TCP_SKB_CB(skb1)->end_seq);
skb1 4663 net/ipv4/tcp_input.c tcp_drop(sk, skb1);
skb1 4666 net/ipv4/tcp_input.c if (!skb1)
skb1 4890 net/ipv4/tcp_input.c struct sk_buff *skb1;
skb1 4894 net/ipv4/tcp_input.c skb1 = rb_to_skb(parent);
skb1 4895 net/ipv4/tcp_input.c if (before(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb1)->seq))
skb1 363 net/llc/llc_sap.c struct sk_buff *skb1;
skb1 367 net/llc/llc_sap.c skb1 = skb_clone(skb, GFP_ATOMIC);
skb1 368 net/llc/llc_sap.c if (!skb1) {
skb1 373 net/llc/llc_sap.c llc_sap_rcv(sap, skb1, stack[i]);
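The drivers/net/vxlan.c, net/batman-adv/send.c and net/llc/llc_sap.c entries all show the same fan-out idiom: skb_clone() a copy per extra destination, skip a destination when the clone fails, and hand the original skb to the final consumer. A sketch of that idiom, assuming a hypothetical fan_out() helper and a deliver() callback standing in for vxlan_xmit_one()/batadv_send_broadcast_skb()/llc_sap_rcv():

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static void fan_out(struct sk_buff *skb, struct net_device **devs, int n,
		    void (*deliver)(struct sk_buff *, struct net_device *))
{
	struct sk_buff *skb1;
	int i;

	if (n <= 0) {
		kfree_skb(skb);		/* no destinations: drop */
		return;
	}

	for (i = 0; i < n - 1; i++) {
		/* skb_clone() shares the payload but gives each
		 * destination a private header; on allocation failure
		 * this destination is simply skipped, as the vxlan and
		 * batman-adv entries above do. */
		skb1 = skb_clone(skb, GFP_ATOMIC);
		if (skb1)
			deliver(skb1, devs[i]);
	}
	deliver(skb, devs[n - 1]);	/* last consumer takes the original */
}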
skb1 155 net/sched/sch_choke.c static bool choke_match_flow(struct sk_buff *skb1,
skb1 160 net/sched/sch_choke.c if (skb1->protocol != skb2->protocol)
skb1 163 net/sched/sch_choke.c if (!choke_skb_cb(skb1)->keys_valid) {
skb1 164 net/sched/sch_choke.c choke_skb_cb(skb1)->keys_valid = 1;
skb1 165 net/sched/sch_choke.c skb_flow_dissect_flow_keys(skb1, &temp, 0);
skb1 166 net/sched/sch_choke.c make_flow_keys_digest(&choke_skb_cb(skb1)->keys, &temp);
skb1 175 net/sched/sch_choke.c return !memcmp(&choke_skb_cb(skb1)->keys,
skb1 177 net/sched/sch_choke.c sizeof(choke_skb_cb(skb1)->keys));
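choke_match_flow() decides whether two queued packets belong to the same flow: it dissects each packet into struct flow_keys, collapses the keys into a fixed-size flow_keys_digest, and memcmp()s the digests, caching the digest in the skb control block via choke_skb_cb() so each packet is dissected at most once. A standalone sketch of the same comparison (same_flow() is illustrative and recomputes both digests instead of caching):

#include <linux/skbuff.h>
#include <linux/string.h>
#include <net/flow_dissector.h>

static bool same_flow(struct sk_buff *skb1, struct sk_buff *skb2)
{
	struct flow_keys keys;
	struct flow_keys_digest d1, d2;

	if (skb1->protocol != skb2->protocol)
		return false;

	/* Dissect each packet into flow keys, then reduce the keys to
	 * a fixed-size digest that can be compared bytewise. */
	skb_flow_dissect_flow_keys(skb1, &keys, 0);
	make_flow_keys_digest(&d1, &keys);

	skb_flow_dissect_flow_keys(skb2, &keys, 0);
	make_flow_keys_digest(&d2, &keys);

	return !memcmp(&d1, &d2, sizeof(d1));
}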